2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libavcodec/opt.h"
25 #include "libavutil/avstring.h"
35 * @file libavformat/utils.c
36 * various utility functions for use within FFmpeg
39 unsigned avformat_version(void)
41 return LIBAVFORMAT_VERSION_INT;
44 /* fraction handling */
47 * f = val + (num / den) + 0.5.
49 * 'num' is normalized so that it is such as 0 <= num < den.
51 * @param f fractional number
52 * @param val integer value
53 * @param num must be >= 0
54 * @param den must be >= 1
56 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
69 * Fractional addition to f: f = f + (incr / f->den).
71 * @param f fractional number
72 * @param incr increment, can be positive or negative
74 static void av_frac_add(AVFrac *f, int64_t incr)
87 } else if (num >= den) {
94 /** head of registered input format linked list */
95 AVInputFormat *first_iformat = NULL;
96 /** head of registered output format linked list */
97 AVOutputFormat *first_oformat = NULL;
99 AVInputFormat *av_iformat_next(AVInputFormat *f)
101 if(f) return f->next;
102 else return first_iformat;
105 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
107 if(f) return f->next;
108 else return first_oformat;
111 void av_register_input_format(AVInputFormat *format)
115 while (*p != NULL) p = &(*p)->next;
120 void av_register_output_format(AVOutputFormat *format)
124 while (*p != NULL) p = &(*p)->next;
/**
 * Test whether the extension of filename (text after the last '.') matches,
 * case-insensitively, one of the comma-separated extensions.
 *
 * @param filename file name, may be NULL (returns 0)
 * @param extensions comma-separated list, e.g. "mp3,wav"
 * @return 1 on match, 0 otherwise
 * (Body restored from canonical upstream around the surviving lines.)
 */
int match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if (!filename)
        return 0;

    ext = strrchr(filename, '.');
    if (ext) {
        ext++;
        p = extensions;
        for (;;) {
            /* copy the next candidate extension, bounded by ext1 */
            q = ext1;
            while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
                *q++ = *p++;
            *q = '\0';
            if (!strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++;
        }
    }
    return 0;
}
/**
 * Case-insensitively match name against a comma-separated list of names.
 * Each candidate is compared over max(candidate length, name length) so a
 * prefix alone does not match.
 *
 * @return 1 on match, 0 otherwise (also when name or names is NULL)
 * (Declarations/guard restored from canonical upstream; FFMAX written out
 * inline so the function is self-contained.)
 */
static int match_format(const char *name, const char *names)
{
    const char *p;
    int len, namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        len = (p - names) > namelen ? (int)(p - names) : namelen;
        if (!strncasecmp(name, names, len))
            return 1;
        names = p + 1;
    }
    return !strcasecmp(name, names);
}
/**
 * Pick the best-matching registered output format, scoring each candidate
 * by short name, filename extension, and MIME-type matches; the format
 * with the highest score is returned.
 * NOTE(review): the extraction is missing source lines (gaps in the
 * embedded numbering) -- the body below is incomplete and must be restored
 * from the canonical file before it can compile.
 */
174 AVOutputFormat *guess_format(const char *short_name, const char *filename,
175 const char *mime_type)
177 AVOutputFormat *fmt, *fmt_found;
178 int score_max, score;
180 /* specific test for image sequences */
181 #if CONFIG_IMAGE2_MUXER
182 if (!short_name && filename &&
183 av_filename_number_test(filename) &&
184 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
185 return guess_format("image2", NULL, NULL);
188 /* Find the proper file type. */
192 while (fmt != NULL) {
194 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
196 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
198 if (filename && fmt->extensions &&
199 match_ext(filename, fmt->extensions)) {
202 if (score > score_max) {
211 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
212 const char *mime_type)
214 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
217 AVOutputFormat *stream_fmt;
218 char stream_format_name[64];
220 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
221 stream_fmt = guess_format(stream_format_name, NULL, NULL);
230 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
231 const char *filename, const char *mime_type, enum CodecType type){
232 if(type == CODEC_TYPE_VIDEO){
233 enum CodecID codec_id= CODEC_ID_NONE;
235 #if CONFIG_IMAGE2_MUXER
236 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
237 codec_id= av_guess_image2_codec(filename);
240 if(codec_id == CODEC_ID_NONE)
241 codec_id= fmt->video_codec;
243 }else if(type == CODEC_TYPE_AUDIO)
244 return fmt->audio_codec;
246 return CODEC_ID_NONE;
249 AVInputFormat *av_find_input_format(const char *short_name)
252 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
253 if (match_format(short_name, fmt->name))
259 /* memory handling */
261 void av_destruct_packet(AVPacket *pkt)
264 pkt->data = NULL; pkt->size = 0;
267 void av_init_packet(AVPacket *pkt)
269 pkt->pts = AV_NOPTS_VALUE;
270 pkt->dts = AV_NOPTS_VALUE;
273 pkt->convergence_duration = 0;
275 pkt->stream_index = 0;
276 pkt->destruct= av_destruct_packet_nofree;
279 int av_new_packet(AVPacket *pkt, int size)
282 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
283 return AVERROR(ENOMEM);
284 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
286 return AVERROR(ENOMEM);
287 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
292 pkt->destruct = av_destruct_packet;
296 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
298 int ret= av_new_packet(pkt, size);
303 pkt->pos= url_ftell(s);
305 ret= get_buffer(s, pkt->data, size);
314 int av_dup_packet(AVPacket *pkt)
316 if (((pkt->destruct == av_destruct_packet_nofree) || (pkt->destruct == NULL)) && pkt->data) {
318 /* We duplicate the packet and don't forget to add the padding again. */
319 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
320 return AVERROR(ENOMEM);
321 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
323 return AVERROR(ENOMEM);
325 memcpy(data, pkt->data, pkt->size);
326 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
328 pkt->destruct = av_destruct_packet;
/**
 * Return 1 if the filename template contains a valid frame-number
 * placeholder (checked by formatting frame 1), 0 otherwise or if
 * filename is NULL. (Missing lines restored from canonical upstream.)
 */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
}
/**
 * Probe the registered demuxers against pd and return the one scoring
 * strictly above *score_max (the winning score is written back). Formats
 * without a read_probe callback fall back to an extension match.
 * NOTE(review): the extraction is missing source lines (gaps in the
 * embedded numbering) -- the body below is incomplete and must be restored
 * from the canonical file before it can compile.
 */
339 static AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
341 AVInputFormat *fmt1, *fmt;
/* skip formats whose NOFILE flag does not match the open state */
345 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
346 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
349 if (fmt1->read_probe) {
350 score = fmt1->read_probe(pd);
351 } else if (fmt1->extensions) {
352 if (match_ext(pd->filename, fmt1->extensions)) {
356 if (score > *score_max) {
359 }else if (score == *score_max)
365 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
367 return av_probe_input_format2(pd, is_opened, &score);
370 static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
373 fmt = av_probe_input_format2(pd, 1, &score);
376 if (!strcmp(fmt->name, "mp3")) {
377 st->codec->codec_id = CODEC_ID_MP3;
378 st->codec->codec_type = CODEC_TYPE_AUDIO;
379 } else if (!strcmp(fmt->name, "ac3")) {
380 st->codec->codec_id = CODEC_ID_AC3;
381 st->codec->codec_type = CODEC_TYPE_AUDIO;
382 } else if (!strcmp(fmt->name, "mpegvideo")) {
383 st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
384 st->codec->codec_type = CODEC_TYPE_VIDEO;
385 } else if (!strcmp(fmt->name, "m4v")) {
386 st->codec->codec_id = CODEC_ID_MPEG4;
387 st->codec->codec_type = CODEC_TYPE_VIDEO;
388 } else if (!strcmp(fmt->name, "h264")) {
389 st->codec->codec_id = CODEC_ID_H264;
390 st->codec->codec_type = CODEC_TYPE_VIDEO;
396 /************************************************************/
397 /* input media file */
/**
 * Open a media file from an IO stream. 'fmt' must be specified.
 * Allocates the AVFormatContext (unless ap->prealloced_context), copies
 * the filename, allocates the demuxer's private data and calls its
 * read_header(); records the data offset on success.
 * NOTE(review): the extraction is missing source lines (gaps in the
 * embedded numbering) -- the body below is incomplete; the trailing
 * av_freep/loop lines appear to belong to the error-cleanup path
 * (a "fail:" label) -- confirm against the canonical file.
 */
400 * Open a media file from an IO stream. 'fmt' must be specified.
402 int av_open_input_stream(AVFormatContext **ic_ptr,
403 ByteIOContext *pb, const char *filename,
404 AVInputFormat *fmt, AVFormatParameters *ap)
408 AVFormatParameters default_ap;
412 memset(ap, 0, sizeof(default_ap));
415 if(!ap->prealloced_context)
416 ic = avformat_alloc_context();
420 err = AVERROR(ENOMEM);
425 ic->duration = AV_NOPTS_VALUE;
426 ic->start_time = AV_NOPTS_VALUE;
427 av_strlcpy(ic->filename, filename, sizeof(ic->filename));
429 /* allocate private data */
430 if (fmt->priv_data_size > 0) {
431 ic->priv_data = av_mallocz(fmt->priv_data_size);
432 if (!ic->priv_data) {
433 err = AVERROR(ENOMEM);
437 ic->priv_data = NULL;
440 if (ic->iformat->read_header) {
441 err = ic->iformat->read_header(ic, ap);
446 if (pb && !ic->data_offset)
447 ic->data_offset = url_ftell(ic->pb);
449 #if LIBAVFORMAT_VERSION_MAJOR < 53
450 ff_metadata_demux_compat(ic);
458 av_freep(&ic->priv_data);
459 for(i=0;i<ic->nb_streams;i++) {
460 AVStream *st = ic->streams[i];
462 av_free(st->priv_data);
463 av_free(st->codec->extradata);
473 /** size of probe buffer, for guessing file type from file contents */
474 #define PROBE_BUF_MIN 2048
475 #define PROBE_BUF_MAX (1<<20)
/**
 * Open a media file by name: probe the format (first without opening the
 * file, then with progressively larger read buffers up to PROBE_BUF_MAX),
 * then hand off to av_open_input_stream().
 * NOTE(review): the extraction is missing source lines (gaps in the
 * embedded numbering) -- the body below is incomplete and must be restored
 * from the canonical file before it can compile.
 */
477 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
480 AVFormatParameters *ap)
483 AVProbeData probe_data, *pd = &probe_data;
484 ByteIOContext *pb = NULL;
488 pd->filename = filename;
493 /* guess format if no file can be opened */
494 fmt = av_probe_input_format(pd, 0);
497 /* Do not open file if the format does not need it. XXX: specific
498 hack needed to handle RTSP/TCP */
499 if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
500 /* if no file needed do not try to open one */
501 if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
505 url_setbufsize(pb, buf_size);
/* probe with doubling buffer sizes; the score threshold drops to 0 only
   at the final, largest probe size */
508 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
509 int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0;
510 /* read probe data */
511 pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE);
512 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
513 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* rewind for the next probe round; if seeking fails, reopen the file */
514 if (url_fseek(pb, 0, SEEK_SET) < 0) {
516 if (url_fopen(&pb, filename, URL_RDONLY) < 0) {
522 /* guess file format */
523 fmt = av_probe_input_format2(pd, 1, &score);
528 /* if still no format found, error */
534 /* check filename in case an image number is expected */
535 if (fmt->flags & AVFMT_NEEDNUMBER) {
536 if (!av_filename_number_test(filename)) {
537 err = AVERROR_NUMEXPECTED;
541 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
554 /*******************************************************/
556 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
557 AVPacketList **plast_pktl){
558 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
563 (*plast_pktl)->next = pktl;
565 *packet_buffer = pktl;
567 /* add the packet in the buffered packet list */
/**
 * Read the next raw packet from the demuxer. Packets for streams whose
 * codec id is still CODEC_ID_PROBE are buffered (raw_packet_buffer) and
 * their payload is accumulated into st->probe_data until enough data is
 * available to identify the codec via set_codec_from_probe_data().
 * NOTE(review): the extraction is missing source lines (gaps in the
 * embedded numbering) -- the body below is incomplete and must be restored
 * from the canonical file before it can compile.
 */
573 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
579 AVPacketList *pktl = s->raw_packet_buffer;
/* serve buffered packets first once their stream's codec is known */
583 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
584 s->raw_packet_buffer = pktl->next;
591 ret= s->iformat->read_packet(s, pkt);
594 st= s->streams[pkt->stream_index];
/* honor forced codec ids requested by the caller */
596 switch(st->codec->codec_type){
597 case CODEC_TYPE_VIDEO:
598 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
600 case CODEC_TYPE_AUDIO:
601 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
603 case CODEC_TYPE_SUBTITLE:
604 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
608 if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
611 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
613 if(st->codec->codec_id == CODEC_ID_PROBE){
614 AVProbeData *pd = &st->probe_data;
616 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
617 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
618 pd->buf_size += pkt->size;
619 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* re-probe each time the accumulated size crosses a power of two */
621 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
622 set_codec_from_probe_data(st, pd, 1);
623 if(st->codec->codec_id != CODEC_ID_PROBE){
632 /**********************************************************/
/**
 * Get the number of samples of an audio frame. Return -1 on error.
 * For codecs with frame_size <= 1: derives the count from bits-per-sample
 * (PCM-style) or, failing that, from the bitrate (e.g. ADPCM); otherwise
 * returns the codec's fixed frame_size.
 * NOTE(review): the extraction is missing source lines (gaps in the
 * embedded numbering) -- the body below is incomplete and must be restored
 * from the canonical file before it can compile.
 */
635 * Get the number of samples of an audio frame. Return -1 on error.
637 static int get_audio_frame_size(AVCodecContext *enc, int size)
641 if(enc->codec_id == CODEC_ID_VORBIS)
644 if (enc->frame_size <= 1) {
645 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
647 if (bits_per_sample) {
648 if (enc->channels == 0)
650 frame_size = (size << 3) / (bits_per_sample * enc->channels);
652 /* used for example by ADPCM codecs */
653 if (enc->bit_rate == 0)
655 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
658 frame_size = enc->frame_size;
/**
 * Return the frame duration as a num/den rational in stream time-base
 * units. Video uses the stream or codec time base (scaled by repeat_pict
 * from the parser); audio uses frame size over sample rate.
 * NOTE(review): the extraction is missing source lines (gaps in the
 * embedded numbering) -- the body below is incomplete and must be restored
 * from the canonical file before it can compile.
 */
665 * Return the frame duration in seconds. Return 0 if not available.
667 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
668 AVCodecParserContext *pc, AVPacket *pkt)
674 switch(st->codec->codec_type) {
675 case CODEC_TYPE_VIDEO:
/* the *1000LL test rejects implausibly small (bogus) time bases */
676 if(st->time_base.num*1000LL > st->time_base.den){
677 *pnum = st->time_base.num;
678 *pden = st->time_base.den;
679 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
680 *pnum = st->codec->time_base.num;
681 *pden = st->codec->time_base.den;
682 if (pc && pc->repeat_pict) {
683 *pnum = (*pnum) * (1 + pc->repeat_pict);
687 case CODEC_TYPE_AUDIO:
688 frame_size = get_audio_frame_size(st->codec, pkt->size);
692 *pden = st->codec->sample_rate;
/**
 * Return nonzero if every frame of the codec is independently decodable:
 * true for all audio codecs, and for the listed intra-only video codecs.
 * NOTE(review): the extraction is missing source lines (gaps in the
 * embedded numbering) -- the switch body below is incomplete and must be
 * restored from the canonical file before it can compile.
 */
699 static int is_intra_only(AVCodecContext *enc){
700 if(enc->codec_type == CODEC_TYPE_AUDIO){
702 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
703 switch(enc->codec_id){
705 case CODEC_ID_MJPEGB:
707 case CODEC_ID_RAWVIDEO:
708 case CODEC_ID_DVVIDEO:
709 case CODEC_ID_HUFFYUV:
710 case CODEC_ID_FFVHUFF:
715 case CODEC_ID_JPEG2000:
/**
 * Once the first DTS of a stream becomes known, shift the timestamps of
 * all already-buffered packets of that stream by the derived first_dts,
 * and initialize the stream's start_time.
 * NOTE(review): the extraction is missing source lines (gaps in the
 * embedded numbering) -- the body below is incomplete and must be restored
 * from the canonical file before it can compile.
 */
723 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
724 int64_t dts, int64_t pts)
726 AVStream *st= s->streams[stream_index];
727 AVPacketList *pktl= s->packet_buffer;
/* only run once, and only when both dts and cur_dts are known */
729 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
732 st->first_dts= dts - st->cur_dts;
735 for(; pktl; pktl= pktl->next){
736 if(pktl->pkt.stream_index != stream_index)
738 //FIXME think more about this check
739 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
740 pktl->pkt.pts += st->first_dts;
742 if(pktl->pkt.dts != AV_NOPTS_VALUE)
743 pktl->pkt.dts += st->first_dts;
745 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
746 st->start_time= pktl->pkt.pts;
748 if (st->start_time == AV_NOPTS_VALUE)
749 st->start_time = pts;
/**
 * Fill in missing dts/pts/duration of buffered packets for a stream once
 * a packet with a known duration arrives, extrapolating timestamps
 * backwards from first_dts (or forwards from cur_dts).
 * NOTE(review): the extraction is missing source lines (gaps in the
 * embedded numbering) -- the body below is incomplete and must be restored
 * from the canonical file before it can compile.
 */
752 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
754 AVPacketList *pktl= s->packet_buffer;
757 if(st->first_dts != AV_NOPTS_VALUE){
758 cur_dts= st->first_dts;
/* walk back over leading timestamp-less packets of this stream */
759 for(; pktl; pktl= pktl->next){
760 if(pktl->pkt.stream_index == pkt->stream_index){
761 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
763 cur_dts -= pkt->duration;
766 pktl= s->packet_buffer;
767 st->first_dts = cur_dts;
768 }else if(st->cur_dts)
/* forward pass: assign dts (and pts when no B-frames) and duration */
771 for(; pktl; pktl= pktl->next){
772 if(pktl->pkt.stream_index != pkt->stream_index)
774 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
775 && !pktl->pkt.duration){
776 pktl->pkt.dts= cur_dts;
777 if(!st->codec->has_b_frames)
778 pktl->pkt.pts= cur_dts;
779 cur_dts += pkt->duration;
780 pktl->pkt.duration= pkt->duration;
784 if(st->first_dts == AV_NOPTS_VALUE)
785 st->cur_dts= cur_dts;
/**
 * Fix up the timestamp-related fields of a freshly demuxed packet:
 * estimate a missing duration, correct pts-wrap artifacts, use parser
 * synchronization info when available, interpolate missing pts/dts from
 * cur_dts, reorder the pts buffer for B-frame delay, and derive key-frame
 * flags from the parser or intra-only codecs.
 * NOTE(review): the extraction is missing many source lines (gaps in the
 * embedded numbering) -- the body below is incomplete and must be restored
 * from the canonical file before it can compile. Comments below describe
 * only what the surviving lines show.
 */
788 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
789 AVCodecParserContext *pc, AVPacket *pkt)
791 int num, den, presentation_delayed, delay, i;
794 /* do we have a video B-frame ? */
795 delay= st->codec->has_b_frames;
796 presentation_delayed = 0;
797 /* XXX: need has_b_frame, but cannot get it if the codec is
800 pc && pc->pict_type != FF_B_TYPE)
801 presentation_delayed = 1;
/* undo a pts wrap-around when dts ended up above pts */
803 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
804 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
805 pkt->dts -= 1LL<<st->pts_wrap_bits;
808 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
809 // we take the conservative approach and discard both
810 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
811 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
812 av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n");
813 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
816 if (pkt->duration == 0) {
817 compute_frame_duration(&num, &den, st, pc, pkt);
819 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
821 if(pkt->duration != 0 && s->packet_buffer)
822 update_initial_durations(s, st, pkt);
826 /* correct timestamps with byte offset if demuxers only have timestamps
827 on packet boundaries */
828 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
829 /* this will estimate bitrate based on this frame's duration and size */
830 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
831 if(pkt->pts != AV_NOPTS_VALUE)
833 if(pkt->dts != AV_NOPTS_VALUE)
837 if (pc && pc->dts_sync_point >= 0) {
838 // we have synchronization info from the parser
839 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
841 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
842 if (pkt->dts != AV_NOPTS_VALUE) {
843 // got DTS from the stream, update reference timestamp
844 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
845 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
846 } else if (st->reference_dts != AV_NOPTS_VALUE) {
847 // compute DTS based on reference timestamp
848 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
849 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
851 if (pc->dts_sync_point > 0)
852 st->reference_dts = pkt->dts; // new reference
856 /* This may be redundant, but it should not hurt. */
857 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
858 presentation_delayed = 1;
860 //    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
861 /* interpolate PTS and DTS if they are not present */
862 //We skip H264 currently because delay and has_b_frames are not reliably set
863 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
864 if (presentation_delayed) {
865 /* DTS = decompression timestamp */
866 /* PTS = presentation timestamp */
867 if (pkt->dts == AV_NOPTS_VALUE)
868 pkt->dts = st->last_IP_pts;
869 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
870 if (pkt->dts == AV_NOPTS_VALUE)
871 pkt->dts = st->cur_dts;
873 /* this is tricky: the dts must be incremented by the duration
874 of the frame we are displaying, i.e. the last I- or P-frame */
875 if (st->last_IP_duration == 0)
876 st->last_IP_duration = pkt->duration;
877 if(pkt->dts != AV_NOPTS_VALUE)
878 st->cur_dts = pkt->dts + st->last_IP_duration;
879 st->last_IP_duration  = pkt->duration;
880 st->last_IP_pts= pkt->pts;
881 /* cannot compute PTS if not present (we can compute it only
882 by knowing the future */
883 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
884 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
885 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
886 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
887 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
888 pkt->pts += pkt->duration;
889 //                av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
893 /* presentation is not delayed : PTS and DTS are the same */
894 if(pkt->pts == AV_NOPTS_VALUE)
896 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
897 if(pkt->pts == AV_NOPTS_VALUE)
898 pkt->pts = st->cur_dts;
900 if(pkt->pts != AV_NOPTS_VALUE)
901 st->cur_dts = pkt->pts + pkt->duration;
/* maintain a sorted window of recent pts values to reorder into dts */
905 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
906 st->pts_buffer[0]= pkt->pts;
907 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
908 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
909 if(pkt->dts == AV_NOPTS_VALUE)
910 pkt->dts= st->pts_buffer[0];
911 if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here
912 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
914 if(pkt->dts > st->cur_dts)
915 st->cur_dts = pkt->dts;
918 //    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
921 if(is_intra_only(st->codec))
922 pkt->flags |= PKT_FLAG_KEY;
925 /* keyframe computation */
926 if (pc->key_frame == 1)
927 pkt->flags |= PKT_FLAG_KEY;
928 else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
929 pkt->flags |= PKT_FLAG_KEY;
932 pkt->convergence_duration = pc->convergence_duration;
935 void av_destruct_packet_nofree(AVPacket *pkt)
937 pkt->data = NULL; pkt->size = 0;
/**
 * Demux and (when the stream needs it) parse the next frame: feed raw
 * packets through the per-stream AVCodecParserContext, fix up timestamps
 * via compute_pkt_fields(), and maintain the generic index for formats
 * flagged AVFMT_GENERIC_INDEX.
 * NOTE(review): the extraction is missing many source lines (gaps in the
 * embedded numbering) -- the body below is incomplete and must be restored
 * from the canonical file before it can compile.
 */
940 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
948 /* select current input stream component */
951 if (!st->need_parsing || !st->parser) {
952 /* no parsing needed: we just output the packet as is */
953 /* raw data support */
954 *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
955 compute_pkt_fields(s, st, NULL, pkt);
958 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
959 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
960 st->cur_ptr, st->cur_len,
961 st->cur_pkt.pts, st->cur_pkt.dts);
/* the source timestamps are consumed by the first parse call */
962 st->cur_pkt.pts = AV_NOPTS_VALUE;
963 st->cur_pkt.dts = AV_NOPTS_VALUE;
964 /* increment read pointer */
968 /* return packet if any */
970 pkt->pos = st->cur_pkt.pos;            // Isn't quite accurate but close.
973 pkt->stream_index = st->index;
974 pkt->pts = st->parser->pts;
975 pkt->dts = st->parser->dts;
976 pkt->destruct = av_destruct_packet_nofree;
977 compute_pkt_fields(s, st, st->parser, pkt);
979 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
980 ff_reduce_index(s, st->index);
981 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
982 0, 0, AVINDEX_KEYFRAME);
989 av_free_packet(&st->cur_pkt);
994 /* read next packet */
995 ret = av_read_packet(s, &cur_pkt);
997 if (ret == AVERROR(EAGAIN))
999 /* return the last frames, if any */
1000 for(i = 0; i < s->nb_streams; i++) {
1002 if (st->parser && st->need_parsing) {
/* flush the parser with a NULL input to drain buffered frames */
1003 av_parser_parse(st->parser, st->codec,
1004 &pkt->data, &pkt->size,
1006 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
1011 /* no more packets: really terminate parsing */
1014 st = s->streams[cur_pkt.stream_index];
1015 st->cur_pkt= cur_pkt;
1017 if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
1018 st->cur_pkt.dts != AV_NOPTS_VALUE &&
1019 st->cur_pkt.pts < st->cur_pkt.dts){
1020 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1021 st->cur_pkt.stream_index,
1025 //            av_free_packet(&st->cur_pkt);
1029 if(s->debug & FF_FDEBUG_TS)
1030 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
1031 st->cur_pkt.stream_index,
1038 st->cur_ptr = st->cur_pkt.data;
1039 st->cur_len = st->cur_pkt.size;
1040 if (st->need_parsing && !st->parser) {
1041 st->parser = av_parser_init(st->codec->codec_id);
1043 /* no parser available: just output the raw packets */
1044 st->need_parsing = AVSTREAM_PARSE_NONE;
1045 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
1046 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1048 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
1049 st->parser->next_frame_offset=
1050 st->parser->cur_offset= st->cur_pkt.pos;
1055 if(s->debug & FF_FDEBUG_TS)
1056 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
/**
 * Public frame reader: returns buffered packets first, otherwise pulls a
 * new frame via av_read_frame_internal(). With AVFMT_FLAG_GENPTS set,
 * packets are held in the buffer until their pts can be derived from a
 * later packet's dts.
 * NOTE(review): the extraction is missing source lines (gaps in the
 * embedded numbering) -- the body below is incomplete and must be restored
 * from the canonical file before it can compile.
 */
1066 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1070 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
1073 pktl = s->packet_buffer;
1075 AVPacket *next_pkt= &pktl->pkt;
1077 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
/* scan later packets of the same stream for a dts to use as this pts */
1078 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
1079 if(   pktl->pkt.stream_index == next_pkt->stream_index
1080 && next_pkt->dts < pktl->pkt.dts
1081 && pktl->pkt.pts != pktl->pkt.dts //not b frame
1082 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
1083 next_pkt->pts= pktl->pkt.dts;
1087 pktl = s->packet_buffer;
1090 if(   next_pkt->pts != AV_NOPTS_VALUE
1091 || next_pkt->dts == AV_NOPTS_VALUE
1093 /* read packet from packet buffer, if there is data */
1095 s->packet_buffer = pktl->next;
1101 int ret= av_read_frame_internal(s, pkt);
1103 if(pktl && ret != AVERROR(EAGAIN)){
1110 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1111 &s->packet_buffer_end)) < 0)
1112 return AVERROR(ENOMEM);
1114 assert(!s->packet_buffer);
1115 return av_read_frame_internal(s, pkt);
1120 /* XXX: suppress the packet queue */
/**
 * Free every packet buffered in s->packet_buffer and reset the list.
 * NOTE(review): the extraction is missing source lines (gaps in the
 * embedded numbering) -- the loop structure and list-node frees are not
 * visible here; restore from the canonical file before compiling.
 */
1121 static void flush_packet_queue(AVFormatContext *s)
1126 pktl = s->packet_buffer;
1129 s->packet_buffer = pktl->next;
1130 av_free_packet(&pktl->pkt);
1135 /*******************************************************/
1138 int av_find_default_stream_index(AVFormatContext *s)
1140 int first_audio_index = -1;
1144 if (s->nb_streams <= 0)
1146 for(i = 0; i < s->nb_streams; i++) {
1148 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1151 if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
1152 first_audio_index = i;
1154 return first_audio_index >= 0 ? first_audio_index : 0;
/**
 * Flush the frame reader: drop all buffered packets and reset each
 * stream's parser and timestamp state (used before seeking).
 * NOTE(review): the extraction is missing source lines (gaps in the
 * embedded numbering) -- the body below is incomplete and must be restored
 * from the canonical file before it can compile.
 */
1158 * Flush the frame reader.
1160 static void av_read_frame_flush(AVFormatContext *s)
1165 flush_packet_queue(s);
1169 /* for each stream, reset read state */
1170 for(i = 0; i < s->nb_streams; i++) {
1174 av_parser_close(st->parser);
1176 av_free_packet(&st->cur_pkt);
1178 st->last_IP_pts = AV_NOPTS_VALUE;
1179 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1180 st->reference_dts = AV_NOPTS_VALUE;
1187 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1190 for(i = 0; i < s->nb_streams; i++) {
1191 AVStream *st = s->streams[i];
1193 st->cur_dts = av_rescale(timestamp,
1194 st->time_base.den * (int64_t)ref_st->time_base.num,
1195 st->time_base.num * (int64_t)ref_st->time_base.den);
1199 void ff_reduce_index(AVFormatContext *s, int stream_index)
1201 AVStream *st= s->streams[stream_index];
1202 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1204 if((unsigned)st->nb_index_entries >= max_entries){
1206 for(i=0; 2*i<st->nb_index_entries; i++)
1207 st->index_entries[i]= st->index_entries[2*i];
1208 st->nb_index_entries= i;
/**
 * Insert an entry into the stream's sorted index (by timestamp). Replaces
 * an existing entry with the same timestamp unless that would reduce
 * min_distance; otherwise shifts later entries to keep sort order.
 * NOTE(review): the extraction is missing source lines (gaps in the
 * embedded numbering) -- the body below is incomplete and must be restored
 * from the canonical file before it can compile.
 */
1212 int av_add_index_entry(AVStream *st,
1213 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1215 AVIndexEntry *entries, *ie;
/* overflow guard on the (count+1)*sizeof multiplication */
1218 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1221 entries = av_fast_realloc(st->index_entries,
1222 &st->index_entries_allocated_size,
1223 (st->nb_index_entries + 1) *
1224 sizeof(AVIndexEntry));
1228 st->index_entries= entries;
1230 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1233 index= st->nb_index_entries++;
1234 ie= &entries[index];
1235 assert(index==0 || ie[-1].timestamp < timestamp);
1237 ie= &entries[index];
1238 if(ie->timestamp != timestamp){
1239 if(ie->timestamp <= timestamp)
1241 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1242 st->nb_index_entries++;
1243 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1244 distance= ie->min_distance;
1248 ie->timestamp = timestamp;
1249 ie->min_distance= distance;
/**
 * Binary-search the stream's index for wanted_timestamp; direction is
 * controlled by AVSEEK_FLAG_BACKWARD, and unless AVSEEK_FLAG_ANY is set
 * the result is stepped to the nearest keyframe entry.
 * NOTE(review): the extraction is missing source lines (gaps in the
 * embedded numbering) -- the bisection loop below is incomplete and must
 * be restored from the canonical file before it can compile.
 */
1256 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1259 AVIndexEntry *entries= st->index_entries;
1260 int nb_entries= st->nb_index_entries;
1269 timestamp = entries[m].timestamp;
1270 if(timestamp >= wanted_timestamp)
1272 if(timestamp <= wanted_timestamp)
1275 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1277 if(!(flags & AVSEEK_FLAG_ANY)){
1278 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1279 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/**
 * Seek by timestamp using the demuxer's read_timestamp(): seed the search
 * bounds from cached index entries when possible, then run av_gen_search()
 * and position the IO context at the found offset.
 * NOTE(review): the extraction is missing source lines (gaps in the
 * embedded numbering) -- the body below is incomplete and must be restored
 * from the canonical file before it can compile.
 */
1290 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1291 AVInputFormat *avif= s->iformat;
1292 int64_t pos_min, pos_max, pos, pos_limit;
1293 int64_t ts_min, ts_max, ts;
1297 if (stream_index < 0)
1301 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1305 ts_min= AV_NOPTS_VALUE;
1306 pos_limit= -1; //gcc falsely says it may be uninitialized
1308 st= s->streams[stream_index];
1309 if(st->index_entries){
/* lower bound from the cached entry at or before target_ts */
1312 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1313 index= FFMAX(index, 0);
1314 e= &st->index_entries[index];
1316 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1318 ts_min= e->timestamp;
1320 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* upper bound from the cached entry at or after target_ts */
1327 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1328 assert(index < st->nb_index_entries);
1330 e= &st->index_entries[index];
1331 assert(e->timestamp >= target_ts);
1333 ts_max= e->timestamp;
1334 pos_limit= pos_max - e->min_distance;
1336 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1337 pos_max,pos_limit, ts_max);
1342 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1347 url_fseek(s->pb, pos, SEEK_SET);
1349 av_update_cur_dts(s, st, ts);
/**
 * Generic timestamp search over a byte range: establishes missing ts/pos
 * bounds by reading timestamps at the file start/end, then narrows the
 * range by interpolation, falling back to bisection and finally linear
 * search. Returns the chosen byte position; *ts_ret gets its timestamp.
 * NOTE(review): the extraction is missing many source lines (gaps in the
 * embedded numbering) -- the body below is incomplete and must be restored
 * from the canonical file before it can compile.
 */
1354 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1356 int64_t start_pos, filesize;
1360 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
1363 if(ts_min == AV_NOPTS_VALUE){
1364 pos_min = s->data_offset;
1365 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1366 if (ts_min == AV_NOPTS_VALUE)
1370 if(ts_max == AV_NOPTS_VALUE){
/* step backwards from EOF until a timestamp is found */
1372 filesize = url_fsize(s->pb);
1373 pos_max = filesize - 1;
1376 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1378 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1379 if (ts_max == AV_NOPTS_VALUE)
1383 int64_t tmp_pos= pos_max + 1;
1384 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1385 if(tmp_ts == AV_NOPTS_VALUE)
1389 if(tmp_pos >= filesize)
1395 if(ts_min > ts_max){
1397 }else if(ts_min == ts_max){
1402 while (pos_min < pos_limit) {
1404 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1408 assert(pos_limit <= pos_max);
1411 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1412 // interpolate position (better than dichotomy)
1413 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1414 + pos_min - approximate_keyframe_distance;
1415 }else if(no_change==1){
1416 // bisection, if interpolation failed to change min or max pos last time
1417 pos = (pos_min + pos_limit)>>1;
1419 /* linear search if bisection failed, can only happen if there
1420 are very few or no keyframes between min/max */
1425 else if(pos > pos_limit)
1429 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1435 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1437 if(ts == AV_NOPTS_VALUE){
1438 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1441 assert(ts != AV_NOPTS_VALUE);
1442 if (target_ts <= ts) {
1443 pos_limit = start_pos - 1;
1447 if (target_ts >= ts) {
1453 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1454 ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min  : ts_max;
1457 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1459 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1460 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1461 pos, ts_min, target_ts, ts_max);
/* Seek to a raw byte position in the input, clamping the requested position
 * to [data_offset, file_size - 1] before issuing the fseek.
 * NOTE(review): this chunk has elided lines (original numbering skips), so
 * early-return bodies and the trailing return are not visible here. */
1467 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1468 int64_t pos_min, pos_max;
1472 if (stream_index < 0)
1475 st= s->streams[stream_index];
/* valid byte range: first byte of payload .. last byte of the file */
1478 pos_min = s->data_offset;
1479 pos_max = url_fsize(s->pb) - 1;
1481 if (pos < pos_min) pos= pos_min;
1482 else if(pos > pos_max) pos= pos_max;
1484 url_fseek(s->pb, pos, SEEK_SET);
/* resync the stream's current DTS after the byte-level reposition */
1487 av_update_cur_dts(s, st, ts);
/* Generic (index-driven) seek: look the target timestamp up in the stream's
 * index; if it is past the indexed region, read packets forward (building the
 * index as a side effect of av_read_frame) until a keyframe past the target
 * is seen, then retry the index lookup and fseek to the chosen entry.
 * NOTE(review): interior lines are elided in this chunk; error paths and loop
 * structure are only partially visible. */
1492 static int av_seek_frame_generic(AVFormatContext *s,
1493 int stream_index, int64_t timestamp, int flags)
1499 st = s->streams[stream_index];
1501 index = av_index_search_timestamp(st, timestamp, flags);
/* target not covered by the existing index (or only by its last entry) */
1503 if(index < 0 || index==st->nb_index_entries-1){
1507 if(st->nb_index_entries){
1508 assert(st->index_entries);
/* resume reading from the last indexed position rather than from 0 */
1509 ie= &st->index_entries[st->nb_index_entries-1];
1510 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1512 av_update_cur_dts(s, st, ie->timestamp);
1514 if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
/* EAGAIN means "try again" per av_read_frame contract */
1520 ret = av_read_frame(s, &pkt);
1521 }while(ret == AVERROR(EAGAIN));
1524 av_free_packet(&pkt);
1525 if(stream_index == pkt.stream_index){
/* stop once a keyframe beyond the target timestamp has been indexed */
1526 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1530 index = av_index_search_timestamp(st, timestamp, flags);
1535 av_read_frame_flush(s);
/* prefer the demuxer's own seek callback when available */
1536 if (s->iformat->read_seek){
1537 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1540 ie = &st->index_entries[index];
1541 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1543 av_update_cur_dts(s, st, ie->timestamp);
/* Public seek entry point. Dispatch order: byte seek if requested, then the
 * demuxer's read_seek, then binary search via read_timestamp, finally the
 * generic index-based seek.
 * NOTE(review): elided lines hide some returns/fallthrough in this chunk. */
1548 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1553 av_read_frame_flush(s);
1555 if(flags & AVSEEK_FLAG_BYTE)
1556 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1558 if(stream_index < 0){
1559 stream_index= av_find_default_stream_index(s);
1560 if(stream_index < 0)
1563 st= s->streams[stream_index];
1564 /* timestamp for default must be expressed in AV_TIME_BASE units */
1565 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1568 /* first, we try the format specific seek */
1569 if (s->iformat->read_seek)
1570 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
/* fall back to binary search over read_timestamp, else the generic path */
1577 if(s->iformat->read_timestamp)
1578 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1580 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1583 /*******************************************************/
/* True iff at least one stream in the context carries a known duration.
 * NOTE(review): the return statements are elided in this chunk. */
1586 * Returns TRUE if the stream has accurate duration in any stream.
1588 * @return TRUE if the stream has accurate duration for at least one component.
1590 static int av_has_duration(AVFormatContext *ic)
1595 for(i = 0;i < ic->nb_streams; i++) {
1596 st = ic->streams[i];
1597 if (st->duration != AV_NOPTS_VALUE)
/* Derive the container-level start_time, duration and (if file_size is known)
 * bit_rate from the per-stream values, rescaled to AV_TIME_BASE units. */
1604 * Estimate the stream timings from the one of each components.
1606 * Also computes the global bitrate if possible.
1608 static void av_update_stream_timings(AVFormatContext *ic)
1610 int64_t start_time, start_time1, end_time, end_time1;
1611 int64_t duration, duration1;
/* sentinels: min over start times, max over end times / durations */
1615 start_time = INT64_MAX;
1616 end_time = INT64_MIN;
1617 duration = INT64_MIN;
1618 for(i = 0;i < ic->nb_streams; i++) {
1619 st = ic->streams[i];
1620 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1621 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1622 if (start_time1 < start_time)
1623 start_time = start_time1;
1624 if (st->duration != AV_NOPTS_VALUE) {
1625 end_time1 = start_time1
1626 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1627 if (end_time1 > end_time)
1628 end_time = end_time1;
1631 if (st->duration != AV_NOPTS_VALUE) {
1632 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1633 if (duration1 > duration)
1634 duration = duration1;
1637 if (start_time != INT64_MAX) {
1638 ic->start_time = start_time;
1639 if (end_time != INT64_MIN) {
/* span of [earliest start, latest end] can exceed the largest stream duration */
1640 if (end_time - start_time > duration)
1641 duration = end_time - start_time;
1644 if (duration != INT64_MIN) {
1645 ic->duration = duration;
1646 if (ic->file_size > 0) {
1647 /* compute the bitrate */
1648 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1649 (double)ic->duration;
/* Propagate the container-level start_time/duration (computed by
 * av_update_stream_timings) down to streams that lack their own values,
 * rescaled into each stream's time base. */
1654 static void fill_all_stream_timings(AVFormatContext *ic)
1659 av_update_stream_timings(ic);
1660 for(i = 0;i < ic->nb_streams; i++) {
1661 st = ic->streams[i];
1662 if (st->start_time == AV_NOPTS_VALUE) {
1663 if(ic->start_time != AV_NOPTS_VALUE)
1664 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1665 if(ic->duration != AV_NOPTS_VALUE)
1666 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Estimate durations from file size and bitrate: sum per-stream codec
 * bitrates if the container bitrate is unset, then duration ~= 8*filesize /
 * bit_rate rescaled into each stream's time base. */
1671 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1673 int64_t filesize, duration;
1677 /* if bit_rate is already set, we believe it */
1678 if (ic->bit_rate == 0) {
1680 for(i=0;i<ic->nb_streams;i++) {
1681 st = ic->streams[i];
1682 bit_rate += st->codec->bit_rate;
1684 ic->bit_rate = bit_rate;
1687 /* if duration is already set, we believe it */
1688 if (ic->duration == AV_NOPTS_VALUE &&
1689 ic->bit_rate != 0 &&
1690 ic->file_size != 0) {
1691 filesize = ic->file_size;
1693 for(i = 0; i < ic->nb_streams; i++) {
1694 st = ic->streams[i];
/* duration in stream time base: (8*filesize/bit_rate) * den/num */
1695 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1696 if (st->duration == AV_NOPTS_VALUE)
1697 st->duration = duration;
1703 #define DURATION_MAX_READ_SIZE 250000
/* Estimate start times from the first packets of the file and durations from
 * the PTS of packets in the last DURATION_MAX_READ_SIZE bytes, then restore
 * the original file position and per-stream DTS state.
 * NOTE(review): loop headers and several closing braces are elided in this
 * chunk; the two read loops are only partially visible. */
1705 /* only usable for MPEG-PS streams */
1706 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1708 AVPacket pkt1, *pkt = &pkt1;
1710 int read_size, i, ret;
1712 int64_t filesize, offset, duration;
1716 /* flush packet queue */
1717 flush_packet_queue(ic);
/* drop any per-stream parser state / buffered packet before re-reading */
1719 for(i=0;i<ic->nb_streams;i++) {
1720 st = ic->streams[i];
1722 av_parser_close(st->parser);
1724 av_free_packet(&st->cur_pkt);
1728 /* we read the first packets to get the first PTS (not fully
1729 accurate, but it is enough now) */
1730 url_fseek(ic->pb, 0, SEEK_SET);
1733 if (read_size >= DURATION_MAX_READ_SIZE)
1735 /* if all info is available, we can stop */
1736 for(i = 0;i < ic->nb_streams; i++) {
1737 st = ic->streams[i];
1738 if (st->start_time == AV_NOPTS_VALUE)
1741 if (i == ic->nb_streams)
1745 ret = av_read_packet(ic, pkt);
1746 }while(ret == AVERROR(EAGAIN));
1749 read_size += pkt->size;
1750 st = ic->streams[pkt->stream_index];
/* first PTS seen on a stream becomes its start_time */
1751 if (pkt->pts != AV_NOPTS_VALUE) {
1752 if (st->start_time == AV_NOPTS_VALUE)
1753 st->start_time = pkt->pts;
1755 av_free_packet(pkt);
1758 /* estimate the end time (duration) */
1759 /* XXX: may need to support wrapping */
1760 filesize = ic->file_size;
1761 offset = filesize - DURATION_MAX_READ_SIZE;
1765 url_fseek(ic->pb, offset, SEEK_SET);
1768 if (read_size >= DURATION_MAX_READ_SIZE)
1772 ret = av_read_packet(ic, pkt);
1773 }while(ret == AVERROR(EAGAIN));
1776 read_size += pkt->size;
1777 st = ic->streams[pkt->stream_index];
1778 if (pkt->pts != AV_NOPTS_VALUE &&
1779 st->start_time != AV_NOPTS_VALUE) {
/* keep the largest (end_pts - start_time) seen near EOF */
1780 end_time = pkt->pts;
1781 duration = end_time - st->start_time;
1783 if (st->duration == AV_NOPTS_VALUE ||
1784 st->duration < duration)
1785 st->duration = duration;
1788 av_free_packet(pkt);
1791 fill_all_stream_timings(ic);
/* restore original read position and reset per-stream timestamp state */
1793 url_fseek(ic->pb, old_offset, SEEK_SET);
1794 for(i=0; i<ic->nb_streams; i++){
1796 st->cur_dts= st->first_dts;
1797 st->last_IP_pts = AV_NOPTS_VALUE;
/* Choose a timing-estimation strategy: exact PTS scan for seekable MPEG-PS/TS,
 * per-stream timings if any stream already has a duration, otherwise the
 * bitrate-based guess; then recompute the container-level values.
 * NOTE(review): the trailing printf section looks like #if-0/debug output;
 * surrounding preprocessor lines are elided in this chunk. */
1801 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1805 /* get the file size, if possible */
1806 if (ic->iformat->flags & AVFMT_NOFILE) {
1809 file_size = url_fsize(ic->pb);
1813 ic->file_size = file_size;
1815 if ((!strcmp(ic->iformat->name, "mpeg") ||
1816 !strcmp(ic->iformat->name, "mpegts")) &&
1817 file_size && !url_is_streamed(ic->pb)) {
1818 /* get accurate estimate from the PTSes */
1819 av_estimate_timings_from_pts(ic, old_offset);
1820 } else if (av_has_duration(ic)) {
1821 /* at least one component has timings - we use them for all
1823 fill_all_stream_timings(ic);
1825 /* less precise: use bitrate info */
1826 av_estimate_timings_from_bit_rate(ic);
1828 av_update_stream_timings(ic);
1834 for(i = 0;i < ic->nb_streams; i++) {
1835 st = ic->streams[i];
1836 printf("%d: start_time: %0.3f duration: %0.3f\n",
1837 i, (double)st->start_time / AV_TIME_BASE,
1838 (double)st->duration / AV_TIME_BASE);
1840 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1841 (double)ic->start_time / AV_TIME_BASE,
1842 (double)ic->duration / AV_TIME_BASE,
1843 ic->bit_rate / 1000);
/* Return nonzero when the codec context carries enough parameters to be
 * usable: audio needs sample_rate/channels/sample_fmt (plus frame_size for
 * Vorbis/AAC), video needs width and a pixel format; a codec_id is always
 * required. */
1848 static int has_codec_parameters(AVCodecContext *enc)
1851 switch(enc->codec_type) {
1852 case CODEC_TYPE_AUDIO:
1853 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
/* Vorbis/AAC additionally need a known frame size before they count as ready */
1854 if(!enc->frame_size &&
1855 (enc->codec_id == CODEC_ID_VORBIS ||
1856 enc->codec_id == CODEC_ID_AAC))
1859 case CODEC_TYPE_VIDEO:
1860 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1866 return enc->codec_id != CODEC_ID_NONE && val != 0;
/* Open the stream's decoder if needed and decode one frame, solely to fill
 * in missing codec parameters (dimensions, sample format, ...).
 * NOTE(review): error handling and the audio-buffer free are elided here. */
1869 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1873 int got_picture, data_size, ret=0;
1876 if(!st->codec->codec){
1877 codec = avcodec_find_decoder(st->codec->codec_id);
1880 ret = avcodec_open(st->codec, codec);
/* only decode if parameters are still missing — decoding is expensive */
1885 if(!has_codec_parameters(st->codec)){
1886 switch(st->codec->codec_type) {
1887 case CODEC_TYPE_VIDEO:
1888 ret = avcodec_decode_video(st->codec, &picture,
1889 &got_picture, data, size);
1891 case CODEC_TYPE_AUDIO:
1892 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1893 samples = av_malloc(data_size);
1896 ret = avcodec_decode_audio2(st->codec, samples,
1897 &data_size, data, size);
/* Linear scan of a CODEC_ID_NONE-terminated tag table for the given codec id.
 * NOTE(review): loop body and return are elided in this chunk. */
1908 unsigned int codec_get_tag(const AVCodecTag *tags, int id)
1910 while (tags->id != CODEC_ID_NONE) {
/* Map a fourcc tag to a CodecID: first an exact match, then a second pass
 * that compares the four tag bytes case-insensitively. */
1918 enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
1921 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
1922 if(tag == tags[i].tag)
/* fallback pass: fourcc comparison ignoring case, byte by byte */
1925 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
1926 if( toupper((tag >> 0)&0xFF) == toupper((tags[i].tag >> 0)&0xFF)
1927 && toupper((tag >> 8)&0xFF) == toupper((tags[i].tag >> 8)&0xFF)
1928 && toupper((tag >>16)&0xFF) == toupper((tags[i].tag >>16)&0xFF)
1929 && toupper((tag >>24)&0xFF) == toupper((tags[i].tag >>24)&0xFF))
1932 return CODEC_ID_NONE;
/* Search each table in a NULL-terminated list of tag tables for the codec id.
 * NOTE(review): the success/failure returns are elided in this chunk. */
1935 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
1938 for(i=0; tags && tags[i]; i++){
1939 int tag= codec_get_tag(tags[i], id);
/* Search each table in a NULL-terminated list of tag tables for the fourcc;
 * return the first non-NONE id found, else CODEC_ID_NONE. */
1945 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
1948 for(i=0; tags && tags[i]; i++){
1949 enum CodecID id= codec_get_id(tags[i], tag);
1950 if(id!=CODEC_ID_NONE) return id;
1952 return CODEC_ID_NONE;
/* Fill in missing chapter end times: each open-ended chapter ends where the
 * next one starts; the final chapter ends at start_time + duration rescaled
 * into its own time base. Chapters are assumed sorted and to share compatible
 * time bases (asserted). */
1955 static void compute_chapters_end(AVFormatContext *s)
1959 for (i=0; i+1<s->nb_chapters; i++)
1960 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
1961 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
1962 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
1963 s->chapters[i]->end = s->chapters[i+1]->start;
/* last chapter: clamp its end to the end of the whole file */
1966 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
1967 assert(s->start_time != AV_NOPTS_VALUE);
1968 assert(s->duration > 0);
1969 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
1971 s->chapters[i]->time_base);
1975 /* absolute maximum size we read until we abort */
1976 #define MAX_READ_SIZE 5000000
/* Enumerate candidate standard frame rates (scaled by 12*1001): indices below
 * 60*12 cover i/1.001 fps in 1/12 steps; the tail adds 24/30/60/12/15 fps
 * exactly. */
1978 #define MAX_STD_TIMEBASES (60*12+5)
1979 static int get_std_framerate(int i){
1980 if(i<60*12) return i*1001;
1981 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
/* Heuristic: nonzero when the codec time base should not be trusted as the
 * real frame rate (absurdly low/high fps, or codecs known to misuse it).
 * NOTE(review): the closing return statements are elided in this chunk. */
1985 * Is the time base unreliable.
1986 * This is a heuristic to balance between quick acceptance of the values in
1987 * the headers vs. some extra checks.
1988 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
1989 * MPEG-2 commonly misuses field repeat flags to store different framerates.
1990 * And there are "variable" fps files this needs to detect as well.
1992 static int tb_unreliable(AVCodecContext *c){
1993 if( c->time_base.den >= 101L*c->time_base.num
1994 || c->time_base.den < 5L*c->time_base.num
1995 /* || c->codec_tag == AV_RL32("DIVX")
1996 || c->codec_tag == AV_RL32("XVID")*/
1997 || c->codec_id == CODEC_ID_MPEG2VIDEO
1998 || c->codec_id == CODEC_ID_H264
/* Probe the input until every stream has usable codec parameters (or limits
 * are hit): read packets, feed parsers/decoders, collect per-stream DTS
 * deltas to estimate the real frame rate against a table of standard rates,
 * extract extradata via parser split, then estimate timings and chapter ends.
 * Packets read here are buffered so demuxing can continue afterwards.
 * NOTE(review): this chunk elides many interior lines (loop heads, closing
 * braces, several returns); comments below describe only the visible logic. */
2004 int av_find_stream_info(AVFormatContext *ic)
2006 int i, count, ret, read_size, j;
2008 AVPacket pkt1, *pkt;
/* per-stream accumulators for frame-rate estimation */
2009 int64_t last_dts[MAX_STREAMS];
2010 int64_t duration_gcd[MAX_STREAMS]={0};
2011 int duration_count[MAX_STREAMS]={0};
2012 double (*duration_error)[MAX_STD_TIMEBASES];
2013 int64_t old_offset = url_ftell(ic->pb);
2014 int64_t codec_info_duration[MAX_STREAMS]={0};
2015 int codec_info_nb_frames[MAX_STREAMS]={0};
2017 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
2018 if (!duration_error) return AVERROR(ENOMEM);
2020 for(i=0;i<ic->nb_streams;i++) {
2021 st = ic->streams[i];
2022 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2023 /* if(!st->time_base.num)
/* seed the codec time base from the stream's when unset */
2025 if(!st->codec->time_base.num)
2026 st->codec->time_base= st->time_base;
2028 //only for the split stuff
2030 st->parser = av_parser_init(st->codec->codec_id);
2031 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2032 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2037 for(i=0;i<MAX_STREAMS;i++){
2038 last_dts[i]= AV_NOPTS_VALUE;
2044 if(url_interrupt_cb()){
2045 ret= AVERROR(EINTR);
2049 /* check if one codec still needs to be handled */
2050 for(i=0;i<ic->nb_streams;i++) {
2051 st = ic->streams[i];
2052 if (!has_codec_parameters(st->codec))
2054 /* variable fps and no guess at the real fps */
2055 if( tb_unreliable(st->codec)
2056 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
2058 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2060 if(st->first_dts == AV_NOPTS_VALUE)
2063 if (i == ic->nb_streams) {
2064 /* NOTE: if the format has no header, then we need to read
2065 some packets to get most of the streams, so we cannot
2067 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2068 /* if we found the info for all the codecs, we can stop */
2073 /* we did not get all the codec info, but we read too much data */
2074 if (read_size >= MAX_READ_SIZE) {
2079 /* NOTE: a new stream can be added there if no header in file
2080 (AVFMTCTX_NOHEADER) */
2081 ret = av_read_frame_internal(ic, &pkt1);
2082 if(ret == AVERROR(EAGAIN))
2086 ret = -1; /* we could not have all the codec parameters before EOF */
2087 for(i=0;i<ic->nb_streams;i++) {
2088 st = ic->streams[i];
2089 if (!has_codec_parameters(st->codec)){
2091 avcodec_string(buf, sizeof(buf), st->codec, 0);
2092 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
/* keep the packet for later demuxing; duplicate so the data survives */
2100 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2101 if(av_dup_packet(pkt) < 0) {
2102 av_free(duration_error);
2103 return AVERROR(ENOMEM);
2106 read_size += pkt->size;
2108 st = ic->streams[pkt->stream_index];
2109 if(codec_info_nb_frames[st->index]>1)
2110 codec_info_duration[st->index] += pkt->duration;
2111 if (pkt->duration != 0)
2112 codec_info_nb_frames[st->index]++;
2115 int index= pkt->stream_index;
2116 int64_t last= last_dts[index];
2117 int64_t duration= pkt->dts - last;
2119 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2120 double dur= duration * av_q2d(st->time_base);
2122 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2123 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2124 if(duration_count[index] < 2)
2125 memset(duration_error[index], 0, sizeof(*duration_error));
/* accumulate squared error of this DTS delta against each standard rate */
2126 for(i=1; i<MAX_STD_TIMEBASES; i++){
2127 int framerate= get_std_framerate(i);
2128 int ticks= lrintf(dur*framerate/(1001*12));
2129 double error= dur - ticks*1001*12/(double)framerate;
2130 duration_error[index][i] += error*error;
2132 duration_count[index]++;
2133 // ignore the first 4 values, they might have some random jitter
2134 if (duration_count[index] > 3)
2135 duration_gcd[index] = av_gcd(duration_gcd[index], duration);
2137 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
2138 last_dts[pkt->stream_index]= pkt->dts;
/* harvest extradata (e.g. headers) via the parser's split callback */
2140 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2141 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2143 st->codec->extradata_size= i;
2144 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2145 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2146 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2150 /* if still no information, we try to open the codec and to
2151 decompress the frame. We try to avoid that in most cases as
2152 it takes longer and uses more memory. For MPEG-4, we need to
2153 decompress for QuickTime. */
2154 if (!has_codec_parameters(st->codec) /*&&
2155 (st->codec->codec_id == CODEC_ID_FLV1 ||
2156 st->codec->codec_id == CODEC_ID_H264 ||
2157 st->codec->codec_id == CODEC_ID_H263 ||
2158 st->codec->codec_id == CODEC_ID_H261 ||
2159 st->codec->codec_id == CODEC_ID_VORBIS ||
2160 st->codec->codec_id == CODEC_ID_MJPEG ||
2161 st->codec->codec_id == CODEC_ID_PNG ||
2162 st->codec->codec_id == CODEC_ID_PAM ||
2163 st->codec->codec_id == CODEC_ID_PGM ||
2164 st->codec->codec_id == CODEC_ID_PGMYUV ||
2165 st->codec->codec_id == CODEC_ID_PBM ||
2166 st->codec->codec_id == CODEC_ID_PPM ||
2167 st->codec->codec_id == CODEC_ID_SHORTEN ||
2168 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
2169 try_decode_frame(st, pkt->data, pkt->size);
/* stop analysing a stream once max_analyze_duration is reached */
2171 if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2177 // close codecs which were opened in try_decode_frame()
2178 for(i=0;i<ic->nb_streams;i++) {
2179 st = ic->streams[i];
2180 if(st->codec->codec)
2181 avcodec_close(st->codec);
2183 for(i=0;i<ic->nb_streams;i++) {
2184 st = ic->streams[i];
2185 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2186 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2187 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2189 // the check for tb_unreliable() is not completely correct, since this is not about handling
2190 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2191 // ipmovie.c produces.
2192 if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1)
2193 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX);
2194 if(duration_count[i]
2195 && tb_unreliable(st->codec) /*&&
2196 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2197 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
/* pick the standard frame rate with the smallest accumulated error */
2199 double best_error= 2*av_q2d(st->time_base);
2200 best_error= best_error*best_error*duration_count[i]*1000*12*30;
2202 for(j=1; j<MAX_STD_TIMEBASES; j++){
2203 double error= duration_error[i][j] * get_std_framerate(j);
2204 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
2205 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2206 if(error < best_error){
2208 num = get_std_framerate(j);
2211 // do not increase frame rate by more than 1 % in order to match a standard rate.
2212 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2213 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2216 if (!st->r_frame_rate.num){
/* fall back to codec time base (scaled by ticks_per_frame) or stream time base */
2217 if( st->codec->time_base.den * (int64_t)st->time_base.num
2218 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2219 st->r_frame_rate.num = st->codec->time_base.den;
2220 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2222 st->r_frame_rate.num = st->time_base.den;
2223 st->r_frame_rate.den = st->time_base.num;
2226 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
2227 if(!st->codec->bits_per_coded_sample)
2228 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2232 av_estimate_timings(ic, old_offset);
2234 compute_chapters_end(ic);
2237 /* correct DTS for B-frame streams with no timestamps */
2238 for(i=0;i<ic->nb_streams;i++) {
2239 st = ic->streams[i];
2240 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
2242 ppktl = &ic->packet_buffer;
2244 if(ppkt1->stream_index != i)
2246 if(ppkt1->pkt->dts < 0)
2248 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2250 ppkt1->pkt->dts -= delta;
2255 st->cur_dts -= delta;
2261 av_free(duration_error);
2266 /*******************************************************/
/* Resume playback: prefer the demuxer's read_play callback, otherwise unpause
 * the underlying I/O context; ENOSYS when neither path applies.
 * NOTE(review): the #if/#endif around the fpause fallback is elided here. */
2268 int av_read_play(AVFormatContext *s)
2270 if (s->iformat->read_play)
2271 return s->iformat->read_play(s);
2273 return av_url_read_fpause(s->pb, 0);
2274 return AVERROR(ENOSYS);
/* Pause playback: mirror of av_read_play with pause=1 on the fallback path.
 * NOTE(review): the #if/#endif around the fpause fallback is elided here. */
2277 int av_read_pause(AVFormatContext *s)
2279 if (s->iformat->read_pause)
2280 return s->iformat->read_pause(s);
2282 return av_url_read_fpause(s->pb, 1);
2283 return AVERROR(ENOSYS);
/* Tear down a demuxing context: demuxer close hook, then per-stream state
 * (parser, buffered packet, metadata, index, extradata, ...), programs,
 * buffered packets, chapters and container metadata. Does not close s->pb. */
2286 void av_close_input_stream(AVFormatContext *s)
2291 if (s->iformat->read_close)
2292 s->iformat->read_close(s);
2293 for(i=0;i<s->nb_streams;i++) {
2294 /* free all data in a stream component */
2297 av_parser_close(st->parser);
2298 av_free_packet(&st->cur_pkt);
2300 av_metadata_free(&st->metadata);
2301 av_free(st->index_entries);
2302 av_free(st->codec->extradata);
2304 av_free(st->filename);
2305 av_free(st->priv_data);
/* free programs back-to-front, then the array itself */
2308 for(i=s->nb_programs-1; i>=0; i--) {
2309 av_freep(&s->programs[i]->provider_name);
2310 av_freep(&s->programs[i]->name);
2311 av_metadata_free(&s->programs[i]->metadata);
2312 av_freep(&s->programs[i]->stream_index);
2313 av_freep(&s->programs[i]);
2315 av_freep(&s->programs);
2316 flush_packet_queue(s);
2317 av_freep(&s->priv_data);
2318 while(s->nb_chapters--) {
2319 av_free(s->chapters[s->nb_chapters]->title);
2320 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2321 av_free(s->chapters[s->nb_chapters]);
2323 av_freep(&s->chapters);
2324 av_metadata_free(&s->metadata);
/* Close a demuxing context and, unless the format is AVFMT_NOFILE, the
 * underlying ByteIOContext as well (grabbed before the stream teardown).
 * NOTE(review): the url_fclose(pb) call is elided in this chunk. */
2328 void av_close_input_file(AVFormatContext *s)
2330 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2331 av_close_input_stream(s);
/* Allocate and register a new AVStream on the context: fresh codec context,
 * timestamps initialised to AV_NOPTS_VALUE, MPEG-like default time base
 * (33 bits, 1/90000). Returns NULL when MAX_STREAMS is reached (elided). */
2336 AVStream *av_new_stream(AVFormatContext *s, int id)
2341 if (s->nb_streams >= MAX_STREAMS)
2344 st = av_mallocz(sizeof(AVStream));
2348 st->codec= avcodec_alloc_context();
2350 /* no default bitrate if decoding */
2351 st->codec->bit_rate = 0;
2353 st->index = s->nb_streams;
2355 st->start_time = AV_NOPTS_VALUE;
2356 st->duration = AV_NOPTS_VALUE;
2357 /* we set the current DTS to 0 so that formats without any timestamps
2358 but durations get some timestamps, formats with some unknown
2359 timestamps have their first few packets buffered and the
2360 timestamps corrected before they are returned to the user */
2362 st->first_dts = AV_NOPTS_VALUE;
2364 /* default pts setting is MPEG-like */
2365 av_set_pts_info(st, 33, 1, 90000);
2366 st->last_IP_pts = AV_NOPTS_VALUE;
/* reorder buffer starts empty: no known PTS values */
2367 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2368 st->pts_buffer[i]= AV_NOPTS_VALUE;
2369 st->reference_dts = AV_NOPTS_VALUE;
2371 st->sample_aspect_ratio = (AVRational){0,1};
2373 s->streams[s->nb_streams++] = st;
/* Return the program with the given id, creating and registering it (with
 * AVDISCARD_NONE) if no existing program matches. */
2377 AVProgram *av_new_program(AVFormatContext *ac, int id)
2379 AVProgram *program=NULL;
2383 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
/* reuse an existing program with this id if present */
2386 for(i=0; i<ac->nb_programs; i++)
2387 if(ac->programs[i]->id == id)
2388 program = ac->programs[i];
2391 program = av_mallocz(sizeof(AVProgram));
2394 dynarray_add(&ac->programs, &ac->nb_programs, program);
2395 program->discard = AVDISCARD_NONE;
/* Find or create the chapter with the given id and (re)set its title,
 * time base, start and end. The old title is freed before the metadata
 * "title" entry is set. */
2402 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2404 AVChapter *chapter = NULL;
/* reuse an existing chapter with this id if present */
2407 for(i=0; i<s->nb_chapters; i++)
2408 if(s->chapters[i]->id == id)
2409 chapter = s->chapters[i];
2412 chapter= av_mallocz(sizeof(AVChapter));
2415 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2417 av_free(chapter->title);
2418 av_metadata_set(&chapter->metadata, "title", title);
2420 chapter->time_base= time_base;
2421 chapter->start = start;
2427 /************************************************************/
2428 /* output media file */
/* Prepare a muxing context: allocate the muxer's private data (if any) and
 * forward the AVFormatParameters to its set_parameters callback. */
2430 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2434 if (s->oformat->priv_data_size > 0) {
2435 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2437 return AVERROR(ENOMEM);
2439 s->priv_data = NULL;
2441 if (s->oformat->set_parameters) {
2442 ret = s->oformat->set_parameters(s, ap);
/* Validate stream parameters, resolve codec tags, call the muxer's
 * write_header, then initialise per-stream fractional PTS generation.
 * NOTE(review): several error returns and closing braces are elided here. */
2449 int av_write_header(AVFormatContext *s)
2454 // some sanity checks
2455 for(i=0;i<s->nb_streams;i++) {
2458 switch (st->codec->codec_type) {
2459 case CODEC_TYPE_AUDIO:
2460 if(st->codec->sample_rate<=0){
2461 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
/* derive block_align for fixed-size-sample codecs when unset */
2464 if(!st->codec->block_align)
2465 st->codec->block_align = st->codec->channels *
2466 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2468 case CODEC_TYPE_VIDEO:
2469 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2470 av_log(s, AV_LOG_ERROR, "time base not set\n");
2473 if(st->codec->width<=0 || st->codec->height<=0){
2474 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2477 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2478 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2484 if(s->oformat->codec_tag){
2485 if(st->codec->codec_tag){
2487 //check that tag + id is in the table
2488 //if neither is in the table -> OK
2489 //if tag is in the table with another id -> FAIL
2490 //if id is in the table with another tag -> FAIL unless strict < ?
2492 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2495 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2496 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2497 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
/* priv_data may not exist yet if av_set_parameters was never called */
2500 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2501 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2503 return AVERROR(ENOMEM);
2506 #if LIBAVFORMAT_VERSION_MAJOR < 53
2507 ff_metadata_mux_compat(s);
2510 if(s->oformat->write_header){
2511 ret = s->oformat->write_header(s);
2516 /* init PTS generation */
2517 for(i=0;i<s->nb_streams;i++) {
2518 int64_t den = AV_NOPTS_VALUE;
2521 switch (st->codec->codec_type) {
2522 case CODEC_TYPE_AUDIO:
2523 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2525 case CODEC_TYPE_VIDEO:
2526 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2531 if (den != AV_NOPTS_VALUE) {
2533 return AVERROR_INVALIDDATA;
2534 av_frac_init(&st->pts, 0, 0, den);
/* Muxing-side timestamp fixup: fill in missing duration/pts/dts (using the
 * stream's fractional PTS counter and a small reorder buffer for B-frame
 * delay), reject non-monotone DTS and pts<dts, then advance st->pts by the
 * frame duration. NOTE(review): several returns/braces elided in this chunk. */
2540 //FIXME merge with compute_pkt_fields
2541 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2542 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2543 int num, den, frame_size, i;
2545 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2547 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2550 /* duration field */
2551 if (pkt->duration == 0) {
2552 compute_frame_duration(&num, &den, st, NULL, pkt);
2554 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
/* with no B-frame delay, a known dts can stand in for a missing pts */
2558 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2561 //XXX/FIXME this is a temporary hack until all encoders output pts
2562 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2564 // pkt->pts= st->cur_dts;
2565 pkt->pts= st->pts.val;
2568 //calculate dts from pts
2569 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
2570 st->pts_buffer[0]= pkt->pts;
2571 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2572 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
/* single bubble pass keeps the reorder buffer sorted; min becomes the dts */
2573 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2574 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2576 pkt->dts= st->pts_buffer[0];
2579 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2580 av_log(st->codec, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2583 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2584 av_log(st->codec, AV_LOG_ERROR, "error, pts < dts\n");
2588 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2589 st->cur_dts= pkt->dts;
2590 st->pts.val= pkt->dts;
2593 switch (st->codec->codec_type) {
2594 case CODEC_TYPE_AUDIO:
2595 frame_size = get_audio_frame_size(st->codec, pkt->size);
2597 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2598 likely equal to the encoder delay, but it would be better if we
2599 had the real timestamps from the encoder */
2600 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2601 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2604 case CODEC_TYPE_VIDEO:
2605 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/* Write one packet straight to the muxer: fix up its timestamps, bail on
 * timestamp errors unless the muxer declares AVFMT_NOTIMESTAMPS, then call
 * write_packet and report any I/O error from the output context. */
2613 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2615 int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2617 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2620 ret= s->oformat->write_packet(s, pkt);
2622 ret= url_ferror(s->pb);
/* Insert a packet into the interleaving buffer, keeping it ordered by the
 * supplied comparator. Takes ownership of the packet's data: a uniquely
 * owned av_destruct_packet buffer is adopted directly, shared data is duped. */
2626 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2627 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2629 AVPacketList **next_point, *this_pktl;
2631 this_pktl = av_mallocz(sizeof(AVPacketList));
2632 this_pktl->pkt= *pkt;
2633 if(pkt->destruct == av_destruct_packet)
2634 pkt->destruct= NULL; // not shared -> must keep original from being freed
2636 av_dup_packet(&this_pktl->pkt); //shared -> must dup
/* walk the sorted list to the first element that should come after pkt */
2638 next_point = &s->packet_buffer;
2640 if(compare(s, &(*next_point)->pkt, pkt))
2642 next_point= &(*next_point)->next;
2644 this_pktl->next= *next_point;
2645 *next_point= this_pktl;
/* Comparator for DTS-ordered interleaving: cross-multiplies the two streams'
 * time bases so DTS values are compared in a common unit (overflow noted in
 * the existing FIXME). NOTE(review): the NOPTS branch body is elided here. */
2648 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2650 AVStream *st = s->streams[ pkt ->stream_index];
2651 AVStream *st2= s->streams[ next->stream_index];
2652 int64_t left = st2->time_base.num * (int64_t)st ->time_base.den;
2653 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2655 if (pkt->dts == AV_NOPTS_VALUE)
2658 return next->dts * left > pkt->dts * right; //FIXME this can overflow
/* DTS-based interleaving: buffer incoming packets sorted by DTS and emit the
 * head only once every stream has at least one buffered packet (or on flush).
 * NOTE(review): loop structure and the return paths are elided here. */
2661 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2664 int streams[MAX_STREAMS];
2667 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
/* count how many distinct streams currently have buffered packets */
2670 memset(streams, 0, sizeof(streams));
2671 pktl= s->packet_buffer;
2673 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2674 if(streams[ pktl->pkt.stream_index ] == 0)
2676 streams[ pktl->pkt.stream_index ]++;
2680 if(stream_count && (s->nb_streams == stream_count || flush)){
2681 pktl= s->packet_buffer;
2684 s->packet_buffer= pktl->next;
2688 av_init_packet(out);
/* Dispatch interleaving to the muxer's own callback when it has one,
 * otherwise use the default DTS-based implementation. */
2694 * Interleaves an AVPacket correctly so it can be muxed.
2695 * @param out the interleaved packet will be output here
2696 * @param in the input packet
2697 * @param flush 1 if no further packets are available as input and all
2698 * remaining packets should be output
2699 * @return 1 if a packet was output, 0 if no packet could be output,
2700 * < 0 if an error occurred
2702 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2703 if(s->oformat->interleave_packet)
2704 return s->oformat->interleave_packet(s, out, in, flush);
2706 return av_interleave_packet_per_dts(s, out, in, flush);
/**
 * Write a packet to an output media file, ensuring correct interleaving.
 * Derives/validates the packet's timestamps, hands it to the
 * interleaver, then writes every packet the interleaver releases and
 * reports any I/O error seen on s->pb.
 *
 * NOTE(review): garbled fragment — the if-statement bodies, the
 * surrounding loop, the opkt declaration and several returns are elided
 * between these lines; compare with upstream FFmpeg libavformat/utils.c.
 */
2709 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2710 AVStream *st= s->streams[ pkt->stream_index];
2712 //FIXME/XXX/HACK drop zero sized packets
2713 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2716 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2717 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2720 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
/* drain packets released by the interleaver and pass them to the muxer */
2725 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2726 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2729 ret= s->oformat->write_packet(s, &opkt);
2731 av_free_packet(&opkt);
2736 if(url_ferror(s->pb))
2737 return url_ferror(s->pb);
/**
 * Flush all remaining buffered packets through the interleaver, write
 * the muxer trailer, and free per-stream and muxer private data. Must
 * be called after the last packet has been written.
 *
 * NOTE(review): garbled fragment — the function's opening brace, local
 * declarations (ret, i, pkt), the flush loop header and several gotos/
 * returns are elided; compare with upstream FFmpeg libavformat/utils.c.
 */
2741 int av_write_trailer(AVFormatContext *s)
/* flush: ask the interleaver for packets with no further input */
2747 ret= av_interleave_packet(s, &pkt, NULL, 1);
2748 if(ret<0) //FIXME cleanup needed for ret<0 ?
2753 ret= s->oformat->write_packet(s, &pkt);
2755 av_free_packet(&pkt);
2759 if(url_ferror(s->pb))
2763 if(s->oformat->write_trailer)
2764 ret = s->oformat->write_trailer(s);
2767 ret=url_ferror(s->pb);
/* release per-stream and muxer private data */
2768 for(i=0;i<s->nb_streams;i++)
2769 av_freep(&s->streams[i]->priv_data);
2770 av_freep(&s->priv_data);
/**
 * Add stream index idx to the program whose id is progid, growing its
 * stream_index array by one. Does nothing when no program matches or
 * when idx is already listed (the realloc failure path is elided here).
 *
 * NOTE(review): garbled fragment — the opening brace, the tmp/void*
 * declaration, continue/return statements and closing braces are
 * elided; compare with upstream FFmpeg libavformat/utils.c.
 */
2774 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2777 AVProgram *program=NULL;
/* locate the program with the requested id */
2780 for(i=0; i<ac->nb_programs; i++){
2781 if(ac->programs[i]->id != progid)
2783 program = ac->programs[i];
/* skip if the stream index is already registered with this program */
2784 for(j=0; j<program->nb_stream_indexes; j++)
2785 if(program->stream_index[j] == idx)
2788 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
2791 program->stream_index = tmp;
2792 program->stream_index[program->nb_stream_indexes++] = idx;
2797 static void print_fps(double d, const char *postfix){
2798 uint64_t v= lrintf(d*100);
2799 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
2800 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
2801 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
2804 /* "user interface" functions */
/**
 * Print a one-line description of stream i of ic via av_log:
 * "Stream #index.i", optional id/language, codec string, aspect ratios
 * and — for video — the tbr/tbn/tbc rates.
 *
 * NOTE(review): garbled fragment — the opening brace, the buf[]
 * declaration, several if-bodies and closing braces are elided between
 * these lines; compare with upstream FFmpeg libavformat/utils.c.
 */
2805 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2808 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2809 AVStream *st = ic->streams[i];
2810 int g = av_gcd(st->time_base.num, st->time_base.den);
2811 AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
2812 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2813 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2814 /* the pid is an important information, so we display it */
2815 /* XXX: add a generic system */
2816 if (flags & AVFMT_SHOW_IDS)
2817 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2819 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
2820 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2821 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* show PAR/DAR only when the stream-level SAR differs from the codec's */
2822 if (st->sample_aspect_ratio.num && // default
2823 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
2824 AVRational display_aspect_ratio;
2825 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
2826 st->codec->width*st->sample_aspect_ratio.num,
2827 st->codec->height*st->sample_aspect_ratio.den,
2829 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
2830 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2831 display_aspect_ratio.num, display_aspect_ratio.den);
2833 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2834 if(st->r_frame_rate.den && st->r_frame_rate.num)
2835 print_fps(av_q2d(st->r_frame_rate), "tbr");
2836 if(st->time_base.den && st->time_base.num)
2837 print_fps(1/av_q2d(st->time_base), "tbn");
2838 if(st->codec->time_base.den && st->codec->time_base.num)
2839 print_fps(1/av_q2d(st->codec->time_base), "tbc");
2841 av_log(NULL, AV_LOG_INFO, "\n");
/**
 * Print human-readable information about ic (container name, duration,
 * start time, bitrate, programs, and one line per stream) via av_log at
 * AV_LOG_INFO level.
 *
 * NOTE(review): garbled fragment — the remaining parameters (index,
 * url, is_output), local declarations, the secs->hours/mins split and
 * several braces are elided between these lines; compare with upstream
 * FFmpeg libavformat/utils.c.
 */
2844 void dump_format(AVFormatContext *ic,
2851 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2852 is_output ? "Output" : "Input",
2854 is_output ? ic->oformat->name : ic->iformat->name,
2855 is_output ? "to" : "from", url);
2857 av_log(NULL, AV_LOG_INFO, " Duration: ");
2858 if (ic->duration != AV_NOPTS_VALUE) {
2859 int hours, mins, secs, us;
/* duration is in AV_TIME_BASE units; split into H:MM:SS.cc */
2860 secs = ic->duration / AV_TIME_BASE;
2861 us = ic->duration % AV_TIME_BASE;
2866 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2867 (100 * us) / AV_TIME_BASE);
2869 av_log(NULL, AV_LOG_INFO, "N/A");
2871 if (ic->start_time != AV_NOPTS_VALUE) {
2873 av_log(NULL, AV_LOG_INFO, ", start: ");
2874 secs = ic->start_time / AV_TIME_BASE;
2875 us = ic->start_time % AV_TIME_BASE;
2876 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2877 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2879 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2881 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2883 av_log(NULL, AV_LOG_INFO, "N/A");
2885 av_log(NULL, AV_LOG_INFO, "\n");
/* list streams grouped by program when programs exist */
2887 if(ic->nb_programs) {
2889 for(j=0; j<ic->nb_programs; j++) {
2890 AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
2892 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2893 name ? name->value : "");
2894 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
2895 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2898 for(i=0;i<ic->nb_streams;i++)
2899 dump_stream_format(ic, i, index, is_output);
2902 #if LIBAVFORMAT_VERSION_MAJOR < 53
/**
 * Parse a frame-size string (e.g. "640x480" or a size abbreviation)
 * into width/height.
 *
 * @deprecated compatibility wrapper kept while
 *             LIBAVFORMAT_VERSION_MAJOR < 53; call
 *             av_parse_video_frame_size() directly instead.
 * @return the return value of av_parse_video_frame_size()
 */
int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
{
    return av_parse_video_frame_size(width_ptr, height_ptr, str);
}
2908 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
2910 AVRational frame_rate;
2911 int ret = av_parse_video_frame_rate(&frame_rate, arg);
2912 *frame_rate_num= frame_rate.num;
2913 *frame_rate_den= frame_rate.den;
2918 int64_t av_gettime(void)
2921 gettimeofday(&tv,NULL);
2922 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/**
 * Parse a date/time string into a microsecond count. With duration==0
 * the string is an absolute date — "now", or
 * [YYYY-MM-DD][{T| }HH:MM:SS[.m...]][z|Z] (a trailing z/Z means UTC);
 * with duration!=0 it is a span — HH:MM:SS or plain seconds, with an
 * optional fractional part and sign.
 *
 * NOTE(review): garbled fragment — local declarations (p, q, dt, t,
 * negative, ...), format-table entries, several if/else branches and
 * the error return are elided between these lines; compare with
 * upstream FFmpeg libavformat/utils.c.
 */
2925 int64_t parse_date(const char *datestr, int duration)
2931 static const char * const date_fmt[] = {
2935 static const char * const time_fmt[] = {
2945 time_t now = time(0);
2947 len = strlen(datestr);
2949 lastch = datestr[len - 1];
/* a trailing 'z'/'Z' marks the timestamp as UTC */
2952 is_utc = (lastch == 'z' || lastch == 'Z');
2954 memset(&dt, 0, sizeof(dt));
2959 if (!strncasecmp(datestr, "now", len))
2960 return (int64_t) now * 1000000;
2962 /* parse the year-month-day part */
2963 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
2964 q = small_strptime(p, date_fmt[i], &dt);
2970 /* if the year-month-day part is missing, then take the
2971 * current year-month-day time */
2976 dt = *localtime(&now);
2978 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2983 if (*p == 'T' || *p == 't' || *p == ' ')
2986 /* parse the hour-minute-second part */
2987 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
2988 q = small_strptime(p, time_fmt[i], &dt);
2994 /* parse datestr as a duration */
2999 /* parse datestr as HH:MM:SS */
3000 q = small_strptime(p, time_fmt[0], &dt);
3002 /* parse datestr as S+ */
3003 dt.tm_sec = strtol(p, (char **)&q, 10);
3005 /* the parsing didn't succeed */
3012 /* Now we have all the fields that we can get */
3018 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
3020 dt.tm_isdst = -1; /* unknown */
3030 /* parse the .m... part */
/* accumulate up to 6 fractional digits as microseconds */
3034 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
3037 val += n * (*q - '0');
3041 return negative ? -t : t;
/**
 * Look up tag1 in an HTTP-style "tag1=val1&tag2=val2" info string and,
 * when found, copy its value into arg (truncated to arg_size).
 *
 * NOTE(review): garbled fragment — the opening brace, local
 * declarations (p, q, tag[]), the outer scan loop, '+'/'%XX' unescaping
 * and the return statements are elided between these lines; compare
 * with upstream FFmpeg libavformat/utils.c.
 */
3044 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
/* copy the tag name up to '=', '&' or end of string */
3054 while (*p != '\0' && *p != '=' && *p != '&') {
3055 if ((q - tag) < sizeof(tag) - 1)
/* copy the value up to the next '&' or end of string */
3063 while (*p != '&' && *p != '\0') {
3064 if ((q - arg) < arg_size - 1) {
3074 if (!strcmp(tag, tag1))
/**
 * Build a frame filename: substitutes the first "%d"/"%0Nd" sequence in
 * path with 'number' and writes the result to buf. Fails when the
 * pattern contains no %d, more than one, or the expansion would
 * overflow buf.
 *
 * NOTE(review): garbled fragment — the opening brace, the scan loop
 * over path, '%%' handling, the success/failure returns and closing
 * braces are elided between these lines; compare with upstream FFmpeg
 * libavformat/utils.c.
 */
3083 int av_get_frame_filename(char *buf, int buf_size,
3084 const char *path, int number)
3087 char *q, buf1[20], c;
3088 int nd, len, percentd_found;
/* read the zero-pad width between '%0' and 'd' */
3100 while (isdigit(*p)) {
3101 nd = nd * 10 + *p++ - '0';
3104 } while (isdigit(c));
/* format the number and splice it into the output */
3113 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3115 if ((q - buf + len) > buf_size - 1)
3117 memcpy(q, buf1, len);
3125 if ((q - buf) < buf_size - 1)
3129 if (!percentd_found)
/**
 * Dump size bytes of buf as 16-byte rows of hex followed by printable
 * ASCII, either to FILE *f or — when f is NULL — via av_log(avcl,
 * level, ...).
 *
 * NOTE(review): garbled fragment — the opening brace, loop variables
 * (i, j, len, c), the padding/newline logic, the closing #undef PRINT
 * and braces are elided between these lines; compare with upstream
 * FFmpeg libavformat/utils.c.
 */
3138 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3141 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
/* one output row per 16 input bytes */
3143 for(i=0;i<size;i+=16) {
3150 PRINT(" %02x", buf[i+j]);
/* ASCII column: non-printable bytes are replaced */
3155 for(j=0;j<len;j++) {
3157 if (c < ' ' || c > '~')
/**
 * Hex-dump size bytes of buf to the given stdio stream.
 * Thin wrapper over hex_dump_internal() with av_log output disabled.
 *
 * @param f    destination stream
 * @param buf  bytes to dump
 * @param size number of bytes
 */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
{
    hex_dump_internal(NULL, f, 0, buf, size);
}
3171 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3173 hex_dump_internal(avcl, NULL, level, buf, size);
3176 //FIXME needs to know the time_base
/**
 * Print an AVPacket's metadata (stream index, keyframe flag, duration,
 * dts, pts, size) and optionally a hex dump of its payload, either to
 * FILE *f or — when f is NULL — via av_log(avcl, level, ...).
 *
 * NOTE(review): garbled fragment — the opening brace, the "NOPTS"
 * branches' bodies, newline PRINTs, the dump_payload check and the
 * closing #undef/braces are elided between these lines; compare with
 * upstream FFmpeg libavformat/utils.c.
 */
3177 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3179 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3180 PRINT("stream #%d:\n", pkt->stream_index);
3181 PRINT(" keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
3182 PRINT(" duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3183 /* DTS is _always_ valid after av_read_frame() */
3185 if (pkt->dts == AV_NOPTS_VALUE)
3188 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3189 /* PTS may not be known if B-frames are present. */
3191 if (pkt->pts == AV_NOPTS_VALUE)
3194 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3196 PRINT(" size=%d\n", pkt->size);
3199 av_hex_dump(f, pkt->data, pkt->size);
3202 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3204 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
3207 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3209 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
/**
 * Split a URL of the form
 *   proto://user[:pass]@hostname[:port]/path
 * into its components. Any output buffer may have size 0 to skip that
 * field; *port_ptr is left at -1 when no port is present. A '['-
 * bracketed hostname (IPv6 literal) is handled specially.
 *
 * NOTE(review): garbled fragment — the url parameter, the port_ptr
 * parameter, several early returns and closing braces are elided
 * between these lines; compare with upstream FFmpeg
 * libavformat/utils.c.
 */
3212 void url_split(char *proto, int proto_size,
3213 char *authorization, int authorization_size,
3214 char *hostname, int hostname_size,
3216 char *path, int path_size,
3219 const char *p, *ls, *at, *col, *brk;
/* initialize every output to its "empty" value */
3221 if (port_ptr) *port_ptr = -1;
3222 if (proto_size > 0) proto[0] = 0;
3223 if (authorization_size > 0) authorization[0] = 0;
3224 if (hostname_size > 0) hostname[0] = 0;
3225 if (path_size > 0) path[0] = 0;
3227 /* parse protocol */
3228 if ((p = strchr(url, ':'))) {
3229 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3234 /* no protocol means plain filename */
3235 av_strlcpy(path, url, path_size);
3239 /* separate path from hostname */
3240 ls = strchr(p, '/');
3242 ls = strchr(p, '?');
3244 av_strlcpy(path, ls, path_size);
3246 ls = &p[strlen(p)]; // XXX
3248 /* the rest is hostname, use that to parse auth/port */
3250 /* authorization (user[:pass]@hostname) */
3251 if ((at = strchr(p, '@')) && at < ls) {
3252 av_strlcpy(authorization, p,
3253 FFMIN(authorization_size, at + 1 - p));
3254 p = at + 1; /* skip '@' */
/* '['-bracketed hostname: IPv6 literal, port follows the ']' */
3257 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3259 av_strlcpy(hostname, p + 1,
3260 FFMIN(hostname_size, brk - p));
3261 if (brk[1] == ':' && port_ptr)
3262 *port_ptr = atoi(brk + 2);
3263 } else if ((col = strchr(p, ':')) && col < ls) {
3264 av_strlcpy(hostname, p,
3265 FFMIN(col + 1 - p, hostname_size));
3266 if (port_ptr) *port_ptr = atoi(col + 1);
3268 av_strlcpy(hostname, p,
3269 FFMIN(ls + 1 - p, hostname_size));
/**
 * Write the uppercase hexadecimal representation of src into buff.
 * Each input byte produces two output characters; buff must hold at
 * least 2*s bytes. No terminating NUL is written — the caller is
 * responsible for termination if needed.
 *
 * @param buff destination character buffer (>= 2*s bytes)
 * @param src  input bytes
 * @param s    number of input bytes
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s)
{
    int i;
    static const char hex_table[16] = { '0', '1', '2', '3',
                                        '4', '5', '6', '7',
                                        '8', '9', 'A', 'B',
                                        'C', 'D', 'E', 'F' };

    for(i = 0; i < s; i++) {
        buff[i * 2]     = hex_table[src[i] >> 4];
        buff[i * 2 + 1] = hex_table[src[i] & 0xF];
    }

    return buff;
}
3289 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3290 int pts_num, int pts_den)
3292 unsigned int gcd= av_gcd(pts_num, pts_den);
3293 s->pts_wrap_bits = pts_wrap_bits;
3294 s->time_base.num = pts_num/gcd;
3295 s->time_base.den = pts_den/gcd;
3298 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);