2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "allformats.h"
29 * @file libavformat/utils.c
30 * Various utility functions for using ffmpeg library.
/* NOTE(review): this excerpt is damaged — each line carries a fused original
 * line number, and gaps in that numbering show statements/braces were elided.
 * Code below is left byte-identical; comments only annotate what is visible. */
/* Forward declarations for the AVFrac helpers (definitions not in this view). */
33 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
34 static void av_frac_add(AVFrac *f, int64_t incr);
36 /** head of registered input format linked list. */
37 AVInputFormat *first_iformat = NULL;
38 /** head of registered output format linked list. */
39 AVOutputFormat *first_oformat = NULL;
/* Appends 'format' to the tail of the input-format list; walks via a
 * pointer-to-pointer so no special empty-list case is needed.
 * (Body lines between the signature and the loop are elided here.) */
41 void av_register_input_format(AVInputFormat *format)
45 while (*p != NULL) p = &(*p)->next;
/* Same tail-append pattern for the output-format list. */
50 void av_register_output_format(AVOutputFormat *format)
54 while (*p != NULL) p = &(*p)->next;
/* Returns nonzero when filename's extension (text after the last '.')
 * case-insensitively matches one entry of the comma-separated 'extensions'
 * list. ext1/q are declared in elided lines; bound check keeps the copy
 * inside ext1. */
59 int match_ext(const char *filename, const char *extensions)
67 ext = strrchr(filename, '.');
73 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
76 if (!strcasecmp(ext1, ext))
/* Picks the best output format by short name, filename extension, or MIME
 * type, scoring each candidate (scoring lines elided from this excerpt). */
86 AVOutputFormat *guess_format(const char *short_name, const char *filename,
87 const char *mime_type)
89 AVOutputFormat *fmt, *fmt_found;
92 /* specific test for image sequences */
93 #ifdef CONFIG_IMAGE2_MUXER
/* Numbered-filename patterns with a recognizable image codec are routed to
 * the "image2" muxer regardless of extension. */
94 if (!short_name && filename &&
95 av_filename_number_test(filename) &&
96 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
97 return guess_format("image2", NULL, NULL);
100 /* find the proper file type */
/* Walks the registered output-format list, keeping the highest-scoring hit
 * (score accumulation statements elided). */
104 while (fmt != NULL) {
106 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
108 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
110 if (filename && fmt->extensions &&
111 match_ext(filename, fmt->extensions)) {
114 if (score > score_max) {
/* Like guess_format(), but prefers a "<name>_stream" variant of the guessed
 * format when one is registered (e.g. raw elementary-stream muxers). */
123 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
124 const char *mime_type)
126 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
129 AVOutputFormat *stream_fmt;
130 char stream_format_name[64];
132 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
133 stream_fmt = guess_format(stream_format_name, NULL, NULL);
/* Chooses a codec id for the given output format and media type; for video,
 * image2/image2pipe get a per-filename codec guess, otherwise the format's
 * default video codec is used; audio returns the format default directly. */
142 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
143 const char *filename, const char *mime_type, enum CodecType type){
144 if(type == CODEC_TYPE_VIDEO){
145 enum CodecID codec_id= CODEC_ID_NONE;
147 #ifdef CONFIG_IMAGE2_MUXER
148 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
149 codec_id= av_guess_image2_codec(filename);
/* Fall back to the format's default when the image2 guess failed. */
152 if(codec_id == CODEC_ID_NONE)
153 codec_id= fmt->video_codec;
155 }else if(type == CODEC_TYPE_AUDIO)
156 return fmt->audio_codec;
/* Unknown/unsupported media type. */
158 return CODEC_ID_NONE;
/* Linear scan of registered demuxers for an exact short-name match. */
161 AVInputFormat *av_find_input_format(const char *short_name)
164 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
165 if (!strcmp(fmt->name, short_name))
171 /* memory handling */
/* Default destructor: frees pkt->data (av_free call elided from excerpt)
 * and resets the fields so a double-destruct is harmless. */
173 void av_destruct_packet(AVPacket *pkt)
176 pkt->data = NULL; pkt->size = 0;
/* Allocates a packet of 'size' bytes plus zeroed input padding; the unsigned
 * comparison on the next line is an overflow guard for size + padding. */
179 int av_new_packet(AVPacket *pkt, int size)
182 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
183 return AVERROR_NOMEM;
184 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
186 return AVERROR_NOMEM;
/* Zero the padding so bitstream readers can safely over-read. */
187 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
192 pkt->destruct = av_destruct_packet;
/* Allocates a packet and fills it from the byte stream; pos records the
 * stream offset before the read (error/short-read handling elided). */
196 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
198 int ret= av_new_packet(pkt, size);
203 pkt->pos= url_ftell(s);
205 ret= get_buffer(s, pkt->data, size);
/* Makes a packet own its data: if the destructor is not the default one the
 * data belongs to someone else, so copy it into a fresh padded buffer. */
214 int av_dup_packet(AVPacket *pkt)
216 if (pkt->destruct != av_destruct_packet) {
218 /* we duplicate the packet and don't forget to put the padding
/* Same size+padding overflow guard as av_new_packet(). */
220 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
221 return AVERROR_NOMEM;
222 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
224 return AVERROR_NOMEM;
226 memcpy(data, pkt->data, pkt->size);
227 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
229 pkt->destruct = av_destruct_packet;
/* True when 'filename' contains a printf-style frame-number pattern that
 * av_get_frame_filename() can expand (buf declared in an elided line). */
234 int av_filename_number_test(const char *filename)
237 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
/* Scores every registered demuxer against the probe data and returns the
 * best match. Formats whose AVFMT_NOFILE flag disagrees with 'is_opened'
 * are skipped; read_probe() is preferred, extension match is the fallback. */
240 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
242 AVInputFormat *fmt1, *fmt;
243 int score, score_max;
247 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
248 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
251 if (fmt1->read_probe) {
252 score = fmt1->read_probe(pd);
253 } else if (fmt1->extensions) {
254 if (match_ext(pd->filename, fmt1->extensions)) {
258 if (score > score_max) {
266 /************************************************************/
267 /* input media file */
270 * Open a media file from an IO stream. 'fmt' must be specified.
/* AVClass item_name callback: reports the demuxer or muxer name for logging. */
272 static const char* format_to_name(void* ptr)
274 AVFormatContext* fc = (AVFormatContext*) ptr;
275 if(fc->iformat) return fc->iformat->name;
276 else if(fc->oformat) return fc->oformat->name;
280 #define OFFSET(x) offsetof(AVFormatContext,x)
281 #define DEFAULT 0 //should be NAN but it doesnt work as its not a constant in glibc as required by ANSI/ISO C
282 //these names are too long to be readable
283 #define E AV_OPT_FLAG_ENCODING_PARAM
284 #define D AV_OPT_FLAG_DECODING_PARAM
/* AVOption table exposing AVFormatContext fields to the generic option API;
 * D/E mark decoding/encoding applicability. */
286 static const AVOption options[]={
287 {"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
288 {"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
289 {"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
290 {"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
291 {"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
292 {"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
293 {"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
294 {"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
295 {"analyzeduration", NULL, OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
303 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
/* Resets a context to defaults: zero everything, attach the AVClass, then
 * let the option system apply per-option defaults. */
305 static void avformat_get_context_defaults(AVFormatContext *s)
307 memset(s, 0, sizeof(AVFormatContext));
309 s->av_class = &av_format_context_class;
311 av_opt_set_defaults(s);
/* Heap-allocates a defaulted AVFormatContext (NULL check elided here). */
314 AVFormatContext *av_alloc_format_context(void)
317 ic = av_malloc(sizeof(AVFormatContext));
319 avformat_get_context_defaults(ic);
320 ic->av_class = &av_format_context_class;
/* Opens a media stream with a known demuxer: allocates (or reuses a
 * prealloced) context, copies the filename, allocates the demuxer's private
 * data, and calls read_header(). Error-unwind paths are elided here. */
324 int av_open_input_stream(AVFormatContext **ic_ptr,
325 ByteIOContext *pb, const char *filename,
326 AVInputFormat *fmt, AVFormatParameters *ap)
330 AVFormatParameters default_ap;
/* Fall back to zeroed default parameters when the caller passed none. */
334 memset(ap, 0, sizeof(default_ap));
337 if(!ap->prealloced_context)
338 ic = av_alloc_format_context();
348 ic->duration = AV_NOPTS_VALUE;
349 ic->start_time = AV_NOPTS_VALUE;
350 pstrcpy(ic->filename, sizeof(ic->filename), filename);
352 /* allocate private data */
353 if (fmt->priv_data_size > 0) {
354 ic->priv_data = av_mallocz(fmt->priv_data_size);
355 if (!ic->priv_data) {
360 ic->priv_data = NULL;
363 err = ic->iformat->read_header(ic, ap);
/* Record where the packet data starts unless the demuxer already set it. */
367 if (pb && !ic->data_offset)
368 ic->data_offset = url_ftell(&ic->pb);
374 av_freep(&ic->priv_data);
381 /** Size of probe buffer, for guessing file type from file contents. */
382 #define PROBE_BUF_MIN 2048
383 #define PROBE_BUF_MAX (1<<20)
/* Top-level open: probes the format (first by name only, then by content
 * with an exponentially growing probe buffer), opens the file unless the
 * format is AVFMT_NOFILE, then delegates to av_open_input_stream(). */
385 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
388 AVFormatParameters *ap)
390 int err, must_open_file, file_opened, probe_size;
391 AVProbeData probe_data, *pd = &probe_data;
392 ByteIOContext pb1, *pb = &pb1;
397 pd->filename = filename;
402 /* guess format if no file can be opened */
403 fmt = av_probe_input_format(pd, 0);
406 /* do not open file if the format does not need it. XXX: specific
407 hack needed to handle RTSP/TCP */
409 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
411 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise its uninitalized
414 if (!fmt || must_open_file) {
415 /* if no file needed do not try to open one */
416 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
422 url_setbufsize(pb, buf_size);
/* Content probing: double the probe size until a demuxer recognizes the
 * data or PROBE_BUF_MAX is reached; the stream is rewound (or reopened when
 * seeking back fails) after each read. */
425 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
426 /* read probe data */
427 pd->buf= av_realloc(pd->buf, probe_size);
428 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
429 if (url_fseek(pb, 0, SEEK_SET) < 0) {
431 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
437 /* guess file format */
438 fmt = av_probe_input_format(pd, 1);
443 /* if still no format found, error */
449 /* XXX: suppress this hack for redirectors */
450 #ifdef CONFIG_NETWORK
451 if (fmt == &redir_demuxer) {
452 err = redir_open(ic_ptr, pb);
458 /* check filename in case of an image number is expected */
459 if (fmt->flags & AVFMT_NEEDNUMBER) {
460 if (!av_filename_number_test(filename)) {
461 err = AVERROR_NUMEXPECTED;
465 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
478 /*******************************************************/
/* Raw demuxer-level packet read: straight pass-through to the demuxer. */
480 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
482 return s->iformat->read_packet(s, pkt);
485 /**********************************************************/
488 * Get the number of samples of an audio frame. Return (-1) if error.
490 static int get_audio_frame_size(AVCodecContext *enc, int size)
494 if (enc->frame_size <= 1) {
495 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
497 if (bits_per_sample) {
/* PCM-style codecs: derive sample count from byte size and channel layout
 * (channels == 0 is rejected in an elided line). */
498 if (enc->channels == 0)
500 frame_size = (size << 3) / (bits_per_sample * enc->channels);
502 /* used for example by ADPCM codecs */
503 if (enc->bit_rate == 0)
505 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
/* Fixed-frame-size codecs report their own frame size. */
508 frame_size = enc->frame_size;
515 * Return the frame duration in seconds, return 0 if not available.
/* Outputs the per-frame duration as the rational *pnum / *pden. Video uses
 * the stream or codec time base (preferring whichever looks sane via the
 * *1000 heuristic) scaled by repeat_pict; audio uses samples/sample_rate. */
517 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
518 AVCodecParserContext *pc, AVPacket *pkt)
524 switch(st->codec->codec_type) {
525 case CODEC_TYPE_VIDEO:
526 if(st->time_base.num*1000LL > st->time_base.den){
527 *pnum = st->time_base.num;
528 *pden = st->time_base.den;
529 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
530 *pnum = st->codec->time_base.num;
531 *pden = st->codec->time_base.den;
532 if (pc && pc->repeat_pict) {
534 *pnum = (*pnum) * (2 + pc->repeat_pict);
538 case CODEC_TYPE_AUDIO:
539 frame_size = get_audio_frame_size(st->codec, pkt->size);
543 *pden = st->codec->sample_rate;
/* True when every frame of the codec is a keyframe (audio, or one of the
 * listed intra-only video codecs — list partially elided in this excerpt). */
550 static int is_intra_only(AVCodecContext *enc){
551 if(enc->codec_type == CODEC_TYPE_AUDIO){
553 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
554 switch(enc->codec_id){
556 case CODEC_ID_MJPEGB:
558 case CODEC_ID_RAWVIDEO:
559 case CODEC_ID_DVVIDEO:
560 case CODEC_ID_HUFFYUV:
561 case CODEC_ID_FFVHUFF:
/* Reconstructs a full timestamp from an lsb_bits-wide wrapped value by
 * choosing the unwrapping closest to last_ts (window centered on last_ts). */
572 static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
573 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
574 int64_t delta= last_ts - mask/2;
575 return ((lsb - delta)&mask) + delta;
/* Fills in missing pts/dts/duration/key-flag on a freshly demuxed packet:
 * unwraps wrapped timestamps, estimates duration from the frame rate,
 * interpolates dts for B-frame-delayed streams, and advances st->cur_dts.
 * NOTE(review): several statements are elided from this excerpt; comments
 * describe only the visible logic. */
578 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
579 AVCodecParserContext *pc, AVPacket *pkt)
581 int num, den, presentation_delayed;
582 /* handle wrapping */
583 if(st->cur_dts != AV_NOPTS_VALUE){
584 if(pkt->pts != AV_NOPTS_VALUE)
585 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
586 if(pkt->dts != AV_NOPTS_VALUE)
587 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
/* Derive a duration from the nominal frame rate when the demuxer gave none. */
590 if (pkt->duration == 0) {
591 compute_frame_duration(&num, &den, st, pc, pkt);
593 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
597 if(is_intra_only(st->codec))
598 pkt->flags |= PKT_FLAG_KEY;
600 /* do we have a video B frame ? */
601 presentation_delayed = 0;
602 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
603 /* XXX: need has_b_frame, but cannot get it if the codec is
605 if (( st->codec->codec_id == CODEC_ID_H264
606 || st->codec->has_b_frames) &&
607 pc && pc->pict_type != FF_B_TYPE)
608 presentation_delayed = 1;
609 /* this may be redundant, but it shouldnt hurt */
610 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
611 presentation_delayed = 1;
/* Seed cur_dts at the first packet of the stream. */
614 if(st->cur_dts == AV_NOPTS_VALUE){
615 if(presentation_delayed) st->cur_dts = -pkt->duration;
616 else st->cur_dts = 0;
619 //    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
620 /* interpolate PTS and DTS if they are not present */
621 if (presentation_delayed) {
622 /* DTS = decompression time stamp */
623 /* PTS = presentation time stamp */
624 if (pkt->dts == AV_NOPTS_VALUE) {
625 /* if we know the last pts, use it */
626 if(st->last_IP_pts != AV_NOPTS_VALUE)
627 st->cur_dts = pkt->dts = st->last_IP_pts;
629 pkt->dts = st->cur_dts;
631 st->cur_dts = pkt->dts;
633 /* this is tricky: the dts must be incremented by the duration
634 of the frame we are displaying, i.e. the last I or P frame */
635 if (st->last_IP_duration == 0)
636 st->cur_dts += pkt->duration;
638 st->cur_dts += st->last_IP_duration;
639 st->last_IP_duration  = pkt->duration;
640 st->last_IP_pts= pkt->pts;
641 /* cannot compute PTS if not present (we can compute it only
642 by knowing the futur */
643 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
/* Heuristic: nudge pts forward by one duration when that lands closer to
 * the expected cur_dts (handles off-by-one-frame pts from some streams). */
644 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
645 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
646 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
647 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
648 pkt->pts += pkt->duration;
649 //                av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
653 /* presentation is not delayed : PTS and DTS are the same */
654 if (pkt->pts == AV_NOPTS_VALUE) {
655 if (pkt->dts == AV_NOPTS_VALUE) {
656 pkt->pts = st->cur_dts;
657 pkt->dts = st->cur_dts;
660 st->cur_dts = pkt->dts;
664 st->cur_dts = pkt->pts;
667 st->cur_dts += pkt->duration;
669 //    av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
674 /* key frame computation */
/* Parser-classified I-frames and all audio packets are marked as keyframes. */
675 switch(st->codec->codec_type) {
676 case CODEC_TYPE_VIDEO:
677 if (pc->pict_type == FF_I_TYPE)
678 pkt->flags |= PKT_FLAG_KEY;
680 case CODEC_TYPE_AUDIO:
681 pkt->flags |= PKT_FLAG_KEY;
/* Destructor for packets whose data is owned elsewhere: clears fields
 * without freeing. */
689 void av_destruct_packet_nofree(AVPacket *pkt)
691 pkt->data = NULL; pkt->size = 0;
/* Core demux loop: pulls raw packets via av_read_packet(), runs them through
 * the stream's parser when needed, and emits one parsed frame per call,
 * computing timestamp fields via compute_pkt_fields(). Several statements
 * (loop structure, EOF handling) are elided in this excerpt. */
694 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
700 /* select current input stream component */
703 if (!st->need_parsing || !st->parser) {
704 /* no parsing needed: we just output the packet as is */
705 /* raw data support */
707 compute_pkt_fields(s, st, NULL, pkt);
/* Feed buffered raw bytes into the parser; pts/dts are consumed once. */
710 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
711 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
712 s->cur_ptr, s->cur_len,
713 s->cur_pkt.pts, s->cur_pkt.dts);
714 s->cur_pkt.pts = AV_NOPTS_VALUE;
715 s->cur_pkt.dts = AV_NOPTS_VALUE;
716 /* increment read pointer */
720 /* return packet if any */
724 pkt->stream_index = st->index;
725 pkt->pts = st->parser->pts;
726 pkt->dts = st->parser->dts;
/* Parser output references the parser's buffer: do not free on destruct. */
727 pkt->destruct = av_destruct_packet_nofree;
728 compute_pkt_fields(s, st, st->parser, pkt);
730 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
731 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
732 0, 0, AVINDEX_KEYFRAME);
739 av_free_packet(&s->cur_pkt);
743 /* read next packet */
744 ret = av_read_packet(s, &s->cur_pkt);
746 if (ret == AVERROR(EAGAIN))
748 /* return the last frames, if any */
/* On EOF, flush each parser with a zero-length input to drain buffered
 * frames before terminating. */
749 for(i = 0; i < s->nb_streams; i++) {
751 if (st->parser && st->need_parsing) {
752 av_parser_parse(st->parser, st->codec,
753 &pkt->data, &pkt->size,
755 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
760 /* no more packets: really terminates parsing */
764 st = s->streams[s->cur_pkt.stream_index];
765 if(st->codec->debug & FF_DEBUG_PTS)
766 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
767 s->cur_pkt.stream_index,
773 s->cur_ptr = s->cur_pkt.data;
774 s->cur_len = s->cur_pkt.size;
/* Lazily create the parser on first use; need_parsing==2 means the demuxer
 * delivers complete frames, so the parser only fills metadata. */
775 if (st->need_parsing && !st->parser) {
776 st->parser = av_parser_init(st->codec->codec_id);
778 /* no parser available : just output the raw packets */
779 st->need_parsing = 0;
780 }else if(st->need_parsing == 2){
781 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
783 if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
784 st->parser->last_frame_offset=
785 st->parser->cur_offset= s->cur_pkt.pos;
790 if(st->codec->debug & FF_DEBUG_PTS)
791 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
/* Public read: like av_read_frame_internal() but, with AVFMT_FLAG_GENPTS,
 * buffers packets so missing pts can be filled from a later packet's dts
 * on the same stream. */
800 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
804 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
807 pktl = s->packet_buffer;
809 AVPacket *next_pkt= &pktl->pkt;
811 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
812 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
813 if(   pktl->pkt.stream_index == next_pkt->stream_index
814 && next_pkt->dts < pktl->pkt.dts
815 && pktl->pkt.pts != pktl->pkt.dts //not b frame
816 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
817 next_pkt->pts= pktl->pkt.dts;
821 pktl = s->packet_buffer;
/* Pop the head of the buffer once its pts is resolved (or unresolvable). */
824 if(   next_pkt->pts != AV_NOPTS_VALUE
825 || next_pkt->dts == AV_NOPTS_VALUE
827 /* read packet from packet buffer, if there is data */
829 s->packet_buffer = pktl->next;
/* genpts path: read a new frame, take ownership of its data, and append it
 * to the tail of the packet buffer. */
835 AVPacketList **plast_pktl= &s->packet_buffer;
836 int ret= av_read_frame_internal(s, pkt);
838 if(pktl && ret != AVERROR(EAGAIN)){
845 /* duplicate the packet */
846 if (av_dup_packet(pkt) < 0)
847 return AVERROR_NOMEM;
849 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
851 pktl = av_mallocz(sizeof(AVPacketList));
853 return AVERROR_NOMEM;
855 /* add the packet in the buffered packet list */
/* Fast path without genpts: no buffering is ever needed. */
859 assert(!s->packet_buffer);
860 return av_read_frame_internal(s, pkt);
865 /* XXX: suppress the packet queue */
/* Frees every packet still queued in s->packet_buffer. */
866 static void flush_packet_queue(AVFormatContext *s)
871 pktl = s->packet_buffer;
874 s->packet_buffer = pktl->next;
875 av_free_packet(&pktl->pkt);
880 /*******************************************************/
/* Picks a stream to use for seeking/default handling — the visible logic
 * prefers a video stream; the fallback path is elided in this excerpt. */
883 int av_find_default_stream_index(AVFormatContext *s)
888 if (s->nb_streams <= 0)
890 for(i = 0; i < s->nb_streams; i++) {
892 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
900 * Flush the frame reader.
/* Resets all demux/parse state after a seek: drops queued packets, frees
 * the in-flight packet, closes parsers, and resets per-stream timestamps. */
902 static void av_read_frame_flush(AVFormatContext *s)
907 flush_packet_queue(s);
909 /* free previous packet */
911 if (s->cur_st->parser)
912 av_free_packet(&s->cur_pkt);
919 /* for each stream, reset read state */
920 for(i = 0; i < s->nb_streams; i++) {
924 av_parser_close(st->parser);
927 st->last_IP_pts = AV_NOPTS_VALUE;
928 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
/* After seeking to 'timestamp' (in ref_st's time base), propagate the new
 * cur_dts to every stream, rescaled into each stream's own time base. */
932 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
935 for(i = 0; i < s->nb_streams; i++) {
936 AVStream *st = s->streams[i];
938 st->cur_dts = av_rescale(timestamp,
939 st->time_base.den * (int64_t)ref_st->time_base.num,
940 st->time_base.num * (int64_t)ref_st->time_base.den);
/* Inserts (or updates) an index entry keeping st->index_entries sorted by
 * timestamp; growth is amortized via av_fast_realloc, with an overflow
 * guard on the element count. */
944 int av_add_index_entry(AVStream *st,
945 int64_t pos, int64_t timestamp, int size, int distance, int flags)
947 AVIndexEntry *entries, *ie;
950 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
953 entries = av_fast_realloc(st->index_entries,
954 &st->index_entries_allocated_size,
955 (st->nb_index_entries + 1) *
956 sizeof(AVIndexEntry));
960 st->index_entries= entries;
962 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
965 index= st->nb_index_entries++;
966 assert(index==0 || ie[-1].timestamp < timestamp);
/* Existing slot with a different timestamp: shift the tail to make room.
 * Same pos: keep the larger min_distance (don't reduce keyframe distance). */
970 if(ie->timestamp != timestamp){
971 if(ie->timestamp <= timestamp)
973 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
974 st->nb_index_entries++;
975 }else if(ie->pos == pos && distance < ie->min_distance) //dont reduce the distance
976 distance= ie->min_distance;
980 ie->timestamp = timestamp;
981 ie->min_distance= distance;
989 * build an index for raw streams using a parser.
/* Scans the whole file from data_offset, adding an index entry at each
 * parsed keyframe of stream 0. */
991 static void av_build_index_raw(AVFormatContext *s)
993 AVPacket pkt1, *pkt = &pkt1;
998 av_read_frame_flush(s);
999 url_fseek(&s->pb, s->data_offset, SEEK_SET);
1002 ret = av_read_frame(s, pkt);
1005 if (pkt->stream_index == 0 && st->parser &&
1006 (pkt->flags & PKT_FLAG_KEY)) {
1007 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1008 0, 0, AVINDEX_KEYFRAME);
1010 av_free_packet(pkt);
1015 * Returns TRUE if we deal with a raw stream.
1017 * Raw codec data and parsing needed.
/* A "raw" stream here means: exactly one stream and it needs parsing. */
1019 static int is_raw_stream(AVFormatContext *s)
1023 if (s->nb_streams != 1)
1026 if (!st->need_parsing)
/* Binary search over the sorted index for wanted_timestamp; with BACKWARD
 * picks the entry at-or-before, otherwise at-or-after; without ANY, steps
 * to the nearest keyframe entry in the chosen direction. */
1031 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1034 AVIndexEntry *entries= st->index_entries;
1035 int nb_entries= st->nb_index_entries;
1044 timestamp = entries[m].timestamp;
1045 if(timestamp >= wanted_timestamp)
1047 if(timestamp <= wanted_timestamp)
1050 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1052 if(!(flags & AVSEEK_FLAG_ANY)){
1053 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1054 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Timestamp-based seek for formats exposing read_timestamp(): uses cached
 * index entries to bound [pos_min,pos_max]/[ts_min,ts_max], then runs
 * av_gen_search() and positions the stream at the result. */
1065 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1066 AVInputFormat *avif= s->iformat;
1067 int64_t pos_min, pos_max, pos, pos_limit;
1068 int64_t ts_min, ts_max, ts;
1072 if (stream_index < 0)
1076 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1080 ts_min= AV_NOPTS_VALUE;
1081 pos_limit= -1; //gcc falsely says it may be uninitalized
1083 st= s->streams[stream_index];
1084 if(st->index_entries){
/* Lower bound from the nearest index entry at-or-before the target. */
1087 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1088 index= FFMAX(index, 0);
1089 e= &st->index_entries[index];
1091 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1093 ts_min= e->timestamp;
1095 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* Upper bound from the nearest entry at-or-after the target. */
1102 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1103 assert(index < st->nb_index_entries);
1105 e= &st->index_entries[index];
1106 assert(e->timestamp >= target_ts);
1108 ts_max= e->timestamp;
1109 pos_limit= pos_max - e->min_distance;
1111 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1112 pos_max,pos_limit, ts_max);
1117 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1122 url_fseek(&s->pb, pos, SEEK_SET);
1124 av_update_cur_dts(s, st, ts);
/* Generic interpolated/bisection search for the file position whose
 * timestamp brackets target_ts, using the caller-supplied read_timestamp().
 * Several bound-refinement statements are elided in this excerpt. */
1129 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1131 int64_t start_pos, filesize;
1135 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* Establish bounds when the caller had no cached index information. */
1138 if(ts_min == AV_NOPTS_VALUE){
1139 pos_min = s->data_offset;
1140 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1141 if (ts_min == AV_NOPTS_VALUE)
1145 if(ts_max == AV_NOPTS_VALUE){
1147 filesize = url_fsize(&s->pb);
1148 pos_max = filesize - 1;
/* Walk backwards from EOF in growing steps until a timestamp is found. */
1151 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1153 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1154 if (ts_max == AV_NOPTS_VALUE)
1158 int64_t tmp_pos= pos_max + 1;
1159 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1160 if(tmp_ts == AV_NOPTS_VALUE)
1164 if(tmp_pos >= filesize)
1170 if(ts_min > ts_max){
1172 }else if(ts_min == ts_max){
/* Main refinement loop: interpolation first, bisection when interpolation
 * stalls, then linear stepping as a last resort. */
1177 while (pos_min < pos_limit) {
1179 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1183 assert(pos_limit <= pos_max);
1186 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1187 // interpolate position (better than dichotomy)
1188 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1189 + pos_min - approximate_keyframe_distance;
1190 }else if(no_change==1){
1191 // bisection, if interpolation failed to change min or max pos last time
1192 pos = (pos_min + pos_limit)>>1;
1194 // linear search if bisection failed, can only happen if there are very few or no keframes between min/max
1199 else if(pos > pos_limit)
1203 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1209 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1211 assert(ts != AV_NOPTS_VALUE);
1212 if (target_ts <= ts) {
1213 pos_limit = start_pos - 1;
1217 if (target_ts >= ts) {
/* Pick the bracketing side according to the seek direction flag. */
1223 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1224 ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min  : ts_max;
/* NOTE(review): the second call below passes &pos_min where &pos_max looks
 * intended — debug-only path, but worth confirming against upstream. */
1227 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1229 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1230 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1231 pos, ts_min, target_ts, ts_max);
/* Byte-offset seek: clamps pos into [data_offset, filesize-1] and seeks
 * the byte stream directly (timestamp handling elided in this excerpt). */
1237 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1238 int64_t pos_min, pos_max;
1242 if (stream_index < 0)
1245 st= s->streams[stream_index];
1248 pos_min = s->data_offset;
1249 pos_max = url_fsize(&s->pb) - 1;
1251 if     (pos < pos_min) pos= pos_min;
1252 else if(pos > pos_max) pos= pos_max;
1254 url_fseek(&s->pb, pos, SEEK_SET);
1257 av_update_cur_dts(s, st, ts);
/* Index-based seek fallback: if the timestamp is past the current index,
 * reads forward from the last indexed position (or file start) to extend
 * the index, then seeks to the located entry. */
1262 static int av_seek_frame_generic(AVFormatContext *s,
1263 int stream_index, int64_t timestamp, int flags)
1269 st = s->streams[stream_index];
1271 index = av_index_search_timestamp(st, timestamp, flags);
1277 if(st->index_entries && st->nb_index_entries){
1278 ie= &st->index_entries[st->nb_index_entries-1];
1279 url_fseek(&s->pb, ie->pos, SEEK_SET);
1280 av_update_cur_dts(s, st, ie->timestamp);
1282 url_fseek(&s->pb, 0, SEEK_SET);
/* Demux forward until a keyframe at/after the target is seen. */
1285 int ret = av_read_frame(s, &pkt);
1288 av_free_packet(&pkt);
1289 if(stream_index == pkt.stream_index){
1290 if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
1294 index = av_index_search_timestamp(st, timestamp, flags);
1299 av_read_frame_flush(s);
1300 if (s->iformat->read_seek){
1301 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1304 ie = &st->index_entries[index];
1305 url_fseek(&s->pb, ie->pos, SEEK_SET);
1307 av_update_cur_dts(s, st, ie->timestamp);
/* Public seek entry point: flushes read state, dispatches to byte seek,
 * format-specific read_seek, binary search, or the generic index path. */
1312 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1317 av_read_frame_flush(s);
1319 if(flags & AVSEEK_FLAG_BYTE)
1320 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1322 if(stream_index < 0){
1323 stream_index= av_find_default_stream_index(s);
1324 if(stream_index < 0)
1327 st= s->streams[stream_index];
1328 /* timestamp for default must be expressed in AV_TIME_BASE units */
/* Convert from AV_TIME_BASE units into the chosen stream's time base. */
1329 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1331 st= s->streams[stream_index];
1333 /* first, we try the format specific seek */
1334 if (s->iformat->read_seek)
1335 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1342 if(s->iformat->read_timestamp)
1343 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1345 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1348 /*******************************************************/
1351 * Returns TRUE if the stream has accurate timings in any stream.
1353 * @return TRUE if the stream has accurate timings for at least one component.
/* True when at least one stream carries both start_time and duration. */
1355 static int av_has_timings(AVFormatContext *ic)
1360 for(i = 0;i < ic->nb_streams; i++) {
1361 st = ic->streams[i];
1362 if (st->start_time != AV_NOPTS_VALUE &&
1363 st->duration != AV_NOPTS_VALUE)
1370 * Estimate the stream timings from the one of each components.
1372 * Also computes the global bitrate if possible.
/* Container start_time = min over streams, duration = max end - min start
 * (all rescaled to AV_TIME_BASE); bit_rate derived from file_size/duration. */
1374 static void av_update_stream_timings(AVFormatContext *ic)
1376 int64_t start_time, start_time1, end_time, end_time1;
1380 start_time = INT64_MAX;
1381 end_time = INT64_MIN;
1382 for(i = 0;i < ic->nb_streams; i++) {
1383 st = ic->streams[i];
1384 if (st->start_time != AV_NOPTS_VALUE) {
1385 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1386 if (start_time1 < start_time)
1387 start_time = start_time1;
1388 if (st->duration != AV_NOPTS_VALUE) {
1389 end_time1 = start_time1
1390 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1391 if (end_time1 > end_time)
1392 end_time = end_time1;
1396 if (start_time != INT64_MAX) {
1397 ic->start_time = start_time;
1398 if (end_time != INT64_MIN) {
1399 ic->duration = end_time - start_time;
1400 if (ic->file_size > 0) {
1401 /* compute the bit rate */
1402 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1403 (double)ic->duration;
/* Back-fills per-stream start_time/duration from the container-level values
 * for streams that did not report their own. */
1410 static void fill_all_stream_timings(AVFormatContext *ic)
1415 av_update_stream_timings(ic);
1416 for(i = 0;i < ic->nb_streams; i++) {
1417 st = ic->streams[i];
1418 if (st->start_time == AV_NOPTS_VALUE) {
1419 if(ic->start_time != AV_NOPTS_VALUE)
1420 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1421 if(ic->duration != AV_NOPTS_VALUE)
1422 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Last-resort duration estimate: sum of per-stream codec bit rates plus
 * the file size gives duration = 8*filesize/bit_rate per stream. */
1427 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1429 int64_t filesize, duration;
1433 /* if bit_rate is already set, we believe it */
1434 if (ic->bit_rate == 0) {
1436 for(i=0;i<ic->nb_streams;i++) {
1437 st = ic->streams[i];
1438 bit_rate += st->codec->bit_rate;
1440 ic->bit_rate = bit_rate;
1443 /* if duration is already set, we believe it */
1444 if (ic->duration == AV_NOPTS_VALUE &&
1445 ic->bit_rate != 0 &&
1446 ic->file_size != 0) {
1447 filesize = ic->file_size;
1449 for(i = 0; i < ic->nb_streams; i++) {
1450 st = ic->streams[i];
1451 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1452 if (st->start_time == AV_NOPTS_VALUE ||
1453 st->duration == AV_NOPTS_VALUE) {
1455 st->duration = duration;
1462 #define DURATION_MAX_READ_SIZE 250000
1464 /* only usable for MPEG-PS streams */
/* PTS-based estimate: reads up to DURATION_MAX_READ_SIZE bytes from the
 * start for per-stream start_time, then from near EOF for end_time, and
 * restores the original file position afterwards. */
1465 static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offset)
1467 AVPacket pkt1, *pkt = &pkt1;
1469 int read_size, i, ret;
1471 int64_t filesize, offset, duration;
1473 /* free previous packet */
1474 if (ic->cur_st && ic->cur_st->parser)
1475 av_free_packet(&ic->cur_pkt);
1478 /* flush packet queue */
1479 flush_packet_queue(ic);
1481 for(i=0;i<ic->nb_streams;i++) {
1482 st = ic->streams[i];
1484 av_parser_close(st->parser);
1489 /* we read the first packets to get the first PTS (not fully
1490 accurate, but it is enough now) */
1491 url_fseek(&ic->pb, 0, SEEK_SET);
1494 if (read_size >= DURATION_MAX_READ_SIZE)
1496 /* if all info is available, we can stop */
1497 for(i = 0;i < ic->nb_streams; i++) {
1498 st = ic->streams[i];
1499 if (st->start_time == AV_NOPTS_VALUE)
1502 if (i == ic->nb_streams)
1505 ret = av_read_packet(ic, pkt);
1508 read_size += pkt->size;
1509 st = ic->streams[pkt->stream_index];
1510 if (pkt->pts != AV_NOPTS_VALUE) {
1511 if (st->start_time == AV_NOPTS_VALUE)
1512 st->start_time = pkt->pts;
1514 av_free_packet(pkt);
1517 /* estimate the end time (duration) */
1518 /* XXX: may need to support wrapping */
1519 filesize = ic->file_size;
1520 offset = filesize - DURATION_MAX_READ_SIZE;
1524 url_fseek(&ic->pb, offset, SEEK_SET);
1527 if (read_size >= DURATION_MAX_READ_SIZE)
1529 /* if all info is available, we can stop */
1530 for(i = 0;i < ic->nb_streams; i++) {
1531 st = ic->streams[i];
1532 if (st->duration == AV_NOPTS_VALUE)
1535 if (i == ic->nb_streams)
1538 ret = av_read_packet(ic, pkt);
1541 read_size += pkt->size;
1542 st = ic->streams[pkt->stream_index];
1543 if (pkt->pts != AV_NOPTS_VALUE) {
1544 end_time = pkt->pts;
1545 duration = end_time - st->start_time;
/* Keep the largest duration observed near EOF for each stream. */
1547 if (st->duration == AV_NOPTS_VALUE ||
1548 st->duration < duration)
1549 st->duration = duration;
1552 av_free_packet(pkt);
1555 fill_all_stream_timings(ic);
1557 url_fseek(&ic->pb, old_offset, SEEK_SET);
/**
 * Top-level timing estimation dispatcher: records the file size, then
 * picks the best available strategy —
 *   1. accurate PTS scan for seekable MPEG-PS/TS files;
 *   2. per-stream timings from the demuxer, propagated to all streams;
 *   3. coarse estimate from the global bitrate.
 * Finally updates the container-level start_time/duration.
 * NOTE(review): interleaved lines (braces, declarations, a DEBUG guard
 * around the printf block) are missing from this excerpt.
 */
1560 static void av_estimate_timings(AVFormatContext *ic, offset_t old_offset)
1564 /* get the file size, if possible */
1565 if (ic->iformat->flags & AVFMT_NOFILE) {
1568 file_size = url_fsize(&ic->pb);
1572 ic->file_size = file_size;
1574 if ((!strcmp(ic->iformat->name, "mpeg") ||
1575 !strcmp(ic->iformat->name, "mpegts")) &&
1576 file_size && !ic->pb.is_streamed) {
1577 /* get accurate estimate from the PTSes */
1578 av_estimate_timings_from_pts(ic, old_offset);
1579 } else if (av_has_timings(ic)) {
1580 /* at least one component has timings - we use them for all
1582 fill_all_stream_timings(ic);
1584 /* less precise: use bit rate info */
1585 av_estimate_timings_from_bit_rate(ic);
1587 av_update_stream_timings(ic);
/* debug dump of the resulting per-stream and global timings */
1593 for(i = 0;i < ic->nb_streams; i++) {
1594 st = ic->streams[i];
1595 printf("%d: start_time: %0.3f duration: %0.3f\n",
1596 i, (double)st->start_time / AV_TIME_BASE,
1597 (double)st->duration / AV_TIME_BASE);
1599 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1600 (double)ic->start_time / AV_TIME_BASE,
1601 (double)ic->duration / AV_TIME_BASE,
1602 ic->bit_rate / 1000);
/**
 * Return nonzero if the codec context already carries the minimum
 * parameters needed to configure a decoder for its media type.
 * NOTE(review): the declaration of 'val', the break statements, the
 * default case and the final return are missing from this excerpt.
 */
1607 static int has_codec_parameters(AVCodecContext *enc)
1610 switch(enc->codec_type) {
1611 case CODEC_TYPE_AUDIO:
/* audio: a sample rate must be known */
1612 val = enc->sample_rate;
1614 case CODEC_TYPE_VIDEO:
/* video: dimensions and a resolved pixel format must be known */
1615 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
/**
 * Last-resort probing helper for av_find_stream_info(): open the
 * stream's decoder (if not yet open) and decode one packet so that
 * missing codec parameters (dimensions, pix_fmt, sample rate, ...) get
 * filled in by the decoder itself.
 * NOTE(review): interleaved lines (declarations of codec/picture/samples,
 * error checks, break statements, cleanup and return) are missing from
 * this excerpt; the calls shown are unchanged.
 * @return decoder return value, or 0/negative on the elided error paths
 */
1624 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1628 int got_picture, data_size, ret=0;
1631 if(!st->codec->codec){
1632 codec = avcodec_find_decoder(st->codec->codec_id);
1635 ret = avcodec_open(st->codec, codec);
/* only decode if parameters are still incomplete */
1640 if(!has_codec_parameters(st->codec)){
1641 switch(st->codec->codec_type) {
1642 case CODEC_TYPE_VIDEO:
1643 ret = avcodec_decode_video(st->codec, &picture,
1644 &got_picture, (uint8_t *)data, size);
1646 case CODEC_TYPE_AUDIO:
/* audio output buffer must hold at least one max-size frame */
1647 data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
1648 samples = av_malloc(data_size);
1651 ret = avcodec_decode_audio2(st->codec, samples,
1652 &data_size, (uint8_t *)data, size);
1663 /* absolute maximum size we read until we abort */
1664 #define MAX_READ_SIZE 5000000
1666 #define MAX_STD_TIMEBASES (60*12+5)
/**
 * Return the i-th "standard" framerate candidate, scaled by 12*1001 so
 * that integer and n/1.001 rates can share one table:
 *  - indices [0, 60*12) cover every (i/12)/1.001 fps rate -> i*1001;
 *  - the remaining indices map a small list of exact integer rates
 *    (24, 30, 60, 12, 15 fps) -> rate*1000*12.
 * Valid for 0 <= i < MAX_STD_TIMEBASES.
 */
static int get_std_framerate(int i){
    static const int exact_fps[] = { 24, 30, 60, 12, 15 };

    if (i < 60*12)
        return i * 1001;
    return exact_fps[i - 60*12] * 1000 * 12;
}
/**
 * Read packets from the input to fill in codec parameters, guess the
 * real frame rate of each video stream, and estimate file timings.
 * Works in four phases:
 *   1. setup: init parsers, copy stream time_base into codec time_base
 *      for video when the latter is unset;
 *   2. packet loop: buffer packets into ic->packet_buffer, accumulate
 *      per-stream DTS deltas into duration_error[] against every
 *      standard framerate, extract extradata via parser split(),
 *      and decode a frame as a last resort (try_decode_frame);
 *   3. post-pass: pick the standard framerate with the lowest
 *      accumulated error as r_frame_rate, fix up raw-video tags and
 *      audio bits_per_sample, then estimate timings;
 *   4. fix up DTS of B-frame video streams that lack timestamps.
 * NOTE(review): this excerpt is missing many interleaved lines (braces,
 * loop headers, break/goto statements, declarations such as 'st', 'buf',
 * 'delta', 'ppkt1'); all visible statements are unchanged.
 * @return >=0 on success, negative error code on failure
 */
1672 int av_find_stream_info(AVFormatContext *ic)
1674 int i, count, ret, read_size, j;
1676 AVPacket pkt1, *pkt;
1677 AVPacketList *pktl=NULL, **ppktl;
1678 int64_t last_dts[MAX_STREAMS];
1679 int duration_count[MAX_STREAMS]={0};
/* per-stream, per-candidate-framerate accumulated squared timing error */
1680 double (*duration_error)[MAX_STD_TIMEBASES];
1681 offset_t old_offset = url_ftell(&ic->pb);
1683 duration_error = av_mallocz(MAX_STREAMS * sizeof(*duration_error));
1684 if (!duration_error) return AVERROR_NOMEM;
/* phase 1: per-stream setup */
1686 for(i=0;i<ic->nb_streams;i++) {
1687 st = ic->streams[i];
1688 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1689 /* if(!st->time_base.num)
1691 if(!st->codec->time_base.num)
1692 st->codec->time_base= st->time_base;
1694 //only for the split stuff
1696 st->parser = av_parser_init(st->codec->codec_id);
1697 if(st->need_parsing == 2 && st->parser){
1698 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1703 for(i=0;i<MAX_STREAMS;i++){
1704 last_dts[i]= AV_NOPTS_VALUE;
1709 ppktl = &ic->packet_buffer;
/* phase 2: main packet-reading loop */
1711 /* check if one codec still needs to be handled */
1712 for(i=0;i<ic->nb_streams;i++) {
1713 st = ic->streams[i];
1714 if (!has_codec_parameters(st->codec))
1716 /* variable fps and no guess at the real fps */
1717 if( st->codec->time_base.den >= 101LL*st->codec->time_base.num
1718 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1720 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1723 if (i == ic->nb_streams) {
1724 /* NOTE: if the format has no header, then we need to read
1725 some packets to get most of the streams, so we cannot
1727 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1728 /* if we found the info for all the codecs, we can stop */
1733 /* we did not get all the codec info, but we read too much data */
1734 if (read_size >= MAX_READ_SIZE) {
1739 /* NOTE: a new stream can be added there if no header in file
1740 (AVFMTCTX_NOHEADER) */
1741 ret = av_read_frame_internal(ic, &pkt1);
1744 ret = -1; /* we could not have all the codec parameters before EOF */
1745 for(i=0;i<ic->nb_streams;i++) {
1746 st = ic->streams[i];
1747 if (!has_codec_parameters(st->codec)){
1749 avcodec_string(buf, sizeof(buf), st->codec, 0);
1750 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1758 pktl = av_mallocz(sizeof(AVPacketList));
1760 ret = AVERROR_NOMEM;
1764 /* add the packet in the buffered packet list */
1766 ppktl = &pktl->next;
1771 /* duplicate the packet */
1772 if (av_dup_packet(pkt) < 0) {
1773 ret = AVERROR_NOMEM;
1777 read_size += pkt->size;
1779 st = ic->streams[pkt->stream_index];
1780 if(st->codec_info_nb_frames>1) //FIXME move codec_info_nb_frames and codec_info_duration from AVStream into this func
1781 st->codec_info_duration += pkt->duration;
1782 if (pkt->duration != 0)
1783 st->codec_info_nb_frames++;
/* framerate estimation: score each standard framerate by how well
   it explains the observed DTS delta */
1786 int index= pkt->stream_index;
1787 int64_t last= last_dts[index];
1788 int64_t duration= pkt->dts - last;
1790 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1791 double dur= duration * av_q2d(st->time_base);
1793 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1794 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
/* first samples are unreliable; restart the error accumulation */
1795 if(duration_count[index] < 2)
1796 memset(duration_error, 0, MAX_STREAMS * sizeof(*duration_error));
1797 for(i=1; i<MAX_STD_TIMEBASES; i++){
1798 int framerate= get_std_framerate(i);
1799 int ticks= lrintf(dur*framerate/(1001*12));
1800 double error= dur - ticks*1001*12/(double)framerate;
1801 duration_error[index][i] += error*error;
1803 duration_count[index]++;
1805 if(st->codec_info_nb_frames == 0 && 0)
1806 st->codec_info_duration += duration;
1808 if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
1809 last_dts[pkt->stream_index]= pkt->dts;
/* pull codec extradata out of the bitstream via the parser */
1811 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1812 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1814 st->codec->extradata_size= i;
1815 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
1816 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1817 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1821 /* if still no information, we try to open the codec and to
1822 decompress the frame. We try to avoid that in most cases as
1823 it takes longer and uses more memory. For MPEG4, we need to
1824 decompress for Quicktime. */
1825 if (!has_codec_parameters(st->codec) /*&&
1826 (st->codec->codec_id == CODEC_ID_FLV1 ||
1827 st->codec->codec_id == CODEC_ID_H264 ||
1828 st->codec->codec_id == CODEC_ID_H263 ||
1829 st->codec->codec_id == CODEC_ID_H261 ||
1830 st->codec->codec_id == CODEC_ID_VORBIS ||
1831 st->codec->codec_id == CODEC_ID_MJPEG ||
1832 st->codec->codec_id == CODEC_ID_PNG ||
1833 st->codec->codec_id == CODEC_ID_PAM ||
1834 st->codec->codec_id == CODEC_ID_PGM ||
1835 st->codec->codec_id == CODEC_ID_PGMYUV ||
1836 st->codec->codec_id == CODEC_ID_PBM ||
1837 st->codec->codec_id == CODEC_ID_PPM ||
1838 st->codec->codec_id == CODEC_ID_SHORTEN ||
1839 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1840 try_decode_frame(st, pkt->data, pkt->size);
/* stop analyzing once enough stream duration has been examined */
1842 if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
1848 // close codecs which were opened in try_decode_frame()
1849 for(i=0;i<ic->nb_streams;i++) {
1850 st = ic->streams[i];
1851 if(st->codec->codec)
1852 avcodec_close(st->codec);
/* phase 3: per-stream post-processing */
1854 for(i=0;i<ic->nb_streams;i++) {
1855 st = ic->streams[i];
1856 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1857 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
1858 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
1860 if(duration_count[i]
1861 && (st->codec->time_base.num*101LL <= st->codec->time_base.den || st->codec->codec_id == CODEC_ID_MPEG2VIDEO) /*&&
1862 //FIXME we should not special case mpeg2, but this needs testing with non mpeg2 ...
1863 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
1864 double best_error= 2*av_q2d(st->time_base);
1865 best_error= best_error*best_error*duration_count[i]*1000*12*30;
/* pick the candidate framerate with the lowest accumulated error */
1867 for(j=1; j<MAX_STD_TIMEBASES; j++){
1868 double error= duration_error[i][j] * get_std_framerate(j);
1869 // if(st->codec->codec_type == CODEC_TYPE_VIDEO)
1870 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
1871 if(error < best_error){
1873 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
/* fallback: derive r_frame_rate from the finer of the two time bases */
1878 if (!st->r_frame_rate.num){
1879 if( st->codec->time_base.den * (int64_t)st->time_base.num
1880 <= st->codec->time_base.num * (int64_t)st->time_base.den){
1881 st->r_frame_rate.num = st->codec->time_base.den;
1882 st->r_frame_rate.den = st->codec->time_base.num;
1884 st->r_frame_rate.num = st->time_base.den;
1885 st->r_frame_rate.den = st->time_base.num;
1888 }else if(st->codec->codec_type == CODEC_TYPE_AUDIO) {
1889 if(!st->codec->bits_per_sample)
1890 st->codec->bits_per_sample= av_get_bits_per_sample(st->codec->codec_id);
1894 av_estimate_timings(ic, old_offset);
/* phase 4: shift DTS of buffered video packets when B-frames made
   them negative and no real timestamps are present */
1896 /* correct DTS for b frame streams with no timestamps */
1897 for(i=0;i<ic->nb_streams;i++) {
1898 st = ic->streams[i];
1899 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1901 ppktl = &ic->packet_buffer;
1903 if(ppkt1->stream_index != i)
1905 if(ppkt1->pkt->dts < 0)
1907 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
1909 ppkt1->pkt->dts -= delta;
1914 st->cur_dts -= delta;
1920 av_free(duration_error);
1925 /*******************************************************/
1927 int av_read_play(AVFormatContext *s)
1929 if (!s->iformat->read_play)
1930 return AVERROR_NOTSUPP;
1931 return s->iformat->read_play(s);
1934 int av_read_pause(AVFormatContext *s)
1936 if (!s->iformat->read_pause)
1937 return AVERROR_NOTSUPP;
1938 return s->iformat->read_pause(s);
/**
 * Close a demuxed media file and free everything it owns: pending
 * parser packet, per-stream data (parsers, index, extradata), the
 * buffered packet queue, the underlying ByteIOContext (unless the
 * format is AVFMT_NOFILE), and the demuxer's private data.
 * NOTE(review): interleaved lines (braces, the 'st' declaration, the
 * stream/context frees and url_fclose call) are missing from this
 * excerpt; the visible statements are unchanged.
 */
1941 void av_close_input_file(AVFormatContext *s)
1943 int i, must_open_file;
1946 /* free previous packet */
1947 if (s->cur_st && s->cur_st->parser)
1948 av_free_packet(&s->cur_pkt);
1950 if (s->iformat->read_close)
1951 s->iformat->read_close(s);
1952 for(i=0;i<s->nb_streams;i++) {
1953 /* free all data in a stream component */
1956 av_parser_close(st->parser);
1958 av_free(st->index_entries);
1959 av_free(st->codec->extradata);
1963 flush_packet_queue(s);
/* AVFMT_NOFILE formats never opened a ByteIOContext to close */
1965 if (s->iformat->flags & AVFMT_NOFILE) {
1968 if (must_open_file) {
1971 av_freep(&s->priv_data);
/**
 * Allocate and register a new AVStream on @p s with format-specific
 * id @p id.  Timestamps default to AV_NOPTS_VALUE and the time base to
 * the MPEG 1/90000, 33-bit convention (overridable via
 * av_set_pts_info()).
 * NOTE(review): interleaved lines (braces, NULL checks/returns, the
 * final 'return st') are missing from this excerpt.
 * @return the new stream, or (on the elided error paths) NULL
 */
1975 AVStream *av_new_stream(AVFormatContext *s, int id)
1980 if (s->nb_streams >= MAX_STREAMS)
1983 st = av_mallocz(sizeof(AVStream));
1987 st->codec= avcodec_alloc_context();
1989 /* no default bitrate if decoding */
1990 st->codec->bit_rate = 0;
1992 st->index = s->nb_streams;
1994 st->start_time = AV_NOPTS_VALUE;
1995 st->duration = AV_NOPTS_VALUE;
1996 st->cur_dts = AV_NOPTS_VALUE;
1998 /* default pts settings is MPEG like */
1999 av_set_pts_info(st, 33, 1, 90000);
2000 st->last_IP_pts = AV_NOPTS_VALUE;
2001 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2002 st->pts_buffer[i]= AV_NOPTS_VALUE;
2004 s->streams[s->nb_streams++] = st;
2008 /************************************************************/
2009 /* output media file */
/**
 * Allocate the muxer's private data and forward the user-supplied
 * parameters to the muxer's set_parameters() callback, if any.
 * NOTE(review): interleaved lines (braces, the allocation-failure check,
 * the final return) are missing from this excerpt.
 * @return 0 on success, AVERROR_NOMEM on allocation failure, or the
 *         callback's error code
 */
2011 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2015 if (s->oformat->priv_data_size > 0) {
2016 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2018 return AVERROR_NOMEM;
2020 s->priv_data = NULL;
2022 if (s->oformat->set_parameters) {
2023 ret = s->oformat->set_parameters(s, ap);
/**
 * Validate stream parameters, fix up codec tags, call the muxer's
 * write_header() callback, and initialize per-stream PTS generation
 * (st->pts fraction, one tick per audio sample / video frame).
 * Must be called after av_set_parameters() and before any packet is
 * written.
 * NOTE(review): interleaved lines (braces, 'st' declarations, error
 * returns, break statements) are missing from this excerpt.
 * @return 0 on success, negative error code on failure
 */
2030 int av_write_header(AVFormatContext *s)
2035 // some sanity checks
2036 for(i=0;i<s->nb_streams;i++) {
2039 switch (st->codec->codec_type) {
2040 case CODEC_TYPE_AUDIO:
2041 if(st->codec->sample_rate<=0){
2042 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2046 case CODEC_TYPE_VIDEO:
2047 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2048 av_log(s, AV_LOG_ERROR, "time base not set\n");
2051 if(st->codec->width<=0 || st->codec->height<=0){
2052 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
/* reconcile the user-set codec_tag with the container's tag table */
2058 if(s->oformat->codec_tag){
2059 if(st->codec->codec_tag){
2061 //check that tag + id is in the table
2062 //if neither is in the table -> ok
2063 //if tag is in the table with another id -> FAIL
2064 //if id is in the table with another tag -> FAIL unless strict < ?
2066 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2070 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2071 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2073 return AVERROR_NOMEM;
2076 if(s->oformat->write_header){
2077 ret = s->oformat->write_header(s);
2082 /* init PTS generation */
2083 for(i=0;i<s->nb_streams;i++) {
2084 int64_t den = AV_NOPTS_VALUE;
2087 switch (st->codec->codec_type) {
2088 case CODEC_TYPE_AUDIO:
/* audio PTS advances by one per decoded sample */
2089 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2091 case CODEC_TYPE_VIDEO:
/* video PTS advances by one per frame */
2092 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2097 if (den != AV_NOPTS_VALUE) {
2099 return AVERROR_INVALIDDATA;
2100 av_frac_init(&st->pts, 0, 0, den);
2106 //FIXME merge with compute_pkt_fields
/**
 * Fill in missing fields of a packet about to be muxed: duration (from
 * the frame duration), pts (from the stream PTS fraction when the
 * stream has no B-frame delay), dts (from the delay-sorted pts buffer),
 * then validate monotonicity and advance st->cur_dts / st->pts.
 * NOTE(review): interleaved lines (braces, return statements, break
 * statements) are missing from this excerpt.
 * @return 0 on success, negative on invalid/non-monotone timestamps
 *         (elided returns)
 */
2107 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2108 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2109 int num, den, frame_size, i;
2111 // av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2113 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2116 /* duration field */
2117 if (pkt->duration == 0) {
2118 compute_frame_duration(&num, &den, st, NULL, pkt);
2120 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2124 //XXX/FIXME this is a temporary hack until all encoders output pts
2125 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2127 // pkt->pts= st->cur_dts;
2128 pkt->pts= st->pts.val;
2131 //calculate dts from pts
2132 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
/* keep a sliding window of the last delay+1 PTS, sorted ascending;
   the smallest is the DTS of the current packet */
2133 st->pts_buffer[0]= pkt->pts;
2134 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2135 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2136 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2137 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2139 pkt->dts= st->pts_buffer[0];
2142 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2143 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2146 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2147 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2151 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2152 st->cur_dts= pkt->dts;
2153 st->pts.val= pkt->dts;
/* advance the stream PTS fraction for the next packet */
2156 switch (st->codec->codec_type) {
2157 case CODEC_TYPE_AUDIO:
2158 frame_size = get_audio_frame_size(st->codec, pkt->size);
2160 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2161 but it would be better if we had the real timestamps from the encoder */
2162 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2163 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2166 case CODEC_TYPE_VIDEO:
2167 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2175 static void truncate_ts(AVStream *st, AVPacket *pkt){
2176 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2179 // pkt->dts= 0; //this happens for low_delay=0 and b frames, FIXME, needs further invstigation about what we should do here
2181 if (pkt->pts != AV_NOPTS_VALUE)
2182 pkt->pts &= pts_mask;
2183 if (pkt->dts != AV_NOPTS_VALUE)
2184 pkt->dts &= pts_mask;
/**
 * Write a packet directly to the output (no interleaving): compute the
 * missing timestamp fields, truncate them to the stream's wrap range,
 * and hand the packet to the muxer's write_packet() callback.
 * NOTE(review): interleaved lines (braces, the error return after the
 * timestamp check, the final return) are missing from this excerpt.
 * @return write_packet()'s result / the I/O error state (elided return)
 */
2187 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2191 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
/* timestamp problems are only fatal for muxers that need timestamps */
2192 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2195 truncate_ts(s->streams[pkt->stream_index], pkt);
2197 ret= s->oformat->write_packet(s, pkt);
2199 ret= url_ferror(&s->pb);
/**
 * Generic interleaver: buffer the incoming packet into s->packet_buffer
 * sorted by DTS (compared across streams in a common time base), and
 * output the head packet once every stream has at least one buffered
 * packet (or when flushing).
 * NOTE(review): interleaved lines (braces, stream_count handling, the
 * list-walk loops, the returns) are missing from this excerpt.
 * @param flush nonzero to drain buffered packets even if some streams
 *              have none pending
 * @return 1 if *out was filled, 0 if no packet could be output
 *         (elided returns)
 */
2203 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2204 AVPacketList *pktl, **next_point, *this_pktl;
2206 int streams[MAX_STREAMS];
2209 AVStream *st= s->streams[ pkt->stream_index];
2211 // assert(pkt->destruct != av_destruct_packet); //FIXME
2213 this_pktl = av_mallocz(sizeof(AVPacketList));
2214 this_pktl->pkt= *pkt;
/* take ownership of the packet data, duplicating shared buffers */
2215 if(pkt->destruct == av_destruct_packet)
2216 pkt->destruct= NULL; // non shared -> must keep original from being freed
2218 av_dup_packet(&this_pktl->pkt); //shared -> must dup
/* insert into the buffer at the position keeping DTS order; the
   comparison cross-multiplies the two stream time bases */
2220 next_point = &s->packet_buffer;
2222 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2223 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2224 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2225 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2227 next_point= &(*next_point)->next;
2229 this_pktl->next= *next_point;
2230 *next_point= this_pktl;
/* count how many distinct streams have buffered packets */
2233 memset(streams, 0, sizeof(streams));
2234 pktl= s->packet_buffer;
2236 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2237 if(streams[ pktl->pkt.stream_index ] == 0)
2239 streams[ pktl->pkt.stream_index ]++;
/* emit the head packet once every stream is represented, or on flush */
2243 if(s->nb_streams == stream_count || (flush && stream_count)){
2244 pktl= s->packet_buffer;
2247 s->packet_buffer= pktl->next;
2251 av_init_packet(out);
2257 * Interleaves an AVPacket correctly so it can be muxed.
2258 * @param out the interleaved packet will be output here
2259 * @param in the input packet
2260 * @param flush 1 if no further packets are available as input and all
2261 * remaining packets should be output
2262 * @return 1 if a packet was output, 0 if no packet could be output,
2263 * < 0 if an error occurred
2265 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2266 if(s->oformat->interleave_packet)
2267 return s->oformat->interleave_packet(s, out, in, flush);
2269 return av_interleave_packet_per_dts(s, out, in, flush);
/**
 * Buffered, interleaving variant of av_write_frame(): compute missing
 * packet fields, push the packet through the interleaver, and write out
 * every packet the interleaver releases.
 * NOTE(review): interleaved lines (braces, the 'opkt' declaration, the
 * drain loop header, several returns) are missing from this excerpt.
 * @return 0 on success, negative error code on failure (elided returns)
 */
2272 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2273 AVStream *st= s->streams[ pkt->stream_index];
2275 //FIXME/XXX/HACK drop zero sized packets
2276 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2279 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2280 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2283 if(pkt->dts == AV_NOPTS_VALUE)
/* write out every packet the interleaver is ready to release */
2288 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2289 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2292 truncate_ts(s->streams[opkt.stream_index], &opkt);
2293 ret= s->oformat->write_packet(s, &opkt);
2295 av_free_packet(&opkt);
2300 if(url_ferror(&s->pb))
2301 return url_ferror(&s->pb);
/**
 * Finish writing the output file: drain all packets still held by the
 * interleaver (flush=1), call the muxer's write_trailer() callback, and
 * free per-stream and muxer private data.
 * NOTE(review): interleaved lines (braces, declarations of 'ret'/'i'/
 * 'pkt', loop headers, gotos/returns) are missing from this excerpt.
 * @return 0 on success, negative error / I/O error state on failure
 */
2305 int av_write_trailer(AVFormatContext *s)
/* drain the interleaver completely */
2311 ret= av_interleave_packet(s, &pkt, NULL, 1);
2312 if(ret<0) //FIXME cleanup needed for ret<0 ?
2317 truncate_ts(s->streams[pkt.stream_index], &pkt);
2318 ret= s->oformat->write_packet(s, &pkt);
2320 av_free_packet(&pkt);
2324 if(url_ferror(&s->pb))
2328 if(s->oformat->write_trailer)
2329 ret = s->oformat->write_trailer(s);
2332 ret=url_ferror(&s->pb);
2333 for(i=0;i<s->nb_streams;i++)
2334 av_freep(&s->streams[i]->priv_data);
2335 av_freep(&s->priv_data);
2339 /* "user interface" functions */
/**
 * Pretty-print a human-readable description of the format context to
 * the log: container name, duration, start time, bitrate, then one
 * line per stream (codec string, optional id/language, frame rates).
 * NOTE(review): interleaved lines (the remaining parameters of the
 * signature, braces, else branches, declarations of 'secs'/'us'/'flags'/
 * 'buf'/'index') are missing from this excerpt.
 */
2341 void dump_format(AVFormatContext *ic,
2349 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2350 is_output ? "Output" : "Input",
2352 is_output ? ic->oformat->name : ic->iformat->name,
2353 is_output ? "to" : "from", url);
2355 av_log(NULL, AV_LOG_INFO, " Duration: ");
2356 if (ic->duration != AV_NOPTS_VALUE) {
2357 int hours, mins, secs, us;
2358 secs = ic->duration / AV_TIME_BASE;
2359 us = ic->duration % AV_TIME_BASE;
2364 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2365 (10 * us) / AV_TIME_BASE);
2367 av_log(NULL, AV_LOG_INFO, "N/A");
2369 if (ic->start_time != AV_NOPTS_VALUE) {
2371 av_log(NULL, AV_LOG_INFO, ", start: ");
2372 secs = ic->start_time / AV_TIME_BASE;
2373 us = ic->start_time % AV_TIME_BASE;
2374 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2375 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2377 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2379 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2381 av_log(NULL, AV_LOG_INFO, "N/A");
2383 av_log(NULL, AV_LOG_INFO, "\n");
/* one descriptive line per stream */
2385 for(i=0;i<ic->nb_streams;i++) {
2386 AVStream *st = ic->streams[i];
2387 int g= ff_gcd(st->time_base.num, st->time_base.den);
2388 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2389 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2390 /* the pid is important information, so we display it */
2391 /* XXX: add a generic system */
2393 flags = ic->oformat->flags;
2395 flags = ic->iformat->flags;
2396 if (flags & AVFMT_SHOW_IDS) {
2397 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2399 if (strlen(st->language) > 0) {
2400 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2402 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2403 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2404 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2405 if(st->r_frame_rate.den && st->r_frame_rate.num)
2406 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(r)", av_q2d(st->r_frame_rate));
2407 /* else if(st->time_base.den && st->time_base.num)
2408 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(m)", 1/av_q2d(st->time_base));*/
2410 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(c)", 1/av_q2d(st->codec->time_base));
2412 av_log(NULL, AV_LOG_INFO, "\n");
/* Abbreviation table shared by parse_image_size() and parse_frame_rate():
 * maps well-known names to a picture size and/or a frame rate.  Entries
 * with frame_rate 0 carry only a size; see the lookups below.
 * NOTE(review): the opening of the AbvEntry typedef (struct keyword and
 * the abv/width/height members) and its closing "} AbvEntry;" are
 * missing from this excerpt. */
2419 int frame_rate, frame_rate_base;
2422 static AbvEntry frame_abvs[] = {
2423 { "ntsc", 720, 480, 30000, 1001 },
2424 { "pal", 720, 576, 25, 1 },
2425 { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
2426 { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
2427 { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
2428 { "spal", 768, 576, 25, 1 }, /* square pixel pal */
2429 { "film", 352, 240, 24, 1 },
2430 { "ntsc-film", 352, 240, 24000, 1001 },
2431 { "sqcif", 128, 96, 0, 0 },
2432 { "qcif", 176, 144, 0, 0 },
2433 { "cif", 352, 288, 0, 0 },
2434 { "4cif", 704, 576, 0, 0 },
/**
 * Parse a picture size string: either a known abbreviation from
 * frame_abvs[] ("cif", "pal", ...) or an explicit "WIDTHxHEIGHT".
 * NOTE(review): interleaved lines (braces, the loop over frame_abvs,
 * the 'x' separator check, the returns) are missing from this excerpt.
 * @return 0 on success, negative (elided) on malformed input
 */
2437 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2440 int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2442 int frame_width = 0, frame_height = 0;
/* abbreviation table lookup first */
2445 if (!strcmp(frame_abvs[i].abv, str)) {
2446 frame_width = frame_abvs[i].width;
2447 frame_height = frame_abvs[i].height;
/* otherwise parse "<width>x<height>" */
2453 frame_width = strtol(p, (char **)&p, 10);
2456 frame_height = strtol(p, (char **)&p, 10);
2458 if (frame_width <= 0 || frame_height <= 0)
2460 *width_ptr = frame_width;
2461 *height_ptr = frame_height;
/**
 * Parse a frame rate string in one of three forms, tried in order:
 *   1. an abbreviation from frame_abvs[] ("ntsc", "pal", ...);
 *   2. a fraction "num/den" or "num:den";
 *   3. a decimal number, converted to a rational with av_d2q().
 * NOTE(review): interleaved lines (braces, returns, the 'cp'/'cpp'/'i'
 * declarations) are missing from this excerpt.
 * @return 0 on success, negative (elided) on failure
 */
2465 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2470 /* First, we check our abbreviation table */
2471 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2472 if (!strcmp(frame_abvs[i].abv, arg)) {
2473 *frame_rate = frame_abvs[i].frame_rate;
2474 *frame_rate_base = frame_abvs[i].frame_rate_base;
2478 /* Then, we try to parse it as fraction */
2479 cp = strchr(arg, '/');
2481 cp = strchr(arg, ':');
2484 *frame_rate = strtol(arg, &cpp, 10);
2485 if (cpp != arg || cpp == cp)
2486 *frame_rate_base = strtol(cp+1, &cpp, 10);
2491 /* Finally we give up and parse it as double */
2492 AVRational time_base = av_d2q(strtod(arg, 0), DEFAULT_FRAME_RATE_BASE);
2493 *frame_rate_base = time_base.den;
2494 *frame_rate = time_base.num;
2496 if (!*frame_rate || !*frame_rate_base)
2502 #ifndef CONFIG_WINCE
/**
 * Parse a date/time string into microseconds.
 * With duration==0: an absolute date ("[YYYY-MM-DD][Thh:mm:ss[.frac]]",
 * optional trailing 'Z'/'z' for UTC), returned as microseconds since
 * the Unix epoch; an empty time part means "now".
 * With duration!=0: a relative time span ("hh:mm:ss[.frac]" or plain
 * seconds), returned as a signed microsecond count.
 * NOTE(review): many interleaved lines (format-string tables' contents,
 * braces, the mktime/timegm call, negative-sign handling) are missing
 * from this excerpt.
 */
2503 int64_t parse_date(const char *datestr, int duration)
2509 static const char *date_fmt[] = {
2513 static const char *time_fmt[] = {
2523 time_t now = time(0);
2525 len = strlen(datestr);
2527 lastch = datestr[len - 1];
2530 is_utc = (lastch == 'z' || lastch == 'Z');
2532 memset(&dt, 0, sizeof(dt));
/* try each supported date format in turn */
2537 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2538 q = small_strptime(p, date_fmt[i], &dt);
/* no date part: default to today's date at midnight */
2548 dt = *localtime(&now);
2550 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2555 if (*p == 'T' || *p == 't' || *p == ' ')
/* try each supported time format in turn */
2558 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2559 q = small_strptime(p, time_fmt[i], &dt);
2569 q = small_strptime(p, time_fmt[0], &dt);
2571 dt.tm_sec = strtol(p, (char **)&q, 10);
2577 /* Now we have all the fields that we can get */
2582 return now * INT64_C(1000000);
2586 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2588 dt.tm_isdst = -1; /* unknown */
/* parse up to 6 fractional digits as microseconds */
2601 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2604 val += n * (*q - '0');
2608 return negative ? -t : t;
2610 #endif /* CONFIG_WINCE */
/**
 * Look up @p tag1 in an info string of the form "?tag1=val1&tag2=val2..."
 * and copy its (URL-decoded on the elided lines) value into @p arg.
 * NOTE(review): interleaved lines (braces, the tag/value buffer
 * declarations, '%'-escape decoding, the returns) are missing from this
 * excerpt.
 * @return 1 if the tag was found (elided), 0 otherwise
 */
2612 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
/* scan one "tag" name up to '=', '&' or end of string */
2622 while (*p != '\0' && *p != '=' && *p != '&') {
2623 if ((q - tag) < sizeof(tag) - 1)
/* copy the value up to the next '&', bounded by arg_size */
2631 while (*p != '&' && *p != '\0') {
2632 if ((q - arg) < arg_size - 1) {
2642 if (!strcmp(tag, tag1))
/**
 * Expand a single "%[0-9]*d" pattern in @p path with @p number,
 * writing the result into @p buf ("%%" escapes a literal '%').
 * Exactly one %d pattern must be present.
 * NOTE(review): interleaved lines (braces, the scan loop, the '%'
 * dispatch, the fail path and returns) are missing from this excerpt.
 * @return 0 on success, -1 (elided) on buffer overflow or if no/too
 *         many %d patterns are present
 */
2651 int av_get_frame_filename(char *buf, int buf_size,
2652 const char *path, int number)
2655 char *q, buf1[20], c;
2656 int nd, len, percentd_found;
/* parse an optional zero-padding width between '%' and 'd' */
2668 while (isdigit(*p)) {
2669 nd = nd * 10 + *p++ - '0';
2672 } while (isdigit(c));
2681 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2683 if ((q - buf + len) > buf_size - 1)
2685 memcpy(q, buf1, len);
2693 if ((q - buf) < buf_size - 1)
2697 if (!percentd_found)
/**
 * Write a classic hex dump of @p buf to @p f: per 16-byte row, the
 * offset, the hex bytes, then the printable-ASCII rendering
 * (non-printable bytes shown as a substitute character on an elided
 * line).
 * NOTE(review): interleaved lines (braces, 'len'/'j'/'c' declarations,
 * padding of short rows, the trailing newline) are missing from this
 * excerpt.
 */
2706 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2710 for(i=0;i<size;i+=16) {
2714 fprintf(f, "%08x ", i);
2717 fprintf(f, " %02x", buf[i+j]);
/* ASCII column: only printable characters are shown verbatim */
2722 for(j=0;j<len;j++) {
2724 if (c < ' ' || c > '~')
2726 fprintf(f, "%c", c);
2732 //FIXME needs to know the time_base
/**
 * Dump a packet's fields (stream index, key flag, duration, dts, pts,
 * size and optionally a hex dump of the payload) to @p f for debugging.
 * Timestamps are printed in seconds assuming AV_TIME_BASE units — see
 * the FIXME above.
 * NOTE(review): the "NOPTS" else-branches and closing braces are
 * missing from this excerpt.
 */
2733 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2735 fprintf(f, "stream #%d:\n", pkt->stream_index);
2736 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2737 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2738 /* DTS is _always_ valid after av_read_frame() */
2739 fprintf(f, " dts=");
2740 if (pkt->dts == AV_NOPTS_VALUE)
2743 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
2744 /* PTS may be not known if B frames are present */
2745 fprintf(f, " pts=");
2746 if (pkt->pts == AV_NOPTS_VALUE)
2749 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
2751 fprintf(f, " size=%d\n", pkt->size);
2753 av_hex_dump(f, pkt->data, pkt->size);
/**
 * Split a URL of the form proto://[auth@]host[:port]/path into its
 * components, each copied into a caller-supplied, size-bounded buffer.
 * Missing components yield empty strings; the port (elided output
 * parameter) is parsed with strtoul.
 * NOTE(review): interleaved lines (the port_ptr parameter, braces,
 * "://" separator handling, several copy statements and returns) are
 * missing from this excerpt.
 */
2756 void url_split(char *proto, int proto_size,
2757 char *authorization, int authorization_size,
2758 char *hostname, int hostname_size,
2760 char *path, int path_size,
/* copy the protocol part up to ':' */
2771 while (*p != ':' && *p != '\0') {
2772 if ((q - proto) < proto_size - 1)
2778 if (authorization_size > 0)
2779 authorization[0] = '\0';
2783 if (hostname_size > 0)
2787 char *at,*slash; // PETR: position of '@' character and '/' character
2794 at = strchr(p,'@'); // PETR: get the position of '@'
2795 slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
2796 if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
2798 q = at ? authorization : hostname; // PETR: if '@' exists starting with auth.
/* copy auth (up to '@') then hostname (up to ':' '/' '?' or end) */
2800 while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
2801 if (*p == '@') { // PETR: passed '@'
2802 if (authorization_size > 0)
2806 } else if (!at) { // PETR: hostname
2807 if ((q - hostname) < hostname_size - 1)
2810 if ((q - authorization) < authorization_size - 1)
2815 if (hostname_size > 0)
/* optional ":port" suffix */
2819 port = strtoul(p, (char **)&p, 10);
/* whatever remains is the path */
2824 pstrcpy(path, path_size, p);
2827 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
2828 int pts_num, int pts_den)
2830 s->pts_wrap_bits = pts_wrap_bits;
2831 s->time_base.num = pts_num;
2832 s->time_base.den = pts_den;
2835 /* fraction handling */
2838 * f = val + (num / den) + 0.5.
2840 * 'num' is normalized so that it is such as 0 <= num < den.
2842 * @param f fractional number
2843 * @param val integer value
2844 * @param num must be >= 0
2845 * @param den must be >= 1
/* NOTE(review): the body of av_frac_init is entirely missing from this
 * excerpt — only the signature below survives.  Per the doxygen block
 * above it presumably stores val/num/den into f with num normalized
 * into [0, den); confirm against the full source before relying on it. */
2847 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
2860 * Fractionnal addition to f: f = f + (incr / f->den).
2862 * @param f fractional number
2863 * @param incr increment, can be positive or negative
2865 static void av_frac_add(AVFrac *f, int64_t incr)
2869 num = f->num + incr;
2872 f->val += num / den;
2878 } else if (num >= den) {
2879 f->val += num / den;