2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/* NOTE(review): this chunk is a partial, line-numbered listing; interior
 * lines of most functions are missing.  Code is left byte-identical and
 * only annotated; claims are limited to what the visible fragments show. */
/* Heads of the global singly-linked registries of demuxers, muxers and
 * image formats (chained through each format's 'next' pointer). */
24 AVInputFormat *first_iformat = NULL;
25 AVOutputFormat *first_oformat = NULL;
26 AVImageFormat *first_image_format = NULL;
/* Append 'format' at the tail of the input-format registry: 'p' walks the
 * links to the terminating NULL; the store happens in lines not shown. */
28 void av_register_input_format(AVInputFormat *format)
32 while (*p != NULL) p = &(*p)->next;
/* Same tail-append, for the output-format registry. */
37 void av_register_output_format(AVOutputFormat *format)
41 while (*p != NULL) p = &(*p)->next;
/* Case-insensitively test the extension of 'filename' (text after the last
 * '.') against a comma-separated 'extensions' list.  'ext1'/'q' (declared
 * in lines not shown) accumulate one candidate extension at a time, bounded
 * by sizeof(ext1)-1 to leave room for the terminator. */
46 int match_ext(const char *filename, const char *extensions)
54 ext = strrchr(filename, '.');
60 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
63 if (!strcasecmp(ext1, ext))
/* Choose a muxer by, in order of priority: exact short name, MIME type,
 * then filename extension.  Candidates are scored and the best kept
 * (several scoring lines are missing from this listing). */
73 AVOutputFormat *guess_format(const char *short_name, const char *filename,
74 const char *mime_type)
76 AVOutputFormat *fmt, *fmt_found;
79 /* specific test for image sequences */
/* A numbered filename whose image2 codec is known selects "image2". */
80 if (!short_name && filename &&
81 filename_number_test(filename) >= 0 &&
82 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
83 return guess_format("image2", NULL, NULL);
/* Otherwise fall back to the legacy "image" muxer when
 * guess_image_format() recognizes the name. */
85 if (!short_name && filename &&
86 filename_number_test(filename) >= 0 &&
87 guess_image_format(filename)) {
88 return guess_format("image", NULL, NULL);
91 /* find the proper file type */
/* Each criterion below presumably bumps 'score' for the candidate 'fmt';
 * the increments themselves are in lines not shown. */
97 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
99 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
101 if (filename && fmt->extensions &&
102 match_ext(filename, fmt->extensions)) {
105 if (score > score_max) {
/* Resolve the "_stream" variant of a muxer: guess a format normally, then
 * look up a registered format named "<name>_stream". */
114 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
115 const char *mime_type)
117 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
120 AVOutputFormat *stream_fmt;
121 char stream_format_name[64];
123 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
124 stream_fmt = guess_format(stream_format_name, NULL, NULL);
134 * guesses the codec id based upon muxer and filename.
136 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
137 const char *filename, const char *mime_type, enum CodecType type){
/* Video: the image2 muxers derive the codec from the file name; if that
 * fails (or for other muxers) fall back to the muxer's default codec. */
138 if(type == CODEC_TYPE_VIDEO){
139 enum CodecID codec_id= CODEC_ID_NONE;
141 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
142 codec_id= av_guess_image2_codec(filename);
144 if(codec_id == CODEC_ID_NONE)
145 codec_id= fmt->video_codec;
/* Audio: always the muxer's default audio codec. */
147 }else if(type == CODEC_TYPE_AUDIO)
148 return fmt->audio_codec;
/* Any other stream type: no guess. */
150 return CODEC_ID_NONE;
/* Linear search of the registered demuxer list for an exact name match. */
153 AVInputFormat *av_find_input_format(const char *short_name)
156 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
157 if (!strcmp(fmt->name, short_name))
163 /* memory handling */
166 * Default packet destructor
/* Resets the payload fields; the actual free of pkt->data happens in a
 * line not shown in this listing. */
168 static void av_destruct_packet(AVPacket *pkt)
171 pkt->data = NULL; pkt->size = 0;
175 * Allocate the payload of a packet and initialize its fields to default values.
178 * @param size wanted payload size
179 * @return 0 if OK. AVERROR_xxx otherwise.
181 int av_new_packet(AVPacket *pkt, int size)
/* Unsigned-overflow guard: size + padding must not wrap. */
184 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
185 return AVERROR_NOMEM;
186 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
188 return AVERROR_NOMEM;
/* Zero the trailing padding so readers that look past the payload never
 * see uninitialized bytes. */
189 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
194 pkt->destruct = av_destruct_packet;
198 /* This is a hack - the packet memory allocation stuff is broken. The
199 packet is allocated if it was not really allocated */
/* Give a non-owned packet (destruct != av_destruct_packet) its own
 * allocated, padded copy of the data so it survives the source buffer. */
200 int av_dup_packet(AVPacket *pkt)
202 if (pkt->destruct != av_destruct_packet) {
204 /* we duplicate the packet and don't forget to put the padding
206 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
207 return AVERROR_NOMEM;
208 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
210 return AVERROR_NOMEM;
212 memcpy(data, pkt->data, pkt->size);
213 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
215 pkt->destruct = av_destruct_packet;
/* Circular byte FIFO over [f->buffer, f->end); wptr/rptr wrap at f->end.
 * The wrap arithmetic below ((end - rptr) + (wptr - buffer)) shows the
 * buffer is treated as a ring. */
222 int fifo_init(FifoBuffer *f, int size)
224 f->buffer = av_malloc(size);
227 f->end = f->buffer + size;
228 f->wptr = f->rptr = f->buffer;
232 void fifo_free(FifoBuffer *f)
/* Bytes available between an externally supplied read pointer and wptr,
 * accounting for wrap-around. */
237 int fifo_size(FifoBuffer *f, uint8_t *rptr)
244 if (f->wptr >= rptr) {
245 size = f->wptr - rptr;
247 size = (f->end - rptr) + (f->wptr - f->buffer);
252 /* get data from the fifo (return -1 if not enough data) */
253 int fifo_read(FifoBuffer *f, uint8_t *buf, int buf_size, uint8_t **rptr_ptr)
262 if (f->wptr >= rptr) {
263 size = f->wptr - rptr;
265 size = (f->end - rptr) + (f->wptr - f->buffer);
/* Copy out in chunks ('len' per iteration, clamped at the wrap point in
 * lines not shown). */
270 while (buf_size > 0) {
274 memcpy(buf, rptr, len);
/* Grow the ring; after av_realloc the read/write pointers are rebased,
 * and if the valid region wrapped (wptr < rptr) the tail segment is moved
 * to the end of the enlarged buffer to keep the ring contiguous. */
285 void fifo_realloc(FifoBuffer *f, unsigned int new_size){
286 unsigned int old_size= f->end - f->buffer;
288 if(old_size < new_size){
289 uint8_t *old= f->buffer;
291 f->buffer= av_realloc(f->buffer, new_size);
293 f->rptr += f->buffer - old;
294 f->wptr += f->buffer - old;
296 if(f->wptr < f->rptr){
297 memmove(f->rptr + new_size - old_size, f->rptr, f->buffer + old_size - f->rptr);
298 f->rptr += new_size - old_size;
300 f->end= f->buffer + new_size;
304 void fifo_write(FifoBuffer *f, uint8_t *buf, int size, uint8_t **wptr_ptr)
317 memcpy(wptr, buf, len);
/* Drain up to buf_size bytes from the fifo directly into a ByteIOContext
 * (same wrap arithmetic as fifo_read, writing via put_buffer). */
327 /* write fifo data to a ByteIOContext (return -1 if not enough data) */
328 int put_fifo(ByteIOContext *pb, FifoBuffer *f, int buf_size, uint8_t **rptr_ptr)
330 uint8_t *rptr = *rptr_ptr;
333 if (f->wptr >= rptr) {
334 size = f->wptr - rptr;
336 size = (f->end - rptr) + (f->wptr - f->buffer);
341 while (buf_size > 0) {
345 put_buffer(pb, rptr, len);
/* Test whether 'filename' contains a frame-number pattern by trying to
 * expand it for frame 1; get_frame_filename's return propagates (< 0 on
 * failure per callers visible below). */
355 int filename_number_test(const char *filename)
360 return get_frame_filename(buf, sizeof(buf), filename, 1);
363 /* guess file format */
/* Score every registered demuxer against the probe data: prefer a format's
 * own read_probe(); otherwise fall back to extension matching.  Formats
 * needing an open file are skipped when !is_opened. */
364 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
366 AVInputFormat *fmt1, *fmt;
367 int score, score_max;
371 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
372 if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
375 if (fmt1->read_probe) {
376 score = fmt1->read_probe(pd);
377 } else if (fmt1->extensions) {
378 if (match_ext(pd->filename, fmt1->extensions)) {
382 if (score > score_max) {
390 /************************************************************/
391 /* input media file */
394 * open a media file from an IO stream. 'fmt' must be specified.
/* av_log item-name callback: report the demuxer or muxer name of the
 * AVFormatContext being logged. */
397 static const char* format_to_name(void* ptr)
399 AVFormatContext* fc = (AVFormatContext*) ptr;
400 if(fc->iformat) return fc->iformat->name;
401 else if(fc->oformat) return fc->oformat->name;
405 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name };
/* Zero-allocate an AVFormatContext and attach its logging class. */
407 AVFormatContext *av_alloc_format_context(void)
410 ic = av_mallocz(sizeof(AVFormatContext));
412 ic->av_class = &av_format_context_class;
/* Open a demuxer over an already-established IO stream: allocate the
 * context, copy the name, allocate the demuxer's private data, then call
 * read_header() and record where the payload starts (data_offset). */
416 int av_open_input_stream(AVFormatContext **ic_ptr,
417 ByteIOContext *pb, const char *filename,
418 AVInputFormat *fmt, AVFormatParameters *ap)
423 ic = av_alloc_format_context();
431 ic->duration = AV_NOPTS_VALUE;
432 ic->start_time = AV_NOPTS_VALUE;
433 pstrcpy(ic->filename, sizeof(ic->filename), filename);
435 /* allocate private data */
436 if (fmt->priv_data_size > 0) {
437 ic->priv_data = av_mallocz(fmt->priv_data_size);
438 if (!ic->priv_data) {
443 ic->priv_data = NULL;
446 err = ic->iformat->read_header(ic, ap);
451 ic->data_offset = url_ftell(&ic->pb);
/* Error path: release the private data (full cleanup in lines not shown). */
457 av_freep(&ic->priv_data);
464 #define PROBE_BUF_SIZE 2048
467 * Open a media file as input. The codecs are not opened. Only the file
468 * header (if present) is read.
470 * @param ic_ptr the opened media file handle is put here
471 * @param filename filename to open.
472 * @param fmt if non NULL, force the file format to use
473 * @param buf_size optional buffer size (zero if default is OK)
474 * @param ap additional parameters needed when opening the file (NULL if default)
475 * @return 0 if OK. AVERROR_xxx otherwise.
477 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
480 AVFormatParameters *ap)
482 int err, must_open_file, file_opened;
483 uint8_t buf[PROBE_BUF_SIZE];
484 AVProbeData probe_data, *pd = &probe_data;
485 ByteIOContext pb1, *pb = &pb1;
490 pd->filename = filename;
495 /* guess format if no file can be opened */
496 fmt = av_probe_input_format(pd, 0);
499 /* do not open file if the format does not need it. XXX: specific
500 hack needed to handle RTSP/TCP */
502 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
504 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise its uninitialized
507 if (!fmt || must_open_file) {
508 /* if no file needed do not try to open one */
509 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
515 url_setbufsize(pb, buf_size);
518 /* read probe data */
519 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
/* If the stream cannot seek back (EPIPE), reopen it from scratch so the
 * demuxer starts at offset 0. */
520 if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
522 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
530 /* guess file format */
532 fmt = av_probe_input_format(pd, 1);
535 /* if still no format found, error */
541 /* XXX: suppress this hack for redirectors */
542 #ifdef CONFIG_NETWORK
543 if (fmt == &redir_demux) {
544 err = redir_open(ic_ptr, pb);
550 /* check filename in case an image number is expected */
551 if (fmt->flags & AVFMT_NEEDNUMBER) {
552 if (filename_number_test(filename) < 0) {
553 err = AVERROR_NUMEXPECTED;
557 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
569 /*******************************************************/
572 * Read a transport packet from a media file. This function is
573 * obsolete and should never be used. Use av_read_frame() instead.
575 * @param s media file handle
576 * @param pkt is filled
577 * @return 0 if OK. AVERROR_xxx if error.
579 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
581 return s->iformat->read_packet(s, pkt);
584 /**********************************************************/
586 /* get the number of samples of an audio frame. Return (-1) if error */
587 static int get_audio_frame_size(AVCodecContext *enc, int size)
/* frame_size <= 1 means the codec has no fixed frame size (PCM-like);
 * derive the sample count from the byte size instead. */
591 if (enc->frame_size <= 1) {
592 /* specific hack for pcm codecs because no frame size is
594 switch(enc->codec_id) {
595 case CODEC_ID_PCM_S16LE:
596 case CODEC_ID_PCM_S16BE:
597 case CODEC_ID_PCM_U16LE:
598 case CODEC_ID_PCM_U16BE:
/* 16-bit PCM: 2 bytes per sample per channel; guard against
 * division by zero when channels is unset. */
599 if (enc->channels == 0)
601 frame_size = size / (2 * enc->channels);
603 case CODEC_ID_PCM_S8:
604 case CODEC_ID_PCM_U8:
605 case CODEC_ID_PCM_MULAW:
606 case CODEC_ID_PCM_ALAW:
/* 8-bit formats: 1 byte per sample per channel. */
607 if (enc->channels == 0)
609 frame_size = size / (enc->channels);
612 /* used for example by ADPCM codecs */
/* No usable frame size: estimate samples from bit rate. */
613 if (enc->bit_rate == 0)
615 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
619 frame_size = enc->frame_size;
625 /* compute the frame duration as a *pnum / *pden fraction; left at 0 if
626 not available */
626 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
627 AVCodecParserContext *pc, AVPacket *pkt)
633 switch(st->codec.codec_type) {
634 case CODEC_TYPE_VIDEO:
/* Prefer the stream time base when the codec time base looks like a
 * field/tick rate (num*1000 <= den); otherwise use the codec's. */
635 if(st->codec.time_base.num*1000 <= st->codec.time_base.den){
636 *pnum = st->time_base.num;
637 *pden = st->time_base.den;
639 *pnum = st->codec.time_base.num;
640 *pden = st->codec.time_base.den;
/* Parser-reported repeated pictures lengthen the frame duration. */
642 if (pc && pc->repeat_pict) {
644 *pnum = (*pnum) * (2 + pc->repeat_pict);
647 case CODEC_TYPE_AUDIO:
648 frame_size = get_audio_frame_size(&st->codec, pkt->size);
652 *pden = st->codec.sample_rate;
/* Nonzero when every frame of the codec is a key frame: all audio, plus
 * the listed intra-only video codecs. */
659 static int is_intra_only(AVCodecContext *enc){
660 if(enc->codec_type == CODEC_TYPE_AUDIO){
662 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
663 switch(enc->codec_id){
665 case CODEC_ID_MJPEGB:
667 case CODEC_ID_RAWVIDEO:
668 case CODEC_ID_DVVIDEO:
669 case CODEC_ID_HUFFYUV:
670 case CODEC_ID_FFVHUFF:
/* Reconstruct a full timestamp from its low 'lsb_bits' bits: pick the
 * value congruent to 'lsb' (mod 2^lsb_bits) closest to 'last_ts'. */
681 static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
682 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
683 int64_t delta= last_ts - mask/2;
684 return ((lsb - delta)&mask) + delta;
/* Fill in/repair a demuxed packet's pts, dts and duration and advance the
 * stream's cur_dts.  Handles timestamp wrap-around, missing durations,
 * B-frame presentation delay, and key-frame flagging. */
687 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
688 AVCodecParserContext *pc, AVPacket *pkt)
690 int num, den, presentation_delayed;
691 /* handle wrapping */
/* Undo pts_wrap_bits truncation using the last known dts as an anchor. */
692 if(st->cur_dts != AV_NOPTS_VALUE){
693 if(pkt->pts != AV_NOPTS_VALUE)
694 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
695 if(pkt->dts != AV_NOPTS_VALUE)
696 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
/* Missing duration: derive it from the frame rate / sample count and
 * rescale into the stream time base. */
699 if (pkt->duration == 0) {
700 compute_frame_duration(&num, &den, st, pc, pkt);
702 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
/* Intra-only codecs: every packet is a key frame. */
706 if(is_intra_only(&st->codec))
707 pkt->flags |= PKT_FLAG_KEY;
709 /* do we have a video B frame ? */
710 presentation_delayed = 0;
711 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
712 /* XXX: need has_b_frame, but cannot get it if the codec is
714 if (( st->codec.codec_id == CODEC_ID_H264
715 || st->codec.has_b_frames) &&
716 pc && pc->pict_type != FF_B_TYPE)
717 presentation_delayed = 1;
718 /* this may be redundant, but it shouldn't hurt */
719 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
720 presentation_delayed = 1;
/* First packet of the stream: establish a dts origin. */
723 if(st->cur_dts == AV_NOPTS_VALUE){
724 if(presentation_delayed) st->cur_dts = -pkt->duration;
725 else st->cur_dts = 0;
728 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
729 /* interpolate PTS and DTS if they are not present */
730 if (presentation_delayed) {
731 /* DTS = decompression time stamp */
732 /* PTS = presentation time stamp */
733 if (pkt->dts == AV_NOPTS_VALUE) {
734 /* if we know the last pts, use it */
735 if(st->last_IP_pts != AV_NOPTS_VALUE)
736 st->cur_dts = pkt->dts = st->last_IP_pts;
738 pkt->dts = st->cur_dts;
740 st->cur_dts = pkt->dts;
742 /* this is tricky: the dts must be incremented by the duration
743 of the frame we are displaying, i.e. the last I or P frame */
744 if (st->last_IP_duration == 0)
745 st->cur_dts += pkt->duration;
747 st->cur_dts += st->last_IP_duration;
748 st->last_IP_duration = pkt->duration;
749 st->last_IP_pts= pkt->pts;
750 /* cannot compute PTS if not present (we can compute it only
751 by knowing the future) */
752 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
/* Heuristic: if the given pts is one duration behind the running dts,
 * assume an off-by-one-frame pts and nudge it forward. */
753 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
754 int64_t old_diff= ABS(st->cur_dts - pkt->duration - pkt->pts);
755 int64_t new_diff= ABS(st->cur_dts - pkt->pts);
756 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
757 pkt->pts += pkt->duration;
758 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%Ld new:%Ld dur:%d cur:%Ld size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
762 /* presentation is not delayed : PTS and DTS are the same */
763 if (pkt->pts == AV_NOPTS_VALUE) {
764 if (pkt->dts == AV_NOPTS_VALUE) {
765 pkt->pts = st->cur_dts;
766 pkt->dts = st->cur_dts;
769 st->cur_dts = pkt->dts;
773 st->cur_dts = pkt->pts;
776 st->cur_dts += pkt->duration;
778 // av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
783 /* key frame computation */
784 switch(st->codec.codec_type) {
785 case CODEC_TYPE_VIDEO:
786 if (pc->pict_type == FF_I_TYPE)
787 pkt->flags |= PKT_FLAG_KEY;
789 case CODEC_TYPE_AUDIO:
790 pkt->flags |= PKT_FLAG_KEY;
/* Packet destructor that does NOT free the data (used for parser output
 * that points into a buffer owned elsewhere). */
798 void av_destruct_packet_nofree(AVPacket *pkt)
800 pkt->data = NULL; pkt->size = 0;
/* Core frame reader: pull raw packets via av_read_packet() and, where the
 * stream needs it, run them through an AVCodecParser to reassemble whole
 * frames; timestamps are fixed up by compute_pkt_fields(). */
803 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
809 /* select current input stream component */
813 /* no parsing needed: we just output the packet as is */
814 /* raw data support */
816 compute_pkt_fields(s, st, NULL, pkt);
/* Feed the buffered raw packet (cur_ptr/cur_len) to the parser; its
 * pts/dts are consumed once, then cleared. */
819 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
820 len = av_parser_parse(st->parser, &st->codec, &pkt->data, &pkt->size,
821 s->cur_ptr, s->cur_len,
822 s->cur_pkt.pts, s->cur_pkt.dts);
823 s->cur_pkt.pts = AV_NOPTS_VALUE;
824 s->cur_pkt.dts = AV_NOPTS_VALUE;
825 /* increment read pointer */
829 /* return packet if any */
/* Parser produced a frame: hand it out without copying (nofree
 * destructor) and let compute_pkt_fields finalize timestamps. */
833 pkt->stream_index = st->index;
834 pkt->pts = st->parser->pts;
835 pkt->dts = st->parser->dts;
836 pkt->destruct = av_destruct_packet_nofree;
837 compute_pkt_fields(s, st, st->parser, pkt);
842 av_free_packet(&s->cur_pkt);
846 /* read next packet */
847 ret = av_read_packet(s, &s->cur_pkt);
/* End of input: flush each stream's parser to emit buffered frames. */
851 /* return the last frames, if any */
852 for(i = 0; i < s->nb_streams; i++) {
855 av_parser_parse(st->parser, &st->codec,
856 &pkt->data, &pkt->size,
858 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
863 /* no more packets: really terminates parsing */
/* New raw packet: remember it as the current parse source and lazily
 * create the stream's parser (or give up parsing if none exists). */
867 st = s->streams[s->cur_pkt.stream_index];
870 s->cur_ptr = s->cur_pkt.data;
871 s->cur_len = s->cur_pkt.size;
872 if (st->need_parsing && !st->parser) {
873 st->parser = av_parser_init(st->codec.codec_id);
875 /* no parser available : just output the raw packets */
876 st->need_parsing = 0;
884 * Return the next frame of a stream. The returned packet is valid
885 * until the next av_read_frame() or until av_close_input_file() and
886 * must be freed with av_free_packet. For video, the packet contains
887 * exactly one frame. For audio, it contains an integer number of
888 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
889 * data). If the audio frames have a variable size (e.g. MPEG audio),
890 * then it contains one frame.
892 * pkt->pts, pkt->dts and pkt->duration are always set to correct
893 * values in AV_TIME_BASE unit (and guessed if the format cannot
894 * provided them). pkt->pts can be AV_NOPTS_VALUE if the video format
895 * has B frames, so it is better to rely on pkt->dts if you do not
896 * decompress the payload.
898 * Return 0 if OK, < 0 if error or end of file.
/* Serve queued packets first (placed there by seeking/probing); only then
 * pull a fresh frame from the internal reader. */
900 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
904 pktl = s->packet_buffer;
906 /* read packet from packet buffer, if there is data */
908 s->packet_buffer = pktl->next;
912 return av_read_frame_internal(s, pkt);
916 /* XXX: suppress the packet queue */
/* Free every queued packet and its list node. */
917 static void flush_packet_queue(AVFormatContext *s)
922 pktl = s->packet_buffer;
925 s->packet_buffer = pktl->next;
926 av_free_packet(&pktl->pkt);
931 /*******************************************************/
/* Pick a default stream for seeking: the loop prefers a video stream
 * (selection of the fallback happens in lines not shown). */
934 int av_find_default_stream_index(AVFormatContext *s)
939 if (s->nb_streams <= 0)
941 for(i = 0; i < s->nb_streams; i++) {
943 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
950 /* flush the frame reader */
/* Reset all demuxing state after a seek: drop queued packets, free the
 * in-flight packet, close per-stream parsers and clear timestamp state. */
951 static void av_read_frame_flush(AVFormatContext *s)
956 flush_packet_queue(s);
958 /* free previous packet */
960 if (s->cur_st->parser)
961 av_free_packet(&s->cur_pkt);
968 /* for each stream, reset read state */
969 for(i = 0; i < s->nb_streams; i++) {
973 av_parser_close(st->parser);
976 st->last_IP_pts = AV_NOPTS_VALUE;
977 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
982 * updates cur_dts of all streams based on given timestamp and AVStream.
983 * stream ref_st unchanged, others set cur_dts in their native timebase
984 * only needed for timestamp wrapping or if (dts not set and pts!=dts)
985 * @param timestamp new dts expressed in time_base of param ref_st
986 * @param ref_st reference stream giving time_base of param timestamp
988 static void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
991 for(i = 0; i < s->nb_streams; i++) {
992 AVStream *st = s->streams[i];
/* Rescale from ref_st's time base into this stream's time base. */
994 st->cur_dts = av_rescale(timestamp,
995 st->time_base.den * (int64_t)ref_st->time_base.num,
996 st->time_base.num * (int64_t)ref_st->time_base.den);
1001 * Add an index entry into a sorted list, updating it if it is already there.
1002 * @param timestamp timestamp in the timebase of the given stream
1004 int av_add_index_entry(AVStream *st,
1005 int64_t pos, int64_t timestamp, int distance, int flags)
1007 AVIndexEntry *entries, *ie;
/* Overflow guard before growing the entries array. */
1010 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1013 entries = av_fast_realloc(st->index_entries,
1014 &st->index_entries_allocated_size,
1015 (st->nb_index_entries + 1) *
1016 sizeof(AVIndexEntry))
1020 st->index_entries= entries;
/* Locate the insertion point; append, insert (memmove shifts the tail),
 * or update in place when the timestamp already exists. */
1022 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1025 index= st->nb_index_entries++;
1026 ie= &entries[index];
1027 assert(index==0 || ie[-1].timestamp < timestamp);
1029 ie= &entries[index];
1030 if(ie->timestamp != timestamp){
1031 if(ie->timestamp <= timestamp)
1033 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1034 st->nb_index_entries++;
1035 }else if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
1036 distance= ie->min_distance;
1040 ie->timestamp = timestamp;
1041 ie->min_distance= distance;
1047 /* build an index for raw streams using a parser */
/* Scan the whole file once from data_offset, recording an index entry at
 * every key frame reported for stream 0 by its parser. */
1048 static void av_build_index_raw(AVFormatContext *s)
1050 AVPacket pkt1, *pkt = &pkt1;
1055 av_read_frame_flush(s);
1056 url_fseek(&s->pb, s->data_offset, SEEK_SET);
1059 ret = av_read_frame(s, pkt);
1062 if (pkt->stream_index == 0 && st->parser &&
1063 (pkt->flags & PKT_FLAG_KEY)) {
1064 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1065 0, AVINDEX_KEYFRAME);
1067 av_free_packet(pkt);
1071 /* return TRUE if we deal with a raw stream (raw codec data and
/* Heuristic: exactly one stream, and it requires parsing (further checks
 * in lines not shown). */
1073 static int is_raw_stream(AVFormatContext *s)
1077 if (s->nb_streams != 1)
1080 if (!st->need_parsing)
1086 * gets the index for a specific timestamp.
1087 * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
1088 * the timestamp which is <= the requested one, if backward is 0
1089 * then it will be >=
1090 * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
1091 * @return < 0 if no such timestamp could be found
/* Binary search over the sorted index (a/b bounds maintained in lines not
 * shown), then walk outward to the nearest key frame unless ANY is set. */
1093 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1096 AVIndexEntry *entries= st->index_entries;
1097 int nb_entries= st->nb_index_entries;
1106 timestamp = entries[m].timestamp;
1107 if(timestamp >= wanted_timestamp)
1109 if(timestamp <= wanted_timestamp)
1112 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1114 if(!(flags & AVSEEK_FLAG_ANY)){
1115 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1116 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1128 * Does a binary search using av_index_search_timestamp() and AVCodec.read_timestamp().
1129 * this isn't supposed to be called directly by a user application, but by demuxers
1130 * @param target_ts target timestamp in the time base of the given stream
1131 * @param stream_index stream number
1133 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1134 AVInputFormat *avif= s->iformat;
1135 int64_t pos_min, pos_max, pos, pos_limit;
1136 int64_t ts_min, ts_max, ts;
1138 int index, no_change;
1141 if (stream_index < 0)
1145 av_log(s, AV_LOG_DEBUG, "read_seek: %d %lld\n", stream_index, target_ts);
1149 ts_min= AV_NOPTS_VALUE;
1150 pos_limit= -1; //gcc falsely says it may be uninitialized
/* Seed the search bounds from the stream's index when available: a cached
 * entry at or before the target gives (pos_min, ts_min)... */
1152 st= s->streams[stream_index];
1153 if(st->index_entries){
1156 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1157 index= FFMAX(index, 0);
1158 e= &st->index_entries[index];
1160 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1162 ts_min= e->timestamp;
1164 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%llx dts_min=%lld\n",
/* ...and an entry at or after the target gives (pos_max, ts_max) plus a
 * pos_limit that stays min_distance bytes before the key frame. */
1171 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1172 assert(index < st->nb_index_entries);
1174 e= &st->index_entries[index];
1175 assert(e->timestamp >= target_ts);
1177 ts_max= e->timestamp;
1178 pos_limit= pos_max - e->min_distance;
1180 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%llx pos_limit=0x%llx dts_max=%lld\n",
1181 pos_max,pos_limit, ts_max);
/* No cached lower bound: read the first timestamp of the file. */
1186 if(ts_min == AV_NOPTS_VALUE){
1187 pos_min = s->data_offset;
1188 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1189 if (ts_min == AV_NOPTS_VALUE)
/* No cached upper bound: probe backwards from EOF in growing steps until
 * a timestamp is found, then confirm nothing lies beyond it. */
1193 if(ts_max == AV_NOPTS_VALUE){
1195 pos_max = url_filesize(url_fileno(&s->pb)) - 1;
1198 ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
1200 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1201 if (ts_max == AV_NOPTS_VALUE)
1205 int64_t tmp_pos= pos_max + 1;
1206 int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1207 if(tmp_ts == AV_NOPTS_VALUE)
/* Main search loop: interpolation first, then bisection, then linear
 * stepping when neither bound moves. */
1216 while (pos_min < pos_limit) {
1218 av_log(s, AV_LOG_DEBUG, "pos_min=0x%llx pos_max=0x%llx dts_min=%lld dts_max=%lld\n",
1222 assert(pos_limit <= pos_max);
1225 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1226 // interpolate position (better than dichotomy)
1227 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1228 + pos_min - approximate_keyframe_distance;
1229 }else if(no_change==1){
1230 // bisection, if interpolation failed to change min or max pos last time
1231 pos = (pos_min + pos_limit)>>1;
1233 // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1238 else if(pos > pos_limit)
1242 ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1248 av_log(s, AV_LOG_DEBUG, "%Ld %Ld %Ld / %Ld %Ld %Ld target:%Ld limit:%Ld start:%Ld noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1250 assert(ts != AV_NOPTS_VALUE);
/* Shrink whichever bound the probed timestamp allows. */
1251 if (target_ts <= ts) {
1252 pos_limit = start_pos - 1;
1256 if (target_ts >= ts) {
/* Final position: lower or upper bound depending on seek direction. */
1262 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1263 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1266 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1268 ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1269 av_log(s, AV_LOG_DEBUG, "pos=0x%llx %lld<=%lld<=%lld\n",
1270 pos, ts_min, target_ts, ts_max);
/* Commit: seek the IO layer and propagate the reached dts to all streams. */
1273 url_fseek(&s->pb, pos, SEEK_SET);
1275 av_update_cur_dts(s, st, ts);
/* Byte-offset seek: clamp 'pos' to [data_offset, filesize-1] and seek the
 * IO layer directly; timestamps are updated afterwards ('ts' computed in
 * lines not shown). */
1280 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1281 int64_t pos_min, pos_max;
1285 if (stream_index < 0)
1288 st= s->streams[stream_index];
1291 pos_min = s->data_offset;
1292 pos_max = url_filesize(url_fileno(&s->pb)) - 1;
1294 if (pos < pos_min) pos= pos_min;
1295 else if(pos > pos_max) pos= pos_max;
1297 url_fseek(&s->pb, pos, SEEK_SET);
1300 av_update_cur_dts(s, st, ts);
/* Index-based seek: build the index on demand for raw streams, look up
 * the timestamp, then jump to the entry's byte position. */
1305 static int av_seek_frame_generic(AVFormatContext *s,
1306 int stream_index, int64_t timestamp, int flags)
1312 if (!s->index_built) {
1313 if (is_raw_stream(s)) {
1314 av_build_index_raw(s);
1321 st = s->streams[stream_index];
1322 index = av_index_search_timestamp(st, timestamp, flags);
1326 /* now we have found the index, we can seek */
1327 ie = &st->index_entries[index];
1328 av_read_frame_flush(s);
1329 url_fseek(&s->pb, ie->pos, SEEK_SET);
1331 av_update_cur_dts(s, st, ie->timestamp);
1337 * Seek to the key frame at timestamp.
1338 * 'timestamp' in 'stream_index'.
1339 * @param stream_index If stream_index is (-1), a default
1340 * stream is selected, and timestamp is automatically converted
1341 * from AV_TIME_BASE units to the stream specific time_base.
1342 * @param timestamp timestamp in AVStream.time_base units
1343 * @param flags flags which select direction and seeking mode
1344 * @return >= 0 on success
1346 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1351 av_read_frame_flush(s);
1353 if(flags & AVSEEK_FLAG_BYTE)
1354 return av_seek_frame_byte(s, stream_index, timestamp, flags);
/* Default stream: pick one and rescale the AV_TIME_BASE timestamp into
 * that stream's time base. */
1356 if(stream_index < 0){
1357 stream_index= av_find_default_stream_index(s);
1358 if(stream_index < 0)
1361 st= s->streams[stream_index];
1362 /* timestamp for default must be expressed in AV_TIME_BASE units */
1363 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1365 st= s->streams[stream_index];
1367 /* first, we try the format specific seek */
1368 if (s->iformat->read_seek)
1369 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
/* Fallbacks: binary search when the demuxer exposes read_timestamp,
 * otherwise the generic index-based seek. */
1376 if(s->iformat->read_timestamp)
1377 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1379 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1382 /*******************************************************/
1384 /* return TRUE if the stream has accurate timings for at least one component */
1385 static int av_has_timings(AVFormatContext *ic)
1390 for(i = 0;i < ic->nb_streams; i++) {
1391 st = ic->streams[i];
1392 if (st->start_time != AV_NOPTS_VALUE &&
1393 st->duration != AV_NOPTS_VALUE)
1399 /* estimate the stream timings from the one of each components. Also
1400 compute the global bitrate if possible */
/* Global start_time = earliest stream start; duration = span to the latest
 * stream end; bit rate derived from file_size / duration when known. All
 * per-stream values are rescaled into AV_TIME_BASE first. */
1401 static void av_update_stream_timings(AVFormatContext *ic)
1403 int64_t start_time, start_time1, end_time, end_time1;
1407 start_time = MAXINT64;
1408 end_time = MININT64;
1409 for(i = 0;i < ic->nb_streams; i++) {
1410 st = ic->streams[i];
1411 if (st->start_time != AV_NOPTS_VALUE) {
1412 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1413 if (start_time1 < start_time)
1414 start_time = start_time1;
1415 if (st->duration != AV_NOPTS_VALUE) {
1416 end_time1 = start_time1
1417 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1418 if (end_time1 > end_time)
1419 end_time = end_time1;
1423 if (start_time != MAXINT64) {
1424 ic->start_time = start_time;
1425 if (end_time != MAXINT64) {
1426 ic->duration = end_time - start_time;
1427 if (ic->file_size > 0) {
1428 /* compute the bit rate */
1429 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1430 (double)ic->duration;
/* Back-fill per-stream start_time/duration from the container-level
 * values, rescaled into each stream's own time base. */
1437 static void fill_all_stream_timings(AVFormatContext *ic)
1442 av_update_stream_timings(ic);
1443 for(i = 0;i < ic->nb_streams; i++) {
1444 st = ic->streams[i];
1445 if (st->start_time == AV_NOPTS_VALUE) {
1446 if(ic->start_time != AV_NOPTS_VALUE)
1447 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1448 if(ic->duration != AV_NOPTS_VALUE)
1449 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Last-resort estimate: total bit rate = sum of stream bit rates; then
 * duration = 8 * file_size / bit_rate, rescaled per stream. */
1454 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1456 int64_t filesize, duration;
1460 /* if bit_rate is already set, we believe it */
1461 if (ic->bit_rate == 0) {
1463 for(i=0;i<ic->nb_streams;i++) {
1464 st = ic->streams[i];
1465 bit_rate += st->codec.bit_rate;
1467 ic->bit_rate = bit_rate;
1470 /* if duration is already set, we believe it */
1471 if (ic->duration == AV_NOPTS_VALUE &&
1472 ic->bit_rate != 0 &&
1473 ic->file_size != 0) {
1474 filesize = ic->file_size;
1476 for(i = 0; i < ic->nb_streams; i++) {
1477 st = ic->streams[i];
1478 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1479 if (st->start_time == AV_NOPTS_VALUE ||
1480 st->duration == AV_NOPTS_VALUE) {
1482 st->duration = duration;
/* Byte budget for each of the two PTS-probing passes below. */
1489 #define DURATION_MAX_READ_SIZE 250000
1491 /* only usable for MPEG-PS streams */
/* Estimate start times and durations by reading real packet PTS values:
 * one pass over the first DURATION_MAX_READ_SIZE bytes for start times,
 * one pass over the last DURATION_MAX_READ_SIZE bytes for end times. */
1492 static void av_estimate_timings_from_pts(AVFormatContext *ic)
1494 AVPacket pkt1, *pkt = &pkt1;
1496 int read_size, i, ret;
1497 int64_t start_time, end_time, end_time1;
1498 int64_t filesize, offset, duration;
1500 /* free previous packet */
1501 if (ic->cur_st && ic->cur_st->parser)
1502 av_free_packet(&ic->cur_pkt);
1505 /* flush packet queue */
1506 flush_packet_queue(ic);
/* Close any per-stream parsers before rereading from the start. */
1508 for(i=0;i<ic->nb_streams;i++) {
1509 st = ic->streams[i];
1511 av_parser_close(st->parser);
1516 /* we read the first packets to get the first PTS (not fully
1517 accurate, but it is enough now) */
1518 url_fseek(&ic->pb, 0, SEEK_SET);
1521 if (read_size >= DURATION_MAX_READ_SIZE)
1523 /* if all info is available, we can stop */
1524 for(i = 0;i < ic->nb_streams; i++) {
1525 st = ic->streams[i];
1526 if (st->start_time == AV_NOPTS_VALUE)
1529 if (i == ic->nb_streams)
1532 ret = av_read_packet(ic, pkt);
1535 read_size += pkt->size;
1536 st = ic->streams[pkt->stream_index];
/* First PTS seen on a stream becomes its start_time. */
1537 if (pkt->pts != AV_NOPTS_VALUE) {
1538 if (st->start_time == AV_NOPTS_VALUE)
1539 st->start_time = pkt->pts;
1541 av_free_packet(pkt);
1544 /* estimate the end time (duration) */
1545 /* XXX: may need to support wrapping */
1546 filesize = ic->file_size;
1547 offset = filesize - DURATION_MAX_READ_SIZE;
1551 url_fseek(&ic->pb, offset, SEEK_SET);
1554 if (read_size >= DURATION_MAX_READ_SIZE)
1556 /* if all info is available, we can stop */
1557 for(i = 0;i < ic->nb_streams; i++) {
1558 st = ic->streams[i];
1559 if (st->duration == AV_NOPTS_VALUE)
1562 if (i == ic->nb_streams)
1565 ret = av_read_packet(ic, pkt);
1568 read_size += pkt->size;
1569 st = ic->streams[pkt->stream_index];
/* Keep the largest (last PTS - start_time) as the stream duration. */
1570 if (pkt->pts != AV_NOPTS_VALUE) {
1571 end_time = pkt->pts;
1572 duration = end_time - st->start_time;
1574 if (st->duration == AV_NOPTS_VALUE ||
1575 st->duration < duration)
1576 st->duration = duration;
1579 av_free_packet(pkt);
/* Propagate to container-level timings, then rewind for normal reading. */
1582 fill_all_stream_timings(ic);
1584 url_fseek(&ic->pb, 0, SEEK_SET);
/* Top-level timing estimation dispatcher: records the file size, then picks
 * the estimation strategy — PTS scanning for seekable MPEG-PS/TS input,
 * existing component timings when available, or bit-rate extrapolation as
 * the last resort — and finally updates the global stream timings.
 * NOTE(review): excerpt is missing lines (declarations of h/file_size,
 * braces, and what looks like a debug-print tail at 1620-1629). */
1587 static void av_estimate_timings(AVFormatContext *ic)
1592 /* get the file size, if possible */
1593 if (ic->iformat->flags & AVFMT_NOFILE) {
1596 h = url_fileno(&ic->pb);
1597 file_size = url_filesize(h);
1601 ic->file_size = file_size;
/* PTS scanning needs a known size and a seekable (non-streamed) input */
1603 if ((ic->iformat == &mpegps_demux || ic->iformat == &mpegts_demux) && file_size && !ic->pb.is_streamed) {
1604 /* get accurate estimate from the PTSes */
1605 av_estimate_timings_from_pts(ic);
1606 } else if (av_has_timings(ic)) {
1607 /* at least one components has timings - we use them for all
1609 fill_all_stream_timings(ic);
1611 /* less precise: use bit rate info */
1612 av_estimate_timings_from_bit_rate(ic);
1614 av_update_stream_timings(ic);
/* debug dump of the computed timings (presumably compiled conditionally —
 * TODO confirm; the guarding preprocessor lines are not visible here) */
1620 for(i = 0;i < ic->nb_streams; i++) {
1621 st = ic->streams[i];
1622 printf("%d: start_time: %0.3f duration: %0.3f\n",
1623 i, (double)st->start_time / AV_TIME_BASE,
1624 (double)st->duration / AV_TIME_BASE);
1626 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1627 (double)ic->start_time / AV_TIME_BASE,
1628 (double)ic->duration / AV_TIME_BASE,
1629 ic->bit_rate / 1000);
/* Return non-zero when the codec context carries enough parameters for the
 * stream type: sample_rate for audio; width plus a known pix_fmt for video.
 * NOTE(review): the default/return tail of the switch is missing from this
 * excerpt (numbering jumps after 1642). */
1634 static int has_codec_parameters(AVCodecContext *enc)
1637 switch(enc->codec_type) {
1638 case CODEC_TYPE_AUDIO:
1639 val = enc->sample_rate;
1641 case CODEC_TYPE_VIDEO:
1642 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
/* Open the stream's decoder and decode one packet's worth of data, solely to
 * populate missing codec parameters (width/pix_fmt, sample_rate, ...).  The
 * decoder is closed again before returning.
 * NOTE(review): error-handling lines and the return path are missing from
 * this excerpt. */
1651 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1655 int got_picture, ret;
1658 codec = avcodec_find_decoder(st->codec.codec_id);
1661 ret = avcodec_open(&st->codec, codec);
/* only decode if the parameters are still incomplete */
1665 if(!has_codec_parameters(&st->codec)){
1666 switch(st->codec.codec_type) {
1667 case CODEC_TYPE_VIDEO:
1668 ret = avcodec_decode_video(&st->codec, &picture,
1669 &got_picture, (uint8_t *)data, size);
1671 case CODEC_TYPE_AUDIO:
/* temporary buffer for decoded samples; presumably freed below — TODO
 * confirm, the free is not visible in this excerpt */
1672 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1675 ret = avcodec_decode_audio(&st->codec, samples,
1676 &got_picture, (uint8_t *)data, size);
1684 avcodec_close(&st->codec);
1688 /* absolute maximum size we read until we abort */
1689 #define MAX_READ_SIZE 5000000
1691 /* maximum duration until we stop analysing the stream */
1692 #define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 1.0))
1695 * Read the beginning of a media file to get stream information. This
1696 * is useful for file formats with no headers such as MPEG. This
1697 * function also compute the real frame rate in case of mpeg2 repeat
1700 * @param ic media file handle
1701 * @return >=0 if OK. AVERROR_xxx if error.
1702 * @todo let user decide somehow what information is needed so we dont waste time geting stuff the user doesnt need
/* Implementation outline:
 *  1. seed per-video-stream codec time bases and the dts/duration trackers;
 *  2. read frames until every stream has codec parameters (and, for
 *     variable-fps video, a best frame duration), buffering each packet in
 *     ic->packet_buffer; bail out after MAX_READ_SIZE bytes or
 *     MAX_STREAM_DURATION of analysed data;
 *  3. derive r_frame_rate per video stream (measured packet spacing,
 *     mpeg1/2 telecine heuristic, codec time base fallback);
 *  4. estimate timings and correct DTS of B-frame streams.
 * NOTE(review): this excerpt is missing many lines (loop exits, 'break's,
 * brace closures), so control flow below is partially invisible. */
1704 int av_find_stream_info(AVFormatContext *ic)
1706 int i, count, ret, read_size;
1708 AVPacket pkt1, *pkt;
1709 AVPacketList *pktl=NULL, **ppktl;
1710 int64_t last_dts[MAX_STREAMS];
1711 int64_t best_duration[MAX_STREAMS];
1713 for(i=0;i<ic->nb_streams;i++) {
1714 st = ic->streams[i];
1715 if(st->codec.codec_type == CODEC_TYPE_VIDEO){
1716 /* if(!st->time_base.num)
/* default the codec time base to the stream time base when unset */
1718 if(!st->codec.time_base.num)
1719 st->codec.time_base= st->time_base;
1723 for(i=0;i<MAX_STREAMS;i++){
1724 last_dts[i]= AV_NOPTS_VALUE;
1725 best_duration[i]= INT64_MAX;
1730 ppktl = &ic->packet_buffer;
1732 /* check if one codec still needs to be handled */
1733 for(i=0;i<ic->nb_streams;i++) {
1734 st = ic->streams[i];
1735 if (!has_codec_parameters(&st->codec))
1737 /* variable fps and no guess at the real fps */
1738 if( st->codec.time_base.den >= 1000LL*st->codec.time_base.num
1739 && best_duration[i]== INT64_MAX && st->codec.codec_type == CODEC_TYPE_VIDEO)
1742 if (i == ic->nb_streams) {
1743 /* NOTE: if the format has no header, then we need to read
1744 some packets to get most of the streams, so we cannot
1746 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1747 /* if we found the info for all the codecs, we can stop */
1752 /* we did not get all the codec info, but we read too much data */
1753 if (read_size >= MAX_READ_SIZE) {
1759 /* NOTE: a new stream can be added there if no header in file
1760 (AVFMTCTX_NOHEADER) */
1761 ret = av_read_frame_internal(ic, &pkt1);
/* EOF path: succeed only if every stream got its parameters */
1764 ret = -1; /* we could not have all the codec parameters before EOF */
1765 for(i=0;i<ic->nb_streams;i++) {
1766 st = ic->streams[i];
1767 if (!has_codec_parameters(&st->codec))
1770 if ((ic->ctx_flags & AVFMTCTX_NOHEADER) &&
1771 i == ic->nb_streams)
1776 pktl = av_mallocz(sizeof(AVPacketList));
1778 ret = AVERROR_NOMEM;
1782 /* add the packet in the buffered packet list */
1784 ppktl = &pktl->next;
1789 /* duplicate the packet */
1790 if (av_dup_packet(pkt) < 0) {
1791 ret = AVERROR_NOMEM;
1795 read_size += pkt->size;
1797 st = ic->streams[pkt->stream_index];
1798 st->codec_info_duration += pkt->duration;
1799 if (pkt->duration != 0)
1800 st->codec_info_nb_frames++;
/* track the smallest positive dts delta per video stream: used later as
 * the measured frame duration for frame-rate guessing */
1802 if(st->codec.codec_type == CODEC_TYPE_VIDEO){
1803 int64_t last= last_dts[pkt->stream_index];
1805 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && last < pkt->dts &&
1806 best_duration[pkt->stream_index] > pkt->dts - last){
1807 best_duration[pkt->stream_index] = pkt->dts - last;
1809 last_dts[pkt->stream_index]= pkt->dts;
1811 /* if still no information, we try to open the codec and to
1812 decompress the frame. We try to avoid that in most cases as
1813 it takes longer and uses more memory. For MPEG4, we need to
1814 decompress for Quicktime. */
1815 if (!has_codec_parameters(&st->codec) /*&&
1816 (st->codec.codec_id == CODEC_ID_FLV1 ||
1817 st->codec.codec_id == CODEC_ID_H264 ||
1818 st->codec.codec_id == CODEC_ID_H263 ||
1819 st->codec.codec_id == CODEC_ID_H261 ||
1820 st->codec.codec_id == CODEC_ID_VORBIS ||
1821 st->codec.codec_id == CODEC_ID_MJPEG ||
1822 st->codec.codec_id == CODEC_ID_PNG ||
1823 st->codec.codec_id == CODEC_ID_PAM ||
1824 st->codec.codec_id == CODEC_ID_PGM ||
1825 st->codec.codec_id == CODEC_ID_PGMYUV ||
1826 st->codec.codec_id == CODEC_ID_PBM ||
1827 st->codec.codec_id == CODEC_ID_PPM ||
1828 st->codec.codec_id == CODEC_ID_SHORTEN ||
1829 (st->codec.codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1830 try_decode_frame(st, pkt->data, pkt->size);
1832 if (st->codec_info_duration >= MAX_STREAM_DURATION) {
/* post-pass: finalize per-video-stream codec tag and frame rate */
1838 for(i=0;i<ic->nb_streams;i++) {
1839 st = ic->streams[i];
1840 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
1841 if(st->codec.codec_id == CODEC_ID_RAWVIDEO && !st->codec.codec_tag && !st->codec.bits_per_sample)
1842 st->codec.codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec.pix_fmt);
1844 if(best_duration[i] < INT64_MAX && st->codec.time_base.num*1000 <= st->codec.time_base.den){
/* frame rate = stream ticks per second / measured frame duration */
1847 st->r_frame_rate.num= st->time_base.den;
1848 st->r_frame_rate.den= st->time_base.num*best_duration[i];
1849 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->r_frame_rate.num, st->r_frame_rate.den, 1<<15);
1851 int_fps= av_rescale(st->r_frame_rate.num, 1, st->r_frame_rate.den); // 1/0
/* snap to an integer fps when the rational divides exactly */
1853 if(int_fps>0 && av_rescale(st->r_frame_rate.num, 1, int_fps) == st->r_frame_rate.den){
1854 st->r_frame_rate.num= int_fps;
1855 st->r_frame_rate.den= 1;
1859 /* set real frame rate info */
1860 /* compute the real frame rate for telecine */
1861 if ((st->codec.codec_id == CODEC_ID_MPEG1VIDEO ||
1862 st->codec.codec_id == CODEC_ID_MPEG2VIDEO) &&
1863 st->codec.sub_id == 2) {
1864 if (st->codec_info_nb_frames >= 20) {
1865 float coded_frame_rate, est_frame_rate;
1866 est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) /
1867 (double)st->codec_info_duration ;
1868 coded_frame_rate = 1.0/av_q2d(st->codec.time_base);
1870 printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n",
1871 coded_frame_rate, est_frame_rate);
1873 /* if we detect that it could be a telecine, we
1874 signal it. It would be better to do it at a
1875 higher level as it can change in a film */
1876 if (coded_frame_rate >= 24.97 &&
1877 (est_frame_rate >= 23.5 && est_frame_rate < 24.5)) {
1878 st->r_frame_rate = (AVRational){24000, 1001};
1882 /* if no real frame rate, use the codec one */
1883 if (!st->r_frame_rate.num){
1884 st->r_frame_rate.num = st->codec.time_base.den;
1885 st->r_frame_rate.den = st->codec.time_base.num;
1890 av_estimate_timings(ic);
1892 /* correct DTS for b frame streams with no timestamps */
1893 for(i=0;i<ic->nb_streams;i++) {
1894 st = ic->streams[i];
1895 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
1897 ppktl = &ic->packet_buffer;
1899 if(ppkt1->stream_index != i)
1901 if(ppkt1->pkt->dts < 0)
1903 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
/* shift buffered dts (and the stream's cur_dts) by the computed delta;
 * the delta computation itself is not visible in this excerpt */
1905 ppkt1->pkt->dts -= delta;
1910 st->cur_dts -= delta;
1918 /*******************************************************/
/* Resume playback of a network-based stream (e.g. RTSP): delegates to the
 * input format's read_play callback, or returns AVERROR_NOTSUPP when the
 * demuxer provides none. */
1921 * start playing a network based stream (e.g. RTSP stream) at the
1924 int av_read_play(AVFormatContext *s)
1926 if (!s->iformat->read_play)
1927 return AVERROR_NOTSUPP;
1928 return s->iformat->read_play(s);
/* Pause a network-based stream: delegates to the input format's read_pause
 * callback, or returns AVERROR_NOTSUPP when the demuxer provides none. */
1932 * pause a network based stream (e.g. RTSP stream). Use av_read_play()
1935 int av_read_pause(AVFormatContext *s)
1937 if (!s->iformat->read_pause)
1938 return AVERROR_NOTSUPP;
1939 return s->iformat->read_pause(s);
/* Tear down an input context: free the pending parser packet, call the
 * demuxer's read_close, release per-stream resources (parser, index
 * entries), flush the packet queue, and free private data.
 * NOTE(review): several lines are missing here (stream free calls, the
 * url_fclose path behind must_open_file, and the context free). */
1943 * Close a media file (but not its codecs)
1945 * @param s media file handle
1947 void av_close_input_file(AVFormatContext *s)
1949 int i, must_open_file;
1952 /* free previous packet */
1953 if (s->cur_st && s->cur_st->parser)
1954 av_free_packet(&s->cur_pkt);
1956 if (s->iformat->read_close)
1957 s->iformat->read_close(s);
1958 for(i=0;i<s->nb_streams;i++) {
1959 /* free all data in a stream component */
1962 av_parser_close(st->parser);
1964 av_free(st->index_entries);
1967 flush_packet_queue(s);
/* formats with AVFMT_NOFILE manage their own I/O, so no file to close */
1969 if (s->iformat->flags & AVFMT_NOFILE) {
1972 if (must_open_file) {
1975 av_freep(&s->priv_data);
1980 * Add a new stream to a media file. Can only be called in the
1981 * read_header function. If the flag AVFMTCTX_NOHEADER is in the
1982 * format context, then new streams can be added in read_packet too.
1985 * @param s media file handle
1986 * @param id file format dependent stream id
/* Allocates and zero-initializes an AVStream, gives its codec context
 * defaults (with bit_rate forced to 0 for decoding), marks all timing
 * fields as unknown, installs MPEG-like pts settings (33 bits, 1/90000),
 * and appends it to s->streams.  Refuses when MAX_STREAMS is reached.
 * NOTE(review): the NULL-check after av_mallocz and the return statement
 * are not visible in this excerpt. */
1988 AVStream *av_new_stream(AVFormatContext *s, int id)
1992 if (s->nb_streams >= MAX_STREAMS)
1995 st = av_mallocz(sizeof(AVStream));
1998 avcodec_get_context_defaults(&st->codec);
2000 /* no default bitrate if decoding */
2001 st->codec.bit_rate = 0;
2003 st->index = s->nb_streams;
2005 st->start_time = AV_NOPTS_VALUE;
2006 st->duration = AV_NOPTS_VALUE;
2007 st->cur_dts = AV_NOPTS_VALUE;
2009 /* default pts settings is MPEG like */
2010 av_set_pts_info(st, 33, 1, 90000);
2011 st->last_IP_pts = AV_NOPTS_VALUE;
2013 s->streams[s->nb_streams++] = st;
2017 /************************************************************/
2018 /* output media file */
/* Allocate the output format's private data (zeroed) and forward the user
 * parameters to the muxer's set_parameters callback when it exists.
 * NOTE(review): the allocation NULL-check body and the final return are
 * missing from this excerpt. */
2020 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2024 if (s->oformat->priv_data_size > 0) {
2025 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2027 return AVERROR_NOMEM;
2029 s->priv_data = NULL;
2031 if (s->oformat->set_parameters) {
2032 ret = s->oformat->set_parameters(s, ap);
2040 * allocate the stream private data and write the stream header to an
2043 * @param s media file handle
2044 * @return 0 if OK. AVERROR_xxx if error.
/* After writing the header via the muxer, initialize each stream's pts
 * fraction so subsequent packets can be timestamped: audio advances by
 * sample count, video by codec ticks (see av_frac_add callers).
 * NOTE(review): error check on write_header and loop tail are missing
 * from this excerpt. */
2046 int av_write_header(AVFormatContext *s)
2051 ret = s->oformat->write_header(s);
2055 /* init PTS generation */
2056 for(i=0;i<s->nb_streams;i++) {
2059 switch (st->codec.codec_type) {
2060 case CODEC_TYPE_AUDIO:
2061 av_frac_init(&st->pts, 0, 0,
2062 (int64_t)st->time_base.num * st->codec.sample_rate);
2064 case CODEC_TYPE_VIDEO:
2065 av_frac_init(&st->pts, 0, 0,
2066 (int64_t)st->time_base.num * st->codec.time_base.den);
2075 //FIXME merge with compute_pkt_fields
/* Muxer-side timestamp fixup: fills pkt->duration from the frame duration
 * when absent, synthesizes pts/dts for encoders that emit none (only safe
 * without B frames), enforces monotone dts and pts >= dts, then advances
 * the stream's pts fraction for the next packet.
 * NOTE(review): numbering gaps hide several branches (the non-B-frame dts
 * path around 2109-2114 and the error returns). */
2076 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2077 int b_frames = FFMAX(st->codec.has_b_frames, st->codec.max_b_frames);
2078 int num, den, frame_size;
2080 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);
2082 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2085 /* duration field */
2086 if (pkt->duration == 0) {
2087 compute_frame_duration(&num, &den, st, NULL, pkt);
2089 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2093 //XXX/FIXME this is a temporary hack until all encoders output pts
2094 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
2096 // pkt->pts= st->cur_dts;
2097 pkt->pts= st->pts.val;
2100 //calculate dts from pts
2101 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
/* with B frames, dts of this packet is the pts of the previous I/P frame */
2103 if(st->last_IP_pts == AV_NOPTS_VALUE){
2104 st->last_IP_pts= -pkt->duration;
2106 if(st->last_IP_pts < pkt->pts){
2107 pkt->dts= st->last_IP_pts;
2108 st->last_IP_pts= pkt->pts;
2115 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2116 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %Ld >= %Ld\n", st->cur_dts, pkt->dts);
2119 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2120 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2124 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
2125 st->cur_dts= pkt->dts;
2126 st->pts.val= pkt->dts;
2129 switch (st->codec.codec_type) {
2130 case CODEC_TYPE_AUDIO:
2131 frame_size = get_audio_frame_size(&st->codec, pkt->size);
2133 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2134 but it would be better if we had the real timestamps from the encoder */
2135 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2136 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2139 case CODEC_TYPE_VIDEO:
2140 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec.time_base.num);
/* Wrap pkt->pts/dts into the stream's pts_wrap_bits range by masking with
 * (2^pts_wrap_bits - 1), matching container-level timestamp wrapping.
 * NOTE(review): the AV_NOPTS_VALUE guards around the masking (suggested by
 * the numbering gaps) are not visible in this excerpt. */
2148 static void truncate_ts(AVStream *st, AVPacket *pkt){
2149 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2152 // pkt->dts= 0; //this happens for low_delay=0 and b frames, FIXME, needs further invstigation about what we should do here
2154 pkt->pts &= pts_mask;
2155 pkt->dts &= pts_mask;
2159 * Write a packet to an output media file. The packet shall contain
2160 * one audio or video frame.
2162 * @param s media file handle
2163 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2164 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
/* Direct (non-interleaving) write path: fix up timestamps, truncate them
 * to the stream's wrap range, hand the packet to the muxer, and report any
 * byte-stream I/O error. */
2166 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2170 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2174 truncate_ts(s->streams[pkt->stream_index], pkt);
2176 ret= s->oformat->write_packet(s, pkt);
2178 ret= url_ferror(&s->pb);
2183 * interleave_packet implementation which will interleave per DTS.
/* Default interleaver: inserts the incoming packet into s->packet_buffer
 * kept sorted by dts (compared across streams via cross-multiplied time
 * bases), then emits the head packet once every stream has contributed at
 * least one buffered packet — or, when flushing, as soon as anything is
 * buffered.
 * NOTE(review): the pkt==NULL flush path, the stream_count accumulation,
 * the *out assignment and returns are hidden by numbering gaps. */
2185 static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2186 AVPacketList *pktl, **next_point, *this_pktl;
2188 int streams[MAX_STREAMS];
2191 AVStream *st= s->streams[ pkt->stream_index];
2193 assert(pkt->destruct != av_destruct_packet); //FIXME
2195 this_pktl = av_mallocz(sizeof(AVPacketList));
2196 this_pktl->pkt= *pkt;
/* own a copy: the caller's buffer may be reused after we return */
2197 av_dup_packet(&this_pktl->pkt);
2199 next_point = &s->packet_buffer;
2201 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
/* compare dts across different time bases without converting:
 * dts1 * tb1 > dts2 * tb2  <=>  dts1 * num1 * den2 > dts2 * num2 * den1 */
2202 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2203 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2204 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2206 next_point= &(*next_point)->next;
2208 this_pktl->next= *next_point;
2209 *next_point= this_pktl;
/* count how many distinct streams have at least one buffered packet */
2212 memset(streams, 0, sizeof(streams));
2213 pktl= s->packet_buffer;
2215 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2216 if(streams[ pktl->pkt.stream_index ] == 0)
2218 streams[ pktl->pkt.stream_index ]++;
2222 if(s->nb_streams == stream_count || (flush && stream_count)){
2223 pktl= s->packet_buffer;
2226 s->packet_buffer= pktl->next;
2230 av_init_packet(out);
2236 * Interleaves a AVPacket correctly so it can be muxed.
2237 * @param out the interleaved packet will be output here
2238 * @param in the input packet
2239 * @param flush 1 if no further packets are available as input and all
2240 * remaining packets should be output
2241 * @return 1 if a packet was output, 0 if no packet could be output,
2242 * < 0 if an error occured
/* Dispatch to the muxer's own interleave_packet callback when one exists,
 * otherwise fall back to the generic per-dts interleaver above. */
2244 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2245 if(s->oformat->interleave_packet)
2246 return s->oformat->interleave_packet(s, out, in, flush);
2248 return av_interleave_packet_per_dts(s, out, in, flush);
2252 * Writes a packet to an output media file ensuring correct interleaving.
2253 * The packet shall contain one audio or video frame.
2254 * If the packets are already correctly interleaved the application should
2255 * call av_write_frame() instead as its slightly faster, its also important
2256 * to keep in mind that completly non interleaved input will need huge amounts
2257 * of memory to interleave with this, so its prefereable to interleave at the
2260 * @param s media file handle
2261 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2262 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
/* Implementation: drop zero-sized audio packets, fix up timestamps, then
 * repeatedly drain the interleaver, truncating and writing every packet it
 * releases.  Finishes by reporting any byte-stream error.
 * NOTE(review): the drain loop header, error returns and the AV_NOPTS dts
 * handling are hidden by numbering gaps. */
2264 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2265 AVStream *st= s->streams[ pkt->stream_index];
2267 //FIXME/XXX/HACK drop zero sized packets
2268 if(st->codec.codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2271 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %Ld %Ld\n", pkt->size, pkt->dts, pkt->pts);
2272 if(compute_pkt_fields2(st, pkt) < 0)
2275 if(pkt->dts == AV_NOPTS_VALUE)
2280 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2281 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2284 truncate_ts(s->streams[opkt.stream_index], &opkt);
2285 ret= s->oformat->write_packet(s, &opkt);
2287 av_free_packet(&opkt);
2292 if(url_ferror(&s->pb))
2293 return url_ferror(&s->pb);
2298 * write the stream trailer to an output media file and and free the
2299 * file private data.
2301 * @param s media file handle
2302 * @return 0 if OK. AVERROR_xxx if error. */
/* Implementation: flush the interleaver (flush=1) writing every remaining
 * packet, check for I/O errors, write the muxer trailer, then free each
 * stream's private data and the context's private data.
 * NOTE(review): the flush-loop header and some returns are hidden by the
 * numbering gaps. */
2303 int av_write_trailer(AVFormatContext *s)
2309 ret= av_interleave_packet(s, &pkt, NULL, 1);
2310 if(ret<0) //FIXME cleanup needed for ret<0 ?
2315 truncate_ts(s->streams[pkt.stream_index], &pkt);
2316 ret= s->oformat->write_packet(s, &pkt);
2318 av_free_packet(&pkt);
2322 if(url_ferror(&s->pb))
2326 ret = s->oformat->write_trailer(s);
2329 ret=url_ferror(&s->pb);
2330 for(i=0;i<s->nb_streams;i++)
2331 av_freep(&s->streams[i]->priv_data);
2332 av_freep(&s->priv_data);
2336 /* "user interface" functions */
/* Log a human-readable summary of a format context: container name, URL,
 * duration (HH:MM:SS.t), start time, total bit rate, then one line per
 * stream (with the format-dependent id when AVFMT_SHOW_IDS is set).
 * NOTE(review): parameter list (index, url, is_output) and some scaffolding
 * lines are hidden by the numbering gaps. */
2338 void dump_format(AVFormatContext *ic,
2346 av_log(NULL, AV_LOG_DEBUG, "%s #%d, %s, %s '%s':\n",
2347 is_output ? "Output" : "Input",
2349 is_output ? ic->oformat->name : ic->iformat->name,
2350 is_output ? "to" : "from", url);
2352 av_log(NULL, AV_LOG_DEBUG, " Duration: ");
2353 if (ic->duration != AV_NOPTS_VALUE) {
2354 int hours, mins, secs, us;
2355 secs = ic->duration / AV_TIME_BASE;
2356 us = ic->duration % AV_TIME_BASE;
2361 av_log(NULL, AV_LOG_DEBUG, "%02d:%02d:%02d.%01d", hours, mins, secs,
2362 (10 * us) / AV_TIME_BASE);
2364 av_log(NULL, AV_LOG_DEBUG, "N/A");
2366 if (ic->start_time != AV_NOPTS_VALUE) {
2368 av_log(NULL, AV_LOG_DEBUG, ", start: ");
2369 secs = ic->start_time / AV_TIME_BASE;
2370 us = ic->start_time % AV_TIME_BASE;
2371 av_log(NULL, AV_LOG_DEBUG, "%d.%06d",
2372 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2374 av_log(NULL, AV_LOG_DEBUG, ", bitrate: ");
2376 av_log(NULL, AV_LOG_DEBUG,"%d kb/s", ic->bit_rate / 1000);
2378 av_log(NULL, AV_LOG_DEBUG, "N/A");
2380 av_log(NULL, AV_LOG_DEBUG, "\n");
2382 for(i=0;i<ic->nb_streams;i++) {
2383 AVStream *st = ic->streams[i];
2384 avcodec_string(buf, sizeof(buf), &st->codec, is_output);
2385 av_log(NULL, AV_LOG_DEBUG, " Stream #%d.%d", index, i);
2386 /* the pid is an important information, so we display it */
2387 /* XXX: add a generic system */
2389 flags = ic->oformat->flags;
2391 flags = ic->iformat->flags;
2392 if (flags & AVFMT_SHOW_IDS) {
2393 av_log(NULL, AV_LOG_DEBUG, "[0x%x]", st->id);
2395 av_log(NULL, AV_LOG_DEBUG, ": %s\n", buf);
/* Table of frame size / frame rate abbreviations shared by
 * parse_image_size() and parse_frame_rate().  Entries with a zero frame
 * rate (sqcif..4cif) define sizes only.
 * NOTE(review): the struct/typedef header lines above 'int frame_rate' are
 * missing from this excerpt. */
2402 int frame_rate, frame_rate_base;
2405 static AbvEntry frame_abvs[] = {
2406 { "ntsc", 720, 480, 30000, 1001 },
2407 { "pal", 720, 576, 25, 1 },
2408 { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
2409 { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
2410 { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
2411 { "spal", 768, 576, 25, 1 }, /* square pixel pal */
2412 { "film", 352, 240, 24, 1 },
2413 { "ntsc-film", 352, 240, 24000, 1001 },
2414 { "sqcif", 128, 96, 0, 0 },
2415 { "qcif", 176, 144, 0, 0 },
2416 { "cif", 352, 288, 0, 0 },
2417 { "4cif", 704, 576, 0, 0 },
/* Parse a picture size: first try an abbreviation from frame_abvs
 * ("pal", "cif", ...), otherwise parse "WIDTHxHEIGHT"-style numbers with
 * strtol.  Writes the result through width_ptr/height_ptr; rejects
 * non-positive dimensions.
 * NOTE(review): the loop header, the separator check between the two
 * strtol calls, and the returns are hidden by numbering gaps. */
2420 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2423 int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2425 int frame_width = 0, frame_height = 0;
2428 if (!strcmp(frame_abvs[i].abv, str)) {
2429 frame_width = frame_abvs[i].width;
2430 frame_height = frame_abvs[i].height;
2436 frame_width = strtol(p, (char **)&p, 10);
2439 frame_height = strtol(p, (char **)&p, 10);
2441 if (frame_width <= 0 || frame_height <= 0)
2443 *width_ptr = frame_width;
2444 *height_ptr = frame_height;
/* Parse a frame rate argument in three forms, in order of preference:
 * 1. an abbreviation from frame_abvs ("ntsc", "pal", ...);
 * 2. a fraction "num/den" or "num:den";
 * 3. a decimal number, converted against DEFAULT_FRAME_RATE_BASE.
 * Fails when either resulting component is zero.
 * NOTE(review): some returns and brace closures are hidden by gaps. */
2448 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2453 /* First, we check our abbreviation table */
2454 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2455 if (!strcmp(frame_abvs[i].abv, arg)) {
2456 *frame_rate = frame_abvs[i].frame_rate;
2457 *frame_rate_base = frame_abvs[i].frame_rate_base;
2461 /* Then, we try to parse it as fraction */
2462 cp = strchr(arg, '/');
2464 cp = strchr(arg, ':');
2467 *frame_rate = strtol(arg, &cpp, 10);
2468 if (cpp != arg || cpp == cp)
2469 *frame_rate_base = strtol(cp+1, &cpp, 10);
2474 /* Finally we give up and parse it as double */
2475 *frame_rate_base = DEFAULT_FRAME_RATE_BASE; //FIXME use av_d2q()
2476 *frame_rate = (int)(strtod(arg, 0) * (*frame_rate_base) + 0.5);
2478 if (!*frame_rate || !*frame_rate_base)
2485 * - If not a duration:
2486 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2487 * Time is localtime unless Z is suffixed to the end. In this case GMT
2488 * Return the date in micro seconds since 1970
2490 * HH[:MM[:SS[.m...]]]
/* Implementation sketch (heavily truncated excerpt): detect a trailing
 * Z/z for UTC, try the date formats then the time formats with
 * small_strptime into a struct tm, fall back to "seconds" parsing for
 * durations, convert with mktime/timegm semantics, and finally add the
 * fractional part scaled from up to 6 digits to microseconds.
 * NOTE(review): most of the conversion logic (mktime call, fractional-dot
 * detection, 'negative' handling) is missing from this excerpt. */
2493 int64_t parse_date(const char *datestr, int duration)
2499 static const char *date_fmt[] = {
2503 static const char *time_fmt[] = {
2513 time_t now = time(0);
2515 len = strlen(datestr);
2517 lastch = datestr[len - 1];
2520 is_utc = (lastch == 'z' || lastch == 'Z');
2522 memset(&dt, 0, sizeof(dt));
2527 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2528 q = small_strptime(p, date_fmt[i], &dt);
/* no date matched: default to today (local time), midnight */
2538 dt = *localtime(&now);
2540 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2545 if (*p == 'T' || *p == 't' || *p == ' ')
2548 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2549 q = small_strptime(p, time_fmt[i], &dt);
2559 q = small_strptime(p, time_fmt[0], &dt);
2561 dt.tm_sec = strtol(p, (char **)&q, 10);
2567 /* Now we have all the fields that we can get */
2572 return now * int64_t_C(1000000);
2576 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2578 dt.tm_isdst = -1; /* unknown */
/* fractional seconds: up to 6 digits after the dot -> microseconds */
2591 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2594 val += n * (*q - '0');
2598 return negative ? -t : t;
2601 /* syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done. Return
/* Scan a query string for tag1: copy each tag name (bounded by sizeof(tag))
 * and, after '=', its value into 'arg' (bounded by arg_size); return when
 * the tag matches tag1.
 * NOTE(review): the outer loop, the '%' decoding hinted at by the comment,
 * and the return statements are missing from this excerpt. */
2603 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2613 while (*p != '\0' && *p != '=' && *p != '&') {
2614 if ((q - tag) < sizeof(tag) - 1)
2622 while (*p != '&' && *p != '\0') {
2623 if ((q - arg) < arg_size - 1) {
2633 if (!strcmp(tag, tag1))
2642 /* Return in 'buf' the path with '%d' replaced by number. Also handles
2643 the '%0nd' format where 'n' is the total number of digits and
2644 '%%'. Return 0 if OK, and -1 if format error */
/* Implementation: scan 'path' character by character; on '%', read an
 * optional zero-padded width then a 'd', format 'number' with snprintf
 * into buf1 and append it; copy everything else verbatim.  Fails when the
 * output would overflow buf_size or when no '%d' was found.
 * NOTE(review): the scan loop and '%%' handling are partially hidden by
 * the numbering gaps. */
2645 int get_frame_filename(char *buf, int buf_size,
2646 const char *path, int number)
2649 char *q, buf1[20], c;
2650 int nd, len, percentd_found;
2662 while (isdigit(*p)) {
2663 nd = nd * 10 + *p++ - '0';
2666 } while (isdigit(c));
2675 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2677 if ((q - buf + len) > buf_size - 1)
2679 memcpy(q, buf1, len);
2687 if ((q - buf) < buf_size - 1)
2691 if (!percentd_found)
2701 * Print nice hexa dump of a buffer
2702 * @param f stream for output
2704 * @param size buffer size
/* 16 bytes per row: offset, hex bytes, then the printable-ASCII rendering
 * (non-printables shown differently — the substitution line is missing
 * from this excerpt). */
2706 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2710 for(i=0;i<size;i+=16) {
2714 fprintf(f, "%08x ", i);
2717 fprintf(f, " %02x", buf[i+j]);
2722 for(j=0;j<len;j++) {
2724 if (c < ' ' || c > '~')
2726 fprintf(f, "%c", c);
2733 * Print on 'f' a nice dump of a packet
2734 * @param f stream for output
2735 * @param pkt packet to dump
2736 * @param dump_payload true if the payload must be displayed too
2738 //FIXME needs to know the time_base
/* Prints stream index, keyframe flag, duration, dts and pts (all scaled by
 * AV_TIME_BASE for display), the size, and optionally a hex dump of the
 * payload. */
2739 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2741 fprintf(f, "stream #%d:\n", pkt->stream_index);
2742 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2743 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2744 /* DTS is _always_ valid after av_read_frame() */
2745 fprintf(f, " dts=");
2746 if (pkt->dts == AV_NOPTS_VALUE)
2749 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
2750 /* PTS may be not known if B frames are present */
2751 fprintf(f, " pts=");
2752 if (pkt->pts == AV_NOPTS_VALUE)
2755 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
2757 fprintf(f, " size=%d\n", pkt->size);
2759 av_hex_dump(f, pkt->data, pkt->size);
/* Split a URL of the form proto://[auth@]host[:port]/path into its parts,
 * copying each into the caller-provided, size-bounded buffers.  The '@'
 * separating credentials from the hostname is only honored before the
 * first '/'.
 * NOTE(review): the port_ptr parameter, several buffer terminations, and
 * loop/brace closures are hidden by the numbering gaps. */
2762 void url_split(char *proto, int proto_size,
2763 char *authorization, int authorization_size,
2764 char *hostname, int hostname_size,
2766 char *path, int path_size,
/* copy the scheme up to ':' */
2777 while (*p != ':' && *p != '\0') {
2778 if ((q - proto) < proto_size - 1)
2784 if (authorization_size > 0)
2785 authorization[0] = '\0';
2789 if (hostname_size > 0)
2793 char *at,*slash; // PETR: position of '@' character and '/' character
2800 at = strchr(p,'@'); // PETR: get the position of '@'
2801 slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
2802 if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
2804 q = at ? authorization : hostname; // PETR: if '@' exists starting with auth.
2806 while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
2807 if (*p == '@') { // PETR: passed '@'
2808 if (authorization_size > 0)
2812 } else if (!at) { // PETR: hostname
2813 if ((q - hostname) < hostname_size - 1)
2816 if ((q - authorization) < authorization_size - 1)
2821 if (hostname_size > 0)
2825 port = strtoul(p, (char **)&p, 10);
2830 pstrcpy(path, path_size, p);
2834 * Set the pts for a given stream
2836 * @param pts_wrap_bits number of bits effectively used by the pts
2837 * (used for wrap control, 33 is the value for MPEG)
2838 * @param pts_num numerator to convert to seconds (MPEG: 1)
2839 * @param pts_den denominator to convert to seconds (MPEG: 90000)
/* Simply records the wrap width and the stream time base (num/den). */
2841 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
2842 int pts_num, int pts_den)
2844 s->pts_wrap_bits = pts_wrap_bits;
2845 s->time_base.num = pts_num;
2846 s->time_base.den = pts_den;
2849 /* fraction handling */
2852 * f = val + (num / den) + 0.5. 'num' is normalized so that it is such
2853 * as 0 <= num < den.
2855 * @param f fractional number
2856 * @param val integer value
2857 * @param num must be >= 0
2858 * @param den must be >= 1
/* NOTE(review): the body of av_frac_init (field assignments/normalization)
 * is missing from this excerpt — only its contract comment is visible. */
2860 void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
2872 /* set f to (val + 0.5) */
2873 void av_frac_set(AVFrac *f, int64_t val)
/* setting num = den/2 represents the +0.5 rounding bias */
2876 f->num = f->den >> 1;
2880 * Fractionnal addition to f: f = f + (incr / f->den)
2882 * @param f fractional number
2883 * @param incr increment, can be positive or negative
/* Adds incr to the numerator, then renormalizes so 0 <= num < den by
 * moving whole multiples of den into f->val (both the negative and the
 * num >= den overflow cases are handled; some lines between are missing
 * from this excerpt). */
2885 void av_frac_add(AVFrac *f, int64_t incr)
2889 num = f->num + incr;
2892 f->val += num / den;
2898 } else if (num >= den) {
2899 f->val += num / den;
2906 * register a new image format
2907 * @param img_fmt Image format descriptor
/* Appends img_fmt at the tail of the first_image_format linked list,
 * mirroring av_register_input_format/av_register_output_format. */
2909 void av_register_image_format(AVImageFormat *img_fmt)
2913 p = &first_image_format;
2914 while (*p != NULL) p = &(*p)->next;
2916 img_fmt->next = NULL;
2919 /* guess image format */
/* Walks the registered image formats, invoking each img_probe on the probe
 * data, and keeps the highest-scoring candidate.
 * NOTE(review): the score_max initialization, the fmt assignment inside
 * the if, and the return are missing from this excerpt. */
2920 AVImageFormat *av_probe_image_format(AVProbeData *pd)
2922 AVImageFormat *fmt1, *fmt;
2923 int score, score_max;
2927 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
2928 if (fmt1->img_probe) {
2929 score = fmt1->img_probe(pd);
2930 if (score > score_max) {
/* Pick an image format by matching the filename extension against each
 * registered format's extension list (via match_ext). */
2939 AVImageFormat *guess_image_format(const char *filename)
2941 AVImageFormat *fmt1;
2943 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
2944 if (fmt1->extensions && match_ext(filename, fmt1->extensions))
2951 * Read an image from a stream.
2952 * @param gb byte stream containing the image
2953 * @param fmt image format, NULL if probing is required
/* When no format is given, reads up to PROBE_BUF_SIZE bytes, probes with
 * av_probe_image_format, and seeks back to the starting offset before
 * decoding with the chosen format's img_read callback.
 * NOTE(review): the fmt parameter and some intermediate lines are hidden
 * by the numbering gaps. */
2955 int av_read_image(ByteIOContext *pb, const char *filename,
2957 int (*alloc_cb)(void *, AVImageInfo *info), void *opaque)
2959 char buf[PROBE_BUF_SIZE];
2960 AVProbeData probe_data, *pd = &probe_data;
2965 pd->filename = filename;
2967 pos = url_ftell(pb);
2968 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
2969 url_fseek(pb, pos, SEEK_SET);
2970 fmt = av_probe_image_format(pd);
2973 return AVERROR_NOFMT;
2974 ret = fmt->img_read(pb, alloc_cb, opaque);
2979 * Write an image to a stream.
2980 * @param pb byte stream for the image output
2981 * @param fmt image format
2982 * @param img image data and informations
2984 int av_write_image(ByteIOContext *pb, AVImageFormat *fmt, AVImageInfo *img)
2986 return fmt->img_write(pb, img);