2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/* Heads of the global singly linked lists of registered demuxers,
   muxers and image formats (appended to by the register functions). */
24 AVInputFormat *first_iformat = NULL;
25 AVOutputFormat *first_oformat = NULL;
26 AVImageFormat *first_image_format = NULL;
/* Append 'format' at the tail of the global demuxer list. */
28 void av_register_input_format(AVInputFormat *format)
    /* walk to the tail of the singly linked list */
32 while (*p != NULL) p = &(*p)->next;
/* Append 'format' at the tail of the global muxer list. */
37 void av_register_output_format(AVOutputFormat *format)
    /* walk to the tail of the singly linked list */
41 while (*p != NULL) p = &(*p)->next;
/* Return nonzero if the extension of 'filename' appears in the
   comma-separated 'extensions' list (case-insensitive), 0 otherwise. */
46 int match_ext(const char *filename, const char *extensions)
54 ext = strrchr(filename, '.');
    /* copy one candidate extension into ext1, bounded by its size */
60 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
63 if (!strcasecmp(ext1, ext))
/* Return the registered muxer best matching the given short name,
   MIME type and/or filename extension: each match adds to a score and
   the highest-scoring format wins (NULL if nothing matches). */
73 AVOutputFormat *guess_format(const char *short_name, const char *filename,
74 const char *mime_type)
76 AVOutputFormat *fmt, *fmt_found;
79 /* specific test for image sequences */
80 if (!short_name && filename &&
81 filename_number_test(filename) >= 0 &&
82 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
83 return guess_format("image2", NULL, NULL);
85 if (!short_name && filename &&
86 filename_number_test(filename) >= 0 &&
87 guess_image_format(filename)) {
88 return guess_format("image", NULL, NULL);
91 /* find the proper file type */
97 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
99 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
101 if (filename && fmt->extensions &&
102 match_ext(filename, fmt->extensions)) {
105 if (score > score_max) {
/* Like guess_format(), but prefer the streamable "<name>_stream"
   variant of the guessed muxer when one is registered. */
114 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
115 const char *mime_type)
117 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
120 AVOutputFormat *stream_fmt;
121 char stream_format_name[64];
123 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
124 stream_fmt = guess_format(stream_format_name, NULL, NULL);
134 * Guesses the codec id based upon muxer and filename.
136 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
137 const char *filename, const char *mime_type, enum CodecType type){
138 if(type == CODEC_TYPE_VIDEO){
139 enum CodecID codec_id= CODEC_ID_NONE;
    /* image2 muxers derive the codec from the filename extension */
141 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
142 codec_id= av_guess_image2_codec(filename);
    /* fall back to the muxer's default video codec */
144 if(codec_id == CODEC_ID_NONE)
145 codec_id= fmt->video_codec;
147 }else if(type == CODEC_TYPE_AUDIO)
148 return fmt->audio_codec;
150 return CODEC_ID_NONE;
/* Find a registered demuxer by its short name (NULL if not found). */
153 AVInputFormat *av_find_input_format(const char *short_name)
156 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
157 if (!strcmp(fmt->name, short_name))
163 /* memory handling */
166 * Default packet destructor: frees the payload and clears data/size.
168 static void av_destruct_packet(AVPacket *pkt)
171 pkt->data = NULL; pkt->size = 0;
175 * Allocate the payload of a packet and initialize its fields to default values.
178 * @param size wanted payload size
179 * @return 0 if OK. AVERROR_xxx otherwise.
181 int av_new_packet(AVPacket *pkt, int size)
    /* reject sizes whose padded length would overflow */
184 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
185 return AVERROR_NOMEM;
186 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
188 return AVERROR_NOMEM;
    /* zero the padding so decoders reading past the end see deterministic bytes */
189 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
194 pkt->destruct = av_destruct_packet;
198 /* This is a hack - the packet memory allocation stuff is broken. The
199 packet is allocated if it was not really allocated */
/* Ensure the packet owns its payload: copy data that is owned elsewhere
   into a freshly allocated, padded buffer. */
200 int av_dup_packet(AVPacket *pkt)
202 if (pkt->destruct != av_destruct_packet) {
204 /* we duplicate the packet and don't forget to put the padding
    /* overflow check for size + padding */
206 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
207 return AVERROR_NOMEM;
208 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
210 return AVERROR_NOMEM;
212 memcpy(data, pkt->data, pkt->size);
213 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
215 pkt->destruct = av_destruct_packet;
/* Allocate a FIFO of 'size' bytes and reset its read/write pointers. */
222 int fifo_init(FifoBuffer *f, int size)
224 f->buffer = av_malloc(size);
227 f->end = f->buffer + size;
228 f->wptr = f->rptr = f->buffer;
/* Free the FIFO's backing buffer. */
232 void fifo_free(FifoBuffer *f)
/* Return the number of buffered bytes between 'rptr' and the write
   pointer, accounting for wrap-around in the circular buffer. */
237 int fifo_size(FifoBuffer *f, uint8_t *rptr)
244 if (f->wptr >= rptr) {
245 size = f->wptr - rptr;
247 size = (f->end - rptr) + (f->wptr - f->buffer);
252 /* get data from the fifo (return -1 if not enough data) */
253 int fifo_read(FifoBuffer *f, uint8_t *buf, int buf_size, uint8_t **rptr_ptr)
    /* compute available bytes, handling wrap-around */
262 if (f->wptr >= rptr) {
263 size = f->wptr - rptr;
265 size = (f->end - rptr) + (f->wptr - f->buffer);
    /* copy out in chunks, wrapping at the buffer end */
270 while (buf_size > 0) {
274 memcpy(buf, rptr, len);
/* Grow the FIFO to 'new_size' bytes while preserving buffered data. */
285 void fifo_realloc(FifoBuffer *f, unsigned int new_size){
286 unsigned int old_size= f->end - f->buffer;
288 if(old_size < new_size){
289 uint8_t *old= f->buffer;
291 f->buffer= av_realloc(f->buffer, new_size);
    /* rebase read/write pointers onto the possibly-moved buffer */
293 f->rptr += f->buffer - old;
294 f->wptr += f->buffer - old;
296 if(f->wptr < f->rptr){
    /* wrapped case: move the tail segment to the end of the grown buffer */
297 memmove(f->rptr + new_size - old_size, f->rptr, f->buffer + old_size - f->rptr);
298 f->rptr += new_size - old_size;
300 f->end= f->buffer + new_size;
/* Append 'size' bytes to the FIFO, wrapping at the buffer end. */
304 void fifo_write(FifoBuffer *f, uint8_t *buf, int size, uint8_t **wptr_ptr)
317 memcpy(wptr, buf, len);
327 /* write data from the fifo into 'pb' (return -1 if not enough data) */
328 int put_fifo(ByteIOContext *pb, FifoBuffer *f, int buf_size, uint8_t **rptr_ptr)
330 uint8_t *rptr = *rptr_ptr;
    /* compute available bytes, handling wrap-around */
333 if (f->wptr >= rptr) {
334 size = f->wptr - rptr;
336 size = (f->end - rptr) + (f->wptr - f->buffer);
    /* emit in chunks, wrapping at the buffer end */
341 while (buf_size > 0) {
345 put_buffer(pb, rptr, len);
/* Return >= 0 if 'filename' contains a usable frame-number pattern
   (checked by expanding it for frame 1), < 0 otherwise. */
355 int filename_number_test(const char *filename)
360 return get_frame_filename(buf, sizeof(buf), filename, 1);
363 /* guess file format: score every registered demuxer against the probe
     data (or, lacking a probe callback, the filename extension) and
     return the best match */
364 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
366 AVInputFormat *fmt1, *fmt;
367 int score, score_max;
371 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
    /* formats that need an open file are skipped when none is opened */
372 if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
375 if (fmt1->read_probe) {
376 score = fmt1->read_probe(pd);
377 } else if (fmt1->extensions) {
378 if (match_ext(pd->filename, fmt1->extensions)) {
382 if (score > score_max) {
390 /************************************************************/
391 /* input media file */
394 * open a media file from an IO stream. 'fmt' must be specified.
/* Return a printable name for av_log() messages from an AVFormatContext:
   the demuxer or muxer name when one is attached. */
397 static const char* format_to_name(void* ptr)
399 AVFormatContext* fc = (AVFormatContext*) ptr;
400 if(fc->iformat) return fc->iformat->name;
401 else if(fc->oformat) return fc->oformat->name;
405 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name };
/* Allocate a zeroed AVFormatContext with its AVClass set for logging. */
407 AVFormatContext *av_alloc_format_context(void)
410 ic = av_mallocz(sizeof(AVFormatContext));
412 ic->av_class = &av_format_context_class;
/* Open a media file from an already-opened IO stream; 'fmt' must be
   specified. Allocates the context and demuxer private data, reads the
   container header and records the data offset. */
416 int av_open_input_stream(AVFormatContext **ic_ptr,
417 ByteIOContext *pb, const char *filename,
418 AVInputFormat *fmt, AVFormatParameters *ap)
423 ic = av_alloc_format_context();
431 ic->duration = AV_NOPTS_VALUE;
432 ic->start_time = AV_NOPTS_VALUE;
433 pstrcpy(ic->filename, sizeof(ic->filename), filename);
435 /* allocate private data */
436 if (fmt->priv_data_size > 0) {
437 ic->priv_data = av_mallocz(fmt->priv_data_size);
438 if (!ic->priv_data) {
443 ic->priv_data = NULL;
    /* let the demuxer parse the container header */
446 err = ic->iformat->read_header(ic, ap);
451 ic->data_offset = url_ftell(&ic->pb);
    /* error path: release what was allocated above */
457 av_freep(&ic->priv_data);
464 #define PROBE_BUF_SIZE 2048
467 * Open a media file as input. The codecs are not opened. Only the file
468 * header (if present) is read.
470 * @param ic_ptr the opened media file handle is put here
471 * @param filename filename to open.
472 * @param fmt if non NULL, force the file format to use
473 * @param buf_size optional buffer size (zero if default is OK)
474 * @param ap additional parameters needed when opening the file (NULL if default)
475 * @return 0 if OK. AVERROR_xxx otherwise.
477 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
480 AVFormatParameters *ap)
482 int err, must_open_file, file_opened;
483 uint8_t buf[PROBE_BUF_SIZE];
484 AVProbeData probe_data, *pd = &probe_data;
485 ByteIOContext pb1, *pb = &pb1;
490 pd->filename = filename;
495 /* guess format if no file can be opened */
496 fmt = av_probe_input_format(pd, 0);
499 /* do not open file if the format does not need it. XXX: specific
500 hack needed to handle RTSP/TCP */
502 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
504 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it's uninitialized
507 if (!fmt || must_open_file) {
508 /* if no file needed do not try to open one */
509 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
515 url_setbufsize(pb, buf_size);
518 /* read probe data */
519 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
    /* non-seekable streams (EPIPE) must be reopened after probing */
520 if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
522 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
530 /* guess file format */
532 fmt = av_probe_input_format(pd, 1);
535 /* if still no format found, error */
541 /* XXX: suppress this hack for redirectors */
542 #ifdef CONFIG_NETWORK
543 if (fmt == &redir_demux) {
544 err = redir_open(ic_ptr, pb);
550 /* check filename in case of an image number is expected */
551 if (fmt->flags & AVFMT_NEEDNUMBER) {
552 if (filename_number_test(filename) < 0) {
553 err = AVERROR_NUMEXPECTED;
557 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
569 /*******************************************************/
572 * Read a transport packet from a media file. This function is
573 * obsolete and should never be used. Use av_read_frame() instead.
575 * @param s media file handle
576 * @param pkt is filled
577 * @return 0 if OK. AVERROR_xxx if error.
579 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
581 return s->iformat->read_packet(s, pkt);
584 /**********************************************************/
586 /* get the number of samples of an audio frame. Return (-1) if error */
587 static int get_audio_frame_size(AVCodecContext *enc, int size)
591 if (enc->frame_size <= 1) {
592 /* specific hack for pcm codecs because no frame size is
594 switch(enc->codec_id) {
595 case CODEC_ID_PCM_S16LE:
596 case CODEC_ID_PCM_S16BE:
597 case CODEC_ID_PCM_U16LE:
598 case CODEC_ID_PCM_U16BE:
599 if (enc->channels == 0)
    /* 16-bit PCM: 2 bytes per sample per channel */
601 frame_size = size / (2 * enc->channels);
603 case CODEC_ID_PCM_S8:
604 case CODEC_ID_PCM_U8:
605 case CODEC_ID_PCM_MULAW:
606 case CODEC_ID_PCM_ALAW:
607 if (enc->channels == 0)
    /* 8-bit PCM: 1 byte per sample per channel */
609 frame_size = size / (enc->channels);
612 /* used for example by ADPCM codecs */
613 if (enc->bit_rate == 0)
615 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
619 frame_size = enc->frame_size;
625 /* compute the frame duration as a *pnum / *pden fraction of a second;
     both are set to 0 if not available */
626 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
627 AVCodecParserContext *pc, AVPacket *pkt)
633 switch(st->codec.codec_type) {
634 case CODEC_TYPE_VIDEO:
    /* prefer the stream time base when it looks like a real frame rate */
635 if(st->time_base.num*1000LL > st->time_base.den){
636 *pnum = st->time_base.num;
637 *pden = st->time_base.den;
638 }else if(st->codec.time_base.num*1000LL > st->codec.time_base.den){
639 *pnum = st->codec.time_base.num;
640 *pden = st->codec.time_base.den;
641 if (pc && pc->repeat_pict) {
    /* scale the duration for repeated fields (e.g. 3:2 pulldown) */
643 *pnum = (*pnum) * (2 + pc->repeat_pict);
647 case CODEC_TYPE_AUDIO:
648 frame_size = get_audio_frame_size(&st->codec, pkt->size);
652 *pden = st->codec.sample_rate;
/* Return nonzero if every frame of the codec is a keyframe (intra-only),
   so all packets can be flagged PKT_FLAG_KEY. */
659 static int is_intra_only(AVCodecContext *enc){
660 if(enc->codec_type == CODEC_TYPE_AUDIO){
662 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
663 switch(enc->codec_id){
665 case CODEC_ID_MJPEGB:
667 case CODEC_ID_RAWVIDEO:
668 case CODEC_ID_DVVIDEO:
669 case CODEC_ID_HUFFYUV:
670 case CODEC_ID_FFVHUFF:
/* Reconstruct a full timestamp from its low 'lsb_bits' bits, choosing
   the value nearest 'last_ts' — handles timestamp wrap-around. */
681 static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
682 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
683 int64_t delta= last_ts - mask/2;
684 return ((lsb - delta)&mask) + delta;
/* Fill in missing pts/dts/duration/key-frame fields of a demuxed packet,
   unwrapping timestamps and interpolating from the stream's running DTS. */
687 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
688 AVCodecParserContext *pc, AVPacket *pkt)
690 int num, den, presentation_delayed;
691 /* handle wrapping */
692 if(st->cur_dts != AV_NOPTS_VALUE){
693 if(pkt->pts != AV_NOPTS_VALUE)
694 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
695 if(pkt->dts != AV_NOPTS_VALUE)
696 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
    /* derive a duration from the frame rate / sample rate if unset */
699 if (pkt->duration == 0) {
700 compute_frame_duration(&num, &den, st, pc, pkt);
702 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
706 if(is_intra_only(&st->codec))
707 pkt->flags |= PKT_FLAG_KEY;
709 /* do we have a video B frame ? */
710 presentation_delayed = 0;
711 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
712 /* XXX: need has_b_frame, but cannot get it if the codec is
714 if (( st->codec.codec_id == CODEC_ID_H264
715 || st->codec.has_b_frames) &&
716 pc && pc->pict_type != FF_B_TYPE)
717 presentation_delayed = 1;
718 /* this may be redundant, but it shouldn't hurt */
719 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
720 presentation_delayed = 1;
    /* establish a DTS origin for the stream */
723 if(st->cur_dts == AV_NOPTS_VALUE){
724 if(presentation_delayed) st->cur_dts = -pkt->duration;
725 else st->cur_dts = 0;
728 //    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
729 /* interpolate PTS and DTS if they are not present */
730 if (presentation_delayed) {
731 /* DTS = decompression time stamp */
732 /* PTS = presentation time stamp */
733 if (pkt->dts == AV_NOPTS_VALUE) {
734 /* if we know the last pts, use it */
735 if(st->last_IP_pts != AV_NOPTS_VALUE)
736 st->cur_dts = pkt->dts = st->last_IP_pts;
738 pkt->dts = st->cur_dts;
740 st->cur_dts = pkt->dts;
742 /* this is tricky: the dts must be incremented by the duration
743 of the frame we are displaying, i.e. the last I or P frame */
744 if (st->last_IP_duration == 0)
745 st->cur_dts += pkt->duration;
747 st->cur_dts += st->last_IP_duration;
748 st->last_IP_duration = pkt->duration;
749 st->last_IP_pts= pkt->pts;
750 /* cannot compute PTS if not present (we can compute it only
751 by knowing the future) */
752 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
    /* nudge pts by one duration when it lines up better with cur_dts */
753 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
754 int64_t old_diff= ABS(st->cur_dts - pkt->duration - pkt->pts);
755 int64_t new_diff= ABS(st->cur_dts - pkt->pts);
756 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
757 pkt->pts += pkt->duration;
758 //            av_log(NULL, AV_LOG_DEBUG, "id:%d old:%Ld new:%Ld dur:%d cur:%Ld size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
762 /* presentation is not delayed : PTS and DTS are the same */
763 if (pkt->pts == AV_NOPTS_VALUE) {
764 if (pkt->dts == AV_NOPTS_VALUE) {
765 pkt->pts = st->cur_dts;
766 pkt->dts = st->cur_dts;
769 st->cur_dts = pkt->dts;
773 st->cur_dts = pkt->pts;
776 st->cur_dts += pkt->duration;
778 //    av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
783 /* key frame computation */
784 switch(st->codec.codec_type) {
785 case CODEC_TYPE_VIDEO:
786 if (pc->pict_type == FF_I_TYPE)
787 pkt->flags |= PKT_FLAG_KEY;
789 case CODEC_TYPE_AUDIO:
790 pkt->flags |= PKT_FLAG_KEY;
/* Packet destructor that does not free the payload — the data is owned
   elsewhere (e.g. by the parser's internal buffer). */
798 void av_destruct_packet_nofree(AVPacket *pkt)
800 pkt->data = NULL; pkt->size = 0;
/* Produce the next frame-sized packet: either pass raw packets through,
   or run the stream's parser over the current packet's bytes until a
   complete frame is assembled. */
803 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
809 /* select current input stream component */
813 /* no parsing needed: we just output the packet as is */
814 /* raw data support */
816 compute_pkt_fields(s, st, NULL, pkt);
    /* feed remaining bytes of the current packet to the parser */
819 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
820 len = av_parser_parse(st->parser, &st->codec, &pkt->data, &pkt->size,
821 s->cur_ptr, s->cur_len,
822 s->cur_pkt.pts, s->cur_pkt.dts);
    /* consume the timestamps only once per source packet */
823 s->cur_pkt.pts = AV_NOPTS_VALUE;
824 s->cur_pkt.dts = AV_NOPTS_VALUE;
825 /* increment read pointer */
829 /* return packet if any */
833 pkt->stream_index = st->index;
834 pkt->pts = st->parser->pts;
835 pkt->dts = st->parser->dts;
836 pkt->destruct = av_destruct_packet_nofree;
837 compute_pkt_fields(s, st, st->parser, pkt);
842 av_free_packet(&s->cur_pkt);
846 /* read next packet */
847 ret = av_read_packet(s, &s->cur_pkt);
    /* EOF: flush each parser to recover buffered final frames */
851 /* return the last frames, if any */
852 for(i = 0; i < s->nb_streams; i++) {
855 av_parser_parse(st->parser, &st->codec,
856 &pkt->data, &pkt->size,
858 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
863 /* no more packets: really terminates parsing */
867 st = s->streams[s->cur_pkt.stream_index];
870 s->cur_ptr = s->cur_pkt.data;
871 s->cur_len = s->cur_pkt.size;
    /* lazily create the parser the first time the stream needs one */
872 if (st->need_parsing && !st->parser) {
873 st->parser = av_parser_init(st->codec.codec_id);
875 /* no parser available : just output the raw packets */
876 st->need_parsing = 0;
884 * Return the next frame of a stream. The returned packet is valid
885 * until the next av_read_frame() or until av_close_input_file() and
886 * must be freed with av_free_packet. For video, the packet contains
887 * exactly one frame. For audio, it contains an integer number of
888 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
889 * data). If the audio frames have a variable size (e.g. MPEG audio),
890 * then it contains one frame.
892 * pkt->pts, pkt->dts and pkt->duration are always set to correct
893 * values in AV_TIME_BASE unit (and guessed if the format cannot
894 * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
895 * has B frames, so it is better to rely on pkt->dts if you do not
896 * decompress the payload.
898 * Return 0 if OK, < 0 if error or end of file.
900 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
904 pktl = s->packet_buffer;
906 /* read packet from packet buffer, if there is data */
908 s->packet_buffer = pktl->next;
912 return av_read_frame_internal(s, pkt);
916 /* XXX: suppress the packet queue */
/* Free every buffered packet in the context's packet queue. */
917 static void flush_packet_queue(AVFormatContext *s)
922 pktl = s->packet_buffer;
925 s->packet_buffer = pktl->next;
926 av_free_packet(&pktl->pkt);
931 /*******************************************************/
/* Pick a default stream for seeking: the first video stream if any. */
934 int av_find_default_stream_index(AVFormatContext *s)
939 if (s->nb_streams <= 0)
941 for(i = 0; i < s->nb_streams; i++) {
943 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
950 /* flush the frame reader */
951 static void av_read_frame_flush(AVFormatContext *s)
956 flush_packet_queue(s);
958 /* free previous packet */
960 if (s->cur_st->parser)
961 av_free_packet(&s->cur_pkt);
968 /* for each stream, reset read state */
969 for(i = 0; i < s->nb_streams; i++) {
973 av_parser_close(st->parser);
976 st->last_IP_pts = AV_NOPTS_VALUE;
977 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
982 * Updates cur_dts of all streams based on the given timestamp and AVStream.
983 * Stream ref_st is unchanged, others set cur_dts in their native timebase;
984 * only needed for timestamp wrapping or if (dts not set and pts!=dts).
985 * @param timestamp new dts expressed in time_base of param ref_st
986 * @param ref_st reference stream giving time_base of param timestamp
988 static void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
991 for(i = 0; i < s->nb_streams; i++) {
992 AVStream *st = s->streams[i];
    /* rescale from ref_st's time base into st's time base */
994 st->cur_dts = av_rescale(timestamp,
995 st->time_base.den * (int64_t)ref_st->time_base.num,
996 st->time_base.num * (int64_t)ref_st->time_base.den);
1001 * Add an index entry into a sorted list, updating it if it is already there.
1002 * @param timestamp timestamp in the timebase of the given stream
1004 int av_add_index_entry(AVStream *st,
1005 int64_t pos, int64_t timestamp, int distance, int flags)
1007 AVIndexEntry *entries, *ie;
    /* refuse growth that would overflow the allocation size */
1010 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1013 entries = av_fast_realloc(st->index_entries,
1014 &st->index_entries_allocated_size,
1015 (st->nb_index_entries + 1) *
1016 sizeof(AVIndexEntry));
1020 st->index_entries= entries;
1022 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
    /* not found: append at the end */
1025 index= st->nb_index_entries++;
1026 ie= &entries[index];
1027 assert(index==0 || ie[-1].timestamp < timestamp);
1029 ie= &entries[index];
1030 if(ie->timestamp != timestamp){
1031 if(ie->timestamp <= timestamp)
    /* shift later entries to keep the list sorted */
1033 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1034 st->nb_index_entries++;
1035 }else if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
1036 distance= ie->min_distance;
1040 ie->timestamp = timestamp;
1041 ie->min_distance= distance;
1047 /* build an index for raw streams using a parser: scan the whole file
     and record one entry per key frame */
1048 static void av_build_index_raw(AVFormatContext *s)
1050 AVPacket pkt1, *pkt = &pkt1;
1055 av_read_frame_flush(s);
1056 url_fseek(&s->pb, s->data_offset, SEEK_SET);
1059 ret = av_read_frame(s, pkt);
1062 if (pkt->stream_index == 0 && st->parser &&
1063 (pkt->flags & PKT_FLAG_KEY)) {
1064 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1065 0, AVINDEX_KEYFRAME);
1067 av_free_packet(pkt);
1071 /* return TRUE if we deal with a raw stream (raw codec data and
1073 static int is_raw_stream(AVFormatContext *s)
1077 if (s->nb_streams != 1)
1080 if (!st->need_parsing)
1086 * Gets the index for a specific timestamp.
1087 * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
1088 * the timestamp which is <= the requested one, if backward is 0
1089 * then it will be >=
1090 * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
1091 * @return < 0 if no such timestamp could be found
1093 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1096 AVIndexEntry *entries= st->index_entries;
1097 int nb_entries= st->nb_index_entries;
    /* binary search over the sorted entries */
1106 timestamp = entries[m].timestamp;
1107 if(timestamp >= wanted_timestamp)
1109 if(timestamp <= wanted_timestamp)
1112 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
    /* unless ANY is allowed, walk to the nearest keyframe entry */
1114 if(!(flags & AVSEEK_FLAG_ANY)){
1115 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1116 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1128 * Does a binary search using av_index_search_timestamp() and AVCodec.read_timestamp().
1129 * This isn't supposed to be called directly by a user application, but by demuxers.
1130 * @param target_ts target timestamp in the time base of the given stream
1131 * @param stream_index stream number
1133 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1134 AVInputFormat *avif= s->iformat;
1135 int64_t pos_min, pos_max, pos, pos_limit;
1136 int64_t ts_min, ts_max, ts;
1138 int index, no_change;
1141 if (stream_index < 0)
1145 av_log(s, AV_LOG_DEBUG, "read_seek: %d %lld\n", stream_index, target_ts);
1149 ts_min= AV_NOPTS_VALUE;
1150 pos_limit= -1; //gcc falsely says it may be uninitialized
    /* seed the search bounds from the cached index, if available */
1152 st= s->streams[stream_index];
1153 if(st->index_entries){
1156 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1157 index= FFMAX(index, 0);
1158 e= &st->index_entries[index];
1160 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1162 ts_min= e->timestamp;
1164 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%llx dts_min=%lld\n",
1171 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1172 assert(index < st->nb_index_entries);
1174 e= &st->index_entries[index];
1175 assert(e->timestamp >= target_ts);
1177 ts_max= e->timestamp;
1178 pos_limit= pos_max - e->min_distance;
1180 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%llx pos_limit=0x%llx dts_max=%lld\n",
1181 pos_max,pos_limit, ts_max);
    /* no cached lower bound: probe the first timestamp in the file */
1186 if(ts_min == AV_NOPTS_VALUE){
1187 pos_min = s->data_offset;
1188 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1189 if (ts_min == AV_NOPTS_VALUE)
    /* no cached upper bound: probe backwards from the end of the file */
1193 if(ts_max == AV_NOPTS_VALUE){
1195 pos_max = url_fsize(&s->pb) - 1;
1198 ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
1200 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1201 if (ts_max == AV_NOPTS_VALUE)
1205 int64_t tmp_pos= pos_max + 1;
1206 int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1207 if(tmp_ts == AV_NOPTS_VALUE)
    /* narrow [pos_min, pos_limit] until the target position is found */
1216 while (pos_min < pos_limit) {
1218 av_log(s, AV_LOG_DEBUG, "pos_min=0x%llx pos_max=0x%llx dts_min=%lld dts_max=%lld\n",
1222 assert(pos_limit <= pos_max);
1225 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1226 // interpolate position (better than dichotomy)
1227 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1228 + pos_min - approximate_keyframe_distance;
1229 }else if(no_change==1){
1230 // bisection, if interpolation failed to change min or max pos last time
1231 pos = (pos_min + pos_limit)>>1;
1233 // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1238 else if(pos > pos_limit)
1242 ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1248 av_log(s, AV_LOG_DEBUG, "%Ld %Ld %Ld / %Ld %Ld %Ld target:%Ld limit:%Ld start:%Ld noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1250 assert(ts != AV_NOPTS_VALUE);
1251 if (target_ts <= ts) {
1252 pos_limit = start_pos - 1;
1256 if (target_ts >= ts) {
    /* pick the bound matching the requested seek direction */
1262 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1263 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1266 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1268 ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1269 av_log(s, AV_LOG_DEBUG, "pos=0x%llx %lld<=%lld<=%lld\n",
1270 pos, ts_min, target_ts, ts_max);
1273 url_fseek(&s->pb, pos, SEEK_SET);
1275 av_update_cur_dts(s, st, ts);
/* Seek to an absolute byte position, clamped to [data_offset, file end]. */
1280 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1281 int64_t pos_min, pos_max;
1285 if (stream_index < 0)
1288 st= s->streams[stream_index];
1291 pos_min = s->data_offset;
1292 pos_max = url_fsize(&s->pb) - 1;
    /* clamp the requested position into the valid range */
1294 if     (pos < pos_min) pos= pos_min;
1295 else if(pos > pos_max) pos= pos_max;
1297 url_fseek(&s->pb, pos, SEEK_SET);
1300 av_update_cur_dts(s, st, ts);
/* Generic seeking: build an index on first use (raw streams only),
   look up the timestamp, then seek to the entry's byte position. */
1305 static int av_seek_frame_generic(AVFormatContext *s,
1306 int stream_index, int64_t timestamp, int flags)
1312 if (!s->index_built) {
1313 if (is_raw_stream(s)) {
1314 av_build_index_raw(s);
1321 st = s->streams[stream_index];
1322 index = av_index_search_timestamp(st, timestamp, flags);
1326 /* now we have found the index, we can seek */
1327 ie = &st->index_entries[index];
1328 av_read_frame_flush(s);
1329 url_fseek(&s->pb, ie->pos, SEEK_SET);
1331 av_update_cur_dts(s, st, ie->timestamp);
1337 * Seek to the key frame at timestamp.
1338 * 'timestamp' in 'stream_index'.
1339 * @param stream_index If stream_index is (-1), a default
1340 * stream is selected, and timestamp is automatically converted
1341 * from AV_TIME_BASE units to the stream specific time_base.
1342 * @param timestamp timestamp in AVStream.time_base units
1343 * @param flags flags which select direction and seeking mode
1344 * @return >= 0 on success
1346 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1351 av_read_frame_flush(s);
1353 if(flags & AVSEEK_FLAG_BYTE)
1354 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1356 if(stream_index < 0){
1357 stream_index= av_find_default_stream_index(s);
1358 if(stream_index < 0)
1361 st= s->streams[stream_index];
1362 /* timestamp for default must be expressed in AV_TIME_BASE units */
1363 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1365 st= s->streams[stream_index];
1367 /* first, we try the format specific seek */
1368 if (s->iformat->read_seek)
1369 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
    /* then fall back to binary search, then to the generic index seek */
1376 if(s->iformat->read_timestamp)
1377 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1379 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1382 /*******************************************************/
1384 /* return TRUE if the stream has accurate timings for at least one component */
1385 static int av_has_timings(AVFormatContext *ic)
1390 for(i = 0;i < ic->nb_streams; i++) {
1391 st = ic->streams[i];
1392 if (st->start_time != AV_NOPTS_VALUE &&
1393 st->duration != AV_NOPTS_VALUE)
1399 /* estimate the global start time/duration from those of each component
1400 stream. Also compute the global bitrate if possible */
1401 static void av_update_stream_timings(AVFormatContext *ic)
1403 int64_t start_time, start_time1, end_time, end_time1;
1407 start_time = MAXINT64;
1408 end_time = MININT64;
1409 for(i = 0;i < ic->nb_streams; i++) {
1410 st = ic->streams[i];
1411 if (st->start_time != AV_NOPTS_VALUE) {
    /* convert per-stream values into the common AV_TIME_BASE */
1412 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1413 if (start_time1 < start_time)
1414 start_time = start_time1;
1415 if (st->duration != AV_NOPTS_VALUE) {
1416 end_time1 = start_time1
1417 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1418 if (end_time1 > end_time)
1419 end_time = end_time1;
1423 if (start_time != MAXINT64) {
1424 ic->start_time = start_time;
1425 if (end_time != MININT64) {
1426 ic->duration = end_time - start_time;
1427 if (ic->file_size > 0) {
1428 /* compute the bit rate */
1429 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1430 (double)ic->duration;
/* Propagate the global start_time/duration to streams lacking them,
   rescaled into each stream's own time base. */
1437 static void fill_all_stream_timings(AVFormatContext *ic)
1442 av_update_stream_timings(ic);
1443 for(i = 0;i < ic->nb_streams; i++) {
1444 st = ic->streams[i];
1445 if (st->start_time == AV_NOPTS_VALUE) {
1446 if(ic->start_time != AV_NOPTS_VALUE)
1447 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1448 if(ic->duration != AV_NOPTS_VALUE)
1449 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Estimate the duration from the file size and the total bit rate
   (summed over all streams when not already known). */
1454 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1456 int64_t filesize, duration;
1460 /* if bit_rate is already set, we believe it */
1461 if (ic->bit_rate == 0) {
1463 for(i=0;i<ic->nb_streams;i++) {
1464 st = ic->streams[i];
1465 bit_rate += st->codec.bit_rate;
1467 ic->bit_rate = bit_rate;
1470 /* if duration is already set, we believe it */
1471 if (ic->duration == AV_NOPTS_VALUE &&
1472 ic->bit_rate != 0 &&
1473 ic->file_size != 0) {
1474 filesize = ic->file_size;
1476 for(i = 0; i < ic->nb_streams; i++) {
1477 st = ic->streams[i];
    /* duration = 8 * filesize / bit_rate, in the stream's time base */
1478 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1479 if (st->start_time == AV_NOPTS_VALUE ||
1480 st->duration == AV_NOPTS_VALUE) {
1482 st->duration = duration;
1489 #define DURATION_MAX_READ_SIZE 250000
1491 /* only usable for MPEG-PS streams: read packets at the start and near
     the end of the file to derive start_time and duration from PTSes */
1492 static void av_estimate_timings_from_pts(AVFormatContext *ic)
1494 AVPacket pkt1, *pkt = &pkt1;
1496 int read_size, i, ret;
1497 int64_t start_time, end_time, end_time1;
1498 int64_t filesize, offset, duration;
1500 /* free previous packet */
1501 if (ic->cur_st && ic->cur_st->parser)
1502 av_free_packet(&ic->cur_pkt);
1505 /* flush packet queue */
1506 flush_packet_queue(ic);
1508 for(i=0;i<ic->nb_streams;i++) {
1509 st = ic->streams[i];
1511 av_parser_close(st->parser);
1516 /* we read the first packets to get the first PTS (not fully
1517 accurate, but it is enough now) */
1518 url_fseek(&ic->pb, 0, SEEK_SET);
    /* bound the scan to DURATION_MAX_READ_SIZE bytes */
1521 if (read_size >= DURATION_MAX_READ_SIZE)
1523 /* if all info is available, we can stop */
1524 for(i = 0;i < ic->nb_streams; i++) {
1525 st = ic->streams[i];
1526 if (st->start_time == AV_NOPTS_VALUE)
1529 if (i == ic->nb_streams)
1532 ret = av_read_packet(ic, pkt);
1535 read_size += pkt->size;
1536 st = ic->streams[pkt->stream_index];
1537 if (pkt->pts != AV_NOPTS_VALUE) {
1538 if (st->start_time == AV_NOPTS_VALUE)
1539 st->start_time = pkt->pts;
1541 av_free_packet(pkt);
1544 /* estimate the end time (duration) */
1545 /* XXX: may need to support wrapping */
1546 filesize = ic->file_size;
1547 offset = filesize - DURATION_MAX_READ_SIZE;
1551 url_fseek(&ic->pb, offset, SEEK_SET);
1554 if (read_size >= DURATION_MAX_READ_SIZE)
1556 /* if all info is available, we can stop */
1557 for(i = 0;i < ic->nb_streams; i++) {
1558 st = ic->streams[i];
1559 if (st->duration == AV_NOPTS_VALUE)
1562 if (i == ic->nb_streams)
1565 ret = av_read_packet(ic, pkt);
1568 read_size += pkt->size;
1569 st = ic->streams[pkt->stream_index];
1570 if (pkt->pts != AV_NOPTS_VALUE) {
    /* keep the largest end_time - start_time seen near the file end */
1571 end_time = pkt->pts;
1572 duration = end_time - st->start_time;
1574 if (st->duration == AV_NOPTS_VALUE ||
1575 st->duration < duration)
1576 st->duration = duration;
1579 av_free_packet(pkt);
1582 fill_all_stream_timings(ic);
    /* rewind so normal demuxing starts from the beginning again */
1584 url_fseek(&ic->pb, 0, SEEK_SET);
/* Dispatch to the best available timing-estimation strategy:
 * 1) accurate PTS scan for seekable MPEG-PS/TS files,
 * 2) copy timings when at least one stream already has them,
 * 3) fall back to the bit-rate/file-size estimate.
 * Records the file size in ic->file_size first; debug printf dump at end.
 * NOTE(review): excerpt is elided; some branches/braces not shown. */
1587 static void av_estimate_timings(AVFormatContext *ic)
1591 /* get the file size, if possible */
1592 if (ic->iformat->flags & AVFMT_NOFILE) {
1595 file_size = url_fsize(&ic->pb);
1599 ic->file_size = file_size;
 /* PTS scanning needs a real, seekable file of known size */
1601 if ((ic->iformat == &mpegps_demux || ic->iformat == &mpegts_demux) && file_size && !ic->pb.is_streamed) {
1602 /* get accurate estimate from the PTSes */
1603 av_estimate_timings_from_pts(ic);
1604 } else if (av_has_timings(ic)) {
1605 /* at least one component has timings - we use them for all
1607 fill_all_stream_timings(ic);
1609 /* less precise: use bit rate info */
1610 av_estimate_timings_from_bit_rate(ic);
1612 av_update_stream_timings(ic);
 /* debug dump of the computed per-stream and global timings */
1618 for(i = 0;i < ic->nb_streams; i++) {
1619 st = ic->streams[i];
1620 printf("%d: start_time: %0.3f duration: %0.3f\n",
1621 i, (double)st->start_time / AV_TIME_BASE,
1622 (double)st->duration / AV_TIME_BASE);
1624 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1625 (double)ic->start_time / AV_TIME_BASE,
1626 (double)ic->duration / AV_TIME_BASE,
1627 ic->bit_rate / 1000);
/* Return non-zero when the codec context already carries enough
 * parameters to be usable: a sample rate for audio, or a width plus a
 * known pixel format for video. Other codec types elided from view. */
1632 static int has_codec_parameters(AVCodecContext *enc)
1635 switch(enc->codec_type) {
1636 case CODEC_TYPE_AUDIO:
1637 val = enc->sample_rate;
1639 case CODEC_TYPE_VIDEO:
1640 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
/* Open the stream's decoder and decode one packet's worth of data, solely
 * so the decoder fills in the missing codec parameters (width, sample
 * rate, ...). The decoder is closed again before returning.
 * NOTE(review): error-path lines appear elided in this excerpt. */
1649 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1653 int got_picture, ret;
1656 codec = avcodec_find_decoder(st->codec.codec_id);
1659 ret = avcodec_open(&st->codec, codec);
 /* only decode if the parameters are still unknown */
1663 if(!has_codec_parameters(&st->codec)){
1664 switch(st->codec.codec_type) {
1665 case CODEC_TYPE_VIDEO:
1666 ret = avcodec_decode_video(&st->codec, &picture,
1667 &got_picture, (uint8_t *)data, size);
1669 case CODEC_TYPE_AUDIO:
 /* audio decode needs a caller-provided output sample buffer */
1670 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1673 ret = avcodec_decode_audio(&st->codec, samples,
1674 &got_picture, (uint8_t *)data, size);
1682 avcodec_close(&st->codec);
1686 /* absolute maximum size we read until we abort */
1687 #define MAX_READ_SIZE 5000000
1689 /* maximum duration until we stop analysing the stream */
1690 #define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 1.0))
1693 * Read the beginning of a media file to get stream information. This
1694 * is useful for file formats with no headers such as MPEG. This
1695 * function also computes the real frame rate in case of mpeg2 repeat
1698 * @param ic media file handle
1699 * @return >=0 if OK. AVERROR_xxx if error.
1700 * @todo let user decide somehow what information is needed so we don't waste time getting stuff the user doesn't need
1702 int av_find_stream_info(AVFormatContext *ic)
1704 int i, count, ret, read_size;
1706 AVPacket pkt1, *pkt;
1707 AVPacketList *pktl=NULL, **ppktl;
 /* per-stream frame-duration statistics used to guess the real fps */
1708 int64_t last_dts[MAX_STREAMS];
1709 int64_t duration_sum[MAX_STREAMS];
1710 int duration_count[MAX_STREAMS]={0};
 /* default the codec time base from the stream time base if unset */
1712 for(i=0;i<ic->nb_streams;i++) {
1713 st = ic->streams[i];
1714 if(st->codec.codec_type == CODEC_TYPE_VIDEO){
1715 /* if(!st->time_base.num)
1717 if(!st->codec.time_base.num)
1718 st->codec.time_base= st->time_base;
1722 for(i=0;i<MAX_STREAMS;i++){
1723 last_dts[i]= AV_NOPTS_VALUE;
1724 duration_sum[i]= INT64_MAX;
1729 ppktl = &ic->packet_buffer;
 /* main read loop: keep pulling frames until every stream has codec
    parameters (and, for video, a plausible frame-rate sample count),
    or until the read limits are hit.
    NOTE(review): loop header elided in this excerpt. */
1731 /* check if one codec still needs to be handled */
1732 for(i=0;i<ic->nb_streams;i++) {
1733 st = ic->streams[i];
1734 if (!has_codec_parameters(&st->codec))
1736 /* variable fps and no guess at the real fps */
1737 if( st->codec.time_base.den >= 1000LL*st->codec.time_base.num
1738 && duration_count[i]<20 && st->codec.codec_type == CODEC_TYPE_VIDEO)
1741 if (i == ic->nb_streams) {
1742 /* NOTE: if the format has no header, then we need to read
1743 some packets to get most of the streams, so we cannot
1745 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1746 /* if we found the info for all the codecs, we can stop */
1751 /* we did not get all the codec info, but we read too much data */
1752 if (read_size >= MAX_READ_SIZE) {
1758 /* NOTE: a new stream can be added there if no header in file
1759 (AVFMTCTX_NOHEADER) */
1760 ret = av_read_frame_internal(ic, &pkt1);
 /* on EOF: succeed only if every stream got its parameters */
1763 ret = -1; /* we could not have all the codec parameters before EOF */
1764 for(i=0;i<ic->nb_streams;i++) {
1765 st = ic->streams[i];
1766 if (!has_codec_parameters(&st->codec))
1769 if (i == ic->nb_streams)
 /* buffer the packet so it can be re-delivered to the caller later */
1774 pktl = av_mallocz(sizeof(AVPacketList));
1776 ret = AVERROR_NOMEM;
1780 /* add the packet in the buffered packet list */
1782 ppktl = &pktl->next;
1787 /* duplicate the packet */
1788 if (av_dup_packet(pkt) < 0) {
1789 ret = AVERROR_NOMEM;
1793 read_size += pkt->size;
1795 st = ic->streams[pkt->stream_index];
1796 st->codec_info_duration += pkt->duration;
1797 if (pkt->duration != 0)
1798 st->codec_info_nb_frames++;
 /* accumulate DTS deltas per video stream to estimate the real
    frame duration (robust against occasional outliers) */
1800 if(st->codec.codec_type == CODEC_TYPE_VIDEO){
1801 int index= pkt->stream_index;
1802 int64_t last= last_dts[index];
1803 int64_t duration= pkt->dts - last;
1805 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
 /* a clearly smaller delta restarts the average (new, finer base) */
1806 if(duration*duration_count[index]*10/9 < duration_sum[index]){
1807 duration_sum[index]= duration;
1808 duration_count[index]=1;
1810 int factor= av_rescale(duration, duration_count[index], duration_sum[index]);
1811 duration_sum[index] += duration;
1812 duration_count[index]+= factor;
1815 last_dts[pkt->stream_index]= pkt->dts;
1817 /* if still no information, we try to open the codec and to
1818 decompress the frame. We try to avoid that in most cases as
1819 it takes longer and uses more memory. For MPEG4, we need to
1820 decompress for Quicktime. */
1821 if (!has_codec_parameters(&st->codec) /*&&
1822 (st->codec.codec_id == CODEC_ID_FLV1 ||
1823 st->codec.codec_id == CODEC_ID_H264 ||
1824 st->codec.codec_id == CODEC_ID_H263 ||
1825 st->codec.codec_id == CODEC_ID_H261 ||
1826 st->codec.codec_id == CODEC_ID_VORBIS ||
1827 st->codec.codec_id == CODEC_ID_MJPEG ||
1828 st->codec.codec_id == CODEC_ID_PNG ||
1829 st->codec.codec_id == CODEC_ID_PAM ||
1830 st->codec.codec_id == CODEC_ID_PGM ||
1831 st->codec.codec_id == CODEC_ID_PGMYUV ||
1832 st->codec.codec_id == CODEC_ID_PBM ||
1833 st->codec.codec_id == CODEC_ID_PPM ||
1834 st->codec.codec_id == CODEC_ID_SHORTEN ||
1835 (st->codec.codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1836 try_decode_frame(st, pkt->data, pkt->size);
 /* stop analysing a stream once enough wall-clock worth was seen */
1838 if (st->codec_info_duration >= MAX_STREAM_DURATION) {
 /* post-pass: derive codec tags and real frame rates per stream */
1844 for(i=0;i<ic->nb_streams;i++) {
1845 st = ic->streams[i];
1846 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
1847 if(st->codec.codec_id == CODEC_ID_RAWVIDEO && !st->codec.codec_tag && !st->codec.bits_per_sample)
1848 st->codec.codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec.pix_fmt);
 /* if the measured average frame duration disagrees with the codec
    time base, compute r_frame_rate from the DTS statistics */
1850 if(duration_count[i] && st->codec.time_base.num*1000LL <= st->codec.time_base.den &&
1851 st->time_base.num*duration_sum[i]/duration_count[i]*1000LL > st->time_base.den){
1855 num= st->time_base.den*duration_count[i];
1856 den= st->time_base.num*duration_sum[i];
 /* also test the NTSC-style rate (x*1000/1001) as a candidate */
1858 av_reduce(&fps1.num, &fps1.den, num*1001, den*1000, FFMAX(st->time_base.den, st->time_base.num)/4);
1859 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, den, FFMAX(st->time_base.den, st->time_base.num)/4);
1860 if(fps1.num < st->r_frame_rate.num && fps1.den == 1 && (fps1.num==24 || fps1.num==30)){ //FIXME better decision
1861 st->r_frame_rate.num= fps1.num*1000;
1862 st->r_frame_rate.den= fps1.den*1001;
1866 /* set real frame rate info */
1867 /* compute the real frame rate for telecine */
1868 if ((st->codec.codec_id == CODEC_ID_MPEG1VIDEO ||
1869 st->codec.codec_id == CODEC_ID_MPEG2VIDEO) &&
1870 st->codec.sub_id == 2) {
1871 if (st->codec_info_nb_frames >= 20) {
1872 float coded_frame_rate, est_frame_rate;
1873 est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) /
1874 (double)st->codec_info_duration ;
1875 coded_frame_rate = 1.0/av_q2d(st->codec.time_base);
1877 printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n",
1878 coded_frame_rate, est_frame_rate);
1880 /* if we detect that it could be a telecine, we
1881 signal it. It would be better to do it at a
1882 higher level as it can change in a film */
1883 if (coded_frame_rate >= 24.97 &&
1884 (est_frame_rate >= 23.5 && est_frame_rate < 24.5)) {
1885 st->r_frame_rate = (AVRational){24000, 1001};
1889 /* if no real frame rate, use the codec one */
1890 if (!st->r_frame_rate.num){
1891 st->r_frame_rate.num = st->codec.time_base.den;
1892 st->r_frame_rate.den = st->codec.time_base.num;
1897 av_estimate_timings(ic);
1899 /* correct DTS for b frame streams with no timestamps */
1900 for(i=0;i<ic->nb_streams;i++) {
1901 st = ic->streams[i];
1902 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
1904 ppktl = &ic->packet_buffer;
 /* shift negative DTS values in the buffered packets by 'delta'
    (computation elided from this excerpt) */
1906 if(ppkt1->stream_index != i)
1908 if(ppkt1->pkt->dts < 0)
1910 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
1912 ppkt1->pkt->dts -= delta;
1917 st->cur_dts -= delta;
1925 /*******************************************************/
1928 * start playing a network based stream (e.g. RTSP stream) at the
 * current position; returns AVERROR_NOTSUPP when the demuxer provides
 * no read_play callback, otherwise the callback's return value.
1931 int av_read_play(AVFormatContext *s)
1933 if (!s->iformat->read_play)
1934 return AVERROR_NOTSUPP;
1935 return s->iformat->read_play(s);
1939 * pause a network based stream (e.g. RTSP stream). Use av_read_play()
 * to resume; returns AVERROR_NOTSUPP when the demuxer provides no
 * read_pause callback, otherwise the callback's return value.
1942 int av_read_pause(AVFormatContext *s)
1944 if (!s->iformat->read_pause)
1945 return AVERROR_NOTSUPP;
1946 return s->iformat->read_pause(s);
1950 * Close a media file (but not its codecs)
1952 * @param s media file handle
 *
 * Frees the pending parsed packet, invokes the demuxer's read_close,
 * releases every stream's parser and index, flushes the queued packets
 * and frees the private data. NOTE(review): the actual file-close and
 * context free lines are elided from this excerpt.
1954 void av_close_input_file(AVFormatContext *s)
1956 int i, must_open_file;
1959 /* free previous packet */
1960 if (s->cur_st && s->cur_st->parser)
1961 av_free_packet(&s->cur_pkt);
1963 if (s->iformat->read_close)
1964 s->iformat->read_close(s);
1965 for(i=0;i<s->nb_streams;i++) {
1966 /* free all data in a stream component */
1969 av_parser_close(st->parser);
1971 av_free(st->index_entries);
1974 flush_packet_queue(s);
 /* AVFMT_NOFILE formats never opened a ByteIOContext to close */
1976 if (s->iformat->flags & AVFMT_NOFILE) {
1979 if (must_open_file) {
1982 av_freep(&s->priv_data);
1987 * Add a new stream to a media file. Can only be called in the
1988 * read_header function. If the flag AVFMTCTX_NOHEADER is in the
1989 * format context, then new streams can be added in read_packet too.
1992 * @param s media file handle
1993 * @param id file format dependent stream id
 * @return the newly allocated stream, registered in s->streams
 * (NOTE(review): the NULL-return paths are elided from this excerpt)
1995 AVStream *av_new_stream(AVFormatContext *s, int id)
1999 if (s->nb_streams >= MAX_STREAMS)
2002 st = av_mallocz(sizeof(AVStream));
2005 avcodec_get_context_defaults(&st->codec);
2007 /* no default bitrate if decoding */
2008 st->codec.bit_rate = 0;
2010 st->index = s->nb_streams;
 /* timing fields start out unknown until a demuxer/estimator sets them */
2012 st->start_time = AV_NOPTS_VALUE;
2013 st->duration = AV_NOPTS_VALUE;
2014 st->cur_dts = AV_NOPTS_VALUE;
2016 /* default pts settings is MPEG like */
2017 av_set_pts_info(st, 33, 1, 90000);
2018 st->last_IP_pts = AV_NOPTS_VALUE;
2020 s->streams[s->nb_streams++] = st;
2024 /************************************************************/
2025 /* output media file */
/* Allocate the muxer's private data (if the output format declares a
 * size) and forward the caller-supplied parameters to the muxer's
 * set_parameters callback. Returns AVERROR_NOMEM on allocation failure;
 * other return paths elided from this excerpt. */
2027 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2031 if (s->oformat->priv_data_size > 0) {
2032 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2034 return AVERROR_NOMEM;
2036 s->priv_data = NULL;
2038 if (s->oformat->set_parameters) {
2039 ret = s->oformat->set_parameters(s, ap);
2047 * allocate the stream private data and write the stream header to an
2050 * @param s media file handle
2051 * @return 0 if OK. AVERROR_xxx if error.
 *
 * After writing the header, initializes each stream's fractional PTS
 * generator so audio advances by sample count and video by frame period.
2053 int av_write_header(AVFormatContext *s)
2058 ret = s->oformat->write_header(s);
2062 /* init PTS generation */
2063 for(i=0;i<s->nb_streams;i++) {
2066 switch (st->codec.codec_type) {
2067 case CODEC_TYPE_AUDIO:
 /* audio PTS denominator: time_base.num * sample_rate */
2068 av_frac_init(&st->pts, 0, 0,
2069 (int64_t)st->time_base.num * st->codec.sample_rate);
2071 case CODEC_TYPE_VIDEO:
 /* video PTS denominator: time_base.num * codec ticks per second */
2072 av_frac_init(&st->pts, 0, 0,
2073 (int64_t)st->time_base.num * st->codec.time_base.den);
2082 //FIXME merge with compute_pkt_fields
/* Fill in the missing timing fields (duration, pts, dts) of an outgoing
 * packet before muxing, validate timestamp monotonicity, and advance the
 * stream's fractional PTS counter. Returns a negative value on invalid
 * timestamps (error-return lines elided from this excerpt). */
2083 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2084 int b_frames = FFMAX(st->codec.has_b_frames, st->codec.max_b_frames);
2085 int num, den, frame_size;
2087 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);
2089 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2092 /* duration field */
2093 if (pkt->duration == 0) {
2094 compute_frame_duration(&num, &den, st, NULL, pkt);
 /* convert the frame duration into stream time_base units */
2096 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2100 //XXX/FIXME this is a temporary hack until all encoders output pts
2101 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
2103 // pkt->pts= st->cur_dts;
2104 pkt->pts= st->pts.val;
2107 //calculate dts from pts
 /* with B frames, DTS of the current packet equals the PTS of the
    previous I/P frame (delayed by one frame) */
2108 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2110 if(st->last_IP_pts == AV_NOPTS_VALUE){
2111 st->last_IP_pts= -pkt->duration;
2113 if(st->last_IP_pts < pkt->pts){
2114 pkt->dts= st->last_IP_pts;
2115 st->last_IP_pts= pkt->pts;
 /* reject non-increasing DTS and pts < dts combinations */
2122 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2123 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %Ld >= %Ld\n", st->cur_dts, pkt->dts);
2126 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2127 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2131 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
2132 st->cur_dts= pkt->dts;
2133 st->pts.val= pkt->dts;
 /* update pts: advance by samples (audio) or one frame period (video) */
2136 switch (st->codec.codec_type) {
2137 case CODEC_TYPE_AUDIO:
2138 frame_size = get_audio_frame_size(&st->codec, pkt->size);
2140 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2141 but it would be better if we had the real timestamps from the encoder */
2142 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2143 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2146 case CODEC_TYPE_VIDEO:
2147 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec.time_base.num);
/* Wrap the packet's pts/dts into the stream's pts_wrap_bits range by
 * masking off the high bits (MPEG-style 33-bit timestamp wrapping). */
2155 static void truncate_ts(AVStream *st, AVPacket *pkt){
2156 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2159 // pkt->dts= 0; //this happens for low_delay=0 and b frames, FIXME, needs further investigation about what we should do here
2161 pkt->pts &= pts_mask;
2162 pkt->dts &= pts_mask;
2166 * Write a packet to an output media file. The packet shall contain
2167 * one audio or video frame.
2169 * @param s media file handle
2170 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2171 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
 *
 * Non-interleaving variant: fixes up the packet's timing fields,
 * truncates the timestamps to the stream's wrap range, and hands the
 * packet straight to the muxer.
2173 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2177 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2181 truncate_ts(s->streams[pkt->stream_index], pkt);
2183 ret= s->oformat->write_packet(s, pkt);
 /* surface any I/O error recorded on the output byte stream */
2185 ret= url_ferror(&s->pb);
2190 * interleave_packet implementation which will interleave per DTS.
 *
 * Buffers the incoming packet into s->packet_buffer, kept sorted by DTS
 * (compared across streams via cross-multiplied time bases). A packet is
 * released into 'out' once every stream has at least one buffered packet,
 * or unconditionally when flushing.
 * NOTE(review): several lines are elided from this excerpt.
2192 static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2193 AVPacketList *pktl, **next_point, *this_pktl;
2195 int streams[MAX_STREAMS];
2198 AVStream *st= s->streams[ pkt->stream_index];
2200 assert(pkt->destruct != av_destruct_packet); //FIXME
2202 this_pktl = av_mallocz(sizeof(AVPacketList));
2203 this_pktl->pkt= *pkt;
 /* own a private copy: caller may reuse/free the original buffer */
2204 av_dup_packet(&this_pktl->pkt);
 /* insertion sort by DTS, comparing across different time bases */
2206 next_point = &s->packet_buffer;
2208 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2209 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2210 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2211 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2213 next_point= &(*next_point)->next;
2215 this_pktl->next= *next_point;
2216 *next_point= this_pktl;
 /* count how many distinct streams have buffered packets */
2219 memset(streams, 0, sizeof(streams));
2220 pktl= s->packet_buffer;
2222 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2223 if(streams[ pktl->pkt.stream_index ] == 0)
2225 streams[ pktl->pkt.stream_index ]++;
 /* pop the head packet when every stream is represented (or flushing) */
2229 if(s->nb_streams == stream_count || (flush && stream_count)){
2230 pktl= s->packet_buffer;
2233 s->packet_buffer= pktl->next;
 /* nothing to output yet: return an empty packet */
2237 av_init_packet(out);
2243 * Interleaves a AVPacket correctly so it can be muxed.
2244 * @param out the interleaved packet will be output here
2245 * @param in the input packet
2246 * @param flush 1 if no further packets are available as input and all
2247 * remaining packets should be output
2248 * @return 1 if a packet was output, 0 if no packet could be output,
2249 * < 0 if an error occurred
 *
 * Delegates to the output format's own interleave_packet callback when
 * present, otherwise falls back to DTS-based interleaving.
2251 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2252 if(s->oformat->interleave_packet)
2253 return s->oformat->interleave_packet(s, out, in, flush);
2255 return av_interleave_packet_per_dts(s, out, in, flush);
2259 * Writes a packet to an output media file ensuring correct interleaving.
2260 * The packet shall contain one audio or video frame.
2261 * If the packets are already correctly interleaved the application should
2262 * call av_write_frame() instead as it's slightly faster; it's also important
2263 * to keep in mind that completely non-interleaved input will need huge amounts
2264 * of memory to interleave with this, so it's preferable to interleave at the
2267 * @param s media file handle
2268 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2269 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2271 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2272 AVStream *st= s->streams[ pkt->stream_index];
2274 //FIXME/XXX/HACK drop zero sized packets
2275 if(st->codec.codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2278 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %Ld %Ld\n", pkt->size, pkt->dts, pkt->pts);
2279 if(compute_pkt_fields2(st, pkt) < 0)
 /* interleaving is keyed on DTS; a packet without one cannot be sorted */
2282 if(pkt->dts == AV_NOPTS_VALUE)
 /* drain every packet the interleaver is ready to release */
2287 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2288 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2291 truncate_ts(s->streams[opkt.stream_index], &opkt);
2292 ret= s->oformat->write_packet(s, &opkt);
2294 av_free_packet(&opkt);
2299 if(url_ferror(&s->pb))
2300 return url_ferror(&s->pb);
2305 * write the stream trailer to an output media file and free the
2306 * file private data.
2308 * @param s media file handle
2309 * @return 0 if OK. AVERROR_xxx if error. */
 /* Flushes all remaining interleaved packets, writes the trailer, then
  * frees per-stream and format private data. */
2310 int av_write_trailer(AVFormatContext *s)
 /* drain the interleaving buffer with flush=1 until it is empty */
2316 ret= av_interleave_packet(s, &pkt, NULL, 1);
2317 if(ret<0) //FIXME cleanup needed for ret<0 ?
2322 truncate_ts(s->streams[pkt.stream_index], &pkt);
2323 ret= s->oformat->write_packet(s, &pkt);
2325 av_free_packet(&pkt);
2329 if(url_ferror(&s->pb))
2333 ret = s->oformat->write_trailer(s);
 /* an I/O error on the byte stream overrides the trailer status */
2336 ret=url_ferror(&s->pb);
2337 for(i=0;i<s->nb_streams;i++)
2338 av_freep(&s->streams[i]->priv_data);
2339 av_freep(&s->priv_data);
2343 /* "user interface" functions */
/* Log a human-readable description of a format context: container name,
 * URL, duration, start time, bit rate and a one-line summary per stream.
 * NOTE(review): full parameter list is elided from this excerpt;
 * 'is_output' selects between oformat and iformat fields. */
2345 void dump_format(AVFormatContext *ic,
2353 av_log(NULL, AV_LOG_DEBUG, "%s #%d, %s, %s '%s':\n",
2354 is_output ? "Output" : "Input",
2356 is_output ? ic->oformat->name : ic->iformat->name,
2357 is_output ? "to" : "from", url);
2359 av_log(NULL, AV_LOG_DEBUG, " Duration: ");
2360 if (ic->duration != AV_NOPTS_VALUE) {
2361 int hours, mins, secs, us;
 /* split duration (in AV_TIME_BASE units) into h:m:s.tenths */
2362 secs = ic->duration / AV_TIME_BASE;
2363 us = ic->duration % AV_TIME_BASE;
2368 av_log(NULL, AV_LOG_DEBUG, "%02d:%02d:%02d.%01d", hours, mins, secs,
2369 (10 * us) / AV_TIME_BASE);
2371 av_log(NULL, AV_LOG_DEBUG, "N/A");
2373 if (ic->start_time != AV_NOPTS_VALUE) {
2375 av_log(NULL, AV_LOG_DEBUG, ", start: ");
2376 secs = ic->start_time / AV_TIME_BASE;
2377 us = ic->start_time % AV_TIME_BASE;
2378 av_log(NULL, AV_LOG_DEBUG, "%d.%06d",
2379 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2381 av_log(NULL, AV_LOG_DEBUG, ", bitrate: ");
2383 av_log(NULL, AV_LOG_DEBUG,"%d kb/s", ic->bit_rate / 1000);
2385 av_log(NULL, AV_LOG_DEBUG, "N/A");
2387 av_log(NULL, AV_LOG_DEBUG, "\n");
2389 for(i=0;i<ic->nb_streams;i++) {
2390 AVStream *st = ic->streams[i];
2391 avcodec_string(buf, sizeof(buf), &st->codec, is_output);
2392 av_log(NULL, AV_LOG_DEBUG, " Stream #%d.%d", index, i);
2393 /* the pid is an important information, so we display it */
2394 /* XXX: add a generic system */
2396 flags = ic->oformat->flags;
2398 flags = ic->iformat->flags;
 /* formats with externally meaningful stream ids (e.g. MPEG-TS PIDs) */
2399 if (flags & AVFMT_SHOW_IDS) {
2400 av_log(NULL, AV_LOG_DEBUG, "[0x%x]", st->id);
2402 av_log(NULL, AV_LOG_DEBUG, ": %s\n", buf);
2409 int frame_rate, frame_rate_base;
/* Table of named picture-size / frame-rate abbreviations accepted by
 * parse_image_size() and parse_frame_rate(). A frame_rate of 0 means
 * the abbreviation fixes only the picture size. */
2412 static AbvEntry frame_abvs[] = {
2413 { "ntsc", 720, 480, 30000, 1001 },
2414 { "pal", 720, 576, 25, 1 },
2415 { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
2416 { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
2417 { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
2418 { "spal", 768, 576, 25, 1 }, /* square pixel pal */
2419 { "film", 352, 240, 24, 1 },
2420 { "ntsc-film", 352, 240, 24000, 1001 },
2421 { "sqcif", 128, 96, 0, 0 },
2422 { "qcif", 176, 144, 0, 0 },
2423 { "cif", 352, 288, 0, 0 },
2424 { "4cif", 704, 576, 0, 0 },
/* Parse a picture size given either as an abbreviation from frame_abvs
 * ("pal", "cif", ...) or as "WIDTHxHEIGHT"-style digits. Stores the
 * result through width_ptr/height_ptr; both values must be > 0.
 * NOTE(review): the separator-handling lines are elided here. */
2427 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2430 int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2432 int frame_width = 0, frame_height = 0;
2435 if (!strcmp(frame_abvs[i].abv, str)) {
2436 frame_width = frame_abvs[i].width;
2437 frame_height = frame_abvs[i].height;
 /* not an abbreviation: parse explicit numeric dimensions */
2443 frame_width = strtol(p, (char **)&p, 10);
2446 frame_height = strtol(p, (char **)&p, 10);
2448 if (frame_width <= 0 || frame_height <= 0)
2450 *width_ptr = frame_width;
2451 *height_ptr = frame_height;
/* Parse a frame rate given as an abbreviation from frame_abvs, as a
 * rational "num/den" (or "num:den"), or as a plain decimal number
 * (rounded onto DEFAULT_FRAME_RATE_BASE). Fails if either part ends
 * up zero. */
2455 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2460 /* First, we check our abbreviation table */
2461 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2462 if (!strcmp(frame_abvs[i].abv, arg)) {
2463 *frame_rate = frame_abvs[i].frame_rate;
2464 *frame_rate_base = frame_abvs[i].frame_rate_base;
2468 /* Then, we try to parse it as fraction */
2469 cp = strchr(arg, '/');
 /* ':' accepted as an alternative fraction separator */
2471 cp = strchr(arg, ':');
2474 *frame_rate = strtol(arg, &cpp, 10);
2475 if (cpp != arg || cpp == cp)
2476 *frame_rate_base = strtol(cp+1, &cpp, 10);
2481 /* Finally we give up and parse it as double */
2482 *frame_rate_base = DEFAULT_FRAME_RATE_BASE; //FIXME use av_d2q()
2483 *frame_rate = (int)(strtod(arg, 0) * (*frame_rate_base) + 0.5);
2485 if (!*frame_rate || !*frame_rate_base)
2492 * - If not a duration:
2493 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2494 * Time is localtime unless Z is suffixed to the end. In this case GMT
2495 * Return the date in micro seconds since 1970
2497 * HH[:MM[:SS[.m...]]]
 *
 * Tries the date formats first (defaulting to today's date when only a
 * time is given), then the time formats, then fractional seconds.
 * NOTE(review): several parsing branches are elided from this excerpt.
2500 int64_t parse_date(const char *datestr, int duration)
2506 static const char *date_fmt[] = {
2510 static const char *time_fmt[] = {
2520 time_t now = time(0);
2522 len = strlen(datestr);
2524 lastch = datestr[len - 1];
 /* trailing 'Z'/'z' selects UTC instead of local time */
2527 is_utc = (lastch == 'z' || lastch == 'Z');
2529 memset(&dt, 0, sizeof(dt));
2534 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2535 q = small_strptime(p, date_fmt[i], &dt);
 /* no date part: assume today, with the time fields cleared */
2545 dt = *localtime(&now);
2547 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2552 if (*p == 'T' || *p == 't' || *p == ' ')
2555 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2556 q = small_strptime(p, time_fmt[i], &dt);
2566 q = small_strptime(p, time_fmt[0], &dt);
 /* durations may also be given as a plain number of seconds */
2568 dt.tm_sec = strtol(p, (char **)&q, 10);
2574 /* Now we have all the fields that we can get */
2579 return now * int64_t_C(1000000);
2583 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2585 dt.tm_isdst = -1; /* unknown */
 /* read up to 6 fractional-second digits (microsecond precision) */
2598 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2601 val += n * (*q - '0');
2605 return negative ? -t : t;
2608 /* syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done. Return
 * whether 'tag1' was found; its value is copied (bounded) into 'arg'.
 * NOTE(review): scanning-loop setup lines are elided from this excerpt. */
2610 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
 /* collect the tag name up to '=', '&' or end of string */
2620 while (*p != '\0' && *p != '=' && *p != '&') {
2621 if ((q - tag) < sizeof(tag) - 1)
 /* collect the value, truncating to the caller's buffer size */
2629 while (*p != '&' && *p != '\0') {
2630 if ((q - arg) < arg_size - 1) {
2640 if (!strcmp(tag, tag1))
2649 /* Return in 'buf' the path with '%d' replaced by number. Also handles
2650 the '%0nd' format where 'n' is the total number of digits and
2651 '%%'. Return 0 if OK, and -1 if format error */
2652 int get_frame_filename(char *buf, int buf_size,
2653 const char *path, int number)
2656 char *q, buf1[20], c;
2657 int nd, len, percentd_found;
 /* parse an optional zero-padded width after '%0' */
2669 while (isdigit(*p)) {
2670 nd = nd * 10 + *p++ - '0';
2673 } while (isdigit(c));
 /* render the number with the requested zero padding */
2682 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2684 if ((q - buf + len) > buf_size - 1)
2686 memcpy(q, buf1, len);
2694 if ((q - buf) < buf_size - 1)
 /* a path with no %d-style directive is a format error */
2698 if (!percentd_found)
2708 * Print a nice hex dump of a buffer, 16 bytes per line
2709 * @param f stream for output
2711 * @param size buffer size
2713 void av_hex_dump(FILE *f, uint8_t *buf, int size)
2717 for(i=0;i<size;i+=16) {
 /* byte offset of this row, then the hex bytes */
2721 fprintf(f, "%08x ", i);
2724 fprintf(f, " %02x", buf[i+j]);
 /* ASCII column: non-printable bytes shown as a placeholder */
2729 for(j=0;j<len;j++) {
2731 if (c < ' ' || c > '~')
2733 fprintf(f, "%c", c);
2740 * Print on 'f' a nice dump of a packet
2741 * @param f stream for output
2742 * @param pkt packet to dump
2743 * @param dump_payload true if the payload must be displayed too
2745 //FIXME needs to know the time_base
2746 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2748 fprintf(f, "stream #%d:\n", pkt->stream_index);
2749 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2750 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2751 /* DTS is _always_ valid after av_read_frame() */
2752 fprintf(f, " dts=");
2753 if (pkt->dts == AV_NOPTS_VALUE)
2756 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
2757 /* PTS may be not known if B frames are present */
2758 fprintf(f, " pts=");
2759 if (pkt->pts == AV_NOPTS_VALUE)
2762 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
2764 fprintf(f, " size=%d\n", pkt->size);
 /* optionally hex-dump the payload bytes as well */
2766 av_hex_dump(f, pkt->data, pkt->size);
/* Split a URL of the form proto://auth@host:port/path into its parts.
 * Each output buffer is bounded by its *_size argument; missing parts
 * are returned empty. NOTE(review): port/flags parameters and several
 * lines are elided from this excerpt. */
2769 void url_split(char *proto, int proto_size,
2770 char *authorization, int authorization_size,
2771 char *hostname, int hostname_size,
2773 char *path, int path_size,
 /* scheme is everything before the first ':' */
2784 while (*p != ':' && *p != '\0') {
2785 if ((q - proto) < proto_size - 1)
2791 if (authorization_size > 0)
2792 authorization[0] = '\0';
2796 if (hostname_size > 0)
2800 char *at,*slash; // PETR: position of '@' character and '/' character
2807 at = strchr(p,'@'); // PETR: get the position of '@'
2808 slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
2809 if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
2811 q = at ? authorization : hostname; // PETR: if '@' exists starting with auth.
2813 while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
2814 if (*p == '@') { // PETR: passed '@'
2815 if (authorization_size > 0)
2819 } else if (!at) { // PETR: hostname
2820 if ((q - hostname) < hostname_size - 1)
2823 if ((q - authorization) < authorization_size - 1)
2828 if (hostname_size > 0)
 /* optional numeric port follows the ':' after the host */
2832 port = strtoul(p, (char **)&p, 10);
2837 pstrcpy(path, path_size, p);
2841 * Set the pts for a given stream
2843 * @param pts_wrap_bits number of bits effectively used by the pts
2844 * (used for wrap control, 33 is the value for MPEG)
2845 * @param pts_num numerator to convert to seconds (MPEG: 1)
2846 * @param pts_den denominator to convert to seconds (MPEG: 90000)
2848 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
2849 int pts_num, int pts_den)
2851 s->pts_wrap_bits = pts_wrap_bits;
 /* the stream time base is pts_num/pts_den seconds per tick */
2852 s->time_base.num = pts_num;
2853 s->time_base.den = pts_den;
2856 /* fraction handling */
2859 * f = val + (num / den) + 0.5. 'num' is normalized so that it is such
2860 * as 0 <= num < den.
2862 * @param f fractional number
2863 * @param val integer value
2864 * @param num must be >= 0
2865 * @param den must be >= 1
 * NOTE(review): function body is elided from this excerpt.
2867 void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
2879 /* set f to (val + 0.5): the numerator is reset to half the denominator */
2880 void av_frac_set(AVFrac *f, int64_t val)
2883 f->num = f->den >> 1;
2887 * Fractional addition to f: f = f + (incr / f->den)
2889 * @param f fractional number
2890 * @param incr increment, can be positive or negative
 * The integer part f->val is carried whenever the numerator leaves
 * the [0, den) range.
2892 void av_frac_add(AVFrac *f, int64_t incr)
2896 num = f->num + incr;
 /* negative numerator: borrow from the integer part */
2899 f->val += num / den;
2905 } else if (num >= den) {
 /* numerator overflowed the denominator: carry into the integer part */
2906 f->val += num / den;
2913 * register a new image format
2914 * @param img_fmt Image format descriptor
 * Appends the descriptor to the tail of the first_image_format
 * singly-linked list (same pattern as the input/output format lists).
2916 void av_register_image_format(AVImageFormat *img_fmt)
2920 p = &first_image_format;
 /* walk to the end of the list, then link the new format in */
2921 while (*p != NULL) p = &(*p)->next;
2923 img_fmt->next = NULL;
2926 /* guess image format */
/* Probe all registered image formats against the given data and return
 * the one with the highest score (return statement elided here). */
2927 AVImageFormat *av_probe_image_format(AVProbeData *pd)
2929 AVImageFormat *fmt1, *fmt;
2930 int score, score_max;
2934 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
2935 if (fmt1->img_probe) {
2936 score = fmt1->img_probe(pd);
 /* keep the best-scoring candidate seen so far */
2937 if (score > score_max) {
/* Guess an image format purely from the filename extension; returns the
 * first registered format whose extension list matches. */
2946 AVImageFormat *guess_image_format(const char *filename)
2948 AVImageFormat *fmt1;
2950 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
2951 if (fmt1->extensions && match_ext(filename, fmt1->extensions))
2958 * Read an image from a stream.
2959 * @param gb byte stream containing the image
2960 * @param fmt image format, NULL if probing is required
 *
 * When probing, the first PROBE_BUF_SIZE bytes are peeked (position is
 * restored afterwards) and matched against the registered image formats;
 * AVERROR_NOFMT is returned if none matches.
2962 int av_read_image(ByteIOContext *pb, const char *filename,
2964 int (*alloc_cb)(void *, AVImageInfo *info), void *opaque)
2966 char buf[PROBE_BUF_SIZE];
2967 AVProbeData probe_data, *pd = &probe_data;
2972 pd->filename = filename;
 /* peek probe bytes, then seek back so the reader sees the full file */
2974 pos = url_ftell(pb);
2975 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
2976 url_fseek(pb, pos, SEEK_SET);
2977 fmt = av_probe_image_format(pd);
2980 return AVERROR_NOFMT;
 /* delegate the actual decode to the detected format's reader */
2981 ret = fmt->img_read(pb, alloc_cb, opaque);
2986 * Write an image to a stream.
2987 * @param pb byte stream for the image output
2988 * @param fmt image format
2989 * @param img image data and informations
2991 int av_write_image(ByteIOContext *pb, AVImageFormat *fmt, AVImageInfo *img)
2993 return fmt->img_write(pb, img);