2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/* NOTE(review): this file is a line-numbered listing with most lines elided;
 * the leading integers are original source line numbers, not code. Comments
 * below describe only what the surviving fragments show. */
/* Global registries: heads of singly linked lists of registered demuxers,
 * muxers and image formats. */
24 AVInputFormat *first_iformat = NULL;
25 AVOutputFormat *first_oformat = NULL;
26 AVImageFormat *first_image_format = NULL;
/* Append 'format' at the tail of the global input-format list
 * (walks the ->next chain to the end; list setup lines elided). */
28 void av_register_input_format(AVInputFormat *format)
32     while (*p != NULL) p = &(*p)->next;
/* Append 'format' at the tail of the global output-format list. */
37 void av_register_output_format(AVOutputFormat *format)
41     while (*p != NULL) p = &(*p)->next;
/* Return nonzero if 'filename' ends with one of the comma-separated
 * extensions in 'extensions' (compared case-insensitively via strcasecmp).
 * Fragment: copies each candidate into ext1[] bounded by sizeof(ext1)-1. */
46 int match_ext(const char *filename, const char *extensions)
54     ext = strrchr(filename, '.');
60         while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
63             if (!strcasecmp(ext1, ext))
/* Pick an output format by short name, filename extension or MIME type.
 * Special-cases numbered image sequences ("image2"/"image") before scoring
 * registered muxers; the scoring loop's accumulation lines are elided. */
73 AVOutputFormat *guess_format(const char *short_name, const char *filename,
74                              const char *mime_type)
76     AVOutputFormat *fmt, *fmt_found;
79     /* specific test for image sequences */
80     if (!short_name && filename &&
81         filename_number_test(filename) >= 0 &&
82         av_guess_image2_codec(filename) != CODEC_ID_NONE) {
83         return guess_format("image2", NULL, NULL);
85     if (!short_name && filename &&
86         filename_number_test(filename) >= 0 &&
87         guess_image_format(filename)) {
88         return guess_format("image", NULL, NULL);
91     /* find the proper file type */
97         if (fmt->name && short_name && !strcmp(fmt->name, short_name))
99         if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
101         if (filename && fmt->extensions &&
102             match_ext(filename, fmt->extensions)) {
105         if (score > score_max) {
/* Prefer a "<name>_stream" variant of the guessed muxer when one is
 * registered (e.g. for streamable flavours of a container). */
114 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
115                                     const char *mime_type)
117     AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
120         AVOutputFormat *stream_fmt;
121         char stream_format_name[64];
123         snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
124         stream_fmt = guess_format(stream_format_name, NULL, NULL);
134  * guesses the codec id based upon muxer and filename.
/* For video: "image2"/"image2pipe" muxers get a per-file image codec guess,
 * otherwise fall back to the muxer's default video codec. For audio:
 * return the muxer's default audio codec. Anything else: CODEC_ID_NONE. */
136 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
137                             const char *filename, const char *mime_type, enum CodecType type){
138     if(type == CODEC_TYPE_VIDEO){
139         enum CodecID codec_id= CODEC_ID_NONE;
141         if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
142             codec_id= av_guess_image2_codec(filename);
144         if(codec_id == CODEC_ID_NONE)
145             codec_id= fmt->video_codec;
147     }else if(type == CODEC_TYPE_AUDIO)
148         return fmt->audio_codec;
150     return CODEC_ID_NONE;
/* Linear search of the registered demuxer list by exact short name. */
153 AVInputFormat *av_find_input_format(const char *short_name)
156     for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
157         if (!strcmp(fmt->name, short_name))
163 /* memory handling */
166  * Default packet destructor
/* Frees pkt->data (free call elided in this listing) and clears the fields. */
168 void av_destruct_packet(AVPacket *pkt)
171     pkt->data = NULL; pkt->size = 0;
175  * Allocate the payload of a packet and intialized its fields to default values.
178  * @param size wanted payload size
179  * @return 0 if OK. AVERROR_xxx otherwise.
181 int av_new_packet(AVPacket *pkt, int size)
/* Unsigned-wrap check: rejects sizes where adding the padding overflows. */
184     if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
185         return AVERROR_NOMEM;
186     data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
188         return AVERROR_NOMEM;
/* Zero the trailing padding so (over)reads by optimized decoders are safe. */
189     memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
194     pkt->destruct = av_destruct_packet;
199  * Allocate and read the payload of a packet and intialized its fields to default values.
202  * @param size wanted payload size
203  * @return >0 (read size) if OK. AVERROR_xxx otherwise.
205 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
207     int ret= av_new_packet(pkt, size);
212     pkt->pos= url_ftell(s);
214     ret= get_buffer(s, pkt->data, size);
223 /* This is a hack - the packet memory allocation stuff is broken. The
224    packet is allocated if it was not really allocated */
/* Give the packet its own padded copy of the payload when it does not
 * already own one (i.e. destructor is not av_destruct_packet). */
225 int av_dup_packet(AVPacket *pkt)
227     if (pkt->destruct != av_destruct_packet) {
229         /* we duplicate the packet and don't forget to put the padding
231         if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
232             return AVERROR_NOMEM;
233         data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
235             return AVERROR_NOMEM;
237         memcpy(data, pkt->data, pkt->size);
238         memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
240         pkt->destruct = av_destruct_packet;
/* Circular byte FIFO over a single malloc'd buffer: [buffer, end), with
 * read pointer rptr and write pointer wptr wrapping at 'end'. */
247 int fifo_init(FifoBuffer *f, int size)
249     f->buffer = av_malloc(size);
252     f->end = f->buffer + size;
253     f->wptr = f->rptr = f->buffer;
257 void fifo_free(FifoBuffer *f)
/* Bytes available between 'rptr' and the write pointer, accounting for wrap. */
262 int fifo_size(FifoBuffer *f, uint8_t *rptr)
269     if (f->wptr >= rptr) {
270         size = f->wptr - rptr;
272         size = (f->end - rptr) + (f->wptr - f->buffer);
277 /* get data from the fifo (return -1 if not enough data) */
278 int fifo_read(FifoBuffer *f, uint8_t *buf, int buf_size, uint8_t **rptr_ptr)
287     if (f->wptr >= rptr) {
288         size = f->wptr - rptr;
290         size = (f->end - rptr) + (f->wptr - f->buffer);
/* Copy out in up-to-two chunks (pre-wrap and post-wrap). */
295     while (buf_size > 0) {
299         memcpy(buf, rptr, len);
/* Grow the FIFO in place; pointers are rebased after av_realloc and, when
 * the live data wraps (wptr < rptr), the tail is shifted to the new end. */
310 void fifo_realloc(FifoBuffer *f, unsigned int new_size){
311     unsigned int old_size= f->end - f->buffer;
313     if(old_size < new_size){
314         uint8_t *old= f->buffer;
316         f->buffer= av_realloc(f->buffer, new_size);
318         f->rptr += f->buffer - old;
319         f->wptr += f->buffer - old;
321         if(f->wptr < f->rptr){
322             memmove(f->rptr + new_size - old_size, f->rptr, f->buffer + old_size - f->rptr);
323             f->rptr += new_size - old_size;
325         f->end= f->buffer + new_size;
329 void fifo_write(FifoBuffer *f, uint8_t *buf, int size, uint8_t **wptr_ptr)
342         memcpy(wptr, buf, len);
352 /* get data from the fifo (return -1 if not enough data) */
/* Like fifo_read() but streams the bytes into a ByteIOContext. */
353 int put_fifo(ByteIOContext *pb, FifoBuffer *f, int buf_size, uint8_t **rptr_ptr)
355     uint8_t *rptr = *rptr_ptr;
358     if (f->wptr >= rptr) {
359         size = f->wptr - rptr;
361         size = (f->end - rptr) + (f->wptr - f->buffer);
366     while (buf_size > 0) {
370         put_buffer(pb, rptr, len);
/* Check whether 'filename' contains a frame-number pattern by trying to
 * expand it for frame 1; relies on get_frame_filename()'s return code. */
380 int filename_number_test(const char *filename)
385     return get_frame_filename(buf, sizeof(buf), filename, 1);
388 /* guess file format */
/* Score every registered demuxer against the probe data: use read_probe()
 * when available, otherwise fall back to an extension match; keep the
 * highest-scoring format (score bookkeeping lines elided). */
389 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
391     AVInputFormat *fmt1, *fmt;
392     int score, score_max;
396     for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
/* Skip formats that need an open file when none is opened yet. */
397         if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
400         if (fmt1->read_probe) {
401             score = fmt1->read_probe(pd);
402         } else if (fmt1->extensions) {
403             if (match_ext(pd->filename, fmt1->extensions)) {
407         if (score > score_max) {
415 /************************************************************/
416 /* input media file */
419  * open a media file from an IO stream. 'fmt' must be specified.
/* av_log item-name callback: report the demuxer or muxer name of the
 * AVFormatContext, whichever is set. */
422 static const char* format_to_name(void* ptr)
424     AVFormatContext* fc = (AVFormatContext*) ptr;
425     if(fc->iformat) return fc->iformat->name;
426     else if(fc->oformat) return fc->oformat->name;
430 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name };
/* Allocate a zeroed AVFormatContext with its AVClass set for logging. */
432 AVFormatContext *av_alloc_format_context(void)
435     ic = av_mallocz(sizeof(AVFormatContext));
437     ic->av_class = &av_format_context_class;
/* Open a media stream on an already-created ByteIOContext with a known
 * demuxer: allocate the context, set unknown timing fields, allocate the
 * demuxer's private data, then call its read_header(). Error-path lines
 * (the av_freep cleanup at the end) are partially elided. */
441 int av_open_input_stream(AVFormatContext **ic_ptr,
442                          ByteIOContext *pb, const char *filename,
443                          AVInputFormat *fmt, AVFormatParameters *ap)
448     ic = av_alloc_format_context();
456     ic->duration = AV_NOPTS_VALUE;
457     ic->start_time = AV_NOPTS_VALUE;
458     pstrcpy(ic->filename, sizeof(ic->filename), filename);
460     /* allocate private data */
461     if (fmt->priv_data_size > 0) {
462         ic->priv_data = av_mallocz(fmt->priv_data_size);
463         if (!ic->priv_data) {
468         ic->priv_data = NULL;
471     err = ic->iformat->read_header(ic, ap);
/* Remember where the payload starts so seeks can return here. */
476     ic->data_offset = url_ftell(&ic->pb);
482     av_freep(&ic->priv_data);
489 #define PROBE_BUF_SIZE 2048
492  * Open a media file as input. The codec are not opened. Only the file
493  * header (if present) is read.
495  * @param ic_ptr the opened media file handle is put here
496  * @param filename filename to open.
497  * @param fmt if non NULL, force the file format to use
498  * @param buf_size optional buffer size (zero if default is OK)
499  * @param ap additionnal parameters needed when opening the file (NULL if default)
500  * @return 0 if OK. AVERROR_xxx otherwise.
502 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
505                        AVFormatParameters *ap)
507     int err, must_open_file, file_opened;
508     uint8_t buf[PROBE_BUF_SIZE];
509     AVProbeData probe_data, *pd = &probe_data;
510     ByteIOContext pb1, *pb = &pb1;
515     pd->filename = filename;
520     /* guess format if no file can be opened */
/* First probe pass with is_opened=0: filename/extension only. */
521     fmt = av_probe_input_format(pd, 0);
524     /* do not open file if the format does not need it. XXX: specific
525        hack needed to handle RTSP/TCP */
527     if (fmt && (fmt->flags & AVFMT_NOFILE)) {
529         pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise its uninitalized
532     if (!fmt || must_open_file) {
533         /* if no file needed do not try to open one */
534         if (url_fopen(pb, filename, URL_RDONLY) < 0) {
540             url_setbufsize(pb, buf_size);
543             /* read probe data */
544             pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
/* EPIPE from the rewind means the stream is not seekable: reopen it. */
545             if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
547                 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
555     /* guess file format */
/* Second probe pass with is_opened=1: real data now available. */
557         fmt = av_probe_input_format(pd, 1);
560     /* if still no format found, error */
566     /* XXX: suppress this hack for redirectors */
567 #ifdef CONFIG_NETWORK
568     if (fmt == &redir_demux) {
569         err = redir_open(ic_ptr, pb);
575     /* check filename in case of an image number is expected */
576     if (fmt->flags & AVFMT_NEEDNUMBER) {
577         if (filename_number_test(filename) < 0) {
578             err = AVERROR_NUMEXPECTED;
582     err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
594 /*******************************************************/
597  * Read a transport packet from a media file. This function is
598  * absolete and should never be used. Use av_read_frame() instead.
600  * @param s media file handle
601  * @param pkt is filled
602  * @return 0 if OK. AVERROR_xxx if error.
604 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
606     return s->iformat->read_packet(s, pkt);
609 /**********************************************************/
611 /* get the number of samples of an audio frame. Return (-1) if error */
/* Sample count: derived from byte size for PCM (per-sample width known),
 * from bit_rate for other variable codecs, or taken directly from
 * enc->frame_size for fixed-frame codecs. Guards against channels==0
 * and bit_rate==0 (error returns elided in this listing). */
612 static int get_audio_frame_size(AVCodecContext *enc, int size)
616     if (enc->frame_size <= 1) {
617         /* specific hack for pcm codecs because no frame size is
619         switch(enc->codec_id) {
620         case CODEC_ID_PCM_S16LE:
621         case CODEC_ID_PCM_S16BE:
622         case CODEC_ID_PCM_U16LE:
623         case CODEC_ID_PCM_U16BE:
624             if (enc->channels == 0)
/* 16-bit PCM: 2 bytes per sample per channel. */
626             frame_size = size / (2 * enc->channels);
628         case CODEC_ID_PCM_S8:
629         case CODEC_ID_PCM_U8:
630         case CODEC_ID_PCM_MULAW:
631         case CODEC_ID_PCM_ALAW:
632             if (enc->channels == 0)
634             frame_size = size / (enc->channels);
637             /* used for example by ADPCM codecs */
638             if (enc->bit_rate == 0)
640             frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
644         frame_size = enc->frame_size;
650 /* return the frame duration in seconds, return 0 if not available */
/* Output is the rational *pnum / *pden. Video: prefer the stream time base
 * when plausible (num*1000 > den heuristic), else the codec time base,
 * scaled up for repeated fields/frames; audio: samples / sample_rate. */
651 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
652                                    AVCodecParserContext *pc, AVPacket *pkt)
658     switch(st->codec.codec_type) {
659     case CODEC_TYPE_VIDEO:
660         if(st->time_base.num*1000LL > st->time_base.den){
661             *pnum = st->time_base.num;
662             *pden = st->time_base.den;
663         }else if(st->codec.time_base.num*1000LL > st->codec.time_base.den){
664             *pnum = st->codec.time_base.num;
665             *pden = st->codec.time_base.den;
666             if (pc && pc->repeat_pict) {
668                 *pnum = (*pnum) * (2 + pc->repeat_pict);
672     case CODEC_TYPE_AUDIO:
673         frame_size = get_audio_frame_size(&st->codec, pkt->size);
677         *pden = st->codec.sample_rate;
/* True when every frame of the codec is independently decodable; the
 * video case lists known intra-only codec IDs (list partially elided). */
684 static int is_intra_only(AVCodecContext *enc){
685     if(enc->codec_type == CODEC_TYPE_AUDIO){
687     }else if(enc->codec_type == CODEC_TYPE_VIDEO){
688         switch(enc->codec_id){
690         case CODEC_ID_MJPEGB:
692         case CODEC_ID_RAWVIDEO:
693         case CODEC_ID_DVVIDEO:
694         case CODEC_ID_HUFFYUV:
695         case CODEC_ID_FFVHUFF:
/* Reconstruct a full timestamp from its low lsb_bits bits by choosing the
 * value nearest to last_ts (handles timestamp wrap-around). */
706 static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
707     int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
708     int64_t delta= last_ts - mask/2;
709     return ((lsb - delta)&mask) + delta;
/* Fill in missing pts/dts/duration/key-flag on a demuxed packet, tracking
 * per-stream decode state in st->cur_dts / last_IP_*. Many statements are
 * elided in this listing; comments describe only the surviving fragments. */
712 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
713                                AVCodecParserContext *pc, AVPacket *pkt)
715     int num, den, presentation_delayed;
716     /* handle wrapping */
/* Undo pts_wrap_bits truncation using the last known dts as anchor. */
717     if(st->cur_dts != AV_NOPTS_VALUE){
718         if(pkt->pts != AV_NOPTS_VALUE)
719             pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
720         if(pkt->dts != AV_NOPTS_VALUE)
721             pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
/* Missing duration: derive it from the frame-rate/sample-rate rational. */
724     if (pkt->duration == 0) {
725         compute_frame_duration(&num, &den, st, pc, pkt);
727             pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
731     if(is_intra_only(&st->codec))
732         pkt->flags |= PKT_FLAG_KEY;
734     /* do we have a video B frame ? */
735     presentation_delayed = 0;
736     if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
737         /* XXX: need has_b_frame, but cannot get it if the codec is
739         if ((  st->codec.codec_id == CODEC_ID_H264
740             || st->codec.has_b_frames) &&
741             pc && pc->pict_type != FF_B_TYPE)
742             presentation_delayed = 1;
743         /* this may be redundant, but it shouldnt hurt */
744         if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
745             presentation_delayed = 1;
748     if(st->cur_dts == AV_NOPTS_VALUE){
749         if(presentation_delayed) st->cur_dts = -pkt->duration;
750         else                     st->cur_dts = 0;
753 //    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
754     /* interpolate PTS and DTS if they are not present */
755     if (presentation_delayed) {
756         /* DTS = decompression time stamp */
757         /* PTS = presentation time stamp */
758         if (pkt->dts == AV_NOPTS_VALUE) {
759             /* if we know the last pts, use it */
760             if(st->last_IP_pts != AV_NOPTS_VALUE)
761                 st->cur_dts = pkt->dts = st->last_IP_pts;
763                 pkt->dts = st->cur_dts;
765             st->cur_dts = pkt->dts;
767         /* this is tricky: the dts must be incremented by the duration
768            of the frame we are displaying, i.e. the last I or P frame */
769         if (st->last_IP_duration == 0)
770             st->cur_dts += pkt->duration;
772             st->cur_dts += st->last_IP_duration;
773         st->last_IP_duration  = pkt->duration;
774         st->last_IP_pts= pkt->pts;
775         /* cannot compute PTS if not present (we can compute it only
776            by knowing the futur */
777     } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
/* Heuristic: if shifting pts forward by one duration lines it up much
 * better with cur_dts, assume an off-by-one-frame pts and correct it. */
778         if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
779             int64_t old_diff= ABS(st->cur_dts - pkt->duration - pkt->pts);
780             int64_t new_diff= ABS(st->cur_dts - pkt->pts);
781             if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
782                 pkt->pts += pkt->duration;
783 //                av_log(NULL, AV_LOG_DEBUG, "id:%d old:%Ld new:%Ld dur:%d cur:%Ld size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
787         /* presentation is not delayed : PTS and DTS are the same */
788         if (pkt->pts == AV_NOPTS_VALUE) {
789             if (pkt->dts == AV_NOPTS_VALUE) {
790                 pkt->pts = st->cur_dts;
791                 pkt->dts = st->cur_dts;
794                 st->cur_dts = pkt->dts;
798             st->cur_dts = pkt->pts;
801         st->cur_dts += pkt->duration;
803 //    av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
808     /* key frame computation */
809     switch(st->codec.codec_type) {
810     case CODEC_TYPE_VIDEO:
/* NOTE(review): pc is dereferenced here but may be NULL on the
 * no-parser path (see line 841 caller) -- verify against upstream. */
811         if (pc->pict_type == FF_I_TYPE)
812             pkt->flags |= PKT_FLAG_KEY;
814     case CODEC_TYPE_AUDIO:
815         pkt->flags |= PKT_FLAG_KEY;
/* Destructor for packets whose data is owned elsewhere: clear, don't free. */
823 void av_destruct_packet_nofree(AVPacket *pkt)
825     pkt->data = NULL; pkt->size = 0;
/* Core frame reader: either passes raw packets through, or feeds the
 * current packet's bytes through the stream's parser until a complete
 * frame is produced; refills s->cur_pkt via av_read_packet() as needed.
 * Loop/return plumbing is partially elided in this listing. */
828 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
834         /* select current input stream component */
837             if (!st->need_parsing || !st->parser) {
838                 /* no parsing needed: we just output the packet as is */
839                 /* raw data support */
841                 compute_pkt_fields(s, st, NULL, pkt);
844             } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
845                 len = av_parser_parse(st->parser, &st->codec, &pkt->data, &pkt->size,
846                                       s->cur_ptr, s->cur_len,
847                                       s->cur_pkt.pts, s->cur_pkt.dts);
/* Timestamps are consumed by the first parse call; don't reuse them. */
848                 s->cur_pkt.pts = AV_NOPTS_VALUE;
849                 s->cur_pkt.dts = AV_NOPTS_VALUE;
850                 /* increment read pointer */
854                 /* return packet if any */
858                     pkt->stream_index = st->index;
859                     pkt->pts = st->parser->pts;
860                     pkt->dts = st->parser->dts;
/* Parser owns the bytes: hand out a non-freeing destructor. */
861                     pkt->destruct = av_destruct_packet_nofree;
862                     compute_pkt_fields(s, st, st->parser, pkt);
867                 av_free_packet(&s->cur_pkt);
871         /* read next packet */
872         ret = av_read_packet(s, &s->cur_pkt);
/* EOF: flush each parser once to emit any buffered final frame. */
876                 /* return the last frames, if any */
877                 for(i = 0; i < s->nb_streams; i++) {
879                     if (st->parser && st->need_parsing) {
880                         av_parser_parse(st->parser, &st->codec,
881                                         &pkt->data, &pkt->size,
883                                         AV_NOPTS_VALUE, AV_NOPTS_VALUE);
888             /* no more packets: really terminates parsing */
892         st = s->streams[s->cur_pkt.stream_index];
895         s->cur_ptr = s->cur_pkt.data;
896         s->cur_len = s->cur_pkt.size;
897         if (st->need_parsing && !st->parser) {
898             st->parser = av_parser_init(st->codec.codec_id);
900                 /* no parser available : just output the raw packets */
901                 st->need_parsing = 0;
909  * Return the next frame of a stream. The returned packet is valid
910  * until the next av_read_frame() or until av_close_input_file() and
911  * must be freed with av_free_packet. For video, the packet contains
912  * exactly one frame. For audio, it contains an integer number of
913  * frames if each frame has a known fixed size (e.g. PCM or ADPCM
914  * data). If the audio frames have a variable size (e.g. MPEG audio),
915  * then it contains one frame.
917  * pkt->pts, pkt->dts and pkt->duration are always set to correct
918  * values in AV_TIME_BASE unit (and guessed if the format cannot
919  * provided them). pkt->pts can be AV_NOPTS_VALUE if the video format
920  * has B frames, so it is better to rely on pkt->dts if you do not
921  * decompress the payload.
923  * Return 0 if OK, < 0 if error or end of file.
/* Drain the buffered-packet list first, then fall through to the parser. */
925 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
929     pktl = s->packet_buffer;
931         /* read packet from packet buffer, if there is data */
933         s->packet_buffer = pktl->next;
937         return av_read_frame_internal(s, pkt);
941 /* XXX: suppress the packet queue */
/* Free every queued packet and its list node. */
942 static void flush_packet_queue(AVFormatContext *s)
947         pktl = s->packet_buffer;
950         s->packet_buffer = pktl->next;
951         av_free_packet(&pktl->pkt);
956 /*******************************************************/
/* Prefer a video stream as the default; falls back per elided code below. */
959 int av_find_default_stream_index(AVFormatContext *s)
964     if (s->nb_streams <= 0)
966     for(i = 0; i < s->nb_streams; i++) {
968         if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
975 /* flush the frame reader */
/* Reset all demux-side read state before a seek: drop queued packets,
 * free the in-flight packet, close parsers and clear timing state. */
976 static void av_read_frame_flush(AVFormatContext *s)
981     flush_packet_queue(s);
983     /* free previous packet */
985         if (s->cur_st->parser)
986             av_free_packet(&s->cur_pkt);
993     /* for each stream, reset read state */
994     for(i = 0; i < s->nb_streams; i++) {
998             av_parser_close(st->parser);
1001         st->last_IP_pts = AV_NOPTS_VALUE;
1002         st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
1007  * updates cur_dts of all streams based on given timestamp and AVStream.
1008  * stream ref_st unchanged, others set cur_dts in their native timebase
1009  * only needed for timestamp wrapping or if (dts not set and pts!=dts)
1010  * @param timestamp new dts expressed in time_base of param ref_st
1011  * @param ref_st reference stream giving time_base of param timestamp
1013 static void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1016     for(i = 0; i < s->nb_streams; i++) {
1017         AVStream *st = s->streams[i];
/* Rescale from ref_st's time base into this stream's time base. */
1019         st->cur_dts = av_rescale(timestamp,
1020                                  st->time_base.den * (int64_t)ref_st->time_base.num,
1021                                  st->time_base.num * (int64_t)ref_st->time_base.den);
1026  * add a index entry into a sorted list updateing if it is already there.
1027  * @param timestamp timestamp in the timebase of the given stream
1029 int av_add_index_entry(AVStream *st,
1030                        int64_t pos, int64_t timestamp, int distance, int flags)
1032     AVIndexEntry *entries, *ie;
/* Overflow guard before growing the entries array by one. */
1035     if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1038     entries = av_fast_realloc(st->index_entries,
1039                               &st->index_entries_allocated_size,
1040                               (st->nb_index_entries + 1) *
1041                               sizeof(AVIndexEntry));
1045     st->index_entries= entries;
/* Find the insertion point (ANY flag: nearest entry, key or not). */
1047     index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
/* Not found: append at the tail; list must stay sorted. */
1050         index= st->nb_index_entries++;
1051         ie= &entries[index];
1052         assert(index==0 || ie[-1].timestamp < timestamp);
1054         ie= &entries[index];
1055         if(ie->timestamp != timestamp){
1056             if(ie->timestamp <= timestamp)
/* Shift the tail up to make room for the new entry at 'index'. */
1058             memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1059             st->nb_index_entries++;
1060         }else if(ie->pos == pos && distance < ie->min_distance) //dont reduce the distance
1061             distance= ie->min_distance;
1065     ie->timestamp = timestamp;
1066     ie->min_distance= distance;
1072 /* build an index for raw streams using a parser */
/* Sequentially read every frame from data_offset and record a keyframe
 * index entry (stream 0 only) using the parser's frame offset. */
1073 static void av_build_index_raw(AVFormatContext *s)
1075     AVPacket pkt1, *pkt = &pkt1;
1080     av_read_frame_flush(s);
1081     url_fseek(&s->pb, s->data_offset, SEEK_SET);
1084         ret = av_read_frame(s, pkt);
1087         if (pkt->stream_index == 0 && st->parser &&
1088             (pkt->flags & PKT_FLAG_KEY)) {
1089             av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1090                                0, AVINDEX_KEYFRAME);
1092         av_free_packet(pkt);
1096 /* return TRUE if we deal with a raw stream (raw codec data and
/* Single-stream files whose only stream needs parsing qualify as raw. */
1098 static int is_raw_stream(AVFormatContext *s)
1102     if (s->nb_streams != 1)
1105     if (!st->need_parsing)
1111  * gets the index for a specific timestamp.
1112  * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond to
1113  *              the timestamp which is <= the requested one, if backward is 0
1114  *              then it will be >=
1115  *              if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
1116  * @return < 0 if no such timestamp could be found
/* Binary search over the sorted entries; when keyframes are required,
 * walk linearly from the hit until one is found. */
1118 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1121     AVIndexEntry *entries= st->index_entries;
1122     int nb_entries= st->nb_index_entries;
1131         timestamp = entries[m].timestamp;
1132         if(timestamp >= wanted_timestamp)
1134         if(timestamp <= wanted_timestamp)
1137     m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1139     if(!(flags & AVSEEK_FLAG_ANY)){
1140         while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1141             m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1153  * Does a binary search using av_index_search_timestamp() and AVCodec.read_timestamp().
1154  * this isnt supposed to be called directly by a user application, but by demuxers
1155  * @param target_ts target timestamp in the time base of the given stream
1156  * @param stream_index stream number
/* Bracket target_ts between (pos_min, ts_min) and (pos_max, ts_max), seeded
 * from the index when possible, then narrow by interpolation -> bisection
 * -> linear search using the demuxer's read_timestamp(). Several loop and
 * branch lines are elided in this listing. */
1158 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1159     AVInputFormat *avif= s->iformat;
1160     int64_t pos_min, pos_max, pos, pos_limit;
1161     int64_t ts_min, ts_max, ts;
1163     int index, no_change;
1166     if (stream_index < 0)
1170     av_log(s, AV_LOG_DEBUG, "read_seek: %d %lld\n", stream_index, target_ts);
1174     ts_min= AV_NOPTS_VALUE;
1175     pos_limit= -1; //gcc falsely says it may be uninitalized
1177     st= s->streams[stream_index];
1178     if(st->index_entries){
/* Seed the lower bound from the nearest cached keyframe <= target. */
1181         index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1182         index= FFMAX(index, 0);
1183         e= &st->index_entries[index];
1185         if(e->timestamp <= target_ts || e->pos == e->min_distance){
1187             ts_min= e->timestamp;
1189             av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%llx dts_min=%lld\n",
/* Seed the upper bound from the nearest cached keyframe >= target. */
1196         index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1197         assert(index < st->nb_index_entries);
1199             e= &st->index_entries[index];
1200             assert(e->timestamp >= target_ts);
1202             ts_max= e->timestamp;
1203             pos_limit= pos_max - e->min_distance;
1205             av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%llx pos_limit=0x%llx dts_max=%lld\n",
1206                    pos_max,pos_limit, ts_max);
1211     if(ts_min == AV_NOPTS_VALUE){
1212         pos_min = s->data_offset;
1213         ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1214         if (ts_min == AV_NOPTS_VALUE)
/* No cached upper bound: probe backwards from EOF in growing steps. */
1218     if(ts_max == AV_NOPTS_VALUE){
1220         pos_max = url_fsize(&s->pb) - 1;
1223             ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
1225         }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1226         if (ts_max == AV_NOPTS_VALUE)
1230             int64_t tmp_pos= pos_max + 1;
1231             int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1232             if(tmp_ts == AV_NOPTS_VALUE)
1241     while (pos_min < pos_limit) {
1243         av_log(s, AV_LOG_DEBUG, "pos_min=0x%llx pos_max=0x%llx dts_min=%lld dts_max=%lld\n",
1247         assert(pos_limit <= pos_max);
1250             int64_t approximate_keyframe_distance= pos_max - pos_limit;
1251             // interpolate position (better than dichotomy)
1252             pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1253                 + pos_min - approximate_keyframe_distance;
1254         }else if(no_change==1){
1255             // bisection, if interpolation failed to change min or max pos last time
1256             pos = (pos_min + pos_limit)>>1;
1258             // linear search if bisection failed, can only happen if there are very few or no keframes between min/max
1263         else if(pos > pos_limit)
1267         ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1273         av_log(s, AV_LOG_DEBUG, "%Ld %Ld %Ld / %Ld %Ld %Ld target:%Ld limit:%Ld start:%Ld noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1275         assert(ts != AV_NOPTS_VALUE);
1276         if (target_ts <= ts) {
1277             pos_limit = start_pos - 1;
1281         if (target_ts >= ts) {
1287     pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1288     ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min  : ts_max;
1291     ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
/* NOTE(review): ts_max is computed from &pos_min here, not &pos_max --
 * looks like a copy-paste slip in this (debug?) fragment; verify against
 * the upstream FFmpeg source before relying on it. */
1293     ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1294     av_log(s, AV_LOG_DEBUG, "pos=0x%llx %lld<=%lld<=%lld\n",
1295            pos, ts_min, target_ts, ts_max);
/* Commit: seek the IO layer and resync every stream's cur_dts. */
1298     url_fseek(&s->pb, pos, SEEK_SET);
1300     av_update_cur_dts(s, st, ts);
/* Byte-position seek: clamp 'pos' to [data_offset, file end] and seek the
 * IO layer directly; 'ts' handling lines are elided. */
1305 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1306     int64_t pos_min, pos_max;
1310     if (stream_index < 0)
1313     st= s->streams[stream_index];
1316     pos_min = s->data_offset;
1317     pos_max = url_fsize(&s->pb) - 1;
1319     if     (pos < pos_min) pos= pos_min;
1320     else if(pos > pos_max) pos= pos_max;
1322     url_fseek(&s->pb, pos, SEEK_SET);
1325     av_update_cur_dts(s, st, ts);
/* Index-based seek: build the index lazily for raw streams, look up the
 * entry for 'timestamp', then seek to its byte position. */
1330 static int av_seek_frame_generic(AVFormatContext *s,
1331                                  int stream_index, int64_t timestamp, int flags)
1337     if (!s->index_built) {
1338         if (is_raw_stream(s)) {
1339             av_build_index_raw(s);
1346     st = s->streams[stream_index];
1347     index = av_index_search_timestamp(st, timestamp, flags);
1351     /* now we have found the index, we can seek */
1352     ie = &st->index_entries[index];
1353     av_read_frame_flush(s);
1354     url_fseek(&s->pb, ie->pos, SEEK_SET);
1356     av_update_cur_dts(s, st, ie->timestamp);
1362  * Seek to the key frame at timestamp.
1363  * 'timestamp' in 'stream_index'.
1364  * @param stream_index If stream_index is (-1), a default
1365  * stream is selected, and timestamp is automatically converted
1366  * from AV_TIME_BASE units to the stream specific time_base.
1367  * @param timestamp timestamp in AVStream.time_base units
1368  * @param flags flags which select direction and seeking mode
1369  * @return >= 0 on success
/* Dispatcher: byte seek -> format-specific read_seek -> binary search via
 * read_timestamp -> generic index seek, in that order of preference. */
1371 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1376     av_read_frame_flush(s);
1378     if(flags & AVSEEK_FLAG_BYTE)
1379         return av_seek_frame_byte(s, stream_index, timestamp, flags);
1381     if(stream_index < 0){
1382         stream_index= av_find_default_stream_index(s);
1383         if(stream_index < 0)
1386         st= s->streams[stream_index];
1387        /* timestamp for default must be expressed in AV_TIME_BASE units */
1388         timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1390     st= s->streams[stream_index];
1392     /* first, we try the format specific seek */
1393     if (s->iformat->read_seek)
1394         ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1401     if(s->iformat->read_timestamp)
1402         return av_seek_frame_binary(s, stream_index, timestamp, flags);
1404         return av_seek_frame_generic(s, stream_index, timestamp, flags);
1407 /*******************************************************/
1409 /* return TRUE if the stream has accurate timings for at least one component */
1410 static int av_has_timings(AVFormatContext *ic)
1415     for(i = 0;i < ic->nb_streams; i++) {
1416         st = ic->streams[i];
1417         if (st->start_time != AV_NOPTS_VALUE &&
1418             st->duration != AV_NOPTS_VALUE)
1424 /* estimate the stream timings from the one of each components. Also
1425    compute the global bitrate if possible */
/* Container start_time = min over streams; duration = max end - min start,
 * both rescaled to AV_TIME_BASE; bit rate derived from file size/duration. */
1426 static void av_update_stream_timings(AVFormatContext *ic)
1428     int64_t start_time, start_time1, end_time, end_time1;
1432     start_time = MAXINT64;
1433     end_time = MININT64;
1434     for(i = 0;i < ic->nb_streams; i++) {
1435         st = ic->streams[i];
1436         if (st->start_time != AV_NOPTS_VALUE) {
1437             start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1438             if (start_time1 < start_time)
1439                 start_time = start_time1;
1440             if (st->duration != AV_NOPTS_VALUE) {
1441                 end_time1 = start_time1
1442                           + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1443                 if (end_time1 > end_time)
1444                     end_time = end_time1;
1448     if (start_time != MAXINT64) {
1449         ic->start_time = start_time;
1450         if (end_time != MININT64) {
1451             ic->duration = end_time - start_time;
1452             if (ic->file_size > 0) {
1453                 /* compute the bit rate */
1454                 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1455                     (double)ic->duration;
/* Push container-level start_time/duration down into streams lacking them. */
1462 static void fill_all_stream_timings(AVFormatContext *ic)
1467     av_update_stream_timings(ic);
1468     for(i = 0;i < ic->nb_streams; i++) {
1469         st = ic->streams[i];
1470         if (st->start_time == AV_NOPTS_VALUE) {
1471             if(ic->start_time != AV_NOPTS_VALUE)
1472                 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1473             if(ic->duration != AV_NOPTS_VALUE)
1474                 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Last-resort duration estimate: sum per-stream bit rates, then
 * duration ~= 8 * file_size / bit_rate (per stream time base). */
1479 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1481     int64_t filesize, duration;
1485     /* if bit_rate is already set, we believe it */
1486     if (ic->bit_rate == 0) {
1488         for(i=0;i<ic->nb_streams;i++) {
1489             st = ic->streams[i];
1490             bit_rate += st->codec.bit_rate;
1492         ic->bit_rate = bit_rate;
1495     /* if duration is already set, we believe it */
1496     if (ic->duration == AV_NOPTS_VALUE &&
1497         ic->bit_rate != 0 &&
1498         ic->file_size != 0) {
1499         filesize = ic->file_size;
1501             for(i = 0; i < ic->nb_streams; i++) {
1502                 st = ic->streams[i];
1503                 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1504                 if (st->start_time == AV_NOPTS_VALUE ||
1505                     st->duration == AV_NOPTS_VALUE) {
1507                     st->duration = duration;
1514 #define DURATION_MAX_READ_SIZE 250000
1516 /* only usable for MPEG-PS streams */
/* Two-pass estimate: read up to DURATION_MAX_READ_SIZE bytes from the file
 * head to collect each stream's first pts (start_time), then from near the
 * tail to collect last pts and derive duration; finally rewinds to 0. */
1517 static void av_estimate_timings_from_pts(AVFormatContext *ic)
1519     AVPacket pkt1, *pkt = &pkt1;
1521     int read_size, i, ret;
1522     int64_t start_time, end_time, end_time1;
1523     int64_t filesize, offset, duration;
1525     /* free previous packet */
1526     if (ic->cur_st && ic->cur_st->parser)
1527         av_free_packet(&ic->cur_pkt);
1530     /* flush packet queue */
1531     flush_packet_queue(ic);
1533     for(i=0;i<ic->nb_streams;i++) {
1534         st = ic->streams[i];
1536             av_parser_close(st->parser);
1541     /* we read the first packets to get the first PTS (not fully
1542        accurate, but it is enough now) */
1543     url_fseek(&ic->pb, 0, SEEK_SET);
1546         if (read_size >= DURATION_MAX_READ_SIZE)
1548         /* if all info is available, we can stop */
1549         for(i = 0;i < ic->nb_streams; i++) {
1550             st = ic->streams[i];
1551             if (st->start_time == AV_NOPTS_VALUE)
1554         if (i == ic->nb_streams)
1557         ret = av_read_packet(ic, pkt);
1560         read_size += pkt->size;
1561         st = ic->streams[pkt->stream_index];
1562         if (pkt->pts != AV_NOPTS_VALUE) {
1563             if (st->start_time == AV_NOPTS_VALUE)
1564                 st->start_time = pkt->pts;
1566         av_free_packet(pkt);
1569     /* estimate the end time (duration) */
1570     /* XXX: may need to support wrapping */
1571     filesize = ic->file_size;
1572     offset = filesize - DURATION_MAX_READ_SIZE;
1576     url_fseek(&ic->pb, offset, SEEK_SET);
1579         if (read_size >= DURATION_MAX_READ_SIZE)
1581         /* if all info is available, we can stop */
1582         for(i = 0;i < ic->nb_streams; i++) {
1583             st = ic->streams[i];
1584             if (st->duration == AV_NOPTS_VALUE)
1587         if (i == ic->nb_streams)
1590         ret = av_read_packet(ic, pkt);
1593         read_size += pkt->size;
1594         st = ic->streams[pkt->stream_index];
1595         if (pkt->pts != AV_NOPTS_VALUE) {
1596             end_time = pkt->pts;
1597             duration = end_time - st->start_time;
1599                 if (st->duration == AV_NOPTS_VALUE ||
1600                     st->duration < duration)
1601                     st->duration = duration;
1604         av_free_packet(pkt);
1607     fill_all_stream_timings(ic);
/* Leave the file rewound so normal demuxing starts from the beginning. */
1609     url_fseek(&ic->pb, 0, SEEK_SET);
/* Top-level timing estimation dispatcher. Picks the best available method:
   accurate PTS scan for seekable MPEG-PS/TS files, otherwise stream-level
   timings if at least one stream has them, otherwise a bit-rate based
   guess. Finally updates the container-level start_time/duration. */
1612 static void av_estimate_timings(AVFormatContext *ic)
1616     /* get the file size, if possible */
1617     if (ic->iformat->flags & AVFMT_NOFILE) {
1620         file_size = url_fsize(&ic->pb);
1624     ic->file_size = file_size;
1626     if ((ic->iformat == &mpegps_demux || ic->iformat == &mpegts_demux) && file_size && !ic->pb.is_streamed) {
1627         /* get accurate estimate from the PTSes */
1628         av_estimate_timings_from_pts(ic);
1629     } else if (av_has_timings(ic)) {
1630         /* at least one components has timings - we use them for all
1632         fill_all_stream_timings(ic);
1634         /* less precise: use bit rate info */
1635         av_estimate_timings_from_bit_rate(ic);
1637     av_update_stream_timings(ic);
     /* debug dump of the per-stream and global timings */
1643         for(i = 0;i < ic->nb_streams; i++) {
1644             st = ic->streams[i];
1645             printf("%d: start_time: %0.3f duration: %0.3f\n",
1646                    i, (double)st->start_time / AV_TIME_BASE,
1647                    (double)st->duration / AV_TIME_BASE);
1649         printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1650                (double)ic->start_time / AV_TIME_BASE,
1651                (double)ic->duration / AV_TIME_BASE,
1652                ic->bit_rate / 1000);
/* Return non-zero when enough codec parameters are known to use the stream:
   audio requires sample_rate; video requires width and a valid pix_fmt. */
1657 static int has_codec_parameters(AVCodecContext *enc)
1660     switch(enc->codec_type) {
1661     case CODEC_TYPE_AUDIO:
1662         val = enc->sample_rate;
1664     case CODEC_TYPE_VIDEO:
1665         val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
/* Open the decoder for 'st' and decode one frame of 'data' so that the
   decoder fills in the missing codec parameters (dimensions, sample rate,
   pix_fmt, ...). Used as a last resort by av_find_stream_info() because
   it is slower and uses more memory than header parsing. */
1674 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1678     int got_picture, ret;
1681     codec = avcodec_find_decoder(st->codec.codec_id);
1684     ret = avcodec_open(&st->codec, codec);
1688     if(!has_codec_parameters(&st->codec)){
1689         switch(st->codec.codec_type) {
1690         case CODEC_TYPE_VIDEO:
1691             ret = avcodec_decode_video(&st->codec, &picture,
1692                                        &got_picture, (uint8_t *)data, size);
1694         case CODEC_TYPE_AUDIO:
1695             samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1698             ret = avcodec_decode_audio(&st->codec, samples,
1699                                        &got_picture, (uint8_t *)data, size);
     /* close again: callers only need the filled-in parameters */
1707     avcodec_close(&st->codec);
1711 /* absolute maximum size we read until we abort */
1712 #define MAX_READ_SIZE 5000000
1714 /* maximum duration until we stop analysing the stream */
1715 #define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 1.0))
1718  * Read the beginning of a media file to get stream information. This
1719  * is useful for file formats with no headers such as MPEG. This
1720  * function also computes the real frame rate in case of MPEG-2 repeat
1723  * @param ic media file handle
1724  * @return >=0 if OK. AVERROR_xxx if error.
1725  * @todo let the user decide somehow what information is needed so we don't waste time getting stuff the user doesn't need
1727 int av_find_stream_info(AVFormatContext *ic)
1729     int i, count, ret, read_size;
1731     AVPacket pkt1, *pkt;
1732     AVPacketList *pktl=NULL, **ppktl;
     /* per-stream DTS history used to estimate the real frame duration */
1733     int64_t last_dts[MAX_STREAMS];
1734     int64_t duration_sum[MAX_STREAMS];
1735     int duration_count[MAX_STREAMS]={0};
     /* pass 1: set up parsers; video streams inherit the stream time base
        when the codec one is unset */
1737     for(i=0;i<ic->nb_streams;i++) {
1738         st = ic->streams[i];
1739         if(st->codec.codec_type == CODEC_TYPE_VIDEO){
1740 /*            if(!st->time_base.num)
1742             if(!st->codec.time_base.num)
1743                 st->codec.time_base= st->time_base;
1745         //only for the split stuff
1747             st->parser = av_parser_init(st->codec.codec_id);
1751     for(i=0;i<MAX_STREAMS;i++){
1752         last_dts[i]= AV_NOPTS_VALUE;
1753         duration_sum[i]= INT64_MAX;
1758     ppktl = &ic->packet_buffer;
     /* main loop: read packets until every stream has usable parameters
        or we hit a read/size/duration limit */
1760         /* check if one codec still needs to be handled */
1761         for(i=0;i<ic->nb_streams;i++) {
1762             st = ic->streams[i];
1763             if (!has_codec_parameters(&st->codec))
1765             /* variable fps and no guess at the real fps */
1766             if(   st->codec.time_base.den >= 1000LL*st->codec.time_base.num
1767                && duration_count[i]<20 && st->codec.codec_type == CODEC_TYPE_VIDEO)
1769             if(st->parser && st->parser->parser->split && !st->codec.extradata)
1772         if (i == ic->nb_streams) {
1773             /* NOTE: if the format has no header, then we need to read
1774                some packets to get most of the streams, so we cannot
1776             if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1777                 /* if we found the info for all the codecs, we can stop */
1782         /* we did not get all the codec info, but we read too much data */
1783         if (read_size >= MAX_READ_SIZE) {
1789         /* NOTE: a new stream can be added there if no header in file
1790            (AVFMTCTX_NOHEADER) */
1791         ret = av_read_frame_internal(ic, &pkt1);
1794             ret = -1; /* we could not have all the codec parameters before EOF */
1795             for(i=0;i<ic->nb_streams;i++) {
1796                 st = ic->streams[i];
1797                 if (!has_codec_parameters(&st->codec))
1800             if (i == ic->nb_streams)
     /* buffer the packet so a later av_read_frame() can replay it */
1805         pktl = av_mallocz(sizeof(AVPacketList));
1807             ret = AVERROR_NOMEM;
1811         /* add the packet in the buffered packet list */
1813         ppktl = &pktl->next;
1818         /* duplicate the packet */
1819         if (av_dup_packet(pkt) < 0) {
1820             ret = AVERROR_NOMEM;
1824         read_size += pkt->size;
1826         st = ic->streams[pkt->stream_index];
1827         st->codec_info_duration += pkt->duration;
1828         if (pkt->duration != 0)
1829             st->codec_info_nb_frames++;
     /* update the frame-duration estimate from consecutive DTS deltas;
        a delta much smaller than the running average restarts the estimate */
1832             int index= pkt->stream_index;
1833             int64_t last= last_dts[index];
1834             int64_t duration= pkt->dts - last;
1836             if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1837                 if(duration*duration_count[index]*10/9 < duration_sum[index]){
1838                     duration_sum[index]= duration;
1839                     duration_count[index]=1;
1841                     int factor= av_rescale(duration, duration_count[index], duration_sum[index]);
1842                     duration_sum[index] += duration;
1843                     duration_count[index]+= factor;
1845                 if(st->codec_info_nb_frames == 0)
1846                     st->codec_info_duration += duration;
1848             last_dts[pkt->stream_index]= pkt->dts;
     /* let the parser extract global extradata (e.g. sequence headers)
        out of the packet payload */
1850         if(st->parser && st->parser->parser->split && !st->codec.extradata){
1851             int i= st->parser->parser->split(&st->codec, pkt->data, pkt->size);
1853                 st->codec.extradata_size= i;
1854                 st->codec.extradata= av_malloc(st->codec.extradata_size);
1855                 memcpy(st->codec.extradata, pkt->data, st->codec.extradata_size);
1859         /* if still no information, we try to open the codec and to
1860            decompress the frame. We try to avoid that in most cases as
1861            it takes longer and uses more memory. For MPEG4, we need to
1862            decompress for Quicktime. */
1863         if (!has_codec_parameters(&st->codec) /*&&
1864             (st->codec.codec_id == CODEC_ID_FLV1 ||
1865              st->codec.codec_id == CODEC_ID_H264 ||
1866              st->codec.codec_id == CODEC_ID_H263 ||
1867              st->codec.codec_id == CODEC_ID_H261 ||
1868              st->codec.codec_id == CODEC_ID_VORBIS ||
1869              st->codec.codec_id == CODEC_ID_MJPEG ||
1870              st->codec.codec_id == CODEC_ID_PNG ||
1871              st->codec.codec_id == CODEC_ID_PAM ||
1872              st->codec.codec_id == CODEC_ID_PGM ||
1873              st->codec.codec_id == CODEC_ID_PGMYUV ||
1874              st->codec.codec_id == CODEC_ID_PBM ||
1875              st->codec.codec_id == CODEC_ID_PPM ||
1876              st->codec.codec_id == CODEC_ID_SHORTEN ||
1877              (st->codec.codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1878             try_decode_frame(st, pkt->data, pkt->size);
     /* stop analysing this stream once enough of it has been seen */
1880         if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= MAX_STREAM_DURATION) {
     /* pass 2: per-stream fixups once reading is done */
1886     for(i=0;i<ic->nb_streams;i++) {
1887         st = ic->streams[i];
1888         if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
1889             if(st->codec.codec_id == CODEC_ID_RAWVIDEO && !st->codec.codec_tag && !st->codec.bits_per_sample)
1890                 st->codec.codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec.pix_fmt);
         /* derive r_frame_rate from the measured DTS deltas when the codec
            time base looks unreliable */
1892             if(duration_count[i] && st->codec.time_base.num*1000LL <= st->codec.time_base.den &&
1893                st->time_base.num*duration_sum[i]/duration_count[i]*1000LL > st->time_base.den){
1897                 num= st->time_base.den*duration_count[i];
1898                 den= st->time_base.num*duration_sum[i];
1900                 av_reduce(&fps1.num, &fps1.den, num*1001, den*1000, FFMAX(st->time_base.den, st->time_base.num)/4);
1901                 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, den, FFMAX(st->time_base.den, st->time_base.num)/4);
1902                 if(fps1.num < st->r_frame_rate.num && fps1.den == 1 && (fps1.num==24 || fps1.num==30)){ //FIXME better decision
1903                     st->r_frame_rate.num= fps1.num*1000;
1904                     st->r_frame_rate.den= fps1.den*1001;
1908             /* set real frame rate info */
1909             /* compute the real frame rate for telecine */
1910             if ((st->codec.codec_id == CODEC_ID_MPEG1VIDEO ||
1911                  st->codec.codec_id == CODEC_ID_MPEG2VIDEO) &&
1912                 st->codec.sub_id == 2) {
1913                 if (st->codec_info_nb_frames >= 20) {
1914                     float coded_frame_rate, est_frame_rate;
1915                     est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) /
1916                         (double)st->codec_info_duration ;
1917                     coded_frame_rate = 1.0/av_q2d(st->codec.time_base);
1919                     printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n",
1920                            coded_frame_rate, est_frame_rate);
1922                     /* if we detect that it could be a telecine, we
1923                        signal it. It would be better to do it at a
1924                        higher level as it can change in a film */
1925                     if (coded_frame_rate >= 24.97 &&
1926                         (est_frame_rate >= 23.5 && est_frame_rate < 24.5)) {
1927                         st->r_frame_rate = (AVRational){24000, 1001};
1931             /* if no real frame rate, use the codec one */
1932             if (!st->r_frame_rate.num){
1933                 st->r_frame_rate.num = st->codec.time_base.den;
1934                 st->r_frame_rate.den = st->codec.time_base.num;
1939     av_estimate_timings(ic);
1941     /* correct DTS for b frame streams with no timestamps */
1942     for(i=0;i<ic->nb_streams;i++) {
1943         st = ic->streams[i];
1944         if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
         /* shift buffered packets so DTS values are non-negative */
1946             ppktl = &ic->packet_buffer;
1948                 if(ppkt1->stream_index != i)
1950                 if(ppkt1->pkt->dts < 0)
1952                 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
1954                 ppkt1->pkt->dts -= delta;
1959             st->cur_dts -= delta;
1967 /*******************************************************/
1970 * start playing a network based stream (e.g. RTSP stream) at the
/* Start (resume) playback of a network based stream; delegates to the
   input format's read_play callback if it has one. */
1973 int av_read_play(AVFormatContext *s)
1975     if (!s->iformat->read_play)
1976         return AVERROR_NOTSUPP;
1977     return s->iformat->read_play(s);
1981 * pause a network based stream (e.g. RTSP stream). Use av_read_play()
/* Pause playback of a network based stream; delegates to the input
   format's read_pause callback if it has one. */
1984 int av_read_pause(AVFormatContext *s)
1986     if (!s->iformat->read_pause)
1987         return AVERROR_NOTSUPP;
1988     return s->iformat->read_pause(s);
1992 * Close a media file (but not its codecs)
1994 * @param s media file handle
1996 void av_close_input_file(AVFormatContext *s)
1998     int i, must_open_file;
2001     /* free previous packet */
2002     if (s->cur_st && s->cur_st->parser)
2003         av_free_packet(&s->cur_pkt);
     /* let the demuxer release its private state first */
2005     if (s->iformat->read_close)
2006         s->iformat->read_close(s);
2007     for(i=0;i<s->nb_streams;i++) {
2008         /* free all data in a stream component */
2011         av_parser_close(st->parser);
2013         av_free(st->index_entries);
2016     flush_packet_queue(s);
     /* only close the byte stream if this format actually opened a file */
2018     if (s->iformat->flags & AVFMT_NOFILE) {
2021     if (must_open_file) {
2024     av_freep(&s->priv_data);
2029 * Add a new stream to a media file. Can only be called in the
2030 * read_header function. If the flag AVFMTCTX_NOHEADER is in the
2031 * format context, then new streams can be added in read_packet too.
2034 * @param s media file handle
2035 * @param id file format dependent stream id
2037 AVStream *av_new_stream(AVFormatContext *s, int id)
2041     if (s->nb_streams >= MAX_STREAMS)
2044     st = av_mallocz(sizeof(AVStream));
2047     avcodec_get_context_defaults(&st->codec);
2049     /* no default bitrate if decoding */
2050     st->codec.bit_rate = 0;
2052     st->index = s->nb_streams;
     /* all timing fields start out unknown */
2054     st->start_time = AV_NOPTS_VALUE;
2055     st->duration = AV_NOPTS_VALUE;
2056     st->cur_dts = AV_NOPTS_VALUE;
2058     /* default pts settings is MPEG like: 33 bits, 90 kHz */
2059     av_set_pts_info(st, 33, 1, 90000);
2060     st->last_IP_pts = AV_NOPTS_VALUE;
2062     s->streams[s->nb_streams++] = st;
2066 /************************************************************/
2067 /* output media file */
/* Allocate the muxer's private data and pass the user-supplied
   AVFormatParameters to the output format's set_parameters callback. */
2069 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2073     if (s->oformat->priv_data_size > 0) {
2074         s->priv_data = av_mallocz(s->oformat->priv_data_size);
2076             return AVERROR_NOMEM;
2078         s->priv_data = NULL;
2080     if (s->oformat->set_parameters) {
2081         ret = s->oformat->set_parameters(s, ap);
2089 * allocate the stream private data and write the stream header to an
2092 * @param s media file handle
2093 * @return 0 if OK. AVERROR_xxx if error.
2095 int av_write_header(AVFormatContext *s)
2100     ret = s->oformat->write_header(s);
2104     /* init PTS generation: the fractional PTS counter advances by one
        sample (audio) or one frame (video) per packet */
2105     for(i=0;i<s->nb_streams;i++) {
2108         switch (st->codec.codec_type) {
2109         case CODEC_TYPE_AUDIO:
2110             av_frac_init(&st->pts, 0, 0,
2111                          (int64_t)st->time_base.num * st->codec.sample_rate);
2113         case CODEC_TYPE_VIDEO:
2114             av_frac_init(&st->pts, 0, 0,
2115                          (int64_t)st->time_base.num * st->codec.time_base.den);
2124 //FIXME merge with compute_pkt_fields
/* Fill in missing packet fields (duration, pts, dts) before muxing and
   validate timestamp monotonicity. Counterpart of the demux-side
   compute_pkt_fields (see FIXME above about merging them). */
2125 static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2126     int b_frames = FFMAX(st->codec.has_b_frames, st->codec.max_b_frames);
2127     int num, den, frame_size;
2129 //    av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);
2131 /*    if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2134     /* duration field */
2135     if (pkt->duration == 0) {
2136         compute_frame_duration(&num, &den, st, NULL, pkt);
2138             pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2142     //XXX/FIXME this is a temporary hack until all encoders output pts
2143     if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
2145 //        pkt->pts= st->cur_dts;
2146         pkt->pts= st->pts.val;
2149     //calculate dts from pts
     /* with B frames the DTS of the current packet is the PTS of the
        previous I/P frame (delay of one frame) */
2150     if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2152         if(st->last_IP_pts == AV_NOPTS_VALUE){
2153             st->last_IP_pts= -pkt->duration;
2155         if(st->last_IP_pts < pkt->pts){
2156             pkt->dts= st->last_IP_pts;
2157             st->last_IP_pts= pkt->pts;
     /* reject non-monotone DTS and pts < dts */
2164     if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
     /* NOTE(review): %Ld is a non-standard printf length modifier; %lld or
        PRId64 would be portable */
2165         av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %Ld >= %Ld\n", st->cur_dts, pkt->dts);
2168     if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2169         av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2173 //    av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
2174     st->cur_dts= pkt->dts;
2175     st->pts.val= pkt->dts;
     /* advance the fractional PTS counter for the next packet */
2178     switch (st->codec.codec_type) {
2179     case CODEC_TYPE_AUDIO:
2180         frame_size = get_audio_frame_size(&st->codec, pkt->size);
2182         /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2183            but it would be better if we had the real timestamps from the encoder */
2184         if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2185             av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2188     case CODEC_TYPE_VIDEO:
2189         av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec.time_base.num);
/* Mask pts/dts down to the stream's pts_wrap_bits so muxed timestamps
   wrap the same way the container expects (e.g. 33 bits for MPEG). */
2197 static void truncate_ts(AVStream *st, AVPacket *pkt){
2198     int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2201 //        pkt->dts= 0;  //this happens for low_delay=0 and b frames, FIXME, needs further investigation about what we should do here
2203     pkt->pts &= pts_mask;
2204     pkt->dts &= pts_mask;
2208 * Write a packet to an output media file. The packet shall contain
2209 * one audio or video frame.
2211 * @param s media file handle
2212 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2213 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2215 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
     /* fill in missing pts/dts/duration and validate timestamps */
2219     ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2223     truncate_ts(s->streams[pkt->stream_index], pkt);
2225     ret= s->oformat->write_packet(s, pkt);
     /* surface any byte-stream I/O error */
2227         ret= url_ferror(&s->pb);
2232 * interleave_packet implementation which will interleave per DTS.
/* Default interleave_packet implementation: buffer incoming packets in a
   list sorted by DTS (compared across streams via their time bases) and
   emit the head packet once every stream has at least one packet buffered
   (or when flushing). */
2234 static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2235     AVPacketList *pktl, **next_point, *this_pktl;
2237     int streams[MAX_STREAMS];
2240         AVStream *st= s->streams[ pkt->stream_index];
2242         assert(pkt->destruct != av_destruct_packet); //FIXME
2244         this_pktl = av_mallocz(sizeof(AVPacketList));
2245         this_pktl->pkt= *pkt;
2246         av_dup_packet(&this_pktl->pkt);
     /* insert into the buffer at the position keeping DTS order;
        the cross-multiplication compares DTS in a common time base */
2248         next_point = &s->packet_buffer;
2250             AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2251             int64_t left=  st2->time_base.num * (int64_t)st ->time_base.den;
2252             int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2253             if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2255             next_point= &(*next_point)->next;
2257         this_pktl->next= *next_point;
2258         *next_point= this_pktl;
     /* count how many distinct streams have buffered packets */
2261     memset(streams, 0, sizeof(streams));
2262     pktl= s->packet_buffer;
2264 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2265         if(streams[ pktl->pkt.stream_index ] == 0)
2267         streams[ pktl->pkt.stream_index ]++;
2271     if(s->nb_streams == stream_count || (flush && stream_count)){
2272         pktl= s->packet_buffer;
2275         s->packet_buffer= pktl->next;
     /* nothing ready to output yet */
2279         av_init_packet(out);
2285 * Interleaves a AVPacket correctly so it can be muxed.
2286 * @param out the interleaved packet will be output here
2287 * @param in the input packet
2288 * @param flush 1 if no further packets are available as input and all
2289 * remaining packets should be output
2290 * @return 1 if a packet was output, 0 if no packet could be output,
2291  * < 0 if an error occurred
/* Dispatch to the output format's own interleaving callback if present,
   otherwise fall back to the generic per-DTS interleaver. */
2293 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2294     if(s->oformat->interleave_packet)
2295         return s->oformat->interleave_packet(s, out, in, flush);
2297         return av_interleave_packet_per_dts(s, out, in, flush);
2301 * Writes a packet to an output media file ensuring correct interleaving.
2302 * The packet shall contain one audio or video frame.
2303 * If the packets are already correctly interleaved the application should
2304  * call av_write_frame() instead as it is slightly faster. It is also important
2305  * to keep in mind that completely non-interleaved input will need huge amounts
2306  * of memory to interleave with this, so it is preferable to interleave at the
2309 * @param s media file handle
2310 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2311 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2313 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2314     AVStream *st= s->streams[ pkt->stream_index];
2316     //FIXME/XXX/HACK drop zero sized packets
2317     if(st->codec.codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2320 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %Ld %Ld\n", pkt->size, pkt->dts, pkt->pts);
2321     if(compute_pkt_fields2(st, pkt) < 0)
2324     if(pkt->dts == AV_NOPTS_VALUE)
     /* drain every packet the interleaver is ready to release */
2329         int ret= av_interleave_packet(s, &opkt, pkt, 0);
2330         if(ret<=0) //FIXME cleanup needed for ret<0 ?
2333         truncate_ts(s->streams[opkt.stream_index], &opkt);
2334         ret= s->oformat->write_packet(s, &opkt);
2336         av_free_packet(&opkt);
2341         if(url_ferror(&s->pb))
2342             return url_ferror(&s->pb);
2347  * write the stream trailer to an output media file and free the
2348 * file private data.
2350 * @param s media file handle
2351 * @return 0 if OK. AVERROR_xxx if error. */
2352 int av_write_trailer(AVFormatContext *s)
     /* flush any packets still held by the interleaver before the trailer */
2358         ret= av_interleave_packet(s, &pkt, NULL, 1);
2359         if(ret<0) //FIXME cleanup needed for ret<0 ?
2364         truncate_ts(s->streams[pkt.stream_index], &pkt);
2365         ret= s->oformat->write_packet(s, &pkt);
2367         av_free_packet(&pkt);
2371         if(url_ferror(&s->pb))
2375     ret = s->oformat->write_trailer(s);
2378         ret=url_ferror(&s->pb);
     /* release per-stream and muxer private data */
2379     for(i=0;i<s->nb_streams;i++)
2380         av_freep(&s->streams[i]->priv_data);
2381     av_freep(&s->priv_data);
2385 /* "user interface" functions */
/* Print a human-readable description of the format context (duration,
   start time, bit rate, and one line per stream) to the log. */
2387 void dump_format(AVFormatContext *ic,
2395     av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2396            is_output ? "Output" : "Input",
2398            is_output ? ic->oformat->name : ic->iformat->name,
2399            is_output ? "to" : "from", url);
2401     av_log(NULL, AV_LOG_INFO, "  Duration: ");
2402     if (ic->duration != AV_NOPTS_VALUE) {
2403         int hours, mins, secs, us;
2404         secs = ic->duration / AV_TIME_BASE;
2405         us = ic->duration % AV_TIME_BASE;
2410         av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2411                (10 * us) / AV_TIME_BASE);
2413         av_log(NULL, AV_LOG_INFO, "N/A");
2415     if (ic->start_time != AV_NOPTS_VALUE) {
2417         av_log(NULL, AV_LOG_INFO, ", start: ");
2418         secs = ic->start_time / AV_TIME_BASE;
2419         us = ic->start_time % AV_TIME_BASE;
2420         av_log(NULL, AV_LOG_INFO, "%d.%06d",
2421                secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2423     av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2425         av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2427         av_log(NULL, AV_LOG_INFO, "N/A");
2429     av_log(NULL, AV_LOG_INFO, "\n");
2431     for(i=0;i<ic->nb_streams;i++) {
2432         AVStream *st = ic->streams[i];
2433         avcodec_string(buf, sizeof(buf), &st->codec, is_output);
2434         av_log(NULL, AV_LOG_INFO, "  Stream #%d.%d", index, i);
2435         /* the pid is an important information, so we display it */
2436         /* XXX: add a generic system */
2438             flags = ic->oformat->flags;
2440             flags = ic->iformat->flags;
2441         if (flags & AVFMT_SHOW_IDS) {
2442             av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2444         av_log(NULL, AV_LOG_INFO, ": %s\n", buf);
2451 int frame_rate, frame_rate_base;
/* Table of frame size / frame rate abbreviations accepted by
   parse_image_size() and parse_frame_rate(). Entries with a zero
   frame rate only define a picture size. */
2454 static AbvEntry frame_abvs[] = {
2455     { "ntsc",      720, 480, 30000, 1001 },
2456     { "pal",       720, 576,    25,    1 },
2457     { "qntsc",     352, 240, 30000, 1001 }, /* VCD compliant ntsc */
2458     { "qpal",      352, 288,    25,    1 }, /* VCD compliant pal */
2459     { "sntsc",     640, 480, 30000, 1001 }, /* square pixel ntsc */
2460     { "spal",      768, 576,    25,    1 }, /* square pixel pal */
2461     { "film",      352, 240,    24,    1 },
2462     { "ntsc-film", 352, 240, 24000, 1001 },
2463     { "sqcif",     128,  96,     0,    0 },
2464     { "qcif",      176, 144,     0,    0 },
2465     { "cif",       352, 288,     0,    0 },
2466     { "4cif",      704, 576,     0,    0 },
/* Parse a picture size given either as an abbreviation from frame_abvs
   (e.g. "pal", "cif") or as "<width>x<height>" digits. Returns 0 on
   success, negative on bad input. */
2469 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2472     int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2474     int frame_width = 0, frame_height = 0;
2477         if (!strcmp(frame_abvs[i].abv, str)) {
2478             frame_width = frame_abvs[i].width;
2479             frame_height = frame_abvs[i].height;
     /* not an abbreviation: parse explicit WxH */
2485         frame_width = strtol(p, (char **)&p, 10);
2488         frame_height = strtol(p, (char **)&p, 10);
2490     if (frame_width <= 0 || frame_height <= 0)
2492     *width_ptr = frame_width;
2493     *height_ptr = frame_height;
/* Parse a frame rate given as an abbreviation ("ntsc", "pal", ...),
   a fraction "num/den" or "num:den", or a plain decimal number.
   Stores the result as a rational frame_rate/frame_rate_base. */
2497 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2502     /* First, we check our abbreviation table */
2503     for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2504          if (!strcmp(frame_abvs[i].abv, arg)) {
2505              *frame_rate = frame_abvs[i].frame_rate;
2506              *frame_rate_base = frame_abvs[i].frame_rate_base;
2510     /* Then, we try to parse it as fraction */
2511     cp = strchr(arg, '/');
2513         cp = strchr(arg, ':');
2516         *frame_rate = strtol(arg, &cpp, 10);
2517         if (cpp != arg || cpp == cp)
2518             *frame_rate_base = strtol(cp+1, &cpp, 10);
2523         /* Finally we give up and parse it as double */
2524         *frame_rate_base = DEFAULT_FRAME_RATE_BASE; //FIXME use av_d2q()
2525         *frame_rate = (int)(strtod(arg, 0) * (*frame_rate_base) + 0.5);
2527     if (!*frame_rate || !*frame_rate_base)
2534 * - If not a duration:
2535 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2536  * Time is local time unless Z is suffixed to the end, in which case it is GMT.
2537 * Return the date in micro seconds since 1970
2539 * HH[:MM[:SS[.m...]]]
2542 int64_t parse_date(const char *datestr, int duration)
2548     static const char *date_fmt[] = {
2552     static const char *time_fmt[] = {
2562     time_t now = time(0);
2564     len = strlen(datestr);
2566         lastch = datestr[len - 1];
     /* trailing 'Z'/'z' means the time is given in UTC */
2569     is_utc = (lastch == 'z' || lastch == 'Z');
2571     memset(&dt, 0, sizeof(dt));
     /* try the date formats first (absolute dates only) */
2576         for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2577             q = small_strptime(p, date_fmt[i], &dt);
     /* no date given: default to today (at midnight) */
2587                 dt = *localtime(&now);
2589             dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2594         if (*p == 'T' || *p == 't' || *p == ' ')
     /* then the time-of-day formats */
2597         for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2598             q = small_strptime(p, time_fmt[i], &dt);
     /* duration mode: "HH:MM:SS" or a plain number of seconds */
2608         q = small_strptime(p, time_fmt[0], &dt);
2610             dt.tm_sec = strtol(p, (char **)&q, 10);
2616     /* Now we have all the fields that we can get */
2621         return now * int64_t_C(1000000);
2625         t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2627         dt.tm_isdst = -1; /* unknown */
     /* parse up to 6 fractional digits as microseconds */
2640         for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2643                 val += n * (*q - '0');
2647     return negative ? -t : t;
2650 /* syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done. Return
2652 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
     /* scan "?tag=val&tag=val..." pairs; copy the tag name... */
2662         while (*p != '\0' && *p != '=' && *p != '&') {
2663             if ((q - tag) < sizeof(tag) - 1)
     /* ...then the (URL-decoded) value, bounded by arg_size */
2671             while (*p != '&' && *p != '\0') {
2672                 if ((q - arg) < arg_size - 1) {
     /* return success once the requested tag is found */
2682         if (!strcmp(tag, tag1))
2691 /* Return in 'buf' the path with '%d' replaced by number. Also handles
2692 the '%0nd' format where 'n' is the total number of digits and
2693 '%%'. Return 0 if OK, and -1 if format error */
2694 int get_frame_filename(char *buf, int buf_size,
2695                        const char *path, int number)
2698     char *q, buf1[20], c;
2699     int nd, len, percentd_found;
     /* read the optional zero-padding width after '%' */
2711             while (isdigit(*p)) {
2712                 nd = nd * 10 + *p++ - '0';
2715             } while (isdigit(c));
     /* expand %d / %0nd with the frame number, bounded by buf_size */
2724             snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2726             if ((q - buf + len) > buf_size - 1)
2728             memcpy(q, buf1, len);
     /* ordinary character: copy through */
2736             if ((q - buf) < buf_size - 1)
     /* a '%d' (or '%0nd') is mandatory in the pattern */
2740     if (!percentd_found)
2750  * Print a nice hex dump of a buffer
2751 * @param f stream for output
2753 * @param size buffer size
2755 void av_hex_dump(FILE *f, uint8_t *buf, int size)
     /* dump 16 bytes per line: offset, hex bytes, then printable ASCII */
2759     for(i=0;i<size;i+=16) {
2763         fprintf(f, "%08x ", i);
2766                 fprintf(f, " %02x", buf[i+j]);
2771         for(j=0;j<len;j++) {
     /* non-printable characters are replaced (see elided branch) */
2773             if (c < ' ' || c > '~')
2775             fprintf(f, "%c", c);
2782 * Print on 'f' a nice dump of a packet
2783 * @param f stream for output
2784 * @param pkt packet to dump
2785 * @param dump_payload true if the payload must be displayed too
2787 //FIXME needs to know the time_base
2788 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2790     fprintf(f, "stream #%d:\n", pkt->stream_index);
2791     fprintf(f, "  keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2792     fprintf(f, "  duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2793     /* DTS is _always_ valid after av_read_frame() */
2794     fprintf(f, "  dts=");
2795     if (pkt->dts == AV_NOPTS_VALUE)
2798         fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
2799     /* PTS may be not known if B frames are present */
2800     fprintf(f, "  pts=");
2801     if (pkt->pts == AV_NOPTS_VALUE)
2804         fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
2806     fprintf(f, "  size=%d\n", pkt->size);
     /* optionally hex-dump the payload too */
2808         av_hex_dump(f, pkt->data, pkt->size);
/* Split a URL of the form proto://[auth@]host[:port][/path] into its
   components. Each output buffer is bounded by its *_size argument. */
2811 void url_split(char *proto, int proto_size,
2812                char *authorization, int authorization_size,
2813                char *hostname, int hostname_size,
2815                char *path, int path_size,
     /* copy the protocol up to the ':' */
2826     while (*p != ':' && *p != '\0') {
2827         if ((q - proto) < proto_size - 1)
2833     if (authorization_size > 0)
2834         authorization[0] = '\0';
2838         if (hostname_size > 0)
2842         char *at,*slash; // PETR: position of '@' character and '/' character
2849         at = strchr(p,'@'); // PETR: get the position of '@'
2850         slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
2851         if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
2853         q = at ? authorization : hostname;  // PETR: if '@' exists starting with auth.
2855         while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
2856             if (*p == '@') {    // PETR: passed '@'
2857                 if (authorization_size > 0)
2861             } else if (!at) {   // PETR: hostname
2862                 if ((q - hostname) < hostname_size - 1)
2865                 if ((q - authorization) < authorization_size - 1)
2870         if (hostname_size > 0)
     /* optional numeric port after ':' */
2874             port = strtoul(p, (char **)&p, 10);
     /* remainder is the path */
2879     pstrcpy(path, path_size, p);
2883 * Set the pts for a given stream
2885 * @param pts_wrap_bits number of bits effectively used by the pts
2886 * (used for wrap control, 33 is the value for MPEG)
2887 * @param pts_num numerator to convert to seconds (MPEG: 1)
2888 * @param pts_den denominator to convert to seconds (MPEG: 90000)
2890 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
2891                      int pts_num, int pts_den)
     /* record wrap width and the stream time base num/den */
2893     s->pts_wrap_bits = pts_wrap_bits;
2894     s->time_base.num = pts_num;
2895     s->time_base.den = pts_den;
2898 /* fraction handling */
2901  * Initialize f = val + (num / den) + 0.5. 'num' is normalized so that it is such
2902  * as 0 <= num < den.
2904  * @param f fractional number
2905  * @param val integer value
2906  * @param num must be >= 0
2907  * @param den must be >= 1
2909 void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
2921 /* set f to (val + 0.5), i.e. the fractional part becomes den/2 */
2922 void av_frac_set(AVFrac *f, int64_t val)
2925     f->num = f->den >> 1;
2929 * Fractionnal addition to f: f = f + (incr / f->den)
2931 * @param f fractional number
2932 * @param incr increment, can be positive or negative
2934 void av_frac_add(AVFrac *f, int64_t incr)
2938     num = f->num + incr;
     /* renormalize so the carry/borrow moves into the integer part and
        0 <= num < den holds again */
2941         f->val += num / den;
2947     } else if (num >= den) {
2948         f->val += num / den;
2955 * register a new image format
2956 * @param img_fmt Image format descriptor
2958 void av_register_image_format(AVImageFormat *img_fmt)
     /* append to the end of the singly-linked registry list */
2962     p = &first_image_format;
2963     while (*p != NULL) p = &(*p)->next;
2965     img_fmt->next = NULL;
2968 /* guess image format */
/* Probe the registered image formats against 'pd' and return the one
   with the highest probe score (NULL fields skipped). */
2969 AVImageFormat *av_probe_image_format(AVProbeData *pd)
2971     AVImageFormat *fmt1, *fmt;
2972     int score, score_max;
2976     for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
2977         if (fmt1->img_probe) {
2978             score = fmt1->img_probe(pd);
2979             if (score > score_max) {
/* Guess the image format from the filename extension alone. */
2988 AVImageFormat *guess_image_format(const char *filename)
2990     AVImageFormat *fmt1;
2992     for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
2993         if (fmt1->extensions && match_ext(filename, fmt1->extensions))
3000 * Read an image from a stream.
3001 * @param gb byte stream containing the image
3002 * @param fmt image format, NULL if probing is required
3004 int av_read_image(ByteIOContext *pb, const char *filename,
3006                   int (*alloc_cb)(void *, AVImageInfo *info), void *opaque)
3008     char buf[PROBE_BUF_SIZE];
3009     AVProbeData probe_data, *pd = &probe_data;
3014     pd->filename = filename;
     /* peek at the stream to probe the format, then rewind */
3016     pos = url_ftell(pb);
3017     pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
3018     url_fseek(pb, pos, SEEK_SET);
3019     fmt = av_probe_image_format(pd);
3022         return AVERROR_NOFMT;
3023     ret = fmt->img_read(pb, alloc_cb, opaque);
3028 * Write an image to a stream.
3029 * @param pb byte stream for the image output
3030 * @param fmt image format
3031  * @param img image data and information
3033 int av_write_image(ByteIOContext *pb, AVImageFormat *fmt, AVImageInfo *img)
     /* delegate entirely to the format's write callback */
3035     return fmt->img_write(pb, img);