2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/* Heads of the global singly-linked registries of demuxers, muxers and
   image formats; the av_register_*_format() functions append to these. */
AVInputFormat *first_iformat = NULL;
AVOutputFormat *first_oformat = NULL;
AVImageFormat *first_image_format = NULL;
28 void av_register_input_format(AVInputFormat *format)
32 while (*p != NULL) p = &(*p)->next;
37 void av_register_output_format(AVOutputFormat *format)
41 while (*p != NULL) p = &(*p)->next;
/**
 * Test whether the extension of 'filename' appears in a comma-separated
 * list of extensions (case-insensitive).
 *
 * @param filename   file name whose extension (text after the last '.')
 *                   is tested; may be NULL
 * @param extensions comma-separated list, e.g. "avi,mpg,mpeg"
 * @return 1 on match, 0 otherwise
 */
int match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if (!filename)
        return 0;
    ext = strrchr(filename, '.');
    if (ext) {
        ext++;
        p = extensions;
        for (;;) {
            q = ext1;
            /* copy one token, bounded so an over-long extension in the
               list cannot overflow ext1 */
            while (*p != '\0' && *p != ',' && q < ext1 + sizeof(ext1) - 1)
                *q++ = *p++;
            *q = '\0';
            if (!strcasecmp(ext1, ext))
                return 1;
            /* skip any token remainder that did not fit in ext1 */
            while (*p != '\0' && *p != ',')
                p++;
            if (*p == '\0')
                break;
            p++; /* skip the comma */
        }
    }
    return 0;
}
73 AVOutputFormat *guess_format(const char *short_name, const char *filename,
74 const char *mime_type)
76 AVOutputFormat *fmt, *fmt_found;
79 /* specific test for image sequences */
80 if (!short_name && filename &&
81 filename_number_test(filename) >= 0 &&
82 guess_image_format(filename)) {
83 return guess_format("image", NULL, NULL);
86 /* find the proper file type */
92 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
94 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
96 if (filename && fmt->extensions &&
97 match_ext(filename, fmt->extensions)) {
100 if (score > score_max) {
109 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
110 const char *mime_type)
112 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
115 AVOutputFormat *stream_fmt;
116 char stream_format_name[64];
118 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
119 stream_fmt = guess_format(stream_format_name, NULL, NULL);
128 AVInputFormat *av_find_input_format(const char *short_name)
131 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
132 if (!strcmp(fmt->name, short_name))
138 /* memory handling */
141 * Default packet destructor
143 static void av_destruct_packet(AVPacket *pkt)
146 pkt->data = NULL; pkt->size = 0;
150 * Allocate the payload of a packet and intialized its fields to default values.
153 * @param size wanted payload size
154 * @return 0 if OK. AVERROR_xxx otherwise.
156 int av_new_packet(AVPacket *pkt, int size)
158 void *data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
160 return AVERROR_NOMEM;
161 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
166 pkt->destruct = av_destruct_packet;
170 /* This is a hack - the packet memory allocation stuff is broken. The
171 packet is allocated if it was not really allocated */
172 int av_dup_packet(AVPacket *pkt)
174 if (pkt->destruct != av_destruct_packet) {
176 /* we duplicate the packet and don't forget to put the padding
178 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
180 return AVERROR_NOMEM;
182 memcpy(data, pkt->data, pkt->size);
183 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
185 pkt->destruct = av_destruct_packet;
192 int fifo_init(FifoBuffer *f, int size)
194 f->buffer = av_malloc(size);
197 f->end = f->buffer + size;
198 f->wptr = f->rptr = f->buffer;
202 void fifo_free(FifoBuffer *f)
207 int fifo_size(FifoBuffer *f, uint8_t *rptr)
211 if (f->wptr >= rptr) {
212 size = f->wptr - rptr;
214 size = (f->end - rptr) + (f->wptr - f->buffer);
219 /* get data from the fifo (return -1 if not enough data) */
220 int fifo_read(FifoBuffer *f, uint8_t *buf, int buf_size, uint8_t **rptr_ptr)
222 uint8_t *rptr = *rptr_ptr;
225 if (f->wptr >= rptr) {
226 size = f->wptr - rptr;
228 size = (f->end - rptr) + (f->wptr - f->buffer);
233 while (buf_size > 0) {
237 memcpy(buf, rptr, len);
248 void fifo_write(FifoBuffer *f, uint8_t *buf, int size, uint8_t **wptr_ptr)
257 memcpy(wptr, buf, len);
267 /* get data from the fifo (return -1 if not enough data) */
268 int put_fifo(ByteIOContext *pb, FifoBuffer *f, int buf_size, uint8_t **rptr_ptr)
270 uint8_t *rptr = *rptr_ptr;
273 if (f->wptr >= rptr) {
274 size = f->wptr - rptr;
276 size = (f->end - rptr) + (f->wptr - f->buffer);
281 while (buf_size > 0) {
285 put_buffer(pb, rptr, len);
/**
 * Check whether 'filename' contains a frame-number pattern (e.g. %d)
 * usable by image sequences.
 * @return >= 0 if the pattern is valid, < 0 otherwise
 */
int filename_number_test(const char *filename)
{
    char buf[16];
    /* try expanding the pattern with frame number 1 */
    return get_frame_filename(buf, sizeof(buf), filename, 1);
}
303 /* guess file format */
304 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
306 AVInputFormat *fmt1, *fmt;
307 int score, score_max;
311 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
312 if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
315 if (fmt1->read_probe) {
316 score = fmt1->read_probe(pd);
317 } else if (fmt1->extensions) {
318 if (match_ext(pd->filename, fmt1->extensions)) {
322 if (score > score_max) {
330 /************************************************************/
331 /* input media file */
334 * open a media file from an IO stream. 'fmt' must be specified.
337 static const char* format_to_name(void* ptr)
339 AVFormatContext* fc = (AVFormatContext*) ptr;
340 if(fc->iformat) return fc->iformat->name;
341 else if(fc->oformat) return fc->oformat->name;
345 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name };
347 AVFormatContext *av_alloc_format_context(void)
350 ic = av_mallocz(sizeof(AVFormatContext));
352 ic->av_class = &av_format_context_class;
356 int av_open_input_stream(AVFormatContext **ic_ptr,
357 ByteIOContext *pb, const char *filename,
358 AVInputFormat *fmt, AVFormatParameters *ap)
363 ic = av_alloc_format_context();
371 ic->duration = AV_NOPTS_VALUE;
372 ic->start_time = AV_NOPTS_VALUE;
373 pstrcpy(ic->filename, sizeof(ic->filename), filename);
375 /* allocate private data */
376 if (fmt->priv_data_size > 0) {
377 ic->priv_data = av_mallocz(fmt->priv_data_size);
378 if (!ic->priv_data) {
383 ic->priv_data = NULL;
386 err = ic->iformat->read_header(ic, ap);
391 ic->data_offset = url_ftell(&ic->pb);
397 av_freep(&ic->priv_data);
/* number of bytes read from the file head for format probing */
#define PROBE_BUF_SIZE 2048

/**
 * Open a media file as input. The codecs are not opened. Only the file
 * header (if present) is read.
 *
 * @param ic_ptr the opened media file handle is put here
 * @param filename filename to open.
 * @param fmt if non NULL, force the file format to use
 * @param buf_size optional buffer size (zero if default is OK)
 * @param ap additional parameters needed when opening the file (NULL if default)
 * @return 0 if OK. AVERROR_xxx otherwise.
 *
 * NOTE(review): this excerpt is truncated — several lines of the
 * original function (including part of the parameter list and most
 * closing braces / error paths) are missing; the code below is
 * incomplete as shown.
 */
int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
                       AVFormatParameters *ap)
    int err, must_open_file, file_opened;
    uint8_t buf[PROBE_BUF_SIZE];
    AVProbeData probe_data, *pd = &probe_data;
    ByteIOContext pb1, *pb = &pb1;

    pd->filename = filename;

    /* guess format if no file can be opened */
    fmt = av_probe_input_format(pd, 0);

    /* do not open file if the format does not need it. XXX: specific
       hack needed to handle RTSP/TCP */
    if (fmt && (fmt->flags & AVFMT_NOFILE)) {
        pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it's uninitialized
    if (!fmt || must_open_file) {
        /* if no file needed do not try to open one */
        if (url_fopen(pb, filename, URL_RDONLY) < 0) {
        url_setbufsize(pb, buf_size);

        /* read probe data */
        pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
        /* rewind; a pipe cannot seek back, so reopen instead */
        if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
            if (url_fopen(pb, filename, URL_RDONLY) < 0) {
    /* guess file format */
        fmt = av_probe_input_format(pd, 1);
    /* if still no format found, error */
    /* XXX: suppress this hack for redirectors */
#ifdef CONFIG_NETWORK
    if (fmt == &redir_demux) {
        err = redir_open(ic_ptr, pb);
    /* check filename in case of an image number is expected */
    if (fmt->flags & AVFMT_NEEDNUMBER) {
        if (filename_number_test(filename) < 0) {
            err = AVERROR_NUMEXPECTED;
    err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
509 /*******************************************************/
512 * Read a transport packet from a media file. This function is
513 * absolete and should never be used. Use av_read_frame() instead.
515 * @param s media file handle
516 * @param pkt is filled
517 * @return 0 if OK. AVERROR_xxx if error.
519 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
521 return s->iformat->read_packet(s, pkt);
524 /**********************************************************/
526 /* get the number of samples of an audio frame. Return (-1) if error */
527 static int get_audio_frame_size(AVCodecContext *enc, int size)
531 if (enc->frame_size <= 1) {
532 /* specific hack for pcm codecs because no frame size is
534 switch(enc->codec_id) {
535 case CODEC_ID_PCM_S16LE:
536 case CODEC_ID_PCM_S16BE:
537 case CODEC_ID_PCM_U16LE:
538 case CODEC_ID_PCM_U16BE:
539 if (enc->channels == 0)
541 frame_size = size / (2 * enc->channels);
543 case CODEC_ID_PCM_S8:
544 case CODEC_ID_PCM_U8:
545 case CODEC_ID_PCM_MULAW:
546 case CODEC_ID_PCM_ALAW:
547 if (enc->channels == 0)
549 frame_size = size / (enc->channels);
552 /* used for example by ADPCM codecs */
553 if (enc->bit_rate == 0)
555 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
559 frame_size = enc->frame_size;
565 /* return the frame duration in seconds, return 0 if not available */
566 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
567 AVCodecParserContext *pc, AVPacket *pkt)
573 switch(st->codec.codec_type) {
574 case CODEC_TYPE_VIDEO:
575 *pnum = st->codec.frame_rate_base;
576 *pden = st->codec.frame_rate;
577 if (pc && pc->repeat_pict) {
579 *pnum = (*pnum) * (2 + pc->repeat_pict);
582 case CODEC_TYPE_AUDIO:
583 frame_size = get_audio_frame_size(&st->codec, pkt->size);
587 *pden = st->codec.sample_rate;
594 static int is_intra_only(AVCodecContext *enc){
595 if(enc->codec_type == CODEC_TYPE_AUDIO){
597 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
598 switch(enc->codec_id){
600 case CODEC_ID_MJPEGB:
602 case CODEC_ID_RAWVIDEO:
603 case CODEC_ID_DVVIDEO:
604 case CODEC_ID_HUFFYUV:
/* Reconstruct a full timestamp from its 'lsb_bits' low bits, choosing
   the value congruent to 'lsb' (mod 2^lsb_bits) that is closest to the
   last seen timestamp 'last_ts' (timestamp wrap handling). */
static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
    int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
    /* center the representable window around last_ts */
    int64_t delta= last_ts - mask/2;
    return ((lsb - delta)&mask) + delta;
}
/* Fill in missing packet fields (pts/dts/duration/key-frame flag),
   undo timestamp wrap via lsb2full(), and finally convert pts/dts/
   duration into AV_TIME_BASE units.
   NOTE(review): this excerpt is truncated — many closing braces, else
   branches and declarations are missing; the control flow below is
   incomplete as shown. */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
    int num, den, presentation_delayed;

    /* handle wrapping */
    if(st->cur_dts != AV_NOPTS_VALUE){
        if(pkt->pts != AV_NOPTS_VALUE)
            pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
        if(pkt->dts != AV_NOPTS_VALUE)
            pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);

    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, pc, pkt);
            /* derive the duration from the frame rate / sample rate */
            pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);

    /* intra-only codecs: every packet is a key frame */
    if(is_intra_only(&st->codec))
        pkt->flags |= PKT_FLAG_KEY;

    /* do we have a video B frame ? */
    presentation_delayed = 0;
    if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
        /* XXX: need has_b_frame, but cannot get it if the codec is
           not open (comment truncated in this excerpt) */
        if ((st->codec.codec_id == CODEC_ID_MPEG1VIDEO ||
             st->codec.codec_id == CODEC_ID_MPEG2VIDEO ||
             st->codec.codec_id == CODEC_ID_MPEG4 ||
             st->codec.codec_id == CODEC_ID_H264) &&
            pc && pc->pict_type != FF_B_TYPE)
            presentation_delayed = 1;
        /* this may be redundant, but it shouldn't hurt */
        if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
            presentation_delayed = 1;

    /* first packet of the stream: pick a dts origin */
    if(st->cur_dts == AV_NOPTS_VALUE){
        if(presentation_delayed) st->cur_dts = -pkt->duration;
        else                     st->cur_dts = 0;

//    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
    /* interpolate PTS and DTS if they are not present */
    if (presentation_delayed) {
        /* DTS = decompression time stamp */
        /* PTS = presentation time stamp */
        if (pkt->dts == AV_NOPTS_VALUE) {
            /* if we know the last pts, use it */
            if(st->last_IP_pts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts = st->last_IP_pts;
                pkt->dts = st->cur_dts;
            st->cur_dts = pkt->dts;
        /* this is tricky: the dts must be incremented by the duration
           of the frame we are displaying, i.e. the last I or P frame */
        if (st->last_IP_duration == 0)
            st->cur_dts += pkt->duration;
            st->cur_dts += st->last_IP_duration;
        st->last_IP_duration = pkt->duration;
        st->last_IP_pts= pkt->pts;
        /* cannot compute PTS if not present (we can compute it only
           by knowing the future) */
        /* presentation is not delayed : PTS and DTS are the same */
        if (pkt->pts == AV_NOPTS_VALUE) {
            if (pkt->dts == AV_NOPTS_VALUE) {
                pkt->pts = st->cur_dts;
                pkt->dts = st->cur_dts;
                st->cur_dts = pkt->dts;
            st->cur_dts = pkt->pts;
        st->cur_dts += pkt->duration;
//    av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);

    /* key frame computation */
    switch(st->codec.codec_type) {
    case CODEC_TYPE_VIDEO:
        if (pc->pict_type == FF_I_TYPE)
            pkt->flags |= PKT_FLAG_KEY;
    case CODEC_TYPE_AUDIO:
        pkt->flags |= PKT_FLAG_KEY;

    /* convert the packet time stamp units */
    if(pkt->pts != AV_NOPTS_VALUE)
        pkt->pts = av_rescale(pkt->pts, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
    if(pkt->dts != AV_NOPTS_VALUE)
        pkt->dts = av_rescale(pkt->dts, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
    pkt->duration = av_rescale(pkt->duration, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
735 void av_destruct_packet_nofree(AVPacket *pkt)
737 pkt->data = NULL; pkt->size = 0;
/* Core frame reader: pulls raw packets via av_read_packet() and runs
   them through the stream's parser (when one is needed) so that the
   caller receives whole frames; timestamps are fixed up through
   compute_pkt_fields().
   NOTE(review): this excerpt is truncated — declarations, loop
   headers, returns and closing braces are missing; the flow below is
   incomplete as shown. */
static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
        /* select current input stream component */
            /* no parsing needed: we just output the packet as is */
            /* raw data support */
                compute_pkt_fields(s, st, NULL, pkt);
            } else if (s->cur_len > 0) {
                /* feed the pending payload to the parser */
                len = av_parser_parse(st->parser, &st->codec, &pkt->data, &pkt->size,
                                      s->cur_ptr, s->cur_len,
                                      s->cur_pkt.pts, s->cur_pkt.dts);
                /* consume the input timestamps: only the first parsed
                   frame of a packet may use them */
                s->cur_pkt.pts = AV_NOPTS_VALUE;
                s->cur_pkt.dts = AV_NOPTS_VALUE;
                /* increment read pointer */
                /* return packet if any */
                    pkt->stream_index = st->index;
                    pkt->pts = st->parser->pts;
                    pkt->dts = st->parser->dts;
                    /* parser owns the buffer; do not free it with the packet */
                    pkt->destruct = av_destruct_packet_nofree;
                    compute_pkt_fields(s, st, st->parser, pkt);
                av_free_packet(&s->cur_pkt);
        /* read next packet */
        ret = av_read_packet(s, &s->cur_pkt);
            /* return the last frames, if any */
            for(i = 0; i < s->nb_streams; i++) {
                    /* flush the parser with a NULL input */
                    av_parser_parse(st->parser, &st->codec,
                                    &pkt->data, &pkt->size,
                                    AV_NOPTS_VALUE, AV_NOPTS_VALUE);
            /* no more packets: really terminates parsing */
        st = s->streams[s->cur_pkt.stream_index];
        s->cur_ptr = s->cur_pkt.data;
        s->cur_len = s->cur_pkt.size;
        if (st->need_parsing && !st->parser) {
            st->parser = av_parser_init(st->codec.codec_id);
                /* no parser available : just output the raw packets */
                st->need_parsing = 0;
821 * Return the next frame of a stream. The returned packet is valid
822 * until the next av_read_frame() or until av_close_input_file() and
823 * must be freed with av_free_packet. For video, the packet contains
824 * exactly one frame. For audio, it contains an integer number of
825 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
826 * data). If the audio frames have a variable size (e.g. MPEG audio),
827 * then it contains one frame.
829 * pkt->pts, pkt->dts and pkt->duration are always set to correct
830 * values in AV_TIME_BASE unit (and guessed if the format cannot
831 * provided them). pkt->pts can be AV_NOPTS_VALUE if the video format
832 * has B frames, so it is better to rely on pkt->dts if you do not
833 * decompress the payload.
835 * Return 0 if OK, < 0 if error or end of file.
837 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
841 pktl = s->packet_buffer;
843 /* read packet from packet buffer, if there is data */
845 s->packet_buffer = pktl->next;
849 return av_read_frame_internal(s, pkt);
853 /* XXX: suppress the packet queue */
854 static void flush_packet_queue(AVFormatContext *s)
859 pktl = s->packet_buffer;
862 s->packet_buffer = pktl->next;
863 av_free_packet(&pktl->pkt);
868 /*******************************************************/
871 int av_find_default_stream_index(AVFormatContext *s)
876 if (s->nb_streams <= 0)
878 for(i = 0; i < s->nb_streams; i++) {
880 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
887 /* flush the frame reader */
888 static void av_read_frame_flush(AVFormatContext *s)
893 flush_packet_queue(s);
895 /* free previous packet */
897 if (s->cur_st->parser)
898 av_free_packet(&s->cur_pkt);
905 /* for each stream, reset read state */
906 for(i = 0; i < s->nb_streams; i++) {
910 av_parser_close(st->parser);
913 st->last_IP_pts = AV_NOPTS_VALUE;
914 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
919 * updates cur_dts of all streams based on given timestamp and AVStream.
920 * stream ref_st unchanged, others set cur_dts in their native timebase
921 * only needed for timestamp wrapping or if (dts not set and pts!=dts)
922 * @param timestamp new dts expressed in time_base of param ref_st
923 * @param ref_st reference stream giving time_base of param timestamp
925 static void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
928 for(i = 0; i < s->nb_streams; i++) {
929 AVStream *st = s->streams[i];
931 st->cur_dts = av_rescale(timestamp,
932 st->time_base.den * (int64_t)ref_st->time_base.num,
933 st->time_base.num * (int64_t)ref_st->time_base.den);
/**
 * Add an index entry into a sorted list, updating it if it is already
 * there.
 * @param timestamp timestamp in the timebase of the given stream
 *
 * NOTE(review): this excerpt is truncated — the allocation-failure
 * check, the branch structure around av_index_search_timestamp()'s
 * result, the ie assignments and the return are missing; the code
 * below is incomplete as shown.
 */
int av_add_index_entry(AVStream *st,
                       int64_t pos, int64_t timestamp, int distance, int flags)
    AVIndexEntry *entries, *ie;

    /* grow the entries array by one slot */
    entries = av_fast_realloc(st->index_entries,
                              &st->index_entries_allocated_size,
                              (st->nb_index_entries + 1) *
                              sizeof(AVIndexEntry));
    st->index_entries= entries;

    index= av_index_search_timestamp(st, timestamp, 0);

        /* not found: append at the end (list stays sorted) */
        index= st->nb_index_entries++;
        assert(index==0 || ie[-1].timestamp < timestamp);

        if(ie->timestamp != timestamp){
            if(ie->timestamp <= timestamp)
            /* shift the tail to make room for the new entry */
            memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
            st->nb_index_entries++;
        }else if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
            distance= ie->min_distance;

    ie->timestamp = timestamp;
    ie->min_distance= distance;
978 /* build an index for raw streams using a parser */
979 static void av_build_index_raw(AVFormatContext *s)
981 AVPacket pkt1, *pkt = &pkt1;
986 av_read_frame_flush(s);
987 url_fseek(&s->pb, s->data_offset, SEEK_SET);
990 ret = av_read_frame(s, pkt);
993 if (pkt->stream_index == 0 && st->parser &&
994 (pkt->flags & PKT_FLAG_KEY)) {
995 int64_t dts= av_rescale(pkt->dts, st->time_base.den, AV_TIME_BASE*(int64_t)st->time_base.num);
996 av_add_index_entry(st, st->parser->frame_offset, dts,
997 0, AVINDEX_KEYFRAME);
1003 /* return TRUE if we deal with a raw stream (raw codec data and
1005 static int is_raw_stream(AVFormatContext *s)
1009 if (s->nb_streams != 1)
1012 if (!st->need_parsing)
1018 * gets the index for a specific timestamp.
1019 * @param backward if non zero then the returned index will correspond to
1020 * the timestamp which is <= the requested one, if backward is 0
1021 * then it will be >=
1022 * @return < 0 if no such timestamp could be found
1024 int av_index_search_timestamp(AVStream *st, int wanted_timestamp, int backward)
1026 AVIndexEntry *entries= st->index_entries;
1027 int nb_entries= st->nb_index_entries;
1036 timestamp = entries[m].timestamp;
1037 if(timestamp >= wanted_timestamp)
1039 if(timestamp <= wanted_timestamp)
1042 m= backward ? a : b;
/**
 * Does a binary search using av_index_search_timestamp() and
 * AVInputFormat.read_timestamp(). This isn't supposed to be called
 * directly by a user application, but by demuxers.
 * @param target_ts target timestamp in the time base of the given stream
 * @param stream_index stream number
 *
 * NOTE(review): this excerpt is truncated — error returns, several
 * closing braces, and the first branch of the interpolation if/else
 * chain are missing; the code below is incomplete as shown.
 */
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
    AVInputFormat *avif= s->iformat;
    int64_t pos_min, pos_max, pos, pos_limit;
    int64_t ts_min, ts_max, ts;
    int index, no_change;

    if (stream_index < 0)

    av_log(s, AV_LOG_DEBUG, "read_seek: %d %lld\n", stream_index, target_ts);

    ts_min= AV_NOPTS_VALUE;
    pos_limit= -1; //gcc falsely says it may be uninitialized

    st= s->streams[stream_index];
    if(st->index_entries){
        /* seed the search bounds from the cached index */
        index= av_index_search_timestamp(st, target_ts, 1);
        index= FFMAX(index, 0);
        e= &st->index_entries[index];

        if(e->timestamp <= target_ts || e->pos == e->min_distance){
            ts_min= e->timestamp;
            av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%llx dts_min=%lld\n",

        if(index < st->nb_index_entries){
            e= &st->index_entries[index];
            assert(e->timestamp >= target_ts);
            ts_max= e->timestamp;
            pos_limit= pos_max - e->min_distance;
            av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%llx pos_limit=0x%llx dts_max=%lld\n",
                   pos_max,pos_limit, ts_max);

    if(ts_min == AV_NOPTS_VALUE){
        pos_min = s->data_offset;
        ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
        if (ts_min == AV_NOPTS_VALUE)

    if(ts_max == AV_NOPTS_VALUE){
        /* probe backwards from EOF for the last timestamp */
        pos_max = url_filesize(url_fileno(&s->pb)) - 1;
            ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
        }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
        if (ts_max == AV_NOPTS_VALUE)
            int64_t tmp_pos= pos_max + 1;
            int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
            if(tmp_ts == AV_NOPTS_VALUE)

    while (pos_min < pos_limit) {
        av_log(s, AV_LOG_DEBUG, "pos_min=0x%llx pos_max=0x%llx dts_min=%lld dts_max=%lld\n",
        assert(pos_limit <= pos_max);
            int64_t approximate_keyframe_distance= pos_max - pos_limit;
            // interpolate position (better than dichotomy)
            pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
                + pos_min - approximate_keyframe_distance;
        }else if(no_change==1){
            // bisection, if interpolation failed to change min or max pos last time
            pos = (pos_min + pos_limit)>>1;
            // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
        else if(pos > pos_limit)
        ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
        av_log(s, AV_LOG_DEBUG, "%Ld %Ld %Ld / %Ld %Ld %Ld target:%Ld limit:%Ld start:%Ld noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
        assert(ts != AV_NOPTS_VALUE);
        if (target_ts <= ts) {
            /* target is at or before this keyframe: search earlier */
            pos_limit = start_pos - 1;
        if (target_ts >= ts) {
    /* pick the bound requested by the seek direction */
    pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
    ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
    ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    /* NOTE(review): passing &pos_min here looks like it should be
       &pos_max — verify against the full source */
    ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    av_log(s, AV_LOG_DEBUG, "pos=0x%llx %lld<=%lld<=%lld\n",
           pos, ts_min, target_ts, ts_max);
    /* do the seek */
    url_fseek(&s->pb, pos, SEEK_SET);
    av_update_cur_dts(s, st, ts);
/* Seek to an absolute byte position, clamped to [data_offset,
   filesize-1].
   NOTE(review): this excerpt is truncated — 'st', 'pos_min'
   initialization, the definition of 'ts' and the return are missing;
   the code below is incomplete as shown. */
static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
    AVInputFormat *avif= s->iformat;
    int64_t pos_min, pos_max;

    if (stream_index < 0)
    st= s->streams[stream_index];
    pos_max = url_filesize(url_fileno(&s->pb)) - 1;

    /* clamp the requested byte position into the payload range */
    if (pos < pos_min) pos= pos_min;
    else if(pos > pos_max) pos= pos_max;

    url_fseek(&s->pb, pos, SEEK_SET);
    /* NOTE(review): 'ts' has no visible definition in this excerpt */
    av_update_cur_dts(s, st, ts);
1228 static int av_seek_frame_generic(AVFormatContext *s,
1229 int stream_index, int64_t timestamp, int flags)
1235 if (!s->index_built) {
1236 if (is_raw_stream(s)) {
1237 av_build_index_raw(s);
1244 st = s->streams[stream_index];
1245 index = av_index_search_timestamp(st, timestamp, flags & AVSEEK_FLAG_BACKWARD);
1249 /* now we have found the index, we can seek */
1250 ie = &st->index_entries[index];
1251 av_read_frame_flush(s);
1252 url_fseek(&s->pb, ie->pos, SEEK_SET);
1254 av_update_cur_dts(s, st, ie->timestamp);
1260 * Seek to the key frame at timestamp.
1261 * 'timestamp' in 'stream_index'.
1262 * @param stream_index If stream_index is (-1), a default
1263 * stream is selected, and timestamp is automatically converted
1264 * from AV_TIME_BASE units to the stream specific time_base.
1265 * @param timestamp timestamp in AVStream.time_base units
1266 * @param flags flags which select direction and seeking mode
1267 * @return >= 0 on success
1269 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1274 av_read_frame_flush(s);
1276 if(flags & AVSEEK_FLAG_BYTE)
1277 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1279 if(stream_index < 0){
1280 stream_index= av_find_default_stream_index(s);
1281 if(stream_index < 0)
1284 st= s->streams[stream_index];
1285 /* timestamp for default must be expressed in AV_TIME_BASE units */
1286 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1288 st= s->streams[stream_index];
1290 /* first, we try the format specific seek */
1291 if (s->iformat->read_seek)
1292 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1299 if(s->iformat->read_timestamp)
1300 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1302 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1305 /*******************************************************/
1307 /* return TRUE if the stream has accurate timings for at least one component */
1308 static int av_has_timings(AVFormatContext *ic)
1313 for(i = 0;i < ic->nb_streams; i++) {
1314 st = ic->streams[i];
1315 if (st->start_time != AV_NOPTS_VALUE &&
1316 st->duration != AV_NOPTS_VALUE)
1322 /* estimate the stream timings from the one of each components. Also
1323 compute the global bitrate if possible */
1324 static void av_update_stream_timings(AVFormatContext *ic)
1326 int64_t start_time, end_time, end_time1;
1330 start_time = MAXINT64;
1331 end_time = MININT64;
1332 for(i = 0;i < ic->nb_streams; i++) {
1333 st = ic->streams[i];
1334 if (st->start_time != AV_NOPTS_VALUE) {
1335 if (st->start_time < start_time)
1336 start_time = st->start_time;
1337 if (st->duration != AV_NOPTS_VALUE) {
1338 end_time1 = st->start_time + st->duration;
1339 if (end_time1 > end_time)
1340 end_time = end_time1;
1344 if (start_time != MAXINT64) {
1345 ic->start_time = start_time;
1346 if (end_time != MAXINT64) {
1347 ic->duration = end_time - start_time;
1348 if (ic->file_size > 0) {
1349 /* compute the bit rate */
1350 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1351 (double)ic->duration;
1358 static void fill_all_stream_timings(AVFormatContext *ic)
1363 av_update_stream_timings(ic);
1364 for(i = 0;i < ic->nb_streams; i++) {
1365 st = ic->streams[i];
1366 if (st->start_time == AV_NOPTS_VALUE) {
1367 st->start_time = ic->start_time;
1368 st->duration = ic->duration;
1373 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1375 int64_t filesize, duration;
1379 /* if bit_rate is already set, we believe it */
1380 if (ic->bit_rate == 0) {
1382 for(i=0;i<ic->nb_streams;i++) {
1383 st = ic->streams[i];
1384 bit_rate += st->codec.bit_rate;
1386 ic->bit_rate = bit_rate;
1389 /* if duration is already set, we believe it */
1390 if (ic->duration == AV_NOPTS_VALUE &&
1391 ic->bit_rate != 0 &&
1392 ic->file_size != 0) {
1393 filesize = ic->file_size;
1395 duration = (int64_t)((8 * AV_TIME_BASE * (double)filesize) / (double)ic->bit_rate);
1396 for(i = 0; i < ic->nb_streams; i++) {
1397 st = ic->streams[i];
1398 if (st->start_time == AV_NOPTS_VALUE ||
1399 st->duration == AV_NOPTS_VALUE) {
1401 st->duration = duration;
/* maximum number of payload bytes scanned at each end of the file */
#define DURATION_MAX_READ_SIZE 250000

/* only usable for MPEG-PS streams: derive start_time from the first
   PTSes and duration from the PTSes near the end of the file.
   NOTE(review): this excerpt is truncated — loop headers, breaks and
   many closing braces are missing; the code below is incomplete as
   shown. */
static void av_estimate_timings_from_pts(AVFormatContext *ic)
    AVPacket pkt1, *pkt = &pkt1;
    int read_size, i, ret;
    int64_t start_time, end_time, end_time1;
    int64_t filesize, offset, duration;

    /* free previous packet */
    if (ic->cur_st && ic->cur_st->parser)
        av_free_packet(&ic->cur_pkt);

    /* flush packet queue */
    flush_packet_queue(ic);

    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
            av_parser_close(st->parser);

    /* we read the first packets to get the first PTS (not fully
       accurate, but it is enough now) */
    url_fseek(&ic->pb, 0, SEEK_SET);
        if (read_size >= DURATION_MAX_READ_SIZE)
        /* if all info is available, we can stop */
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->start_time == AV_NOPTS_VALUE)
        if (i == ic->nb_streams)
        ret = av_read_packet(ic, pkt);
        read_size += pkt->size;
        st = ic->streams[pkt->stream_index];
        if (pkt->pts != AV_NOPTS_VALUE) {
            if (st->start_time == AV_NOPTS_VALUE)
                st->start_time = av_rescale(pkt->pts, st->time_base.num * (int64_t)AV_TIME_BASE, st->time_base.den);
        av_free_packet(pkt);

    /* we compute the minimum start_time and use it as default */
    start_time = MAXINT64;
    for(i = 0; i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->start_time != AV_NOPTS_VALUE &&
            st->start_time < start_time)
            start_time = st->start_time;
    if (start_time != MAXINT64)
        ic->start_time = start_time;

    /* estimate the end time (duration) */
    /* XXX: may need to support wrapping */
    filesize = ic->file_size;
    offset = filesize - DURATION_MAX_READ_SIZE;
    url_fseek(&ic->pb, offset, SEEK_SET);
        if (read_size >= DURATION_MAX_READ_SIZE)
        /* if all info is available, we can stop */
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->duration == AV_NOPTS_VALUE)
        if (i == ic->nb_streams)
        ret = av_read_packet(ic, pkt);
        read_size += pkt->size;
        st = ic->streams[pkt->stream_index];
        if (pkt->pts != AV_NOPTS_VALUE) {
            end_time = av_rescale(pkt->pts, st->time_base.num * (int64_t)AV_TIME_BASE, st->time_base.den);
            duration = end_time - st->start_time;
                if (st->duration == AV_NOPTS_VALUE ||
                    st->duration < duration)
                    st->duration = duration;
        av_free_packet(pkt);

    /* estimate total duration */
    end_time = MININT64;
    for(i = 0;i < ic->nb_streams; i++) {
        st = ic->streams[i];
        if (st->duration != AV_NOPTS_VALUE) {
            end_time1 = st->start_time + st->duration;
            if (end_time1 > end_time)
                end_time = end_time1;

    /* update start_time (new stream may have been created, so we do
       it at the end — comment truncated in this excerpt) */
    if (ic->start_time != AV_NOPTS_VALUE) {
        for(i = 0; i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->start_time == AV_NOPTS_VALUE)
                st->start_time = ic->start_time;

    if (end_time != MININT64) {
        /* put dummy values for duration if needed */
        for(i = 0;i < ic->nb_streams; i++) {
            st = ic->streams[i];
            if (st->duration == AV_NOPTS_VALUE &&
                st->start_time != AV_NOPTS_VALUE)
                st->duration = end_time - st->start_time;
        ic->duration = end_time - ic->start_time;

    /* rewind for the subsequent header/stream reads */
    url_fseek(&ic->pb, 0, SEEK_SET);
/*
 * Pick a timing-estimation strategy for the whole container: exact
 * PTS-based scan for MPEG-PS, per-stream timings when at least one
 * stream has them, otherwise a bit-rate based guess.
 * NOTE(review): this listing is an excerpt with interior lines elided;
 * comments describe only what the visible lines show.
 */
1547 static void av_estimate_timings(AVFormatContext *ic)
1552 /* get the file size, if possible */
1553 if (ic->iformat->flags & AVFMT_NOFILE) {
1556 h = url_fileno(&ic->pb);
1557 file_size = url_filesize(h);
1561 ic->file_size = file_size;
1563 if (ic->iformat == &mpegps_demux) {
1564 /* get accurate estimate from the PTSes */
1565 av_estimate_timings_from_pts(ic);
1566 } else if (av_has_timings(ic)) {
1567 /* at least one components has timings - we use them for all
1569 fill_all_stream_timings(ic);
1571 /* less precise: use bit rate info */
1572 av_estimate_timings_from_bit_rate(ic);
1574 av_update_stream_timings(ic);
/* Debug dump of the estimated per-stream and global timings
   (values are in AV_TIME_BASE units, printed as seconds). */
1580 for(i = 0;i < ic->nb_streams; i++) {
1581 st = ic->streams[i];
1582 printf("%d: start_time: %0.3f duration: %0.3f\n",
1583 i, (double)st->start_time / AV_TIME_BASE,
1584 (double)st->duration / AV_TIME_BASE);
1586 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1587 (double)ic->start_time / AV_TIME_BASE,
1588 (double)ic->duration / AV_TIME_BASE,
1589 ic->bit_rate / 1000);
/*
 * Return non-zero when enough codec parameters are known for this
 * stream (e.g. a valid sample_rate for audio).
 * NOTE(review): excerpt — the video case body and the final return are
 * elided from this listing.
 */
1594 static int has_codec_parameters(AVCodecContext *enc)
1597 switch(enc->codec_type) {
1598 case CODEC_TYPE_AUDIO:
1599 val = enc->sample_rate;
1601 case CODEC_TYPE_VIDEO:
/*
 * Open the stream's decoder and decode one frame from (data, size) so
 * that missing codec parameters (dimensions, sample rate, ...) get
 * filled in as a side effect of decoding; the codec is closed again
 * before returning. NOTE(review): excerpt — error paths and the
 * return value handling are elided from this listing.
 */
1611 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1615     int got_picture, ret;
1618     codec = avcodec_find_decoder(st->codec.codec_id);
1621     ret = avcodec_open(&st->codec, codec);
1624     switch(st->codec.codec_type) {
1625     case CODEC_TYPE_VIDEO:
1626         ret = avcodec_decode_video(&st->codec, &picture,
1627                                    &got_picture, (uint8_t *)data, size);
1629     case CODEC_TYPE_AUDIO:
/* temporary PCM buffer; presumably freed after the decode call —
   the av_free() is elided from this listing, verify in full source */
1630         samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1633         ret = avcodec_decode_audio(&st->codec, samples,
1634                                    &got_picture, (uint8_t *)data, size);
1641     avcodec_close(&st->codec);
1645 /* absolute maximum size we read until we abort */
1646 #define MAX_READ_SIZE 5000000
1648 /* maximum duration until we stop analysing the stream */
1649 #define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 1.0))
1652 * Read the beginning of a media file to get stream information. This
1653 * is useful for file formats with no headers such as MPEG. This
1654  * function also computes the real frame rate in case of mpeg2 repeat
1657 * @param ic media file handle
1658 * @return >=0 if OK. AVERROR_xxx if error.
/*
 * Probe the beginning of the input by reading (and buffering) packets
 * until every stream has usable codec parameters, a read/duration
 * budget is exhausted, or EOF is hit. Buffered packets are kept in
 * ic->packet_buffer so a later av_read_frame() does not lose data.
 * Returns >= 0 on success, negative AVERROR on failure.
 * NOTE(review): excerpt — loop headers, 'break'/'goto' statements and
 * several closing braces are elided from this listing.
 */
1660 int av_find_stream_info(AVFormatContext *ic)
1662     int i, count, ret, read_size;
1664     AVPacket pkt1, *pkt;
1665     AVPacketList *pktl=NULL, **ppktl;
1669     ppktl = &ic->packet_buffer;
1671         /* check if one codec still needs to be handled */
1672         for(i=0;i<ic->nb_streams;i++) {
1673             st = ic->streams[i];
1674             if (!has_codec_parameters(&st->codec))
1677         if (i == ic->nb_streams) {
1678             /* NOTE: if the format has no header, then we need to read
1679                some packets to get most of the streams, so we cannot
1681             if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1682                 /* if we found the info for all the codecs, we can stop */
1687         /* we did not get all the codec info, but we read too much data */
1688         if (read_size >= MAX_READ_SIZE) {
1694         /* NOTE: a new stream can be added there if no header in file
1695            (AVFMTCTX_NOHEADER) */
1696         ret = av_read_frame_internal(ic, &pkt1);
1699             ret = -1; /* we could not have all the codec parameters before EOF */
1700             if ((ic->ctx_flags & AVFMTCTX_NOHEADER) &&
1701                 i == ic->nb_streams)
1706         pktl = av_mallocz(sizeof(AVPacketList));
1708             ret = AVERROR_NOMEM;
1712         /* add the packet in the buffered packet list */
1714         ppktl = &pktl->next;
1719         /* duplicate the packet */
1720         if (av_dup_packet(pkt) < 0) {
1721             ret = AVERROR_NOMEM;
1725         read_size += pkt->size;
/* accumulate per-stream duration/frame statistics used below for the
   frame-rate estimate and the MAX_STREAM_DURATION cutoff */
1727         st = ic->streams[pkt->stream_index];
1728         st->codec_info_duration += pkt->duration;
1729         if (pkt->duration != 0)
1730             st->codec_info_nb_frames++;
1732         /* if still no information, we try to open the codec and to
1733            decompress the frame. We try to avoid that in most cases as
1734            it takes longer and uses more memory. For MPEG4, we need to
1735            decompress for Quicktime. */
1736         if (!has_codec_parameters(&st->codec) &&
1737             (st->codec.codec_id == CODEC_ID_FLV1 ||
1738              st->codec.codec_id == CODEC_ID_H264 ||
1739              st->codec.codec_id == CODEC_ID_H263 ||
1740              st->codec.codec_id == CODEC_ID_VORBIS ||
1741              st->codec.codec_id == CODEC_ID_MJPEG ||
1742              (st->codec.codec_id == CODEC_ID_MPEG4 && !st->need_parsing)))
1743             try_decode_frame(st, pkt->data, pkt->size);
1745         if (st->codec_info_duration >= MAX_STREAM_DURATION) {
1751     for(i=0;i<ic->nb_streams;i++) {
1752         st = ic->streams[i];
1753         if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
1754             if(st->codec.codec_id == CODEC_ID_RAWVIDEO && !st->codec.codec_tag)
1755                 st->codec.codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec.pix_fmt);
1756             /* set real frame rate info */
1757             /* compute the real frame rate for telecine */
1758             if ((st->codec.codec_id == CODEC_ID_MPEG1VIDEO ||
1759                  st->codec.codec_id == CODEC_ID_MPEG2VIDEO) &&
1760                 st->codec.sub_id == 2) {
1761                 if (st->codec_info_nb_frames >= 20) {
1762                     float coded_frame_rate, est_frame_rate;
1763                     est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) /
1764                         (double)st->codec_info_duration ;
1765                     coded_frame_rate = (double)st->codec.frame_rate /
1766                         (double)st->codec.frame_rate_base;
1768                     printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n",
1769                            coded_frame_rate, est_frame_rate);
1771                     /* if we detect that it could be a telecine, we
1772                        signal it. It would be better to do it at a
1773                        higher level as it can change in a film */
1774                     if (coded_frame_rate >= 24.97 &&
1775                         (est_frame_rate >= 23.5 && est_frame_rate < 24.5)) {
/* 24024/1001 = 23.976 fps, the NTSC film rate */
1776                         st->r_frame_rate = 24024;
1777                         st->r_frame_rate_base = 1001;
1781             /* if no real frame rate, use the codec one */
1782             if (!st->r_frame_rate){
1783                 st->r_frame_rate = st->codec.frame_rate;
1784                 st->r_frame_rate_base = st->codec.frame_rate_base;
1789     av_estimate_timings(ic);
1791     /* correct DTS for b frame streams with no timestamps */
1792     for(i=0;i<ic->nb_streams;i++) {
1793         st = ic->streams[i];
1794         if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
1796             ppktl = &ic->packet_buffer;
1798                 if(ppkt1->stream_index != i)
1800                 if(ppkt1->pkt->dts < 0)
1802                 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
1804                 ppkt1->pkt->dts -= delta;
1809                 st->cur_dts -= delta;
1817 /*******************************************************/
1820 * start playing a network based stream (e.g. RTSP stream) at the
/*
 * Start playing a network stream by delegating to the demuxer's
 * read_play callback; AVERROR_NOTSUPP if the format has none.
 */
1823 int av_read_play(AVFormatContext *s)
1825     if (!s->iformat->read_play)
1826         return AVERROR_NOTSUPP;
1827     return s->iformat->read_play(s);
1831 * pause a network based stream (e.g. RTSP stream). Use av_read_play()
/*
 * Pause a network stream by delegating to the demuxer's read_pause
 * callback; AVERROR_NOTSUPP if the format has none.
 */
1834 int av_read_pause(AVFormatContext *s)
1836     if (!s->iformat->read_pause)
1837         return AVERROR_NOTSUPP;
1838     return s->iformat->read_pause(s);
1842 * Close a media file (but not its codecs)
1844 * @param s media file handle
/*
 * Close an input file: free any pending parsed packet, call the
 * demuxer's read_close, release per-stream data (parser, index),
 * flush the buffered packet queue, close the underlying ByteIO if
 * this format owns a file, and free the private data.
 * NOTE(review): excerpt — the stream free and file-close details are
 * partially elided from this listing.
 */
1846 void av_close_input_file(AVFormatContext *s)
1848     int i, must_open_file;
1851     /* free previous packet */
1852     if (s->cur_st && s->cur_st->parser)
1853         av_free_packet(&s->cur_pkt);
1855     if (s->iformat->read_close)
1856         s->iformat->read_close(s);
1857     for(i=0;i<s->nb_streams;i++) {
1858         /* free all data in a stream component */
1861         av_parser_close(st->parser);
1863         av_free(st->index_entries);
1866     flush_packet_queue(s);
1868     if (s->iformat->flags & AVFMT_NOFILE) {
1871     if (must_open_file) {
1874     av_freep(&s->priv_data);
1879 * Add a new stream to a media file. Can only be called in the
1880 * read_header function. If the flag AVFMTCTX_NOHEADER is in the
1881 * format context, then new streams can be added in read_packet too.
1884 * @param s media file handle
1885 * @param id file format dependent stream id
/*
 * Allocate and register a new AVStream on the context: defaults the
 * codec context, marks all timing fields as unknown (AV_NOPTS_VALUE),
 * installs MPEG-like pts settings (33 bits, 1/90000), and appends the
 * stream to s->streams. Returns the stream (NULL paths elided here).
 */
1887 AVStream *av_new_stream(AVFormatContext *s, int id)
1891     if (s->nb_streams >= MAX_STREAMS)
1894     st = av_mallocz(sizeof(AVStream));
1897     avcodec_get_context_defaults(&st->codec);
1899     /* no default bitrate if decoding */
1900     st->codec.bit_rate = 0;
1902     st->index = s->nb_streams;
1904     st->start_time = AV_NOPTS_VALUE;
1905     st->duration = AV_NOPTS_VALUE;
1906     st->cur_dts = AV_NOPTS_VALUE;
1908     /* default pts settings is MPEG like */
1909     av_set_pts_info(st, 33, 1, 90000);
1910     st->last_IP_pts = AV_NOPTS_VALUE;
1912     s->streams[s->nb_streams++] = st;
1916 /************************************************************/
1917 /* output media file */
/*
 * Allocate the muxer's private data (zeroed) when the output format
 * declares a priv_data_size, then forward the parameters to the
 * format's set_parameters callback if present.
 * NOTE(review): excerpt — the final return is elided from this listing.
 */
1919 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
1923     if (s->oformat->priv_data_size > 0) {
1924         s->priv_data = av_mallocz(s->oformat->priv_data_size);
1926             return AVERROR_NOMEM;
1928         s->priv_data = NULL;
1930     if (s->oformat->set_parameters) {
1931         ret = s->oformat->set_parameters(s, ap);
1939 * allocate the stream private data and write the stream header to an
1942 * @param s media file handle
1943 * @return 0 if OK. AVERROR_xxx if error.
/*
 * Write the container header via the muxer's write_header callback,
 * then seed each stream's fractional PTS counter: audio streams tick
 * in samples, video streams in frames, both scaled by the stream
 * time base numerator.
 */
1945 int av_write_header(AVFormatContext *s)
1950     ret = s->oformat->write_header(s);
1954     /* init PTS generation */
1955     for(i=0;i<s->nb_streams;i++) {
1958         switch (st->codec.codec_type) {
1959         case CODEC_TYPE_AUDIO:
1960             av_frac_init(&st->pts, 0, 0,
1961                          (int64_t)st->time_base.num * st->codec.sample_rate);
1963         case CODEC_TYPE_VIDEO:
1964             av_frac_init(&st->pts, 0, 0,
1965                          (int64_t)st->time_base.num * st->codec.frame_rate);
1974 //FIXME merge with compute_pkt_fields
1975 static void compute_pkt_fields2(AVStream *st, AVPacket *pkt){
1976 int b_frames = FFMAX(st->codec.has_b_frames, st->codec.max_b_frames);
1977 int num, den, frame_size;
1979 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);
1981 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
1984 if(pkt->pts != AV_NOPTS_VALUE)
1985 pkt->pts = av_rescale(pkt->pts, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1986 if(pkt->dts != AV_NOPTS_VALUE)
1987 pkt->dts = av_rescale(pkt->dts, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1989 /* duration field */
1990 pkt->duration = av_rescale(pkt->duration, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1991 if (pkt->duration == 0) {
1992 compute_frame_duration(&num, &den, st, NULL, pkt);
1994 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
1998 //XXX/FIXME this is a temporary hack until all encoders output pts
1999 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
2001 // pkt->pts= st->cur_dts;
2002 pkt->pts= st->pts.val;
2005 //calculate dts from pts
2006 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2008 if(st->last_IP_pts == AV_NOPTS_VALUE){
2009 st->last_IP_pts= -pkt->duration;
2011 if(st->last_IP_pts < pkt->pts){
2012 pkt->dts= st->last_IP_pts;
2013 st->last_IP_pts= pkt->pts;
2020 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
2021 st->cur_dts= pkt->dts;
2022 st->pts.val= pkt->dts;
2025 switch (st->codec.codec_type) {
2026 case CODEC_TYPE_AUDIO:
2027 frame_size = get_audio_frame_size(&st->codec, pkt->size);
2029 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2030 but it would be better if we had the real timestamps from the encoder */
2031 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2032 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2035 case CODEC_TYPE_VIDEO:
2036 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec.frame_rate_base);
/*
 * Wrap pts/dts to the stream's pts_wrap_bits width by masking with
 * (2^pts_wrap_bits - 1), emulating container timestamp wraparound.
 */
static void truncate_ts(AVStream *st, AVPacket *pkt){
    int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
//    pkt->dts= 0; //this happens for low_delay=0 and b frames, FIXME, needs further invstigation about what we should do here
    pkt->pts &= pts_mask;
    pkt->dts &= pts_mask;
2054 * Write a packet to an output media file. The packet shall contain
2055 * one audio or video frame.
2057 * @param s media file handle
2058 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2059 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
/*
 * Write one already-interleaved packet: fix up its timestamps,
 * truncate them to the stream's wrap width, hand it to the muxer's
 * write_packet, then fold in any ByteIO error.
 * NOTE(review): excerpt — the return path is elided from this listing.
 */
int av_write_frame(AVFormatContext *s, AVPacket *pkt)
    compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
    truncate_ts(s->streams[pkt->stream_index], pkt);
    ret= s->oformat->write_packet(s, pkt);
        ret= url_ferror(&s->pb);
2076 * interleave_packet implementation which will interleave per DTS.
/*
 * DTS-ordered interleaving: insert the incoming packet into the
 * context's packet_buffer sorted by dts (compared across streams via
 * cross-multiplied time bases), then emit the head packet once every
 * stream has at least one buffered packet (or on flush).
 * Returns 1 when a packet was output (presumably; the return lines
 * are elided from this listing — verify in full source).
 */
static int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
    AVPacketList *pktl, **next_point, *this_pktl;
    int streams[MAX_STREAMS];
        AVStream *st= s->streams[ pkt->stream_index];
        assert(pkt->destruct != av_destruct_packet); //FIXME
        this_pktl = av_mallocz(sizeof(AVPacketList));
        this_pktl->pkt= *pkt;
        av_dup_packet(&this_pktl->pkt);
        next_point = &s->packet_buffer;
            AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
            /* compare a.dts/a.tb vs b.dts/b.tb without division */
            int64_t left=  st2->time_base.num * (int64_t)st ->time_base.den;
            int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
            if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
            next_point= &(*next_point)->next;
        this_pktl->next= *next_point;
        *next_point= this_pktl;
    /* count how many distinct streams currently have buffered packets */
    memset(streams, 0, sizeof(streams));
    pktl= s->packet_buffer;
//av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
        if(streams[ pktl->pkt.stream_index ] == 0)
        streams[ pktl->pkt.stream_index ]++;
    if(s->nb_streams == stream_count || (flush && stream_count)){
        pktl= s->packet_buffer;
        s->packet_buffer= pktl->next;
        av_init_packet(out);
2129 * Interleaves a AVPacket correctly so it can be muxed.
2130 * @param out the interleaved packet will be output here
2131 * @param in the input packet
2132 * @param flush 1 if no further packets are available as input and all
2133 * remaining packets should be output
2134 * @return 1 if a packet was output, 0 if no packet could be output,
2135  *         < 0 if an error occurred
/*
 * Dispatch interleaving: use the muxer's own interleave_packet
 * callback when provided, otherwise fall back to the generic
 * DTS-based interleaver.
 */
static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
    if(s->oformat->interleave_packet)
        return s->oformat->interleave_packet(s, out, in, flush);
        return av_interleave_packet_per_dts(s, out, in, flush);
2145 * Writes a packet to an output media file ensuring correct interleaving.
2146 * The packet shall contain one audio or video frame.
2147 * If the packets are already correctly interleaved the application should
2148  * call av_write_frame() instead as it is slightly faster; it is also important
2149  * to keep in mind that completely non-interleaved input will need huge amounts
2150  * of memory to interleave with this, so it is preferable to interleave at the
2153 * @param s media file handle
2154 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2155 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
/*
 * Write a packet with correct interleaving: fix up timestamps, drop
 * zero-size audio packets (temporary hack, see FIXME), feed the
 * packet to the interleaver, and write out every packet the
 * interleaver releases, freeing each afterwards.
 * NOTE(review): excerpt — the loop construct and some returns are
 * elided from this listing.
 */
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
    AVStream *st= s->streams[ pkt->stream_index];
    compute_pkt_fields2(st, pkt);
//FIXME/XXX/HACK drop zero sized packets
    if(st->codec.codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
    if(pkt->dts == AV_NOPTS_VALUE)
        int ret= av_interleave_packet(s, &opkt, pkt, 0);
        if(ret<=0) //FIXME cleanup needed for ret<0 ?
        truncate_ts(s->streams[opkt.stream_index], &opkt);
        ret= s->oformat->write_packet(s, &opkt);
        av_free_packet(&opkt);
    if(url_ferror(&s->pb))
        return url_ferror(&s->pb);
2189  * write the stream trailer to an output media file and free the
2190 * file private data.
2192 * @param s media file handle
2193 * @return 0 if OK. AVERROR_xxx if error. */
/*
 * Finish muxing: flush the interleaver (flush=1) writing every
 * remaining packet, write the container trailer, and free per-stream
 * and context private data. Returns 0 or an AVERROR/ByteIO error.
 * NOTE(review): excerpt — the drain-loop construct is elided from
 * this listing.
 */
int av_write_trailer(AVFormatContext *s)
        ret= av_interleave_packet(s, &pkt, NULL, 1);
        if(ret<0) //FIXME cleanup needed for ret<0 ?
        truncate_ts(s->streams[pkt.stream_index], &pkt);
        ret= s->oformat->write_packet(s, &pkt);
        av_free_packet(&pkt);
    if(url_ferror(&s->pb))
    ret = s->oformat->write_trailer(s);
        ret=url_ferror(&s->pb);
    for(i=0;i<s->nb_streams;i++)
        av_freep(&s->streams[i]->priv_data);
    av_freep(&s->priv_data);
2227 /* "user interface" functions */
/*
 * Log a human-readable summary of a format context: header line,
 * duration, start time, bitrate, then one line per stream (with the
 * stream id in hex when the format sets AVFMT_SHOW_IDS).
 * NOTE(review): excerpt — parameter list continuation and several
 * closing braces are elided from this listing.
 */
void dump_format(AVFormatContext *ic,
    av_log(NULL, AV_LOG_DEBUG, "%s #%d, %s, %s '%s':\n",
           is_output ? "Output" : "Input",
           is_output ? ic->oformat->name : ic->iformat->name,
           is_output ? "to" : "from", url);
    av_log(NULL, AV_LOG_DEBUG, "  Duration: ");
    if (ic->duration != AV_NOPTS_VALUE) {
        int hours, mins, secs, us;
        secs = ic->duration / AV_TIME_BASE;
        us = ic->duration % AV_TIME_BASE;
        /* print with one decimal digit of sub-second precision */
        av_log(NULL, AV_LOG_DEBUG, "%02d:%02d:%02d.%01d", hours, mins, secs,
               (10 * us) / AV_TIME_BASE);
        av_log(NULL, AV_LOG_DEBUG, "N/A");
    if (ic->start_time != AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_DEBUG, ", start: ");
        secs = ic->start_time / AV_TIME_BASE;
        us = ic->start_time % AV_TIME_BASE;
        av_log(NULL, AV_LOG_DEBUG, "%d.%06d",
               secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
    av_log(NULL, AV_LOG_DEBUG, ", bitrate: ");
        av_log(NULL, AV_LOG_DEBUG,"%d kb/s", ic->bit_rate / 1000);
        av_log(NULL, AV_LOG_DEBUG, "N/A");
    av_log(NULL, AV_LOG_DEBUG, "\n");
    for(i=0;i<ic->nb_streams;i++) {
        AVStream *st = ic->streams[i];
        avcodec_string(buf, sizeof(buf), &st->codec, is_output);
        av_log(NULL, AV_LOG_DEBUG, "  Stream #%d.%d", index, i);
        /* the pid is an important information, so we display it */
        /* XXX: add a generic system */
            flags = ic->oformat->flags;
            flags = ic->iformat->flags;
        if (flags & AVFMT_SHOW_IDS) {
            av_log(NULL, AV_LOG_DEBUG, "[0x%x]", st->id);
        av_log(NULL, AV_LOG_DEBUG, ": %s\n", buf);
/* Tail of the AbvEntry struct (abv/width/height fields are elided
   above in this listing) followed by the abbreviation table used by
   parse_image_size() and parse_frame_rate(); a 0 frame_rate marks
   size-only entries. */
int frame_rate, frame_rate_base;
static AbvEntry frame_abvs[] = {
    { "ntsc",      720, 480, 30000, 1001 },
    { "pal",       720, 576,    25,    1 },
    { "qntsc",     352, 240, 30000, 1001 }, /* VCD compliant ntsc */
    { "qpal",      352, 288,    25,    1 }, /* VCD compliant pal */
    { "sntsc",     640, 480, 30000, 1001 }, /* square pixel ntsc */
    { "spal",      768, 576,    25,    1 }, /* square pixel pal */
    { "film",      352, 240,    24,    1 },
    { "ntsc-film", 352, 240, 24000, 1001 },
    { "sqcif",     128,  96,     0,    0 },
    { "qcif",      176, 144,     0,    0 },
    { "cif",       352, 288,     0,    0 },
    { "4cif",      704, 576,     0,    0 },
/*
 * Parse a frame size: first match against the frame_abvs abbreviation
 * table ("pal", "cif", ...), otherwise parse "WIDTHxHEIGHT" with
 * strtol. Returns -1 on non-positive dimensions (success path return
 * elided from this listing).
 */
int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
    int n = sizeof(frame_abvs) / sizeof(AbvEntry);
    int frame_width = 0, frame_height = 0;
        if (!strcmp(frame_abvs[i].abv, str)) {
            frame_width = frame_abvs[i].width;
            frame_height = frame_abvs[i].height;
        frame_width = strtol(p, (char **)&p, 10);
        frame_height = strtol(p, (char **)&p, 10);
    if (frame_width <= 0 || frame_height <= 0)
    *width_ptr = frame_width;
    *height_ptr = frame_height;
/*
 * Parse a frame rate argument with three fallbacks: abbreviation
 * table ("ntsc", "film", ...), explicit fraction "num/den", and
 * finally a decimal value scaled by DEFAULT_FRAME_RATE_BASE and
 * rounded. A zero rate or base is rejected.
 */
int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
    /* First, we check our abbreviation table */
    for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
         if (!strcmp(frame_abvs[i].abv, arg)) {
             *frame_rate = frame_abvs[i].frame_rate;
             *frame_rate_base = frame_abvs[i].frame_rate_base;
    /* Then, we try to parse it as fraction */
    cp = strchr(arg, '/');
        *frame_rate = strtol(arg, &cpp, 10);
        if (cpp != arg || cpp == cp)
            *frame_rate_base = strtol(cp+1, &cpp, 10);
        /* Finally we give up and parse it as double */
        *frame_rate_base = DEFAULT_FRAME_RATE_BASE; //FIXME use av_d2q()
        *frame_rate = (int)(strtod(arg, 0) * (*frame_rate_base) + 0.5);
    if (!*frame_rate || !*frame_rate_base)
2374 * - If not a duration:
2375 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2376 * Time is localtime unless Z is suffixed to the end. In this case GMT
2377 * Return the date in micro seconds since 1970
2379 * HH[:MM[:SS[.m...]]]
/*
 * Parse a date/time string (or a duration when 'duration' is set)
 * into microseconds. Tries the date formats, then the time formats,
 * via small_strptime; 'Z'/'z' suffix selects UTC, otherwise local
 * time. A trailing ".m..." fraction is accumulated digit by digit
 * into a microsecond value.
 * NOTE(review): excerpt — most of the control flow (format tables,
 * mktime conversion, fraction loop setup) is elided from this
 * listing; comments are best-effort.
 */
int64_t parse_date(const char *datestr, int duration)
    static const char *date_fmt[] = {
    static const char *time_fmt[] = {
    time_t now = time(0);
    len = strlen(datestr);
        lastch = datestr[len - 1];
    is_utc = (lastch == 'z' || lastch == 'Z');
    memset(&dt, 0, sizeof(dt));
        for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
            q = small_strptime(p, date_fmt[i], &dt);
            dt = *localtime(&now);
            dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
        if (*p == 'T' || *p == 't' || *p == ' ')
        for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
            q = small_strptime(p, time_fmt[i], &dt);
        q = small_strptime(p, time_fmt[0], &dt);
            dt.tm_sec = strtol(p, (char **)&q, 10);
    /* Now we have all the fields that we can get */
        /* no fields parsed at all: fall back to the current time */
        return now * int64_t_C(1000000);
        t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
        dt.tm_isdst = -1; /* unknown */
        /* parse up to 6 fractional digits as microseconds */
        for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
            val += n * (*q - '0');
    return negative ? -t : t;
2490 /* syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done. Return
/*
 * Scan an '?tag1=val1&tag2=val2' style info string for tag1; when
 * found, copy its (lightly URL-decoded) value into arg (bounded by
 * arg_size). NOTE(review): excerpt — decoding details and return
 * values are elided from this listing.
 */
int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
        while (*p != '\0' && *p != '=' && *p != '&') {
            if ((q - tag) < sizeof(tag) - 1)
            while (*p != '&' && *p != '\0') {
                if ((q - arg) < arg_size - 1) {
        if (!strcmp(tag, tag1))
2531 /* Return in 'buf' the path with '%d' replaced by number. Also handles
2532 the '%0nd' format where 'n' is the total number of digits and
2533 '%%'. Return 0 if OK, and -1 if format error */
/*
 * Expand '%d' / '%0Nd' in 'path' with 'number' (and '%%' with '%'),
 * writing at most buf_size-1 chars into buf. Returns -1 when no %d
 * was found or on overflow/format error, 0 on success.
 */
int get_frame_filename(char *buf, int buf_size,
                       const char *path, int number)
    char *q, buf1[20], c;
    int nd, len, percentd_found;
            /* read the optional zero-padded width after '%0' */
            while (isdigit(*p)) {
                nd = nd * 10 + *p++ - '0';
            } while (isdigit(c));
            snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
            if ((q - buf + len) > buf_size - 1)
            memcpy(q, buf1, len);
        if ((q - buf) < buf_size - 1)
    if (!percentd_found)
2590 * Print nice hexa dump of a buffer
2591 * @param f stream for output
2593 * @param size buffer size
/*
 * Hex-dump 'size' bytes of 'buf' to stream 'f', 16 bytes per line:
 * offset, hex bytes, then a printable-ASCII column ('.' for
 * non-printable — substitution line elided from this listing).
 */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
    for(i=0;i<size;i+=16) {
        fprintf(f, "%08x ", i);
                fprintf(f, " %02x", buf[i+j]);
        for(j=0;j<len;j++) {
            if (c < ' ' || c > '~')
            fprintf(f, "%c", c);
2622 * Print on 'f' a nice dump of a packet
2623 * @param f stream for output
2624 * @param pkt packet to dump
2625 * @param dump_payload true if the payload must be displayed too
/*
 * Print a readable dump of an AVPacket: stream index, keyframe flag,
 * duration/dts/pts in seconds (AV_TIME_BASE units), size, and
 * optionally the payload as a hex dump.
 */
void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
    fprintf(f, "stream #%d:\n", pkt->stream_index);
    fprintf(f, "  keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
    fprintf(f, "  duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
    /* DTS is _always_ valid after av_read_frame() */
    fprintf(f, "  dts=");
    if (pkt->dts == AV_NOPTS_VALUE)
        fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
    /* PTS may be not known if B frames are present */
    fprintf(f, "  pts=");
    if (pkt->pts == AV_NOPTS_VALUE)
        fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
    fprintf(f, "  size=%d\n", pkt->size);
        av_hex_dump(f, pkt->data, pkt->size);
/*
 * Split a URL of the form proto://[auth@]host[:port]/path into its
 * components, each copied into a caller buffer bounded by its *_size
 * argument. The '@'/'/' positions decide whether an authorization
 * section precedes the hostname ('@' after '/' is ignored).
 * NOTE(review): excerpt — port/path handling is partially elided.
 */
void url_split(char *proto, int proto_size,
               char *authorization, int authorization_size,
               char *hostname, int hostname_size,
               char *path, int path_size,
    while (*p != ':' && *p != '\0') {
        if ((q - proto) < proto_size - 1)
    if (authorization_size > 0)
        authorization[0] = '\0';
        if (hostname_size > 0)
        char *at,*slash; // PETR: position of '@' character and '/' character
        at = strchr(p,'@'); // PETR: get the position of '@'
        slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
        if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
        q = at ? authorization : hostname;  // PETR: if '@' exists starting with auth.
        while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
            if (*p == '@') {    // PETR: passed '@'
                if (authorization_size > 0)
            } else if (!at) {   // PETR: hostname
                if ((q - hostname) < hostname_size - 1)
                if ((q - authorization) < authorization_size - 1)
        if (hostname_size > 0)
            port = strtoul(p, (char **)&p, 10);
    pstrcpy(path, path_size, p);
2722 * Set the pts for a given stream
2724 * @param pts_wrap_bits number of bits effectively used by the pts
2725 * (used for wrap control, 33 is the value for MPEG)
2726 * @param pts_num numerator to convert to seconds (MPEG: 1)
2727 * @param pts_den denominator to convert to seconds (MPEG: 90000)
/*
 * Configure a stream's timestamp representation: wrap width in bits
 * and the time base fraction pts_num/pts_den (MPEG: 33, 1/90000).
 */
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
                     int pts_num, int pts_den)
    s->pts_wrap_bits = pts_wrap_bits;
    s->time_base.num = pts_num;
    s->time_base.den = pts_den;
2737 /* fraction handling */
2740 * f = val + (num / den) + 0.5. 'num' is normalized so that it is such
2741 * as 0 <= num < den.
2743 * @param f fractional number
2744 * @param val integer value
2745 * @param num must be >= 0
2746 * @param den must be >= 1
/* Initialize fraction f to val + num/den (body elided in this listing). */
void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
2760 /* set f to (val + 0.5) */
/* Set f to val with a half-denominator fractional part, i.e. val + 0.5. */
void av_frac_set(AVFrac *f, int64_t val)
    f->num = f->den >> 1;
2768  * Fractional addition to f: f = f + (incr / f->den)
2770 * @param f fractional number
2771 * @param incr increment, can be positive or negative
/*
 * Add incr/f->den to fraction f, carrying whole units into f->val and
 * renormalizing so 0 <= num < den. NOTE(review): excerpt — the
 * negative-numerator branch is elided from this listing.
 */
void av_frac_add(AVFrac *f, int64_t incr)
    num = f->num + incr;
        f->val += num / den;
    } else if (num >= den) {
        f->val += num / den;
2794 * register a new image format
2795 * @param img_fmt Image format descriptor
/*
 * Append an image format descriptor to the end of the global
 * first_image_format singly-linked list (same pattern as the
 * input/output format registration in this file).
 */
void av_register_image_format(AVImageFormat *img_fmt)
    p = &first_image_format;
    while (*p != NULL) p = &(*p)->next;
    img_fmt->next = NULL;
2807 /* guess image format */
/*
 * Probe registered image formats against the given data and return
 * the one with the highest img_probe score (return statement elided
 * from this listing).
 */
AVImageFormat *av_probe_image_format(AVProbeData *pd)
    AVImageFormat *fmt1, *fmt;
    int score, score_max;
    for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
        if (fmt1->img_probe) {
            score = fmt1->img_probe(pd);
            if (score > score_max) {
/*
 * Return the first registered image format whose extension list
 * matches the filename's extension (NULL path elided here).
 */
AVImageFormat *guess_image_format(const char *filename)
    AVImageFormat *fmt1;
    for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
        if (fmt1->extensions && match_ext(filename, fmt1->extensions))
2839 * Read an image from a stream.
2840 * @param gb byte stream containing the image
2841 * @param fmt image format, NULL if probing is required
/*
 * Read one image from the byte stream: when no format is given,
 * peek up to PROBE_BUF_SIZE bytes (restoring the stream position
 * afterwards) to probe the format, then decode via its img_read
 * callback with the caller's allocation hook.
 */
int av_read_image(ByteIOContext *pb, const char *filename,
                  int (*alloc_cb)(void *, AVImageInfo *info), void *opaque)
    char buf[PROBE_BUF_SIZE];
    AVProbeData probe_data, *pd = &probe_data;
        pd->filename = filename;
        pos = url_ftell(pb);
        pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
        url_fseek(pb, pos, SEEK_SET);
        fmt = av_probe_image_format(pd);
            return AVERROR_NOFMT;
    ret = fmt->img_read(pb, alloc_cb, opaque);
2867 * Write an image to a stream.
2868 * @param pb byte stream for the image output
2869 * @param fmt image format
2870 * @param img image data and informations
/* Encode the image via the format's img_write callback and return its result. */
int av_write_image(ByteIOContext *pb, AVImageFormat *fmt, AVImageInfo *img)
    return fmt->img_write(pb, img);