2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/* Global registries: heads of the singly-linked lists of registered
   demuxers, muxers and image formats, appended to by the
   av_register_*_format() functions below. */
24 AVInputFormat *first_iformat = NULL;
25 AVOutputFormat *first_oformat = NULL;
26 AVImageFormat *first_image_format = NULL;
/* Register a demuxer: walk to the tail of the global input-format
   list and link 'format' there.  (NOTE(review): the lines declaring
   'p' and storing 'format' are missing from this extract.) */
28 void av_register_input_format(AVInputFormat *format)
32 while (*p != NULL) p = &(*p)->next;
/* Register a muxer: same tail-append pattern as
   av_register_input_format(), on the output-format list.
   (NOTE(review): surrounding lines are missing from this extract.) */
37 void av_register_output_format(AVOutputFormat *format)
41 while (*p != NULL) p = &(*p)->next;
/* Return nonzero if the extension of 'filename' (text after the last
   '.') case-insensitively matches one of the comma-separated entries
   in 'extensions'.  (NOTE(review): the loop copying each candidate
   into 'ext1' is missing from this extract.) */
46 int match_ext(const char *filename, const char *extensions)
54 ext = strrchr(filename, '.');
60 while (*p != '\0' && *p != ',')
63 if (!strcasecmp(ext1, ext))
/* Choose the most suitable output (mux) format.  Image sequences are
   special-cased first; otherwise every registered muxer is scored by
   short-name, MIME-type and filename-extension matches and the
   highest-scoring format is kept.  (NOTE(review): the scoring
   accumulation and return statements are missing from this extract.) */
73 AVOutputFormat *guess_format(const char *short_name, const char *filename,
74 const char *mime_type)
76 AVOutputFormat *fmt, *fmt_found;
79 /* specific test for image sequences */
80 if (!short_name && filename &&
81 filename_number_test(filename) >= 0 &&
82 guess_image_format(filename)) {
83 return guess_format("image", NULL, NULL);
86 /* find the proper file type */
92 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
94 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
96 if (filename && fmt->extensions &&
97 match_ext(filename, fmt->extensions)) {
100 if (score > score_max) {
109 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
110 const char *mime_type)
112 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
115 AVOutputFormat *stream_fmt;
116 char stream_format_name[64];
118 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
119 stream_fmt = guess_format(stream_format_name, NULL, NULL);
/* Linear search of the registered demuxer list for an exact
   short-name match.  (NOTE(review): the return statements are
   missing from this extract.) */
128 AVInputFormat *av_find_input_format(const char *short_name)
131 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
132 if (!strcmp(fmt->name, short_name))
138 /* memory handling */
141 * Default packet destructor
/* Default packet destructor: reset the payload fields after the data
   has been released.  (NOTE(review): the av_free() of pkt->data
   presumably precedes this reset — that line is missing from this
   extract; confirm against the full source.) */
143 static void av_destruct_packet(AVPacket *pkt)
146 pkt->data = NULL; pkt->size = 0;
150 * Allocate the payload of a packet and intialized its fields to default values.
153 * @param size wanted payload size
154 * @return 0 if OK. AVERROR_xxx otherwise.
/* Allocate a packet payload of 'size' bytes plus input padding, zero
   the padding, and install the default destructor so av_free_packet()
   releases the buffer.  Returns AVERROR_NOMEM on allocation failure. */
156 int av_new_packet(AVPacket *pkt, int size)
158 void *data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
160 return AVERROR_NOMEM;
/* zeroed padding lets over-reading bitstream readers see zeros */
161 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
166 pkt->destruct = av_destruct_packet;
170 /* This is a hack - the packet memory allocation stuff is broken. The
171 packet is allocated if it was not really allocated */
/* Ensure 'pkt' owns its payload: if the destructor is not the default
   one the data belongs to someone else, so copy it into a freshly
   allocated, padded buffer and take ownership.  Returns AVERROR_NOMEM
   on allocation failure. */
172 int av_dup_packet(AVPacket *pkt)
174 if (pkt->destruct != av_destruct_packet) {
176 /* we duplicate the packet and don't forget to put the padding
178 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
180 return AVERROR_NOMEM;
182 memcpy(data, pkt->data, pkt->size);
183 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
185 pkt->destruct = av_destruct_packet;
/* Initialise a byte FIFO of 'size' bytes; both read and write
   pointers start at the buffer base (FIFO empty).  (NOTE(review):
   the NULL check on the allocation is missing from this extract.) */
192 int fifo_init(FifoBuffer *f, int size)
194 f->buffer = av_malloc(size);
197 f->end = f->buffer + size;
198 f->wptr = f->rptr = f->buffer;
202 void fifo_free(FifoBuffer *f)
207 int fifo_size(FifoBuffer *f, uint8_t *rptr)
211 if (f->wptr >= rptr) {
212 size = f->wptr - rptr;
214 size = (f->end - rptr) + (f->wptr - f->buffer);
219 /* get data from the fifo (return -1 if not enough data) */
220 int fifo_read(FifoBuffer *f, uint8_t *buf, int buf_size, uint8_t **rptr_ptr)
222 uint8_t *rptr = *rptr_ptr;
225 if (f->wptr >= rptr) {
226 size = f->wptr - rptr;
228 size = (f->end - rptr) + (f->wptr - f->buffer);
233 while (buf_size > 0) {
237 memcpy(buf, rptr, len);
248 void fifo_write(FifoBuffer *f, uint8_t *buf, int size, uint8_t **wptr_ptr)
257 memcpy(wptr, buf, len);
267 int filename_number_test(const char *filename)
272 return get_frame_filename(buf, sizeof(buf), filename, 1);
275 /* guess file format */
/* Probe every registered demuxer against 'pd' and return the one with
   the best score; formats that need an open file are skipped when
   'is_opened' is false.  A format scores either via its read_probe()
   callback or, failing that, via a filename-extension match. */
278 AVInputFormat *fmt1, *fmt;
279 int score, score_max;
283 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
284 if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
287 if (fmt1->read_probe) {
288 score = fmt1->read_probe(pd);
289 } else if (fmt1->extensions) {
290 if (match_ext(pd->filename, fmt1->extensions)) {
/* (NOTE(review): the extension-match score constant and the
   best-format bookkeeping lines are missing from this extract.) */
294 if (score > score_max) {
302 /************************************************************/
303 /* input media file */
306 * open a media file from an IO stream. 'fmt' must be specified.
309 static const char* format_to_name(void* ptr)
311 AVFormatContext* fc = (AVFormatContext*) ptr;
312 if(fc->iformat) return fc->iformat->name;
313 else if(fc->oformat) return fc->oformat->name;
317 static const AVClass av_format_context_class = { "AVFormatContext", format_to_name };
319 AVFormatContext *av_alloc_format_context(void)
322 ic = av_mallocz(sizeof(AVFormatContext));
324 ic->av_class = &av_format_context_class;
328 int av_open_input_stream(AVFormatContext **ic_ptr,
329 ByteIOContext *pb, const char *filename,
330 AVInputFormat *fmt, AVFormatParameters *ap)
335 ic = av_alloc_format_context();
343 ic->duration = AV_NOPTS_VALUE;
344 ic->start_time = AV_NOPTS_VALUE;
345 pstrcpy(ic->filename, sizeof(ic->filename), filename);
347 /* allocate private data */
348 if (fmt->priv_data_size > 0) {
349 ic->priv_data = av_mallocz(fmt->priv_data_size);
350 if (!ic->priv_data) {
355 ic->priv_data = NULL;
358 err = ic->iformat->read_header(ic, ap);
363 ic->data_offset = url_ftell(&ic->pb);
369 av_freep(&ic->priv_data);
376 #define PROBE_BUF_SIZE 2048
379 * Open a media file as input. The codec are not opened. Only the file
380 * header (if present) is read.
382 * @param ic_ptr the opened media file handle is put here
383 * @param filename filename to open.
384 * @param fmt if non NULL, force the file format to use
385 * @param buf_size optional buffer size (zero if default is OK)
386 * @param ap additionnal parameters needed when opening the file (NULL if default)
387 * @return 0 if OK. AVERROR_xxx otherwise.
389 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
392 AVFormatParameters *ap)
394 int err, must_open_file, file_opened;
395 uint8_t buf[PROBE_BUF_SIZE];
396 AVProbeData probe_data, *pd = &probe_data;
397 ByteIOContext pb1, *pb = &pb1;
402 pd->filename = filename;
407 /* guess format if no file can be opened */
408 fmt = av_probe_input_format(pd, 0);
411 /* do not open file if the format does not need it. XXX: specific
412 hack needed to handle RTSP/TCP */
414 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
418 if (!fmt || must_open_file) {
419 /* if no file needed do not try to open one */
420 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
426 url_setbufsize(pb, buf_size);
429 /* read probe data */
430 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
431 if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
433 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
441 /* guess file format */
443 fmt = av_probe_input_format(pd, 1);
446 /* if still no format found, error */
452 /* XXX: suppress this hack for redirectors */
453 #ifdef CONFIG_NETWORK
454 if (fmt == &redir_demux) {
455 err = redir_open(ic_ptr, pb);
461 /* check filename in case of an image number is expected */
462 if (fmt->flags & AVFMT_NEEDNUMBER) {
463 if (filename_number_test(filename) < 0) {
464 err = AVERROR_NUMEXPECTED;
468 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
480 /*******************************************************/
483 * Read a transport packet from a media file. This function is
484 * absolete and should never be used. Use av_read_frame() instead.
486 * @param s media file handle
487 * @param pkt is filled
488 * @return 0 if OK. AVERROR_xxx if error.
/* Obsolete raw transport-packet reader: delegates straight to the
   demuxer's read_packet() callback.  New code should use
   av_read_frame() instead (see comment above). */
490 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
492 return s->iformat->read_packet(s, pkt);
495 /**********************************************************/
497 /* get the number of samples of an audio frame. Return (-1) if error */
498 static int get_audio_frame_size(AVCodecContext *enc, int size)
502 if (enc->frame_size <= 1) {
503 /* specific hack for pcm codecs because no frame size is
505 switch(enc->codec_id) {
506 case CODEC_ID_PCM_S16LE:
507 case CODEC_ID_PCM_S16BE:
508 case CODEC_ID_PCM_U16LE:
509 case CODEC_ID_PCM_U16BE:
510 if (enc->channels == 0)
512 frame_size = size / (2 * enc->channels);
514 case CODEC_ID_PCM_S8:
515 case CODEC_ID_PCM_U8:
516 case CODEC_ID_PCM_MULAW:
517 case CODEC_ID_PCM_ALAW:
518 if (enc->channels == 0)
520 frame_size = size / (enc->channels);
523 /* used for example by ADPCM codecs */
524 if (enc->bit_rate == 0)
526 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
530 frame_size = enc->frame_size;
536 /* return the frame duration in seconds, return 0 if not available */
537 static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
538 AVCodecParserContext *pc, AVPacket *pkt)
544 switch(st->codec.codec_type) {
545 case CODEC_TYPE_VIDEO:
546 *pnum = st->codec.frame_rate_base;
547 *pden = st->codec.frame_rate;
548 if (pc && pc->repeat_pict) {
550 *pnum = (*pnum) * (2 + pc->repeat_pict);
553 case CODEC_TYPE_AUDIO:
554 frame_size = get_audio_frame_size(&st->codec, pkt->size);
558 *pden = st->codec.sample_rate;
/**
 * Reconstruct a full 64-bit timestamp from one stored with only
 * 'lsb_bits' significant bits, assuming the true value lies within
 * half a wrap period of 'last_ts' (the most recent full timestamp).
 * Used to undo timestamp wrapping (see AVStream.pts_wrap_bits).
 *
 * @param lsb      truncated timestamp (only the low lsb_bits are valid)
 * @param last_ts  last known full-precision timestamp
 * @param lsb_bits number of valid low bits in 'lsb' (1..64)
 * @return the reconstructed full 64-bit timestamp
 */
static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
    /* all-ones mask over the stored bits; guard the 64-bit case since
       shifting by the full width would be undefined behaviour */
    int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
    /* centre the representable window (width mask+1) on last_ts */
    int64_t delta= last_ts - mask/2;
    return ((lsb - delta)&mask) + delta;
}
571 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
572 AVCodecParserContext *pc, AVPacket *pkt)
574 int num, den, presentation_delayed;
576 /* handle wrapping */
577 if(st->cur_dts != AV_NOPTS_VALUE){
578 if(pkt->pts != AV_NOPTS_VALUE)
579 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
580 if(pkt->dts != AV_NOPTS_VALUE)
581 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
584 if (pkt->duration == 0) {
585 compute_frame_duration(&num, &den, st, pc, pkt);
587 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
591 /* do we have a video B frame ? */
592 presentation_delayed = 0;
593 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
594 /* XXX: need has_b_frame, but cannot get it if the codec is
596 if ((st->codec.codec_id == CODEC_ID_MPEG1VIDEO ||
597 st->codec.codec_id == CODEC_ID_MPEG2VIDEO ||
598 st->codec.codec_id == CODEC_ID_MPEG4 ||
599 st->codec.codec_id == CODEC_ID_H264) &&
600 pc && pc->pict_type != FF_B_TYPE)
601 presentation_delayed = 1;
602 /* this may be redundant, but it shouldnt hurt */
603 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
604 presentation_delayed = 1;
607 if(st->cur_dts == AV_NOPTS_VALUE){
608 if(presentation_delayed) st->cur_dts = -pkt->duration;
609 else st->cur_dts = 0;
612 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
613 /* interpolate PTS and DTS if they are not present */
614 if (presentation_delayed) {
615 /* DTS = decompression time stamp */
616 /* PTS = presentation time stamp */
617 if (pkt->dts == AV_NOPTS_VALUE) {
618 /* if we know the last pts, use it */
619 if(st->last_IP_pts != AV_NOPTS_VALUE)
620 st->cur_dts = pkt->dts = st->last_IP_pts;
622 pkt->dts = st->cur_dts;
624 st->cur_dts = pkt->dts;
626 /* this is tricky: the dts must be incremented by the duration
627 of the frame we are displaying, i.e. the last I or P frame */
628 if (st->last_IP_duration == 0)
629 st->cur_dts += pkt->duration;
631 st->cur_dts += st->last_IP_duration;
632 st->last_IP_duration = pkt->duration;
633 st->last_IP_pts= pkt->pts;
634 /* cannot compute PTS if not present (we can compute it only
635 by knowing the futur */
637 /* presentation is not delayed : PTS and DTS are the same */
638 if (pkt->pts == AV_NOPTS_VALUE) {
639 if (pkt->dts == AV_NOPTS_VALUE) {
640 pkt->pts = st->cur_dts;
641 pkt->dts = st->cur_dts;
644 st->cur_dts = pkt->dts;
648 st->cur_dts = pkt->pts;
651 st->cur_dts += pkt->duration;
653 // av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
658 /* key frame computation */
659 switch(st->codec.codec_type) {
660 case CODEC_TYPE_VIDEO:
661 if (pc->pict_type == FF_I_TYPE)
662 pkt->flags |= PKT_FLAG_KEY;
664 case CODEC_TYPE_AUDIO:
665 pkt->flags |= PKT_FLAG_KEY;
672 /* convert the packet time stamp units */
673 if(pkt->pts != AV_NOPTS_VALUE)
674 pkt->pts = av_rescale(pkt->pts, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
675 if(pkt->dts != AV_NOPTS_VALUE)
676 pkt->dts = av_rescale(pkt->dts, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
679 pkt->duration = av_rescale(pkt->duration, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
/* Destructor for packets whose payload is NOT owned by the packet
   (e.g. data pointing into the parser's buffer): frees nothing, just
   detaches the fields. */
682 static void av_destruct_packet_nofree(AVPacket *pkt)
684 pkt->data = NULL; pkt->size = 0;
687 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
693 /* select current input stream component */
697 /* no parsing needed: we just output the packet as is */
698 /* raw data support */
700 compute_pkt_fields(s, st, NULL, pkt);
703 } else if (s->cur_len > 0) {
704 len = av_parser_parse(st->parser, &st->codec, &pkt->data, &pkt->size,
705 s->cur_ptr, s->cur_len,
706 s->cur_pkt.pts, s->cur_pkt.dts);
707 s->cur_pkt.pts = AV_NOPTS_VALUE;
708 s->cur_pkt.dts = AV_NOPTS_VALUE;
709 /* increment read pointer */
713 /* return packet if any */
717 pkt->stream_index = st->index;
718 pkt->pts = st->parser->pts;
719 pkt->dts = st->parser->dts;
720 pkt->destruct = av_destruct_packet_nofree;
721 compute_pkt_fields(s, st, st->parser, pkt);
726 av_free_packet(&s->cur_pkt);
730 /* read next packet */
731 ret = av_read_packet(s, &s->cur_pkt);
735 /* return the last frames, if any */
736 for(i = 0; i < s->nb_streams; i++) {
739 av_parser_parse(st->parser, &st->codec,
740 &pkt->data, &pkt->size,
742 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
747 /* no more packets: really terminates parsing */
751 st = s->streams[s->cur_pkt.stream_index];
754 s->cur_ptr = s->cur_pkt.data;
755 s->cur_len = s->cur_pkt.size;
756 if (st->need_parsing && !st->parser) {
757 st->parser = av_parser_init(st->codec.codec_id);
759 /* no parser available : just output the raw packets */
760 st->need_parsing = 0;
768 * Return the next frame of a stream. The returned packet is valid
769 * until the next av_read_frame() or until av_close_input_file() and
770 * must be freed with av_free_packet. For video, the packet contains
771 * exactly one frame. For audio, it contains an integer number of
772 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
773 * data). If the audio frames have a variable size (e.g. MPEG audio),
774 * then it contains one frame.
776 * pkt->pts, pkt->dts and pkt->duration are always set to correct
777 * values in AV_TIME_BASE unit (and guessed if the format cannot
778 * provided them). pkt->pts can be AV_NOPTS_VALUE if the video format
779 * has B frames, so it is better to rely on pkt->dts if you do not
780 * decompress the payload.
782 * Return 0 if OK, < 0 if error or end of file.
784 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
/* Serve a previously queued packet first, unlinking it from the
   buffer list; only when the queue is empty is a fresh frame parsed
   from the input. */
788 pktl = s->packet_buffer;
790 /* read packet from packet buffer, if there is data */
792 s->packet_buffer = pktl->next;
796 return av_read_frame_internal(s, pkt);
800 /* XXX: suppress the packet queue */
/* Release every packet still queued in s->packet_buffer, unlinking
   and freeing each list node in turn. */
801 static void flush_packet_queue(AVFormatContext *s)
806 pktl = s->packet_buffer;
809 s->packet_buffer = pktl->next;
810 av_free_packet(&pktl->pkt);
815 /*******************************************************/
/* Pick a default stream: the first video stream if any exists.
   (NOTE(review): the fallback for files with no video stream is
   missing from this extract.) */
818 int av_find_default_stream_index(AVFormatContext *s)
823 if (s->nb_streams <= 0)
825 for(i = 0; i < s->nb_streams; i++) {
827 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
834 /* flush the frame reader */
835 static void av_read_frame_flush(AVFormatContext *s)
840 flush_packet_queue(s);
842 /* free previous packet */
844 if (s->cur_st->parser)
845 av_free_packet(&s->cur_pkt);
852 /* for each stream, reset read state */
853 for(i = 0; i < s->nb_streams; i++) {
857 av_parser_close(st->parser);
860 st->last_IP_pts = AV_NOPTS_VALUE;
861 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
866 * add a index entry into a sorted list updateing if it is already there.
867 * @param timestamp timestamp in the timebase of the given stream
869 int av_add_index_entry(AVStream *st,
870 int64_t pos, int64_t timestamp, int distance, int flags)
872 AVIndexEntry *entries, *ie;
875 entries = av_fast_realloc(st->index_entries,
876 &st->index_entries_allocated_size,
877 (st->nb_index_entries + 1) *
878 sizeof(AVIndexEntry));
879 st->index_entries= entries;
881 if(st->nb_index_entries){
882 index= av_index_search_timestamp(st, timestamp);
885 if(ie->timestamp != timestamp){
886 if(ie->timestamp < timestamp){
887 index++; //index points to next instead of previous entry, maybe nonexistant
888 ie= &st->index_entries[index];
892 if(index != st->nb_index_entries){
893 assert(index < st->nb_index_entries);
894 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
896 st->nb_index_entries++;
898 if(ie->pos == pos && distance < ie->min_distance) //dont reduce the distance
899 distance= ie->min_distance;
902 index= st->nb_index_entries++;
907 ie->timestamp = timestamp;
908 ie->min_distance= distance;
914 /* build an index for raw streams using a parser */
915 static void av_build_index_raw(AVFormatContext *s)
917 AVPacket pkt1, *pkt = &pkt1;
922 av_read_frame_flush(s);
923 url_fseek(&s->pb, s->data_offset, SEEK_SET);
926 ret = av_read_frame(s, pkt);
929 if (pkt->stream_index == 0 && st->parser &&
930 (pkt->flags & PKT_FLAG_KEY)) {
931 int64_t dts= av_rescale(pkt->dts, st->time_base.den, AV_TIME_BASE*(int64_t)st->time_base.num);
932 av_add_index_entry(st, st->parser->frame_offset, dts,
933 0, AVINDEX_KEYFRAME);
939 /* return TRUE if we deal with a raw stream (raw codec data and
/* TRUE only for a file with exactly one stream whose codec data needs
   parsing — the case the raw index builder can handle. */
941 static int is_raw_stream(AVFormatContext *s)
945 if (s->nb_streams != 1)
948 if (!st->need_parsing)
953 /* return the largest index entry whose timestamp is <=
955 int av_index_search_timestamp(AVStream *st, int wanted_timestamp)
957 AVIndexEntry *entries= st->index_entries;
958 int nb_entries= st->nb_index_entries;
969 m = (a + b + 1) >> 1;
970 timestamp = entries[m].timestamp;
971 if (timestamp > wanted_timestamp) {
983 * Does a binary search using av_index_search_timestamp() and AVCodec.read_timestamp().
984 * this isnt supposed to be called directly by a user application, but by demuxers
985 * @param target_ts target timestamp in the time base of the given stream
986 * @param stream_index stream number
988 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts){
989 AVInputFormat *avif= s->iformat;
990 int64_t pos_min, pos_max, pos, pos_limit;
991 int64_t ts_min, ts_max, ts;
993 int index, no_change, i;
996 if (stream_index < 0)
1000 av_log(s, AV_LOG_DEBUG, "read_seek: %d %lld\n", stream_index, target_ts);
1004 ts_min= AV_NOPTS_VALUE;
1005 pos_limit= -1; //gcc falsely says it may be uninitalized
1007 st= s->streams[stream_index];
1008 if(st->index_entries){
1011 index= av_index_search_timestamp(st, target_ts);
1012 e= &st->index_entries[index];
1014 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1016 ts_min= e->timestamp;
1018 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%llx dts_min=%lld\n",
1025 if(index < st->nb_index_entries){
1026 e= &st->index_entries[index];
1027 assert(e->timestamp >= target_ts);
1029 ts_max= e->timestamp;
1030 pos_limit= pos_max - e->min_distance;
1032 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%llx pos_limit=0x%llx dts_max=%lld\n",
1033 pos_max,pos_limit, ts_max);
1038 if(ts_min == AV_NOPTS_VALUE){
1039 pos_min = s->data_offset;
1040 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1041 if (ts_min == AV_NOPTS_VALUE)
1045 if(ts_max == AV_NOPTS_VALUE){
1047 pos_max = url_filesize(url_fileno(&s->pb)) - 1;
1050 ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
1052 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1053 if (ts_max == AV_NOPTS_VALUE)
1057 int64_t tmp_pos= pos_max + 1;
1058 int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1059 if(tmp_ts == AV_NOPTS_VALUE)
1068 while (pos_min < pos_limit) {
1070 av_log(s, AV_LOG_DEBUG, "pos_min=0x%llx pos_max=0x%llx dts_min=%lld dts_max=%lld\n",
1074 assert(pos_limit <= pos_max);
1077 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1078 // interpolate position (better than dichotomy)
1079 pos = (int64_t)((double)(pos_max - pos_min) *
1080 (double)(target_ts - ts_min) /
1081 (double)(ts_max - ts_min)) + pos_min - approximate_keyframe_distance;
1082 }else if(no_change==1){
1083 // bisection, if interpolation failed to change min or max pos last time
1084 pos = (pos_min + pos_limit)>>1;
1086 // linear search if bisection failed, can only happen if there are very few or no keframes between min/max
1091 else if(pos > pos_limit)
1095 ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1101 av_log(s, AV_LOG_DEBUG, "%Ld %Ld %Ld / %Ld %Ld %Ld target:%Ld limit:%Ld start:%Ld noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1103 assert(ts != AV_NOPTS_VALUE);
1104 if (target_ts < ts) {
1105 pos_limit = start_pos - 1;
1111 /* check if we are lucky */
1112 if (target_ts == ts)
1120 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1122 ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1123 av_log(s, AV_LOG_DEBUG, "pos=0x%llx %lld<=%lld<=%lld\n",
1124 pos, ts_min, target_ts, ts_max);
1127 url_fseek(&s->pb, pos, SEEK_SET);
1129 ts= av_rescale(ts_min, AV_TIME_BASE*(int64_t)st->time_base.num, st->time_base.den);
1130 for(i = 0; i < s->nb_streams; i++) {
1133 st->cur_dts = av_rescale(ts, st->time_base.den, AV_TIME_BASE*(int64_t)st->time_base.num);
1139 static int av_seek_frame_generic(AVFormatContext *s,
1140 int stream_index, int64_t timestamp)
1146 if (!s->index_built) {
1147 if (is_raw_stream(s)) {
1148 av_build_index_raw(s);
1155 st = s->streams[stream_index];
1156 index = av_index_search_timestamp(st, timestamp);
1160 /* now we have found the index, we can seek */
1161 ie = &st->index_entries[index];
1162 av_read_frame_flush(s);
1163 url_fseek(&s->pb, ie->pos, SEEK_SET);
1165 timestamp= av_rescale(ie->timestamp, AV_TIME_BASE*(int64_t)st->time_base.num, st->time_base.den);
1166 for(i = 0; i < s->nb_streams; i++) {
1169 st->cur_dts = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE*(int64_t)st->time_base.num);
1176 * Seek to the key frame just before the frame at timestamp
1177 * 'timestamp' in 'stream_index'.
1178 * @param stream_index If stream_index is (-1), a default
1179 * stream is selected
1180 * @param timestamp timestamp in AV_TIME_BASE units
1181 * @return >= 0 on success
1183 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp)
1188 av_read_frame_flush(s);
1190 if(stream_index < 0){
1191 stream_index= av_find_default_stream_index(s);
1192 if(stream_index < 0)
1195 st= s->streams[stream_index];
1197 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1199 /* first, we try the format specific seek */
1200 if (s->iformat->read_seek)
1201 ret = s->iformat->read_seek(s, stream_index, timestamp);
1208 if(s->iformat->read_timestamp)
1209 return av_seek_frame_binary(s, stream_index, timestamp);
1211 return av_seek_frame_generic(s, stream_index, timestamp);
1214 /*******************************************************/
1216 /* return TRUE if the stream has accurate timings for at least one component */
/* TRUE if at least one stream carries both a known start_time and a
   known duration — i.e. accurate timings are available somewhere. */
1217 static int av_has_timings(AVFormatContext *ic)
1222 for(i = 0;i < ic->nb_streams; i++) {
1223 st = ic->streams[i];
1224 if (st->start_time != AV_NOPTS_VALUE &&
1225 st->duration != AV_NOPTS_VALUE)
1231 /* estimate the stream timings from the one of each components. Also
1232 compute the global bitrate if possible */
1233 static void av_update_stream_timings(AVFormatContext *ic)
1235 int64_t start_time, end_time, end_time1;
1239 start_time = MAXINT64;
1240 end_time = MININT64;
1241 for(i = 0;i < ic->nb_streams; i++) {
1242 st = ic->streams[i];
1243 if (st->start_time != AV_NOPTS_VALUE) {
1244 if (st->start_time < start_time)
1245 start_time = st->start_time;
1246 if (st->duration != AV_NOPTS_VALUE) {
1247 end_time1 = st->start_time + st->duration;
1248 if (end_time1 > end_time)
1249 end_time = end_time1;
1253 if (start_time != MAXINT64) {
1254 ic->start_time = start_time;
1255 if (end_time != MAXINT64) {
1256 ic->duration = end_time - start_time;
1257 if (ic->file_size > 0) {
1258 /* compute the bit rate */
1259 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1260 (double)ic->duration;
1267 static void fill_all_stream_timings(AVFormatContext *ic)
1272 av_update_stream_timings(ic);
1273 for(i = 0;i < ic->nb_streams; i++) {
1274 st = ic->streams[i];
1275 if (st->start_time == AV_NOPTS_VALUE) {
1276 st->start_time = ic->start_time;
1277 st->duration = ic->duration;
1282 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1284 int64_t filesize, duration;
1288 /* if bit_rate is already set, we believe it */
1289 if (ic->bit_rate == 0) {
1291 for(i=0;i<ic->nb_streams;i++) {
1292 st = ic->streams[i];
1293 bit_rate += st->codec.bit_rate;
1295 ic->bit_rate = bit_rate;
1298 /* if duration is already set, we believe it */
1299 if (ic->duration == AV_NOPTS_VALUE &&
1300 ic->bit_rate != 0 &&
1301 ic->file_size != 0) {
1302 filesize = ic->file_size;
1304 duration = (int64_t)((8 * AV_TIME_BASE * (double)filesize) / (double)ic->bit_rate);
1305 for(i = 0; i < ic->nb_streams; i++) {
1306 st = ic->streams[i];
1307 if (st->start_time == AV_NOPTS_VALUE ||
1308 st->duration == AV_NOPTS_VALUE) {
1310 st->duration = duration;
1317 #define DURATION_MAX_READ_SIZE 250000
1319 /* only usable for MPEG-PS streams */
1320 static void av_estimate_timings_from_pts(AVFormatContext *ic)
1322 AVPacket pkt1, *pkt = &pkt1;
1324 int read_size, i, ret;
1325 int64_t start_time, end_time, end_time1;
1326 int64_t filesize, offset, duration;
1328 /* free previous packet */
1329 if (ic->cur_st && ic->cur_st->parser)
1330 av_free_packet(&ic->cur_pkt);
1333 /* flush packet queue */
1334 flush_packet_queue(ic);
1336 for(i=0;i<ic->nb_streams;i++) {
1337 st = ic->streams[i];
1339 av_parser_close(st->parser);
1344 /* we read the first packets to get the first PTS (not fully
1345 accurate, but it is enough now) */
1346 url_fseek(&ic->pb, 0, SEEK_SET);
1349 if (read_size >= DURATION_MAX_READ_SIZE)
1351 /* if all info is available, we can stop */
1352 for(i = 0;i < ic->nb_streams; i++) {
1353 st = ic->streams[i];
1354 if (st->start_time == AV_NOPTS_VALUE)
1357 if (i == ic->nb_streams)
1360 ret = av_read_packet(ic, pkt);
1363 read_size += pkt->size;
1364 st = ic->streams[pkt->stream_index];
1365 if (pkt->pts != AV_NOPTS_VALUE) {
1366 if (st->start_time == AV_NOPTS_VALUE)
1367 st->start_time = av_rescale(pkt->pts, st->time_base.num * (int64_t)AV_TIME_BASE, st->time_base.den);
1369 av_free_packet(pkt);
1372 /* we compute the minimum start_time and use it as default */
1373 start_time = MAXINT64;
1374 for(i = 0; i < ic->nb_streams; i++) {
1375 st = ic->streams[i];
1376 if (st->start_time != AV_NOPTS_VALUE &&
1377 st->start_time < start_time)
1378 start_time = st->start_time;
1380 if (start_time != MAXINT64)
1381 ic->start_time = start_time;
1383 /* estimate the end time (duration) */
1384 /* XXX: may need to support wrapping */
1385 filesize = ic->file_size;
1386 offset = filesize - DURATION_MAX_READ_SIZE;
1390 url_fseek(&ic->pb, offset, SEEK_SET);
1393 if (read_size >= DURATION_MAX_READ_SIZE)
1395 /* if all info is available, we can stop */
1396 for(i = 0;i < ic->nb_streams; i++) {
1397 st = ic->streams[i];
1398 if (st->duration == AV_NOPTS_VALUE)
1401 if (i == ic->nb_streams)
1404 ret = av_read_packet(ic, pkt);
1407 read_size += pkt->size;
1408 st = ic->streams[pkt->stream_index];
1409 if (pkt->pts != AV_NOPTS_VALUE) {
1410 end_time = av_rescale(pkt->pts, st->time_base.num * (int64_t)AV_TIME_BASE, st->time_base.den);
1411 duration = end_time - st->start_time;
1413 if (st->duration == AV_NOPTS_VALUE ||
1414 st->duration < duration)
1415 st->duration = duration;
1418 av_free_packet(pkt);
1421 /* estimate total duration */
1422 end_time = MININT64;
1423 for(i = 0;i < ic->nb_streams; i++) {
1424 st = ic->streams[i];
1425 if (st->duration != AV_NOPTS_VALUE) {
1426 end_time1 = st->start_time + st->duration;
1427 if (end_time1 > end_time)
1428 end_time = end_time1;
1432 /* update start_time (new stream may have been created, so we do
1434 if (ic->start_time != AV_NOPTS_VALUE) {
1435 for(i = 0; i < ic->nb_streams; i++) {
1436 st = ic->streams[i];
1437 if (st->start_time == AV_NOPTS_VALUE)
1438 st->start_time = ic->start_time;
1442 if (end_time != MININT64) {
1443 /* put dummy values for duration if needed */
1444 for(i = 0;i < ic->nb_streams; i++) {
1445 st = ic->streams[i];
1446 if (st->duration == AV_NOPTS_VALUE &&
1447 st->start_time != AV_NOPTS_VALUE)
1448 st->duration = end_time - st->start_time;
1450 ic->duration = end_time - ic->start_time;
1453 url_fseek(&ic->pb, 0, SEEK_SET);
1456 static void av_estimate_timings(AVFormatContext *ic)
1461 /* get the file size, if possible */
1462 if (ic->iformat->flags & AVFMT_NOFILE) {
1465 h = url_fileno(&ic->pb);
1466 file_size = url_filesize(h);
1470 ic->file_size = file_size;
1472 if (ic->iformat == &mpegps_demux) {
1473 /* get accurate estimate from the PTSes */
1474 av_estimate_timings_from_pts(ic);
1475 } else if (av_has_timings(ic)) {
1476 /* at least one components has timings - we use them for all
1478 fill_all_stream_timings(ic);
1480 /* less precise: use bit rate info */
1481 av_estimate_timings_from_bit_rate(ic);
1483 av_update_stream_timings(ic);
1489 for(i = 0;i < ic->nb_streams; i++) {
1490 st = ic->streams[i];
1491 printf("%d: start_time: %0.3f duration: %0.3f\n",
1492 i, (double)st->start_time / AV_TIME_BASE,
1493 (double)st->duration / AV_TIME_BASE);
1495 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1496 (double)ic->start_time / AV_TIME_BASE,
1497 (double)ic->duration / AV_TIME_BASE,
1498 ic->bit_rate / 1000);
1503 static int has_codec_parameters(AVCodecContext *enc)
1506 switch(enc->codec_type) {
1507 case CODEC_TYPE_AUDIO:
1508 val = enc->sample_rate;
1510 case CODEC_TYPE_VIDEO:
1520 static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1524 int got_picture, ret;
1527 codec = avcodec_find_decoder(st->codec.codec_id);
1530 ret = avcodec_open(&st->codec, codec);
1533 switch(st->codec.codec_type) {
1534 case CODEC_TYPE_VIDEO:
1535 ret = avcodec_decode_video(&st->codec, &picture,
1536 &got_picture, (uint8_t *)data, size);
1538 case CODEC_TYPE_AUDIO:
1539 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1542 ret = avcodec_decode_audio(&st->codec, samples,
1543 &got_picture, (uint8_t *)data, size);
1550 avcodec_close(&st->codec);
1554 /* absolute maximum size we read until we abort */
1555 #define MAX_READ_SIZE 5000000
1557 /* maximum duration until we stop analysing the stream */
1558 #define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 1.0))
1561 * Read the beginning of a media file to get stream information. This
1562 * is useful for file formats with no headers such as MPEG. This
1563 * function also compute the real frame rate in case of mpeg2 repeat
1566 * @param ic media file handle
1567 * @return >=0 if OK. AVERROR_xxx if error.
1569 int av_find_stream_info(AVFormatContext *ic)
1571 int i, count, ret, read_size;
1573 AVPacket pkt1, *pkt;
1574 AVPacketList *pktl=NULL, **ppktl;
1578 ppktl = &ic->packet_buffer;
1580 /* check if one codec still needs to be handled */
1581 for(i=0;i<ic->nb_streams;i++) {
1582 st = ic->streams[i];
1583 if (!has_codec_parameters(&st->codec))
1586 if (i == ic->nb_streams) {
1587 /* NOTE: if the format has no header, then we need to read
1588 some packets to get most of the streams, so we cannot
1590 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1591 /* if we found the info for all the codecs, we can stop */
1596 /* we did not get all the codec info, but we read too much data */
1597 if (read_size >= MAX_READ_SIZE) {
1603 /* NOTE: a new stream can be added there if no header in file
1604 (AVFMTCTX_NOHEADER) */
1605 ret = av_read_frame_internal(ic, &pkt1);
1608 ret = -1; /* we could not have all the codec parameters before EOF */
1609 if ((ic->ctx_flags & AVFMTCTX_NOHEADER) &&
1610 i == ic->nb_streams)
1615 pktl = av_mallocz(sizeof(AVPacketList));
1617 ret = AVERROR_NOMEM;
1621 /* add the packet in the buffered packet list */
1623 ppktl = &pktl->next;
1628 /* duplicate the packet */
1629 if (av_dup_packet(pkt) < 0) {
1630 ret = AVERROR_NOMEM;
1634 read_size += pkt->size;
1636 st = ic->streams[pkt->stream_index];
1637 st->codec_info_duration += pkt->duration;
1638 if (pkt->duration != 0)
1639 st->codec_info_nb_frames++;
1641 /* if still no information, we try to open the codec and to
1642 decompress the frame. We try to avoid that in most cases as
1643 it takes longer and uses more memory. For MPEG4, we need to
1644 decompress for Quicktime. */
1645 if (!has_codec_parameters(&st->codec) &&
1646 (st->codec.codec_id == CODEC_ID_FLV1 ||
1647 st->codec.codec_id == CODEC_ID_H264 ||
1648 st->codec.codec_id == CODEC_ID_H263 ||
1649 st->codec.codec_id == CODEC_ID_VORBIS ||
1650 st->codec.codec_id == CODEC_ID_MJPEG ||
1651 (st->codec.codec_id == CODEC_ID_MPEG4 && !st->need_parsing)))
1652 try_decode_frame(st, pkt->data, pkt->size);
1654 if (st->codec_info_duration >= MAX_STREAM_DURATION) {
1660 /* set real frame rate info */
1661 for(i=0;i<ic->nb_streams;i++) {
1662 st = ic->streams[i];
1663 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
1664 /* compute the real frame rate for telecine */
1665 if ((st->codec.codec_id == CODEC_ID_MPEG1VIDEO ||
1666 st->codec.codec_id == CODEC_ID_MPEG2VIDEO) &&
1667 st->codec.sub_id == 2) {
1668 if (st->codec_info_nb_frames >= 20) {
1669 float coded_frame_rate, est_frame_rate;
1670 est_frame_rate = ((double)st->codec_info_nb_frames * AV_TIME_BASE) /
1671 (double)st->codec_info_duration ;
1672 coded_frame_rate = (double)st->codec.frame_rate /
1673 (double)st->codec.frame_rate_base;
1675 printf("telecine: coded_frame_rate=%0.3f est_frame_rate=%0.3f\n",
1676 coded_frame_rate, est_frame_rate);
1678 /* if we detect that it could be a telecine, we
1679 signal it. It would be better to do it at a
1680 higher level as it can change in a film */
1681 if (coded_frame_rate >= 24.97 &&
1682 (est_frame_rate >= 23.5 && est_frame_rate < 24.5)) {
1683 st->r_frame_rate = 24024;
1684 st->r_frame_rate_base = 1001;
1688 /* if no real frame rate, use the codec one */
1689 if (!st->r_frame_rate){
1690 st->r_frame_rate = st->codec.frame_rate;
1691 st->r_frame_rate_base = st->codec.frame_rate_base;
1696 av_estimate_timings(ic);
1698 /* correct DTS for b frame streams with no timestamps */
1699 for(i=0;i<ic->nb_streams;i++) {
1700 st = ic->streams[i];
1701 if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
1703 ppktl = &ic->packet_buffer;
1705 if(ppkt1->stream_index != i)
1707 if(ppkt1->pkt->dts < 0)
1709 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
1711 ppkt1->pkt->dts -= delta;
1716 st->cur_dts -= delta;
1724 /*******************************************************/
1727 * start playing a network based stream (e.g. RTSP stream) at the
1730 int av_read_play(AVFormatContext *s)
1732 if (!s->iformat->read_play)
1733 return AVERROR_NOTSUPP;
1734 return s->iformat->read_play(s);
1738 * pause a network based stream (e.g. RTSP stream). Use av_read_play()
1741 int av_read_pause(AVFormatContext *s)
1743 if (!s->iformat->read_pause)
1744 return AVERROR_NOTSUPP;
1745 return s->iformat->read_pause(s);
1749 * Close a media file (but not its codecs)
1751 * @param s media file handle
1753 void av_close_input_file(AVFormatContext *s)
1755 int i, must_open_file;
1758 /* free previous packet */
1759 if (s->cur_st && s->cur_st->parser)
1760 av_free_packet(&s->cur_pkt);
1762 if (s->iformat->read_close)
1763 s->iformat->read_close(s);
1764 for(i=0;i<s->nb_streams;i++) {
1765 /* free all data in a stream component */
1768 av_parser_close(st->parser);
1770 av_free(st->index_entries);
1773 flush_packet_queue(s);
1775 if (s->iformat->flags & AVFMT_NOFILE) {
1778 if (must_open_file) {
1781 av_freep(&s->priv_data);
1786 * Add a new stream to a media file. Can only be called in the
1787 * read_header function. If the flag AVFMTCTX_NOHEADER is in the
1788 * format context, then new streams can be added in read_packet too.
1791 * @param s media file handle
1792 * @param id file format dependent stream id
1794 AVStream *av_new_stream(AVFormatContext *s, int id)
1798 if (s->nb_streams >= MAX_STREAMS)
1801 st = av_mallocz(sizeof(AVStream));
1804 avcodec_get_context_defaults(&st->codec);
1806 /* no default bitrate if decoding */
1807 st->codec.bit_rate = 0;
1809 st->index = s->nb_streams;
1811 st->start_time = AV_NOPTS_VALUE;
1812 st->duration = AV_NOPTS_VALUE;
1813 st->cur_dts = AV_NOPTS_VALUE;
1815 /* default pts settings is MPEG like */
1816 av_set_pts_info(st, 33, 1, 90000);
1817 st->last_IP_pts = AV_NOPTS_VALUE;
1819 s->streams[s->nb_streams++] = st;
1823 /************************************************************/
1824 /* output media file */
1826 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
1830 if (s->oformat->priv_data_size > 0) {
1831 s->priv_data = av_mallocz(s->oformat->priv_data_size);
1833 return AVERROR_NOMEM;
1835 s->priv_data = NULL;
1837 if (s->oformat->set_parameters) {
1838 ret = s->oformat->set_parameters(s, ap);
1846 * allocate the stream private data and write the stream header to an
1849 * @param s media file handle
1850 * @return 0 if OK. AVERROR_xxx if error.
1852 int av_write_header(AVFormatContext *s)
1857 ret = s->oformat->write_header(s);
1861 /* init PTS generation */
1862 for(i=0;i<s->nb_streams;i++) {
1865 switch (st->codec.codec_type) {
1866 case CODEC_TYPE_AUDIO:
1867 av_frac_init(&st->pts, 0, 0,
1868 (int64_t)st->time_base.num * st->codec.sample_rate);
1870 case CODEC_TYPE_VIDEO:
1871 av_frac_init(&st->pts, 0, 0,
1872 (int64_t)st->time_base.num * st->codec.frame_rate);
1881 //FIXME merge with compute_pkt_fields
1882 static void compute_pkt_fields2(AVStream *st, AVPacket *pkt){
1883 int b_frames = FFMAX(st->codec.has_b_frames, st->codec.max_b_frames);
1884 int num, den, frame_size;
1886 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size, pkt->stream_index);
1888 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
1891 if(pkt->pts != AV_NOPTS_VALUE)
1892 pkt->pts = av_rescale(pkt->pts, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1893 if(pkt->dts != AV_NOPTS_VALUE)
1894 pkt->dts = av_rescale(pkt->dts, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1896 /* duration field */
1897 pkt->duration = av_rescale(pkt->duration, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1898 if (pkt->duration == 0) {
1899 compute_frame_duration(&num, &den, st, NULL, pkt);
1901 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
1905 //XXX/FIXME this is a temporary hack until all encoders output pts
1906 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
1908 // pkt->pts= st->cur_dts;
1909 pkt->pts= st->pts.val;
1912 //calculate dts from pts
1913 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
1915 if(st->last_IP_pts == AV_NOPTS_VALUE){
1916 st->last_IP_pts= -pkt->duration;
1918 if(st->last_IP_pts < pkt->pts){
1919 pkt->dts= st->last_IP_pts;
1920 st->last_IP_pts= pkt->pts;
1927 // av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
1928 st->cur_dts= pkt->dts;
1929 st->pts.val= pkt->dts;
1932 switch (st->codec.codec_type) {
1933 case CODEC_TYPE_AUDIO:
1934 frame_size = get_audio_frame_size(&st->codec, pkt->size);
1936 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
1937 but it would be better if we had the real timestamps from the encoder */
1938 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
1939 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
1942 case CODEC_TYPE_VIDEO:
1943 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec.frame_rate_base);
1950 static void truncate_ts(AVStream *st, AVPacket *pkt){
1951 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
1954 pkt->dts= 0; //this happens for low_delay=0 and b frames, FIXME, needs further invstigation about what we should do here
1956 pkt->pts &= pts_mask;
1957 pkt->dts &= pts_mask;
1961 * Write a packet to an output media file. The packet shall contain
1962 * one audio or video frame.
1964 * @param s media file handle
1965 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
1966 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
1968 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
1970 compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
1972 truncate_ts(s->streams[pkt->stream_index], pkt);
1974 return s->oformat->write_packet(s, pkt);
1978 * Writes a packet to an output media file ensuring correct interleaving.
1979 * The packet shall contain one audio or video frame.
1980 * If the packets are already correctly interleaved the application should
1981 * call av_write_frame() instead as its slightly faster, its also important
1982 * to keep in mind that non interlaved input will need huge amounts
1983 * of memory to interleave with this, so its prefereable to interleave at the
1986 * @param s media file handle
1987 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
1988 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
1990 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
1991 AVPacketList *pktl, **next_point, *this_pktl;
1993 int streams[MAX_STREAMS];
1994 AVStream *st= s->streams[ pkt->stream_index];
1996 compute_pkt_fields2(st, pkt);
1998 //FIXME/XXX/HACK drop zero sized packets
1999 if(st->codec.codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2002 if(pkt->dts == AV_NOPTS_VALUE)
2005 assert(pkt->destruct != av_destruct_packet); //FIXME
2007 this_pktl = av_mallocz(sizeof(AVPacketList));
2008 this_pktl->pkt= *pkt;
2009 av_dup_packet(&this_pktl->pkt);
2011 next_point = &s->packet_buffer;
2013 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2014 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2015 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2016 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2018 next_point= &(*next_point)->next;
2020 this_pktl->next= *next_point;
2021 *next_point= this_pktl;
2023 memset(streams, 0, sizeof(streams));
2024 pktl= s->packet_buffer;
2026 //av_log(s, AV_LOG_DEBUG, "show st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2027 if(streams[ pktl->pkt.stream_index ] == 0)
2029 streams[ pktl->pkt.stream_index ]++;
2033 while(s->nb_streams == stream_count){
2036 pktl= s->packet_buffer;
2037 //av_log(s, AV_LOG_DEBUG, "write st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2038 truncate_ts(s->streams[pktl->pkt.stream_index], &pktl->pkt);
2039 ret= s->oformat->write_packet(s, &pktl->pkt);
2041 s->packet_buffer= pktl->next;
2042 if((--streams[ pktl->pkt.stream_index ]) == 0)
2045 av_free_packet(&pktl->pkt);
2055 * write the stream trailer to an output media file and and free the
2056 * file private data.
2058 * @param s media file handle
2059 * @return 0 if OK. AVERROR_xxx if error. */
2060 int av_write_trailer(AVFormatContext *s)
2064 while(s->packet_buffer){
2066 AVPacketList *pktl= s->packet_buffer;
2068 //av_log(s, AV_LOG_DEBUG, "write_trailer st:%d dts:%lld\n", pktl->pkt.stream_index, pktl->pkt.dts);
2069 truncate_ts(s->streams[pktl->pkt.stream_index], &pktl->pkt);
2070 ret= s->oformat->write_packet(s, &pktl->pkt);
2072 s->packet_buffer= pktl->next;
2074 av_free_packet(&pktl->pkt);
2081 ret = s->oformat->write_trailer(s);
2082 av_freep(&s->priv_data);
2086 /* "user interface" functions */
2088 void dump_format(AVFormatContext *ic,
2096 av_log(NULL, AV_LOG_DEBUG, "%s #%d, %s, %s '%s':\n",
2097 is_output ? "Output" : "Input",
2099 is_output ? ic->oformat->name : ic->iformat->name,
2100 is_output ? "to" : "from", url);
2102 av_log(NULL, AV_LOG_DEBUG, " Duration: ");
2103 if (ic->duration != AV_NOPTS_VALUE) {
2104 int hours, mins, secs, us;
2105 secs = ic->duration / AV_TIME_BASE;
2106 us = ic->duration % AV_TIME_BASE;
2111 av_log(NULL, AV_LOG_DEBUG, "%02d:%02d:%02d.%01d", hours, mins, secs,
2112 (10 * us) / AV_TIME_BASE);
2114 av_log(NULL, AV_LOG_DEBUG, "N/A");
2116 av_log(NULL, AV_LOG_DEBUG, ", bitrate: ");
2118 av_log(NULL, AV_LOG_DEBUG,"%d kb/s", ic->bit_rate / 1000);
2120 av_log(NULL, AV_LOG_DEBUG, "N/A");
2122 av_log(NULL, AV_LOG_DEBUG, "\n");
2124 for(i=0;i<ic->nb_streams;i++) {
2125 AVStream *st = ic->streams[i];
2126 avcodec_string(buf, sizeof(buf), &st->codec, is_output);
2127 av_log(NULL, AV_LOG_DEBUG, " Stream #%d.%d", index, i);
2128 /* the pid is an important information, so we display it */
2129 /* XXX: add a generic system */
2131 flags = ic->oformat->flags;
2133 flags = ic->iformat->flags;
2134 if (flags & AVFMT_SHOW_IDS) {
2135 av_log(NULL, AV_LOG_DEBUG, "[0x%x]", st->id);
2137 av_log(NULL, AV_LOG_DEBUG, ": %s\n", buf);
/* frame size / frame rate abbreviation entry; a frame_rate of 0 means
   the abbreviation only specifies a picture size */
typedef struct {
    const char *abv;
    int width, height;
    int frame_rate, frame_rate_base;
} AbvEntry;

static AbvEntry frame_abvs[] = {
    { "ntsc",      720, 480, 30000, 1001 },
    { "pal",       720, 576,    25,    1 },
    { "qntsc",     352, 240, 30000, 1001 }, /* VCD compliant ntsc */
    { "qpal",      352, 288,    25,    1 }, /* VCD compliant pal */
    { "sntsc",     640, 480, 30000, 1001 }, /* square pixel ntsc */
    { "spal",      768, 576,    25,    1 }, /* square pixel pal */
    { "film",      352, 240,    24,    1 },
    { "ntsc-film", 352, 240, 24000, 1001 },
    { "sqcif",     128,  96,     0,    0 },
    { "qcif",      176, 144,     0,    0 },
    { "cif",       352, 288,     0,    0 },
    { "4cif",      704, 576,     0,    0 },
};
2162 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2165 int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2167 int frame_width = 0, frame_height = 0;
2170 if (!strcmp(frame_abvs[i].abv, str)) {
2171 frame_width = frame_abvs[i].width;
2172 frame_height = frame_abvs[i].height;
2178 frame_width = strtol(p, (char **)&p, 10);
2181 frame_height = strtol(p, (char **)&p, 10);
2183 if (frame_width <= 0 || frame_height <= 0)
2185 *width_ptr = frame_width;
2186 *height_ptr = frame_height;
2190 int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2195 /* First, we check our abbreviation table */
2196 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2197 if (!strcmp(frame_abvs[i].abv, arg)) {
2198 *frame_rate = frame_abvs[i].frame_rate;
2199 *frame_rate_base = frame_abvs[i].frame_rate_base;
2203 /* Then, we try to parse it as fraction */
2204 cp = strchr(arg, '/');
2207 *frame_rate = strtol(arg, &cpp, 10);
2208 if (cpp != arg || cpp == cp)
2209 *frame_rate_base = strtol(cp+1, &cpp, 10);
2214 /* Finally we give up and parse it as double */
2215 *frame_rate_base = DEFAULT_FRAME_RATE_BASE; //FIXME use av_d2q()
2216 *frame_rate = (int)(strtod(arg, 0) * (*frame_rate_base) + 0.5);
2218 if (!*frame_rate || !*frame_rate_base)
2225 * - If not a duration:
2226 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2227 * Time is localtime unless Z is suffixed to the end. In this case GMT
2228 * Return the date in micro seconds since 1970
2230 * HH[:MM[:SS[.m...]]]
2233 int64_t parse_date(const char *datestr, int duration)
2239 static const char *date_fmt[] = {
2243 static const char *time_fmt[] = {
2253 time_t now = time(0);
2255 len = strlen(datestr);
2257 lastch = datestr[len - 1];
2260 is_utc = (lastch == 'z' || lastch == 'Z');
2262 memset(&dt, 0, sizeof(dt));
2267 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2268 q = small_strptime(p, date_fmt[i], &dt);
2278 dt = *localtime(&now);
2280 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2285 if (*p == 'T' || *p == 't' || *p == ' ')
2288 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2289 q = small_strptime(p, time_fmt[i], &dt);
2299 q = small_strptime(p, time_fmt[0], &dt);
2301 dt.tm_sec = strtol(p, (char **)&q, 10);
2307 /* Now we have all the fields that we can get */
2312 return now * int64_t_C(1000000);
2316 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2318 dt.tm_isdst = -1; /* unknown */
2331 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2334 val += n * (*q - '0');
2338 return negative ? -t : t;
/* syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done ('+'
   becomes a space). Return 1 if tag1 was found and copy its value into
   'arg' (truncated to arg_size-1 chars), otherwise return 0. */
int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
{
    const char *p;
    char tag[128], *q;

    p = info;
    if (*p == '?')
        p++;
    for(;;) {
        /* read the tag name */
        q = tag;
        while (*p != '\0' && *p != '=' && *p != '&') {
            if ((q - tag) < sizeof(tag) - 1)
                *q++ = *p;
            p++;
        }
        *q = '\0';
        /* read the (optional) value */
        q = arg;
        if (*p == '=') {
            p++;
            while (*p != '&' && *p != '\0') {
                if ((q - arg) < arg_size - 1) {
                    if (*p == '+')
                        *q++ = ' ';
                    else
                        *q++ = *p;
                }
                p++;
            }
            *q = '\0';
        }
        if (!strcmp(tag, tag1))
            return 1;
        if (*p != '&')
            break;
        p++;
    }
    return 0;
}
/* Return in 'buf' the path with '%d' replaced by number. Also handles
   the '%0nd' format where 'n' is the total number of digits and
   '%%'. Return 0 if OK, and -1 if format error (no '%d', two '%d',
   unknown conversion, or the result does not fit in buf_size-1). */
int get_frame_filename(char *buf, int buf_size,
                       const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q = buf;
    p = path;
    percentd_found = 0;
    for(;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            do {
                /* optional zero-padding width, e.g. %05d */
                nd = 0;
                while (isdigit(*p)) {
                    nd = nd * 10 + *p++ - '0';
                }
                c = *p++;
            } while (isdigit(c));

            switch(c) {
            case '%':
                goto addchar;
            case 'd':
                /* only one %d is allowed in the pattern */
                if (percentd_found)
                    goto fail;
                percentd_found = 1;
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;
            }
        } else {
        addchar:
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;
 fail:
    *q = '\0';
    return -1;
}
/**
 * Print a nice hex dump of a buffer: 16 bytes per line with the byte
 * offset, hex values and printable ASCII representation.
 *
 * @param f    stream for output
 * @param buf  buffer to dump
 * @param size buffer size
 */
void av_hex_dump(FILE *f, uint8_t *buf, int size)
{
    int len, i, j, c;

    for(i=0;i<size;i+=16) {
        len = size - i;
        if (len > 16)
            len = 16;
        fprintf(f, "%08x ", i);
        for(j=0;j<16;j++) {
            if (j < len)
                fprintf(f, " %02x", buf[i+j]);
            else
                fprintf(f, "   ");
        }
        fprintf(f, " ");
        for(j=0;j<len;j++) {
            c = buf[i+j];
            /* non-printable bytes are shown as '.' */
            if (c < ' ' || c > '~')
                c = '.';
            fprintf(f, "%c", c);
        }
        fprintf(f, "\n");
    }
}
2473 * Print on 'f' a nice dump of a packet
2474 * @param f stream for output
2475 * @param pkt packet to dump
2476 * @param dump_payload true if the payload must be displayed too
2478 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2480 fprintf(f, "stream #%d:\n", pkt->stream_index);
2481 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2482 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2483 /* DTS is _always_ valid after av_read_frame() */
2484 fprintf(f, " dts=");
2485 if (pkt->dts == AV_NOPTS_VALUE)
2488 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
2489 /* PTS may be not known if B frames are present */
2490 fprintf(f, " pts=");
2491 if (pkt->pts == AV_NOPTS_VALUE)
2494 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
2496 fprintf(f, " size=%d\n", pkt->size);
2498 av_hex_dump(f, pkt->data, pkt->size);
/**
 * Split a URL of the form proto://authorization@hostname:port/path into
 * its components. Each output buffer is truncated to its given size;
 * port is -1 when absent; a URL without "proto:" is treated as a plain
 * path.
 */
void url_split(char *proto, int proto_size,
               char *authorization, int authorization_size,
               char *hostname, int hostname_size,
               int *port_ptr,
               char *path, int path_size,
               const char *url)
{
    const char *p;
    char *q;
    int port;

    port = -1;

    p = url;
    q = proto;
    while (*p != ':' && *p != '\0') {
        if ((q - proto) < proto_size - 1)
            *q++ = *p;
        p++;
    }
    if (proto_size > 0)
        *q = '\0';
    if (authorization_size > 0)
        authorization[0] = '\0';
    if (*p == '\0') {
        /* no protocol part: the whole URL is a path */
        if (proto_size > 0)
            proto[0] = '\0';
        if (hostname_size > 0)
            hostname[0] = '\0';
        p = url;
    } else {
        char *at,*slash; // PETR: position of '@' character and '/' character

        p++;
        if (*p == '/')
            p++;
        if (*p == '/')
            p++;
        at = strchr(p,'@'); // PETR: get the position of '@'
        slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
        if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'

        q = at ? authorization : hostname; // PETR: if '@' exists starting with auth.

        while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
            if (*p == '@') { // PETR: passed '@'
                if (authorization_size > 0)
                    *q = '\0';
                q = hostname;
                at = NULL;
            } else if (!at) { // PETR: hostname
                if ((q - hostname) < hostname_size - 1)
                    *q++ = *p;
            } else {
                if ((q - authorization) < authorization_size - 1)
                    *q++ = *p;
            }
            p++;
        }
        if (hostname_size > 0)
            *q = '\0';
        if (*p == ':') {
            p++;
            port = strtoul(p, (char **)&p, 10);
        }
    }
    if (port_ptr)
        *port_ptr = port;
    pstrcpy(path, path_size, p);
}
2573 * Set the pts for a given stream
2575 * @param pts_wrap_bits number of bits effectively used by the pts
2576 * (used for wrap control, 33 is the value for MPEG)
2577 * @param pts_num numerator to convert to seconds (MPEG: 1)
2578 * @param pts_den denominator to convert to seconds (MPEG: 90000)
2580 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
2581 int pts_num, int pts_den)
2583 s->pts_wrap_bits = pts_wrap_bits;
2584 s->time_base.num = pts_num;
2585 s->time_base.den = pts_den;
2588 /* fraction handling */
2591 * f = val + (num / den) + 0.5. 'num' is normalized so that it is such
2592 * as 0 <= num < den.
2594 * @param f fractional number
2595 * @param val integer value
2596 * @param num must be >= 0
2597 * @param den must be >= 1
2599 void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
2611 /* set f to (val + 0.5) */
2612 void av_frac_set(AVFrac *f, int64_t val)
2615 f->num = f->den >> 1;
2619 * Fractionnal addition to f: f = f + (incr / f->den)
2621 * @param f fractional number
2622 * @param incr increment, can be positive or negative
2624 void av_frac_add(AVFrac *f, int64_t incr)
2628 num = f->num + incr;
2631 f->val += num / den;
2637 } else if (num >= den) {
2638 f->val += num / den;
2645 * register a new image format
2646 * @param img_fmt Image format descriptor
2648 void av_register_image_format(AVImageFormat *img_fmt)
2652 p = &first_image_format;
2653 while (*p != NULL) p = &(*p)->next;
2655 img_fmt->next = NULL;
2658 /* guess image format */
2659 AVImageFormat *av_probe_image_format(AVProbeData *pd)
2661 AVImageFormat *fmt1, *fmt;
2662 int score, score_max;
2666 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
2667 if (fmt1->img_probe) {
2668 score = fmt1->img_probe(pd);
2669 if (score > score_max) {
2678 AVImageFormat *guess_image_format(const char *filename)
2680 AVImageFormat *fmt1;
2682 for(fmt1 = first_image_format; fmt1 != NULL; fmt1 = fmt1->next) {
2683 if (fmt1->extensions && match_ext(filename, fmt1->extensions))
2690 * Read an image from a stream.
2691 * @param gb byte stream containing the image
2692 * @param fmt image format, NULL if probing is required
2694 int av_read_image(ByteIOContext *pb, const char *filename,
2696 int (*alloc_cb)(void *, AVImageInfo *info), void *opaque)
2698 char buf[PROBE_BUF_SIZE];
2699 AVProbeData probe_data, *pd = &probe_data;
2704 pd->filename = filename;
2706 pos = url_ftell(pb);
2707 pd->buf_size = get_buffer(pb, buf, PROBE_BUF_SIZE);
2708 url_fseek(pb, pos, SEEK_SET);
2709 fmt = av_probe_image_format(pd);
2712 return AVERROR_NOFMT;
2713 ret = fmt->img_read(pb, alloc_cb, opaque);
2718 * Write an image to a stream.
2719 * @param pb byte stream for the image output
2720 * @param fmt image format
2721 * @param img image data and informations
2723 int av_write_image(ByteIOContext *pb, AVImageFormat *fmt, AVImageInfo *img)
2725 return fmt->img_write(pb, img);