/*
2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
23 #include "avio_internal.h"
25 #include "libavcodec/internal.h"
26 #include "libavcodec/bytestream.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/dict.h"
29 #include "libavutil/internal.h"
30 #include "libavutil/pixdesc.h"
33 #include "libavutil/avassert.h"
34 #include "libavutil/avstring.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/parseutils.h"
37 #include "libavutil/time.h"
39 #include "audiointerleave.h"
51 * various utility functions for use within Libav
/* Return the compile-time libavformat version as LIBAVFORMAT_VERSION_INT. */
54 unsigned avformat_version(void)
56 return LIBAVFORMAT_VERSION_INT;
/* Return the build-time configuration string of this library. */
59 const char *avformat_configuration(void)
61 return LIBAV_CONFIGURATION;
/* Return the license string; pointer arithmetic skips the "libavformat
 * license: " prefix baked into the concatenated literal. */
64 const char *avformat_license(void)
66 #define LICENSE_PREFIX "libavformat license: "
67 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* NOTE(review): this dump is missing many source lines (braces, loop header,
 * error paths); tokens below are preserved exactly as found. */
70 /* an arbitrarily chosen "sane" max packet size -- 50M */
71 #define SANE_CHUNK_SIZE (50000000)
/* Grow pkt and read up to `size` bytes from s in SANE_CHUNK_SIZE-bounded
 * chunks. Returns bytes appended, or `ret` (an avio_read error) when
 * nothing was appended. */
74 * Read the data in sane-sized chunks and append to pkt.
75 * Return the number of bytes read or an error.
77 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
79 int64_t chunk_size = size;
80 int orig_pos = pkt->pos; // av_grow_packet might reset pos
81 int orig_size = pkt->size;
85 int prev_size = pkt->size;
/* When the requested size is huge, bound each read by what is left in the
 * file (when known) so a lying caller cannot force one giant allocation. */
89 * When the caller requests a lot of data, limit it to the amount left
90 * in file or SANE_CHUNK_SIZE when it is not known
92 if (size > SANE_CHUNK_SIZE) {
93 int64_t filesize = avio_size(s) - avio_tell(s);
94 chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
96 read_size = FFMIN(size, chunk_size);
98 ret = av_grow_packet(pkt, read_size);
102 ret = avio_read(s, pkt->data + prev_size, read_size);
/* Short read: shrink back to the bytes actually received (FFMAX clamps a
 * negative error return to zero appended bytes). */
103 if (ret != read_size) {
104 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
114 return pkt->size > orig_size ? pkt->size - orig_size : ret;
/* Allocate/fill a packet with up to `size` bytes read from s, recording the
 * stream position in pkt->pos before reading. */
117 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
122 pkt->pos = avio_tell(s);
124 return append_packet_chunked(s, pkt, size);
/* Append up to `size` more bytes to an existing packet.
 * NOTE(review): the two returns below cannot both be unconditional — the
 * guard selecting av_get_packet() for an empty pkt appears to have been
 * dropped from this dump; confirm against upstream. */
127 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
130 return av_get_packet(s, pkt, size);
131 return append_packet_chunked(s, pkt, size);
/* Return non-zero when `filename` contains a printf-style frame-number
 * pattern accepted by av_get_frame_filename(). */
135 int av_filename_number_test(const char *filename)
138 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
/* Probe all registered demuxers against pd and return the best match whose
 * score exceeds *score_max (updated in place). A local copy of pd is used
 * so an ID3v2 tag prefix can be skipped without touching the caller's data. */
141 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
143 AVProbeData lpd = *pd;
144 AVInputFormat *fmt1 = NULL, *fmt;
/* Skip a leading ID3v2 tag so format probes see real stream data. */
147 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
148 int id3len = ff_id3v2_tag_len(lpd.buf);
149 if (lpd.buf_size > id3len + 16) {
151 lpd.buf_size -= id3len;
/* Iterate every registered input format; formats whose NOFILE flag does not
 * match the is_opened state are skipped. */
157 while ((fmt1 = av_iformat_next(fmt1))) {
158 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
161 if (fmt1->read_probe) {
162 score = fmt1->read_probe(&lpd);
163 } else if (fmt1->extensions) {
164 if (av_match_ext(lpd.filename, fmt1->extensions)) {
165 score = AVPROBE_SCORE_EXTENSION;
168 if (score > *score_max) {
171 }else if (score == *score_max)
/* Fallbacks when content probing failed: first by filename extension, then
 * (for files that had an ID3 tag) assume mp3 with a slightly lower score. */
175 /* a hack for files with huge id3v2 tags -- try to guess by file extension. */
176 if (!fmt && is_opened && *score_max < AVPROBE_SCORE_EXTENSION / 2) {
177 while ((fmt = av_iformat_next(fmt)))
178 if (fmt->extensions && av_match_ext(lpd.filename, fmt->extensions)) {
179 *score_max = AVPROBE_SCORE_EXTENSION / 2;
184 if (!fmt && id3 && *score_max < AVPROBE_SCORE_EXTENSION / 2 - 1) {
185 while ((fmt = av_iformat_next(fmt)))
186 if (fmt->extensions && av_match_ext("mp3", fmt->extensions)) {
187 *score_max = AVPROBE_SCORE_EXTENSION / 2 - 1;
/* Convenience wrapper around av_probe_input_format2() with a local score. */
195 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
197 return av_probe_input_format2(pd, is_opened, &score);
/* Probe pd for a raw format whose name appears in the table below and, on a
 * match, set the stream's codec id/type accordingly. */
200 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
202 static const struct {
203 const char *name; enum AVCodecID id; enum AVMediaType type;
205 { "aac" , AV_CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
206 { "ac3" , AV_CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
207 { "dts" , AV_CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
208 { "eac3" , AV_CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
209 { "h264" , AV_CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
210 { "m4v" , AV_CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
211 { "mp3" , AV_CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
212 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
215 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
219 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
220 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
/* Map the probed format name onto a codec id/type pair. */
221 for (i = 0; fmt_id_type[i].name; i++) {
222 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
223 st->codec->codec_id = fmt_id_type[i].id;
224 st->codec->codec_type = fmt_id_type[i].type;
232 /************************************************************/
233 /* input media file */
235 /** size of probe buffer, for guessing file type from file contents */
236 #define PROBE_BUF_MIN 2048
237 #define PROBE_BUF_MAX (1<<20)
/* Probe pb to detect *fmt, reading progressively larger buffers (doubling
 * from PROBE_BUF_MIN up to max_probe_size) until a format scores high
 * enough. On success the consumed bytes are pushed back via
 * ffio_rewind_with_probe_data() so demuxing starts from offset 0. */
239 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
240 const char *filename, void *logctx,
241 unsigned int offset, unsigned int max_probe_size)
243 AVProbeData pd = { filename ? filename : "", NULL, -offset };
244 unsigned char *buf = NULL;
245 int ret = 0, probe_size;
/* Clamp max_probe_size into [PROBE_BUF_MIN, PROBE_BUF_MAX]; explicit
 * too-small values are rejected rather than silently enlarged. */
247 if (!max_probe_size) {
248 max_probe_size = PROBE_BUF_MAX;
249 } else if (max_probe_size > PROBE_BUF_MAX) {
250 max_probe_size = PROBE_BUF_MAX;
251 } else if (max_probe_size < PROBE_BUF_MIN) {
252 return AVERROR(EINVAL);
255 if (offset >= max_probe_size) {
256 return AVERROR(EINVAL);
/* Each pass keeps the previously read half of the buffer and appends the
 * new half (buf_offset); the score threshold drops to 0 on the last pass. */
259 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
260 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
261 int score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
262 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;
264 if (probe_size < offset) {
268 /* read probe data */
269 if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0)
271 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
272 /* fail if error was not end of file, otherwise, lower score */
273 if (ret != AVERROR_EOF) {
278 ret = 0; /* error was end of file, nothing read */
281 pd.buf = &buf[offset];
283 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
285 /* guess file format */
286 *fmt = av_probe_input_format2(&pd, 1, &score);
288 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
289 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
291 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
297 return AVERROR_INVALIDDATA;
300 /* rewind. reuse probe buffer to avoid seeking */
301 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0)
307 /* open input file and probe the format if necessary */
/* Three entry states: caller-supplied pb (custom I/O), caller-supplied
 * iformat, or neither — in which case the file is opened with avio_open2()
 * and probed. Returns 0/positive score on success, negative AVERROR. */
308 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
311 AVProbeData pd = {filename, NULL, 0};
/* A pre-set s->pb means the user installed custom I/O; never close it. */
314 s->flags |= AVFMT_FLAG_CUSTOM_IO;
316 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
317 else if (s->iformat->flags & AVFMT_NOFILE)
318 return AVERROR(EINVAL);
/* NOFILE formats (or a filename-only probe hit) need no avio context. */
322 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
323 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
326 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
327 &s->interrupt_callback, options)) < 0)
331 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
/* Append pkt to the singly linked packet list (head *packet_buffer, tail
 * *plast_pktl). Returns the stored AVPacket, or NULL on alloc failure. */
334 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
335 AVPacketList **plast_pktl){
336 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
341 (*plast_pktl)->next = pktl;
343 *packet_buffer = pktl;
345 /* add the packet in the buffered packet list */
/* Push each stream's attached picture (cover art) into the raw packet buffer
 * so it is delivered once by av_read_frame(); the buf is ref'd so the copy
 * owns its own reference. */
351 static int queue_attached_pictures(AVFormatContext *s)
354 for (i = 0; i < s->nb_streams; i++)
355 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
356 s->streams[i]->discard < AVDISCARD_ALL) {
357 AVPacket copy = s->streams[i]->attached_pic;
358 copy.buf = av_buffer_ref(copy.buf);
360 return AVERROR(ENOMEM);
/* NOTE(review): "©" below is almost certainly a mis-encoded "&copy," (the
 * address of the local AVPacket) — fix when restoring this file. */
362 add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end);
/* Open an input stream: allocate a context if needed, apply options, open
 * and probe the input, allocate demuxer private data, read ID3v2 metadata,
 * call the demuxer's read_header and queue attached pictures. On failure the
 * context is freed (unless the caller owns s->pb via CUSTOM_IO). */
367 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
369 AVFormatContext *s = *ps;
371 AVDictionary *tmp = NULL;
372 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
374 if (!s && !(s = avformat_alloc_context()))
375 return AVERROR(ENOMEM);
/* Work on a copy of the options dict; unconsumed entries go back to caller. */
380 av_dict_copy(&tmp, *options, 0);
382 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
385 if ((ret = init_input(s, filename, &tmp)) < 0)
388 /* check filename in case an image number is expected */
389 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
390 if (!av_filename_number_test(filename)) {
391 ret = AVERROR(EINVAL);
396 s->duration = s->start_time = AV_NOPTS_VALUE;
397 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
399 /* allocate private data */
400 if (s->iformat->priv_data_size > 0) {
401 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
402 ret = AVERROR(ENOMEM);
405 if (s->iformat->priv_class) {
406 *(const AVClass**)s->priv_data = s->iformat->priv_class;
407 av_opt_set_defaults(s->priv_data);
408 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
413 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
415 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
417 if (s->iformat->read_header)
418 if ((ret = s->iformat->read_header(s)) < 0)
421 if (id3v2_extra_meta &&
422 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
424 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
426 if ((ret = queue_attached_pictures(s)) < 0)
429 if (s->pb && !s->data_offset)
430 s->data_offset = avio_tell(s->pb);
432 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
435 av_dict_free(options);
/* Error path: free ID3 metadata, close pb only when we opened it. */
442 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
444 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
446 avformat_free_context(s);
451 /*******************************************************/
/* For a stream whose codec is still AV_CODEC_ID_PROBE, accumulate packet
 * data in st->probe_data and retry format detection; pkt == NULL flushes
 * (forces a last-chance detection with score threshold 0). */
453 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
455 if(st->codec->codec_id == AV_CODEC_ID_PROBE){
456 AVProbeData *pd = &st->probe_data;
457 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
462 if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
463 AVPROBE_PADDING_SIZE)) < 0)
465 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
466 pd->buf_size += pkt->size;
467 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
469 st->probe_packets = 0;
471 av_log(s, AV_LOG_ERROR, "nothing to probe for stream %d\n",
/* Only re-probe when the buffer crossed a power-of-two size boundary or
 * probing budget ran out — avoids probing after every single packet. */
477 if (!st->probe_packets ||
478 av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
479 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
480 if(st->codec->codec_id != AV_CODEC_ID_PROBE){
483 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
/* Low-level packet read: drain the raw packet buffer first, then call the
 * demuxer's read_packet; packets from streams still being codec-probed are
 * buffered and fed to probe_codec() instead of being returned immediately. */
490 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
496 AVPacketList *pktl = s->raw_packet_buffer;
/* Buffered packet path: return it once its stream no longer needs probing
 * or the raw buffer budget is exhausted. */
500 st = s->streams[pkt->stream_index];
501 if (st->codec->codec_id != AV_CODEC_ID_PROBE || !st->probe_packets ||
502 s->raw_packet_buffer_remaining_size < pkt->size) {
504 if (st->probe_packets) {
505 if ((err = probe_codec(s, st, NULL)) < 0)
508 pd = &st->probe_data;
511 s->raw_packet_buffer = pktl->next;
512 s->raw_packet_buffer_remaining_size += pkt->size;
521 ret= s->iformat->read_packet(s, pkt);
/* On demuxer EOF/error, flush codec probing for all streams. */
523 if (!pktl || ret == AVERROR(EAGAIN))
525 for (i = 0; i < s->nb_streams; i++) {
527 if (st->probe_packets) {
528 if ((err = probe_codec(s, st, NULL)) < 0)
535 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
536 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
537 av_log(s, AV_LOG_WARNING,
538 "Dropped corrupted packet (stream = %d)\n",
544 st= s->streams[pkt->stream_index];
/* Honour forced codec ids set on the format context. */
546 switch(st->codec->codec_type){
547 case AVMEDIA_TYPE_VIDEO:
548 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
550 case AVMEDIA_TYPE_AUDIO:
551 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
553 case AVMEDIA_TYPE_SUBTITLE:
554 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
558 if(!pktl && (st->codec->codec_id != AV_CODEC_ID_PROBE ||
562 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
563 s->raw_packet_buffer_remaining_size -= pkt->size;
565 if ((err = probe_codec(s, st, pkt)) < 0)
570 /**********************************************************/
/* Number of audio samples per frame; demuxing (mux==0) trusts
 * enc->frame_size first, muxing uses it only as a fallback. */
573 * Get the number of samples of an audio frame. Return -1 on error.
575 int ff_get_audio_frame_size(AVCodecContext *enc, int size, int mux)
579 /* give frame_size priority if demuxing */
580 if (!mux && enc->frame_size > 1)
581 return enc->frame_size;
583 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
586 /* Fall back on using frame_size if muxing. */
587 if (enc->frame_size > 1)
588 return enc->frame_size;
/* Compute a packet's duration as the rational *pnum / *pden (in seconds).
 * Video: avg_frame_rate, then stream/codec time_base (adjusted for parser
 * repeat_pict and ticks_per_frame); audio: frame size over sample rate. */
595 * Return the frame duration in seconds. Return 0 if not available.
597 void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st,
598 AVCodecParserContext *pc, AVPacket *pkt)
604 switch(st->codec->codec_type) {
605 case AVMEDIA_TYPE_VIDEO:
606 if (st->avg_frame_rate.num) {
607 *pnum = st->avg_frame_rate.den;
608 *pden = st->avg_frame_rate.num;
/* The 1000x checks reject implausibly coarse time bases. */
609 } else if(st->time_base.num*1000LL > st->time_base.den) {
610 *pnum = st->time_base.num;
611 *pden = st->time_base.den;
612 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
613 *pnum = st->codec->time_base.num;
614 *pden = st->codec->time_base.den;
615 if (pc && pc->repeat_pict) {
616 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
617 *pden /= 1 + pc->repeat_pict;
619 *pnum *= 1 + pc->repeat_pict;
621 //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
622 //Thus if we have no parser in such case leave duration undefined.
623 if(st->codec->ticks_per_frame>1 && !pc){
628 case AVMEDIA_TYPE_AUDIO:
629 frame_size = ff_get_audio_frame_size(st->codec, pkt->size, 0);
630 if (frame_size <= 0 || st->codec->sample_rate <= 0)
633 *pden = st->codec->sample_rate;
/* Return whether codec `id` is a video codec with only intra frames
 * (every packet is then a keyframe). */
640 static int is_intra_only(enum AVCodecID id)
642 const AVCodecDescriptor *d = avcodec_descriptor_get(id);
645 if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
/* Once the first real dts of a stream is known, set st->first_dts and shift
 * the timestamps of already-buffered packets of that stream accordingly;
 * also establish st->start_time. */
650 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
651 int64_t dts, int64_t pts)
653 AVStream *st= s->streams[stream_index];
654 AVPacketList *pktl= s->packet_buffer;
/* Only act the first time a usable dts arrives. */
656 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
659 st->first_dts= dts - st->cur_dts;
662 for(; pktl; pktl= pktl->next){
663 if(pktl->pkt.stream_index != stream_index)
665 //FIXME think more about this check
666 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
667 pktl->pkt.pts += st->first_dts;
669 if(pktl->pkt.dts != AV_NOPTS_VALUE)
670 pktl->pkt.dts += st->first_dts;
672 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
673 st->start_time= pktl->pkt.pts;
675 if (st->start_time == AV_NOPTS_VALUE)
676 st->start_time = pts;
/* Fill in missing dts/pts/duration of buffered packets of one stream by
 * stepping a running cur_dts forward by `duration` per packet, once the
 * stream's timestamp origin is known. */
679 static void update_initial_durations(AVFormatContext *s, AVStream *st,
680 int stream_index, int duration)
682 AVPacketList *pktl= s->packet_buffer;
/* First scan: bail out as soon as a packet already carries timestamps,
 * and back-compute the stream's first_dts from the packet count. */
685 if(st->first_dts != AV_NOPTS_VALUE){
686 cur_dts= st->first_dts;
687 for(; pktl; pktl= pktl->next){
688 if(pktl->pkt.stream_index == stream_index){
689 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
694 pktl= s->packet_buffer;
695 st->first_dts = cur_dts;
696 }else if(st->cur_dts)
/* Second scan: assign the synthesized timestamps. */
699 for(; pktl; pktl= pktl->next){
700 if(pktl->pkt.stream_index != stream_index)
702 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
703 && !pktl->pkt.duration){
704 pktl->pkt.dts= cur_dts;
705 if(!st->codec->has_b_frames)
706 pktl->pkt.pts= cur_dts;
708 if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
709 pktl->pkt.duration = duration;
713 if(st->first_dts == AV_NOPTS_VALUE)
714 st->cur_dts= cur_dts;
/* Central timestamp fix-up: wrap correction, duration computation, parser
 * dts sync points, pts/dts interpolation for B-frame delay, keyframe
 * flagging for intra-only codecs. Logic is order-sensitive; tokens kept
 * exactly as found (this dump is missing interleaved lines). */
717 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
718 AVCodecParserContext *pc, AVPacket *pkt)
720 int num, den, presentation_delayed, delay, i;
723 if (s->flags & AVFMT_FLAG_NOFILLIN)
726 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
727 pkt->dts= AV_NOPTS_VALUE;
729 /* do we have a video B-frame ? */
730 delay= st->codec->has_b_frames;
731 presentation_delayed = 0;
733 /* XXX: need has_b_frame, but cannot get it if the codec is
736 pc && pc->pict_type != AV_PICTURE_TYPE_B)
737 presentation_delayed = 1;
/* Undo a single timestamp wraparound when dts landed a full wrap ahead
 * of pts. */
739 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
740 st->pts_wrap_bits < 63 &&
741 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
742 pkt->dts -= 1LL<<st->pts_wrap_bits;
745 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
746 // we take the conservative approach and discard both
747 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
748 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
749 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
750 pkt->dts= pkt->pts= AV_NOPTS_VALUE;
753 if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
754 ff_compute_frame_duration(&num, &den, st, pc, pkt);
756 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
758 if(pkt->duration != 0 && s->packet_buffer)
759 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
763 /* correct timestamps with byte offset if demuxers only have timestamps
764 on packet boundaries */
765 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
766 /* this will estimate bitrate based on this frame's duration and size */
767 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
768 if(pkt->pts != AV_NOPTS_VALUE)
770 if(pkt->dts != AV_NOPTS_VALUE)
/* Parser-provided DTS sync points: derive or record a reference dts. */
774 if (pc && pc->dts_sync_point >= 0) {
775 // we have synchronization info from the parser
776 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
778 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
779 if (pkt->dts != AV_NOPTS_VALUE) {
780 // got DTS from the stream, update reference timestamp
781 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
782 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
783 } else if (st->reference_dts != AV_NOPTS_VALUE) {
784 // compute DTS based on reference timestamp
785 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
786 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
788 if (pc->dts_sync_point > 0)
789 st->reference_dts = pkt->dts; // new reference
793 /* This may be redundant, but it should not hurt. */
794 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
795 presentation_delayed = 1;
798 "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n",
799 presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
800 pkt->stream_index, pc);
801 /* interpolate PTS and DTS if they are not present */
802 //We skip H264 currently because delay and has_b_frames are not reliably set
803 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != AV_CODEC_ID_H264){
804 if (presentation_delayed) {
805 /* DTS = decompression timestamp */
806 /* PTS = presentation timestamp */
807 if (pkt->dts == AV_NOPTS_VALUE)
808 pkt->dts = st->last_IP_pts;
809 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
810 if (pkt->dts == AV_NOPTS_VALUE)
811 pkt->dts = st->cur_dts;
813 /* this is tricky: the dts must be incremented by the duration
814 of the frame we are displaying, i.e. the last I- or P-frame */
815 if (st->last_IP_duration == 0)
816 st->last_IP_duration = pkt->duration;
817 if(pkt->dts != AV_NOPTS_VALUE)
818 st->cur_dts = pkt->dts + st->last_IP_duration;
819 st->last_IP_duration = pkt->duration;
820 st->last_IP_pts= pkt->pts;
821 /* cannot compute PTS if not present (we can compute it only
822 by knowing the future */
823 } else if (pkt->pts != AV_NOPTS_VALUE ||
824 pkt->dts != AV_NOPTS_VALUE ||
826 st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
827 int duration = pkt->duration;
828 if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
829 ff_compute_frame_duration(&num, &den, st, pc, pkt);
831 duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den,
832 den * (int64_t)st->time_base.num,
834 if (duration != 0 && s->packet_buffer) {
835 update_initial_durations(s, st, pkt->stream_index,
841 if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
843 /* presentation is not delayed : PTS and DTS are the same */
844 if (pkt->pts == AV_NOPTS_VALUE)
846 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
848 if (pkt->pts == AV_NOPTS_VALUE)
849 pkt->pts = st->cur_dts;
851 if (pkt->pts != AV_NOPTS_VALUE)
852 st->cur_dts = pkt->pts + duration;
/* Insertion-sort pts into the reorder buffer; its minimum is the next dts
 * when the container omitted one. */
857 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
858 st->pts_buffer[0]= pkt->pts;
859 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
860 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
861 if(pkt->dts == AV_NOPTS_VALUE)
862 pkt->dts= st->pts_buffer[0];
863 if(st->codec->codec_id == AV_CODEC_ID_H264){ // we skipped it above so we try here
864 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
866 if(pkt->dts > st->cur_dts)
867 st->cur_dts = pkt->dts;
871 "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
872 presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
875 if (is_intra_only(st->codec->codec_id))
876 pkt->flags |= AV_PKT_FLAG_KEY;
878 pkt->convergence_duration = pc->convergence_duration;
/* Free every packet in the list and reset head and tail pointers. */
881 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
884 AVPacketList *pktl = *pkt_buf;
885 *pkt_buf = pktl->next;
886 av_free_packet(&pktl->pkt);
/* Run the stream's parser over pkt, producing zero or more output packets
 * that are appended to s->parse_queue. A NULL pkt flushes the parser at EOF
 * (signalled internally via flush_pkt) and then closes it. */
893 * Parse a packet, add all split parts to parse_queue
895 * @param pkt packet to parse, NULL when flushing the parser at end of stream
897 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
899 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
900 AVStream *st = s->streams[stream_index];
901 uint8_t *data = pkt ? pkt->data : NULL;
902 int size = pkt ? pkt->size : 0;
903 int ret = 0, got_output = 0;
906 av_init_packet(&flush_pkt);
911 while (size > 0 || (pkt == &flush_pkt && got_output)) {
914 av_init_packet(&out_pkt);
915 len = av_parser_parse2(st->parser, st->codec,
916 &out_pkt.data, &out_pkt.size, data, size,
917 pkt->pts, pkt->dts, pkt->pos);
/* Timestamps are consumed by the first parse call only. */
919 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
920 /* increment read pointer */
924 got_output = !!out_pkt.size;
/* Side data travels on the first output packet. */
929 if (pkt->side_data) {
930 out_pkt.side_data = pkt->side_data;
931 out_pkt.side_data_elems = pkt->side_data_elems;
932 pkt->side_data = NULL;
933 pkt->side_data_elems = 0;
936 /* set the duration */
937 out_pkt.duration = 0;
938 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
939 if (st->codec->sample_rate > 0) {
940 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
941 (AVRational){ 1, st->codec->sample_rate },
945 } else if (st->codec->time_base.num != 0 &&
946 st->codec->time_base.den != 0) {
947 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
948 st->codec->time_base,
953 out_pkt.stream_index = st->index;
954 out_pkt.pts = st->parser->pts;
955 out_pkt.dts = st->parser->dts;
956 out_pkt.pos = st->parser->pos;
958 if (st->parser->key_frame == 1 ||
959 (st->parser->key_frame == -1 &&
960 st->parser->pict_type == AV_PICTURE_TYPE_I))
961 out_pkt.flags |= AV_PKT_FLAG_KEY;
963 compute_pkt_fields(s, st, st->parser, &out_pkt);
965 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
966 out_pkt.flags & AV_PKT_FLAG_KEY) {
967 ff_reduce_index(s, st->index);
968 av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
969 0, 0, AVINDEX_KEYFRAME);
/* If the parser returned the input buffer unchanged, transfer ownership
 * instead of copying. */
972 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
973 out_pkt.buf = pkt->buf;
975 #if FF_API_DESTRUCT_PACKET
976 FF_DISABLE_DEPRECATION_WARNINGS
977 out_pkt.destruct = pkt->destruct;
978 pkt->destruct = NULL;
979 FF_ENABLE_DEPRECATION_WARNINGS
982 if ((ret = av_dup_packet(&out_pkt)) < 0)
985 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
986 av_free_packet(&out_pkt);
987 ret = AVERROR(ENOMEM);
993 /* end of the stream => close and free the parser */
994 if (pkt == &flush_pkt) {
995 av_parser_close(st->parser);
1000 av_free_packet(pkt);
/* Pop the head packet of a packet list into *pkt (precondition: non-empty),
 * clearing the tail pointer when the list becomes empty. */
1004 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1005 AVPacketList **pkt_buffer_end,
1009 av_assert0(*pkt_buffer);
1012 *pkt_buffer = pktl->next;
1014 *pkt_buffer_end = NULL;
/* Read one demuxed (and, when needed, parsed) packet: loop over
 * ff_read_packet(), lazily create parsers, route packets through
 * parse_packet() or emit them directly, then drain s->parse_queue. */
1019 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1021 int ret = 0, i, got_packet = 0;
1023 av_init_packet(pkt);
1025 while (!got_packet && !s->parse_queue) {
1029 /* read next packet */
1030 ret = ff_read_packet(s, &cur_pkt);
1032 if (ret == AVERROR(EAGAIN))
1034 /* flush the parsers */
1035 for(i = 0; i < s->nb_streams; i++) {
1037 if (st->parser && st->need_parsing)
1038 parse_packet(s, NULL, st->index);
1040 /* all remaining packets are now in parse_queue =>
1041 * really terminate parsing */
1045 st = s->streams[cur_pkt.stream_index];
/* Sanity-warn on pts < dts, which no valid stream should produce. */
1047 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1048 cur_pkt.dts != AV_NOPTS_VALUE &&
1049 cur_pkt.pts < cur_pkt.dts) {
1050 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
1051 cur_pkt.stream_index,
1056 if (s->debug & FF_FDEBUG_TS)
1057 av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
1058 cur_pkt.stream_index,
/* Lazily create the parser on first use; fall back to raw output when no
 * parser exists for the codec. */
1065 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1066 st->parser = av_parser_init(st->codec->codec_id);
1068 /* no parser available: just output the raw packets */
1069 st->need_parsing = AVSTREAM_PARSE_NONE;
1070 } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1071 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1072 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1073 st->parser->flags |= PARSER_FLAG_ONCE;
1077 if (!st->need_parsing || !st->parser) {
1078 /* no parsing needed: we just output the packet as is */
1080 compute_pkt_fields(s, st, NULL, pkt);
1081 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1082 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1083 ff_reduce_index(s, st->index);
1084 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1087 } else if (st->discard < AVDISCARD_ALL) {
1088 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1092 av_free_packet(&cur_pkt);
1096 if (!got_packet && s->parse_queue)
1097 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1099 if(s->debug & FF_FDEBUG_TS)
1100 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
/* Public packet reader. Without GENPTS it is a thin wrapper over the packet
 * buffer / read_frame_internal(); with GENPTS it buffers packets and infers
 * missing pts from a later packet's dts in the same stream. */
1111 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1113 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1117 return s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
1118 &s->packet_buffer_end,
1120 read_frame_internal(s, pkt);
1124 AVPacketList *pktl = s->packet_buffer;
1127 AVPacket *next_pkt = &pktl->pkt;
1129 if (next_pkt->dts != AV_NOPTS_VALUE) {
1130 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
/* Scan later buffered packets: a following dts (modulo wrap) of a non-B
 * frame in the same stream serves as this packet's pts. */
1131 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1132 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1133 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
1134 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1135 next_pkt->pts = pktl->pkt.dts;
1139 pktl = s->packet_buffer;
1142 /* read packet from packet buffer, if there is data */
1143 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1144 next_pkt->dts != AV_NOPTS_VALUE && !eof))
1145 return read_from_packet_buffer(&s->packet_buffer,
1146 &s->packet_buffer_end, pkt);
1149 ret = read_frame_internal(s, pkt);
1151 if (pktl && ret != AVERROR(EAGAIN)) {
1158 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1159 &s->packet_buffer_end)) < 0)
1160 return AVERROR(ENOMEM);
1164 /* XXX: suppress the packet queue */
/* Drop all three internal packet queues and restore the raw-buffer budget. */
1165 static void flush_packet_queue(AVFormatContext *s)
1167 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1168 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1169 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1171 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1174 /*******************************************************/
/* Pick a default stream: prefer a real video stream (not attached pictures),
 * else the first audio stream, else stream 0. */
1177 int av_find_default_stream_index(AVFormatContext *s)
1179 int first_audio_index = -1;
1183 if (s->nb_streams <= 0)
1185 for(i = 0; i < s->nb_streams; i++) {
1187 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1188 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1191 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1192 first_audio_index = i;
1194 return first_audio_index >= 0 ? first_audio_index : 0;
/* Reset all demuxer read state after a seek: empty queues, close parsers,
 * clear per-stream timestamps and the pts reorder buffer. */
1198 * Flush the frame reader.
1200 void ff_read_frame_flush(AVFormatContext *s)
1205 flush_packet_queue(s);
1207 /* for each stream, reset read state */
1208 for(i = 0; i < s->nb_streams; i++) {
1212 av_parser_close(st->parser);
1215 st->last_IP_pts = AV_NOPTS_VALUE;
1216 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1217 st->reference_dts = AV_NOPTS_VALUE;
1219 st->probe_packets = MAX_PROBE_PACKETS;
1221 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1222 st->pts_buffer[j]= AV_NOPTS_VALUE;
/* After a seek on ref_st, propagate `timestamp` to every stream's cur_dts,
 * rescaled from ref_st's time base into each stream's own time base. */
1226 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1230 for(i = 0; i < s->nb_streams; i++) {
1231 AVStream *st = s->streams[i];
1233 st->cur_dts = av_rescale(timestamp,
1234 st->time_base.den * (int64_t)ref_st->time_base.num,
1235 st->time_base.num * (int64_t)ref_st->time_base.den);
/* Bound index memory: when the entry count reaches max_index_size, keep
 * every second entry (halving the index resolution). */
1239 void ff_reduce_index(AVFormatContext *s, int stream_index)
1241 AVStream *st= s->streams[stream_index];
1242 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1244 if((unsigned)st->nb_index_entries >= max_entries){
1246 for(i=0; 2*i<st->nb_index_entries; i++)
1247 st->index_entries[i]= st->index_entries[2*i];
1248 st->nb_index_entries= i;
/* Insert (or update in place) an index entry keeping the array sorted by
 * timestamp; grows the backing array with av_fast_realloc(). Returns the
 * entry's index or a negative value on failure. */
1252 int ff_add_index_entry(AVIndexEntry **index_entries,
1253 int *nb_index_entries,
1254 unsigned int *index_entries_allocated_size,
1255 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1257 AVIndexEntry *entries, *ie;
/* Guard the (count+1)*sizeof multiplication against overflow. */
1260 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1263 entries = av_fast_realloc(*index_entries,
1264 index_entries_allocated_size,
1265 (*nb_index_entries + 1) *
1266 sizeof(AVIndexEntry));
1270 *index_entries= entries;
1272 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
/* Not found past the end: plain append. Otherwise shift to insert, or
 * merge into an existing same-timestamp entry. */
1275 index= (*nb_index_entries)++;
1276 ie= &entries[index];
1277 assert(index==0 || ie[-1].timestamp < timestamp);
1279 ie= &entries[index];
1280 if(ie->timestamp != timestamp){
1281 if(ie->timestamp <= timestamp)
1283 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1284 (*nb_index_entries)++;
1285 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1286 distance= ie->min_distance;
1290 ie->timestamp = timestamp;
1291 ie->min_distance= distance;
/**
 * Public wrapper around ff_add_index_entry() operating on an AVStream's
 * own index-entry storage.
 */
1298 int av_add_index_entry(AVStream *st,
1299 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1301 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1302 &st->index_entries_allocated_size, pos,
1303 timestamp, size, distance, flags);
/**
 * Binary-search a sorted index for wanted_timestamp.
 * With AVSEEK_FLAG_BACKWARD the match at or before the timestamp is
 * chosen, otherwise the match at or after it; unless AVSEEK_FLAG_ANY is
 * set, the result is then walked to the nearest keyframe entry.
 * NOTE(review): the bisection loop bounds (a/b updates) are among the
 * lines missing from this sampled listing.
 */
1306 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1307 int64_t wanted_timestamp, int flags)
1315 //optimize appending index entries at the end
1316 if(b && entries[b-1].timestamp < wanted_timestamp)
1321 timestamp = entries[m].timestamp;
1322 if(timestamp >= wanted_timestamp)
1324 if(timestamp <= wanted_timestamp)
1327 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1329 if(!(flags & AVSEEK_FLAG_ANY)){
/* step toward a keyframe in the direction implied by the seek flags */
1330 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1331 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/**
 * Public wrapper: search an AVStream's index for wanted_timestamp.
 */
1340 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1343 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1344 wanted_timestamp, flags);
/**
 * Seek to target_ts using the demuxer's read_timestamp() via binary
 * search. Cached index entries, when available, are used to narrow the
 * initial [pos_min, pos_max] search window before calling ff_gen_search().
 * On success the file position is updated and cur_dts propagated.
 */
1347 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1349 AVInputFormat *avif= s->iformat;
1350 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1351 int64_t ts_min, ts_max, ts;
1356 if (stream_index < 0)
1359 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1362 ts_min= AV_NOPTS_VALUE;
1363 pos_limit= -1; //gcc falsely says it may be uninitialized
1365 st= s->streams[stream_index];
1366 if(st->index_entries){
/* use the closest index entry at or before target_ts as the lower bound */
1369 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1370 index= FFMAX(index, 0);
1371 e= &st->index_entries[index];
1373 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1375 ts_min= e->timestamp;
1376 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* and the closest entry at or after target_ts as the upper bound */
1382 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1383 assert(index < st->nb_index_entries);
1385 e= &st->index_entries[index];
1386 assert(e->timestamp >= target_ts);
1388 ts_max= e->timestamp;
1389 pos_limit= pos_max - e->min_distance;
1390 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1391 pos_max,pos_limit, ts_max);
1395 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1400 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1403 ff_update_cur_dts(s, st, ts);
/**
 * Generic timestamp-based search for demuxers that provide
 * read_timestamp(). Narrows [pos_min, pos_max] until it brackets
 * target_ts, first by linear interpolation of the timestamps, falling
 * back to bisection and finally linear scanning.
 * @param ts_ret  receives the timestamp at the returned position
 * @return the byte position to seek to, or a negative value on failure
 *         (failure branches not visible in this sampled listing).
 */
1408 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1409 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1410 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1411 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1414 int64_t start_pos, filesize;
1417 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* establish a lower bound if the caller did not supply one */
1419 if(ts_min == AV_NOPTS_VALUE){
1420 pos_min = s->data_offset;
1421 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1422 if (ts_min == AV_NOPTS_VALUE)
/* establish an upper bound: probe backwards from EOF until a timestamp is found */
1426 if(ts_max == AV_NOPTS_VALUE){
1428 filesize = avio_size(s->pb);
1429 pos_max = filesize - 1;
1432 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1434 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1435 if (ts_max == AV_NOPTS_VALUE)
/* extend ts_max to the last timestamp actually present in the file */
1439 int64_t tmp_pos= pos_max + 1;
1440 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1441 if(tmp_ts == AV_NOPTS_VALUE)
1445 if(tmp_pos >= filesize)
1451 if(ts_min > ts_max){
1453 }else if(ts_min == ts_max){
1458 while (pos_min < pos_limit) {
1459 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1460 pos_min, pos_max, ts_min, ts_max);
1461 assert(pos_limit <= pos_max);
1464 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1465 // interpolate position (better than dichotomy)
1466 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1467 + pos_min - approximate_keyframe_distance;
1468 }else if(no_change==1){
1469 // bisection, if interpolation failed to change min or max pos last time
1470 pos = (pos_min + pos_limit)>>1;
1472 /* linear search if bisection failed, can only happen if there
1473 are very few or no keyframes between min/max */
1478 else if(pos > pos_limit)
1482 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1487 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1488 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1489 pos_limit, start_pos, no_change);
1490 if(ts == AV_NOPTS_VALUE){
1491 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1494 assert(ts != AV_NOPTS_VALUE);
/* shrink the window toward the target from whichever side applies */
1495 if (target_ts <= ts) {
1496 pos_limit = start_pos - 1;
1500 if (target_ts >= ts) {
1506 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1507 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
/* NOTE(review): in the upstream source the two reads below sit inside an
 * "#if 0" sanity-check block (hence &pos_min twice); the preprocessor
 * lines are missing from this sampled listing — confirm before editing. */
1509 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1511 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1512 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1513 pos, ts_min, target_ts, ts_max);
/**
 * Seek to an absolute byte position, clamped to [data_offset, EOF-1].
 * The timestamp argument of the public seek API is interpreted as a byte
 * offset here (AVSEEK_FLAG_BYTE path).
 */
1518 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1519 int64_t pos_min, pos_max;
1521 pos_min = s->data_offset;
1522 pos_max = avio_size(s->pb) - 1;
1524 if (pos < pos_min) pos= pos_min;
1525 else if(pos > pos_max) pos= pos_max;
1527 avio_seek(s->pb, pos, SEEK_SET);
/**
 * Generic seek using the stream's internal index. If the target lies
 * beyond the last indexed entry, packets are read forward (building the
 * index as a side effect) until the target is covered, then the index is
 * consulted again and the file position restored to the chosen entry.
 */
1532 static int seek_frame_generic(AVFormatContext *s,
1533 int stream_index, int64_t timestamp, int flags)
1540 st = s->streams[stream_index];
1542 index = av_index_search_timestamp(st, timestamp, flags);
1544 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1547 if(index < 0 || index==st->nb_index_entries-1){
1550 if(st->nb_index_entries){
1551 assert(st->index_entries);
/* resume reading from the last known indexed position */
1552 ie= &st->index_entries[st->nb_index_entries-1];
1553 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1555 ff_update_cur_dts(s, st, ie->timestamp);
1557 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1563 read_status = av_read_frame(s, &pkt);
1564 } while (read_status == AVERROR(EAGAIN));
1565 if (read_status < 0)
1567 av_free_packet(&pkt);
/* stop once a keyframe past the target has been indexed */
1568 if(stream_index == pkt.stream_index){
1569 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1573 index = av_index_search_timestamp(st, timestamp, flags);
1578 ff_read_frame_flush(s);
1579 if (s->iformat->read_seek){
1580 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1583 ie = &st->index_entries[index];
1584 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1586 ff_update_cur_dts(s, st, ie->timestamp);
/**
 * Dispatch a seek request to the appropriate strategy, in order:
 * byte seek (AVSEEK_FLAG_BYTE), the demuxer's read_seek(), binary search
 * via read_timestamp(), and finally the generic index-based seek.
 */
1591 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1592 int64_t timestamp, int flags)
1597 if (flags & AVSEEK_FLAG_BYTE) {
1598 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1600 ff_read_frame_flush(s);
1601 return seek_frame_byte(s, stream_index, timestamp, flags);
/* no stream given: pick a default and convert from AV_TIME_BASE units */
1604 if(stream_index < 0){
1605 stream_index= av_find_default_stream_index(s);
1606 if(stream_index < 0)
1609 st= s->streams[stream_index];
1610 /* timestamp for default must be expressed in AV_TIME_BASE units */
1611 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1614 /* first, we try the format specific seek */
1615 if (s->iformat->read_seek) {
1616 ff_read_frame_flush(s);
1617 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1624 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1625 ff_read_frame_flush(s);
1626 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1627 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1628 ff_read_frame_flush(s);
1629 return seek_frame_generic(s, stream_index, timestamp, flags);
/**
 * Public seek entry point: performs the seek, then re-queues attached
 * pictures (cover art) so they are delivered again after the seek.
 */
1635 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1637 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1640 ret = queue_attached_pictures(s);
/**
 * Range-based seek API (min_ts <= ts <= max_ts). Prefers the demuxer's
 * read_seek2(); otherwise falls back to av_seek_frame() with the
 * direction flag derived from which bound is closer to ts.
 */
1645 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1647 if(min_ts > ts || max_ts < ts)
1650 if (s->iformat->read_seek2) {
1652 ff_read_frame_flush(s);
1653 ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1656 ret = queue_attached_pictures(s);
1660 if(s->iformat->read_timestamp){
1661 //try to seek via read_timestamp()
1664 // Fall back on old API if new is not implemented but old is.
1665 // Note the old API has somewhat different semantics.
1666 if(s->iformat->read_seek || 1)
/* BACKWARD when ts is nearer to max_ts than to min_ts (unsigned compare
 * handles AV_NOPTS-style extremes) */
1667 return av_seek_frame(s, stream_index, ts, flags | ((uint64_t)ts - min_ts > (uint64_t)max_ts - ts ? AVSEEK_FLAG_BACKWARD : 0));
1669 // try some generic seek like seek_frame_generic() but with new ts semantics
1672 /*******************************************************/
1675 * Return TRUE if the file has an accurate duration in at least one stream.
1677 * @return TRUE if at least one stream component (or the container itself) has an accurate duration.
/* True if any stream, or the container itself, carries a known duration. */
1679 static int has_duration(AVFormatContext *ic)
1684 for(i = 0;i < ic->nb_streams; i++) {
1685 st = ic->streams[i];
1686 if (st->duration != AV_NOPTS_VALUE)
1689 if (ic->duration != AV_NOPTS_VALUE)
1695 * Estimate the global stream timings from those of the individual components.
1697 * Also computes the global bitrate if possible.
/**
 * Derive the container-level start_time and duration from the per-stream
 * values (rescaled to AV_TIME_BASE), and compute bit_rate from the file
 * size when a duration is known.
 */
1699 static void update_stream_timings(AVFormatContext *ic)
1701 int64_t start_time, start_time1, end_time, end_time1;
1702 int64_t duration, duration1, filesize;
1706 start_time = INT64_MAX;
1707 end_time = INT64_MIN;
1708 duration = INT64_MIN;
1709 for(i = 0;i < ic->nb_streams; i++) {
1710 st = ic->streams[i];
1711 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1712 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
/* container start = earliest stream start */
1713 start_time = FFMIN(start_time, start_time1);
1714 if (st->duration != AV_NOPTS_VALUE) {
1715 end_time1 = start_time1
1716 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
/* container end = latest stream end */
1717 end_time = FFMAX(end_time, end_time1);
1720 if (st->duration != AV_NOPTS_VALUE) {
1721 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1722 duration = FFMAX(duration, duration1);
1725 if (start_time != INT64_MAX) {
1726 ic->start_time = start_time;
1727 if (end_time != INT64_MIN)
1728 duration = FFMAX(duration, end_time - start_time);
1730 if (duration != INT64_MIN) {
1731 ic->duration = duration;
1732 if (ic->pb && (filesize = avio_size(ic->pb)) > 0) {
1733 /* compute the bitrate */
1734 ic->bit_rate = (double)filesize * 8.0 * AV_TIME_BASE /
1735 (double)ic->duration;
/**
 * Fill in missing per-stream start_time/duration from the container-level
 * values (after refreshing them via update_stream_timings()).
 */
1740 static void fill_all_stream_timings(AVFormatContext *ic)
1745 update_stream_timings(ic);
1746 for(i = 0;i < ic->nb_streams; i++) {
1747 st = ic->streams[i];
1748 if (st->start_time == AV_NOPTS_VALUE) {
1749 if(ic->start_time != AV_NOPTS_VALUE)
1750 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1751 if(ic->duration != AV_NOPTS_VALUE)
1752 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/**
 * Last-resort timing estimate: sum the per-stream bitrates if the
 * container bitrate is unset, then derive each stream's duration from
 * filesize * 8 / bit_rate. Existing values are trusted and kept.
 */
1757 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1759 int64_t filesize, duration;
1763 /* if bit_rate is already set, we believe it */
1764 if (ic->bit_rate <= 0) {
1766 for(i=0;i<ic->nb_streams;i++) {
1767 st = ic->streams[i];
1768 if (st->codec->bit_rate > 0) {
/* guard against int overflow when summing stream bitrates */
1769 if (INT_MAX - st->codec->bit_rate < bit_rate) {
1773 bit_rate += st->codec->bit_rate;
1776 ic->bit_rate = bit_rate;
1779 /* if duration is already set, we believe it */
1780 if (ic->duration == AV_NOPTS_VALUE &&
1781 ic->bit_rate != 0) {
1782 filesize = ic->pb ? avio_size(ic->pb) : 0;
1784 for(i = 0; i < ic->nb_streams; i++) {
1785 st = ic->streams[i];
/* duration = filesize*8 / bit_rate, expressed in the stream's time base */
1786 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1787 if (st->duration == AV_NOPTS_VALUE)
1788 st->duration = duration;
/* How many bytes to scan near EOF per attempt, and how many doubling
 * retries to make, when estimating the duration from packet PTS. */
1794 #define DURATION_MAX_READ_SIZE 250000
1795 #define DURATION_MAX_RETRY 3
1797 /* only usable for MPEG-PS streams */
/**
 * Estimate stream durations by reading packets from the tail of the file
 * and taking the highest PTS seen, relative to each stream's start time.
 * Seeks near EOF, retrying with a window twice as large on failure, then
 * restores the original file position.
 */
1798 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1800 AVPacket pkt1, *pkt = &pkt1;
1802 int read_size, i, ret;
1804 int64_t filesize, offset, duration;
1807 /* flush packet queue */
1808 flush_packet_queue(ic);
1810 for (i=0; i<ic->nb_streams; i++) {
1811 st = ic->streams[i];
1812 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1813 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
1816 av_parser_close(st->parser);
1821 /* estimate the end time (duration) */
1822 /* XXX: may need to support wrapping */
1823 filesize = ic->pb ? avio_size(ic->pb) : 0;
1824 end_time = AV_NOPTS_VALUE;
/* position the read window DURATION_MAX_READ_SIZE<<retry bytes before EOF */
1826 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
1830 avio_seek(ic->pb, offset, SEEK_SET);
1833 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
1837 ret = ff_read_packet(ic, pkt);
1838 } while(ret == AVERROR(EAGAIN));
1841 read_size += pkt->size;
1842 st = ic->streams[pkt->stream_index];
1843 if (pkt->pts != AV_NOPTS_VALUE &&
1844 (st->start_time != AV_NOPTS_VALUE ||
1845 st->first_dts != AV_NOPTS_VALUE)) {
1846 duration = end_time = pkt->pts;
1847 if (st->start_time != AV_NOPTS_VALUE)
1848 duration -= st->start_time;
1850 duration -= st->first_dts;
/* compensate a negative difference caused by timestamp wrap-around */
1852 duration += 1LL<<st->pts_wrap_bits;
1854 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1855 st->duration = duration;
1858 av_free_packet(pkt);
1860 }while( end_time==AV_NOPTS_VALUE
1861 && filesize > (DURATION_MAX_READ_SIZE<<retry)
1862 && ++retry <= DURATION_MAX_RETRY);
1864 fill_all_stream_timings(ic);
/* restore the original position and reset per-stream decode state */
1866 avio_seek(ic->pb, old_offset, SEEK_SET);
1867 for (i=0; i<ic->nb_streams; i++) {
1869 st->cur_dts= st->first_dts;
1870 st->last_IP_pts = AV_NOPTS_VALUE;
1871 st->reference_dts = AV_NOPTS_VALUE;
/**
 * Choose a timing-estimation strategy: PTS scanning for seekable MPEG-PS/TS
 * files, per-stream values when any stream has a duration, and a bitrate
 * guess as the last resort. Finishes by logging the result at debug level.
 */
1875 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1879 /* get the file size, if possible */
1880 if (ic->iformat->flags & AVFMT_NOFILE) {
1883 file_size = avio_size(ic->pb);
1884 file_size = FFMAX(0, file_size);
1887 if ((!strcmp(ic->iformat->name, "mpeg") ||
1888 !strcmp(ic->iformat->name, "mpegts")) &&
1889 file_size && ic->pb->seekable) {
1890 /* get accurate estimate from the PTSes */
1891 estimate_timings_from_pts(ic, old_offset);
1892 } else if (has_duration(ic)) {
1893 /* at least one component has timings - we use them for all
1895 fill_all_stream_timings(ic);
1897 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
1898 /* less precise: use bitrate info */
1899 estimate_timings_from_bit_rate(ic);
1901 update_stream_timings(ic);
1905 AVStream av_unused *st;
1906 for(i = 0;i < ic->nb_streams; i++) {
1907 st = ic->streams[i];
1908 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
1909 (double) st->start_time / AV_TIME_BASE,
1910 (double) st->duration / AV_TIME_BASE);
1912 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1913 (double) ic->start_time / AV_TIME_BASE,
1914 (double) ic->duration / AV_TIME_BASE,
1915 ic->bit_rate / 1000);
/**
 * True once enough codec parameters are known for this stream: a codec id
 * plus the per-media essentials (sample rate/channels and sample format
 * for audio; pixel format for video once a decoder was probed).
 */
1919 static int has_codec_parameters(AVStream *st)
1921 AVCodecContext *avctx = st->codec;
1923 switch (avctx->codec_type) {
1924 case AVMEDIA_TYPE_AUDIO:
1925 val = avctx->sample_rate && avctx->channels;
/* sample_fmt is only required after a decoder probe was attempted */
1926 if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1929 case AVMEDIA_TYPE_VIDEO:
1931 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1938 return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
/* H.264 needs several decoded frames before its reorder delay is known;
 * all other codecs are considered settled immediately. */
1941 static int has_decode_delay_been_guessed(AVStream *st)
1943 return st->codec->codec_id != AV_CODEC_ID_H264 ||
1944 st->info->nb_decoded_frames >= 6;
1947 /* Returns 1 if decoded data was returned, 0 if it was not, or a negative error code. */
/**
 * Decode (part of) a packet to discover missing codec parameters.
 * Opens the decoder on first use (single-threaded, see below), then keeps
 * decoding until parameters and reorder delay are known. The decoder
 * state is cached in st->info->found_decoder (-1 = unusable, 1 = open).
 */
1948 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
1950 const AVCodec *codec;
1951 int got_picture = 1, ret = 0;
1952 AVFrame *frame = avcodec_alloc_frame();
/* local copy: pkt.data/size are consumed as decoding progresses */
1953 AVPacket pkt = *avpkt;
1956 return AVERROR(ENOMEM);
1958 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
1959 AVDictionary *thread_opt = NULL;
1961 codec = st->codec->codec ? st->codec->codec :
1962 avcodec_find_decoder(st->codec->codec_id);
1965 st->info->found_decoder = -1;
1970 /* force thread count to 1 since the h264 decoder will not extract SPS
1971 * and PPS to extradata during multi-threaded decoding */
1972 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
1973 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
1975 av_dict_free(&thread_opt);
1977 st->info->found_decoder = -1;
1980 st->info->found_decoder = 1;
1981 } else if (!st->info->found_decoder)
1982 st->info->found_decoder = 1;
1984 if (st->info->found_decoder < 0) {
/* a NULL-data packet with got_picture set keeps draining delayed frames */
1989 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
1991 (!has_codec_parameters(st) ||
1992 !has_decode_delay_been_guessed(st) ||
1993 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
1995 avcodec_get_frame_defaults(frame);
1996 switch(st->codec->codec_type) {
1997 case AVMEDIA_TYPE_VIDEO:
1998 ret = avcodec_decode_video2(st->codec, frame,
1999 &got_picture, &pkt);
2001 case AVMEDIA_TYPE_AUDIO:
2002 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
2009 st->info->nb_decoded_frames++;
2017 avcodec_free_frame(&frame);
/* Look up the container tag for a codec id in a tag table terminated by
 * AV_CODEC_ID_NONE (match/return lines missing from this sampled listing). */
2021 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
2023 while (tags->id != AV_CODEC_ID_NONE) {
/**
 * Look up the codec id for a container tag: first an exact match, then a
 * case-insensitive (uppercased fourcc) match; AV_CODEC_ID_NONE if absent.
 */
2031 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2034 for(i=0; tags[i].id != AV_CODEC_ID_NONE;i++) {
2035 if(tag == tags[i].tag)
2038 for(i=0; tags[i].id != AV_CODEC_ID_NONE; i++) {
2039 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2042 return AV_CODEC_ID_NONE;
/**
 * Map raw PCM properties to a codec id.
 * @param bps    bits per sample
 * @param flt    non-zero for floating-point samples
 * @param be     non-zero for big-endian layout
 * @param sflags bitmask of byte widths (bit bps-1) that are signed
 */
2045 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
2049 case 32: return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
2050 case 64: return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
2051 default: return AV_CODEC_ID_NONE;
/* integer PCM: pick signed vs unsigned from the sflags bitmask */
2055 if (sflags & (1 << (bps - 1))) {
2057 case 1: return AV_CODEC_ID_PCM_S8;
2058 case 2: return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
2059 case 3: return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
2060 case 4: return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
2061 default: return AV_CODEC_ID_NONE;
2065 case 1: return AV_CODEC_ID_PCM_U8;
2066 case 2: return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
2067 case 3: return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
2068 case 4: return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
2069 default: return AV_CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for the tag of a codec id. */
2075 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum AVCodecID id)
2078 for(i=0; tags && tags[i]; i++){
2079 int tag= ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for the codec id of a tag. */
2085 enum AVCodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2088 for(i=0; tags && tags[i]; i++){
2089 enum AVCodecID id= ff_codec_get_id(tags[i], tag);
2090 if(id!=AV_CODEC_ID_NONE) return id;
2092 return AV_CODEC_ID_NONE;
/**
 * Fill in missing chapter end times: each open-ended chapter ends at the
 * start of the next chapter that begins after it, capped by the total
 * file duration (start_time + duration, rescaled to the chapter's base).
 */
2095 static void compute_chapters_end(AVFormatContext *s)
2098 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2100 for (i = 0; i < s->nb_chapters; i++)
2101 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2102 AVChapter *ch = s->chapters[i];
2103 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
/* shrink the end to the nearest following chapter start */
2106 for (j = 0; j < s->nb_chapters; j++) {
2107 AVChapter *ch1 = s->chapters[j];
2108 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2109 if (j != i && next_start > ch->start && next_start < end)
/* INT64_MAX means "no bound found": collapse to a zero-length chapter */
2112 ch->end = (end == INT64_MAX) ? ch->start : end;
/* Candidate "standard" framerates, scaled by 12*1001: indices below 60*12
 * map to i/1.001 fps steps; the rest are 24/30/60/12/15 fps exactly. */
2116 static int get_std_framerate(int i){
2117 if(i<60*12) return i*1001;
2118 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12;
2122 * Is the time base unreliable.
2123 * This is a heuristic to balance between quick acceptance of the values in
2124 * the headers vs. some extra checks.
2125 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2126 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2127 * And there are "variable" fps files this needs to detect as well.
2129 static int tb_unreliable(AVCodecContext *c){
/* implausibly small (>100fps den/num ratio) or large (<5fps) time bases,
 * or codecs known to misreport framerate, are treated as unreliable */
2130 if( c->time_base.den >= 101L*c->time_base.num
2131 || c->time_base.den < 5L*c->time_base.num
2132 /* || c->codec_tag == AV_RL32("DIVX")
2133 || c->codec_tag == AV_RL32("XVID")*/
2134 || c->codec_id == AV_CODEC_ID_MPEG2VIDEO
2135 || c->codec_id == AV_CODEC_ID_H264
/**
 * Probe the input by reading packets until codec parameters, framerates
 * and timings are known for all streams (or probesize/max_analyze_duration
 * is hit). Phases: open decoders single-threaded, loop reading packets
 * (tracking DTS for fps estimation, extracting extradata via the parser,
 * decoding frames when needed), flush decoders at EOF, then derive
 * avg_frame_rate / audio disposition and estimate timings.
 * NOTE(review): this listing is sampled — control-flow lines between the
 * numbered statements are missing.
 */
2141 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2143 int i, count, ret, read_size, j;
2145 AVPacket pkt1, *pkt;
2146 int64_t old_offset = avio_tell(ic->pb);
2147 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
/* ---- phase 1: set up parsers and try opening decoders ---- */
2149 for(i=0;i<ic->nb_streams;i++) {
2150 const AVCodec *codec;
2151 AVDictionary *thread_opt = NULL;
2152 st = ic->streams[i];
2154 //only for the split stuff
2155 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2156 st->parser = av_parser_init(st->codec->codec_id);
2157 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2158 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2161 codec = st->codec->codec ? st->codec->codec :
2162 avcodec_find_decoder(st->codec->codec_id);
2164 /* force thread count to 1 since the h264 decoder will not extract SPS
2165 * and PPS to extradata during multi-threaded decoding */
2166 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2168 /* Ensure that subtitle_header is properly set. */
2169 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2170 && codec && !st->codec->codec)
2171 avcodec_open2(st->codec, codec, options ? &options[i]
2174 //try to just open decoders, in case this is enough to get parameters
2175 if (!has_codec_parameters(st)) {
2176 if (codec && !st->codec->codec)
2177 avcodec_open2(st->codec, codec, options ? &options[i]
2181 av_dict_free(&thread_opt);
2184 for (i=0; i<ic->nb_streams; i++) {
2185 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2186 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
/* ---- phase 2: main probing loop ---- */
2192 if (ff_check_interrupt(&ic->interrupt_callback)){
2194 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2198 /* check if one codec still needs to be handled */
2199 for(i=0;i<ic->nb_streams;i++) {
2200 int fps_analyze_framecount = 20;
2202 st = ic->streams[i];
2203 if (!has_codec_parameters(st))
2205 /* if the timebase is coarse (like the usual millisecond precision
2206 of mkv), we need to analyze more frames to reliably arrive at
2208 if (av_q2d(st->time_base) > 0.0005)
2209 fps_analyze_framecount *= 2;
2210 if (ic->fps_probe_size >= 0)
2211 fps_analyze_framecount = ic->fps_probe_size;
2212 /* variable fps and no guess at the real fps */
2213 if( tb_unreliable(st->codec) && !st->avg_frame_rate.num
2214 && st->codec_info_nb_frames < fps_analyze_framecount
2215 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2217 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2219 if (st->first_dts == AV_NOPTS_VALUE &&
2220 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2221 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2224 if (i == ic->nb_streams) {
2225 /* NOTE: if the format has no header, then we need to read
2226 some packets to get most of the streams, so we cannot
2228 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2229 /* if we found the info for all the codecs, we can stop */
2231 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2235 /* we did not get all the codec info, but we read too much data */
2236 if (read_size >= ic->probesize) {
2238 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2242 /* NOTE: a new stream can be added there if no header in file
2243 (AVFMTCTX_NOHEADER) */
2244 ret = read_frame_internal(ic, &pkt1);
2245 if (ret == AVERROR(EAGAIN))
/* ---- EOF: flush delayed frames out of the opened decoders ---- */
2250 AVPacket empty_pkt = { 0 };
2252 av_init_packet(&empty_pkt);
2254 ret = -1; /* we could not have all the codec parameters before EOF */
2255 for(i=0;i<ic->nb_streams;i++) {
2256 st = ic->streams[i];
2258 /* flush the decoders */
2259 if (st->info->found_decoder == 1) {
2261 err = try_decode_frame(st, &empty_pkt,
2262 (options && i < orig_nb_streams) ?
2263 &options[i] : NULL);
2264 } while (err > 0 && !has_codec_parameters(st));
2268 av_log(ic, AV_LOG_WARNING,
2269 "decoding for stream %d failed\n", st->index);
2270 } else if (!has_codec_parameters(st)) {
2272 avcodec_string(buf, sizeof(buf), st->codec, 0);
2273 av_log(ic, AV_LOG_WARNING,
2274 "Could not find codec parameters (%s)\n", buf);
2282 if (ic->flags & AVFMT_FLAG_NOBUFFER) {
/* keep probed packets so av_read_frame() can return them later */
2285 pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
2286 &ic->packet_buffer_end);
2287 if ((ret = av_dup_packet(pkt)) < 0)
2288 goto find_stream_info_err;
2291 read_size += pkt->size;
2293 st = ic->streams[pkt->stream_index];
2294 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2295 /* check for non-increasing dts */
2296 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2297 st->info->fps_last_dts >= pkt->dts) {
2298 av_log(ic, AV_LOG_WARNING, "Non-increasing DTS in stream %d: "
2299 "packet %d with DTS %"PRId64", packet %d with DTS "
2300 "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2301 st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2302 st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
2304 /* check for a discontinuity in dts - if the difference in dts
2305 * is more than 1000 times the average packet duration in the sequence,
2306 * we treat it as a discontinuity */
2307 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2308 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2309 (pkt->dts - st->info->fps_last_dts) / 1000 >
2310 (st->info->fps_last_dts - st->info->fps_first_dts) / (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2311 av_log(ic, AV_LOG_WARNING, "DTS discontinuity in stream %d: "
2312 "packet %d with DTS %"PRId64", packet %d with DTS "
2313 "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2314 st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2315 st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
2318 /* update stored dts values */
2319 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2320 st->info->fps_first_dts = pkt->dts;
2321 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2323 st->info->fps_last_dts = pkt->dts;
2324 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2326 /* check max_analyze_duration */
2327 if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2328 AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2329 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
/* pull extradata (e.g. SPS/PPS) out of the bitstream via the parser */
2333 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2334 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2335 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2336 st->codec->extradata_size= i;
2337 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2338 if (!st->codec->extradata)
2339 return AVERROR(ENOMEM);
2340 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2341 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2345 /* if still no information, we try to open the codec and to
2346 decompress the frame. We try to avoid that in most cases as
2347 it takes longer and uses more memory. For MPEG-4, we need to
2348 decompress for QuickTime.
2350 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2351 least one frame of codec data, this makes sure the codec initializes
2352 the channel configuration and does not only trust the values from the container.
2354 try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2356 st->codec_info_nb_frames++;
/* ---- phase 3: post-processing ---- */
2360 // close codecs which were opened in try_decode_frame()
2361 for(i=0;i<ic->nb_streams;i++) {
2362 st = ic->streams[i];
2363 avcodec_close(st->codec);
2365 for(i=0;i<ic->nb_streams;i++) {
2366 st = ic->streams[i];
2367 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2368 /* estimate average framerate if not set by demuxer */
2369 if (!st->avg_frame_rate.num && st->info->fps_last_dts != st->info->fps_first_dts) {
2370 int64_t delta_dts = st->info->fps_last_dts - st->info->fps_first_dts;
2371 int delta_packets = st->info->fps_last_dts_idx - st->info->fps_first_dts_idx;
2373 double best_error = 0.01;
2375 if (delta_dts >= INT64_MAX / st->time_base.num ||
2376 delta_packets >= INT64_MAX / st->time_base.den ||
2379 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2380 delta_packets*(int64_t)st->time_base.den,
2381 delta_dts*(int64_t)st->time_base.num, 60000);
2383 /* round guessed framerate to a "standard" framerate if it's
2384 * within 1% of the original estimate*/
2385 for (j = 1; j < MAX_STD_TIMEBASES; j++) {
2386 AVRational std_fps = { get_std_framerate(j), 12*1001 };
2387 double error = fabs(av_q2d(st->avg_frame_rate) / av_q2d(std_fps) - 1);
2389 if (error < best_error) {
2391 best_fps = std_fps.num;
2395 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2396 best_fps, 12*1001, INT_MAX);
2399 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2400 if(!st->codec->bits_per_coded_sample)
2401 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2402 // set stream disposition based on audio service type
2403 switch (st->codec->audio_service_type) {
2404 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2405 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
2406 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2407 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
2408 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2409 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
2410 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2411 st->disposition = AV_DISPOSITION_COMMENT; break;
2412 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2413 st->disposition = AV_DISPOSITION_KARAOKE; break;
2418 estimate_timings(ic, old_offset);
2420 compute_chapters_end(ic);
2422 find_stream_info_err:
2423 for (i=0; i < ic->nb_streams; i++) {
2424 if (ic->streams[i]->codec)
2425 ic->streams[i]->codec->thread_count = 0;
2426 av_freep(&ic->streams[i]->info);
/* Return the program that contains stream index s, or NULL (fallthrough
 * return not visible in this sampled listing). */
2431 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2435 for (i = 0; i < ic->nb_programs; i++)
2436 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2437 if (ic->programs[i]->stream_index[j] == s)
2438 return ic->programs[i];
/**
 * Pick the "best" stream of the given media type: most codec_info_nb_frames,
 * skipping impaired-audience dispositions, restricted to the related
 * stream's program when one is given (retrying over all streams if that
 * program yields nothing), optionally requiring a usable decoder.
 * @return the stream index, AVERROR_STREAM_NOT_FOUND, or
 *         AVERROR_DECODER_NOT_FOUND when only undecodable candidates exist.
 */
2442 int av_find_best_stream(AVFormatContext *ic,
2443 enum AVMediaType type,
2444 int wanted_stream_nb,
2446 AVCodec **decoder_ret,
2449 int i, nb_streams = ic->nb_streams;
2450 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2451 unsigned *program = NULL;
2452 AVCodec *decoder = NULL, *best_decoder = NULL;
2454 if (related_stream >= 0 && wanted_stream_nb < 0) {
2455 AVProgram *p = find_program_from_stream(ic, related_stream);
2457 program = p->stream_index;
2458 nb_streams = p->nb_stream_indexes;
2461 for (i = 0; i < nb_streams; i++) {
2462 int real_stream_index = program ? program[i] : i;
2463 AVStream *st = ic->streams[real_stream_index];
2464 AVCodecContext *avctx = st->codec;
2465 if (avctx->codec_type != type)
2467 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2469 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
2472 decoder = avcodec_find_decoder(st->codec->codec_id);
2475 ret = AVERROR_DECODER_NOT_FOUND;
/* prefer the stream with the most frames seen during probing */
2479 if (best_count >= st->codec_info_nb_frames)
2481 best_count = st->codec_info_nb_frames;
2482 ret = real_stream_index;
2483 best_decoder = decoder;
2484 if (program && i == nb_streams - 1 && ret < 0) {
2486 nb_streams = ic->nb_streams;
2487 i = 0; /* no related stream found, try again with everything */
2491 *decoder_ret = best_decoder;
2495 /*******************************************************/
/**
 * Start/resume playing: delegate to the demuxer's read_play hook when
 * present, otherwise un-pause the underlying AVIOContext (the s->pb
 * NULL guard sits on an elided line); ENOSYS when neither applies.
 */
2497 int av_read_play(AVFormatContext *s)
2499 if (s->iformat->read_play)
2500 return s->iformat->read_play(s);
2502 return avio_pause(s->pb, 0);
2503 return AVERROR(ENOSYS);
/**
 * Pause playing: mirror of av_read_play() — demuxer read_pause hook
 * first, then avio_pause(pb, 1), else ENOSYS.
 */
2506 int av_read_pause(AVFormatContext *s)
2508 if (s->iformat->read_pause)
2509 return s->iformat->read_pause(s);
2511 return avio_pause(s->pb, 1);
2512 return AVERROR(ENOSYS);
/**
 * Free an AVFormatContext and everything it owns: per-stream data
 * (parser, attached picture, metadata, probe buffer, index, codec
 * extradata/subtitle header, priv_data), programs, chapters, the
 * context metadata and the streams array itself.  Does NOT close the
 * I/O context — that is avformat_close_input()'s job.
 */
2515 void avformat_free_context(AVFormatContext *s)
/* demuxer private options were allocated via AVOptions — free them
 * the same way */
2521 if (s->iformat && s->iformat->priv_class && s->priv_data)
2522 av_opt_free(s->priv_data);
2524 for(i=0;i<s->nb_streams;i++) {
2525 /* free all data in a stream component */
2528 av_parser_close(st->parser);
2530 if (st->attached_pic.data)
2531 av_free_packet(&st->attached_pic);
2532 av_dict_free(&st->metadata);
2533 av_freep(&st->probe_data.buf);
2534 av_free(st->index_entries);
2535 av_free(st->codec->extradata);
2536 av_free(st->codec->subtitle_header);
2538 av_free(st->priv_data);
/* iterate backwards so nb_programs can shrink safely */
2542 for(i=s->nb_programs-1; i>=0; i--) {
2543 av_dict_free(&s->programs[i]->metadata);
2544 av_freep(&s->programs[i]->stream_index);
2545 av_freep(&s->programs[i]);
2547 av_freep(&s->programs);
2548 av_freep(&s->priv_data);
2549 while(s->nb_chapters--) {
2550 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2551 av_free(s->chapters[s->nb_chapters]);
2553 av_freep(&s->chapters);
2554 av_dict_free(&s->metadata);
2555 av_freep(&s->streams);
/**
 * Close an opened input context: flush queued packets, call the
 * demuxer's read_close hook, free the context, and (on elided lines)
 * NULL *ps and close pb — except for AVFMT_NOFILE demuxers and
 * user-supplied custom I/O, whose pb is not owned by lavf.
 */
2559 void avformat_close_input(AVFormatContext **ps)
2561 AVFormatContext *s = *ps;
2562 AVIOContext *pb = s->pb;
/* don't close an I/O context we don't own (NOFILE / custom I/O);
 * the branch body is elided here */
2564 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2565 (s->flags & AVFMT_FLAG_CUSTOM_IO))
2568 flush_packet_queue(s);
2571 if (s->iformat->read_close)
2572 s->iformat->read_close(s);
2575 avformat_free_context(s);
/**
 * Append a new stream to s.  Allocates the AVStream, its info struct
 * and a codec context (optionally preset for decoder c), initializes
 * all timestamp state to "unknown" (AV_NOPTS_VALUE), and installs a
 * default MPEG-like 90 kHz time base.  Returns the stream, or NULL on
 * allocation failure (error paths are on elided lines).
 */
2582 AVStream *avformat_new_stream(AVFormatContext *s, AVCodec *c)
/* grow the streams array by one slot before allocating the stream */
2587 if (av_reallocp_array(&s->streams, s->nb_streams + 1, sizeof(*s->streams)) < 0) {
2592 st = av_mallocz(sizeof(AVStream));
2595 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2600 st->codec = avcodec_alloc_context3(c);
2602 /* no default bitrate if decoding */
2603 st->codec->bit_rate = 0;
2605 st->index = s->nb_streams;
2606 st->start_time = AV_NOPTS_VALUE;
2607 st->duration = AV_NOPTS_VALUE;
2608 /* we set the current DTS to 0 so that formats without any timestamps
2609 but durations get some timestamps, formats with some unknown
2610 timestamps have their first few packets buffered and the
2611 timestamps corrected before they are returned to the user */
2613 st->first_dts = AV_NOPTS_VALUE;
2614 st->probe_packets = MAX_PROBE_PACKETS;
2616 /* default pts setting is MPEG-like */
2617 avpriv_set_pts_info(st, 33, 1, 90000);
2618 st->last_IP_pts = AV_NOPTS_VALUE;
2619 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2620 st->pts_buffer[i]= AV_NOPTS_VALUE;
2621 st->reference_dts = AV_NOPTS_VALUE;
2623 st->sample_aspect_ratio = (AVRational){0,1};
2625 st->info->fps_first_dts = AV_NOPTS_VALUE;
2626 st->info->fps_last_dts = AV_NOPTS_VALUE;
2628 s->streams[s->nb_streams++] = st;
/**
 * Return the program with the given id, creating and registering a
 * new one (discard defaulting to AVDISCARD_NONE) if none exists yet.
 */
2632 AVProgram *av_new_program(AVFormatContext *ac, int id)
2634 AVProgram *program=NULL;
2637 av_dlog(ac, "new_program: id=0x%04x\n", id);
/* reuse an existing program with the same id if present */
2639 for(i=0; i<ac->nb_programs; i++)
2640 if(ac->programs[i]->id == id)
2641 program = ac->programs[i];
2644 program = av_mallocz(sizeof(AVProgram));
2647 dynarray_add(&ac->programs, &ac->nb_programs, program);
2648 program->discard = AVDISCARD_NONE;
/**
 * Return the chapter with the given id, creating and registering a
 * new one if needed, then (re)set its title metadata, time base and
 * start (end assignment is on an elided line).
 */
2655 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2657 AVChapter *chapter = NULL;
/* reuse an existing chapter with the same id if present */
2660 for(i=0; i<s->nb_chapters; i++)
2661 if(s->chapters[i]->id == id)
2662 chapter = s->chapters[i];
2665 chapter= av_mallocz(sizeof(AVChapter));
2668 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2670 av_dict_set(&chapter->metadata, "title", title, 0);
2672 chapter->time_base= time_base;
2673 chapter->start = start;
/**
 * Register stream index idx with the program whose id is progid.
 * Validates idx, skips duplicates already listed in the program, and
 * grows the program's stream_index array by one.  On reallocation
 * failure nb_stream_indexes is reset to 0 (the array pointer was
 * freed by av_reallocp_array).
 */
2679 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
2682 AVProgram *program=NULL;
2684 if (idx >= ac->nb_streams) {
2685 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
2689 for(i=0; i<ac->nb_programs; i++){
2690 if(ac->programs[i]->id != progid)
2692 program = ac->programs[i];
/* already registered? (the early-exit is on an elided line) */
2693 for(j=0; j<program->nb_stream_indexes; j++)
2694 if(program->stream_index[j] == idx)
2697 if (av_reallocp_array(&program->stream_index,
2698 program->nb_stream_indexes + 1,
2699 sizeof(*program->stream_index)) < 0) {
2700 program->nb_stream_indexes = 0;
2703 program->stream_index[program->nb_stream_indexes++] = idx;
/* Log a rate with adaptive precision: two decimals when it is not a
 * whole number, no decimals when whole, and a "k" suffix for exact
 * multiples of 1000.  v is the rate in hundredths, used only to pick
 * the format. */
2708 static void print_fps(double d, const char *postfix){
2709 uint64_t v= lrintf(d*100);
2710 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
2711 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
2712 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/* Log every metadata tag except "language" under a "Metadata:"
 * heading; prints nothing when the dict is empty or contains only a
 * language tag (language is shown inline by dump_stream_format). */
2715 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
2717 if(m && !(av_dict_count(m) == 1 && av_dict_get(m, "language", NULL, 0))){
2718 AVDictionaryEntry *tag=NULL;
2720 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
/* AV_DICT_IGNORE_SUFFIX with an empty key iterates every entry */
2721 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
2722 if(strcmp("language", tag->key))
2723 av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value);
2728 /* "user interface" functions */
/**
 * Print a one-line human-readable description of stream i of ic:
 * "Stream #index.i[id](lang): codec string", followed by optional
 * aspect-ratio info, frame/timebase rates for video, disposition
 * flags and per-stream metadata.
 */
2729 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
2732 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
2733 AVStream *st = ic->streams[i];
/* reduce the time base for the debug-level print below */
2734 int g = av_gcd(st->time_base.num, st->time_base.den);
2735 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
2736 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2737 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2738 /* the pid is an important information, so we display it */
2739 /* XXX: add a generic system */
2740 if (flags & AVFMT_SHOW_IDS)
2741 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2743 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
2744 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
2745 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* only show SAR/DAR when the stream-level SAR is set and differs
 * from the codec-level one */
2746 if (st->sample_aspect_ratio.num && // default
2747 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
2748 AVRational display_aspect_ratio;
2749 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
2750 st->codec->width*st->sample_aspect_ratio.num,
2751 st->codec->height*st->sample_aspect_ratio.den,
2753 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
2754 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
2755 display_aspect_ratio.num, display_aspect_ratio.den);
/* video: average fps, container tick rate (tbn), codec tick rate (tbc) */
2757 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
2758 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
2759 print_fps(av_q2d(st->avg_frame_rate), "fps");
2760 if(st->time_base.den && st->time_base.num)
2761 print_fps(1/av_q2d(st->time_base), "tbn");
2762 if(st->codec->time_base.den && st->codec->time_base.num)
2763 print_fps(1/av_q2d(st->codec->time_base), "tbc");
2765 if (st->disposition & AV_DISPOSITION_DEFAULT)
2766 av_log(NULL, AV_LOG_INFO, " (default)");
2767 if (st->disposition & AV_DISPOSITION_DUB)
2768 av_log(NULL, AV_LOG_INFO, " (dub)");
2769 if (st->disposition & AV_DISPOSITION_ORIGINAL)
2770 av_log(NULL, AV_LOG_INFO, " (original)");
2771 if (st->disposition & AV_DISPOSITION_COMMENT)
2772 av_log(NULL, AV_LOG_INFO, " (comment)");
2773 if (st->disposition & AV_DISPOSITION_LYRICS)
2774 av_log(NULL, AV_LOG_INFO, " (lyrics)");
2775 if (st->disposition & AV_DISPOSITION_KARAOKE)
2776 av_log(NULL, AV_LOG_INFO, " (karaoke)");
2777 if (st->disposition & AV_DISPOSITION_FORCED)
2778 av_log(NULL, AV_LOG_INFO, " (forced)");
2779 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
2780 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
2781 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
2782 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
2783 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
2784 av_log(NULL, AV_LOG_INFO, " (clean effects)");
2785 av_log(NULL, AV_LOG_INFO, "\n");
2786 dump_metadata(NULL, st->metadata, " ");
/**
 * Print a human-readable summary of the whole container: header line,
 * global metadata, duration/start/bitrate, chapters, then each stream
 * grouped under its program (streams not in any program are dumped
 * afterwards, tracked via the "printed" scratch array).
 */
2789 void av_dump_format(AVFormatContext *ic,
/* one flag per stream: set once the stream was dumped under a program */
2795 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
2796 if (ic->nb_streams && !printed)
2799 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2800 is_output ? "Output" : "Input",
2802 is_output ? ic->oformat->name : ic->iformat->name,
2803 is_output ? "to" : "from", url);
2804 dump_metadata(NULL, ic->metadata, " ");
2806 av_log(NULL, AV_LOG_INFO, " Duration: ");
2807 if (ic->duration != AV_NOPTS_VALUE) {
2808 int hours, mins, secs, us;
2809 secs = ic->duration / AV_TIME_BASE;
2810 us = ic->duration % AV_TIME_BASE;
/* hh:mm:ss split happens on elided lines */
2815 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
2816 (100 * us) / AV_TIME_BASE);
2818 av_log(NULL, AV_LOG_INFO, "N/A");
2820 if (ic->start_time != AV_NOPTS_VALUE) {
2822 av_log(NULL, AV_LOG_INFO, ", start: ");
2823 secs = ic->start_time / AV_TIME_BASE;
/* abs() so the fractional part prints correctly for negative starts */
2824 us = abs(ic->start_time % AV_TIME_BASE);
2825 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2826 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2828 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2830 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2832 av_log(NULL, AV_LOG_INFO, "N/A");
2834 av_log(NULL, AV_LOG_INFO, "\n");
2836 for (i = 0; i < ic->nb_chapters; i++) {
2837 AVChapter *ch = ic->chapters[i];
2838 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
2839 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
2840 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
2842 dump_metadata(NULL, ch->metadata, " ");
2844 if(ic->nb_programs) {
2845 int j, k, total = 0;
2846 for(j=0; j<ic->nb_programs; j++) {
2847 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
2849 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
2850 name ? name->value : "");
2851 dump_metadata(NULL, ic->programs[j]->metadata, " ");
2852 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
2853 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
2854 printed[ic->programs[j]->stream_index[k]] = 1;
2856 total += ic->programs[j]->nb_stream_indexes;
2858 if (total < ic->nb_streams)
2859 av_log(NULL, AV_LOG_INFO, " No Program\n");
/* dump streams that belong to no program (guard via printed[] is on
 * an elided line) */
2861 for(i=0;i<ic->nb_streams;i++)
2863 dump_stream_format(ic, i, index, is_output);
/* Current time as NTP-epoch microseconds: wall clock truncated to
 * millisecond precision plus the 1900->1970 epoch offset. */
2868 uint64_t ff_ntp_time(void)
2870 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/**
 * Expand a "%Nd"-style pattern in path with the given frame number
 * into buf.  Exactly one %d (optionally zero-padded "%0Nd") is
 * substituted; the surrounding copy loop, '%%' handling and the
 * error returns sit on elided lines.  Returns 0 on success, negative
 * on overflow or when no %d was found.
 */
2873 int av_get_frame_filename(char *buf, int buf_size,
2874 const char *path, int number)
2877 char *q, buf1[20], c;
2878 int nd, len, percentd_found;
/* accumulate the zero-pad width digits following '%' */
2890 while (av_isdigit(*p)) {
2891 nd = nd * 10 + *p++ - '0';
2894 } while (av_isdigit(c));
/* render the number with the requested zero padding */
2903 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
/* would the expansion overflow buf? */
2905 if ((q - buf + len) > buf_size - 1)
2907 memcpy(q, buf1, len);
2915 if ((q - buf) < buf_size - 1)
2919 if (!percentd_found)
/* Hex+ASCII dump of a buffer, 16 bytes per row.  Output goes to the
 * av_log system when f is NULL, otherwise to the given FILE.
 * Non-printable bytes are replaced in the ASCII column (replacement
 * char is on an elided line, presumably '.'). */
2928 static void hex_dump_internal(void *avcl, FILE *f, int level,
2929 const uint8_t *buf, int size)
2932 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2934 for(i=0;i<size;i+=16) {
2941 PRINT(" %02x", buf[i+j]);
2946 for(j=0;j<len;j++) {
2948 if (c < ' ' || c > '~')
/* Public wrapper: hex dump to a FILE (log path disabled). */
2957 void av_hex_dump(FILE *f, const uint8_t *buf, int size)
2959 hex_dump_internal(NULL, f, 0, buf, size);
/* Public wrapper: hex dump via av_log at the given level. */
2962 void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size)
2964 hex_dump_internal(avcl, NULL, level, buf, size);
/* Dump one packet's fields (stream index, keyframe flag, duration,
 * dts, pts, size) in seconds using the supplied time base, plus an
 * optional payload hex dump.  f==NULL routes output through av_log. */
2967 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
2969 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
2970 PRINT("stream #%d:\n", pkt->stream_index);
2971 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
2972 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
2973 /* DTS is _always_ valid after av_read_frame() */
2975 if (pkt->dts == AV_NOPTS_VALUE)
2978 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
2979 /* PTS may not be known if B-frames are present. */
2981 if (pkt->pts == AV_NOPTS_VALUE)
2984 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
2986 PRINT(" size=%d\n", pkt->size);
2989 av_hex_dump(f, pkt->data, pkt->size);
/* Public wrapper: dump a packet to a FILE using st's time base. */
2992 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
2994 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
/* Public wrapper: dump a packet via av_log using st's time base. */
2997 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3000 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/**
 * Split a URL into proto, authorization (user[:pass]), hostname, port
 * and path components.  Every output buffer is optional in the sense
 * that a zero size skips it; missing components are left as empty
 * strings and *port_ptr as -1.  Handles bracketed IPv6 literals.
 */
3003 void av_url_split(char *proto, int proto_size,
3004 char *authorization, int authorization_size,
3005 char *hostname, int hostname_size,
3007 char *path, int path_size,
3010 const char *p, *ls, *at, *col, *brk;
/* initialize all outputs to "not present" */
3012 if (port_ptr) *port_ptr = -1;
3013 if (proto_size > 0) proto[0] = 0;
3014 if (authorization_size > 0) authorization[0] = 0;
3015 if (hostname_size > 0) hostname[0] = 0;
3016 if (path_size > 0) path[0] = 0;
3018 /* parse protocol */
3019 if ((p = strchr(url, ':'))) {
/* FFMIN bounds the copy so only the scheme before ':' is taken */
3020 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3025 /* no protocol means plain filename */
3026 av_strlcpy(path, url, path_size);
3030 /* separate path from hostname */
3031 ls = strchr(p, '/');
3033 ls = strchr(p, '?');
3035 av_strlcpy(path, ls, path_size);
3037 ls = &p[strlen(p)]; // XXX
3039 /* the rest is hostname, use that to parse auth/port */
3041 /* authorization (user[:pass]@hostname) */
3042 if ((at = strchr(p, '@')) && at < ls) {
3043 av_strlcpy(authorization, p,
3044 FFMIN(authorization_size, at + 1 - p));
3045 p = at + 1; /* skip '@' */
/* [::1]:port style IPv6 literal */
3048 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3050 av_strlcpy(hostname, p + 1,
3051 FFMIN(hostname_size, brk - p));
3052 if (brk[1] == ':' && port_ptr)
3053 *port_ptr = atoi(brk + 2);
3054 } else if ((col = strchr(p, ':')) && col < ls) {
3055 av_strlcpy(hostname, p,
3056 FFMIN(col + 1 - p, hostname_size));
3057 if (port_ptr) *port_ptr = atoi(col + 1);
3059 av_strlcpy(hostname, p,
3060 FFMIN(ls + 1 - p, hostname_size));
/**
 * Render s bytes of src as hex text into buff (2 chars per byte),
 * upper- or lowercase per the flag.  NUL termination / return value
 * are on elided lines.
 */
3064 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
3067 static const char hex_table_uc[16] = { '0', '1', '2', '3',
3070 'C', 'D', 'E', 'F' };
3071 static const char hex_table_lc[16] = { '0', '1', '2', '3',
3074 'c', 'd', 'e', 'f' };
3075 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
3077 for(i = 0; i < s; i++) {
/* high nibble then low nibble */
3078 buff[i * 2] = hex_table[src[i] >> 4];
3079 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
/**
 * Parse whitespace-separated hex text p into bytes.  The visible part
 * skips whitespace and classifies each character; nibble pairing,
 * writing into data (which may be NULL to just count — verify against
 * callers) and the byte-count return are on elided lines.
 */
3085 int ff_hex_to_data(uint8_t *data, const char *p)
3092 p += strspn(p, SPACE_CHARS);
3095 c = av_toupper((unsigned char) *p++);
3096 if (c >= '0' && c <= '9')
3098 else if (c >= 'A' && c <= 'F')
/**
 * Set a stream's timestamp parameters: reduce pts_num/pts_den to
 * lowest terms (warning if the fraction had to be approximated to fit
 * INT_MAX), reject non-positive results, then install the time base
 * and the timestamp wrap bit count.
 */
3113 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
3114 unsigned int pts_num, unsigned int pts_den)
/* av_reduce returns nonzero when the reduction was exact */
3117 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
3118 if(new_tb.num != pts_num)
3119 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
3121 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
3123 if(new_tb.num <= 0 || new_tb.den <= 0) {
3124 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase for st:%d\n", s->index);
3127 s->time_base = new_tb;
3128 s->pts_wrap_bits = pts_wrap_bits;
/**
 * Parse a comma/whitespace-separated key=value list, invoking
 * callback_get_buf for each key so the caller supplies a destination
 * buffer, then copying the (possibly double-quoted) value into it.
 * dest_end reserves one byte, presumably for NUL termination — the
 * terminating write is on elided lines.
 */
3131 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3134 const char *ptr = str;
3136 /* Parse key=value pairs. */
3139 char *dest = NULL, *dest_end;
3140 int key_len, dest_len = 0;
3142 /* Skip whitespace and potential commas. */
3143 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
3150 if (!(ptr = strchr(key, '=')))
3153 key_len = ptr - key;
/* let the caller choose where (and whether) to store this value */
3155 callback_get_buf(context, key, key_len, &dest, &dest_len);
3156 dest_end = dest + dest_len - 1;
/* quoted value: copy until the closing quote (escape handling on
 * elided lines) */
3160 while (*ptr && *ptr != '\"') {
3164 if (dest && dest < dest_end)
3168 if (dest && dest < dest_end)
/* unquoted value: copy until whitespace or comma */
3176 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
3177 if (dest && dest < dest_end)
/* Linear search for a stream with the given id; the found/not-found
 * returns are on elided lines. */
3185 int ff_find_stream_index(AVFormatContext *s, int id)
3188 for (i = 0; i < s->nb_streams; i++) {
3189 if (s->streams[i]->id == id)
/**
 * Convert an ISO-8601 date string to Unix time (UTC via av_timegm).
 * Tries the "date T time" form first, falling back to the
 * space-separated form; whitespace in the strptime patterns matches
 * zero-or-more input whitespace, so "YYYY-MM-DD" style also parses.
 * When HAVE_STRPTIME is not set only the warning branch is compiled.
 */
3195 int64_t ff_iso8601_to_unix_time(const char *datestr)
3198 struct tm time1 = {0}, time2 = {0};
3200 ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
3201 ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
3203 return av_timegm(&time2);
3205 return av_timegm(&time1);
3207 av_log(NULL, AV_LOG_WARNING, "strptime() unavailable on this system, cannot convert "
3208 "the date string.\n");
/**
 * Can this muxer store the given codec?  Order of authority: the
 * muxer's own query_codec callback, then its codec_tag tables, then
 * equality with its default video/audio/subtitle codecs; otherwise
 * AVERROR_PATCHWELCOME (unknown).
 */
3213 int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance)
3216 if (ofmt->query_codec)
3217 return ofmt->query_codec(codec_id, std_compliance);
3218 else if (ofmt->codec_tag)
3219 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
3220 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
3221 codec_id == ofmt->subtitle_codec)
3224 return AVERROR_PATCHWELCOME;
/**
 * Global network-layer init.  Marks networking as globally
 * initialized, then performs the actual init (winsock/TLS setup
 * inside ff_network_init); error propagation is on elided lines.
 */
3227 int avformat_network_init(void)
3231 ff_network_inited_globally = 1;
3232 if ((ret = ff_network_init()) < 0)
/* Global network-layer teardown; counterpart of
 * avformat_network_init().  Body is entirely elided here. */
3239 int avformat_network_deinit(void)
3248 int ff_add_param_change(AVPacket *pkt, int32_t channels,
3249 uint64_t channel_layout, int32_t sample_rate,
3250 int32_t width, int32_t height)
3256 return AVERROR(EINVAL);
3259 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
3261 if (channel_layout) {
3263 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
3267 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
3269 if (width || height) {
3271 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
3273 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
3275 return AVERROR(ENOMEM);
3276 bytestream_put_le32(&data, flags);
3278 bytestream_put_le32(&data, channels);
3280 bytestream_put_le64(&data, channel_layout);
3282 bytestream_put_le32(&data, sample_rate);
3283 if (width || height) {
3284 bytestream_put_le32(&data, width);
3285 bytestream_put_le32(&data, height);