2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
42 #include "audiointerleave.h"
55 * various utility functions for use within Libav
/** Return LIBAVFORMAT_VERSION_INT, the version this library was built as. */
58 unsigned avformat_version(void)
60 return LIBAVFORMAT_VERSION_INT;
/** Return the build-time ./configure command line of this library build. */
63 const char *avformat_configuration(void)
65 return LIBAV_CONFIGURATION;
/** Return the license of this library build. */
68 const char *avformat_license(void)
70 #define LICENSE_PREFIX "libavformat license: "
/* The concatenated literal is "libavformat license: <license>"; adding
 * sizeof(LICENSE_PREFIX) - 1 advances the returned pointer past the
 * prefix, so callers see only the license name while the full tagged
 * string stays in the binary for tools that grep for it. */
71 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
74 /* an arbitrarily chosen "sane" max packet size -- 50M */
75 #define SANE_CHUNK_SIZE (50000000)
77 /* Read the data in sane-sized chunks and append to pkt.
78 * Return the number of bytes read or an error. */
79 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
81 int64_t chunk_size = size;
82 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
83 int orig_size = pkt->size;
/* NOTE(review): the enclosing read loop (and the declarations of ret and
 * read_size) are not visible in this excerpt; prev_size records the packet
 * size before each chunk is appended. */
87 int prev_size = pkt->size;
90 /* When the caller requests a lot of data, limit it to the amount
91 * left in file or SANE_CHUNK_SIZE when it is not known. */
92 if (size > SANE_CHUNK_SIZE) {
/* filesize here is really "bytes remaining from the current position". */
93 int64_t filesize = avio_size(s) - avio_tell(s);
94 chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
96 read_size = FFMIN(size, chunk_size);
/* Grow the packet buffer before reading directly into it. */
98 ret = av_grow_packet(pkt, read_size);
102 ret = avio_read(s, pkt->data + prev_size, read_size);
/* Short read: trim the packet back to the bytes actually received. */
103 if (ret != read_size) {
104 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
/* On total failure the packet is released; otherwise report how many
 * bytes were appended beyond the original size. */
113 av_packet_unref(pkt);
114 return pkt->size > orig_size ? pkt->size - orig_size : ret;
/** Allocate and read the payload of a packet: up to @p size bytes from
 * @p s into @p pkt. Records the current byte position so seeking back
 * to this packet is possible. Returns bytes read or a negative AVERROR.
 * NOTE(review): the packet-initialization lines between the signature
 * and the pos assignment are not visible in this excerpt. */
117 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
122 pkt->pos = avio_tell(s);
124 return append_packet_chunked(s, pkt, size);
/** Read data and append it to the current content of @p pkt; behaves
 * like av_get_packet() when the packet is still empty (the guarding
 * size check between the two calls is not visible in this excerpt). */
127 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
130 return av_get_packet(s, pkt, size);
131 return append_packet_chunked(s, pkt, size);
/** Check whether @p filename contains a %d-style sequence-number pattern
 * by attempting to expand it with av_get_frame_filename(); non-zero on
 * success. Used by image-sequence (de)muxers that need numbered files. */
134 int av_filename_number_test(const char *filename)
138 (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
/* Re-probe the accumulated stream data of @p st and, when a format from
 * the table below is detected, set the stream's codec id/type to match.
 * Used for streams initially tagged AV_CODEC_ID_PROBE. */
141 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
142 AVProbeData *pd, int score)
/* Table mapping raw-ES demuxer names to the codec they carry. The list
 * is NULL-terminated (terminator not visible in this excerpt). */
144 static const struct {
147 enum AVMediaType type;
149 { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO },
150 { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO },
151 { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO },
152 { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO },
153 { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO },
154 { "latm", AV_CODEC_ID_AAC_LATM, AVMEDIA_TYPE_AUDIO },
155 { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO },
156 { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO },
157 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
/* is_opened=1: the probe only considers formats usable on open input. */
160 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
164 av_log(s, AV_LOG_DEBUG,
165 "Probe with size=%d, packets=%d detected %s with score=%d\n",
166 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
/* Linear scan of the table; matched entry fixes the stream's codec. */
168 for (i = 0; fmt_id_type[i].name; i++) {
169 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
170 st->codec->codec_id = fmt_id_type[i].id;
171 st->codec->codec_type = fmt_id_type[i].type;
179 /************************************************************/
180 /* input media file */
182 /* Open input file and probe the format if necessary. */
183 static int init_input(AVFormatContext *s, const char *filename,
184 AVDictionary **options)
187 AVProbeData pd = { filename, NULL, 0 };
/* Caller supplied its own AVIOContext (s->pb set before open): mark it
 * so avformat_open_input() will not close it on failure. */
190 s->flags |= AVFMT_FLAG_CUSTOM_IO;
192 return av_probe_input_buffer(s->pb, &s->iformat, filename,
/* A custom pb together with an AVFMT_NOFILE demuxer is contradictory. */
194 else if (s->iformat->flags & AVFMT_NOFILE)
195 return AVERROR(EINVAL);
/* No I/O needed: either the forced format takes no file, or probing by
 * filename alone already identified the format. */
199 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
200 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
/* Otherwise open the file and probe from its contents. */
203 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
204 &s->interrupt_callback, options)) < 0)
208 return av_probe_input_buffer(s->pb, &s->iformat, filename,
/* Append @p pkt to the singly linked packet list, updating the tail
 * pointer *plast_pktl. Returns a pointer to the stored packet, or NULL
 * on allocation failure (return statements not visible in this excerpt). */
212 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
213 AVPacketList **plast_pktl)
215 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
/* Non-empty list: link after current tail; empty list: become the head. */
220 (*plast_pktl)->next = pktl;
222 *packet_buffer = pktl;
224 /* Add the packet in the buffered packet list. */
230 static int queue_attached_pictures(AVFormatContext *s)
233 for (i = 0; i < s->nb_streams; i++)
234 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
235 s->streams[i]->discard < AVDISCARD_ALL) {
236 AVPacket copy = s->streams[i]->attached_pic;
237 copy.buf = av_buffer_ref(copy.buf);
239 return AVERROR(ENOMEM);
241 add_to_pktbuf(&s->internal->raw_packet_buffer, ©,
242 &s->internal->raw_packet_buffer_end);
/** Open an input stream and read its header. On failure the context is
 * freed and *ps is reset; on success *ps points at the (possibly newly
 * allocated) demuxer context. Options consumed during open are removed
 * from @p options. Several error-path labels and gotos are not visible
 * in this excerpt. */
247 int avformat_open_input(AVFormatContext **ps, const char *filename,
248 AVInputFormat *fmt, AVDictionary **options)
250 AVFormatContext *s = *ps;
252 AVDictionary *tmp = NULL;
253 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
/* Allocate a context if the caller did not pass one in. */
255 if (!s && !(s = avformat_alloc_context()))
256 return AVERROR(ENOMEM);
/* Work on a copy of the options so the caller's dict can be used to
 * report unconsumed entries afterwards. */
261 av_dict_copy(&tmp, *options, 0);
263 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
266 if ((ret = init_input(s, filename, &tmp)) < 0)
269 /* Check filename in case an image number is expected. */
270 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
271 if (!av_filename_number_test(filename)) {
272 ret = AVERROR(EINVAL);
277 s->duration = s->start_time = AV_NOPTS_VALUE;
278 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
280 /* Allocate private data. */
281 if (s->iformat->priv_data_size > 0) {
282 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
283 ret = AVERROR(ENOMEM);
/* Formats with an AVClass get defaults applied and options forwarded
 * into their private context. */
286 if (s->iformat->priv_class) {
287 *(const AVClass **) s->priv_data = s->iformat->priv_class;
288 av_opt_set_defaults(s->priv_data);
289 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
294 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
296 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
298 if (s->iformat->read_header)
299 if ((ret = s->iformat->read_header(s)) < 0)
/* Attach any ID3v2 embedded pictures as attached-picture streams. */
302 if (id3v2_extra_meta &&
303 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
305 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
307 if ((ret = queue_attached_pictures(s)) < 0)
/* Remember where the packet data starts, for later seeking. */
310 if (s->pb && !s->internal->data_offset)
311 s->internal->data_offset = avio_tell(s->pb);
313 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
316 av_dict_free(options);
/* Failure path: release everything acquired above. */
323 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
325 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
327 avformat_free_context(s);
332 /*******************************************************/
/* Feed @p pkt (or flush with NULL) into the per-stream probe buffer of a
 * stream still tagged AV_CODEC_ID_PROBE, and attempt codec detection
 * once enough data is accumulated or the probe budget is exhausted. */
334 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
336 if (st->codec->codec_id == AV_CODEC_ID_PROBE) {
337 AVProbeData *pd = &st->probe_data;
338 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
/* Append the new packet bytes, keeping the zeroed padding AVProbeData
 * consumers require at the end of the buffer. */
343 if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
344 AVPROBE_PADDING_SIZE)) < 0)
346 memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
347 pd->buf_size += pkt->size;
348 memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* pkt == NULL means end of stream: no more probe data will arrive. */
350 st->probe_packets = 0;
352 av_log(s, AV_LOG_ERROR,
353 "nothing to probe for stream %d\n", st->index);
/* Probe when out of packets, or whenever the buffer size crosses a
 * power-of-two boundary (cheap "grew enough to retry" heuristic). */
358 if (!st->probe_packets ||
359 av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
/* Final (flush) probe accepts any score; earlier tries require at
 * least AVPROBE_SCORE_MAX / 4. */
360 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0
361 ? AVPROBE_SCORE_MAX / 4 : 0);
362 if (st->codec->codec_id != AV_CODEC_ID_PROBE) {
365 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
/** Read one raw packet: either return a buffered packet from the raw
 * packet buffer (once its stream's codec is known) or pull a new packet
 * from the demuxer, applying codec probing and forced codec ids.
 * Several loop/label lines are not visible in this excerpt. */
372 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
378 AVPacketList *pktl = s->internal->raw_packet_buffer;
/* A buffered packet can be returned when its stream no longer needs
 * probing, has no probe budget left, or the buffer budget is spent. */
382 st = s->streams[pkt->stream_index];
383 if (st->codec->codec_id != AV_CODEC_ID_PROBE ||
384 !st->probe_packets ||
385 s->internal->raw_packet_buffer_remaining_size < pkt->size) {
/* Flush the probe state before handing the packet out. */
387 if (st->probe_packets)
388 if ((err = probe_codec(s, st, NULL)) < 0)
390 pd = &st->probe_data;
/* Unlink the list node and give its size back to the budget. */
393 s->internal->raw_packet_buffer = pktl->next;
394 s->internal->raw_packet_buffer_remaining_size += pkt->size;
403 ret = s->iformat->read_packet(s, pkt);
/* On EOF (not EAGAIN), flush probing for all streams so buffered
 * probe-pending packets become deliverable. */
405 if (!pktl || ret == AVERROR(EAGAIN))
407 for (i = 0; i < s->nb_streams; i++) {
409 if (st->probe_packets)
410 if ((err = probe_codec(s, st, NULL)) < 0)
/* Make the demuxer's packet independently refcounted. */
417 AVPacket tmp = { 0 };
418 ret = av_packet_ref(&tmp, pkt);
424 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
425 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
426 av_log(s, AV_LOG_WARNING,
427 "Dropped corrupted packet (stream = %d)\n",
429 av_packet_unref(pkt);
433 st = s->streams[pkt->stream_index];
/* Apply user-forced codec ids per media type. */
435 switch (st->codec->codec_type) {
436 case AVMEDIA_TYPE_VIDEO:
437 if (s->video_codec_id)
438 st->codec->codec_id = s->video_codec_id;
440 case AVMEDIA_TYPE_AUDIO:
441 if (s->audio_codec_id)
442 st->codec->codec_id = s->audio_codec_id;
444 case AVMEDIA_TYPE_SUBTITLE:
445 if (s->subtitle_codec_id)
446 st->codec->codec_id = s->subtitle_codec_id;
/* Fresh packet whose codec is already known: return it directly. */
450 if (!pktl && (st->codec->codec_id != AV_CODEC_ID_PROBE ||
/* Otherwise buffer it and feed it to the codec prober. */
454 add_to_pktbuf(&s->internal->raw_packet_buffer, pkt,
455 &s->internal->raw_packet_buffer_end);
456 s->internal->raw_packet_buffer_remaining_size -= pkt->size;
458 if ((err = probe_codec(s, st, pkt)) < 0)
463 /**********************************************************/
466 * Return the frame duration in seconds. Return 0 if not available.
/* The duration is returned as the rational *pnum / *pden; a zero result
 * means "unknown". */
468 void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st,
469 AVCodecParserContext *pc, AVPacket *pkt)
/* Demuxing uses the codec's advertised framerate; muxing-side callers
 * (no iformat) derive it from the codec time base instead. */
471 AVRational codec_framerate = s->iformat ? st->codec->framerate :
472 av_inv_q(st->codec->time_base);
477 switch (st->codec->codec_type) {
478 case AVMEDIA_TYPE_VIDEO:
/* Preference order: average frame rate, then a plausible stream time
 * base (< 1000 fps sanity check), then the codec framerate. */
479 if (st->avg_frame_rate.num) {
480 *pnum = st->avg_frame_rate.den;
481 *pden = st->avg_frame_rate.num;
482 } else if (st->time_base.num * 1000LL > st->time_base.den) {
483 *pnum = st->time_base.num;
484 *pden = st->time_base.den;
485 } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
486 *pnum = codec_framerate.den;
487 *pden = codec_framerate.num;
/* Parser-reported repeated fields stretch the duration; the overflow
 * guard falls back to shrinking the denominator instead. */
488 if (pc && pc->repeat_pict) {
489 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
490 *pden /= 1 + pc->repeat_pict;
492 *pnum *= 1 + pc->repeat_pict;
494 /* If this codec can be interlaced or progressive then we need
495 * a parser to compute duration of a packet. Thus if we have
496 * no parser in such case leave duration undefined. */
497 if (st->codec->ticks_per_frame > 1 && !pc)
501 case AVMEDIA_TYPE_AUDIO:
502 frame_size = av_get_audio_frame_duration(st->codec, pkt->size);
503 if (frame_size <= 0 || st->codec->sample_rate <= 0)
/* Audio duration: frame_size samples over the sample rate. */
506 *pden = st->codec->sample_rate;
/* Return non-zero when every frame of codec @p id is a keyframe, i.e.
 * it is not a video codec lacking AV_CODEC_PROP_INTRA_ONLY. The NULL
 * descriptor check and return statements are not visible here. */
513 static int is_intra_only(enum AVCodecID id)
515 const AVCodecDescriptor *d = avcodec_descriptor_get(id);
518 if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
/* Once the first DTS of a stream becomes known, shift all timestamps of
 * already-buffered packets of that stream by the derived offset and
 * initialize the stream start time. */
523 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
524 int64_t dts, int64_t pts)
526 AVStream *st = s->streams[stream_index];
527 AVPacketList *pktl = s->internal->packet_buffer;
/* Nothing to do if first_dts is already set, the new dts is unknown, or
 * there is no reference cur_dts to offset against. */
529 if (st->first_dts != AV_NOPTS_VALUE ||
530 dts == AV_NOPTS_VALUE ||
531 st->cur_dts == AV_NOPTS_VALUE)
/* first_dts is the shift that maps the buffered relative timestamps
 * onto the real timeline. */
534 st->first_dts = dts - st->cur_dts;
537 for (; pktl; pktl = pktl->next) {
538 if (pktl->pkt.stream_index != stream_index)
540 // FIXME: think more about this check
541 if (pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
542 pktl->pkt.pts += st->first_dts;
544 if (pktl->pkt.dts != AV_NOPTS_VALUE)
545 pktl->pkt.dts += st->first_dts;
547 if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
548 st->start_time = pktl->pkt.pts;
/* Fall back to the freshly seen pts if no buffered packet provided one. */
550 if (st->start_time == AV_NOPTS_VALUE)
551 st->start_time = pts;
/* Fill in missing DTS/PTS/duration on buffered packets of a stream once
 * a per-packet duration becomes known, stepping cur_dts forward by
 * @p duration per packet. */
554 static void update_initial_durations(AVFormatContext *s, AVStream *st,
555 int stream_index, int duration)
557 AVPacketList *pktl = s->internal->packet_buffer;
/* Choose the starting DTS: the stream's first_dts when known. The
 * buffered packets must still be timestamp-less for the fill-in below
 * to be valid (the early-return lines are not visible here). */
560 if (st->first_dts != AV_NOPTS_VALUE) {
561 cur_dts = st->first_dts;
/* Walk backwards from first_dts over this stream's buffered packets to
 * find where the sequence starts. */
562 for (; pktl; pktl = pktl->next) {
563 if (pktl->pkt.stream_index == stream_index) {
564 if (pktl->pkt.pts != pktl->pkt.dts ||
565 pktl->pkt.dts != AV_NOPTS_VALUE ||
571 pktl = s->internal->packet_buffer;
572 st->first_dts = cur_dts;
573 } else if (st->cur_dts)
/* Second pass: assign the synthesized timestamps and durations. */
576 for (; pktl; pktl = pktl->next) {
577 if (pktl->pkt.stream_index != stream_index)
579 if (pktl->pkt.pts == pktl->pkt.dts &&
580 pktl->pkt.dts == AV_NOPTS_VALUE &&
581 !pktl->pkt.duration) {
582 pktl->pkt.dts = cur_dts;
/* Without B-frames presentation order equals decode order. */
583 if (!st->codec->has_b_frames)
584 pktl->pkt.pts = cur_dts;
586 if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
587 pktl->pkt.duration = duration;
591 if (st->first_dts == AV_NOPTS_VALUE)
592 st->cur_dts = cur_dts;
/* Fill in missing packet fields (pts, dts, duration, key flag) using
 * parser output, stream state and heuristics. This is the central
 * timestamp-fixup routine of the demuxing path; statement order matters,
 * so only comments are added here. */
595 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
596 AVCodecParserContext *pc, AVPacket *pkt)
598 int num, den, presentation_delayed, delay, i;
/* User asked for raw demuxer output: do not touch anything. */
601 if (s->flags & AVFMT_FLAG_NOFILLIN)
604 if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
605 pkt->dts = AV_NOPTS_VALUE;
607 /* do we have a video B-frame ? */
608 delay = st->codec->has_b_frames;
609 presentation_delayed = 0;
611 /* XXX: need has_b_frame, but cannot get it if the codec is
614 pc && pc->pict_type != AV_PICTURE_TYPE_B)
615 presentation_delayed = 1;
/* Detect dts that wrapped around while pts did not, and unwrap it. */
617 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
618 st->pts_wrap_bits < 63 &&
619 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
620 pkt->dts -= 1LL << st->pts_wrap_bits;
623 /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
624 * We take the conservative approach and discard both.
625 * Note: If this is misbehaving for an H.264 file, then possibly
626 * presentation_delayed is not set correctly. */
627 if (delay == 1 && pkt->dts == pkt->pts &&
628 pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
629 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
630 pkt->dts = AV_NOPTS_VALUE;
/* Derive a duration from the frame rate when the demuxer gave none. */
633 if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
634 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
636 pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
637 den * (int64_t) st->time_base.num,
640 if (pkt->duration != 0 && s->internal->packet_buffer)
641 update_initial_durations(s, st, pkt->stream_index,
646 /* Correct timestamps with byte offset if demuxers only have timestamps
647 * on packet boundaries */
648 if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
649 /* this will estimate bitrate based on this frame's duration and size */
650 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
651 if (pkt->pts != AV_NOPTS_VALUE)
653 if (pkt->dts != AV_NOPTS_VALUE)
657 /* This may be redundant, but it should not hurt. */
658 if (pkt->dts != AV_NOPTS_VALUE &&
659 pkt->pts != AV_NOPTS_VALUE &&
661 presentation_delayed = 1;
663 av_log(NULL, AV_LOG_TRACE,
664 "IN delayed:%d pts:%"PRId64", dts:%"PRId64" "
665 "cur_dts:%"PRId64" st:%d pc:%p\n",
666 presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
667 pkt->stream_index, pc);
668 /* Interpolate PTS and DTS if they are not present. We skip H.264
669 * currently because delay and has_b_frames are not reliably set. */
670 if ((delay == 0 || (delay == 1 && pc)) &&
671 st->codec->codec_id != AV_CODEC_ID_H264) {
672 if (presentation_delayed) {
673 /* DTS = decompression timestamp */
674 /* PTS = presentation timestamp */
675 if (pkt->dts == AV_NOPTS_VALUE)
676 pkt->dts = st->last_IP_pts;
677 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
678 if (pkt->dts == AV_NOPTS_VALUE)
679 pkt->dts = st->cur_dts;
681 /* This is tricky: the dts must be incremented by the duration
682 * of the frame we are displaying, i.e. the last I- or P-frame. */
683 if (st->last_IP_duration == 0)
684 st->last_IP_duration = pkt->duration;
685 if (pkt->dts != AV_NOPTS_VALUE)
686 st->cur_dts = pkt->dts + st->last_IP_duration;
687 st->last_IP_duration = pkt->duration;
688 st->last_IP_pts = pkt->pts;
689 /* Cannot compute PTS if not present (we can compute it only
690 * by knowing the future. */
691 } else if (pkt->pts != AV_NOPTS_VALUE ||
692 pkt->dts != AV_NOPTS_VALUE ||
694 st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
695 int duration = pkt->duration;
/* Audio packets without duration: derive it here too. */
696 if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
697 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
699 duration = av_rescale_rnd(1,
700 num * (int64_t) st->time_base.den,
701 den * (int64_t) st->time_base.num,
703 if (duration != 0 && s->internal->packet_buffer)
704 update_initial_durations(s, st, pkt->stream_index,
709 if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
711 /* presentation is not delayed : PTS and DTS are the same */
712 if (pkt->pts == AV_NOPTS_VALUE)
714 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
716 if (pkt->pts == AV_NOPTS_VALUE)
717 pkt->pts = st->cur_dts;
719 if (pkt->pts != AV_NOPTS_VALUE)
720 st->cur_dts = pkt->pts + duration;
/* Reorder-buffer trick: keep the last MAX_REORDER_DELAY pts values
 * sorted; the smallest is the dts of the current packet. */
725 if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
726 st->pts_buffer[0] = pkt->pts;
727 for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
728 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
729 if (pkt->dts == AV_NOPTS_VALUE)
730 pkt->dts = st->pts_buffer[0];
731 // We skipped it above so we try here.
732 if (st->codec->codec_id == AV_CODEC_ID_H264)
733 // This should happen on the first packet
734 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
735 if (pkt->dts > st->cur_dts)
736 st->cur_dts = pkt->dts;
739 av_log(NULL, AV_LOG_TRACE,
740 "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
741 presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
/* Intra-only codecs: every packet is a keyframe. */
744 if (is_intra_only(st->codec->codec_id))
745 pkt->flags |= AV_PKT_FLAG_KEY;
746 #if FF_API_CONVERGENCE_DURATION
747 FF_DISABLE_DEPRECATION_WARNINGS
749 pkt->convergence_duration = pc->convergence_duration;
750 FF_ENABLE_DEPRECATION_WARNINGS
/* Unref and free every node of a packet list, resetting head (and,
 * outside this excerpt, tail) pointers to NULL. */
754 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
757 AVPacketList *pktl = *pkt_buf;
758 *pkt_buf = pktl->next;
759 av_packet_unref(&pktl->pkt);
766 * Parse a packet, add all split parts to parse_queue.
768 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
770 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
772 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
773 AVStream *st = s->streams[stream_index];
774 uint8_t *data = pkt ? pkt->data : NULL;
775 int size = pkt ? pkt->size 
: 0;
776 int ret = 0, got_output = 0;
/* A NULL input is turned into an empty flush packet for the parser. */
779 av_init_packet(&flush_pkt);
/* Keep parsing while input remains, or while flushing still yields
 * output frames. */
784 while (size > 0 || (pkt == &flush_pkt && got_output)) {
787 av_init_packet(&out_pkt);
788 len = av_parser_parse2(st->parser, st->codec,
789 &out_pkt.data, &out_pkt.size, data, size,
790 pkt->pts, pkt->dts, pkt->pos);
/* Timestamps may only be used for the first frame parsed from this
 * input packet; clear them for subsequent iterations. */
792 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
793 /* increment read pointer */
797 got_output = !!out_pkt.size;
/* Transfer side data ownership to the first output packet. */
802 if (pkt->side_data) {
803 out_pkt.side_data = pkt->side_data;
804 out_pkt.side_data_elems = pkt->side_data_elems;
805 pkt->side_data = NULL;
806 pkt->side_data_elems = 0;
809 /* set the duration */
810 out_pkt.duration = 0;
811 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
812 if (st->codec->sample_rate > 0) {
814 av_rescale_q_rnd(st->parser->duration,
815 (AVRational) { 1, st->codec->sample_rate },
/* Propagate parser-derived packet metadata. */
821 out_pkt.stream_index = st->index;
822 out_pkt.pts = st->parser->pts;
823 out_pkt.dts = st->parser->dts;
824 out_pkt.pos = st->parser->pos;
/* key_frame: 1 = known keyframe, -1 = decide from picture type. */
826 if (st->parser->key_frame == 1 ||
827 (st->parser->key_frame == -1 &&
828 st->parser->pict_type == AV_PICTURE_TYPE_I))
829 out_pkt.flags |= AV_PKT_FLAG_KEY;
831 compute_pkt_fields(s, st, st->parser, &out_pkt);
833 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
834 out_pkt.flags & AV_PKT_FLAG_KEY) {
835 ff_reduce_index(s, st->index);
836 av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
837 0, 0, AVINDEX_KEYFRAME);
/* If the parser returned the input buffer unchanged, steal its ref;
 * otherwise duplicate so the queued packet owns its data. */
840 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
841 out_pkt.buf = pkt->buf;
844 if ((ret = av_dup_packet(&out_pkt)) < 0)
847 if (!add_to_pktbuf(&s->internal->parse_queue, &out_pkt, &s->internal->parse_queue_end)) {
848 av_packet_unref(&out_pkt);
849 ret = AVERROR(ENOMEM);
854 /* end of the stream => close and free the parser */
855 if (pkt == &flush_pkt) {
856 av_parser_close(st->parser);
861 av_packet_unref(pkt);
/* Pop the head packet of a non-empty packet list into *pkt (move, not
 * copy) and advance the head; clear the tail pointer when the list
 * becomes empty. */
865 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
866 AVPacketList **pkt_buffer_end,
870 av_assert0(*pkt_buffer);
873 *pkt_buffer = pktl->next;
875 *pkt_buffer_end = NULL;
/* Produce the next fully parsed packet: pull raw packets via
 * ff_read_packet(), run them through the per-stream parser when needed,
 * then drain the parse queue. Also propagates demuxer metadata updates
 * into s->metadata. */
880 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
882 int ret = 0, i, got_packet = 0;
883 AVDictionary *metadata = NULL;
887 while (!got_packet && !s->internal->parse_queue) {
891 /* read next packet */
892 ret = ff_read_packet(s, &cur_pkt);
894 if (ret == AVERROR(EAGAIN))
896 /* flush the parsers */
897 for (i = 0; i < s->nb_streams; i++) {
899 if (st->parser && st->need_parsing)
900 parse_packet(s, NULL, st->index);
902 /* all remaining packets are now in parse_queue =>
903 * really terminate parsing */
907 st = s->streams[cur_pkt.stream_index];
/* pts < dts is impossible in a valid stream; warn but continue. */
909 if (cur_pkt.pts != AV_NOPTS_VALUE &&
910 cur_pkt.dts != AV_NOPTS_VALUE &&
911 cur_pkt.pts < cur_pkt.dts) {
912 av_log(s, AV_LOG_WARNING,
913 "Invalid timestamps stream=%d, pts=%"PRId64", "
914 "dts=%"PRId64", size=%d\n",
915 cur_pkt.stream_index, cur_pkt.pts,
916 cur_pkt.dts, cur_pkt.size);
918 if (s->debug & FF_FDEBUG_TS)
919 av_log(s, AV_LOG_DEBUG,
920 "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", "
921 "size=%d, duration=%"PRId64", flags=%d\n",
922 cur_pkt.stream_index, cur_pkt.pts, cur_pkt.dts,
923 cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
/* Lazily create the parser the first time this stream needs one. */
925 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
926 st->parser = av_parser_init(st->codec->codec_id);
928 /* no parser available: just output the raw packets */
929 st->need_parsing = AVSTREAM_PARSE_NONE;
930 else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
931 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
932 else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
933 st->parser->flags |= PARSER_FLAG_ONCE;
936 if (!st->need_parsing || !st->parser) {
937 /* no parsing needed: we just output the packet as is */
939 compute_pkt_fields(s, st, NULL, pkt);
940 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
941 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
942 ff_reduce_index(s, st->index);
943 av_add_index_entry(st, pkt->pos, pkt->dts,
944 0, 0, AVINDEX_KEYFRAME);
947 } else if (st->discard < AVDISCARD_ALL) {
948 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
952 av_packet_unref(&cur_pkt);
/* Deliver the oldest parsed packet if parsing queued any. */
956 if (!got_packet && s->internal->parse_queue)
957 ret = read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);
/* Surface metadata updated by the demuxer (option-based side channel). */
959 av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
961 s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
962 av_dict_copy(&s->metadata, metadata, 0);
963 av_dict_free(&metadata);
964 av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
967 if (s->debug & FF_FDEBUG_TS)
968 av_log(s, AV_LOG_DEBUG,
969 "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", "
970 "size=%d, duration=%"PRId64", flags=%d\n",
971 pkt->stream_index, pkt->pts, pkt->dts,
972 pkt->size, pkt->duration, pkt->flags);
/** Public packet-reading entry point. Without AVFMT_FLAG_GENPTS this is
 * a thin wrapper over the internal buffer / read_frame_internal(); with
 * GENPTS it buffers packets and back-fills missing pts values from the
 * dts of later packets of the same stream. */
977 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
979 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
/* Fast path: hand out buffered packets first, then read fresh ones. */
983 return s->internal->packet_buffer
984 ? read_from_packet_buffer(&s->internal->packet_buffer,
985 &s->internal->packet_buffer_end, pkt)
986 : read_frame_internal(s, pkt);
/* GENPTS path: scan the buffer to synthesize pts for the head packet. */
990 AVPacketList *pktl = s->internal->packet_buffer;
993 AVPacket *next_pkt = &pktl->pkt;
995 if (next_pkt->dts != AV_NOPTS_VALUE) {
996 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
/* Find a later packet of the same stream whose dts (modulo timestamp
 * wrap) follows this one; that dts becomes this packet's pts. */
997 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
998 if (pktl->pkt.stream_index == next_pkt->stream_index &&
999 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
1000 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
1002 next_pkt->pts = pktl->pkt.dts;
1006 pktl = s->internal->packet_buffer;
1009 /* read packet from packet buffer, if there is data */
1010 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1011 next_pkt->dts != AV_NOPTS_VALUE && !eof))
1012 return read_from_packet_buffer(&s->internal->packet_buffer,
1013 &s->internal->packet_buffer_end, pkt);
1016 ret = read_frame_internal(s, pkt);
1018 if (pktl && ret != AVERROR(EAGAIN)) {
/* Buffer the new packet so the pts back-fill loop above can see it. */
1025 if (av_dup_packet(add_to_pktbuf(&s->internal->packet_buffer, pkt,
1026 &s->internal->packet_buffer_end)) < 0)
1027 return AVERROR(ENOMEM);
1031 /* XXX: suppress the packet queue */
/* Drop every buffered packet (parse queue, output buffer, raw buffer)
 * and reset the raw-buffer budget; used on seek and close. */
1032 static void flush_packet_queue(AVFormatContext *s)
1034 free_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end);
1035 free_packet_buffer(&s->internal->packet_buffer, &s->internal->packet_buffer_end);
1036 free_packet_buffer(&s->internal->raw_packet_buffer, &s->internal->raw_packet_buffer_end);
1038 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1041 /*******************************************************/
/** Pick a default stream: the first real video stream (ignoring attached
 * pictures), else the first audio stream, else stream 0. Returns a
 * negative value when there are no streams (early-return line not
 * visible in this excerpt). */
1044 int av_find_default_stream_index(AVFormatContext *s)
1046 int first_audio_index = -1;
1050 if (s->nb_streams <= 0)
1052 for (i = 0; i < s->nb_streams; i++) {
1054 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1055 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1058 if (first_audio_index < 0 &&
1059 st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1060 first_audio_index = i;
1062 return first_audio_index >= 0 ? first_audio_index : 0;
1065 /** Flush the frame reader. */
/* Discards all buffered packets and resets every stream's parser and
 * timestamp state; called when seeking invalidates the read position. */
1066 void ff_read_frame_flush(AVFormatContext *s)
1071 flush_packet_queue(s);
1073 /* Reset read state for each stream. */
1074 for (i = 0; i < s->nb_streams; i++) {
1078 av_parser_close(st->parser);
1081 st->last_IP_pts = AV_NOPTS_VALUE;
1082 /* We set the current DTS to an unspecified origin. */
1083 st->cur_dts = AV_NOPTS_VALUE;
/* Probing may run again after the seek. */
1085 st->probe_packets = MAX_PROBE_PACKETS;
1087 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1088 st->pts_buffer[j] = AV_NOPTS_VALUE;
/** After a seek, set every stream's cur_dts to @p timestamp (expressed
 * in @p ref_st's time base), rescaled into each stream's own time base. */
1092 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1096 for (i = 0; i < s->nb_streams; i++) {
1097 AVStream *st = s->streams[i];
/* ts * (st_tb_den * ref_tb_num) / (st_tb_num * ref_tb_den): converts
 * from ref_st's time base to st's, using 64-bit intermediates. */
1100 av_rescale(timestamp,
1101 st->time_base.den * (int64_t) ref_st->time_base.num,
1102 st->time_base.num * (int64_t) ref_st->time_base.den);
/** Keep the seek index of a stream within s->max_index_size bytes by
 * halving it (keeping every second entry) when the limit is reached. */
1106 void ff_reduce_index(AVFormatContext *s, int stream_index)
1108 AVStream *st = s->streams[stream_index];
1109 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1111 if ((unsigned) st->nb_index_entries >= max_entries) {
/* Compact in place: entry i takes the value of entry 2*i. */
1113 for (i = 0; 2 * i < st->nb_index_entries; i++)
1114 st->index_entries[i] = st->index_entries[2 * i];
1115 st->nb_index_entries = i;
/** Insert an index entry (pos/timestamp/size/distance/flags) into a
 * timestamp-sorted index array, growing the array as needed. Entries
 * with a duplicate timestamp are updated in place. Returns the entry's
 * index (return statement not visible in this excerpt). */
1119 int ff_add_index_entry(AVIndexEntry **index_entries,
1120 int *nb_index_entries,
1121 unsigned int *index_entries_allocated_size,
1122 int64_t pos, int64_t timestamp,
1123 int size, int distance, int flags)
1125 AVIndexEntry *entries, *ie;
/* Overflow guard before computing the new allocation size. */
1128 if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1131 entries = av_fast_realloc(*index_entries,
1132 index_entries_allocated_size,
1133 (*nb_index_entries + 1) *
1134 sizeof(AVIndexEntry));
1138 *index_entries = entries;
/* Locate the insertion point for the new timestamp. */
1140 index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1141 timestamp, AVSEEK_FLAG_ANY);
/* Not found past the end: append. */
1144 index = (*nb_index_entries)++;
1145 ie = &entries[index];
1146 assert(index == 0 || ie[-1].timestamp < timestamp);
1148 ie = &entries[index];
1149 if (ie->timestamp != timestamp) {
1150 if (ie->timestamp <= timestamp)
/* Shift the tail up to make room for an in-middle insertion. */
1152 memmove(entries + index + 1, entries + index,
1153 sizeof(AVIndexEntry) * (*nb_index_entries - index));
1154 (*nb_index_entries)++;
1155 } else if (ie->pos == pos && distance < ie->min_distance)
1156 // do not reduce the distance
1157 distance = ie->min_distance;
1161 ie->timestamp = timestamp;
1162 ie->min_distance = distance;
/** Public wrapper: add an index entry to @p st's own index arrays. */
1169 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1170 int size, int distance, int flags)
1172 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1173 &st->index_entries_allocated_size, pos,
1174 timestamp, size, distance, flags);
/** Binary-search a sorted index for @p wanted_timestamp. Direction is
 * controlled by flags (AVSEEK_FLAG_BACKWARD picks the entry at or
 * before the timestamp); without AVSEEK_FLAG_ANY the result is then
 * walked to the nearest keyframe entry. Returns the entry index or a
 * negative value when out of range. Binary-search bound declarations
 * are not visible in this excerpt. */
1177 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1178 int64_t wanted_timestamp, int flags)
1186 // Optimize appending index entries at the end.
1187 if (b && entries[b - 1].timestamp < wanted_timestamp)
1192 timestamp = entries[m].timestamp;
1193 if (timestamp >= wanted_timestamp)
1195 if (timestamp <= wanted_timestamp)
/* After the search, a/b bracket the target; pick per seek direction. */
1198 m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1200 if (!(flags & AVSEEK_FLAG_ANY))
/* Step toward a keyframe entry in the seek direction. */
1201 while (m >= 0 && m < nb_entries &&
1202 !(entries[m].flags & AVINDEX_KEYFRAME))
1203 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1205 if (m == nb_entries)
/** Public wrapper: search @p st's index for @p wanted_timestamp. */
1210 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1212 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1213 wanted_timestamp, flags);
/** Seek to @p target_ts using a binary search over read_timestamp(),
 * seeded with bounds from the stream's index when available, then move
 * the I/O position and update all streams' cur_dts. */
1216 int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
1217 int64_t target_ts, int flags)
1219 AVInputFormat *avif = s->iformat;
1220 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1221 int64_t ts_min, ts_max, ts;
1226 if (stream_index < 0)
1229 av_log(s, AV_LOG_TRACE, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1232 ts_min = AV_NOPTS_VALUE;
1233 pos_limit = -1; // GCC falsely says it may be uninitialized.
1235 st = s->streams[stream_index];
/* Seed the search bounds from cached index entries when possible. */
1236 if (st->index_entries) {
1239 /* FIXME: Whole function must be checked for non-keyframe entries in
1240 * index case, especially read_timestamp(). */
/* Lower bound: last index entry at or before the target. */
1241 index = av_index_search_timestamp(st, target_ts,
1242 flags | AVSEEK_FLAG_BACKWARD);
1243 index = FFMAX(index, 0);
1244 e = &st->index_entries[index];
1246 if (e->timestamp <= target_ts || e->pos == e->min_distance) {
1248 ts_min = e->timestamp;
1249 av_log(s, AV_LOG_TRACE, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* Upper bound: first index entry after the target. */
1255 index = av_index_search_timestamp(st, target_ts,
1256 flags & ~AVSEEK_FLAG_BACKWARD);
1257 assert(index < st->nb_index_entries);
1259 e = &st->index_entries[index];
1260 assert(e->timestamp >= target_ts);
1262 ts_max = e->timestamp;
1263 pos_limit = pos_max - e->min_distance;
1264 av_log(s, AV_LOG_TRACE, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
1265 " dts_max=%"PRId64"\n", pos_max, pos_limit, ts_max);
1269 pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
1270 ts_min, ts_max, flags, &ts, avif->read_timestamp);
/* Do the real seek and propagate the resulting timestamp. */
1275 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1278 ff_update_cur_dts(s, st, ts);
/* Generic timestamp-based binary search over a byte stream, used when a
 * demuxer provides read_timestamp() but no usable index.  Narrows
 * [pos_min, pos_limit] until it brackets target_ts, then returns the chosen
 * byte position and stores the matched timestamp in *ts_ret.
 * NOTE(review): this excerpt is elided (several original lines are missing,
 * e.g. the declarations of pos/ts/no_change and several loop-exit branches);
 * confirm details against the upstream source before relying on them. */
1283 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1284 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1285 int64_t ts_min, int64_t ts_max,
1286 int flags, int64_t *ts_ret,
1287 int64_t (*read_timestamp)(struct AVFormatContext *, int,
1288 int64_t *, int64_t))
1291 int64_t start_pos, filesize;
1294 av_log(s, AV_LOG_TRACE, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* If the caller did not supply a lower bound, probe the first timestamp
 * at the start of the data section. */
1296 if (ts_min == AV_NOPTS_VALUE) {
1297 pos_min = s->internal->data_offset;
1298 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1299 if (ts_min == AV_NOPTS_VALUE)
/* If no upper bound was supplied, probe backwards from end-of-file until a
 * timestamp is found (step doubling is elided from this excerpt). */
1303 if (ts_max == AV_NOPTS_VALUE) {
1305 filesize = avio_size(s->pb);
1306 pos_max = filesize - 1;
1309 ts_max = read_timestamp(s, stream_index, &pos_max,
1312 } while (ts_max == AV_NOPTS_VALUE && pos_max >= step);
1313 if (ts_max == AV_NOPTS_VALUE)
1317 int64_t tmp_pos = pos_max + 1;
1318 int64_t tmp_ts = read_timestamp(s, stream_index,
1319 &tmp_pos, INT64_MAX);
1320 if (tmp_ts == AV_NOPTS_VALUE)
1324 if (tmp_pos >= filesize)
1327 pos_limit = pos_max;
/* Degenerate ranges: inverted bounds are an error; equal bounds mean the
 * answer is already pinned to pos_min. */
1330 if (ts_min > ts_max)
1332 else if (ts_min == ts_max)
1333 pos_limit = pos_min;
/* Main search loop: interpolate first, bisect if that stalls, then fall
 * back to linear probing (no_change counts consecutive non-progress). */
1336 while (pos_min < pos_limit) {
1337 av_log(s, AV_LOG_TRACE, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64
1338 " dts_max=%"PRId64"\n", pos_min, pos_max, ts_min, ts_max);
1339 assert(pos_limit <= pos_max);
1341 if (no_change == 0) {
1342 int64_t approximate_keyframe_distance = pos_max - pos_limit;
1343 // interpolate position (better than dichotomy)
1344 pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
1346 pos_min - approximate_keyframe_distance;
1347 } else if (no_change == 1) {
1348 // bisection if interpolation did not change min / max pos last time
1349 pos = (pos_min + pos_limit) >> 1;
1351 /* linear search if bisection failed, can only happen if there
1352 * are very few or no keyframes between min/max */
1357 else if (pos > pos_limit)
1361 // May pass pos_limit instead of -1.
1362 ts = read_timestamp(s, stream_index, &pos, INT64_MAX);
1367 av_log(s, AV_LOG_TRACE, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64
1368 " target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1369 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1370 pos_limit, start_pos, no_change);
1371 if (ts == AV_NOPTS_VALUE) {
1372 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1375 assert(ts != AV_NOPTS_VALUE);
/* Shrink the bracket: move the upper bound down when we are at or past the
 * target, the lower bound up when we are at or before it. */
1376 if (target_ts <= ts) {
1377 pos_limit = start_pos - 1;
1381 if (target_ts >= ts) {
/* Pick the final side of the bracket according to AVSEEK_FLAG_BACKWARD.
 * NOTE(review): ts_max below is re-read via &pos_min — upstream increments
 * pos_min between the two reads (the increment is elided here). */
1387 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1388 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1390 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1392 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1393 av_log(s, AV_LOG_TRACE, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1394 pos, ts_min, target_ts, ts_max);
/* Seek to a raw byte offset, clamped to [data_offset, file_size - 1].
 * stream_index and flags are unused by the visible code. */
1399 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1400 int64_t pos, int flags)
1402 int64_t pos_min, pos_max;
1404 pos_min = s->internal->data_offset;
1405 pos_max = avio_size(s->pb) - 1;
/* clamp pos into the valid range (the lower-bound branch is elided here) */
1409 else if (pos > pos_max)
1412 avio_seek(s->pb, pos, SEEK_SET);
/* Generic index-based seek: look the timestamp up in the stream's index;
 * if the target lies beyond the last index entry, read packets forward
 * (building the index as a side effect) until a keyframe past the target
 * is seen, then retry the index lookup and seek to the entry's position. */
1417 static int seek_frame_generic(AVFormatContext *s, int stream_index,
1418 int64_t timestamp, int flags)
1425 st = s->streams[stream_index];
1427 index = av_index_search_timestamp(st, timestamp, flags);
1429 if (index < 0 && st->nb_index_entries &&
1430 timestamp < st->index_entries[0].timestamp)
1433 if (index < 0 || index == st->nb_index_entries - 1) {
/* Target is past the indexed region: position after the last known entry
 * (or at data_offset when the index is empty) and scan forward. */
1436 if (st->nb_index_entries) {
1437 assert(st->index_entries);
1438 ie = &st->index_entries[st->nb_index_entries - 1];
1439 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1441 ff_update_cur_dts(s, st, ie->timestamp);
1443 if ((ret = avio_seek(s->pb, s->internal->data_offset, SEEK_SET)) < 0)
/* read packets until EOF or a suitable keyframe; EAGAIN is retried */
1449 read_status = av_read_frame(s, &pkt);
1450 } while (read_status == AVERROR(EAGAIN));
1451 if (read_status < 0)
1453 av_packet_unref(&pkt);
1454 if (stream_index == pkt.stream_index)
1455 if ((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1458 index = av_index_search_timestamp(st, timestamp, flags);
/* Prefer the demuxer's own read_seek if it can satisfy the request. */
1463 ff_read_frame_flush(s);
1464 if (s->iformat->read_seek)
1465 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1467 ie = &st->index_entries[index];
1468 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1470 ff_update_cur_dts(s, st, ie->timestamp);
/* Dispatch a seek request to the most capable available method, in order:
 * byte seek (AVSEEK_FLAG_BYTE), the demuxer's read_seek, binary search via
 * read_timestamp, and finally the generic index-based seek. */
1475 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1476 int64_t timestamp, int flags)
1481 if (flags & AVSEEK_FLAG_BYTE) {
1482 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1484 ff_read_frame_flush(s);
1485 return seek_frame_byte(s, stream_index, timestamp, flags);
/* No stream given: pick a default stream and convert the timestamp from
 * AV_TIME_BASE units into that stream's time base. */
1488 if (stream_index < 0) {
1489 stream_index = av_find_default_stream_index(s);
1490 if (stream_index < 0)
1493 st = s->streams[stream_index];
1494 /* timestamp for default must be expressed in AV_TIME_BASE units */
1495 timestamp = av_rescale(timestamp, st->time_base.den,
1496 AV_TIME_BASE * (int64_t) st->time_base.num);
1499 /* first, we try the format specific seek */
1500 if (s->iformat->read_seek) {
1501 ff_read_frame_flush(s);
1502 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
/* fall back on binary search, then on the generic index seek, unless the
 * demuxer explicitly opts out via AVFMT_NOBINSEARCH / AVFMT_NOGENSEARCH */
1508 if (s->iformat->read_timestamp &&
1509 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1510 ff_read_frame_flush(s);
1511 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1512 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1513 ff_read_frame_flush(s);
1514 return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public seek entry point: delegate to seek_frame_internal(), then re-queue
 * attached pictures so they are redelivered after the seek. */
1519 int av_seek_frame(AVFormatContext *s, int stream_index,
1520 int64_t timestamp, int flags)
1522 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1525 ret = queue_attached_pictures(s);
/* New-style seek API: seek to ts constrained to [min_ts, max_ts].
 * Prefers the demuxer's read_seek2; otherwise falls back to the old
 * av_seek_frame() API with approximated direction flags. */
1530 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
1531 int64_t ts, int64_t max_ts, int flags)
/* reject an inconsistent window where ts lies outside [min_ts, max_ts] */
1533 if (min_ts > ts || max_ts < ts)
1536 if (s->iformat->read_seek2) {
1538 ff_read_frame_flush(s);
1539 ret = s->iformat->read_seek2(s, stream_index, min_ts,
1543 ret = queue_attached_pictures(s);
1547 if (s->iformat->read_timestamp) {
1548 // try to seek via read_timestamp()
1551 // Fall back on old API if new is not implemented but old is.
1552 // Note the old API has somewhat different semantics.
/* choose BACKWARD when ts is closer to min_ts than to max_ts; the unsigned
 * arithmetic also handles AV_NOPTS-like extreme bounds without overflow UB */
1553 if (s->iformat->read_seek || 1)
1554 return av_seek_frame(s, stream_index, ts,
1555 flags | ((uint64_t) ts - min_ts >
1556 (uint64_t) max_ts - ts
1557 ? AVSEEK_FLAG_BACKWARD : 0));
1559 // try some generic seek like seek_frame_generic() but with new ts semantics
1562 /*******************************************************/
1565 * Return TRUE if the stream has accurate duration in any stream.
1567 * @return TRUE if the stream has accurate duration for at least one component.
/* True when any individual stream, or the container itself, carries a
 * known (non-AV_NOPTS_VALUE) duration. */
1569 static int has_duration(AVFormatContext *ic)
1574 for (i = 0; i < ic->nb_streams; i++) {
1575 st = ic->streams[i];
1576 if (st->duration != AV_NOPTS_VALUE)
1579 if (ic->duration != AV_NOPTS_VALUE)
1585 * Estimate the stream timings from the one of each components.
1587 * Also computes the global bitrate if possible.
/* Derive container-level start_time/duration from the per-stream values
 * (rescaled to AV_TIME_BASE), and compute bit_rate from file size and
 * duration when a seekable pb reports a size. */
1589 static void update_stream_timings(AVFormatContext *ic)
1591 int64_t start_time, start_time1, end_time, end_time1;
1592 int64_t duration, duration1, filesize;
1596 start_time = INT64_MAX;
1597 end_time = INT64_MIN;
1598 duration = INT64_MIN;
1599 for (i = 0; i < ic->nb_streams; i++) {
1600 st = ic->streams[i];
/* earliest start across streams; end = start + duration per stream */
1601 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1602 start_time1 = av_rescale_q(st->start_time, st->time_base,
1604 start_time = FFMIN(start_time, start_time1);
1605 if (st->duration != AV_NOPTS_VALUE) {
1606 end_time1 = start_time1 +
1607 av_rescale_q(st->duration, st->time_base,
1609 end_time = FFMAX(end_time, end_time1);
1612 if (st->duration != AV_NOPTS_VALUE) {
1613 duration1 = av_rescale_q(st->duration, st->time_base,
1615 duration = FFMAX(duration, duration1);
1618 if (start_time != INT64_MAX) {
1619 ic->start_time = start_time;
1620 if (end_time != INT64_MIN)
1621 duration = FFMAX(duration, end_time - start_time);
1623 if (duration != INT64_MIN) {
1624 ic->duration = duration;
1625 if (ic->pb && (filesize = avio_size(ic->pb)) > 0)
1626 /* compute the bitrate */
1627 ic->bit_rate = (double) filesize * 8.0 * AV_TIME_BASE /
1628 (double) ic->duration;
/* Recompute container timings, then copy them into any stream that has no
 * start_time of its own (rescaling from AV_TIME_BASE to the stream base). */
1632 static void fill_all_stream_timings(AVFormatContext *ic)
1637 update_stream_timings(ic);
1638 for (i = 0; i < ic->nb_streams; i++) {
1639 st = ic->streams[i];
1640 if (st->start_time == AV_NOPTS_VALUE) {
1641 if (ic->start_time != AV_NOPTS_VALUE)
1642 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
1644 if (ic->duration != AV_NOPTS_VALUE)
1645 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
/* Last-resort duration estimate: sum the per-stream codec bitrates (with
 * overflow guard) to get a container bitrate, then derive each stream's
 * duration from file size / bitrate. */
1651 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1653 int64_t filesize, duration;
1657 /* if bit_rate is already set, we believe it */
1658 if (ic->bit_rate <= 0) {
1660 for (i = 0; i < ic->nb_streams; i++) {
1661 st = ic->streams[i];
/* guard against int overflow when accumulating stream bitrates */
1662 if (st->codec->bit_rate > 0) {
1663 if (INT_MAX - st->codec->bit_rate < bit_rate) {
1667 bit_rate += st->codec->bit_rate;
1670 ic->bit_rate = bit_rate;
1673 /* if duration is already set, we believe it */
1674 if (ic->duration == AV_NOPTS_VALUE &&
1675 ic->bit_rate != 0) {
1676 filesize = ic->pb ? avio_size(ic->pb) : 0;
1678 for (i = 0; i < ic->nb_streams; i++) {
1679 st = ic->streams[i];
/* duration (stream tb) = 8 * filesize / bit_rate, rescaled via time_base */
1680 duration = av_rescale(8 * filesize, st->time_base.den,
1682 (int64_t) st->time_base.num);
1683 if (st->duration == AV_NOPTS_VALUE)
1684 st->duration = duration;
1690 #define DURATION_MAX_READ_SIZE 250000
1691 #define DURATION_MAX_RETRY 3
1693 /* only usable for MPEG-PS streams */
/* Estimate per-stream durations by reading packets near the end of the
 * file: seek back DURATION_MAX_READ_SIZE (doubling up to
 * DURATION_MAX_RETRY times), take the last PTS seen per stream, and
 * subtract the stream's start time.  Restores the original file position. */
1694 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1696 AVPacket pkt1, *pkt = &pkt1;
1698 int read_size, i, ret;
1700 int64_t filesize, offset, duration;
1703 /* flush packet queue */
1704 flush_packet_queue(ic);
1706 for (i = 0; i < ic->nb_streams; i++) {
1707 st = ic->streams[i];
1708 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1709 av_log(st->codec, AV_LOG_WARNING,
1710 "start time is not set in estimate_timings_from_pts\n");
1713 av_parser_close(st->parser);
1718 /* estimate the end time (duration) */
1719 /* XXX: may need to support wrapping */
1720 filesize = ic->pb ? avio_size(ic->pb) : 0;
1721 end_time = AV_NOPTS_VALUE;
1723 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
1727 avio_seek(ic->pb, offset, SEEK_SET);
1730 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
1734 ret = ff_read_packet(ic, pkt);
1735 } while (ret == AVERROR(EAGAIN));
1738 read_size += pkt->size;
1739 st = ic->streams[pkt->stream_index];
/* duration = last pts - stream start (or first_dts), with a wrap
 * correction of 2^pts_wrap_bits when it comes out negative */
1740 if (pkt->pts != AV_NOPTS_VALUE &&
1741 (st->start_time != AV_NOPTS_VALUE ||
1742 st->first_dts != AV_NOPTS_VALUE)) {
1743 duration = end_time = pkt->pts;
1744 if (st->start_time != AV_NOPTS_VALUE)
1745 duration -= st->start_time;
1747 duration -= st->first_dts;
1749 duration += 1LL << st->pts_wrap_bits;
1751 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1752 st->duration = duration;
1755 av_packet_unref(pkt);
1757 } while (end_time == AV_NOPTS_VALUE &&
1758 filesize > (DURATION_MAX_READ_SIZE << retry) &&
1759 ++retry <= DURATION_MAX_RETRY);
1761 fill_all_stream_timings(ic);
/* rewind to where the caller left the stream and reset decode state */
1763 avio_seek(ic->pb, old_offset, SEEK_SET);
1764 for (i = 0; i < ic->nb_streams; i++) {
1765 st = ic->streams[i];
1766 st->cur_dts = st->first_dts;
1767 st->last_IP_pts = AV_NOPTS_VALUE;
/* Pick the best available timing-estimation strategy: PTS scan for seekable
 * MPEG-PS/TS, per-stream timings when any stream has them, else bitrate. */
1771 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1775 /* get the file size, if possible */
1776 if (ic->iformat->flags & AVFMT_NOFILE) {
1779 file_size = avio_size(ic->pb);
1780 file_size = FFMAX(0, file_size);
1783 if ((!strcmp(ic->iformat->name, "mpeg") ||
1784 !strcmp(ic->iformat->name, "mpegts")) &&
1785 file_size && ic->pb->seekable) {
1786 /* get accurate estimate from the PTSes */
1787 estimate_timings_from_pts(ic, old_offset);
1788 } else if (has_duration(ic)) {
1789 /* at least one component has timings - we use them for all
1791 fill_all_stream_timings(ic);
1793 av_log(ic, AV_LOG_WARNING,
1794 "Estimating duration from bitrate, this may be inaccurate\n");
1795 /* less precise: use bitrate info */
1796 estimate_timings_from_bit_rate(ic);
1798 update_stream_timings(ic);
/* trace the resulting per-stream and global timings */
1802 AVStream av_unused *st;
1803 for (i = 0; i < ic->nb_streams; i++) {
1804 st = ic->streams[i];
1805 av_log(ic, AV_LOG_TRACE, "%d: start_time: %0.3f duration: %0.3f\n", i,
1806 (double) st->start_time / AV_TIME_BASE,
1807 (double) st->duration / AV_TIME_BASE);
1809 av_log(ic, AV_LOG_TRACE,
1810 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1811 (double) ic->start_time / AV_TIME_BASE,
1812 (double) ic->duration / AV_TIME_BASE,
1813 ic->bit_rate / 1000);
/* Return nonzero when the stream's codec context has enough parameters for
 * the probing loop to stop: a known codec_id plus, per media type, sample
 * rate/channels (and sample_fmt once a decoder ran) for audio, or pix_fmt
 * (once a decoder ran) for video. */
1817 static int has_codec_parameters(AVStream *st)
1819 AVCodecContext *avctx = st->codec;
1822 switch (avctx->codec_type) {
1823 case AVMEDIA_TYPE_AUDIO:
1824 val = avctx->sample_rate && avctx->channels;
/* found_decoder >= 0 means a decoder was (at least) looked for, so an
 * unset sample_fmt/pix_fmt indicates parameters are still missing */
1825 if (st->info->found_decoder >= 0 &&
1826 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1829 case AVMEDIA_TYPE_VIDEO:
1831 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1838 return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
/* H.264 has a variable reorder delay; require several decoded frames before
 * trusting it.  All other codecs are trusted immediately. */
1841 static int has_decode_delay_been_guessed(AVStream *st)
1843 return st->codec->codec_id != AV_CODEC_ID_H264 ||
1844 st->info->nb_decoded_frames >= 6;
1847 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/* Open the stream's decoder if needed (recording success/failure in
 * info->found_decoder) and feed it the packet until codec parameters and
 * the decode delay have been determined.  Used only during probing. */
1848 static int try_decode_frame(AVStream *st, AVPacket *avpkt,
1849 AVDictionary **options)
1851 const AVCodec *codec;
1852 int got_picture = 1, ret = 0;
1853 AVFrame *frame = av_frame_alloc();
1854 AVPacket pkt = *avpkt;
1857 return AVERROR(ENOMEM);
1859 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
1860 AVDictionary *thread_opt = NULL;
1862 codec = st->codec->codec ? st->codec->codec
1863 : avcodec_find_decoder(st->codec->codec_id);
/* found_decoder = -1 marks "no decoder available / failed to open" so we
 * do not retry on every packet */
1866 st->info->found_decoder = -1;
1871 /* Force thread count to 1 since the H.264 decoder will not extract
1872 * SPS and PPS to extradata during multi-threaded decoding. */
1873 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
1874 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
1876 av_dict_free(&thread_opt);
1878 st->info->found_decoder = -1;
1881 st->info->found_decoder = 1;
1882 } else if (!st->info->found_decoder)
1883 st->info->found_decoder = 1;
1885 if (st->info->found_decoder < 0) {
/* Decode loop: keep feeding (and, with a NULL packet, draining) while
 * parameters or the decode-delay guess are still missing; a stream with
 * AV_CODEC_CAP_CHANNEL_CONF additionally needs one real decoded frame. */
1890 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
1892 (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
1893 (!st->codec_info_nb_frames &&
1894 (st->codec->codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF)))) {
1896 switch (st->codec->codec_type) {
1897 case AVMEDIA_TYPE_VIDEO:
1898 ret = avcodec_decode_video2(st->codec, frame,
1899 &got_picture, &pkt);
1901 case AVMEDIA_TYPE_AUDIO:
1902 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
1909 st->info->nb_decoded_frames++;
1917 av_frame_free(&frame);
/* Look up the container tag for a codec id in a tag table terminated by
 * AV_CODEC_ID_NONE.  (Body largely elided in this excerpt.) */
1921 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
1923 while (tags->id != AV_CODEC_ID_NONE) {
/* Reverse lookup: map a container tag to a codec id.  First pass matches
 * the tag exactly; second pass retries case-insensitively on the fourcc. */
1931 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1934 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1935 if (tag == tags[i].tag)
1937 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1938 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
1940 return AV_CODEC_ID_NONE;
/* Map raw-PCM properties (bits per sample, float flag, endianness, and a
 * per-width signedness bitmask) to the matching AV_CODEC_ID_PCM_* id. */
1943 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
/* floating-point formats: only 32- and 64-bit widths exist */
1948 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
1950 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
1952 return AV_CODEC_ID_NONE;
/* integer formats: sflags bit (bps-1) selects signed vs unsigned */
1956 if (sflags & (1 << (bps - 1))) {
1959 return AV_CODEC_ID_PCM_S8;
1961 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
1963 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
1965 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
1967 return AV_CODEC_ID_NONE;
1972 return AV_CODEC_ID_PCM_U8;
1974 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
1976 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
1978 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
1980 return AV_CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for the tag of a codec id. */
1986 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
1989 for (i = 0; tags && tags[i]; i++) {
1990 int tag = ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for the codec id of a tag. */
1997 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
2000 for (i = 0; tags && tags[i]; i++) {
2001 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
2002 if (id != AV_CODEC_ID_NONE)
2005 return AV_CODEC_ID_NONE;
/* Fill in missing chapter end times: each open-ended chapter ends at the
 * start of the next chapter that begins after it, or at the container's
 * end (start_time + duration), or at its own start as a last resort. */
2008 static void compute_chapters_end(AVFormatContext *s)
2011 int64_t max_time = s->duration +
2012 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2014 for (i = 0; i < s->nb_chapters; i++)
2015 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2016 AVChapter *ch = s->chapters[i];
2017 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
/* clamp 'end' down to the earliest later-starting chapter */
2021 for (j = 0; j < s->nb_chapters; j++) {
2022 AVChapter *ch1 = s->chapters[j];
2023 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2025 if (j != i && next_start > ch->start && next_start < end)
2028 ch->end = (end == INT64_MAX) ? ch->start : end;
/* Enumerate "standard" frame rates (as numerators over the 12*1001 time
 * base): i < 60*12 yields (i+1)*1001; above that, a small fixed table of
 * common rates (24, 30, 60, 12, 15) scaled by 1000*12. */
2032 static int get_std_framerate(int i)
2035 return (i + 1) * 1001;
2037 return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
/* Probe the input: read packets (up to probesize bytes / max_analyze_duration)
 * and, if needed, open decoders and decode frames until every stream's codec
 * parameters are known.  Also gathers DTS statistics for average-framerate
 * estimation and finally estimates container timings and chapter ends.
 * Phases: per-stream setup; the main read/analyze loop; EOF decoder flush;
 * post-processing (fps guess, audio disposition); cleanup. */
2040 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2042 int i, count, ret, read_size, j;
2044 AVPacket pkt1, *pkt;
2045 int64_t old_offset = avio_tell(ic->pb);
2046 // new streams might appear, no options for those
2047 int orig_nb_streams = ic->nb_streams;
/* ---- phase 1: set up parsers and (tentatively) open decoders ---- */
2049 for (i = 0; i < ic->nb_streams; i++) {
2050 const AVCodec *codec;
2051 AVDictionary *thread_opt = NULL;
2052 st = ic->streams[i];
2054 // only for the split stuff
2055 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2056 st->parser = av_parser_init(st->codec->codec_id);
2057 if (st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser)
2058 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2060 codec = st->codec->codec ? st->codec->codec
2061 : avcodec_find_decoder(st->codec->codec_id);
2063 /* Force thread count to 1 since the H.264 decoder will not extract
2064 * SPS and PPS to extradata during multi-threaded decoding. */
2065 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2067 /* Ensure that subtitle_header is properly set. */
2068 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2069 && codec && !st->codec->codec)
2070 avcodec_open2(st->codec, codec,
2071 options ? &options[i] : &thread_opt);
2073 // Try to just open decoders, in case this is enough to get parameters.
2074 if (!has_codec_parameters(st)) {
2075 if (codec && !st->codec->codec)
2076 avcodec_open2(st->codec, codec,
2077 options ? &options[i] : &thread_opt);
2080 av_dict_free(&thread_opt);
2083 for (i = 0; i < ic->nb_streams; i++) {
2084 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2085 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
/* ---- phase 2: main probing loop (surrounding for(;;) elided here) ---- */
2091 if (ff_check_interrupt(&ic->interrupt_callback)) {
2093 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2097 /* check if one codec still needs to be handled */
2098 for (i = 0; i < ic->nb_streams; i++) {
2099 int fps_analyze_framecount = 20;
2101 st = ic->streams[i];
2102 if (!has_codec_parameters(st))
2104 /* If the timebase is coarse (like the usual millisecond precision
2105 * of mkv), we need to analyze more frames to reliably arrive at
2106 * the correct fps. */
2107 if (av_q2d(st->time_base) > 0.0005)
2108 fps_analyze_framecount *= 2;
2109 if (ic->fps_probe_size >= 0)
2110 fps_analyze_framecount = ic->fps_probe_size;
2111 /* variable fps and no guess at the real fps */
2112 if (!st->avg_frame_rate.num &&
2113 st->codec_info_nb_frames < fps_analyze_framecount &&
2114 st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2116 if (st->parser && st->parser->parser->split &&
2117 !st->codec->extradata)
2119 if (st->first_dts == AV_NOPTS_VALUE &&
2120 st->codec_info_nb_frames < ic->max_ts_probe &&
2121 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2122 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2125 if (i == ic->nb_streams) {
2126 /* NOTE: If the format has no header, then we need to read some
2127 * packets to get most of the streams, so we cannot stop here. */
2128 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2129 /* If we found the info for all the codecs, we can stop. */
2131 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2135 /* We did not get all the codec info, but we read too much data. */
2136 if (read_size >= ic->probesize) {
2138 av_log(ic, AV_LOG_DEBUG,
2139 "Probe buffer size limit %d reached\n", ic->probesize);
2143 /* NOTE: A new stream can be added there if no header in file
2144 * (AVFMTCTX_NOHEADER). */
2145 ret = read_frame_internal(ic, &pkt1);
2146 if (ret == AVERROR(EAGAIN))
/* ---- phase 3 (on EOF): flush decoders with an empty packet ---- */
2151 AVPacket empty_pkt = { 0 };
2153 av_init_packet(&empty_pkt);
2155 /* We could not have all the codec parameters before EOF. */
2157 for (i = 0; i < ic->nb_streams; i++) {
2158 st = ic->streams[i];
2160 /* flush the decoders */
2161 if (st->info->found_decoder == 1) {
2163 err = try_decode_frame(st, &empty_pkt,
2164 (options && i < orig_nb_streams)
2165 ? &options[i] : NULL);
2166 } while (err > 0 && !has_codec_parameters(st));
2170 av_log(ic, AV_LOG_WARNING,
2171 "decoding for stream %d failed\n", st->index);
2172 } else if (!has_codec_parameters(st)) {
2174 avcodec_string(buf, sizeof(buf), st->codec, 0);
2175 av_log(ic, AV_LOG_WARNING,
2176 "Could not find codec parameters (%s)\n", buf);
/* buffer the packet for later delivery unless NOBUFFER is set */
2184 if (ic->flags & AVFMT_FLAG_NOBUFFER) {
2187 pkt = add_to_pktbuf(&ic->internal->packet_buffer, &pkt1,
2188 &ic->internal->packet_buffer_end);
2189 if ((ret = av_dup_packet(pkt)) < 0)
2190 goto find_stream_info_err;
2193 read_size += pkt->size;
2195 st = ic->streams[pkt->stream_index];
/* DTS bookkeeping for the average-framerate estimate below */
2196 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2197 /* check for non-increasing dts */
2198 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2199 st->info->fps_last_dts >= pkt->dts) {
2200 av_log(ic, AV_LOG_WARNING,
2201 "Non-increasing DTS in stream %d: packet %d with DTS "
2202 "%"PRId64", packet %d with DTS %"PRId64"\n",
2203 st->index, st->info->fps_last_dts_idx,
2204 st->info->fps_last_dts, st->codec_info_nb_frames,
2206 st->info->fps_first_dts =
2207 st->info->fps_last_dts = AV_NOPTS_VALUE;
2209 /* Check for a discontinuity in dts. If the difference in dts
2210 * is more than 1000 times the average packet duration in the
2211 * sequence, we treat it as a discontinuity. */
2212 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2213 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2214 (pkt->dts - st->info->fps_last_dts) / 1000 >
2215 (st->info->fps_last_dts - st->info->fps_first_dts) /
2216 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2217 av_log(ic, AV_LOG_WARNING,
2218 "DTS discontinuity in stream %d: packet %d with DTS "
2219 "%"PRId64", packet %d with DTS %"PRId64"\n",
2220 st->index, st->info->fps_last_dts_idx,
2221 st->info->fps_last_dts, st->codec_info_nb_frames,
2223 st->info->fps_first_dts =
2224 st->info->fps_last_dts = AV_NOPTS_VALUE;
2227 /* update stored dts values */
2228 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2229 st->info->fps_first_dts = pkt->dts;
2230 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2232 st->info->fps_last_dts = pkt->dts;
2233 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2235 /* check max_analyze_duration */
2236 if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2237 AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2238 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached\n",
2239 ic->max_analyze_duration);
2240 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2241 av_packet_unref(pkt);
/* harvest extradata via the parser's split callback */
2245 if (st->parser && st->parser->parser->split && !st->codec->extradata) {
2246 int i = st->parser->parser->split(st->codec, pkt->data, pkt->size);
2247 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2248 st->codec->extradata_size = i;
2249 st->codec->extradata = av_mallocz(st->codec->extradata_size +
2250 AV_INPUT_BUFFER_PADDING_SIZE);
2251 if (!st->codec->extradata)
2252 return AVERROR(ENOMEM);
2253 memcpy(st->codec->extradata, pkt->data,
2254 st->codec->extradata_size);
2258 /* If still no information, we try to open the codec and to
2259 * decompress the frame. We try to avoid that in most cases as
2260 * it takes longer and uses more memory. For MPEG-4, we need to
2261 * decompress for QuickTime.
2263 * If AV_CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2264 * least one frame of codec data, this makes sure the codec initializes
2265 * the channel configuration and does not only trust the values from
2267 try_decode_frame(st, pkt,
2268 (options && i < orig_nb_streams) ? &options[i] : NULL);
2270 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2271 av_packet_unref(pkt);
2273 st->codec_info_nb_frames++;
/* ---- phase 4: post-processing ---- */
2277 // close codecs which were opened in try_decode_frame()
2278 for (i = 0; i < ic->nb_streams; i++) {
2279 st = ic->streams[i];
2280 avcodec_close(st->codec);
2282 for (i = 0; i < ic->nb_streams; i++) {
2283 st = ic->streams[i];
2284 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2285 /* estimate average framerate if not set by demuxer */
2286 if (!st->avg_frame_rate.num &&
2287 st->info->fps_last_dts != st->info->fps_first_dts) {
2288 int64_t delta_dts = st->info->fps_last_dts -
2289 st->info->fps_first_dts;
2290 int delta_packets = st->info->fps_last_dts_idx -
2291 st->info->fps_first_dts_idx;
2293 double best_error = 0.01;
/* overflow guard before the av_reduce() products below */
2295 if (delta_dts >= INT64_MAX / st->time_base.num ||
2296 delta_packets >= INT64_MAX / st->time_base.den ||
2299 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2300 delta_packets * (int64_t) st->time_base.den,
2301 delta_dts * (int64_t) st->time_base.num, 60000);
2303 /* Round guessed framerate to a "standard" framerate if it's
2304 * within 1% of the original estimate. */
2305 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
2306 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
2307 double error = fabs(av_q2d(st->avg_frame_rate) /
2308 av_q2d(std_fps) - 1);
2310 if (error < best_error) {
2312 best_fps = std_fps.num;
2316 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2317 best_fps, 12 * 1001, INT_MAX);
2319 } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2320 if (!st->codec->bits_per_coded_sample)
2321 st->codec->bits_per_coded_sample =
2322 av_get_bits_per_sample(st->codec->codec_id);
2323 // set stream disposition based on audio service type
2324 switch (st->codec->audio_service_type) {
2325 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2326 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
2328 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2329 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
2331 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2332 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
2334 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2335 st->disposition = AV_DISPOSITION_COMMENT;
2337 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2338 st->disposition = AV_DISPOSITION_KARAOKE;
2344 estimate_timings(ic, old_offset);
2346 compute_chapters_end(ic);
/* cleanup: reset thread counts and free per-stream probe info */
2348 find_stream_info_err:
2349 for (i = 0; i < ic->nb_streams; i++) {
2350 ic->streams[i]->codec->thread_count = 0;
2351 av_freep(&ic->streams[i]->info);
/* Return the first program that contains stream index s, or fall through
 * (NULL return elided) when no program references it. */
2356 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2360 for (i = 0; i < ic->nb_programs; i++)
2361 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2362 if (ic->programs[i]->stream_index[j] == s)
2363 return ic->programs[i];
/* Pick the "best" stream of the given media type: prefer streams related to
 * related_stream's program, skip accessibility renditions, require a decoder
 * when decoder_ret is wanted, and rank by codec_info_nb_frames.  Returns the
 * stream index or AVERROR_STREAM_NOT_FOUND / AVERROR_DECODER_NOT_FOUND. */
2367 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
2368 int wanted_stream_nb, int related_stream,
2369 AVCodec **decoder_ret, int flags)
2371 int i, nb_streams = ic->nb_streams;
2372 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2373 unsigned *program = NULL;
2374 AVCodec *decoder = NULL, *best_decoder = NULL;
/* restrict the search to the related stream's program, if any */
2376 if (related_stream >= 0 && wanted_stream_nb < 0) {
2377 AVProgram *p = find_program_from_stream(ic, related_stream);
2379 program = p->stream_index;
2380 nb_streams = p->nb_stream_indexes;
2383 for (i = 0; i < nb_streams; i++) {
2384 int real_stream_index = program ? program[i] : i;
2385 AVStream *st = ic->streams[real_stream_index];
2386 AVCodecContext *avctx = st->codec;
2387 if (avctx->codec_type != type)
2389 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
/* skip hearing/visually-impaired renditions as "best" candidates */
2391 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
2392 AV_DISPOSITION_VISUAL_IMPAIRED))
2395 decoder = avcodec_find_decoder(st->codec->codec_id);
2398 ret = AVERROR_DECODER_NOT_FOUND;
2402 if (best_count >= st->codec_info_nb_frames)
2404 best_count = st->codec_info_nb_frames;
2405 ret = real_stream_index;
2406 best_decoder = decoder;
2407 if (program && i == nb_streams - 1 && ret < 0) {
2409 nb_streams = ic->nb_streams;
2410 /* no related stream found, try again with everything */
2415 *decoder_ret = best_decoder;
2419 /*******************************************************/
/* Resume a paused (network) stream: demuxer callback first, then the
 * protocol-level avio_pause(); ENOSYS when neither is available. */
2421 int av_read_play(AVFormatContext *s)
2423 if (s->iformat->read_play)
2424 return s->iformat->read_play(s);
2426 return avio_pause(s->pb, 0);
2427 return AVERROR(ENOSYS);
/* Pause a (network) stream: mirror of av_read_play(). */
2430 int av_read_pause(AVFormatContext *s)
2432 if (s->iformat->read_pause)
2433 return s->iformat->read_pause(s);
2435 return avio_pause(s->pb, 1);
2436 return AVERROR(ENOSYS);
/* Free an AVFormatContext and everything it owns: private options, every
 * stream (side data, parser, attached picture, metadata, probe data, index,
 * codec extradata), programs, chapters, global metadata and internals. */
2439 void avformat_free_context(AVFormatContext *s)
2448 if (s->iformat && s->iformat->priv_class && s->priv_data)
2449 av_opt_free(s->priv_data);
2451 for (i = 0; i < s->nb_streams; i++) {
2452 /* free all data in a stream component */
2455 for (j = 0; j < st->nb_side_data; j++)
2456 av_freep(&st->side_data[j].data);
2457 av_freep(&st->side_data);
2458 st->nb_side_data = 0;
2461 av_parser_close(st->parser);
2463 if (st->attached_pic.data)
2464 av_packet_unref(&st->attached_pic);
2465 av_dict_free(&st->metadata);
2466 av_freep(&st->probe_data.buf);
2467 av_free(st->index_entries);
2468 av_free(st->codec->extradata);
2469 av_free(st->codec->subtitle_header);
2471 av_free(st->priv_data);
/* programs are freed back-to-front so the array can shrink safely */
2475 for (i = s->nb_programs - 1; i >= 0; i--) {
2476 av_dict_free(&s->programs[i]->metadata);
2477 av_freep(&s->programs[i]->stream_index);
2478 av_freep(&s->programs[i]);
2480 av_freep(&s->programs);
2481 av_freep(&s->priv_data);
2482 while (s->nb_chapters--) {
2483 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2484 av_free(s->chapters[s->nb_chapters]);
2486 av_freep(&s->chapters);
2487 av_dict_free(&s->metadata);
2488 av_freep(&s->streams);
2489 av_freep(&s->internal);
/* Close an input context: flush queued packets, call the demuxer's
 * read_close, free the context, and close pb unless the format is NOFILE
 * or the caller supplied custom I/O (AVFMT_FLAG_CUSTOM_IO). */
2493 void avformat_close_input(AVFormatContext **ps)
2495 AVFormatContext *s = *ps;
2496 AVIOContext *pb = s->pb;
2498 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2499 (s->flags & AVFMT_FLAG_CUSTOM_IO))
2502 flush_packet_queue(s);
2505 if (s->iformat->read_close)
2506 s->iformat->read_close(s);
2508 avformat_free_context(s);
/* Allocate a new AVStream, append it to s->streams and initialize its codec
 * context (for codec c) plus all timestamp-related fields to "unknown".
 * Returns the stream, or NULL on allocation failure (partial allocations
 * are released on the elided error paths). */
2515 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
2520 if (av_reallocp_array(&s->streams, s->nb_streams + 1,
2521 sizeof(*s->streams)) < 0) {
2526 st = av_mallocz(sizeof(AVStream));
2529 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2534 st->codec = avcodec_alloc_context3(c);
2541 /* no default bitrate if decoding */
2542 st->codec->bit_rate = 0;
2544 /* default pts setting is MPEG-like */
2545 avpriv_set_pts_info(st, 33, 1, 90000);
2548 st->index = s->nb_streams;
2549 st->start_time = AV_NOPTS_VALUE;
2550 st->duration = AV_NOPTS_VALUE;
2551 /* we set the current DTS to 0 so that formats without any timestamps
2552 * but durations get some timestamps, formats with some unknown
2553 * timestamps have their first few packets buffered and the
2554 * timestamps corrected before they are returned to the user */
2556 st->first_dts = AV_NOPTS_VALUE;
2557 st->probe_packets = MAX_PROBE_PACKETS;
2559 st->last_IP_pts = AV_NOPTS_VALUE;
2560 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
2561 st->pts_buffer[i] = AV_NOPTS_VALUE;
2563 st->sample_aspect_ratio = (AVRational) { 0, 1 };
2565 st->info->fps_first_dts = AV_NOPTS_VALUE;
2566 st->info->fps_last_dts = AV_NOPTS_VALUE;
2568 s->streams[s->nb_streams++] = st;
/* Return the program with the given id, creating and registering a new one
 * (with discard = AVDISCARD_NONE) if it does not exist yet. */
2572 AVProgram *av_new_program(AVFormatContext *ac, int id)
2574 AVProgram *program = NULL;
2577 av_log(ac, AV_LOG_TRACE, "new_program: id=0x%04x\n", id);
2579 for (i = 0; i < ac->nb_programs; i++)
2580 if (ac->programs[i]->id == id)
2581 program = ac->programs[i];
2584 program = av_mallocz(sizeof(AVProgram));
2587 dynarray_add(&ac->programs, &ac->nb_programs, program);
2588 program->discard = AVDISCARD_NONE;
/* Return the chapter with the given id, creating it if necessary, and
 * (re)set its title metadata, time base and start/end times. */
2595 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
2596 int64_t start, int64_t end, const char *title)
2598 AVChapter *chapter = NULL;
2601 for (i = 0; i < s->nb_chapters; i++)
2602 if (s->chapters[i]->id == id)
2603 chapter = s->chapters[i];
2606 chapter = av_mallocz(sizeof(AVChapter));
2609 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2611 av_dict_set(&chapter->metadata, "title", title, 0);
2613 chapter->time_base = time_base;
2614 chapter->start = start;
/**
 * Add stream index idx to the program identified by progid, unless the
 * index is out of range or already present in that program's list.
 */
2620 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
2623 AVProgram *program = NULL;
/* reject indices that do not refer to an existing stream */
2625 if (idx >= ac->nb_streams) {
2626 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
2630 for (i = 0; i < ac->nb_programs; i++) {
2631 if (ac->programs[i]->id != progid)
2633 program = ac->programs[i];
/* skip the append if idx is already in this program's index list */
2634 for (j = 0; j < program->nb_stream_indexes; j++)
2635 if (program->stream_index[j] == idx)
2638 if (av_reallocp_array(&program->stream_index,
2639 program->nb_stream_indexes + 1,
2640 sizeof(*program->stream_index)) < 0) {
/* av_reallocp_array freed the array on failure; keep the count in sync */
2641 program->nb_stream_indexes = 0;
2644 program->stream_index[program->nb_stream_indexes++] = idx;
/**
 * Return the current wall-clock time as an NTP-epoch timestamp in
 * microseconds: av_gettime() truncated to millisecond precision, plus
 * the Unix-to-NTP epoch offset NTP_OFFSET_US.
 */
2649 uint64_t ff_ntp_time(void)
2651 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/**
 * Build a frame filename in buf by expanding a printf-like "%d" /
 * "%0Nd" directive in path with 'number'. Based on the visible code a
 * single numeric directive is expected (percentd_found tracks whether
 * one was seen); exact escape handling is in lines elided here.
 */
2654 int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
2657 char *q, buf1[20], c;
2658 int nd, len, percentd_found;
/* accumulate the field width digits of a "%0Nd"-style directive */
2670 while (av_isdigit(*p))
2671 nd = nd * 10 + *p++ - '0';
2673 } while (av_isdigit(c));
/* format the frame number with the requested zero-padded width */
2682 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
/* bail out if the expansion would overflow the output buffer */
2684 if ((q - buf + len) > buf_size - 1)
2686 memcpy(q, buf1, len);
2694 if ((q - buf) < buf_size - 1)
/* a path without any %d directive is an error for this API */
2698 if (!percentd_found)
/**
 * Split a URL of the form proto://user:pass@host:port/path into its
 * components. Any output buffer may be 0-sized to skip that component;
 * outputs are truncated to the given sizes via av_strlcpy. Supports
 * bracketed IPv6 literals ([addr]:port) and '?'-delimited paths.
 */
2707 void av_url_split(char *proto, int proto_size,
2708 char *authorization, int authorization_size,
2709 char *hostname, int hostname_size,
2710 int *port_ptr, char *path, int path_size, const char *url)
2712 const char *p, *ls, *at, *col, *brk;
/* start with empty strings so absent components are well-defined */
2718 if (authorization_size > 0)
2719 authorization[0] = 0;
2720 if (hostname_size > 0)
2725 /* parse protocol */
2726 if ((p = strchr(url, ':'))) {
/* FFMIN bound copies only the scheme text before the ':' */
2727 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
2734 /* no protocol means plain filename */
2735 av_strlcpy(path, url, path_size);
2739 /* separate path from hostname */
2740 ls = strchr(p, '/');
2742 ls = strchr(p, '?');
2744 av_strlcpy(path, ls, path_size);
/* no path: treat the end of the string as the host boundary */
2746 ls = &p[strlen(p)]; // XXX
2748 /* the rest is hostname, use that to parse auth/port */
2750 /* authorization (user[:pass]@hostname) */
2751 if ((at = strchr(p, '@')) && at < ls) {
2752 av_strlcpy(authorization, p,
2753 FFMIN(authorization_size, at + 1 - p));
2754 p = at + 1; /* skip '@' */
/* bracketed host, e.g. [::1]:8080 — copy the text inside the brackets */
2757 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
2759 av_strlcpy(hostname, p + 1,
2760 FFMIN(hostname_size, brk - p));
2761 if (brk[1] == ':' && port_ptr)
2762 *port_ptr = atoi(brk + 2);
/* plain host with an explicit :port before the path */
2763 } else if ((col = strchr(p, ':')) && col < ls) {
2764 av_strlcpy(hostname, p,
2765 FFMIN(col + 1 - p, hostname_size));
2767 *port_ptr = atoi(col + 1);
/* host only, no port */
2769 av_strlcpy(hostname, p,
2770 FFMIN(ls + 1 - p, hostname_size));
/**
 * Write the hex representation of the s bytes at src into buff (two
 * characters per input byte), using lowercase digits if requested.
 * NOTE(review): the middle entries of the digit tables and the return
 * statement are elided in this excerpt.
 */
2774 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
2777 static const char hex_table_uc[16] = { '0', '1', '2', '3',
2780 'C', 'D', 'E', 'F' };
2781 static const char hex_table_lc[16] = { '0', '1', '2', '3',
2784 'c', 'd', 'e', 'f' };
2785 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
/* high nibble first, then low nibble, per input byte */
2787 for (i = 0; i < s; i++) {
2788 buff[i * 2] = hex_table[src[i] >> 4];
2789 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
/**
 * Parse a hex string at p into bytes stored in data, skipping leading
 * whitespace (SPACE_CHARS) and accepting upper/lowercase digits (input
 * is uppercased before classification). The pairing of nibbles into
 * bytes and the return value are in lines elided here.
 */
2795 int ff_hex_to_data(uint8_t *data, const char *p)
2802 p += strspn(p, SPACE_CHARS);
/* normalize to uppercase so one comparison chain handles both cases */
2805 c = av_toupper((unsigned char) *p++);
2806 if (c >= '0' && c <= '9')
2808 else if (c >= 'A' && c <= 'F')
/**
 * Set the stream's time base to pts_num/pts_den (reduced to lowest
 * terms, clamped to INT_MAX) and record the pts wrap bit count.
 * Invalid (non-positive) reduced time bases are rejected with an error
 * log and leave the stream untouched.
 */
2823 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
2824 unsigned int pts_num, unsigned int pts_den)
/* av_reduce returns non-zero when the reduction was exact */
2827 if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
2828 if (new_tb.num != pts_num)
2829 av_log(NULL, AV_LOG_DEBUG,
2830 "st:%d removing common factor %d from timebase\n",
2831 s->index, pts_num / new_tb.num);
/* inexact reduction: the requested timebase overflowed INT_MAX */
2833 av_log(NULL, AV_LOG_WARNING,
2834 "st:%d has too large timebase, reducing\n", s->index);
2836 if (new_tb.num <= 0 || new_tb.den <= 0) {
2837 av_log(NULL, AV_LOG_ERROR,
2838 "Ignoring attempt to set invalid timebase for st:%d\n",
2842 s->time_base = new_tb;
2843 s->pts_wrap_bits = pts_wrap_bits;
/**
 * Parse a string of key=value pairs separated by whitespace/commas.
 * For each key, callback_get_buf is asked for a destination buffer;
 * the value (optionally double-quoted) is then copied into it, bounded
 * by dest_end. A NULL/too-small dest simply discards the value.
 */
2846 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
2849 const char *ptr = str;
2851 /* Parse key=value pairs. */
2854 char *dest = NULL, *dest_end;
2855 int key_len, dest_len = 0;
2857 /* Skip whitespace and potential commas. */
2858 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
/* a pair without '=' terminates parsing */
2865 if (!(ptr = strchr(key, '=')))
2868 key_len = ptr - key;
/* let the caller choose where (and whether) to store this value */
2870 callback_get_buf(context, key, key_len, &dest, &dest_len);
/* reserve one byte for the terminating NUL */
2871 dest_end = dest + dest_len - 1;
/* quoted value: copy until the closing '"' */
2875 while (*ptr && *ptr != '\"') {
2879 if (dest && dest < dest_end)
2883 if (dest && dest < dest_end)
/* unquoted value: copy until whitespace or comma */
2891 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
2892 if (dest && dest < dest_end)
/**
 * Return the index of the first stream in s whose id matches 'id'
 * (the "not found" return value is in a line elided here).
 */
2900 int ff_find_stream_index(AVFormatContext *s, int id)
2903 for (i = 0; i < s->nb_streams; i++)
2904 if (s->streams[i]->id == id)
/**
 * Convert an ISO 8601 date string to a Unix timestamp (UTC, via
 * av_timegm). Two layouts are tried: space-separated date/time and the
 * 'T'-separated form; whichever parse succeeds is used.
 */
2909 int64_t ff_iso8601_to_unix_time(const char *datestr)
2911 struct tm time1 = { 0 }, time2 = { 0 };
2912 const char *ret1, *ret2;
/* av_small_strptime tolerates the spaces in these format strings */
2913 ret1 = av_small_strptime(datestr, "%Y - %m - %d %T", &time1);
2914 ret2 = av_small_strptime(datestr, "%Y - %m - %dT%T", &time2);
2916 return av_timegm(&time2);
2918 return av_timegm(&time1);
/**
 * Check whether the muxer ofmt can store codec_id: delegate to the
 * muxer's query_codec callback if present, otherwise consult its codec
 * tag table, otherwise compare against its default codecs. Returns
 * AVERROR_PATCHWELCOME when no answer can be given.
 */
2921 int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
2925 if (ofmt->query_codec)
2926 return ofmt->query_codec(codec_id, std_compliance);
/* !! normalizes the tag lookup to a 0/1 answer */
2927 else if (ofmt->codec_tag)
2928 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
2929 else if (codec_id == ofmt->video_codec ||
2930 codec_id == ofmt->audio_codec ||
2931 codec_id == ofmt->subtitle_codec)
2934 return AVERROR_PATCHWELCOME;
/**
 * Initialize the networking layer: mark global network init as done,
 * then perform the actual platform init via ff_network_init(),
 * propagating its error code on failure.
 */
2937 int avformat_network_init(void)
2941 ff_network_inited_globally = 1;
2942 if ((ret = ff_network_init()) < 0)
/* Undo avformat_network_init(); body elided in this excerpt. */
2949 int avformat_network_deinit(void)
/**
 * Attach an AV_PKT_DATA_PARAM_CHANGE side-data blob to pkt describing
 * the non-zero parameters among channels / channel_layout /
 * sample_rate / width+height. The blob is little-endian: a 32-bit
 * flags word followed by one field per flag set.
 */
2958 int ff_add_param_change(AVPacket *pkt, int32_t channels,
2959 uint64_t channel_layout, int32_t sample_rate,
2960 int32_t width, int32_t height)
2966 return AVERROR(EINVAL);
/* each present parameter contributes a flag bit (size accounting elided) */
2969 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
2971 if (channel_layout) {
2973 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
2977 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
2979 if (width || height) {
2981 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
2983 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
2985 return AVERROR(ENOMEM);
/* serialize flags first, then the fields in the same order as the flags */
2986 bytestream_put_le32(&data, flags);
2988 bytestream_put_le32(&data, channels);
2990 bytestream_put_le64(&data, channel_layout);
2992 bytestream_put_le32(&data, sample_rate);
2993 if (width || height) {
2994 bytestream_put_le32(&data, width);
2995 bytestream_put_le32(&data, height);
/**
 * Install canned H.264 SPS/PPS extradata for AVC-Intra streams, which
 * do not carry parameter sets in-band. The table is chosen by coded
 * width (1920 = AVC-I 100 1080-line, 1440 = AVC-I 50 1080i,
 * 1280 = AVC-I 100 720p) and, for 1920, by field order (progressive
 * vs. interlaced). Any previous extradata is replaced.
 * NOTE(review): interior bytes of each table are elided in this
 * excerpt; the blobs are Annex-B start-code-prefixed SPS + PPS NALs.
 */
3000 int ff_generate_avci_extradata(AVStream *st)
3002 static const uint8_t avci100_1080p_extradata[] = {
3004 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3005 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3006 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3007 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
3008 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
3009 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
3010 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
3011 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
3012 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3014 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3017 static const uint8_t avci100_1080i_extradata[] = {
3019 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3020 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3021 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3022 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
3023 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
3024 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
3025 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
3026 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
3027 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
3028 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
3029 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
3031 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3034 static const uint8_t avci50_1080i_extradata[] = {
3036 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
3037 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
3038 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
3039 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
3040 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
3041 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
3042 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
3043 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
3044 0x81, 0x13, 0xf7, 0xff, 0x80, 0x01, 0x80, 0x02,
3045 0x71, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
3046 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
3048 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
3051 static const uint8_t avci100_720p_extradata[] = {
3053 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3054 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
3055 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
3056 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
3057 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
3058 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
3059 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
3060 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
3061 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
3062 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
3064 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
3068 const uint8_t *data = NULL;
/* select the parameter-set blob matching the stream's geometry */
3071 if (st->codec->width == 1920) {
3072 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
3073 data = avci100_1080p_extradata;
3074 size = sizeof(avci100_1080p_extradata);
3076 data = avci100_1080i_extradata;
3077 size = sizeof(avci100_1080i_extradata);
3079 } else if (st->codec->width == 1440) {
3080 data = avci50_1080i_extradata;
3081 size = sizeof(avci50_1080i_extradata);
3082 } else if (st->codec->width == 1280) {
3083 data = avci100_720p_extradata;
3084 size = sizeof(avci100_720p_extradata);
/* replace any existing extradata with a zero-padded copy of the table */
3090 av_freep(&st->codec->extradata);
3091 st->codec->extradata_size = 0;
3092 st->codec->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
3093 if (!st->codec->extradata)
3094 return AVERROR(ENOMEM);
3096 memcpy(st->codec->extradata, data, size);
3097 st->codec->extradata_size = size;
/**
 * Return a pointer to the stream-level side data of the given type, or
 * (in an elided line) NULL if absent; *size receives its length when a
 * size pointer is supplied by the caller.
 */
3102 uint8_t *av_stream_get_side_data(AVStream *st, enum AVPacketSideDataType type,
/* linear scan — nb_side_data is expected to be small */
3107 for (i = 0; i < st->nb_side_data; i++) {
3108 if (st->side_data[i].type == type) {
3110 *size = st->side_data[i].size;
3111 return st->side_data[i].data;
3117 uint8_t *ff_stream_new_side_data(AVStream *st, enum AVPacketSideDataType type,
3120 AVPacketSideData *sd, *tmp;
3122 uint8_t *data = av_malloc(size);
3127 for (i = 0; i < st->nb_side_data; i++) {
3128 sd = &st->side_data[i];
3130 if (sd->type == type) {
3131 av_freep(&sd->data);
3138 tmp = av_realloc_array(st->side_data, st->nb_side_data + 1, sizeof(*tmp));
3144 st->side_data = tmp;
3147 sd = &st->side_data[st->nb_side_data - 1];