2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
42 #include "audiointerleave.h"
55 * various utility functions for use within Libav
58 unsigned avformat_version(void)
60 return LIBAVFORMAT_VERSION_INT;
63 const char *avformat_configuration(void)
65 return LIBAV_CONFIGURATION;
68 const char *avformat_license(void)
70 #define LICENSE_PREFIX "libavformat license: "
71 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
74 /* an arbitrarily chosen "sane" max packet size -- 50M */
75 #define SANE_CHUNK_SIZE (50000000)
77 /* Read the data in sane-sized chunks and append to pkt.
78 * Return the number of bytes read or an error. */
/* Read data from s in SANE_CHUNK_SIZE-bounded chunks and append it to pkt.
 * Returns the number of bytes appended, or a negative error code when
 * nothing could be appended. (Excerpt: the surrounding read loop and the
 * fail label structure are elided here.) */
static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
    int64_t chunk_size = size;
    int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
    int orig_size = pkt->size;
    /* packet size before this chunk is appended; used as the write offset */
    int prev_size = pkt->size;
    /* When the caller requests a lot of data, limit it to the amount
     * left in file or SANE_CHUNK_SIZE when it is not known. */
    if (size > SANE_CHUNK_SIZE) {
        int64_t filesize = avio_size(s) - avio_tell(s);
        chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
    read_size = FFMIN(size, chunk_size);
    ret = av_grow_packet(pkt, read_size);
    ret = avio_read(s, pkt->data + prev_size, read_size);
    if (ret != read_size) {
        /* short read: trim the packet back to the bytes actually read */
        av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
    /* NOTE(review): unref presumably only runs when no bytes were appended
     * at all — confirm against the elided control flow */
    av_packet_unref(pkt);
    return pkt->size > orig_size ? pkt->size - orig_size : ret;
/**
 * Read up to size bytes from s into a fresh packet.
 * Records the current byte position in pkt->pos before reading so that
 * generic-index/seeking code can locate the packet later.
 */
int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
    pkt->pos = avio_tell(s);
    return append_packet_chunked(s, pkt, size);
/**
 * Append up to size bytes to an existing packet.
 * Degrades to av_get_packet() when pkt is still empty — the guarding
 * condition between the two returns is elided in this excerpt
 * (presumably a check on pkt->size; TODO confirm in full source).
 */
int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
    return av_get_packet(s, pkt, size);
    return append_packet_chunked(s, pkt, size);
/**
 * Return nonzero if filename contains a valid frame-number pattern,
 * i.e. av_get_frame_filename() can expand it for frame 1.
 * (The scratch buffer declaration is elided in this excerpt.)
 */
int av_filename_number_test(const char *filename)
    (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
/* Re-run content probing on the data buffered for st and, when one of the
 * raw formats below is recognised, set the stream's codec id and media
 * type accordingly. */
static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
                                     AVProbeData *pd, int score)
    /* maps raw demuxer names to the elementary codec they carry */
    static const struct {
        enum AVMediaType type;
        { "aac",       AV_CODEC_ID_AAC,        AVMEDIA_TYPE_AUDIO },
        { "ac3",       AV_CODEC_ID_AC3,        AVMEDIA_TYPE_AUDIO },
        { "dts",       AV_CODEC_ID_DTS,        AVMEDIA_TYPE_AUDIO },
        { "eac3",      AV_CODEC_ID_EAC3,       AVMEDIA_TYPE_AUDIO },
        { "h264",      AV_CODEC_ID_H264,       AVMEDIA_TYPE_VIDEO },
        { "latm",      AV_CODEC_ID_AAC_LATM,   AVMEDIA_TYPE_AUDIO },
        { "m4v",       AV_CODEC_ID_MPEG4,      AVMEDIA_TYPE_VIDEO },
        { "mp3",       AV_CODEC_ID_MP3,        AVMEDIA_TYPE_AUDIO },
        { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
    /* probe with minimum score 1 so any recognised format wins */
    AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
    av_log(s, AV_LOG_DEBUG,
           "Probe with size=%d, packets=%d detected %s with score=%d\n",
           pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
    /* translate the detected raw format into a codec id/type */
    for (i = 0; fmt_id_type[i].name; i++) {
        if (!strcmp(fmt->name, fmt_id_type[i].name)) {
            st->codec->codec_id   = fmt_id_type[i].id;
            st->codec->codec_type = fmt_id_type[i].type;
179 /************************************************************/
180 /* input media file */
182 /* Open input file and probe the format if necessary. */
/* Open input file and probe the format if necessary.
 * Three cases: a caller-supplied AVIOContext (custom I/O), AVFMT_NOFILE
 * formats (no I/O context at all), and regular files opened here via
 * avio_open2(). */
static int init_input(AVFormatContext *s, const char *filename,
                      AVDictionary **options)
    AVProbeData pd = { filename, NULL, 0 };
    /* the user supplied s->pb: mark it so we never close it ourselves */
    s->flags |= AVFMT_FLAG_CUSTOM_IO;
    return av_probe_input_buffer(s->pb, &s->iformat, filename,
    else if (s->iformat->flags & AVFMT_NOFILE)
        /* a NOFILE format cannot be combined with caller-supplied I/O */
        return AVERROR(EINVAL);
    /* NOFILE format, or format identified from the filename alone:
     * done without opening any I/O context */
    if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
        (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
    if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
                          &s->interrupt_callback, options)) < 0)
    /* format still unknown: probe from the freshly opened stream */
    return av_probe_input_buffer(s->pb, &s->iformat, filename,
/* Append pkt as a new node at the tail of the given packet list.
 * With ref != 0 the packet is reference-counted into the node via
 * av_packet_ref(); the non-ref branch is elided here (presumably a
 * plain struct move — TODO confirm in full source).
 * Returns 0 on success or a negative AVERROR code. */
static int add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
                         AVPacketList **plast_pktl, int ref)
    AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
    /* node allocation failed */
    return AVERROR(ENOMEM);
    if ((ret = av_packet_ref(&pktl->pkt, pkt)) < 0) {
    /* non-empty list: link after the current tail */
    (*plast_pktl)->next = pktl;
    /* empty list: the new node becomes the head */
    *packet_buffer = pktl;
    /* Add the packet in the buffered packet list. */
/* Queue every stream's attached picture (e.g. cover art) into the raw
 * packet buffer so it is delivered once through av_read_frame(). */
static int queue_attached_pictures(AVFormatContext *s)
    for (i = 0; i < s->nb_streams; i++)
        if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
            s->streams[i]->discard < AVDISCARD_ALL) {
            /* ref == 1: the stream keeps ownership of attached_pic,
             * the queue only holds a reference */
            ret = add_to_pktbuf(&s->internal->raw_packet_buffer,
                                &s->streams[i]->attached_pic,
                                &s->internal->raw_packet_buffer_end, 1);
/**
 * Open an input stream and read the header.
 * Allocates *ps if the caller passed NULL, applies options, opens/probes
 * the input, allocates demuxer private data, reads ID3v2 metadata and the
 * container header, and queues attached pictures.
 * On failure the context is freed and *ps is reset (cleanup labels are
 * elided in this excerpt).
 */
int avformat_open_input(AVFormatContext **ps, const char *filename,
                        AVInputFormat *fmt, AVDictionary **options)
    AVFormatContext *s = *ps;
    AVDictionary *tmp = NULL;
    ID3v2ExtraMeta *id3v2_extra_meta = NULL;
    /* allocate a context when the caller did not supply one */
    if (!s && !(s = avformat_alloc_context()))
        return AVERROR(ENOMEM);
    /* work on a copy so unconsumed options can be returned to the caller */
    av_dict_copy(&tmp, *options, 0);
    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
    if ((ret = init_input(s, filename, &tmp)) < 0)
    /* Check filename in case an image number is expected. */
    if (s->iformat->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            ret = AVERROR(EINVAL);
    s->duration = s->start_time = AV_NOPTS_VALUE;
    av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
    /* Allocate private data. */
    if (s->iformat->priv_data_size > 0) {
        if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
            ret = AVERROR(ENOMEM);
        if (s->iformat->priv_class) {
            /* AVOptions-enabled demuxer: install class and apply options */
            *(const AVClass **) s->priv_data = s->iformat->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
    /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
    ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
    if (s->iformat->read_header)
        if ((ret = s->iformat->read_header(s)) < 0)
    /* attach ID3v2 embedded pictures before discarding the extra meta */
    if (id3v2_extra_meta &&
        (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    if ((ret = queue_attached_pictures(s)) < 0)
    /* remember where the payload starts, for seeking back later */
    if (s->pb && !s->internal->data_offset)
        s->internal->data_offset = avio_tell(s->pb);
    s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
    av_dict_free(options);
    /* error path: release metadata, I/O (unless caller-owned) and context */
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
    avformat_free_context(s);
341 /*******************************************************/
/* Accumulate pkt's payload into st->probe_data and periodically re-run
 * codec probing; pkt == NULL flushes (last chance to identify the codec
 * before giving up). */
static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
    if (st->codec->codec_id == AV_CODEC_ID_PROBE) {
        AVProbeData *pd = &st->probe_data;
        av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
        if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
                               AVPROBE_PADDING_SIZE)) < 0)
        memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
        pd->buf_size += pkt->size;
        /* probers may read slightly past the end: keep padding zeroed */
        memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
        /* flush path: no more packets will arrive for this stream */
        st->probe_packets = 0;
        av_log(s, AV_LOG_ERROR,
               "nothing to probe for stream %d\n", st->index);
        /* probe when out of budget, or whenever buf_size crosses a power
         * of two — avoids re-probing after every single packet */
        if (!st->probe_packets ||
            av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
            set_codec_from_probe_data(s, st, pd, st->probe_packets > 0
                                                 ? AVPROBE_SCORE_MAX / 4 : 0);
            if (st->codec->codec_id != AV_CODEC_ID_PROBE) {
                av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
/**
 * Read a raw packet from the demuxer, serving buffered raw packets first.
 * Also applies forced codec ids, feeds the codec prober, and buffers
 * packets of streams still being probed. (Excerpt: several loop/branch
 * lines are elided.)
 */
int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
    AVPacketList *pktl = s->internal->raw_packet_buffer;
    st = s->streams[pkt->stream_index];
    /* a buffered packet can be returned directly once its stream is no
     * longer probing, or when the probe byte budget is exhausted */
    if (st->codec->codec_id != AV_CODEC_ID_PROBE ||
        !st->probe_packets ||
        s->internal->raw_packet_buffer_remaining_size < pkt->size) {
        if (st->probe_packets)
            /* out of budget: force a final probe pass (NULL == flush) */
            if ((err = probe_codec(s, st, NULL)) < 0)
        pd = &st->probe_data;
        /* unlink from the raw buffer and return the byte budget */
        s->internal->raw_packet_buffer = pktl->next;
        s->internal->raw_packet_buffer_remaining_size += pkt->size;
    ret = s->iformat->read_packet(s, pkt);
    if (!pktl || ret == AVERROR(EAGAIN))
    /* demuxer signalled EOF/error: flush all probing streams */
    for (i = 0; i < s->nb_streams; i++) {
        if (st->probe_packets)
            if ((err = probe_codec(s, st, NULL)) < 0)
    /* NOTE(review): this ref dance presumably normalizes a non-refcounted
     * demuxer packet — confirm against the elided lines */
    AVPacket tmp = { 0 };
    ret = av_packet_ref(&tmp, pkt);
    if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
        (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
        av_log(s, AV_LOG_WARNING,
               "Dropped corrupted packet (stream = %d)\n",
        av_packet_unref(pkt);
    st = s->streams[pkt->stream_index];
    /* apply user-forced codec ids per media type */
    switch (st->codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        if (s->video_codec_id)
            st->codec->codec_id = s->video_codec_id;
    case AVMEDIA_TYPE_AUDIO:
        if (s->audio_codec_id)
            st->codec->codec_id = s->audio_codec_id;
    case AVMEDIA_TYPE_SUBTITLE:
        if (s->subtitle_codec_id)
            st->codec->codec_id = s->subtitle_codec_id;
    /* stream fully identified and nothing buffered: return it as-is */
    if (!pktl && (st->codec->codec_id != AV_CODEC_ID_PROBE ||
    /* still probing: stash the packet (ref == 0) and feed the prober */
    err = add_to_pktbuf(&s->internal->raw_packet_buffer, pkt,
                        &s->internal->raw_packet_buffer_end, 0);
    s->internal->raw_packet_buffer_remaining_size -= pkt->size;
    if ((err = probe_codec(s, st, pkt)) < 0)
474 /**********************************************************/
477 * Return the frame duration in seconds. Return 0 if not available.
/**
 * Compute the frame duration as a *pnum / *pden fraction in seconds.
 * Leaves the outputs at their elided defaults (presumably 0) when the
 * duration cannot be determined.
 */
void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
    /* demuxing uses the codec framerate; otherwise invert the time base */
    AVRational codec_framerate = s->iformat ? st->codec->framerate :
                                              av_inv_q(st->codec->time_base);
    switch (st->codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        if (st->avg_frame_rate.num) {
            /* duration is the reciprocal of the average frame rate */
            *pnum = st->avg_frame_rate.den;
            *pden = st->avg_frame_rate.num;
        } else if (st->time_base.num * 1000LL > st->time_base.den) {
            /* time base is coarse enough to be one frame per tick */
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
            *pnum = codec_framerate.den;
            *pden = codec_framerate.num;
            if (pc && pc->repeat_pict) {
                /* scale for repeated fields/frames; divide the denominator
                 * instead when the numerator would overflow */
                if (*pnum > INT_MAX / (1 + pc->repeat_pict))
                    *pden /= 1 + pc->repeat_pict;
                *pnum *= 1 + pc->repeat_pict;
            /* If this codec can be interlaced or progressive then we need
             * a parser to compute duration of a packet. Thus if we have
             * no parser in such case leave duration undefined. */
            if (st->codec->ticks_per_frame > 1 && !pc)
    case AVMEDIA_TYPE_AUDIO:
        frame_size = av_get_audio_frame_duration(st->codec, pkt->size);
        if (frame_size <= 0 || st->codec->sample_rate <= 0)
        *pden = st->codec->sample_rate;
/* Return whether the codec id refers to an intra-only codec, i.e. one
 * whose every frame can be treated as a keyframe. Video codecs without
 * AV_CODEC_PROP_INTRA_ONLY are rejected; the remaining returns are
 * elided in this excerpt. */
static int is_intra_only(enum AVCodecID id)
    const AVCodecDescriptor *d = avcodec_descriptor_get(id);
    if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
/* Once the first DTS of a stream becomes known, shift the timestamps of
 * all its already-buffered packets by the offset between that DTS and the
 * running cur_dts, and derive the stream start time. */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts)
    AVStream *st = s->streams[stream_index];
    AVPacketList *pktl = s->internal->packet_buffer;
    /* nothing to do when first_dts is already set or no usable dts */
    if (st->first_dts != AV_NOPTS_VALUE ||
        dts == AV_NOPTS_VALUE ||
        st->cur_dts == AV_NOPTS_VALUE)
    /* offset that maps buffered (relative) timestamps to absolute ones */
    st->first_dts = dts - st->cur_dts;
    for (; pktl; pktl = pktl->next) {
        if (pktl->pkt.stream_index != stream_index)
        // FIXME: think more about this check
        if (pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
            pktl->pkt.pts += st->first_dts;
        if (pktl->pkt.dts != AV_NOPTS_VALUE)
            pktl->pkt.dts += st->first_dts;
        /* first buffered pts defines the stream start time */
        if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time = pktl->pkt.pts;
    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
/* Fill in missing DTS (and PTS for non-B-frame codecs) of buffered
 * packets by extrapolating from the newly learned per-packet duration. */
static void update_initial_durations(AVFormatContext *s, AVStream *st,
                                     int stream_index, int duration)
    AVPacketList *pktl = s->internal->packet_buffer;
    if (st->first_dts != AV_NOPTS_VALUE) {
        cur_dts = st->first_dts;
        /* walk backwards from the known first_dts to find where the run of
         * timestamp-less packets begins */
        for (; pktl; pktl = pktl->next) {
            if (pktl->pkt.stream_index == stream_index) {
                if (pktl->pkt.pts != pktl->pkt.dts ||
                    pktl->pkt.dts != AV_NOPTS_VALUE ||
        pktl = s->internal->packet_buffer;
        st->first_dts = cur_dts;
    } else if (st->cur_dts)
        /* no reference timestamp at all: cannot extrapolate, bail out */
    /* forward pass: assign cur_dts to packets lacking timestamps */
    for (; pktl; pktl = pktl->next) {
        if (pktl->pkt.stream_index != stream_index)
        if (pktl->pkt.pts == pktl->pkt.dts &&
            pktl->pkt.dts == AV_NOPTS_VALUE &&
            !pktl->pkt.duration) {
            pktl->pkt.dts = cur_dts;
            /* pts only equals dts when there is no frame reordering */
            if (!st->codec->has_b_frames)
                pktl->pkt.pts = cur_dts;
            if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
                pktl->pkt.duration = duration;
    if (st->first_dts == AV_NOPTS_VALUE)
        st->cur_dts = cur_dts;
/* Fill in missing pts/dts/duration of pkt for stream st, using the parser
 * state (pc may be NULL), wrap-around correction and the stream's pts
 * reorder buffer. This is the central timestamp-fixup routine; several
 * guard/else lines are elided in this excerpt — the statement order is
 * significant, do not reorder. */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
    int num, den, presentation_delayed, delay, i;
    /* caller asked us not to fill in any fields */
    if (s->flags & AVFMT_FLAG_NOFILLIN)
    if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
        pkt->dts = AV_NOPTS_VALUE;
    /* do we have a video B-frame ? */
    delay = st->codec->has_b_frames;
    presentation_delayed = 0;
    /* XXX: need has_b_frame, but cannot get it if the codec is
    pc && pc->pict_type != AV_PICTURE_TYPE_B)
        presentation_delayed = 1;
    /* undo a 2^pts_wrap_bits timestamp wrap when dts ran far ahead of pts */
    if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
        st->pts_wrap_bits < 63 &&
        pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
        pkt->dts -= 1LL << st->pts_wrap_bits;
    /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
     * We take the conservative approach and discard both.
     * Note: If this is misbehaving for an H.264 file, then possibly
     * presentation_delayed is not set correctly. */
    if (delay == 1 && pkt->dts == pkt->pts &&
        pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
        av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
        pkt->dts = AV_NOPTS_VALUE;
    /* derive the packet duration from the frame rate when missing */
    if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
        ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
        pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
                                       den * (int64_t) st->time_base.num,
        if (pkt->duration != 0 && s->internal->packet_buffer)
            update_initial_durations(s, st, pkt->stream_index,
    /* Correct timestamps with byte offset if demuxers only have timestamps
     * on packet boundaries */
    if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if (pkt->pts != AV_NOPTS_VALUE)
        if (pkt->dts != AV_NOPTS_VALUE)
    /* This may be redundant, but it should not hurt. */
    if (pkt->dts != AV_NOPTS_VALUE &&
        pkt->pts != AV_NOPTS_VALUE &&
        presentation_delayed = 1;
    av_log(NULL, AV_LOG_TRACE,
           "IN delayed:%d pts:%"PRId64", dts:%"PRId64" "
           "cur_dts:%"PRId64" st:%d pc:%p\n",
           presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
           pkt->stream_index, pc);
    /* Interpolate PTS and DTS if they are not present. We skip H.264
     * currently because delay and has_b_frames are not reliably set. */
    if ((delay == 0 || (delay == 1 && pc)) &&
        st->codec->codec_id != AV_CODEC_ID_H264) {
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->cur_dts;
            /* This is tricky: the dts must be incremented by the duration
             * of the frame we are displaying, i.e. the last I- or P-frame. */
            if (st->last_IP_duration == 0)
                st->last_IP_duration = pkt->duration;
            if (pkt->dts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts + st->last_IP_duration;
            st->last_IP_duration = pkt->duration;
            st->last_IP_pts      = pkt->pts;
            /* Cannot compute PTS if not present (we can compute it only
             * by knowing the future. */
        } else if (pkt->pts != AV_NOPTS_VALUE ||
                   pkt->dts != AV_NOPTS_VALUE ||
                   st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            int duration = pkt->duration;
            /* audio duration can be derived from the payload size */
            if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
                ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
                duration = av_rescale_rnd(1,
                                          num * (int64_t) st->time_base.den,
                                          den * (int64_t) st->time_base.num,
                if (duration != 0 && s->internal->packet_buffer)
                    update_initial_durations(s, st, pkt->stream_index,
            if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
                /* presentation is not delayed : PTS and DTS are the same */
                if (pkt->pts == AV_NOPTS_VALUE)
                update_initial_timestamps(s, pkt->stream_index, pkt->pts,
                if (pkt->pts == AV_NOPTS_VALUE)
                    pkt->pts = st->cur_dts;
                if (pkt->pts != AV_NOPTS_VALUE)
                    st->cur_dts = pkt->pts + duration;
    /* reorder buffer: the smallest of the last delay+1 pts values is a
     * valid dts for the current packet */
    if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
        st->pts_buffer[0] = pkt->pts;
        for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
        if (pkt->dts == AV_NOPTS_VALUE)
            pkt->dts = st->pts_buffer[0];
        // We skipped it above so we try here.
        if (st->codec->codec_id == AV_CODEC_ID_H264)
            // This should happen on the first packet
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
        if (pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;
    av_log(NULL, AV_LOG_TRACE,
           "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
           presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
    /* every frame of an intra-only codec is a keyframe */
    if (is_intra_only(st->codec->codec_id))
        pkt->flags |= AV_PKT_FLAG_KEY;
#if FF_API_CONVERGENCE_DURATION
FF_DISABLE_DEPRECATION_WARNINGS
    pkt->convergence_duration = pc->convergence_duration;
FF_ENABLE_DEPRECATION_WARNINGS
/* Unref and free every node of a packet list, emptying it.
 * (Excerpt: the while-loop header, the node free and the *pkt_buf_end
 * reset are elided.) */
static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
    AVPacketList *pktl = *pkt_buf;
    *pkt_buf = pktl->next;
    av_packet_unref(&pktl->pkt);
777 * Parse a packet, add all split parts to parse_queue.
779 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
/* Run st's parser over pkt, splitting it into frames that are appended to
 * the parse_queue. pkt == NULL (mapped to the local flush_pkt) flushes and
 * closes the parser at end of stream. Consumes pkt in all cases. */
static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
    AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
    AVStream *st = s->streams[stream_index];
    uint8_t *data = pkt ? pkt->data : NULL;
    int size = pkt ? pkt->size : 0;
    int ret = 0, got_output = 0;
    av_init_packet(&flush_pkt);
    /* keep parsing while input remains, or while flushing still yields */
    while (size > 0 || (pkt == &flush_pkt && got_output)) {
        av_init_packet(&out_pkt);
        len = av_parser_parse2(st->parser, st->codec,
                               &out_pkt.data, &out_pkt.size, data, size,
                               pkt->pts, pkt->dts, pkt->pos);
        /* timestamps belong to the first frame only; clear for the rest */
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
        /* increment read pointer */
        got_output = !!out_pkt.size;
        /* hand the side data over to the first output frame */
        if (pkt->side_data) {
            out_pkt.side_data       = pkt->side_data;
            out_pkt.side_data_elems = pkt->side_data_elems;
            pkt->side_data          = NULL;
            pkt->side_data_elems    = 0;
        /* set the duration */
        out_pkt.duration = 0;
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (st->codec->sample_rate > 0) {
                /* convert the parser's sample count into stream time base */
                av_rescale_q_rnd(st->parser->duration,
                                 (AVRational) { 1, st->codec->sample_rate },
        out_pkt.stream_index = st->index;
        out_pkt.pts          = st->parser->pts;
        out_pkt.dts          = st->parser->dts;
        out_pkt.pos          = st->parser->pos;
        /* key_frame: 1 = known key, -1 = unknown, fall back to pict_type */
        if (st->parser->key_frame == 1 ||
            (st->parser->key_frame == -1 &&
             st->parser->pict_type == AV_PICTURE_TYPE_I))
            out_pkt.flags |= AV_PKT_FLAG_KEY;
        compute_pkt_fields(s, st, st->parser, &out_pkt);
        if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
            out_pkt.flags & AV_PKT_FLAG_KEY) {
            ff_reduce_index(s, st->index);
            av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
                               0, 0, AVINDEX_KEYFRAME);
        if ((ret = add_to_pktbuf(&s->internal->parse_queue, &out_pkt,
                                 &s->internal->parse_queue_end,
            av_packet_unref(&out_pkt);
    /* end of the stream => close and free the parser */
    if (pkt == &flush_pkt) {
        av_parser_close(st->parser);
    av_packet_unref(pkt);
/* Pop the head packet of a packet list into pkt (move, no copy) and free
 * the node; resets the tail pointer when the list becomes empty. The list
 * must be non-empty. */
static int read_from_packet_buffer(AVPacketList **pkt_buffer,
                                   AVPacketList **pkt_buffer_end,
    av_assert0(*pkt_buffer);
    *pkt_buffer = pktl->next;
    /* list drained: tail pointer must not dangle */
    *pkt_buffer_end = NULL;
/* Read one fully timestamped frame: pull raw packets via ff_read_packet(),
 * push them through the per-stream parsers when needed, then return either
 * the direct packet or the next entry of parse_queue. Also propagates
 * demuxer metadata updates into s->metadata. */
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
    int ret = 0, i, got_packet = 0;
    AVDictionary *metadata = NULL;
    while (!got_packet && !s->internal->parse_queue) {
        /* read next packet */
        ret = ff_read_packet(s, &cur_pkt);
        if (ret == AVERROR(EAGAIN))
        /* flush the parsers */
        for (i = 0; i < s->nb_streams; i++) {
            if (st->parser && st->need_parsing)
                parse_packet(s, NULL, st->index);
        /* all remaining packets are now in parse_queue =>
         * really terminate parsing */
        st = s->streams[cur_pkt.stream_index];
        /* pts < dts is impossible for a valid stream: warn about it */
        if (cur_pkt.pts != AV_NOPTS_VALUE &&
            cur_pkt.dts != AV_NOPTS_VALUE &&
            cur_pkt.pts < cur_pkt.dts) {
            av_log(s, AV_LOG_WARNING,
                   "Invalid timestamps stream=%d, pts=%"PRId64", "
                   "dts=%"PRId64", size=%d\n",
                   cur_pkt.stream_index, cur_pkt.pts,
                   cur_pkt.dts, cur_pkt.size);
        if (s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG,
                   "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", "
                   "size=%d, duration=%"PRId64", flags=%d\n",
                   cur_pkt.stream_index, cur_pkt.pts, cur_pkt.dts,
                   cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
        /* lazily create the parser on first use */
        if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codec->codec_id);
            /* no parser available: just output the raw packets */
            st->need_parsing = AVSTREAM_PARSE_NONE;
            else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
                st->parser->flags |= PARSER_FLAG_ONCE;
        if (!st->need_parsing || !st->parser) {
            /* no parsing needed: we just output the packet as is */
            compute_pkt_fields(s, st, NULL, pkt);
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts,
                                   0, 0, AVINDEX_KEYFRAME);
        } else if (st->discard < AVDISCARD_ALL) {
            if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
        av_packet_unref(&cur_pkt);
    if (!got_packet && s->internal->parse_queue)
        ret = read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);
    /* pick up metadata updates published by the demuxer via AVOptions */
    av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
        s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
        av_dict_copy(&s->metadata, metadata, 0);
        av_dict_free(&metadata);
        /* clear the option so the same update is not reported twice */
        av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG,
               "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", "
               "size=%d, duration=%"PRId64", flags=%d\n",
               pkt->stream_index, pkt->pts, pkt->dts,
               pkt->size, pkt->duration, pkt->flags);
/**
 * Return the next frame of the stream.
 * Without AVFMT_FLAG_GENPTS this just drains packet_buffer or calls
 * read_frame_internal(). With genpts, packets are buffered and missing
 * pts values are interpolated from later dts values of the same stream.
 */
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
    const int genpts = s->flags & AVFMT_FLAG_GENPTS;
    /* fast path: no pts generation requested */
    return s->internal->packet_buffer
           ? read_from_packet_buffer(&s->internal->packet_buffer,
                                     &s->internal->packet_buffer_end, pkt)
           : read_frame_internal(s, pkt);
    AVPacketList *pktl = s->internal->packet_buffer;
    AVPacket *next_pkt = &pktl->pkt;
    if (next_pkt->dts != AV_NOPTS_VALUE) {
        int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
        /* scan later packets of the same stream for a dts that can serve
         * as this packet's pts (mod-2^wrap_bits comparisons) */
        while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
            if (pktl->pkt.stream_index == next_pkt->stream_index &&
                (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
                av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
                next_pkt->pts = pktl->pkt.dts;
        pktl = s->internal->packet_buffer;
    /* read packet from packet buffer, if there is data */
    if (!(next_pkt->pts == AV_NOPTS_VALUE &&
          next_pkt->dts != AV_NOPTS_VALUE && !eof))
        return read_from_packet_buffer(&s->internal->packet_buffer,
                                       &s->internal->packet_buffer_end, pkt);
    ret = read_frame_internal(s, pkt);
    if (pktl && ret != AVERROR(EAGAIN)) {
    /* buffer the packet (ref == 1) until its pts can be determined */
    ret = add_to_pktbuf(&s->internal->packet_buffer, pkt,
                        &s->internal->packet_buffer_end, 1);
1037 /* XXX: suppress the packet queue */
1038 static void flush_packet_queue(AVFormatContext *s)
1040 free_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end);
1041 free_packet_buffer(&s->internal->packet_buffer, &s->internal->packet_buffer_end);
1042 free_packet_buffer(&s->internal->raw_packet_buffer, &s->internal->raw_packet_buffer_end);
1044 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1047 /*******************************************************/
/**
 * Pick a default stream: the first real video stream (attached pictures
 * excluded; the video-case return is elided here), else the first audio
 * stream, else stream 0.
 */
int av_find_default_stream_index(AVFormatContext *s)
    int first_audio_index = -1;
    if (s->nb_streams <= 0)
    for (i = 0; i < s->nb_streams; i++) {
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
            !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
        /* remember the first audio stream as a fallback */
        if (first_audio_index < 0 &&
            st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
            first_audio_index = i;
    return first_audio_index >= 0 ? first_audio_index : 0;
1071 /** Flush the frame reader. */
/* Flush all internal packet queues and reset the per-stream demuxing
 * state (parser, timestamps, probe budget, reorder buffer) — used when
 * seeking. */
void ff_read_frame_flush(AVFormatContext *s)
    flush_packet_queue(s);
    /* Reset read state for each stream. */
    for (i = 0; i < s->nb_streams; i++) {
        av_parser_close(st->parser);
        st->last_IP_pts = AV_NOPTS_VALUE;
        /* We set the current DTS to an unspecified origin. */
        st->cur_dts = AV_NOPTS_VALUE;
        /* allow the codec prober to run again after the seek */
        st->probe_packets = MAX_PROBE_PACKETS;
        for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
            st->pts_buffer[j] = AV_NOPTS_VALUE;
/* After a seek, set every stream's cur_dts to the given timestamp,
 * rescaled from ref_st's time base into each stream's own time base. */
void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        /* timestamp * (ref_tb / st_tb), in 64-bit to avoid overflow */
        av_rescale(timestamp,
                   st->time_base.den * (int64_t) ref_st->time_base.num,
                   st->time_base.num * (int64_t) ref_st->time_base.den);
/* Keep the seek index of a stream within s->max_index_size bytes by
 * halving it (dropping every other entry) once the limit is reached. */
void ff_reduce_index(AVFormatContext *s, int stream_index)
    AVStream *st = s->streams[stream_index];
    unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
    if ((unsigned) st->nb_index_entries >= max_entries) {
        /* compact in place: entry i takes the value of entry 2*i */
        for (i = 0; 2 * i < st->nb_index_entries; i++)
            st->index_entries[i] = st->index_entries[2 * i];
        st->nb_index_entries = i;
/* Insert an index entry into a timestamp-sorted index array, growing it
 * as needed. Appends, inserts in the middle (shifting later entries), or
 * updates an existing entry with the same timestamp. Returns the entry's
 * position (return statements elided in this excerpt). */
int ff_add_index_entry(AVIndexEntry **index_entries,
                       int *nb_index_entries,
                       unsigned int *index_entries_allocated_size,
                       int64_t pos, int64_t timestamp,
                       int size, int distance, int flags)
    AVIndexEntry *entries, *ie;
    /* refuse growth that would overflow the allocation size computation */
    if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
    entries = av_fast_realloc(*index_entries,
                              index_entries_allocated_size,
                              (*nb_index_entries + 1) *
                              sizeof(AVIndexEntry));
    *index_entries = entries;
    /* locate the insertion point for this timestamp */
    index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
                                      timestamp, AVSEEK_FLAG_ANY);
    /* not found: append at the end (must keep the array sorted) */
    index = (*nb_index_entries)++;
    ie    = &entries[index];
    assert(index == 0 || ie[-1].timestamp < timestamp);
    ie = &entries[index];
    if (ie->timestamp != timestamp) {
        if (ie->timestamp <= timestamp)
        /* insert: shift the tail of the array one slot to the right */
        memmove(entries + index + 1, entries + index,
                sizeof(AVIndexEntry) * (*nb_index_entries - index));
        (*nb_index_entries)++;
    } else if (ie->pos == pos && distance < ie->min_distance)
        // do not reduce the distance
        distance = ie->min_distance;
    ie->timestamp    = timestamp;
    ie->min_distance = distance;
1175 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1176 int size, int distance, int flags)
1178 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1179 &st->index_entries_allocated_size, pos,
1180 timestamp, size, distance, flags);
/* Binary-search a sorted index for wanted_timestamp. With
 * AVSEEK_FLAG_BACKWARD return the last entry <= the target, otherwise the
 * first entry >= it; without AVSEEK_FLAG_ANY, skip to the nearest
 * keyframe entry in the chosen direction. (Excerpt: the bisection loop
 * bounds and returns are partially elided.) */
int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
                              int64_t wanted_timestamp, int flags)
    // Optimize appending index entries at the end.
    if (b && entries[b - 1].timestamp < wanted_timestamp)
    timestamp = entries[m].timestamp;
    if (timestamp >= wanted_timestamp)
    if (timestamp <= wanted_timestamp)
    /* pick the bound matching the requested seek direction */
    m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
    if (!(flags & AVSEEK_FLAG_ANY))
        while (m >= 0 && m < nb_entries &&
               !(entries[m].flags & AVINDEX_KEYFRAME))
            m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
    /* walked off the end of the index: no suitable entry */
    if (m == nb_entries)
1216 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1218 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1219 wanted_timestamp, flags);
/* Seek to target_ts using binary search over the demuxer's
 * read_timestamp callback, seeded from the stream's index when available,
 * then position s->pb at the result and update all streams' cur_dts. */
int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
                         int64_t target_ts, int flags)
    AVInputFormat *avif = s->iformat;
    int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
    int64_t ts_min, ts_max, ts;
    if (stream_index < 0)
    av_log(s, AV_LOG_TRACE, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
    ts_min    = AV_NOPTS_VALUE;
    pos_limit = -1; // GCC falsely says it may be uninitialized.
    st = s->streams[stream_index];
    if (st->index_entries) {
        /* FIXME: Whole function must be checked for non-keyframe entries in
         * index case, especially read_timestamp(). */
        /* lower bound from the index: last entry at or before target_ts */
        index = av_index_search_timestamp(st, target_ts,
                                          flags | AVSEEK_FLAG_BACKWARD);
        index = FFMAX(index, 0);
        e = &st->index_entries[index];
        if (e->timestamp <= target_ts || e->pos == e->min_distance) {
            ts_min = e->timestamp;
            av_log(s, AV_LOG_TRACE, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
        /* upper bound from the index: first entry at or after target_ts */
        index = av_index_search_timestamp(st, target_ts,
                                          flags & ~AVSEEK_FLAG_BACKWARD);
        assert(index < st->nb_index_entries);
        e = &st->index_entries[index];
        assert(e->timestamp >= target_ts);
        ts_max    = e->timestamp;
        pos_limit = pos_max - e->min_distance;
        av_log(s, AV_LOG_TRACE, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
               " dts_max=%"PRId64"\n", pos_max, pos_limit, ts_max);
    pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
                        ts_min, ts_max, flags, &ts, avif->read_timestamp);
    /* do the seek and propagate the reached timestamp to all streams */
    if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
    ff_update_cur_dts(s, st, ts);
/* Generic timestamp-driven binary search between pos_min/pos_max using
 * the demuxer's read_timestamp callback. Bounds with AV_NOPTS_VALUE are
 * discovered first (start of payload, end of file). Search strategy:
 * interpolation, then bisection, then linear, depending on no_change.
 * Returns the found position and stores the timestamp in *ts_ret. */
int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
                      int64_t pos_min, int64_t pos_max, int64_t pos_limit,
                      int64_t ts_min, int64_t ts_max,
                      int flags, int64_t *ts_ret,
                      int64_t (*read_timestamp)(struct AVFormatContext *, int,
                                                int64_t *, int64_t))
    int64_t start_pos, filesize;
    av_log(s, AV_LOG_TRACE, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
    if (ts_min == AV_NOPTS_VALUE) {
        /* establish the lower bound at the start of the payload */
        pos_min = s->internal->data_offset;
        ts_min  = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
        if (ts_min == AV_NOPTS_VALUE)
    if (ts_max == AV_NOPTS_VALUE) {
        /* establish the upper bound by stepping back from end of file
         * until a timestamp is found */
        filesize = avio_size(s->pb);
        pos_max  = filesize - 1;
        ts_max = read_timestamp(s, stream_index, &pos_max,
        } while (ts_max == AV_NOPTS_VALUE && pos_max >= step);
        if (ts_max == AV_NOPTS_VALUE)
        /* confirm pos_max really holds the last timestamp in the file */
        int64_t tmp_pos = pos_max + 1;
        int64_t tmp_ts  = read_timestamp(s, stream_index,
                                         &tmp_pos, INT64_MAX);
        if (tmp_ts == AV_NOPTS_VALUE)
        if (tmp_pos >= filesize)
        pos_limit = pos_max;
    if (ts_min > ts_max)
    else if (ts_min == ts_max)
        pos_limit = pos_min;
    while (pos_min < pos_limit) {
        av_log(s, AV_LOG_TRACE, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64
               " dts_max=%"PRId64"\n", pos_min, pos_max, ts_min, ts_max);
        assert(pos_limit <= pos_max);
        if (no_change == 0) {
            int64_t approximate_keyframe_distance = pos_max - pos_limit;
            // interpolate position (better than dichotomy)
            pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
                  pos_min - approximate_keyframe_distance;
        } else if (no_change == 1) {
            // bisection if interpolation did not change min / max pos last time
            pos = (pos_min + pos_limit) >> 1;
            /* linear search if bisection failed, can only happen if there
             * are very few or no keyframes between min/max */
        else if (pos > pos_limit)
        // May pass pos_limit instead of -1.
        ts = read_timestamp(s, stream_index, &pos, INT64_MAX);
        av_log(s, AV_LOG_TRACE, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64
               " target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
               pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
               pos_limit, start_pos, no_change);
        if (ts == AV_NOPTS_VALUE) {
            av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
        assert(ts != AV_NOPTS_VALUE);
        /* shrink the interval toward the target timestamp */
        if (target_ts <= ts) {
            pos_limit = start_pos - 1;
        if (target_ts >= ts) {
    /* pick the bound matching the requested direction */
    pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
    ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
    ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    /* NOTE(review): upstream increments pos_min before this second read,
     * which is why &pos_min (not &pos_max) is passed here */
    ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    av_log(s, AV_LOG_TRACE, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
           pos, ts_min, target_ts, ts_max);
/*
 * Seek to an absolute byte position, bounded by the start of the data area
 * and the end of the file. (Clamping statements are partially elided in this
 * excerpt — confirm against the full source.)
 */
1405 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1406 int64_t pos, int flags)
1408 int64_t pos_min, pos_max;
1410 pos_min = s->internal->data_offset;
1411 pos_max = avio_size(s->pb) - 1;
1415 else if (pos > pos_max)
1418 avio_seek(s->pb, pos, SEEK_SET);
/*
 * Generic timestamp seek using the stream's index: look up the target in
 * index_entries; if the target lies beyond the last entry, read frames
 * forward (building the index as a side effect) until it is covered, then
 * seek to the matching entry's byte position.
 */
1423 static int seek_frame_generic(AVFormatContext *s, int stream_index,
1424 int64_t timestamp, int flags)
1431 st = s->streams[stream_index];
1433 index = av_index_search_timestamp(st, timestamp, flags);
1435 if (index < 0 && st->nb_index_entries &&
1436 timestamp < st->index_entries[0].timestamp)
/* Target is past the end of the current index: scan forward from the last
 * known entry (or from the start of the data) to extend the index. */
1439 if (index < 0 || index == st->nb_index_entries - 1) {
1442 if (st->nb_index_entries) {
1443 assert(st->index_entries);
1444 ie = &st->index_entries[st->nb_index_entries - 1];
1445 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1447 ff_update_cur_dts(s, st, ie->timestamp);
1449 if ((ret = avio_seek(s->pb, s->internal->data_offset, SEEK_SET)) < 0)
/* Read frames, retrying on EAGAIN, until a keyframe past the target is
 * seen on the wanted stream. */
1455 read_status = av_read_frame(s, &pkt);
1456 } while (read_status == AVERROR(EAGAIN));
1457 if (read_status < 0)
1459 av_packet_unref(&pkt);
1460 if (stream_index == pkt.stream_index)
1461 if ((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1464 index = av_index_search_timestamp(st, timestamp, flags);
1469 ff_read_frame_flush(s);
1470 if (s->iformat->read_seek)
1471 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
/* Seek to the resolved index entry and update the current DTS. */
1473 ie = &st->index_entries[index];
1474 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1476 ff_update_cur_dts(s, st, ie->timestamp);
/*
 * Dispatch a seek request: byte seek, demuxer-specific read_seek(),
 * binary search via read_timestamp(), or the generic index-based seek,
 * in that order of preference.
 */
1481 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1482 int64_t timestamp, int flags)
/* Byte-position seek, if the demuxer allows it. */
1487 if (flags & AVSEEK_FLAG_BYTE) {
1488 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1490 ff_read_frame_flush(s);
1491 return seek_frame_byte(s, stream_index, timestamp, flags);
/* No stream given: pick a default stream and rescale the timestamp from
 * AV_TIME_BASE units into that stream's time base. */
1494 if (stream_index < 0) {
1495 stream_index = av_find_default_stream_index(s);
1496 if (stream_index < 0)
1499 st = s->streams[stream_index];
1500 /* timestamp for default must be expressed in AV_TIME_BASE units */
1501 timestamp = av_rescale(timestamp, st->time_base.den,
1502 AV_TIME_BASE * (int64_t) st->time_base.num);
1505 /* first, we try the format specific seek */
1506 if (s->iformat->read_seek) {
1507 ff_read_frame_flush(s);
1508 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
/* Fallbacks: binary search if the demuxer can report timestamps, else the
 * generic index-based search (unless the demuxer forbids either). */
1514 if (s->iformat->read_timestamp &&
1515 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1516 ff_read_frame_flush(s);
1517 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1518 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1519 ff_read_frame_flush(s);
1520 return seek_frame_generic(s, stream_index, timestamp, flags);
/*
 * Public seek entry point: delegate to seek_frame_internal(), then re-queue
 * attached pictures so they are delivered again after the seek.
 */
1525 int av_seek_frame(AVFormatContext *s, int stream_index,
1526 int64_t timestamp, int flags)
1528 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1531 ret = queue_attached_pictures(s);
/*
 * New-style seek API with a [min_ts, max_ts] window around ts. Prefers the
 * demuxer's read_seek2(); otherwise falls back on av_seek_frame(), choosing
 * AVSEEK_FLAG_BACKWARD based on which side of ts the window is wider on.
 */
1536 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
1537 int64_t ts, int64_t max_ts, int flags)
/* Reject an inconsistent window. */
1539 if (min_ts > ts || max_ts < ts)
1542 if (s->iformat->read_seek2) {
1544 ff_read_frame_flush(s);
1545 ret = s->iformat->read_seek2(s, stream_index, min_ts,
1549 ret = queue_attached_pictures(s);
1553 if (s->iformat->read_timestamp) {
1554 // try to seek via read_timestamp()
1557 // Fall back on old API if new is not implemented but old is.
1558 // Note the old API has somewhat different semantics.
1559 if (s->iformat->read_seek || 1)
1560 return av_seek_frame(s, stream_index, ts,
1561 flags | ((uint64_t) ts - min_ts >
1562 (uint64_t) max_ts - ts
1563 ? AVSEEK_FLAG_BACKWARD : 0));
1565 // try some generic seek like seek_frame_generic() but with new ts semantics
1568 /*******************************************************/
1571 * Return TRUE if any stream in the context has an accurate duration.
1573 * @return TRUE if the stream has accurate duration for at least one component.
/*
 * Return nonzero if a duration is known, either on at least one stream or on
 * the format context itself.
 */
1575 static int has_duration(AVFormatContext *ic)
1580 for (i = 0; i < ic->nb_streams; i++) {
1581 st = ic->streams[i];
1582 if (st->duration != AV_NOPTS_VALUE)
1585 if (ic->duration != AV_NOPTS_VALUE)
1591 * Estimate the stream timings from those of the individual components.
1593 * Also computes the global bitrate if possible.
/*
 * Derive the context-wide start_time, duration and bit_rate from the
 * per-stream values: start_time is the minimum stream start, duration the
 * maximum of per-stream durations and (end_time - start_time), and bit_rate
 * is estimated from file size / duration when the file size is known.
 */
1595 static void update_stream_timings(AVFormatContext *ic)
1597 int64_t start_time, start_time1, end_time, end_time1;
1598 int64_t duration, duration1, filesize;
/* Sentinels so FFMIN/FFMAX pick up the first real value. */
1602 start_time = INT64_MAX;
1603 end_time = INT64_MIN;
1604 duration = INT64_MIN;
1605 for (i = 0; i < ic->nb_streams; i++) {
1606 st = ic->streams[i];
1607 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1608 start_time1 = av_rescale_q(st->start_time, st->time_base,
1610 start_time = FFMIN(start_time, start_time1);
1611 if (st->duration != AV_NOPTS_VALUE) {
1612 end_time1 = start_time1 +
1613 av_rescale_q(st->duration, st->time_base,
1615 end_time = FFMAX(end_time, end_time1);
1618 if (st->duration != AV_NOPTS_VALUE) {
1619 duration1 = av_rescale_q(st->duration, st->time_base,
1621 duration = FFMAX(duration, duration1);
/* Commit only the values that were actually found. */
1624 if (start_time != INT64_MAX) {
1625 ic->start_time = start_time;
1626 if (end_time != INT64_MIN)
1627 duration = FFMAX(duration, end_time - start_time);
1629 if (duration != INT64_MIN) {
1630 ic->duration = duration;
1631 if (ic->pb && (filesize = avio_size(ic->pb)) > 0)
1632 /* compute the bitrate */
1633 ic->bit_rate = (double) filesize * 8.0 * AV_TIME_BASE /
1634 (double) ic->duration;
/*
 * Propagate the context-wide start_time/duration (recomputed first via
 * update_stream_timings()) down to any stream that has no start_time of its
 * own, rescaling from AV_TIME_BASE_Q into each stream's time base.
 */
1638 static void fill_all_stream_timings(AVFormatContext *ic)
1643 update_stream_timings(ic);
1644 for (i = 0; i < ic->nb_streams; i++) {
1645 st = ic->streams[i];
1646 if (st->start_time == AV_NOPTS_VALUE) {
1647 if (ic->start_time != AV_NOPTS_VALUE)
1648 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
1650 if (ic->duration != AV_NOPTS_VALUE)
1651 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
/*
 * Last-resort duration estimate: sum the per-stream codec bitrates if the
 * context bitrate is unset, then derive duration from file size / bit rate.
 */
1657 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1659 int64_t filesize, duration;
1663 /* if bit_rate is already set, we believe it */
1664 if (ic->bit_rate <= 0) {
1666 for (i = 0; i < ic->nb_streams; i++) {
1667 st = ic->streams[i];
/* Guard against int overflow while accumulating stream bitrates. */
1668 if (st->codec->bit_rate > 0) {
1669 if (INT_MAX - st->codec->bit_rate < bit_rate) {
1673 bit_rate += st->codec->bit_rate;
1676 ic->bit_rate = bit_rate;
1679 /* if duration is already set, we believe it */
1680 if (ic->duration == AV_NOPTS_VALUE &&
1681 ic->bit_rate != 0) {
1682 filesize = ic->pb ? avio_size(ic->pb) : 0;
1684 for (i = 0; i < ic->nb_streams; i++) {
1685 st = ic->streams[i];
/* duration = 8 * filesize / bit_rate, expressed in the stream time base. */
1686 duration = av_rescale(8 * filesize, st->time_base.den,
1688 (int64_t) st->time_base.num);
1689 if (st->duration == AV_NOPTS_VALUE)
1690 st->duration = duration;
1696 #define DURATION_MAX_READ_SIZE 250000
1697 #define DURATION_MAX_RETRY 3
1699 /* only usable for MPEG-PS streams */
/*
 * Estimate stream durations by reading packets near the end of the file and
 * taking the last PTS seen per stream (used for MPEG-PS/TS, where no global
 * duration is stored). Retries with a progressively larger tail window
 * (DURATION_MAX_READ_SIZE << retry), then restores the original file offset
 * and resets per-stream decode state.
 */
1700 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1702 AVPacket pkt1, *pkt = &pkt1;
1704 int read_size, i, ret;
1706 int64_t filesize, offset, duration;
1709 /* flush packet queue */
1710 flush_packet_queue(ic);
1712 for (i = 0; i < ic->nb_streams; i++) {
1713 st = ic->streams[i];
1714 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1715 av_log(st->codec, AV_LOG_WARNING,
1716 "start time is not set in estimate_timings_from_pts\n");
1719 av_parser_close(st->parser);
1724 /* estimate the end time (duration) */
1725 /* XXX: may need to support wrapping */
1726 filesize = ic->pb ? avio_size(ic->pb) : 0;
1727 end_time = AV_NOPTS_VALUE;
1729 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
1733 avio_seek(ic->pb, offset, SEEK_SET);
1736 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
1740 ret = ff_read_packet(ic, pkt);
1741 } while (ret == AVERROR(EAGAIN));
1744 read_size += pkt->size;
1745 st = ic->streams[pkt->stream_index];
/* duration = pkt PTS minus the stream's start (start_time preferred,
 * first_dts otherwise), correcting for PTS wraparound. */
1746 if (pkt->pts != AV_NOPTS_VALUE &&
1747 (st->start_time != AV_NOPTS_VALUE ||
1748 st->first_dts != AV_NOPTS_VALUE)) {
1749 duration = end_time = pkt->pts;
1750 if (st->start_time != AV_NOPTS_VALUE)
1751 duration -= st->start_time;
1753 duration -= st->first_dts;
1755 duration += 1LL << st->pts_wrap_bits;
1757 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1758 st->duration = duration;
1761 av_packet_unref(pkt);
1763 } while (end_time == AV_NOPTS_VALUE &&
1764 filesize > (DURATION_MAX_READ_SIZE << retry) &&
1765 ++retry <= DURATION_MAX_RETRY);
1767 fill_all_stream_timings(ic);
/* Restore the read position and reset per-stream DTS bookkeeping. */
1769 avio_seek(ic->pb, old_offset, SEEK_SET);
1770 for (i = 0; i < ic->nb_streams; i++) {
1771 st = ic->streams[i];
1772 st->cur_dts = st->first_dts;
1773 st->last_IP_pts = AV_NOPTS_VALUE;
/*
 * Choose a timing-estimation strategy: exact PTS scan for seekable MPEG-PS/TS
 * files, per-stream timings when at least one stream has them, or the coarse
 * bitrate-based estimate as a last resort; then log the results at TRACE.
 */
1777 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1781 /* get the file size, if possible */
1782 if (ic->iformat->flags & AVFMT_NOFILE) {
1785 file_size = avio_size(ic->pb);
1786 file_size = FFMAX(0, file_size);
1789 if ((!strcmp(ic->iformat->name, "mpeg") ||
1790 !strcmp(ic->iformat->name, "mpegts")) &&
1791 file_size && ic->pb->seekable) {
1792 /* get accurate estimate from the PTSes */
1793 estimate_timings_from_pts(ic, old_offset);
1794 } else if (has_duration(ic)) {
1795 /* at least one component has timings - we use them for all
1797 fill_all_stream_timings(ic);
1799 av_log(ic, AV_LOG_WARNING,
1800 "Estimating duration from bitrate, this may be inaccurate\n");
1801 /* less precise: use bitrate info */
1802 estimate_timings_from_bit_rate(ic);
1804 update_stream_timings(ic);
1808 AVStream av_unused *st;
1809 for (i = 0; i < ic->nb_streams; i++) {
1810 st = ic->streams[i];
1811 av_log(ic, AV_LOG_TRACE, "%d: start_time: %0.3f duration: %0.3f\n", i,
1812 (double) st->start_time / AV_TIME_BASE,
1813 (double) st->duration / AV_TIME_BASE);
1815 av_log(ic, AV_LOG_TRACE,
1816 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1817 (double) ic->start_time / AV_TIME_BASE,
1818 (double) ic->duration / AV_TIME_BASE,
1819 ic->bit_rate / 1000);
/*
 * Return nonzero once enough codec parameters are known for this stream:
 * a codec id plus, per media type, sample rate/channels (and sample_fmt if a
 * decoder was tried) for audio, or pix_fmt (if a decoder was tried) for video.
 */
1823 static int has_codec_parameters(AVStream *st)
1825 AVCodecContext *avctx = st->codec;
1828 switch (avctx->codec_type) {
1829 case AVMEDIA_TYPE_AUDIO:
1830 val = avctx->sample_rate && avctx->channels;
1831 if (st->info->found_decoder >= 0 &&
1832 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1835 case AVMEDIA_TYPE_VIDEO:
1837 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1844 return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
/*
 * H.264 has a variable decode delay; require at least 6 decoded frames
 * before trusting it. All other codecs are considered settled immediately.
 */
1847 static int has_decode_delay_been_guessed(AVStream *st)
1849 return st->codec->codec_id != AV_CODEC_ID_H264 ||
1850 st->info->nb_decoded_frames >= 6;
1853 /* Returns 1 if decoded data was returned, 0 if not, or a negative error. */
/*
 * Decode one packet (or flush with an empty packet) purely to fill in the
 * stream's codec parameters. Opens the decoder on first use, recording the
 * outcome in st->info->found_decoder (1 = opened, -1 = failed/unavailable).
 */
1854 static int try_decode_frame(AVStream *st, AVPacket *avpkt,
1855 AVDictionary **options)
1857 const AVCodec *codec;
1858 int got_picture = 1, ret = 0;
1859 AVFrame *frame = av_frame_alloc();
1860 AVPacket pkt = *avpkt;
1863 return AVERROR(ENOMEM);
/* Lazily open the decoder the first time this stream is probed. */
1865 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
1866 AVDictionary *thread_opt = NULL;
1868 codec = st->codec->codec ? st->codec->codec
1869 : avcodec_find_decoder(st->codec->codec_id);
1872 st->info->found_decoder = -1;
1877 /* Force thread count to 1 since the H.264 decoder will not extract
1878 * SPS and PPS to extradata during multi-threaded decoding. */
1879 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
1880 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
1882 av_dict_free(&thread_opt);
1884 st->info->found_decoder = -1;
1887 st->info->found_decoder = 1;
1888 } else if (!st->info->found_decoder)
1889 st->info->found_decoder = 1;
1891 if (st->info->found_decoder < 0) {
/* Keep decoding until the parameters are known (or input is exhausted);
 * an empty pkt.data with got_picture set drives the flush path. */
1896 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
1898 (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
1899 (!st->codec_info_nb_frames &&
1900 (st->codec->codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF)))) {
1902 switch (st->codec->codec_type) {
1903 case AVMEDIA_TYPE_VIDEO:
1904 ret = avcodec_decode_video2(st->codec, frame,
1905 &got_picture, &pkt);
1907 case AVMEDIA_TYPE_AUDIO:
1908 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
1915 st->info->nb_decoded_frames++;
1923 av_frame_free(&frame);
/*
 * Linear lookup of the container tag for a codec id in a codec-tag table
 * terminated by AV_CODEC_ID_NONE. (Loop body elided in this excerpt.)
 */
1927 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
1929 while (tags->id != AV_CODEC_ID_NONE) {
/*
 * Reverse lookup: find the codec id for a container tag. First pass matches
 * the tag exactly; second pass retries case-insensitively (toupper4) to cope
 * with mixed-case fourccs.
 */
1940 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1943 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1944 if (tag == tags[i].tag)
1946 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1947 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
1949 return AV_CODEC_ID_NONE;
/*
 * Map raw-PCM properties to a codec id: bits per sample, float flag,
 * big-endian flag, and a signedness bitmask (bit bps-1 set = signed at
 * that width).
 */
1954 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
1957 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
1959 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
1961 return AV_CODEC_ID_NONE;
/* Signed variants. */
1962 if (sflags & (1 << (bps - 1))) {
1965 return AV_CODEC_ID_PCM_S8;
1967 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
1969 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
1971 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
1973 return AV_CODEC_ID_NONE;
/* Unsigned variants. */
1978 return AV_CODEC_ID_PCM_U8;
1980 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
1982 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
1984 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
1986 return AV_CODEC_ID_NONE;
/*
 * Search a NULL-terminated list of codec-tag tables for the tag matching id.
 */
1992 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
1995 for (i = 0; tags && tags[i]; i++) {
1996 int tag = ff_codec_get_tag(tags[i], id);
/*
 * Search a NULL-terminated list of codec-tag tables for the codec id
 * matching a container tag; AV_CODEC_ID_NONE if no table has it.
 */
2006 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
2009 for (i = 0; tags && tags[i]; i++) {
2010 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
2011 if (id != AV_CODEC_ID_NONE)
2014 return AV_CODEC_ID_NONE;
/*
 * Fill in missing chapter end times: each open-ended chapter ends at the
 * earliest later chapter start (rescaled into its own time base), or at the
 * end of the file, falling back to its own start when nothing bounds it.
 */
2014 static void compute_chapters_end(AVFormatContext *s)
2017 int64_t max_time = s->duration +
2018 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2020 for (i = 0; i < s->nb_chapters; i++)
2021 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2022 AVChapter *ch = s->chapters[i];
2023 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
/* Clip the candidate end to the start of the next-later chapter. */
2027 for (j = 0; j < s->nb_chapters; j++) {
2028 AVChapter *ch1 = s->chapters[j];
2029 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2031 if (j != i && next_start > ch->start && next_start < end)
2034 ch->end = (end == INT64_MAX) ? ch->start : end;
/*
 * Return the i-th "standard" frame rate candidate, scaled so all entries
 * share the 12*1001 denominator used by the fps-guessing code below:
 * first the n/1.001 family, then 24/30/60/12/15 exactly.
 */
2038 static int get_std_framerate(int i)
2041 return (i + 1) * 1001;
2043 return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
/*
 * Probe the input by reading (and if necessary decoding) packets until every
 * stream's codec parameters are known, a size/duration limit is hit, or EOF.
 * Side effects: opens/closes decoders, fills extradata via parser split(),
 * estimates frame rates and timings, sets audio dispositions.
 * NOTE(review): this excerpt is missing intermediate source lines; comments
 * describe only the visible structure.
 */
2046 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2048 int i, count, ret, read_size, j;
2050 AVPacket pkt1, *pkt;
2051 int64_t old_offset = avio_tell(ic->pb);
2052 // new streams might appear, no options for those
2053 int orig_nb_streams = ic->nb_streams;
/* Pass 1: set up parsers and try opening decoders cheaply, in case that
 * alone yields the parameters. */
2055 for (i = 0; i < ic->nb_streams; i++) {
2056 const AVCodec *codec;
2057 AVDictionary *thread_opt = NULL;
2058 st = ic->streams[i];
2060 // only for the split stuff
2061 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2062 st->parser = av_parser_init(st->codec->codec_id);
2063 if (st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser)
2064 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2066 codec = st->codec->codec ? st->codec->codec
2067 : avcodec_find_decoder(st->codec->codec_id);
2069 /* Force thread count to 1 since the H.264 decoder will not extract
2070 * SPS and PPS to extradata during multi-threaded decoding. */
2071 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2073 /* Ensure that subtitle_header is properly set. */
2074 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2075 && codec && !st->codec->codec)
2076 avcodec_open2(st->codec, codec,
2077 options ? &options[i] : &thread_opt);
2079 // Try to just open decoders, in case this is enough to get parameters.
2080 if (!has_codec_parameters(st)) {
2081 if (codec && !st->codec->codec)
2082 avcodec_open2(st->codec, codec,
2083 options ? &options[i] : &thread_opt);
2086 av_dict_free(&thread_opt);
2089 for (i = 0; i < ic->nb_streams; i++) {
2090 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2091 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
/* Main probing loop (loop header elided in this excerpt). */
2097 if (ff_check_interrupt(&ic->interrupt_callback)) {
2099 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2103 /* check if one codec still needs to be handled */
2104 for (i = 0; i < ic->nb_streams; i++) {
2105 int fps_analyze_framecount = 20;
2107 st = ic->streams[i];
2108 if (!has_codec_parameters(st))
2110 /* If the timebase is coarse (like the usual millisecond precision
2111 * of mkv), we need to analyze more frames to reliably arrive at
2112 * the correct fps. */
2113 if (av_q2d(st->time_base) > 0.0005)
2114 fps_analyze_framecount *= 2;
2115 if (ic->fps_probe_size >= 0)
2116 fps_analyze_framecount = ic->fps_probe_size;
2117 /* variable fps and no guess at the real fps */
2118 if (!st->avg_frame_rate.num &&
2119 st->codec_info_nb_frames < fps_analyze_framecount &&
2120 st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2122 if (st->parser && st->parser->parser->split &&
2123 !st->codec->extradata)
2125 if (st->first_dts == AV_NOPTS_VALUE &&
2126 st->codec_info_nb_frames < ic->max_ts_probe &&
2127 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2128 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2131 if (i == ic->nb_streams) {
2132 /* NOTE: If the format has no header, then we need to read some
2133 * packets to get most of the streams, so we cannot stop here. */
2134 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2135 /* If we found the info for all the codecs, we can stop. */
2137 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2141 /* We did not get all the codec info, but we read too much data. */
2142 if (read_size >= ic->probesize) {
2144 av_log(ic, AV_LOG_DEBUG,
2145 "Probe buffer size limit %d reached\n", ic->probesize);
2149 /* NOTE: A new stream can be added there if no header in file
2150 * (AVFMTCTX_NOHEADER). */
2151 ret = read_frame_internal(ic, &pkt1);
2152 if (ret == AVERROR(EAGAIN))
/* EOF path: flush decoders with an empty packet to drain remaining
 * parameters, then warn about streams that are still incomplete. */
2157 AVPacket empty_pkt = { 0 };
2159 av_init_packet(&empty_pkt);
2161 /* We could not have all the codec parameters before EOF. */
2163 for (i = 0; i < ic->nb_streams; i++) {
2164 st = ic->streams[i];
2166 /* flush the decoders */
2167 if (st->info->found_decoder == 1) {
2169 err = try_decode_frame(st, &empty_pkt,
2170 (options && i < orig_nb_streams)
2171 ? &options[i] : NULL);
2172 } while (err > 0 && !has_codec_parameters(st));
2176 av_log(ic, AV_LOG_WARNING,
2177 "decoding for stream %d failed\n", st->index);
2178 } else if (!has_codec_parameters(st)) {
2180 avcodec_string(buf, sizeof(buf), st->codec, 0);
2181 av_log(ic, AV_LOG_WARNING,
2182 "Could not find codec parameters (%s)\n", buf);
2192 if (!(ic->flags & AVFMT_FLAG_NOBUFFER)) {
2193 ret = add_to_pktbuf(&ic->internal->packet_buffer, pkt,
2194 &ic->internal->packet_buffer_end, 0);
2196 goto find_stream_info_err;
2199 read_size += pkt->size;
2201 st = ic->streams[pkt->stream_index];
/* DTS sanity checks used for fps estimation: reset the first/last dts
 * bookkeeping on non-increasing dts or a large discontinuity. */
2202 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2203 /* check for non-increasing dts */
2204 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2205 st->info->fps_last_dts >= pkt->dts) {
2206 av_log(ic, AV_LOG_WARNING,
2207 "Non-increasing DTS in stream %d: packet %d with DTS "
2208 "%"PRId64", packet %d with DTS %"PRId64"\n",
2209 st->index, st->info->fps_last_dts_idx,
2210 st->info->fps_last_dts, st->codec_info_nb_frames,
2212 st->info->fps_first_dts =
2213 st->info->fps_last_dts = AV_NOPTS_VALUE;
2215 /* Check for a discontinuity in dts. If the difference in dts
2216 * is more than 1000 times the average packet duration in the
2217 * sequence, we treat it as a discontinuity. */
2218 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2219 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2220 (pkt->dts - st->info->fps_last_dts) / 1000 >
2221 (st->info->fps_last_dts - st->info->fps_first_dts) /
2222 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2223 av_log(ic, AV_LOG_WARNING,
2224 "DTS discontinuity in stream %d: packet %d with DTS "
2225 "%"PRId64", packet %d with DTS %"PRId64"\n",
2226 st->index, st->info->fps_last_dts_idx,
2227 st->info->fps_last_dts, st->codec_info_nb_frames,
2229 st->info->fps_first_dts =
2230 st->info->fps_last_dts = AV_NOPTS_VALUE;
2233 /* update stored dts values */
2234 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2235 st->info->fps_first_dts = pkt->dts;
2236 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2238 st->info->fps_last_dts = pkt->dts;
2239 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2241 /* check max_analyze_duration */
2242 if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2243 AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2244 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached\n",
2245 ic->max_analyze_duration);
2246 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2247 av_packet_unref(pkt);
/* Extract extradata from the bitstream via the parser's split(). */
2251 if (st->parser && st->parser->parser->split && !st->codec->extradata) {
2252 int i = st->parser->parser->split(st->codec, pkt->data, pkt->size);
2253 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2254 st->codec->extradata_size = i;
2255 st->codec->extradata = av_mallocz(st->codec->extradata_size +
2256 AV_INPUT_BUFFER_PADDING_SIZE);
2257 if (!st->codec->extradata)
2258 return AVERROR(ENOMEM);
2259 memcpy(st->codec->extradata, pkt->data,
2260 st->codec->extradata_size);
2264 /* If still no information, we try to open the codec and to
2265 * decompress the frame. We try to avoid that in most cases as
2266 * it takes longer and uses more memory. For MPEG-4, we need to
2267 * decompress for QuickTime.
2269 * If AV_CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2270 * least one frame of codec data, this makes sure the codec initializes
2271 * the channel configuration and does not only trust the values from
2273 try_decode_frame(st, pkt,
2274 (options && i < orig_nb_streams) ? &options[i] : NULL);
2276 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2277 av_packet_unref(pkt);
2279 st->codec_info_nb_frames++;
2283 // close codecs which were opened in try_decode_frame()
2284 for (i = 0; i < ic->nb_streams; i++) {
2285 st = ic->streams[i];
2286 avcodec_close(st->codec);
/* Post-processing: estimate fps for video, snap it to a standard rate when
 * within 1%, and fill in audio defaults/dispositions. */
2288 for (i = 0; i < ic->nb_streams; i++) {
2289 st = ic->streams[i];
2290 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2291 /* estimate average framerate if not set by demuxer */
2292 if (!st->avg_frame_rate.num &&
2293 st->info->fps_last_dts != st->info->fps_first_dts) {
2294 int64_t delta_dts = st->info->fps_last_dts -
2295 st->info->fps_first_dts;
2296 int delta_packets = st->info->fps_last_dts_idx -
2297 st->info->fps_first_dts_idx;
2299 double best_error = 0.01;
2301 if (delta_dts >= INT64_MAX / st->time_base.num ||
2302 delta_packets >= INT64_MAX / st->time_base.den ||
2305 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2306 delta_packets * (int64_t) st->time_base.den,
2307 delta_dts * (int64_t) st->time_base.num, 60000);
2309 /* Round guessed framerate to a "standard" framerate if it's
2310 * within 1% of the original estimate. */
2311 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
2312 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
2313 double error = fabs(av_q2d(st->avg_frame_rate) /
2314 av_q2d(std_fps) - 1);
2316 if (error < best_error) {
2318 best_fps = std_fps.num;
2322 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2323 best_fps, 12 * 1001, INT_MAX);
2325 } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2326 if (!st->codec->bits_per_coded_sample)
2327 st->codec->bits_per_coded_sample =
2328 av_get_bits_per_sample(st->codec->codec_id);
2329 // set stream disposition based on audio service type
2330 switch (st->codec->audio_service_type) {
2331 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2332 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
2334 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2335 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
2337 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2338 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
2340 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2341 st->disposition = AV_DISPOSITION_COMMENT;
2343 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2344 st->disposition = AV_DISPOSITION_KARAOKE;
2350 estimate_timings(ic, old_offset);
2352 compute_chapters_end(ic);
/* Cleanup shared by success and error paths. */
2354 find_stream_info_err:
2355 for (i = 0; i < ic->nb_streams; i++) {
2356 ic->streams[i]->codec->thread_count = 0;
2357 av_freep(&ic->streams[i]->info);
/*
 * Return the first program whose stream_index list contains stream s,
 * or fall through (NULL) if no program references it.
 */
2362 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2366 for (i = 0; i < ic->nb_programs; i++)
2367 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2368 if (ic->programs[i]->stream_index[j] == s)
2369 return ic->programs[i];
/*
 * Pick the "best" stream of a media type: restrict to the related stream's
 * program when given, skip impaired-audience dispositions, prefer the stream
 * with the most probed frames, and optionally return its decoder. Returns a
 * stream index, AVERROR_STREAM_NOT_FOUND, or AVERROR_DECODER_NOT_FOUND.
 */
2373 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
2374 int wanted_stream_nb, int related_stream,
2375 AVCodec **decoder_ret, int flags)
2377 int i, nb_streams = ic->nb_streams;
2378 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2379 unsigned *program = NULL;
2380 AVCodec *decoder = NULL, *best_decoder = NULL;
/* Limit the candidate set to the program containing related_stream. */
2382 if (related_stream >= 0 && wanted_stream_nb < 0) {
2383 AVProgram *p = find_program_from_stream(ic, related_stream);
2385 program = p->stream_index;
2386 nb_streams = p->nb_stream_indexes;
2389 for (i = 0; i < nb_streams; i++) {
2390 int real_stream_index = program ? program[i] : i;
2391 AVStream *st = ic->streams[real_stream_index];
2392 AVCodecContext *avctx = st->codec;
2393 if (avctx->codec_type != type)
2395 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2397 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
2398 AV_DISPOSITION_VISUAL_IMPAIRED))
2401 decoder = avcodec_find_decoder(st->codec->codec_id);
2404 ret = AVERROR_DECODER_NOT_FOUND;
/* Keep the stream with the most frames seen during probing. */
2408 if (best_count >= st->codec_info_nb_frames)
2410 best_count = st->codec_info_nb_frames;
2411 ret = real_stream_index;
2412 best_decoder = decoder;
2413 if (program && i == nb_streams - 1 && ret < 0) {
2415 nb_streams = ic->nb_streams;
2416 /* no related stream found, try again with everything */
2421 *decoder_ret = best_decoder;
2425 /*******************************************************/
/*
 * Resume playback: prefer the demuxer's read_play() hook, otherwise
 * unpause the AVIO layer; ENOSYS when neither is available.
 */
2427 int av_read_play(AVFormatContext *s)
2429 if (s->iformat->read_play)
2430 return s->iformat->read_play(s);
2432 return avio_pause(s->pb, 0);
2433 return AVERROR(ENOSYS);
/*
 * Pause playback: mirror of av_read_play() — demuxer read_pause() hook
 * first, then AVIO-level pause; ENOSYS when neither is available.
 */
2436 int av_read_pause(AVFormatContext *s)
2438 if (s->iformat->read_pause)
2439 return s->iformat->read_pause(s);
2441 return avio_pause(s->pb, 1);
2442 return AVERROR(ENOSYS);
/*
 * Free one AVStream and everything it owns: side data, parser, attached
 * picture, internal struct, metadata, probe buffer, index, codec buffers
 * and private data. Takes AVStream** so the caller's pointer can be cleared
 * (clearing line elided in this excerpt).
 */
2445 static void free_stream(AVStream **pst)
2447 AVStream *st = *pst;
2453 for (i = 0; i < st->nb_side_data; i++)
2454 av_freep(&st->side_data[i].data);
2455 av_freep(&st->side_data);
2458 av_parser_close(st->parser);
2460 if (st->attached_pic.data)
2461 av_packet_unref(&st->attached_pic);
2463 av_freep(&st->internal);
2465 av_dict_free(&st->metadata);
2466 av_freep(&st->probe_data.buf);
2467 av_free(st->index_entries);
2468 av_free(st->codec->extradata);
2469 av_free(st->codec->subtitle_header);
2471 av_free(st->priv_data);
/*
 * Free an AVFormatContext and all owned resources: private options and data,
 * every stream, all programs (metadata, stream index list), chapters,
 * context metadata and the internal struct.
 */
2477 void avformat_free_context(AVFormatContext *s)
2485 if (s->iformat && s->iformat->priv_class && s->priv_data)
2486 av_opt_free(s->priv_data);
2488 for (i = 0; i < s->nb_streams; i++)
2489 free_stream(&s->streams[i]);
2491 for (i = s->nb_programs - 1; i >= 0; i--) {
2492 av_dict_free(&s->programs[i]->metadata);
2493 av_freep(&s->programs[i]->stream_index);
2494 av_freep(&s->programs[i]);
2496 av_freep(&s->programs);
2497 av_freep(&s->priv_data);
2498 while (s->nb_chapters--) {
2499 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2500 av_free(s->chapters[s->nb_chapters]);
2502 av_freep(&s->chapters);
2503 av_dict_free(&s->metadata);
2504 av_freep(&s->streams);
2505 av_freep(&s->internal);
/*
 * Close an input context opened with avformat_open_input(): flush queued
 * packets, let the demuxer clean up via read_close(), free the context,
 * and close the AVIO stream unless the demuxer is file-less or the caller
 * supplied custom I/O.
 */
2509 void avformat_close_input(AVFormatContext **ps)
2511 AVFormatContext *s = *ps;
2512 AVIOContext *pb = s->pb;
2514 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2515 (s->flags & AVFMT_FLAG_CUSTOM_IO))
2518 flush_packet_queue(s);
2521 if (s->iformat->read_close)
2522 s->iformat->read_close(s);
2524 avformat_free_context(s);
/*
 * Allocate and append a new AVStream to the context, initializing its codec
 * context (for decoder c when given), info/internal structs, and all
 * timestamp fields to their "unknown" sentinels.
 */
2531 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
2536 if (av_reallocp_array(&s->streams, s->nb_streams + 1,
2537 sizeof(*s->streams)) < 0) {
2542 st = av_mallocz(sizeof(AVStream));
2545 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2550 st->codec = avcodec_alloc_context3(c);
2557 st->internal = av_mallocz(sizeof(*st->internal));
2562 /* no default bitrate if decoding */
2563 st->codec->bit_rate = 0;
2565 /* default pts setting is MPEG-like */
2566 avpriv_set_pts_info(st, 33, 1, 90000);
2567 /* we set the current DTS to 0 so that formats without any timestamps
2568 * but durations get some timestamps, formats with some unknown
2569 * timestamps have their first few packets buffered and the
2570 * timestamps corrected before they are returned to the user */
2573 st->cur_dts = AV_NOPTS_VALUE;
/* Mark every timing field as unknown until the demuxer fills it in. */
2576 st->index = s->nb_streams;
2577 st->start_time = AV_NOPTS_VALUE;
2578 st->duration = AV_NOPTS_VALUE;
2579 st->first_dts = AV_NOPTS_VALUE;
2580 st->probe_packets = MAX_PROBE_PACKETS;
2582 st->last_IP_pts = AV_NOPTS_VALUE;
2583 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
2584 st->pts_buffer[i] = AV_NOPTS_VALUE;
2586 st->sample_aspect_ratio = (AVRational) { 0, 1 };
2588 st->info->fps_first_dts = AV_NOPTS_VALUE;
2589 st->info->fps_last_dts = AV_NOPTS_VALUE;
2591 s->streams[s->nb_streams++] = st;
/*
 * Return the program with the given id, creating and registering a new one
 * (with discard = AVDISCARD_NONE) if none exists yet.
 */
2598 AVProgram *av_new_program(AVFormatContext *ac, int id)
2600 AVProgram *program = NULL;
2603 av_log(ac, AV_LOG_TRACE, "new_program: id=0x%04x\n", id);
2605 for (i = 0; i < ac->nb_programs; i++)
2606 if (ac->programs[i]->id == id)
2607 program = ac->programs[i];
2610 program = av_mallocz(sizeof(AVProgram));
2613 dynarray_add(&ac->programs, &ac->nb_programs, program);
2614 program->discard = AVDISCARD_NONE;
/*
 * Return the chapter with the given id, creating it if necessary, and update
 * its title metadata, time base and start/end times.
 */
2621 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
2622 int64_t start, int64_t end, const char *title)
2624 AVChapter *chapter = NULL;
2627 for (i = 0; i < s->nb_chapters; i++)
2628 if (s->chapters[i]->id == id)
2629 chapter = s->chapters[i];
2632 chapter = av_mallocz(sizeof(AVChapter));
2635 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2637 av_dict_set(&chapter->metadata, "title", title, 0);
2639 chapter->time_base = time_base;
2640 chapter->start = start;
2646 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
2649 AVProgram *program = NULL;
2651 if (idx >= ac->nb_streams) {
2652 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
2656 for (i = 0; i < ac->nb_programs; i++) {
2657 if (ac->programs[i]->id != progid)
2659 program = ac->programs[i];
2660 for (j = 0; j < program->nb_stream_indexes; j++)
2661 if (program->stream_index[j] == idx)
2664 if (av_reallocp_array(&program->stream_index,
2665 program->nb_stream_indexes + 1,
2666 sizeof(*program->stream_index)) < 0) {
2667 program->nb_stream_indexes = 0;
2670 program->stream_index[program->nb_stream_indexes++] = idx;
2675 uint64_t ff_ntp_time(void)
2677 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/**
 * Expand a "%d"-style frame-number pattern in @p path into @p buf.
 * Exactly one "%d" (optionally zero-padded, e.g. "%05d") must appear;
 * "%%" emits a literal '%'. Any other conversion, a second "%d", or an
 * overflow of @p buf is an error.
 *
 * @return 0 on success, -1 on error (buf still NUL-terminated)
 */
int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q = buf;
    p = path;
    percentd_found = 0;
    for (;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            do {
                /* Collect an optional zero-pad width, e.g. "%05d". */
                nd = 0;
                while (av_isdigit(*p))
                    nd = nd * 10 + *p++ - '0';
                c = *p++;
            } while (av_isdigit(c));

            switch (c) {
            case '%':
                goto addchar;
            case 'd':
                if (percentd_found)
                    goto fail;          /* only one %d allowed */
                percentd_found = 1;
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;          /* would overflow buf */
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;
            }
        } else {
addchar:
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;
fail:
    *q = '\0';
    return -1;
}
/**
 * Split a URL of the form proto://user:pass@host:port/path into its
 * components. Any output buffer may be NULL-sized (size 0) to skip that
 * component; *port_ptr is set to -1 when no port is present.
 * A URL without "proto:" is treated as a plain filename and copied
 * entirely into @p path.
 */
void av_url_split(char *proto, int proto_size,
                  char *authorization, int authorization_size,
                  char *hostname, int hostname_size,
                  int *port_ptr, char *path, int path_size, const char *url)
{
    const char *p, *ls, *at, *col, *brk;

    if (port_ptr)
        *port_ptr = -1;
    if (proto_size > 0)
        proto[0] = 0;
    if (authorization_size > 0)
        authorization[0] = 0;
    if (hostname_size > 0)
        hostname[0] = 0;
    if (path_size > 0)
        path[0] = 0;

    /* parse protocol */
    if ((p = strchr(url, ':'))) {
        av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
        p++; /* skip ':' */
        if (*p == '/')
            p++;
        if (*p == '/')
            p++;
    } else {
        /* no protocol means plain filename */
        av_strlcpy(path, url, path_size);
        return;
    }

    /* separate path from hostname */
    ls = strchr(p, '/');
    if (!ls)
        ls = strchr(p, '?');
    if (ls)
        av_strlcpy(path, ls, path_size);
    else
        ls = &p[strlen(p)]; // XXX

    /* the rest is hostname, use that to parse auth/port */
    if (ls != p) {
        /* authorization (user[:pass]@hostname) */
        if ((at = strchr(p, '@')) && at < ls) {
            av_strlcpy(authorization, p,
                       FFMIN(authorization_size, at + 1 - p));
            p = at + 1; /* skip '@' */
        }

        if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
            /* [host]:port — bracketed IPv6 literal */
            av_strlcpy(hostname, p + 1,
                       FFMIN(hostname_size, brk - p));
            if (brk[1] == ':' && port_ptr)
                *port_ptr = atoi(brk + 2);
        } else if ((col = strchr(p, ':')) && col < ls) {
            /* host:port */
            av_strlcpy(hostname, p,
                       FFMIN(col + 1 - p, hostname_size));
            if (port_ptr)
                *port_ptr = atoi(col + 1);
        } else {
            /* bare hostname, no port */
            av_strlcpy(hostname, p,
                       FFMIN(ls + 1 - p, hostname_size));
        }
    }
}
2800 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
2803 static const char hex_table_uc[16] = { '0', '1', '2', '3',
2806 'C', 'D', 'E', 'F' };
2807 static const char hex_table_lc[16] = { '0', '1', '2', '3',
2810 'c', 'd', 'e', 'f' };
2811 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
2813 for (i = 0; i < s; i++) {
2814 buff[i * 2] = hex_table[src[i] >> 4];
2815 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
2821 int ff_hex_to_data(uint8_t *data, const char *p)
2828 p += strspn(p, SPACE_CHARS);
2831 c = av_toupper((unsigned char) *p++);
2832 if (c >= '0' && c <= '9')
2834 else if (c >= 'A' && c <= 'F')
2849 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
2850 unsigned int pts_num, unsigned int pts_den)
2853 if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
2854 if (new_tb.num != pts_num)
2855 av_log(NULL, AV_LOG_DEBUG,
2856 "st:%d removing common factor %d from timebase\n",
2857 s->index, pts_num / new_tb.num);
2859 av_log(NULL, AV_LOG_WARNING,
2860 "st:%d has too large timebase, reducing\n", s->index);
2862 if (new_tb.num <= 0 || new_tb.den <= 0) {
2863 av_log(NULL, AV_LOG_ERROR,
2864 "Ignoring attempt to set invalid timebase for st:%d\n",
2868 s->time_base = new_tb;
2869 s->pts_wrap_bits = pts_wrap_bits;
2872 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
2875 const char *ptr = str;
2877 /* Parse key=value pairs. */
2880 char *dest = NULL, *dest_end;
2881 int key_len, dest_len = 0;
2883 /* Skip whitespace and potential commas. */
2884 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
2891 if (!(ptr = strchr(key, '=')))
2894 key_len = ptr - key;
2896 callback_get_buf(context, key, key_len, &dest, &dest_len);
2897 dest_end = dest + dest_len - 1;
2901 while (*ptr && *ptr != '\"') {
2905 if (dest && dest < dest_end)
2909 if (dest && dest < dest_end)
2917 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
2918 if (dest && dest < dest_end)
2926 int ff_find_stream_index(AVFormatContext *s, int id)
2929 for (i = 0; i < s->nb_streams; i++)
2930 if (s->streams[i]->id == id)
2935 int64_t ff_iso8601_to_unix_time(const char *datestr)
2937 struct tm time1 = { 0 }, time2 = { 0 };
2938 const char *ret1, *ret2;
2939 ret1 = av_small_strptime(datestr, "%Y - %m - %d %T", &time1);
2940 ret2 = av_small_strptime(datestr, "%Y - %m - %dT%T", &time2);
2942 return av_timegm(&time2);
2944 return av_timegm(&time1);
2947 int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
2951 if (ofmt->query_codec)
2952 return ofmt->query_codec(codec_id, std_compliance);
2953 else if (ofmt->codec_tag)
2954 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
2955 else if (codec_id == ofmt->video_codec ||
2956 codec_id == ofmt->audio_codec ||
2957 codec_id == ofmt->subtitle_codec)
2960 return AVERROR_PATCHWELCOME;
/**
 * Globally initialize network components (sockets, TLS).
 * A no-op when the build has no network support.
 *
 * @return 0 on success, a negative AVERROR on failure
 */
int avformat_network_init(void)
{
#if CONFIG_NETWORK
    int ret;

    ff_network_inited_globally = 1;
    if ((ret = ff_network_init()) < 0)
        return ret;
    ff_tls_init();
#endif
    return 0;
}
/**
 * Undo the global initialization performed by avformat_network_init().
 * A no-op when the build has no network support.
 *
 * @return 0
 */
int avformat_network_deinit(void)
{
#if CONFIG_NETWORK
    ff_network_close();
    ff_tls_deinit();
#endif
    return 0;
}
2984 int ff_add_param_change(AVPacket *pkt, int32_t channels,
2985 uint64_t channel_layout, int32_t sample_rate,
2986 int32_t width, int32_t height)
2992 return AVERROR(EINVAL);
2995 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
2997 if (channel_layout) {
2999 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
3003 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
3005 if (width || height) {
3007 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
3009 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
3011 return AVERROR(ENOMEM);
3012 bytestream_put_le32(&data, flags);
3014 bytestream_put_le32(&data, channels);
3016 bytestream_put_le64(&data, channel_layout);
3018 bytestream_put_le32(&data, sample_rate);
3019 if (width || height) {
3020 bytestream_put_le32(&data, width);
3021 bytestream_put_le32(&data, height);
/* Install known-good AVC-Intra H.264 extradata (SPS + PPS NAL units) on
 * the stream's codec context, selected by coded width and field order.
 * The byte tables below are fixed bitstream headers; do not edit them.
 * NOTE(review): this extract is gap-sampled — some table rows and the
 * final return are not visible here. */
3026 int ff_generate_avci_extradata(AVStream *st)
/* AVC-Intra 100, 1920x1080 progressive. */
3028     static const uint8_t avci100_1080p_extradata[] = {
3030         0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3031         0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3032         0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3033         0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
3034         0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
3035         0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
3036         0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
3037         0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
3038         0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3040         0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
/* AVC-Intra 100, 1920x1080 interlaced. */
3043     static const uint8_t avci100_1080i_extradata[] = {
3045         0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3046         0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3047         0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3048         0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
3049         0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
3050         0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
3051         0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
3052         0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
3053         0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
3054         0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
3055         0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
3057         0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
/* AVC-Intra 50, 1440x1080 interlaced. */
3060     static const uint8_t avci50_1080i_extradata[] = {
3062         0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
3063         0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
3064         0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
3065         0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
3066         0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
3067         0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
3068         0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
3069         0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
3070         0x81, 0x13, 0xf7, 0xff, 0x80, 0x01, 0x80, 0x02,
3071         0x71, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
3072         0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
3074         0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
/* AVC-Intra 100, 1280x720 progressive. */
3077     static const uint8_t avci100_720p_extradata[] = {
3079         0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3080         0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
3081         0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
3082         0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
3083         0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
3084         0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
3085         0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
3086         0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
3087         0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
3088         0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
3090         0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
3094     const uint8_t *data = NULL;
/* Pick the table matching the stream's coded width / field order. */
3097     if (st->codec->width == 1920) {
3098         if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
3099             data = avci100_1080p_extradata;
3100             size = sizeof(avci100_1080p_extradata);
3102             data = avci100_1080i_extradata;
3103             size = sizeof(avci100_1080i_extradata);
3105     } else if (st->codec->width == 1440) {
3106         data = avci50_1080i_extradata;
3107         size = sizeof(avci50_1080i_extradata);
3108     } else if (st->codec->width == 1280) {
3109         data = avci100_720p_extradata;
3110         size = sizeof(avci100_720p_extradata);
/* Replace any existing extradata with a padded copy of the table. */
3116     av_freep(&st->codec->extradata);
3117     st->codec->extradata_size = 0;
3118     st->codec->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
3119     if (!st->codec->extradata)
3120         return AVERROR(ENOMEM);
3122     memcpy(st->codec->extradata, data, size);
3123     st->codec->extradata_size = size;
3128 uint8_t *av_stream_get_side_data(AVStream *st, enum AVPacketSideDataType type,
3133 for (i = 0; i < st->nb_side_data; i++) {
3134 if (st->side_data[i].type == type) {
3136 *size = st->side_data[i].size;
3137 return st->side_data[i].data;
3143 uint8_t *av_stream_new_side_data(AVStream *st, enum AVPacketSideDataType type,
3146 AVPacketSideData *sd, *tmp;
3148 uint8_t *data = av_malloc(size);
3153 for (i = 0; i < st->nb_side_data; i++) {
3154 sd = &st->side_data[i];
3156 if (sd->type == type) {
3157 av_freep(&sd->data);
3164 tmp = av_realloc_array(st->side_data, st->nb_side_data + 1, sizeof(*tmp));
3170 st->side_data = tmp;
3173 sd = &st->side_data[st->nb_side_data - 1];