2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
42 #include "audiointerleave.h"
55 * various utility functions for use within Libav
58 unsigned avformat_version(void)
60 return LIBAVFORMAT_VERSION_INT;
63 const char *avformat_configuration(void)
65 return LIBAV_CONFIGURATION;
68 const char *avformat_license(void)
70 #define LICENSE_PREFIX "libavformat license: "
71 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
74 /* an arbitrarily chosen "sane" max packet size -- 50M */
75 #define SANE_CHUNK_SIZE (50000000)
77 /* Read the data in sane-sized chunks and append to pkt.
78 * Return the number of bytes read or an error. */
/* NOTE(review): this listing has lines elided (loop header, error-break
 * paths, final shrink); the surviving code is kept byte-identical. */
79 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
81 int64_t chunk_size = size;
/* pkt->pos is saved up front because growing the packet may reset it. */
82 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
83 int orig_size = pkt->size;
/* prev_size marks where this chunk's data starts inside pkt->data. */
87 int prev_size = pkt->size;
90 /* When the caller requests a lot of data, limit it to the amount
91 * left in file or SANE_CHUNK_SIZE when it is not known. */
92 if (size > SANE_CHUNK_SIZE) {
/* avio_size() may fail (negative), in which case FFMAX below falls
 * back to SANE_CHUNK_SIZE. */
93 int64_t filesize = avio_size(s) - avio_tell(s);
94 chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
96 read_size = FFMIN(size, chunk_size);
98 ret = av_grow_packet(pkt, read_size);
102 ret = avio_read(s, pkt->data + prev_size, read_size);
/* Short read: trim the packet back to the bytes actually received. */
103 if (ret != read_size) {
104 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
/* Return bytes appended when any were, otherwise the last error code. */
114 return pkt->size > orig_size ? pkt->size - orig_size : ret;
117 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
122 pkt->pos = avio_tell(s);
124 return append_packet_chunked(s, pkt, size);
127 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
130 return av_get_packet(s, pkt, size);
131 return append_packet_chunked(s, pkt, size);
/* Check whether filename contains a valid frame-number pattern
 * (e.g. "%d") by attempting to expand it once.
 * Returns non-zero when the pattern expands successfully. */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename &&
           (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
}
/* Run format probing on a stream's accumulated probe buffer and, when the
 * detected input format maps to a known raw codec, set the stream's
 * codec_id/codec_type accordingly.
 * NOTE(review): lines are elided in this listing (struct fields, table
 * terminator, return); surviving code kept byte-identical. */
141 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
142 AVProbeData *pd, int score)
/* Static table mapping raw-demuxer names to codec id/type. */
144 static const struct {
147 enum AVMediaType type;
149 { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO },
150 { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO },
151 { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO },
152 { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO },
153 { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO },
154 { "latm", AV_CODEC_ID_AAC_LATM, AVMEDIA_TYPE_AUDIO },
155 { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO },
156 { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO },
157 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
/* Probe with is_opened=1 so AVFMT_NOFILE formats are considered too. */
160 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
164 av_log(s, AV_LOG_DEBUG,
165 "Probe with size=%d, packets=%d detected %s with score=%d\n",
166 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
/* Linear scan of the name table; stops at the NULL-name sentinel. */
168 for (i = 0; fmt_id_type[i].name; i++) {
169 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
170 st->codec->codec_id = fmt_id_type[i].id;
171 st->codec->codec_type = fmt_id_type[i].type;
179 /************************************************************/
180 /* input media file */
182 /* Open input file and probe the format if necessary. */
/* Open the input (unless custom I/O or AVFMT_NOFILE) and probe the input
 * format when the caller did not force one.
 * NOTE(review): several control-flow lines are elided in this listing;
 * surviving code kept byte-identical. */
183 static int init_input(AVFormatContext *s, const char *filename,
184 AVDictionary **options)
187 AVProbeData pd = { filename, NULL, 0 };
/* Caller supplied its own pb: mark custom I/O, probe through it. */
190 s->flags |= AVFMT_FLAG_CUSTOM_IO;
192 return av_probe_input_buffer(s->pb, &s->iformat, filename,
/* A forced AVFMT_NOFILE format cannot be combined with a custom pb. */
194 else if (s->iformat->flags & AVFMT_NOFILE)
195 return AVERROR(EINVAL);
/* No I/O needed (NOFILE format) or format identified from the name
 * alone: nothing to open. */
199 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
200 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
203 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
204 &s->interrupt_callback, options)) < 0)
208 return av_probe_input_buffer(s->pb, &s->iformat, filename,
/* Append pkt to the singly linked packet list, updating the tail pointer.
 * NOTE(review): allocation-failure check and return are elided in this
 * listing; surviving code kept byte-identical. */
212 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
213 AVPacketList **plast_pktl)
215 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
/* Non-empty list: link after the current tail ... */
220 (*plast_pktl)->next = pktl;
/* ... empty list: new node becomes the head. */
222 *packet_buffer = pktl;
224 /* Add the packet in the buffered packet list. */
230 static int queue_attached_pictures(AVFormatContext *s)
233 for (i = 0; i < s->nb_streams; i++)
234 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
235 s->streams[i]->discard < AVDISCARD_ALL) {
236 AVPacket copy = s->streams[i]->attached_pic;
237 copy.buf = av_buffer_ref(copy.buf);
239 return AVERROR(ENOMEM);
241 add_to_pktbuf(&s->internal->raw_packet_buffer, ©,
242 &s->internal->raw_packet_buffer_end);
/* Open an input stream: allocate/accept a context, apply options, open and
 * probe the input, allocate demuxer private data, read the header, and
 * queue ID3v2 attached pictures.
 * NOTE(review): error-path labels, goto statements and several lines are
 * elided in this listing; surviving code kept byte-identical. */
247 int avformat_open_input(AVFormatContext **ps, const char *filename,
248 AVInputFormat *fmt, AVDictionary **options)
250 AVFormatContext *s = *ps;
252 AVDictionary *tmp = NULL;
253 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
/* Caller may pass *ps == NULL; allocate a context on their behalf. */
255 if (!s && !(s = avformat_alloc_context()))
256 return AVERROR(ENOMEM);
/* Work on a copy of the options so unconsumed entries can be returned. */
261 av_dict_copy(&tmp, *options, 0);
263 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
266 if ((ret = init_input(s, filename, &tmp)) < 0)
269 /* Check filename in case an image number is expected. */
270 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
271 if (!av_filename_number_test(filename)) {
272 ret = AVERROR(EINVAL);
277 s->duration = s->start_time = AV_NOPTS_VALUE;
278 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
280 /* Allocate private data. */
281 if (s->iformat->priv_data_size > 0) {
282 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
283 ret = AVERROR(ENOMEM);
/* Demuxers with an AVClass get defaults plus user options applied. */
286 if (s->iformat->priv_class) {
287 *(const AVClass **) s->priv_data = s->iformat->priv_class;
288 av_opt_set_defaults(s->priv_data);
289 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
294 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
296 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
298 if (s->iformat->read_header)
299 if ((ret = s->iformat->read_header(s)) < 0)
302 if (id3v2_extra_meta &&
303 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
305 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
307 if ((ret = queue_attached_pictures(s)) < 0)
/* Remember where packet data starts, unless read_header already did. */
310 if (s->pb && !s->internal->data_offset)
311 s->internal->data_offset = avio_tell(s->pb);
313 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
316 av_dict_free(options);
/* Failure path: release ID3 metadata, close I/O we opened, free ctx. */
323 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
325 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
327 avformat_free_context(s);
332 /*******************************************************/
/* Feed one packet (or NULL to force a decision) into a stream's probe
 * buffer and try to identify its codec once enough data is gathered.
 * NOTE(review): lines are elided in this listing (err decl, returns,
 * buffer reset); surviving code kept byte-identical. */
334 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
336 if (st->codec->codec_id == AV_CODEC_ID_PROBE) {
337 AVProbeData *pd = &st->probe_data;
338 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
/* Grow the probe buffer and append this packet plus zero padding. */
343 if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
344 AVPROBE_PADDING_SIZE)) < 0)
346 memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
347 pd->buf_size += pkt->size;
348 memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* NULL pkt means end of probing: no further packets will arrive. */
350 st->probe_packets = 0;
352 av_log(s, AV_LOG_ERROR,
353 "nothing to probe for stream %d\n", st->index);
/* Re-probe only when out of packets or when the buffer size crossed a
 * power-of-two boundary (avoids probing after every small packet). */
358 if (!st->probe_packets ||
359 av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
360 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0
361 ? AVPROBE_SCORE_MAX / 4 : 0);
362 if (st->codec->codec_id != AV_CODEC_ID_PROBE) {
365 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
/* Low-level packet read: drain the raw packet buffer first, otherwise call
 * the demuxer's read_packet, apply forced codec ids, optionally drop
 * corrupted packets, and feed still-probing streams.
 * NOTE(review): many lines are elided in this listing (loop header, error
 * handling, returns); surviving code kept byte-identical. */
372 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
378 AVPacketList *pktl = s->internal->raw_packet_buffer;
382 st = s->streams[pkt->stream_index];
/* Buffered packet is returnable once its stream is no longer probing,
 * has no probe budget left, or the raw buffer is out of space. */
383 if (st->codec->codec_id != AV_CODEC_ID_PROBE ||
384 !st->probe_packets ||
385 s->internal->raw_packet_buffer_remaining_size < pkt->size) {
387 if (st->probe_packets)
388 if ((err = probe_codec(s, st, NULL)) < 0)
390 pd = &st->probe_data;
/* Unlink the head of the raw buffer and reclaim its budget. */
393 s->internal->raw_packet_buffer = pktl->next;
394 s->internal->raw_packet_buffer_remaining_size += pkt->size;
403 ret = s->iformat->read_packet(s, pkt);
/* On EOF/error: flush probing state of every stream before returning. */
405 if (!pktl || ret == AVERROR(EAGAIN))
407 for (i = 0; i < s->nb_streams; i++) {
409 if (st->probe_packets)
410 if ((err = probe_codec(s, st, NULL)) < 0)
416 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
417 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
418 av_log(s, AV_LOG_WARNING,
419 "Dropped corrupted packet (stream = %d)\n",
425 st = s->streams[pkt->stream_index];
/* Honor user-forced codec ids per media type. */
427 switch (st->codec->codec_type) {
428 case AVMEDIA_TYPE_VIDEO:
429 if (s->video_codec_id)
430 st->codec->codec_id = s->video_codec_id;
432 case AVMEDIA_TYPE_AUDIO:
433 if (s->audio_codec_id)
434 st->codec->codec_id = s->audio_codec_id;
436 case AVMEDIA_TYPE_SUBTITLE:
437 if (s->subtitle_codec_id)
438 st->codec->codec_id = s->subtitle_codec_id;
442 if (!pktl && (st->codec->codec_id != AV_CODEC_ID_PROBE ||
/* Stream still probing: stash the packet and shrink the budget. */
446 add_to_pktbuf(&s->internal->raw_packet_buffer, pkt,
447 &s->internal->raw_packet_buffer_end);
448 s->internal->raw_packet_buffer_remaining_size -= pkt->size;
450 if ((err = probe_codec(s, st, pkt)) < 0)
455 /**********************************************************/
458 * Return the frame duration in seconds. Return 0 if not available.
/* Compute one frame's duration as the rational *pnum / *pden (in seconds);
 * both are left at 0 when no duration can be derived.
 * NOTE(review): some lines are elided in this listing (initialisation of
 * *pnum/*pden, break statements); surviving code kept byte-identical. */
460 void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st,
461 AVCodecParserContext *pc, AVPacket *pkt)
/* Demuxing uses the codec's advertised framerate; muxing (no iformat)
 * derives it from the codec time base. */
463 AVRational codec_framerate = s->iformat ? st->codec->framerate :
464 av_inv_q(st->codec->time_base);
469 switch (st->codec->codec_type) {
470 case AVMEDIA_TYPE_VIDEO:
471 if (st->avg_frame_rate.num) {
472 *pnum = st->avg_frame_rate.den;
473 *pden = st->avg_frame_rate.num;
/* The 1000LL guards reject implausibly coarse/fine time bases. */
474 } else if (st->time_base.num * 1000LL > st->time_base.den) {
475 *pnum = st->time_base.num;
476 *pden = st->time_base.den;
477 } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
478 *pnum = codec_framerate.den;
479 *pden = codec_framerate.num;
/* repeat_pict stretches the frame; guard against int overflow. */
480 if (pc && pc->repeat_pict) {
481 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
482 *pden /= 1 + pc->repeat_pict;
484 *pnum *= 1 + pc->repeat_pict;
486 /* If this codec can be interlaced or progressive then we need
487 * a parser to compute duration of a packet. Thus if we have
488 * no parser in such case leave duration undefined. */
489 if (st->codec->ticks_per_frame > 1 && !pc)
493 case AVMEDIA_TYPE_AUDIO:
494 frame_size = av_get_audio_frame_duration(st->codec, pkt->size);
495 if (frame_size <= 0 || st->codec->sample_rate <= 0)
498 *pden = st->codec->sample_rate;
505 static int is_intra_only(enum AVCodecID id)
507 const AVCodecDescriptor *d = avcodec_descriptor_get(id);
510 if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
/* Establish a stream's first_dts from the first usable dts and shift the
 * timestamps of already-buffered packets by that offset; also derive
 * start_time from the first available pts.
 * NOTE(review): a few lines (return, loop break) are elided in this
 * listing; surviving code kept byte-identical. */
515 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
516 int64_t dts, int64_t pts)
518 AVStream *st = s->streams[stream_index];
519 AVPacketList *pktl = s->internal->packet_buffer;
/* Nothing to do when first_dts is already set or inputs are unusable. */
521 if (st->first_dts != AV_NOPTS_VALUE ||
522 dts == AV_NOPTS_VALUE ||
523 st->cur_dts == AV_NOPTS_VALUE)
526 st->first_dts = dts - st->cur_dts;
/* Retro-fit the offset onto packets queued before first_dts was known. */
529 for (; pktl; pktl = pktl->next) {
530 if (pktl->pkt.stream_index != stream_index)
532 // FIXME: think more about this check
533 if (pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
534 pktl->pkt.pts += st->first_dts;
536 if (pktl->pkt.dts != AV_NOPTS_VALUE)
537 pktl->pkt.dts += st->first_dts;
539 if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
540 st->start_time = pktl->pkt.pts;
/* No buffered pts found: fall back to the current packet's pts. */
542 if (st->start_time == AV_NOPTS_VALUE)
543 st->start_time = pts;
/* Once a per-frame duration is known, fill in dts/pts/duration for the
 * run of already-buffered packets of this stream that lack timestamps,
 * spacing them duration apart starting from first_dts (or cur_dts).
 * NOTE(review): lines are elided in this listing (cur_dts decl, loop
 * breaks, cur_dts increments); surviving code kept byte-identical. */
546 static void update_initial_durations(AVFormatContext *s, AVStream *st,
547 int stream_index, int duration)
549 AVPacketList *pktl = s->internal->packet_buffer;
552 if (st->first_dts != AV_NOPTS_VALUE) {
553 cur_dts = st->first_dts;
/* Walk backwards in time over the leading timestamp-less packets to
 * find where the sequence starts. */
554 for (; pktl; pktl = pktl->next) {
555 if (pktl->pkt.stream_index == stream_index) {
556 if (pktl->pkt.pts != pktl->pkt.dts ||
557 pktl->pkt.dts != AV_NOPTS_VALUE ||
563 pktl = s->internal->packet_buffer;
564 st->first_dts = cur_dts;
565 } else if (st->cur_dts)
/* Forward pass: stamp the untimed packets with the computed dts run. */
568 for (; pktl; pktl = pktl->next) {
569 if (pktl->pkt.stream_index != stream_index)
571 if (pktl->pkt.pts == pktl->pkt.dts &&
572 pktl->pkt.dts == AV_NOPTS_VALUE &&
573 !pktl->pkt.duration) {
574 pktl->pkt.dts = cur_dts;
/* pts equals dts only when no B-frame reordering can occur. */
575 if (!st->codec->has_b_frames)
576 pktl->pkt.pts = cur_dts;
578 if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
579 pktl->pkt.duration = duration;
583 if (st->first_dts == AV_NOPTS_VALUE)
584 st->cur_dts = cur_dts;
/* Fill in missing pts/dts/duration on a freshly demuxed packet, handling
 * timestamp wrap, B-frame presentation delay, byte-offset interpolation,
 * keyframe flagging for intra-only codecs, and cur_dts bookkeeping.
 * NOTE(review): many lines are elided in this listing (returns, closing
 * braces, several conditions); surviving code kept byte-identical. */
587 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
588 AVCodecParserContext *pc, AVPacket *pkt)
590 int num, den, presentation_delayed, delay, i;
593 if (s->flags & AVFMT_FLAG_NOFILLIN)
596 if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
597 pkt->dts = AV_NOPTS_VALUE;
599 /* do we have a video B-frame ? */
600 delay = st->codec->has_b_frames;
601 presentation_delayed = 0;
603 /* XXX: need has_b_frame, but cannot get it if the codec is
606 pc && pc->pict_type != AV_PICTURE_TYPE_B)
607 presentation_delayed = 1;
/* Undo a timestamp wrap: dts far above pts means dts wrapped early. */
609 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
610 st->pts_wrap_bits < 63 &&
611 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
612 pkt->dts -= 1LL << st->pts_wrap_bits;
615 /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
616 * We take the conservative approach and discard both.
617 * Note: If this is misbehaving for an H.264 file, then possibly
618 * presentation_delayed is not set correctly. */
619 if (delay == 1 && pkt->dts == pkt->pts &&
620 pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
621 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
622 pkt->dts = AV_NOPTS_VALUE;
/* Derive a duration from the frame rate when the demuxer left it 0. */
625 if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
626 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
628 pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
629 den * (int64_t) st->time_base.num,
632 if (pkt->duration != 0 && s->internal->packet_buffer)
633 update_initial_durations(s, st, pkt->stream_index,
638 /* Correct timestamps with byte offset if demuxers only have timestamps
639 * on packet boundaries */
640 if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
641 /* this will estimate bitrate based on this frame's duration and size */
642 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
643 if (pkt->pts != AV_NOPTS_VALUE)
645 if (pkt->dts != AV_NOPTS_VALUE)
649 /* This may be redundant, but it should not hurt. */
650 if (pkt->dts != AV_NOPTS_VALUE &&
651 pkt->pts != AV_NOPTS_VALUE &&
653 presentation_delayed = 1;
655 av_log(NULL, AV_LOG_TRACE,
656 "IN delayed:%d pts:%"PRId64", dts:%"PRId64" "
657 "cur_dts:%"PRId64" st:%d pc:%p\n",
658 presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
659 pkt->stream_index, pc);
660 /* Interpolate PTS and DTS if they are not present. We skip H.264
661 * currently because delay and has_b_frames are not reliably set. */
662 if ((delay == 0 || (delay == 1 && pc)) &&
663 st->codec->codec_id != AV_CODEC_ID_H264) {
664 if (presentation_delayed) {
665 /* DTS = decompression timestamp */
666 /* PTS = presentation timestamp */
667 if (pkt->dts == AV_NOPTS_VALUE)
668 pkt->dts = st->last_IP_pts;
669 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
670 if (pkt->dts == AV_NOPTS_VALUE)
671 pkt->dts = st->cur_dts;
673 /* This is tricky: the dts must be incremented by the duration
674 * of the frame we are displaying, i.e. the last I- or P-frame. */
675 if (st->last_IP_duration == 0)
676 st->last_IP_duration = pkt->duration;
677 if (pkt->dts != AV_NOPTS_VALUE)
678 st->cur_dts = pkt->dts + st->last_IP_duration;
679 st->last_IP_duration = pkt->duration;
680 st->last_IP_pts = pkt->pts;
681 /* Cannot compute PTS if not present (we can compute it only
682 * by knowing the future. */
683 } else if (pkt->pts != AV_NOPTS_VALUE ||
684 pkt->dts != AV_NOPTS_VALUE ||
686 st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
687 int duration = pkt->duration;
688 if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
689 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
691 duration = av_rescale_rnd(1,
692 num * (int64_t) st->time_base.den,
693 den * (int64_t) st->time_base.num,
695 if (duration != 0 && s->internal->packet_buffer)
696 update_initial_durations(s, st, pkt->stream_index,
701 if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
703 /* presentation is not delayed : PTS and DTS are the same */
704 if (pkt->pts == AV_NOPTS_VALUE)
706 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
708 if (pkt->pts == AV_NOPTS_VALUE)
709 pkt->pts = st->cur_dts;
711 if (pkt->pts != AV_NOPTS_VALUE)
712 st->cur_dts = pkt->pts + duration;
/* Reorder pts through a small insertion-sorted buffer so the smallest
 * pending pts can serve as the missing dts. */
717 if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
718 st->pts_buffer[0] = pkt->pts;
719 for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
720 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
721 if (pkt->dts == AV_NOPTS_VALUE)
722 pkt->dts = st->pts_buffer[0];
723 // We skipped it above so we try here.
724 if (st->codec->codec_id == AV_CODEC_ID_H264)
725 // This should happen on the first packet
726 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
727 if (pkt->dts > st->cur_dts)
728 st->cur_dts = pkt->dts;
731 av_log(NULL, AV_LOG_TRACE,
732 "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
733 presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
/* Intra-only codecs: every packet is a keyframe by definition. */
736 if (is_intra_only(st->codec->codec_id))
737 pkt->flags |= AV_PKT_FLAG_KEY;
738 #if FF_API_CONVERGENCE_DURATION
739 FF_DISABLE_DEPRECATION_WARNINGS
741 pkt->convergence_duration = pc->convergence_duration;
742 FF_ENABLE_DEPRECATION_WARNINGS
746 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
749 AVPacketList *pktl = *pkt_buf;
750 *pkt_buf = pktl->next;
751 av_free_packet(&pktl->pkt);
758 * Parse a packet, add all split parts to parse_queue.
760 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
/* Run a stream's parser over pkt (or flush with pkt == NULL) and append
 * every completed frame to the parse_queue.
 * NOTE(review): many lines are elided in this listing (flush substitution,
 * pointer advance, loop close, parser reset); code kept byte-identical. */
762 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
764 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
765 AVStream *st = s->streams[stream_index];
766 uint8_t *data = pkt ? pkt->data : NULL;
767 int size = pkt ? pkt->size : 0;
768 int ret = 0, got_output = 0;
771 av_init_packet(&flush_pkt);
/* Keep parsing while input remains, or while flushing still yields. */
776 while (size > 0 || (pkt == &flush_pkt && got_output)) {
779 av_init_packet(&out_pkt);
780 len = av_parser_parse2(st->parser, st->codec,
781 &out_pkt.data, &out_pkt.size, data, size,
782 pkt->pts, pkt->dts, pkt->pos);
/* Timestamps belong to the first parsed frame only; clear them so
 * later iterations don't reuse them. */
784 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
785 /* increment read pointer */
789 got_output = !!out_pkt.size;
/* Transfer side data ownership to the first output packet. */
794 if (pkt->side_data) {
795 out_pkt.side_data = pkt->side_data;
796 out_pkt.side_data_elems = pkt->side_data_elems;
797 pkt->side_data = NULL;
798 pkt->side_data_elems = 0;
801 /* set the duration */
802 out_pkt.duration = 0;
803 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
804 if (st->codec->sample_rate > 0) {
806 av_rescale_q_rnd(st->parser->duration,
807 (AVRational) { 1, st->codec->sample_rate },
813 out_pkt.stream_index = st->index;
814 out_pkt.pts = st->parser->pts;
815 out_pkt.dts = st->parser->dts;
816 out_pkt.pos = st->parser->pos;
818 if (st->parser->key_frame == 1 ||
819 (st->parser->key_frame == -1 &&
820 st->parser->pict_type == AV_PICTURE_TYPE_I))
821 out_pkt.flags |= AV_PKT_FLAG_KEY;
823 compute_pkt_fields(s, st, st->parser, &out_pkt);
825 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
826 out_pkt.flags & AV_PKT_FLAG_KEY) {
827 ff_reduce_index(s, st->index);
828 av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
829 0, 0, AVINDEX_KEYFRAME);
/* Parser returned the input untouched: reuse its buffer ref instead
 * of copying. */
832 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
833 out_pkt.buf = pkt->buf;
836 if ((ret = av_dup_packet(&out_pkt)) < 0)
839 if (!add_to_pktbuf(&s->internal->parse_queue, &out_pkt, &s->internal->parse_queue_end)) {
840 av_free_packet(&out_pkt);
841 ret = AVERROR(ENOMEM);
846 /* end of the stream => close and free the parser */
847 if (pkt == &flush_pkt) {
848 av_parser_close(st->parser);
857 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
858 AVPacketList **pkt_buffer_end,
862 av_assert0(*pkt_buffer);
865 *pkt_buffer = pktl->next;
867 *pkt_buffer_end = NULL;
872 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
874 int ret = 0, i, got_packet = 0;
875 AVDictionary *metadata = NULL;
879 while (!got_packet && !s->internal->parse_queue) {
883 /* read next packet */
884 ret = ff_read_packet(s, &cur_pkt);
886 if (ret == AVERROR(EAGAIN))
888 /* flush the parsers */
889 for (i = 0; i < s->nb_streams; i++) {
891 if (st->parser && st->need_parsing)
892 parse_packet(s, NULL, st->index);
894 /* all remaining packets are now in parse_queue =>
895 * really terminate parsing */
899 st = s->streams[cur_pkt.stream_index];
901 if (cur_pkt.pts != AV_NOPTS_VALUE &&
902 cur_pkt.dts != AV_NOPTS_VALUE &&
903 cur_pkt.pts < cur_pkt.dts) {
904 av_log(s, AV_LOG_WARNING,
905 "Invalid timestamps stream=%d, pts=%"PRId64", "
906 "dts=%"PRId64", size=%d\n",
907 cur_pkt.stream_index, cur_pkt.pts,
908 cur_pkt.dts, cur_pkt.size);
910 if (s->debug & FF_FDEBUG_TS)
911 av_log(s, AV_LOG_DEBUG,
912 "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", "
913 "size=%d, duration=%"PRId64", flags=%d\n",
914 cur_pkt.stream_index, cur_pkt.pts, cur_pkt.dts,
915 cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
917 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
918 st->parser = av_parser_init(st->codec->codec_id);
920 /* no parser available: just output the raw packets */
921 st->need_parsing = AVSTREAM_PARSE_NONE;
922 else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
923 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
924 else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
925 st->parser->flags |= PARSER_FLAG_ONCE;
928 if (!st->need_parsing || !st->parser) {
929 /* no parsing needed: we just output the packet as is */
931 compute_pkt_fields(s, st, NULL, pkt);
932 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
933 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
934 ff_reduce_index(s, st->index);
935 av_add_index_entry(st, pkt->pos, pkt->dts,
936 0, 0, AVINDEX_KEYFRAME);
939 } else if (st->discard < AVDISCARD_ALL) {
940 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
944 av_free_packet(&cur_pkt);
948 if (!got_packet && s->internal->parse_queue)
949 ret = read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);
951 av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
953 s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
954 av_dict_copy(&s->metadata, metadata, 0);
955 av_dict_free(&metadata);
956 av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
959 if (s->debug & FF_FDEBUG_TS)
960 av_log(s, AV_LOG_DEBUG,
961 "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", "
962 "size=%d, duration=%"PRId64", flags=%d\n",
963 pkt->stream_index, pkt->pts, pkt->dts,
964 pkt->size, pkt->duration, pkt->flags);
/* Public frame-reading entry point. Without AVFMT_FLAG_GENPTS it is a thin
 * wrapper over the buffer/read_frame_internal pair; with GENPTS it buffers
 * packets and fills missing pts values from later dts of the same stream.
 * NOTE(review): loop structure, eof handling and returns are elided in
 * this listing; surviving code kept byte-identical. */
969 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
971 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
/* Fast path: no pts generation requested. */
975 return s->internal->packet_buffer
976 ? read_from_packet_buffer(&s->internal->packet_buffer,
977 &s->internal->packet_buffer_end, pkt)
978 : read_frame_internal(s, pkt);
982 AVPacketList *pktl = s->internal->packet_buffer;
985 AVPacket *next_pkt = &pktl->pkt;
987 if (next_pkt->dts != AV_NOPTS_VALUE) {
988 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
/* Scan the buffer for a later same-stream packet whose dts (compared
 * modulo the wrap period) can serve as this packet's missing pts. */
989 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
990 if (pktl->pkt.stream_index == next_pkt->stream_index &&
991 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
992 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
994 next_pkt->pts = pktl->pkt.dts;
998 pktl = s->internal->packet_buffer;
1001 /* read packet from packet buffer, if there is data */
1002 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1003 next_pkt->dts != AV_NOPTS_VALUE && !eof))
1004 return read_from_packet_buffer(&s->internal->packet_buffer,
1005 &s->internal->packet_buffer_end, pkt);
1008 ret = read_frame_internal(s, pkt);
1010 if (pktl && ret != AVERROR(EAGAIN)) {
/* Keep the new packet buffered so its pts can be resolved later. */
1017 if (av_dup_packet(add_to_pktbuf(&s->internal->packet_buffer, pkt,
1018 &s->internal->packet_buffer_end)) < 0)
1019 return AVERROR(ENOMEM);
1023 /* XXX: suppress the packet queue */
1024 static void flush_packet_queue(AVFormatContext *s)
1026 free_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end);
1027 free_packet_buffer(&s->internal->packet_buffer, &s->internal->packet_buffer_end);
1028 free_packet_buffer(&s->internal->raw_packet_buffer, &s->internal->raw_packet_buffer_end);
1030 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1033 /*******************************************************/
1036 int av_find_default_stream_index(AVFormatContext *s)
1038 int first_audio_index = -1;
1042 if (s->nb_streams <= 0)
1044 for (i = 0; i < s->nb_streams; i++) {
1046 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1047 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1050 if (first_audio_index < 0 &&
1051 st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1052 first_audio_index = i;
1054 return first_audio_index >= 0 ? first_audio_index : 0;
1057 /** Flush the frame reader. */
1058 void ff_read_frame_flush(AVFormatContext *s)
1063 flush_packet_queue(s);
1065 /* Reset read state for each stream. */
1066 for (i = 0; i < s->nb_streams; i++) {
1070 av_parser_close(st->parser);
1073 st->last_IP_pts = AV_NOPTS_VALUE;
1074 /* We set the current DTS to an unspecified origin. */
1075 st->cur_dts = AV_NOPTS_VALUE;
1077 st->probe_packets = MAX_PROBE_PACKETS;
1079 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1080 st->pts_buffer[j] = AV_NOPTS_VALUE;
1084 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1088 for (i = 0; i < s->nb_streams; i++) {
1089 AVStream *st = s->streams[i];
1092 av_rescale(timestamp,
1093 st->time_base.den * (int64_t) ref_st->time_base.num,
1094 st->time_base.num * (int64_t) ref_st->time_base.den);
1098 void ff_reduce_index(AVFormatContext *s, int stream_index)
1100 AVStream *st = s->streams[stream_index];
1101 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1103 if ((unsigned) st->nb_index_entries >= max_entries) {
1105 for (i = 0; 2 * i < st->nb_index_entries; i++)
1106 st->index_entries[i] = st->index_entries[2 * i];
1107 st->nb_index_entries = i;
/* Insert (or update) an index entry keeping the array sorted by timestamp.
 * Returns the entry's position, or a negative value on failure.
 * NOTE(review): lines are elided in this listing (index decl, overflow
 * return, realloc-failure return, remaining field assignments); surviving
 * code kept byte-identical. */
1111 int ff_add_index_entry(AVIndexEntry **index_entries,
1112 int *nb_index_entries,
1113 unsigned int *index_entries_allocated_size,
1114 int64_t pos, int64_t timestamp,
1115 int size, int distance, int flags)
1117 AVIndexEntry *entries, *ie;
/* Guard the (count+1)*sizeof multiplication against overflow. */
1120 if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1123 entries = av_fast_realloc(*index_entries,
1124 index_entries_allocated_size,
1125 (*nb_index_entries + 1) *
1126 sizeof(AVIndexEntry));
1130 *index_entries = entries;
1132 index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1133 timestamp, AVSEEK_FLAG_ANY);
/* Not found past the end: append; the assert documents sortedness. */
1136 index = (*nb_index_entries)++;
1137 ie = &entries[index];
1138 assert(index == 0 || ie[-1].timestamp < timestamp);
1140 ie = &entries[index];
/* Different timestamp at the slot: shift the tail to make room. */
1141 if (ie->timestamp != timestamp) {
1142 if (ie->timestamp <= timestamp)
1144 memmove(entries + index + 1, entries + index,
1145 sizeof(AVIndexEntry) * (*nb_index_entries - index));
1146 (*nb_index_entries)++;
1147 } else if (ie->pos == pos && distance < ie->min_distance)
1148 // do not reduce the distance
1149 distance = ie->min_distance;
1153 ie->timestamp = timestamp;
1154 ie->min_distance = distance;
1161 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1162 int size, int distance, int flags)
1164 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1165 &st->index_entries_allocated_size, pos,
1166 timestamp, size, distance, flags);
/* Binary-search a sorted index for wanted_timestamp; flags select the
 * direction (AVSEEK_FLAG_BACKWARD) and whether non-keyframe entries are
 * acceptable (AVSEEK_FLAG_ANY). Returns the entry index or a negative
 * value when no suitable entry exists.
 * NOTE(review): the search-loop skeleton (a/b/m declarations, while
 * header, bound updates) is elided in this listing; surviving code kept
 * byte-identical. */
1169 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1170 int64_t wanted_timestamp, int flags)
1178 // Optimize appending index entries at the end.
1179 if (b && entries[b - 1].timestamp < wanted_timestamp)
1184 timestamp = entries[m].timestamp;
1185 if (timestamp >= wanted_timestamp)
1187 if (timestamp <= wanted_timestamp)
/* Pick the lower (backward) or upper (forward) bound of the search. */
1190 m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
/* Without ANY, walk to the nearest keyframe entry in seek direction. */
1192 if (!(flags & AVSEEK_FLAG_ANY))
1193 while (m >= 0 && m < nb_entries &&
1194 !(entries[m].flags & AVINDEX_KEYFRAME))
1195 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1197 if (m == nb_entries)
1202 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1204 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1205 wanted_timestamp, flags);
/* Seek to target_ts using a binary search driven by the demuxer's
 * read_timestamp callback, seeding the search bounds from any cached
 * index entries, then reposition the I/O and update all cur_dts values.
 * NOTE(review): lines are elided in this listing (ts_max init, e/index
 * decls, pos_min/pos_max assignments, returns); surviving code kept
 * byte-identical. */
1208 int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
1209 int64_t target_ts, int flags)
1211 AVInputFormat *avif = s->iformat;
1212 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1213 int64_t ts_min, ts_max, ts;
1218 if (stream_index < 0)
1221 av_log(s, AV_LOG_TRACE, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1224 ts_min = AV_NOPTS_VALUE;
1225 pos_limit = -1; // GCC falsely says it may be uninitialized.
1227 st = s->streams[stream_index];
1228 if (st->index_entries) {
1231 /* FIXME: Whole function must be checked for non-keyframe entries in
1232 * index case, especially read_timestamp(). */
/* Lower bound: closest index entry at or before target_ts. */
1233 index = av_index_search_timestamp(st, target_ts,
1234 flags | AVSEEK_FLAG_BACKWARD);
1235 index = FFMAX(index, 0);
1236 e = &st->index_entries[index];
1238 if (e->timestamp <= target_ts || e->pos == e->min_distance) {
1240 ts_min = e->timestamp;
1241 av_log(s, AV_LOG_TRACE, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* Upper bound: closest index entry at or after target_ts. */
1247 index = av_index_search_timestamp(st, target_ts,
1248 flags & ~AVSEEK_FLAG_BACKWARD);
1249 assert(index < st->nb_index_entries);
1251 e = &st->index_entries[index];
1252 assert(e->timestamp >= target_ts);
1254 ts_max = e->timestamp;
1255 pos_limit = pos_max - e->min_distance;
1256 av_log(s, AV_LOG_TRACE, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
1257 " dts_max=%"PRId64"\n", pos_max, pos_limit, ts_max);
1261 pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
1262 ts_min, ts_max, flags, &ts, avif->read_timestamp);
1267 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1270 ff_update_cur_dts(s, st, ts);
/* Generic timestamp search: locates the byte position whose timestamp is
 * closest to target_ts inside [pos_min, pos_max], using the demuxer's
 * read_timestamp() callback. First establishes ts_min/ts_max bounds if the
 * caller passed AV_NOPTS_VALUE, then narrows the range by interpolation,
 * falling back to bisection and finally linear search.
 * NOTE(review): this excerpt has elided lines (embedded numbering is
 * non-contiguous), so returns/braces are missing; code kept byte-identical. */
1275 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1276 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1277 int64_t ts_min, int64_t ts_max,
1278 int flags, int64_t *ts_ret,
1279 int64_t (*read_timestamp)(struct AVFormatContext *, int,
1280 int64_t *, int64_t))
1283 int64_t start_pos, filesize;
1286 av_log(s, AV_LOG_TRACE, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* Establish the lower bound from the first timestamp after the data offset. */
1288 if (ts_min == AV_NOPTS_VALUE) {
1289 pos_min = s->internal->data_offset;
1290 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1291 if (ts_min == AV_NOPTS_VALUE)
/* Establish the upper bound by scanning backwards from end-of-file until a
 * valid timestamp is found. */
1295 if (ts_max == AV_NOPTS_VALUE) {
1297 filesize = avio_size(s->pb);
1298 pos_max = filesize - 1;
1301 ts_max = read_timestamp(s, stream_index, &pos_max,
1304 } while (ts_max == AV_NOPTS_VALUE && pos_max >= step);
1305 if (ts_max == AV_NOPTS_VALUE)
1309 int64_t tmp_pos = pos_max + 1;
1310 int64_t tmp_ts = read_timestamp(s, stream_index,
1311 &tmp_pos, INT64_MAX);
1312 if (tmp_ts == AV_NOPTS_VALUE)
1316 if (tmp_pos >= filesize)
1319 pos_limit = pos_max;
1322 if (ts_min > ts_max)
1324 else if (ts_min == ts_max)
1325 pos_limit = pos_min;
/* Main narrowing loop: shrink [pos_min, pos_limit] until it collapses. */
1328 while (pos_min < pos_limit) {
1329 av_log(s, AV_LOG_TRACE, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64
1330 " dts_max=%"PRId64"\n", pos_min, pos_max, ts_min, ts_max);
1331 assert(pos_limit <= pos_max);
1333 if (no_change == 0) {
1334 int64_t approximate_keyframe_distance = pos_max - pos_limit;
1335 // interpolate position (better than dichotomy)
1336 pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
1338 pos_min - approximate_keyframe_distance;
1339 } else if (no_change == 1) {
1340 // bisection if interpolation did not change min / max pos last time
1341 pos = (pos_min + pos_limit) >> 1;
1343 /* linear search if bisection failed, can only happen if there
1344 * are very few or no keyframes between min/max */
1349 else if (pos > pos_limit)
1353 // May pass pos_limit instead of -1.
1354 ts = read_timestamp(s, stream_index, &pos, INT64_MAX);
1359 av_log(s, AV_LOG_TRACE, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64
1360 " target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1361 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1362 pos_limit, start_pos, no_change);
1363 if (ts == AV_NOPTS_VALUE) {
1364 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1367 assert(ts != AV_NOPTS_VALUE);
/* Found timestamp >= target: move the upper bound below this probe. */
1368 if (target_ts <= ts) {
1369 pos_limit = start_pos - 1;
1373 if (target_ts >= ts) {
/* Direction of the final pick depends on AVSEEK_FLAG_BACKWARD. */
1379 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1380 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1382 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1384 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1385 av_log(s, AV_LOG_TRACE, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1386 pos, ts_min, target_ts, ts_max);
/* Seek by absolute byte position: clamp pos to [data_offset, filesize-1]
 * (clamping branches partly elided in this fragment) and seek the AVIO
 * context there. */
1391 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1392 int64_t pos, int flags)
1394 int64_t pos_min, pos_max;
1396 pos_min = s->internal->data_offset;
1397 pos_max = avio_size(s->pb) - 1;
1401 else if (pos > pos_max)
1404 avio_seek(s->pb, pos, SEEK_SET);
/* Generic index-based seek: look the timestamp up in the stream's index;
 * if the target lies beyond the last index entry, read packets forward
 * (building the index as a side effect of av_read_frame) until a keyframe
 * past the target is seen, then re-search the index and seek to the entry's
 * byte position.
 * NOTE(review): lines are elided in this fragment (loop headers / returns
 * missing); code kept byte-identical. */
1409 static int seek_frame_generic(AVFormatContext *s, int stream_index,
1410 int64_t timestamp, int flags)
1417 st = s->streams[stream_index];
1419 index = av_index_search_timestamp(st, timestamp, flags);
1421 if (index < 0 && st->nb_index_entries &&
1422 timestamp < st->index_entries[0].timestamp)
/* Target not in the index yet: position at the last known entry (or the
 * data offset when the index is empty) and parse forward. */
1425 if (index < 0 || index == st->nb_index_entries - 1) {
1428 if (st->nb_index_entries) {
1429 assert(st->index_entries);
1430 ie = &st->index_entries[st->nb_index_entries - 1];
1431 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1433 ff_update_cur_dts(s, st, ie->timestamp);
1435 if ((ret = avio_seek(s->pb, s->internal->data_offset, SEEK_SET)) < 0)
1441 read_status = av_read_frame(s, &pkt);
1442 } while (read_status == AVERROR(EAGAIN));
1443 if (read_status < 0)
1445 av_free_packet(&pkt);
/* Stop once a keyframe of the wanted stream past the target was read. */
1446 if (stream_index == pkt.stream_index)
1447 if ((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1450 index = av_index_search_timestamp(st, timestamp, flags);
1455 ff_read_frame_flush(s);
/* Prefer the demuxer's own read_seek if it succeeds. */
1456 if (s->iformat->read_seek)
1457 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1459 ie = &st->index_entries[index];
1460 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1462 ff_update_cur_dts(s, st, ie->timestamp);
/* Dispatch a seek request: byte seek if AVSEEK_FLAG_BYTE, otherwise try the
 * demuxer's read_seek, then binary search via read_timestamp, then the
 * generic index-based fallback. A negative stream_index means "default
 * stream", with the timestamp rescaled from AV_TIME_BASE to that stream's
 * time base. */
1467 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1468 int64_t timestamp, int flags)
1473 if (flags & AVSEEK_FLAG_BYTE) {
1474 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1476 ff_read_frame_flush(s);
1477 return seek_frame_byte(s, stream_index, timestamp, flags);
1480 if (stream_index < 0) {
1481 stream_index = av_find_default_stream_index(s);
1482 if (stream_index < 0)
1485 st = s->streams[stream_index];
1486 /* timestamp for default must be expressed in AV_TIME_BASE units */
1487 timestamp = av_rescale(timestamp, st->time_base.den,
1488 AV_TIME_BASE * (int64_t) st->time_base.num);
1491 /* first, we try the format specific seek */
1492 if (s->iformat->read_seek) {
1493 ff_read_frame_flush(s);
1494 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1500 if (s->iformat->read_timestamp &&
1501 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1502 ff_read_frame_flush(s);
1503 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1504 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1505 ff_read_frame_flush(s);
1506 return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public seek entry point: delegate to seek_frame_internal() and, on
 * success, re-queue attached pictures so they are delivered again. */
1511 int av_seek_frame(AVFormatContext *s, int stream_index,
1512 int64_t timestamp, int flags)
1514 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1517 ret = queue_attached_pictures(s);
/* New-style seek API: validates min_ts <= ts <= max_ts, prefers the
 * demuxer's read_seek2, and otherwise falls back to av_seek_frame(),
 * choosing AVSEEK_FLAG_BACKWARD when ts is closer to max_ts than to
 * min_ts (unsigned arithmetic guards against overflow of the distances). */
1522 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
1523 int64_t ts, int64_t max_ts, int flags)
1525 if (min_ts > ts || max_ts < ts)
1528 if (s->iformat->read_seek2) {
1530 ff_read_frame_flush(s);
1531 ret = s->iformat->read_seek2(s, stream_index, min_ts,
1535 ret = queue_attached_pictures(s);
1539 if (s->iformat->read_timestamp) {
1540 // try to seek via read_timestamp()
1543 // Fall back on old API if new is not implemented but old is.
1544 // Note the old API has somewhat different semantics.
1545 if (s->iformat->read_seek || 1)
1546 return av_seek_frame(s, stream_index, ts,
1547 flags | ((uint64_t) ts - min_ts >
1548 (uint64_t) max_ts - ts
1549 ? AVSEEK_FLAG_BACKWARD : 0));
1551 // try some generic seek like seek_frame_generic() but with new ts semantics
1554 /*******************************************************/
1557  * Return TRUE if at least one stream (or the container itself) has a
1559  * known duration.
1561 static int has_duration(AVFormatContext *ic)
1566 for (i = 0; i < ic->nb_streams; i++) {
1567 st = ic->streams[i];
1568 if (st->duration != AV_NOPTS_VALUE)
/* Also accept a container-level duration when no stream provides one. */
1571 if (ic->duration != AV_NOPTS_VALUE)
1577  * Derive the container-level start_time/duration from the per-stream
1579  * values, and compute the global bitrate from the file size if possible.
1581 static void update_stream_timings(AVFormatContext *ic)
1583 int64_t start_time, start_time1, end_time, end_time1;
1584 int64_t duration, duration1, filesize;
/* Sentinels: min over start times, max over end times / durations. */
1588 start_time = INT64_MAX;
1589 end_time = INT64_MIN;
1590 duration = INT64_MIN;
1591 for (i = 0; i < ic->nb_streams; i++) {
1592 st = ic->streams[i];
1593 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1594 start_time1 = av_rescale_q(st->start_time, st->time_base,
1596 start_time = FFMIN(start_time, start_time1);
1597 if (st->duration != AV_NOPTS_VALUE) {
1598 end_time1 = start_time1 +
1599 av_rescale_q(st->duration, st->time_base,
1601 end_time = FFMAX(end_time, end_time1);
1604 if (st->duration != AV_NOPTS_VALUE) {
1605 duration1 = av_rescale_q(st->duration, st->time_base,
1607 duration = FFMAX(duration, duration1);
1610 if (start_time != INT64_MAX) {
1611 ic->start_time = start_time;
1612 if (end_time != INT64_MIN)
1613 duration = FFMAX(duration, end_time - start_time);
1615 if (duration != INT64_MIN) {
1616 ic->duration = duration;
1617 if (ic->pb && (filesize = avio_size(ic->pb)) > 0)
1618 /* compute the bitrate */
1619 ic->bit_rate = (double) filesize * 8.0 * AV_TIME_BASE /
1620 (double) ic->duration;
/* Propagate container-level start_time/duration down to streams that lack
 * them, rescaling from AV_TIME_BASE_Q to each stream's time base. */
1624 static void fill_all_stream_timings(AVFormatContext *ic)
1629 update_stream_timings(ic);
1630 for (i = 0; i < ic->nb_streams; i++) {
1631 st = ic->streams[i];
1632 if (st->start_time == AV_NOPTS_VALUE) {
1633 if (ic->start_time != AV_NOPTS_VALUE)
1634 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
1636 if (ic->duration != AV_NOPTS_VALUE)
1637 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
/* Last-resort duration estimate: sum the per-stream codec bitrates (with an
 * INT_MAX overflow guard), then derive each stream's duration from the file
 * size and total bitrate. */
1643 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1645 int64_t filesize, duration;
1649 /* if bit_rate is already set, we believe it */
1650 if (ic->bit_rate <= 0) {
1652 for (i = 0; i < ic->nb_streams; i++) {
1653 st = ic->streams[i];
1654 if (st->codec->bit_rate > 0) {
/* Guard against signed overflow of the running sum. */
1655 if (INT_MAX - st->codec->bit_rate < bit_rate) {
1659 bit_rate += st->codec->bit_rate;
1662 ic->bit_rate = bit_rate;
1665 /* if duration is already set, we believe it */
1666 if (ic->duration == AV_NOPTS_VALUE &&
1667 ic->bit_rate != 0) {
1668 filesize = ic->pb ? avio_size(ic->pb) : 0;
1670 for (i = 0; i < ic->nb_streams; i++) {
1671 st = ic->streams[i];
/* duration = 8 * filesize / bit_rate, expressed in the stream time base
 * (the bit_rate divisor is on an elided line in this fragment). */
1672 duration = av_rescale(8 * filesize, st->time_base.den,
1674 (int64_t) st->time_base.num);
1675 if (st->duration == AV_NOPTS_VALUE)
1676 st->duration = duration;
/* Max bytes to scan from the end of file per attempt, and how many times to
 * double that window before giving up. */
1682 #define DURATION_MAX_READ_SIZE 250000
1683 #define DURATION_MAX_RETRY 3
1685 /* only usable for MPEG-PS streams */
/* Estimate stream durations by reading packets from near the end of the
 * file: the highest PTS seen minus the stream's start time (or first_dts)
 * gives the duration, with a pts_wrap_bits correction for wrapped
 * timestamps. Restores the original file position afterwards. */
1686 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1688 AVPacket pkt1, *pkt = &pkt1;
1690 int read_size, i, ret;
1692 int64_t filesize, offset, duration;
1695 /* flush packet queue */
1696 flush_packet_queue(ic);
1698 for (i = 0; i < ic->nb_streams; i++) {
1699 st = ic->streams[i];
1700 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1701 av_log(st->codec, AV_LOG_WARNING,
1702 "start time is not set in estimate_timings_from_pts\n");
1705 av_parser_close(st->parser);
1710 /* estimate the end time (duration) */
1711 /* XXX: may need to support wrapping */
1712 filesize = ic->pb ? avio_size(ic->pb) : 0;
1713 end_time = AV_NOPTS_VALUE;
/* Seek to a window DURATION_MAX_READ_SIZE << retry bytes before EOF. */
1715 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
1719 avio_seek(ic->pb, offset, SEEK_SET);
1722 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
1726 ret = ff_read_packet(ic, pkt);
1727 } while (ret == AVERROR(EAGAIN));
1730 read_size += pkt->size;
1731 st = ic->streams[pkt->stream_index];
1732 if (pkt->pts != AV_NOPTS_VALUE &&
1733 (st->start_time != AV_NOPTS_VALUE ||
1734 st->first_dts != AV_NOPTS_VALUE)) {
1735 duration = end_time = pkt->pts;
1736 if (st->start_time != AV_NOPTS_VALUE)
1737 duration -= st->start_time;
1739 duration -= st->first_dts;
/* Negative duration means the timestamp wrapped; unwrap it. */
1741 duration += 1LL << st->pts_wrap_bits;
1743 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1744 st->duration = duration;
1747 av_free_packet(pkt);
1749 } while (end_time == AV_NOPTS_VALUE &&
1750 filesize > (DURATION_MAX_READ_SIZE << retry) &&
1751 ++retry <= DURATION_MAX_RETRY);
1753 fill_all_stream_timings(ic);
1755 avio_seek(ic->pb, old_offset, SEEK_SET);
/* Reset decode state perturbed by the scan. */
1756 for (i = 0; i < ic->nb_streams; i++) {
1757 st = ic->streams[i];
1758 st->cur_dts = st->first_dts;
1759 st->last_IP_pts = AV_NOPTS_VALUE;
/* Choose a duration-estimation strategy: accurate PTS scan for seekable
 * MPEG-PS/TS, per-stream timings when any stream has them, or the coarse
 * bitrate-based estimate otherwise; then update the container timings and
 * trace the result. */
1763 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1767 /* get the file size, if possible */
1768 if (ic->iformat->flags & AVFMT_NOFILE) {
1771 file_size = avio_size(ic->pb);
1772 file_size = FFMAX(0, file_size);
1775 if ((!strcmp(ic->iformat->name, "mpeg") ||
1776 !strcmp(ic->iformat->name, "mpegts")) &&
1777 file_size && ic->pb->seekable) {
1778 /* get accurate estimate from the PTSes */
1779 estimate_timings_from_pts(ic, old_offset);
1780 } else if (has_duration(ic)) {
1781 /* at least one component has timings - we use them for all
1783 fill_all_stream_timings(ic);
1785 av_log(ic, AV_LOG_WARNING,
1786 "Estimating duration from bitrate, this may be inaccurate\n");
1787 /* less precise: use bitrate info */
1788 estimate_timings_from_bit_rate(ic);
1790 update_stream_timings(ic);
1794 AVStream av_unused *st;
1795 for (i = 0; i < ic->nb_streams; i++) {
1796 st = ic->streams[i];
1797 av_log(ic, AV_LOG_TRACE, "%d: start_time: %0.3f duration: %0.3f\n", i,
1798 (double) st->start_time / AV_TIME_BASE,
1799 (double) st->duration / AV_TIME_BASE);
1801 av_log(ic, AV_LOG_TRACE,
1802 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1803 (double) ic->start_time / AV_TIME_BASE,
1804 (double) ic->duration / AV_TIME_BASE,
1805 ic->bit_rate / 1000);
/* Return nonzero when the stream's codec context holds enough parameters
 * for playback: a known codec_id plus, for audio, sample_rate/channels
 * (and sample_fmt once a decoder was tried) or, for video, pix_fmt once a
 * decoder was tried. */
1809 static int has_codec_parameters(AVStream *st)
1811 AVCodecContext *avctx = st->codec;
1814 switch (avctx->codec_type) {
1815 case AVMEDIA_TYPE_AUDIO:
1816 val = avctx->sample_rate && avctx->channels;
/* found_decoder >= 0 means decoding was attempted, so sample_fmt should
 * have been filled in by now. */
1817 if (st->info->found_decoder >= 0 &&
1818 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1821 case AVMEDIA_TYPE_VIDEO:
1823 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1830 return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
/* H.264 can reorder frames, so require several decoded frames before
 * trusting the decode delay; every other codec needs none. */
1833 static int has_decode_delay_been_guessed(AVStream *st)
1835 return st->codec->codec_id != AV_CODEC_ID_H264 ||
1836 st->info->nb_decoded_frames >= 6;
1839 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/* Open the stream's decoder if needed (single-threaded, so H.264 extracts
 * SPS/PPS into extradata) and decode packets from avpkt until the codec
 * parameters are known or the data is exhausted. found_decoder records
 * -1 on open failure and 1 on success. */
1840 static int try_decode_frame(AVStream *st, AVPacket *avpkt,
1841 AVDictionary **options)
1843 const AVCodec *codec;
1844 int got_picture = 1, ret = 0;
1845 AVFrame *frame = av_frame_alloc();
1846 AVPacket pkt = *avpkt;
1849 return AVERROR(ENOMEM);
1851 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
1852 AVDictionary *thread_opt = NULL;
1854 codec = st->codec->codec ? st->codec->codec
1855 : avcodec_find_decoder(st->codec->codec_id);
1858 st->info->found_decoder = -1;
1863 /* Force thread count to 1 since the H.264 decoder will not extract
1864 * SPS and PPS to extradata during multi-threaded decoding. */
1865 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
1866 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
1868 av_dict_free(&thread_opt);
1870 st->info->found_decoder = -1;
1873 st->info->found_decoder = 1;
1874 } else if (!st->info->found_decoder)
1875 st->info->found_decoder = 1;
1877 if (st->info->found_decoder < 0) {
/* Keep decoding while data remains (or the decoder keeps emitting frames
 * on flush) and the parameters are still incomplete. */
1882 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
1884 (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
1885 (!st->codec_info_nb_frames &&
1886 (st->codec->codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF)))) {
1888 switch (st->codec->codec_type) {
1889 case AVMEDIA_TYPE_VIDEO:
1890 ret = avcodec_decode_video2(st->codec, frame,
1891 &got_picture, &pkt);
1893 case AVMEDIA_TYPE_AUDIO:
1894 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
1901 st->info->nb_decoded_frames++;
1909 av_frame_free(&frame);
/* Walk a codec-tag table and return the tag matching the given codec id
 * (loop body and terminating return are on elided lines in this fragment). */
1913 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
1915 while (tags->id != AV_CODEC_ID_NONE) {
/* Reverse lookup: map a fourcc-style tag to a codec id. A first pass
 * matches exactly; a second pass retries case-insensitively via
 * avpriv_toupper4(). */
1923 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1926 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1927 if (tag == tags[i].tag)
1929 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1930 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
1932 return AV_CODEC_ID_NONE;
/* Select the PCM codec id for a sample description: bits per sample (bps),
 * float flag (flt), big-endian flag (be) and a signedness bitmask (sflags,
 * bit bps-1 set means signed at that width). Returns AV_CODEC_ID_NONE for
 * unsupported widths. */
1935 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
1940 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
1942 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
1944 return AV_CODEC_ID_NONE;
1948 if (sflags & (1 << (bps - 1))) {
1951 return AV_CODEC_ID_PCM_S8;
1953 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
1955 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
1957 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
1959 return AV_CODEC_ID_NONE;
1964 return AV_CODEC_ID_PCM_U8;
1966 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
1968 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
1970 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
1972 return AV_CODEC_ID_NONE;
/* Search a NULL-terminated list of codec-tag tables for the tag matching
 * id, delegating each table lookup to ff_codec_get_tag(). */
1978 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
1981 for (i = 0; tags && tags[i]; i++) {
1982 int tag = ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of codec-tag tables for the codec id
 * matching tag; returns AV_CODEC_ID_NONE when no table matches. */
1989 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
1992 for (i = 0; tags && tags[i]; i++) {
1993 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
1994 if (id != AV_CODEC_ID_NONE)
1997 return AV_CODEC_ID_NONE;
/* Fill in missing chapter end times: a chapter without an end extends to
 * the start of the next chapter, bounded by the container end time
 * (start_time + duration) rescaled into the chapter's time base. */
2000 static void compute_chapters_end(AVFormatContext *s)
2003 int64_t max_time = s->duration +
2004 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2006 for (i = 0; i < s->nb_chapters; i++)
2007 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2008 AVChapter *ch = s->chapters[i];
2009 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
/* Clamp to the earliest later-starting chapter. */
2013 for (j = 0; j < s->nb_chapters; j++) {
2014 AVChapter *ch1 = s->chapters[j];
2015 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2017 if (j != i && next_start > ch->start && next_start < end)
2020 ch->end = (end == INT64_MAX) ? ch->start : end;
/* Return the i-th "standard" frame rate numerator over a 12*1001 time base:
 * first the NTSC-style multiples of 1001, then common integer rates
 * (24/30/60/12/15 Hz scaled by 1000*12). */
2024 static int get_std_framerate(int i)
2027 return (i + 1) * 1001;
2029 return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
/* Probe the input to fill in stream codec parameters. Phases: (1) init
 * parsers and pre-open decoders single-threaded; (2) read packets until all
 * streams have parameters or a probe limit (probesize /
 * max_analyze_duration) is hit, buffering packets for later demuxing unless
 * AVFMT_FLAG_NOBUFFER, tracking DTS continuity for fps estimation and
 * splitting extradata via the parser; (3) on EOF, flush decoders; (4) close
 * probe decoders, derive avg_frame_rate for video and disposition /
 * bits_per_coded_sample for audio; (5) estimate timings and chapter ends.
 * NOTE(review): this excerpt has many elided lines (loop bodies, breaks,
 * braces); code kept byte-identical. */
2032 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2034 int i, count, ret, read_size, j;
2036 AVPacket pkt1, *pkt;
2038 int64_t old_offset = avio_tell(ic->pb);
2039 // new streams might appear, no options for those
2040 int orig_nb_streams = ic->nb_streams;
/* Phase 1: per-stream parser/decoder setup. */
2041 for (i = 0; i < ic->nb_streams; i++) {
2042 const AVCodec *codec;
2043 AVDictionary *thread_opt = NULL;
2044 st = ic->streams[i];
2046 // only for the split stuff
2047 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2048 st->parser = av_parser_init(st->codec->codec_id);
2049 if (st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser)
2050 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2052 codec = st->codec->codec ? st->codec->codec
2053 : avcodec_find_decoder(st->codec->codec_id);
2055 /* Force thread count to 1 since the H.264 decoder will not extract
2056 * SPS and PPS to extradata during multi-threaded decoding. */
2057 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2059 /* Ensure that subtitle_header is properly set. */
2060 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2061 && codec && !st->codec->codec)
2062 avcodec_open2(st->codec, codec,
2063 options ? &options[i] : &thread_opt);
2065 // Try to just open decoders, in case this is enough to get parameters.
2066 if (!has_codec_parameters(st)) {
2067 if (codec && !st->codec->codec)
2068 avcodec_open2(st->codec, codec,
2069 options ? &options[i] : &thread_opt);
2072 av_dict_free(&thread_opt);
2075 for (i = 0; i < ic->nb_streams; i++) {
2076 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2077 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
/* Phase 2: packet-reading loop (loop header elided in this fragment). */
2083 if (ff_check_interrupt(&ic->interrupt_callback)) {
2085 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2089 /* check if one codec still needs to be handled */
2090 for (i = 0; i < ic->nb_streams; i++) {
2091 int fps_analyze_framecount = 20;
2093 st = ic->streams[i];
2094 if (!has_codec_parameters(st))
2096 /* If the timebase is coarse (like the usual millisecond precision
2097 * of mkv), we need to analyze more frames to reliably arrive at
2098 * the correct fps. */
2099 if (av_q2d(st->time_base) > 0.0005)
2100 fps_analyze_framecount *= 2;
2101 if (ic->fps_probe_size >= 0)
2102 fps_analyze_framecount = ic->fps_probe_size;
2103 /* variable fps and no guess at the real fps */
2104 if (!st->avg_frame_rate.num &&
2105 st->codec_info_nb_frames < fps_analyze_framecount &&
2106 st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2108 if (st->parser && st->parser->parser->split &&
2109 !st->codec->extradata)
2111 if (st->first_dts == AV_NOPTS_VALUE &&
2112 st->codec_info_nb_frames < ic->max_ts_probe &&
2113 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2114 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2117 if (i == ic->nb_streams) {
2118 /* NOTE: If the format has no header, then we need to read some
2119 * packets to get most of the streams, so we cannot stop here. */
2120 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2121 /* If we found the info for all the codecs, we can stop. */
2123 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2127 /* We did not get all the codec info, but we read too much data. */
2128 if (read_size >= ic->probesize) {
2130 av_log(ic, AV_LOG_DEBUG,
2131 "Probe buffer size limit %d reached\n", ic->probesize);
2135 /* NOTE: A new stream can be added there if no header in file
2136 * (AVFMTCTX_NOHEADER). */
2137 ret = read_frame_internal(ic, &pkt1);
2138 if (ret == AVERROR(EAGAIN))
/* Phase 3: EOF — flush decoders with an empty packet until parameters
 * stabilize. */
2143 AVPacket empty_pkt = { 0 };
2145 av_init_packet(&empty_pkt);
2147 /* We could not have all the codec parameters before EOF. */
2149 for (i = 0; i < ic->nb_streams; i++) {
2150 st = ic->streams[i];
2152 /* flush the decoders */
2153 if (st->info->found_decoder == 1) {
2155 err = try_decode_frame(st, &empty_pkt,
2156 (options && i < orig_nb_streams)
2157 ? &options[i] : NULL);
2158 } while (err > 0 && !has_codec_parameters(st));
2162 av_log(ic, AV_LOG_WARNING,
2163 "decoding for stream %d failed\n", st->index);
2164 } else if (!has_codec_parameters(st)) {
2166 avcodec_string(buf, sizeof(buf), st->codec, 0);
2167 av_log(ic, AV_LOG_WARNING,
2168 "Could not find codec parameters (%s)\n", buf);
/* Buffer the probed packet for later demuxing unless NOBUFFER is set. */
2176 if (ic->flags & AVFMT_FLAG_NOBUFFER) {
2179 pkt = add_to_pktbuf(&ic->internal->packet_buffer, &pkt1,
2180 &ic->internal->packet_buffer_end);
2181 if ((ret = av_dup_packet(pkt)) < 0)
2182 goto find_stream_info_err;
2185 read_size += pkt->size;
2187 st = ic->streams[pkt->stream_index];
2188 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2189 /* check for non-increasing dts */
2190 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2191 st->info->fps_last_dts >= pkt->dts) {
2192 av_log(ic, AV_LOG_WARNING,
2193 "Non-increasing DTS in stream %d: packet %d with DTS "
2194 "%"PRId64", packet %d with DTS %"PRId64"\n",
2195 st->index, st->info->fps_last_dts_idx,
2196 st->info->fps_last_dts, st->codec_info_nb_frames,
2198 st->info->fps_first_dts =
2199 st->info->fps_last_dts = AV_NOPTS_VALUE;
2201 /* Check for a discontinuity in dts. If the difference in dts
2202 * is more than 1000 times the average packet duration in the
2203 * sequence, we treat it as a discontinuity. */
2204 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2205 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2206 (pkt->dts - st->info->fps_last_dts) / 1000 >
2207 (st->info->fps_last_dts - st->info->fps_first_dts) /
2208 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2209 av_log(ic, AV_LOG_WARNING,
2210 "DTS discontinuity in stream %d: packet %d with DTS "
2211 "%"PRId64", packet %d with DTS %"PRId64"\n",
2212 st->index, st->info->fps_last_dts_idx,
2213 st->info->fps_last_dts, st->codec_info_nb_frames,
2215 st->info->fps_first_dts =
2216 st->info->fps_last_dts = AV_NOPTS_VALUE;
2219 /* update stored dts values */
2220 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2221 st->info->fps_first_dts = pkt->dts;
2222 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2224 st->info->fps_last_dts = pkt->dts;
2225 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2227 /* check max_analyze_duration */
2228 if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2229 AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2230 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached\n",
2231 ic->max_analyze_duration);
2232 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2233 av_packet_unref(pkt);
2237 if (st->parser && st->parser->parser->split && !st->codec->extradata) {
2238 int i = st->parser->parser->split(st->codec, pkt->data, pkt->size);
2239 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2240 st->codec->extradata_size = i;
2241 st->codec->extradata = av_mallocz(st->codec->extradata_size +
2242 AV_INPUT_BUFFER_PADDING_SIZE);
2243 if (!st->codec->extradata)
2244 return AVERROR(ENOMEM);
2245 memcpy(st->codec->extradata, pkt->data,
2246 st->codec->extradata_size);
2250 /* If still no information, we try to open the codec and to
2251 * decompress the frame. We try to avoid that in most cases as
2252 * it takes longer and uses more memory. For MPEG-4, we need to
2253 * decompress for QuickTime.
2255 * If AV_CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2256 * least one frame of codec data, this makes sure the codec initializes
2257 * the channel configuration and does not only trust the values from
2259 try_decode_frame(st, pkt,
2260 (options && i < orig_nb_streams) ? &options[i] : NULL);
2262 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2263 av_packet_unref(pkt);
2265 st->codec_info_nb_frames++;
/* Phase 4: post-processing of probed streams. */
2269 // close codecs which were opened in try_decode_frame()
2270 for (i = 0; i < ic->nb_streams; i++) {
2271 st = ic->streams[i];
2272 avcodec_close(st->codec);
2274 for (i = 0; i < ic->nb_streams; i++) {
2275 st = ic->streams[i];
2276 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2277 /* estimate average framerate if not set by demuxer */
2278 if (!st->avg_frame_rate.num &&
2279 st->info->fps_last_dts != st->info->fps_first_dts) {
2280 int64_t delta_dts = st->info->fps_last_dts -
2281 st->info->fps_first_dts;
2282 int delta_packets = st->info->fps_last_dts_idx -
2283 st->info->fps_first_dts_idx;
2285 double best_error = 0.01;
/* Overflow guards before the av_reduce() multiplications. */
2287 if (delta_dts >= INT64_MAX / st->time_base.num ||
2288 delta_packets >= INT64_MAX / st->time_base.den ||
2291 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2292 delta_packets * (int64_t) st->time_base.den,
2293 delta_dts * (int64_t) st->time_base.num, 60000);
2295 /* Round guessed framerate to a "standard" framerate if it's
2296 * within 1% of the original estimate. */
2297 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
2298 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
2299 double error = fabs(av_q2d(st->avg_frame_rate) /
2300 av_q2d(std_fps) - 1);
2302 if (error < best_error) {
2304 best_fps = std_fps.num;
2308 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2309 best_fps, 12 * 1001, INT_MAX);
2311 } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2312 if (!st->codec->bits_per_coded_sample)
2313 st->codec->bits_per_coded_sample =
2314 av_get_bits_per_sample(st->codec->codec_id);
2315 // set stream disposition based on audio service type
2316 switch (st->codec->audio_service_type) {
2317 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2318 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
2320 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2321 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
2323 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2324 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
2326 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2327 st->disposition = AV_DISPOSITION_COMMENT;
2329 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2330 st->disposition = AV_DISPOSITION_KARAOKE;
/* Phase 5: timings, chapters, cleanup. */
2336 estimate_timings(ic, old_offset);
2338 compute_chapters_end(ic);
2340 find_stream_info_err:
2341 for (i = 0; i < ic->nb_streams; i++) {
2342 ic->streams[i]->codec->thread_count = 0;
2343 av_freep(&ic->streams[i]->info);
/* Return the first program that contains stream index s, or NULL (elided
 * tail) when no program references it. */
2348 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2352 for (i = 0; i < ic->nb_programs; i++)
2353 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2354 if (ic->programs[i]->stream_index[j] == s)
2355 return ic->programs[i];
/* Pick the "best" stream of the requested media type: restrict the search
 * to the related stream's program when given, skip impaired-audience
 * dispositions, prefer the stream with the most probed frames, and
 * optionally return the matching decoder. Returns the stream index or
 * AVERROR_STREAM_NOT_FOUND / AVERROR_DECODER_NOT_FOUND. */
2359 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
2360 int wanted_stream_nb, int related_stream,
2361 AVCodec **decoder_ret, int flags)
2363 int i, nb_streams = ic->nb_streams;
2364 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2365 unsigned *program = NULL;
2366 AVCodec *decoder = NULL, *best_decoder = NULL;
2368 if (related_stream >= 0 && wanted_stream_nb < 0) {
2369 AVProgram *p = find_program_from_stream(ic, related_stream);
2371 program = p->stream_index;
2372 nb_streams = p->nb_stream_indexes;
2375 for (i = 0; i < nb_streams; i++) {
2376 int real_stream_index = program ? program[i] : i;
2377 AVStream *st = ic->streams[real_stream_index];
2378 AVCodecContext *avctx = st->codec;
2379 if (avctx->codec_type != type)
2381 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2383 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
2384 AV_DISPOSITION_VISUAL_IMPAIRED))
2387 decoder = avcodec_find_decoder(st->codec->codec_id);
2390 ret = AVERROR_DECODER_NOT_FOUND;
2394 if (best_count >= st->codec_info_nb_frames)
2396 best_count = st->codec_info_nb_frames;
2397 ret = real_stream_index;
2398 best_decoder = decoder;
/* Program exhausted without a match: widen to all streams and restart. */
2399 if (program && i == nb_streams - 1 && ret < 0) {
2401 nb_streams = ic->nb_streams;
2402 /* no related stream found, try again with everything */
2407 *decoder_ret = best_decoder;
2411 /*******************************************************/
/* Resume a paused network stream: demuxer read_play first, AVIO unpause as
 * fallback, ENOSYS if neither is available. */
2413 int av_read_play(AVFormatContext *s)
2415 if (s->iformat->read_play)
2416 return s->iformat->read_play(s);
2418 return avio_pause(s->pb, 0);
2419 return AVERROR(ENOSYS);
/* Pause a network stream: demuxer read_pause first, AVIO pause as fallback,
 * ENOSYS if neither is available. */
2422 int av_read_pause(AVFormatContext *s)
2424 if (s->iformat->read_pause)
2425 return s->iformat->read_pause(s);
2427 return avio_pause(s->pb, 1);
2428 return AVERROR(ENOSYS);
/* Free an AVFormatContext and everything it owns: per-stream side data,
 * parser, attached picture, metadata, index, codec buffers; then programs,
 * chapters, context metadata and the context itself. */
2431 void avformat_free_context(AVFormatContext *s)
2440 if (s->iformat && s->iformat->priv_class && s->priv_data)
2441 av_opt_free(s->priv_data);
2443 for (i = 0; i < s->nb_streams; i++) {
2444 /* free all data in a stream component */
2447 for (j = 0; j < st->nb_side_data; j++)
2448 av_freep(&st->side_data[j].data);
2449 av_freep(&st->side_data);
2450 st->nb_side_data = 0;
2453 av_parser_close(st->parser);
2455 if (st->attached_pic.data)
2456 av_free_packet(&st->attached_pic);
2457 av_dict_free(&st->metadata);
2458 av_freep(&st->probe_data.buf);
2459 av_free(st->index_entries);
2460 av_free(st->codec->extradata);
2461 av_free(st->codec->subtitle_header);
2463 av_free(st->priv_data);
2467 for (i = s->nb_programs - 1; i >= 0; i--) {
2468 av_dict_free(&s->programs[i]->metadata);
2469 av_freep(&s->programs[i]->stream_index);
2470 av_freep(&s->programs[i]);
2472 av_freep(&s->programs);
2473 av_freep(&s->priv_data);
2474 while (s->nb_chapters--) {
2475 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2476 av_free(s->chapters[s->nb_chapters]);
2478 av_freep(&s->chapters);
2479 av_dict_free(&s->metadata);
2480 av_freep(&s->streams);
2481 av_freep(&s->internal);
/* Close an input context: flush queued packets, call the demuxer's
 * read_close, free the context, and (unless AVFMT_NOFILE or custom IO)
 * close the underlying AVIO context. *ps is invalid afterwards. */
2485 void avformat_close_input(AVFormatContext **ps)
2487 AVFormatContext *s = *ps;
2488 AVIOContext *pb = s->pb;
/* Do not touch pb when the demuxer owns no file or IO is caller-supplied. */
2490 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2491 (s->flags & AVFMT_FLAG_CUSTOM_IO))
2494 flush_packet_queue(s);
2497 if (s->iformat->read_close)
2498 s->iformat->read_close(s);
2500 avformat_free_context(s);
/* Allocate a new AVStream, append it to s->streams and initialize its
 * codec context (for codec c), timestamps and defaults. Returns the stream
 * or NULL on allocation failure (error paths partly elided here). */
2507 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
2512 if (av_reallocp_array(&s->streams, s->nb_streams + 1,
2513 sizeof(*s->streams)) < 0) {
2518 st = av_mallocz(sizeof(AVStream));
2521 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2526 st->codec = avcodec_alloc_context3(c);
2533 /* no default bitrate if decoding */
2534 st->codec->bit_rate = 0;
2536 /* default pts setting is MPEG-like */
2537 avpriv_set_pts_info(st, 33, 1, 90000);
2540 st->index = s->nb_streams;
2541 st->start_time = AV_NOPTS_VALUE;
2542 st->duration = AV_NOPTS_VALUE;
2543 /* we set the current DTS to 0 so that formats without any timestamps
2544 * but durations get some timestamps, formats with some unknown
2545 * timestamps have their first few packets buffered and the
2546 * timestamps corrected before they are returned to the user */
2548 st->first_dts = AV_NOPTS_VALUE;
2549 st->probe_packets = MAX_PROBE_PACKETS;
2551 st->last_IP_pts = AV_NOPTS_VALUE;
2552 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
2553 st->pts_buffer[i] = AV_NOPTS_VALUE;
2555 st->sample_aspect_ratio = (AVRational) { 0, 1 };
2557 st->info->fps_first_dts = AV_NOPTS_VALUE;
2558 st->info->fps_last_dts = AV_NOPTS_VALUE;
2560 s->streams[s->nb_streams++] = st;
/* Return the program with the given id, creating and registering a new one
 * (discard defaulted to AVDISCARD_NONE) when none exists yet. */
2564 AVProgram *av_new_program(AVFormatContext *ac, int id)
2566 AVProgram *program = NULL;
2569 av_log(ac, AV_LOG_TRACE, "new_program: id=0x%04x\n", id);
2571 for (i = 0; i < ac->nb_programs; i++)
2572 if (ac->programs[i]->id == id)
2573 program = ac->programs[i];
2576 program = av_mallocz(sizeof(AVProgram));
2579 dynarray_add(&ac->programs, &ac->nb_programs, program);
2580 program->discard = AVDISCARD_NONE;
/* Find or create the chapter with the given id, then (re)set its title
 * metadata, time base and start/end times. */
2587 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
2588 int64_t start, int64_t end, const char *title)
2590 AVChapter *chapter = NULL;
2593 for (i = 0; i < s->nb_chapters; i++)
2594 if (s->chapters[i]->id == id)
2595 chapter = s->chapters[i];
2598 chapter = av_mallocz(sizeof(AVChapter));
2601 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2603 av_dict_set(&chapter->metadata, "title", title, 0);
2605 chapter->time_base = time_base;
2606 chapter->start = start;
2612 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
2615 AVProgram *program = NULL;
2617 if (idx >= ac->nb_streams) {
2618 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
2622 for (i = 0; i < ac->nb_programs; i++) {
2623 if (ac->programs[i]->id != progid)
2625 program = ac->programs[i];
2626 for (j = 0; j < program->nb_stream_indexes; j++)
2627 if (program->stream_index[j] == idx)
2630 if (av_reallocp_array(&program->stream_index,
2631 program->nb_stream_indexes + 1,
2632 sizeof(*program->stream_index)) < 0) {
2633 program->nb_stream_indexes = 0;
2636 program->stream_index[program->nb_stream_indexes++] = idx;
2641 uint64_t ff_ntp_time(void)
2643 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/**
 * Build a frame filename from a printf-like pattern.
 *
 * The pattern must contain exactly one "%d" (optionally "%0Nd") which is
 * replaced by @p number; "%%" emits a literal '%'. Any other conversion,
 * a missing or duplicate "%d", or a too-small buffer is an error.
 *
 * @param buf      output buffer
 * @param buf_size size of buf in bytes (including the terminating NUL)
 * @param path     pattern, e.g. "img%03d.png"
 * @param number   frame number to substitute
 * @return 0 on success, -1 on error
 */
int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q              = buf;
    p              = path;
    percentd_found = 0;
    for (;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            do {
                /* accumulate the zero-pad width, e.g. the 3 in "%03d" */
                nd = 0;
                while (av_isdigit(*p))
                    nd = nd * 10 + *p++ - '0';
                c = *p++;
            } while (av_isdigit(c));

            switch (c) {
            case '%':
                goto addchar;
            case 'd':
                if (percentd_found)
                    goto fail; /* only one %d allowed */
                percentd_found = 1;
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;
            }
        } else {
addchar:
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;

fail:
    *q = '\0';
    return -1;
}
/**
 * Split a URL of the form proto://user:pass@host:port/path into its
 * components. Any output that cannot be determined from the URL is set
 * to the empty string (port to -1). All string outputs are truncated to
 * their buffer size and NUL-terminated by av_strlcpy.
 *
 * @param proto              buffer for the protocol, may be NULL if proto_size is 0
 * @param proto_size         size of the proto buffer
 * @param authorization      buffer for "user[:pass]"
 * @param authorization_size size of the authorization buffer
 * @param hostname           buffer for the host name (brackets of IPv6
 *                           literals are stripped)
 * @param hostname_size      size of the hostname buffer
 * @param port_ptr           where to store the port, may be NULL
 * @param path               buffer for the path (including query string)
 * @param path_size          size of the path buffer
 * @param url                URL to split
 */
void av_url_split(char *proto, int proto_size,
                  char *authorization, int authorization_size,
                  char *hostname, int hostname_size,
                  int *port_ptr, char *path, int path_size, const char *url)
{
    const char *p, *ls, *at, *col, *brk;

    if (port_ptr)
        *port_ptr = -1;
    if (proto_size > 0)
        proto[0] = 0;
    if (authorization_size > 0)
        authorization[0] = 0;
    if (hostname_size > 0)
        hostname[0] = 0;
    if (path_size > 0)
        path[0] = 0;

    /* parse protocol */
    if ((p = strchr(url, ':'))) {
        av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
        p++; /* skip ':' */
        if (*p == '/')
            p++;
        if (*p == '/')
            p++;
    } else {
        /* no protocol means plain filename */
        av_strlcpy(path, url, path_size);
        return;
    }

    /* separate path from hostname */
    ls = strchr(p, '/');
    if (!ls)
        ls = strchr(p, '?');
    if (ls)
        av_strlcpy(path, ls, path_size);
    else
        ls = &p[strlen(p)]; // XXX

    /* the rest is hostname, use that to parse auth/port */
    if (ls != p) {
        /* authorization (user[:pass]@hostname) */
        if ((at = strchr(p, '@')) && at < ls) {
            av_strlcpy(authorization, p,
                       FFMIN(authorization_size, at + 1 - p));
            p = at + 1; /* skip '@' */
        }

        if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
            /* [host]:port — bracketed IPv6 literal */
            av_strlcpy(hostname, p + 1,
                       FFMIN(hostname_size, brk - p));
            if (brk[1] == ':' && port_ptr)
                *port_ptr = atoi(brk + 2);
        } else if ((col = strchr(p, ':')) && col < ls) {
            /* host:port */
            av_strlcpy(hostname, p,
                       FFMIN(col + 1 - p, hostname_size));
            if (port_ptr)
                *port_ptr = atoi(col + 1);
        } else
            /* host only */
            av_strlcpy(hostname, p,
                       FFMIN(ls + 1 - p, hostname_size));
    }
}
2766 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
2769 static const char hex_table_uc[16] = { '0', '1', '2', '3',
2772 'C', 'D', 'E', 'F' };
2773 static const char hex_table_lc[16] = { '0', '1', '2', '3',
2776 'c', 'd', 'e', 'f' };
2777 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
2779 for (i = 0; i < s; i++) {
2780 buff[i * 2] = hex_table[src[i] >> 4];
2781 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
2787 int ff_hex_to_data(uint8_t *data, const char *p)
2794 p += strspn(p, SPACE_CHARS);
2797 c = av_toupper((unsigned char) *p++);
2798 if (c >= '0' && c <= '9')
2800 else if (c >= 'A' && c <= 'F')
2815 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
2816 unsigned int pts_num, unsigned int pts_den)
2819 if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
2820 if (new_tb.num != pts_num)
2821 av_log(NULL, AV_LOG_DEBUG,
2822 "st:%d removing common factor %d from timebase\n",
2823 s->index, pts_num / new_tb.num);
2825 av_log(NULL, AV_LOG_WARNING,
2826 "st:%d has too large timebase, reducing\n", s->index);
2828 if (new_tb.num <= 0 || new_tb.den <= 0) {
2829 av_log(NULL, AV_LOG_ERROR,
2830 "Ignoring attempt to set invalid timebase for st:%d\n",
2834 s->time_base = new_tb;
2835 s->pts_wrap_bits = pts_wrap_bits;
2838 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
2841 const char *ptr = str;
2843 /* Parse key=value pairs. */
2846 char *dest = NULL, *dest_end;
2847 int key_len, dest_len = 0;
2849 /* Skip whitespace and potential commas. */
2850 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
2857 if (!(ptr = strchr(key, '=')))
2860 key_len = ptr - key;
2862 callback_get_buf(context, key, key_len, &dest, &dest_len);
2863 dest_end = dest + dest_len - 1;
2867 while (*ptr && *ptr != '\"') {
2871 if (dest && dest < dest_end)
2875 if (dest && dest < dest_end)
2883 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
2884 if (dest && dest < dest_end)
2892 int ff_find_stream_index(AVFormatContext *s, int id)
2895 for (i = 0; i < s->nb_streams; i++)
2896 if (s->streams[i]->id == id)
2901 int64_t ff_iso8601_to_unix_time(const char *datestr)
2903 struct tm time1 = { 0 }, time2 = { 0 };
2904 const char *ret1, *ret2;
2905 ret1 = av_small_strptime(datestr, "%Y - %m - %d %T", &time1);
2906 ret2 = av_small_strptime(datestr, "%Y - %m - %dT%T", &time2);
2908 return av_timegm(&time2);
2910 return av_timegm(&time1);
2913 int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
2917 if (ofmt->query_codec)
2918 return ofmt->query_codec(codec_id, std_compliance);
2919 else if (ofmt->codec_tag)
2920 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
2921 else if (codec_id == ofmt->video_codec ||
2922 codec_id == ofmt->audio_codec ||
2923 codec_id == ofmt->subtitle_codec)
2926 return AVERROR_PATCHWELCOME;
/**
 * Do global network-library initialization (winsock, TLS) if networking
 * support was compiled in.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
int avformat_network_init(void)
{
#if CONFIG_NETWORK
    int ret;

    ff_network_inited_globally = 1;
    if ((ret = ff_network_init()) < 0)
        return ret;
    ff_tls_init();
#endif
    return 0;
}
/**
 * Undo the initialization done by avformat_network_init().
 *
 * @return 0
 */
int avformat_network_deinit(void)
{
#if CONFIG_NETWORK
    ff_network_close();
    ff_tls_deinit();
#endif
    return 0;
}
2950 int ff_add_param_change(AVPacket *pkt, int32_t channels,
2951 uint64_t channel_layout, int32_t sample_rate,
2952 int32_t width, int32_t height)
2958 return AVERROR(EINVAL);
2961 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
2963 if (channel_layout) {
2965 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
2969 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
2971 if (width || height) {
2973 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
2975 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
2977 return AVERROR(ENOMEM);
2978 bytestream_put_le32(&data, flags);
2980 bytestream_put_le32(&data, channels);
2982 bytestream_put_le64(&data, channel_layout);
2984 bytestream_put_le32(&data, sample_rate);
2985 if (width || height) {
2986 bytestream_put_le32(&data, width);
2987 bytestream_put_le32(&data, height);
/* Install canned H.264 SPS/PPS extradata for AVC-Intra streams, which
 * carry no in-band parameter sets. The table is selected by coded width
 * and field order.
 * NOTE(review): this extract is damaged — interior lines of the hex
 * tables (and the original line numbering fused into each line) were
 * dropped by the extraction. The byte values below must be restored
 * from the upstream source before this compiles; do not hand-edit them. */
2992 int ff_generate_avci_extradata(AVStream *st)
/* AVC-Intra 100, 1080p (progressive) SPS+PPS */
2994 static const uint8_t avci100_1080p_extradata[] = {
2996 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
2997 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
2998 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
2999 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
3000 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
3001 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
3002 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
3003 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
3004 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* PPS follows (start code + 0x68 NAL) */
3006 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
/* AVC-Intra 100, 1080i (interlaced) SPS+PPS */
3009 static const uint8_t avci100_1080i_extradata[] = {
3011 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3012 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3013 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3014 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
3015 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
3016 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
3017 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
3018 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
3019 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
3020 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
3021 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
/* PPS follows */
3023 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
/* AVC-Intra 50, 1080i (1440-wide) SPS+PPS */
3026 static const uint8_t avci50_1080i_extradata[] = {
3028 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
3029 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
3030 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
3031 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
3032 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
3033 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
3034 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
3035 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
3036 0x81, 0x13, 0xf7, 0xff, 0x80, 0x01, 0x80, 0x02,
3037 0x71, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
3038 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
/* PPS follows (0x68 0xee — different PPS than the AVCI100 variants) */
3040 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
/* AVC-Intra 100, 720p SPS+PPS */
3043 static const uint8_t avci100_720p_extradata[] = {
3045 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3046 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
3047 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
3048 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
3049 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
3050 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
3051 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
3052 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
3053 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
3054 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
/* PPS follows */
3056 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
/* table selection by coded width / field order */
3060 const uint8_t *data = NULL;
3063 if (st->codec->width == 1920) {
3064 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
3065 data = avci100_1080p_extradata;
3066 size = sizeof(avci100_1080p_extradata);
/* non-progressive 1920-wide: use the interlaced table */
3068 data = avci100_1080i_extradata;
3069 size = sizeof(avci100_1080i_extradata);
3071 } else if (st->codec->width == 1440) {
3072 data = avci50_1080i_extradata;
3073 size = sizeof(avci50_1080i_extradata);
3074 } else if (st->codec->width == 1280) {
3075 data = avci100_720p_extradata;
3076 size = sizeof(avci100_720p_extradata);
/* replace any previous extradata with a freshly zero-padded copy */
3082 av_freep(&st->codec->extradata);
3083 st->codec->extradata_size = 0;
3084 st->codec->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
3085 if (!st->codec->extradata)
3086 return AVERROR(ENOMEM);
3088 memcpy(st->codec->extradata, data, size);
3089 st->codec->extradata_size = size;
3094 uint8_t *av_stream_get_side_data(AVStream *st, enum AVPacketSideDataType type,
3099 for (i = 0; i < st->nb_side_data; i++) {
3100 if (st->side_data[i].type == type) {
3102 *size = st->side_data[i].size;
3103 return st->side_data[i].data;
3109 uint8_t *ff_stream_new_side_data(AVStream *st, enum AVPacketSideDataType type,
3112 AVPacketSideData *sd, *tmp;
3114 uint8_t *data = av_malloc(size);
3119 for (i = 0; i < st->nb_side_data; i++) {
3120 sd = &st->side_data[i];
3122 if (sd->type == type) {
3123 av_freep(&sd->data);
3130 tmp = av_realloc_array(st->side_data, st->nb_side_data + 1, sizeof(*tmp));
3136 st->side_data = tmp;
3139 sd = &st->side_data[st->nb_side_data - 1];