/*
 * various utility functions for use within Libav
 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
42 #include "audiointerleave.h"
/**
 * @file
 * various utility functions for use within Libav
 */
58 unsigned avformat_version(void)
60 return LIBAVFORMAT_VERSION_INT;
63 const char *avformat_configuration(void)
65 return LIBAV_CONFIGURATION;
68 const char *avformat_license(void)
70 #define LICENSE_PREFIX "libavformat license: "
71 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
74 /* an arbitrarily chosen "sane" max packet size -- 50M */
75 #define SANE_CHUNK_SIZE (50000000)
77 /* Read the data in sane-sized chunks and append to pkt.
78 * Return the number of bytes read or an error. */
79 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
81 int64_t chunk_size = size;
82 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
83 int orig_size = pkt->size;
87 int prev_size = pkt->size;
90 /* When the caller requests a lot of data, limit it to the amount
91 * left in file or SANE_CHUNK_SIZE when it is not known. */
92 if (size > SANE_CHUNK_SIZE) {
93 int64_t filesize = avio_size(s) - avio_tell(s);
94 chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
96 read_size = FFMIN(size, chunk_size);
98 ret = av_grow_packet(pkt, read_size);
102 ret = avio_read(s, pkt->data + prev_size, read_size);
103 if (ret != read_size) {
104 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
114 return pkt->size > orig_size ? pkt->size - orig_size : ret;
117 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
122 pkt->pos = avio_tell(s);
124 return append_packet_chunked(s, pkt, size);
127 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
130 return av_get_packet(s, pkt, size);
131 return append_packet_chunked(s, pkt, size);
/* Return non-zero if filename contains a valid frame-number pattern
 * (i.e. av_get_frame_filename() can expand it). */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename &&
           (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
}
141 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
142 AVProbeData *pd, int score)
144 static const struct {
147 enum AVMediaType type;
149 { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO },
150 { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO },
151 { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO },
152 { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO },
153 { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO },
154 { "latm", AV_CODEC_ID_AAC_LATM, AVMEDIA_TYPE_AUDIO },
155 { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO },
156 { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO },
157 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
160 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
164 av_log(s, AV_LOG_DEBUG,
165 "Probe with size=%d, packets=%d detected %s with score=%d\n",
166 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
168 for (i = 0; fmt_id_type[i].name; i++) {
169 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
170 st->codec->codec_id = fmt_id_type[i].id;
171 st->codec->codec_type = fmt_id_type[i].type;
179 /************************************************************/
180 /* input media file */
182 /* Open input file and probe the format if necessary. */
183 static int init_input(AVFormatContext *s, const char *filename,
184 AVDictionary **options)
187 AVProbeData pd = { filename, NULL, 0 };
190 s->flags |= AVFMT_FLAG_CUSTOM_IO;
192 return av_probe_input_buffer(s->pb, &s->iformat, filename,
194 else if (s->iformat->flags & AVFMT_NOFILE)
195 return AVERROR(EINVAL);
199 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
200 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
203 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
204 &s->interrupt_callback, options)) < 0)
208 return av_probe_input_buffer(s->pb, &s->iformat, filename,
212 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
213 AVPacketList **plast_pktl)
215 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
220 (*plast_pktl)->next = pktl;
222 *packet_buffer = pktl;
224 /* Add the packet in the buffered packet list. */
230 static int queue_attached_pictures(AVFormatContext *s)
233 for (i = 0; i < s->nb_streams; i++)
234 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
235 s->streams[i]->discard < AVDISCARD_ALL) {
236 AVPacket copy = s->streams[i]->attached_pic;
237 copy.buf = av_buffer_ref(copy.buf);
239 return AVERROR(ENOMEM);
241 add_to_pktbuf(&s->raw_packet_buffer, ©,
242 &s->raw_packet_buffer_end);
/* Open an input stream and read its header: option handling, format
 * probing via init_input(), demuxer private-data setup, ID3v2 metadata
 * and attached-picture queuing.
 * NOTE(review): this extraction dropped interior lines (error labels,
 * closing braces, some conditions); code tokens are kept verbatim. */
int avformat_open_input(AVFormatContext **ps, const char *filename,
                        AVInputFormat *fmt, AVDictionary **options)
    /* Use the caller-provided context if any, else allocate one. */
    AVFormatContext *s = *ps;
    AVDictionary *tmp = NULL;
    ID3v2ExtraMeta *id3v2_extra_meta = NULL;
    if (!s && !(s = avformat_alloc_context()))
        return AVERROR(ENOMEM);
    /* Work on a copy of the options; leftovers are returned to the caller. */
    av_dict_copy(&tmp, *options, 0);
    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
    if ((ret = init_input(s, filename, &tmp)) < 0)
    /* Check filename in case an image number is expected. */
    if (s->iformat->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            ret = AVERROR(EINVAL);
    s->duration = s->start_time = AV_NOPTS_VALUE;
    av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
    /* Allocate private data. */
    if (s->iformat->priv_data_size > 0) {
        if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
            ret = AVERROR(ENOMEM);
        /* Demuxers with an AVClass accept per-demuxer options. */
        if (s->iformat->priv_class) {
            *(const AVClass **) s->priv_data = s->iformat->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
    /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
        ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
    if (s->iformat->read_header)
        if ((ret = s->iformat->read_header(s)) < 0)
    /* ID3v2 APIC frames become attached pictures. */
    if (id3v2_extra_meta &&
        (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    if ((ret = queue_attached_pictures(s)) < 0)
    /* Remember where the data starts, for seeking back later. */
    if (s->pb && !s->data_offset)
        s->data_offset = avio_tell(s->pb);
    s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
    av_dict_free(options);
    /* Error path: release everything acquired above. */
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
    avformat_free_context(s);
332 /*******************************************************/
/* Accumulate packet payloads for a stream whose codec is still unknown
 * (AV_CODEC_ID_PROBE) and re-probe once enough data was gathered.
 * pkt == NULL means end of probing: force a final detection attempt.
 * NOTE(review): extraction dropped interior lines (if/else skeleton);
 * code tokens are kept verbatim. */
static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
    if (st->codec->codec_id == AV_CODEC_ID_PROBE) {
        AVProbeData *pd = &st->probe_data;
        av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
        /* Grow the probe buffer, keeping zeroed probe padding at the end. */
        if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
                               AVPROBE_PADDING_SIZE)) < 0)
        memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
        pd->buf_size += pkt->size;
        memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
        /* Flush case: stop collecting further packets. */
        st->probe_packets = 0;
        av_log(s, AV_LOG_ERROR,
               "nothing to probe for stream %d\n", st->index);
        /* Re-probe when out of packets or the buffer doubled in size. */
        if (!st->probe_packets ||
            av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
            set_codec_from_probe_data(s, st, pd, st->probe_packets > 0
                                                 ? AVPROBE_SCORE_MAX / 4 : 0);
            if (st->codec->codec_id != AV_CODEC_ID_PROBE) {
                av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
/* Fetch the next raw (unparsed) packet: drain the raw packet buffer first,
 * then call the demuxer; apply forced codec ids and feed codec probing.
 * NOTE(review): extraction dropped interior lines (loop skeleton, error
 * paths, break statements); code tokens are kept verbatim. */
int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
    AVPacketList *pktl = s->raw_packet_buffer;
    st = s->streams[pkt->stream_index];
    /* Hand out a buffered packet when the stream no longer needs probing
     * or the probe budget is exhausted. */
    if (st->codec->codec_id != AV_CODEC_ID_PROBE ||
        !st->probe_packets ||
        s->raw_packet_buffer_remaining_size < pkt->size) {
        if (st->probe_packets)
            if ((err = probe_codec(s, st, NULL)) < 0)
        pd = &st->probe_data;
    s->raw_packet_buffer = pktl->next;
    s->raw_packet_buffer_remaining_size += pkt->size;
    /* Buffer empty: ask the demuxer for a new packet. */
    ret = s->iformat->read_packet(s, pkt);
    if (!pktl || ret == AVERROR(EAGAIN))
    /* On EOF/error, flush probing for all streams still pending. */
    for (i = 0; i < s->nb_streams; i++) {
        if (st->probe_packets)
            if ((err = probe_codec(s, st, NULL)) < 0)
    if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
        (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
        av_log(s, AV_LOG_WARNING,
               "Dropped corrupted packet (stream = %d)\n",
    st = s->streams[pkt->stream_index];
    /* Apply user-forced codec ids per media type. */
    switch (st->codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        if (s->video_codec_id)
            st->codec->codec_id = s->video_codec_id;
    case AVMEDIA_TYPE_AUDIO:
        if (s->audio_codec_id)
            st->codec->codec_id = s->audio_codec_id;
    case AVMEDIA_TYPE_SUBTITLE:
        if (s->subtitle_codec_id)
            st->codec->codec_id = s->subtitle_codec_id;
    if (!pktl && (st->codec->codec_id != AV_CODEC_ID_PROBE ||
    /* Stream still probing: stash the packet and feed the prober. */
    add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
    s->raw_packet_buffer_remaining_size -= pkt->size;
    if ((err = probe_codec(s, st, pkt)) < 0)
454 /**********************************************************/
/* Return the frame duration as *pnum / *pden (in units of the stream time
 * base). Sets *pnum = 0 if not available. */
/* Compute a packet's frame duration as the rational *pnum / *pden, from
 * (in priority order) the stream's average frame rate, its time base, or
 * the codec frame rate adjusted for parser repeat_pict; audio durations
 * come from av_get_audio_frame_duration().
 * NOTE(review): extraction dropped interior lines (an else branch around
 * the repeat_pict overflow guard, break/return statements); code tokens
 * are kept verbatim. */
void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
    /* Demuxing uses the codec frame rate; muxing-side contexts derive it
     * from the codec time base. */
    AVRational codec_framerate = s->iformat ? st->codec->framerate :
                                 av_inv_q(st->codec->time_base);
    switch (st->codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        if (st->avg_frame_rate.num) {
            *pnum = st->avg_frame_rate.den;
            *pden = st->avg_frame_rate.num;
        } else if (st->time_base.num * 1000LL > st->time_base.den) {
            /* Time base is fine-grained enough to be the frame duration. */
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
            *pnum = codec_framerate.den;
            *pden = codec_framerate.num;
            /* Account for repeated fields/frames reported by the parser;
             * avoid overflowing *pnum. */
            if (pc && pc->repeat_pict) {
                if (*pnum > INT_MAX / (1 + pc->repeat_pict))
                    *pden /= 1 + pc->repeat_pict;
                    *pnum *= 1 + pc->repeat_pict;
            /* If this codec can be interlaced or progressive then we need
             * a parser to compute duration of a packet. Thus if we have
             * no parser in such case leave duration undefined. */
            if (st->codec->ticks_per_frame > 1 && !pc)
    case AVMEDIA_TYPE_AUDIO:
        frame_size = av_get_audio_frame_duration(st->codec, pkt->size);
        if (frame_size <= 0 || st->codec->sample_rate <= 0)
        *pden = st->codec->sample_rate;
504 static int is_intra_only(enum AVCodecID id)
506 const AVCodecDescriptor *d = avcodec_descriptor_get(id);
509 if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
/* Once the first DTS of a stream becomes known, shift the timestamps of
 * already-buffered packets by the derived offset and set the stream start
 * time.
 * NOTE(review): extraction dropped interior lines (return/continue and
 * closing braces); code tokens are kept verbatim. */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts)
    AVStream *st = s->streams[stream_index];
    AVPacketList *pktl = s->packet_buffer;
    /* Nothing to do unless this is the first usable dts. */
    if (st->first_dts != AV_NOPTS_VALUE ||
        dts == AV_NOPTS_VALUE ||
        st->cur_dts == AV_NOPTS_VALUE)
    st->first_dts = dts - st->cur_dts;
    for (; pktl; pktl = pktl->next) {
        if (pktl->pkt.stream_index != stream_index)
        // FIXME: think more about this check
        if (pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
            pktl->pkt.pts += st->first_dts;
        if (pktl->pkt.dts != AV_NOPTS_VALUE)
            pktl->pkt.dts += st->first_dts;
        if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time = pktl->pkt.pts;
    /* Fall back to the current pts when no buffered packet provided one. */
    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
/* Fill in dts/pts/duration of buffered packets for a stream once a usable
 * per-packet duration is known, extrapolating from first_dts.
 * NOTE(review): extraction dropped interior lines (loop/if skeleton,
 * cur_dts accumulation); code tokens are kept verbatim. */
static void update_initial_durations(AVFormatContext *s, AVStream *st,
                                     int stream_index, int duration)
    AVPacketList *pktl = s->packet_buffer;
    if (st->first_dts != AV_NOPTS_VALUE) {
        /* Walk back from first_dts over the buffered packets. */
        cur_dts = st->first_dts;
        for (; pktl; pktl = pktl->next) {
            if (pktl->pkt.stream_index == stream_index) {
                if (pktl->pkt.pts != pktl->pkt.dts ||
                    pktl->pkt.dts != AV_NOPTS_VALUE ||
        pktl = s->packet_buffer;
        st->first_dts = cur_dts;
    } else if (st->cur_dts)
    for (; pktl; pktl = pktl->next) {
        if (pktl->pkt.stream_index != stream_index)
        /* Only touch packets with no timestamps at all. */
        if (pktl->pkt.pts == pktl->pkt.dts &&
            pktl->pkt.dts == AV_NOPTS_VALUE &&
            !pktl->pkt.duration) {
            pktl->pkt.dts = cur_dts;
            if (!st->codec->has_b_frames)
                pktl->pkt.pts = cur_dts;
            /* Audio duration comes from the sample count instead. */
            if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
                pktl->pkt.duration = duration;
    if (st->first_dts == AV_NOPTS_VALUE)
        st->cur_dts = cur_dts;
/* Core timestamp fixup: given a demuxed (possibly parsed) packet, fill in
 * missing pts/dts/duration using parser state, B-frame delay, the pts
 * reorder buffer and the stream's running cur_dts.
 * NOTE(review): extraction dropped interior lines (returns, else branches,
 * closing braces); code tokens are kept verbatim. */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
    int num, den, presentation_delayed, delay, i;
    /* The caller explicitly asked us not to fill in anything. */
    if (s->flags & AVFMT_FLAG_NOFILLIN)
    if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
        pkt->dts = AV_NOPTS_VALUE;
    /* do we have a video B-frame ? */
    delay = st->codec->has_b_frames;
    presentation_delayed = 0;
    /* XXX: need has_b_frame, but cannot get it if the codec is
     * not initialized. */
        pc && pc->pict_type != AV_PICTURE_TYPE_B)
        presentation_delayed = 1;
    /* Detect dts wraparound relative to pts and unwrap. */
    if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
        st->pts_wrap_bits < 63 &&
        pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
        pkt->dts -= 1LL << st->pts_wrap_bits;
    /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
     * We take the conservative approach and discard both.
     * Note: If this is misbehaving for an H.264 file, then possibly
     * presentation_delayed is not set correctly. */
    if (delay == 1 && pkt->dts == pkt->pts &&
        pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
        av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
        pkt->dts = AV_NOPTS_VALUE;
    /* Derive a missing duration from the computed frame rate. */
    if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
        ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
        pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
                                       den * (int64_t) st->time_base.num,
        if (pkt->duration != 0 && s->packet_buffer)
            update_initial_durations(s, st, pkt->stream_index,
    /* Correct timestamps with byte offset if demuxers only have timestamps
     * on packet boundaries */
    if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if (pkt->pts != AV_NOPTS_VALUE)
        if (pkt->dts != AV_NOPTS_VALUE)
    /* This may be redundant, but it should not hurt. */
    if (pkt->dts != AV_NOPTS_VALUE &&
        pkt->pts != AV_NOPTS_VALUE &&
        presentation_delayed = 1;
        "IN delayed:%d pts:%"PRId64", dts:%"PRId64" "
        "cur_dts:%"PRId64" st:%d pc:%p\n",
        presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
        pkt->stream_index, pc);
    /* Interpolate PTS and DTS if they are not present. We skip H.264
     * currently because delay and has_b_frames are not reliably set. */
    if ((delay == 0 || (delay == 1 && pc)) &&
        st->codec->codec_id != AV_CODEC_ID_H264) {
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->cur_dts;
            /* This is tricky: the dts must be incremented by the duration
             * of the frame we are displaying, i.e. the last I- or P-frame. */
            if (st->last_IP_duration == 0)
                st->last_IP_duration = pkt->duration;
            if (pkt->dts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts + st->last_IP_duration;
            st->last_IP_duration = pkt->duration;
            st->last_IP_pts      = pkt->pts;
            /* Cannot compute PTS if not present (we can compute it only
             * by knowing the future. */
        } else if (pkt->pts != AV_NOPTS_VALUE ||
                   pkt->dts != AV_NOPTS_VALUE ||
                   st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            int duration = pkt->duration;
            if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
                ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
                duration = av_rescale_rnd(1,
                                          num * (int64_t) st->time_base.den,
                                          den * (int64_t) st->time_base.num,
                if (duration != 0 && s->packet_buffer)
                    update_initial_durations(s, st, pkt->stream_index,
            if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
                /* presentation is not delayed : PTS and DTS are the same */
                if (pkt->pts == AV_NOPTS_VALUE)
                update_initial_timestamps(s, pkt->stream_index, pkt->pts,
                if (pkt->pts == AV_NOPTS_VALUE)
                    pkt->pts = st->cur_dts;
                if (pkt->pts != AV_NOPTS_VALUE)
                    st->cur_dts = pkt->pts + duration;
    /* Reorder buffer: smallest buffered pts becomes the dts. */
    if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
        st->pts_buffer[0] = pkt->pts;
        for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
        if (pkt->dts == AV_NOPTS_VALUE)
            pkt->dts = st->pts_buffer[0];
        // We skipped it above so we try here.
        if (st->codec->codec_id == AV_CODEC_ID_H264)
            // This should happen on the first packet
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
        if (pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;
    "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
    presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
    /* Intra-only codecs: every packet is a keyframe. */
    if (is_intra_only(st->codec->codec_id))
        pkt->flags |= AV_PKT_FLAG_KEY;
    pkt->convergence_duration = pc->convergence_duration;
741 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
744 AVPacketList *pktl = *pkt_buf;
745 *pkt_buf = pktl->next;
746 av_free_packet(&pktl->pkt);
/**
 * Parse a packet, add all split parts to parse_queue.
 *
 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
 *
 * NOTE(review): extraction dropped interior lines (loop skeleton, data/size
 * advancement, error labels); code tokens are kept verbatim.
 */
static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
    AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
    AVStream *st = s->streams[stream_index];
    uint8_t *data = pkt ? pkt->data : NULL;
    int size = pkt ? pkt->size : 0;
    int ret = 0, got_output = 0;
    /* A zero-sized sentinel packet drives the flush. */
    av_init_packet(&flush_pkt);
    while (size > 0 || (pkt == &flush_pkt && got_output)) {
        av_init_packet(&out_pkt);
        len = av_parser_parse2(st->parser, st->codec,
                               &out_pkt.data, &out_pkt.size, data, size,
                               pkt->pts, pkt->dts, pkt->pos);
        /* Timestamps are consumed by the first parser call only. */
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
        /* increment read pointer */
        got_output = !!out_pkt.size;
        /* Transfer side data ownership to the first output packet. */
        if (pkt->side_data) {
            out_pkt.side_data       = pkt->side_data;
            out_pkt.side_data_elems = pkt->side_data_elems;
            pkt->side_data          = NULL;
            pkt->side_data_elems    = 0;
        /* set the duration */
        out_pkt.duration = 0;
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (st->codec->sample_rate > 0) {
                /* Convert the parser's sample count to stream time base. */
                av_rescale_q_rnd(st->parser->duration,
                                 (AVRational) { 1, st->codec->sample_rate },
        out_pkt.stream_index = st->index;
        out_pkt.pts          = st->parser->pts;
        out_pkt.dts          = st->parser->dts;
        out_pkt.pos          = st->parser->pos;
        /* Keyframe flag: explicit from the parser, or inferred from an
         * I picture when the parser cannot tell. */
        if (st->parser->key_frame == 1 ||
            (st->parser->key_frame == -1 &&
             st->parser->pict_type == AV_PICTURE_TYPE_I))
            out_pkt.flags |= AV_PKT_FLAG_KEY;
        compute_pkt_fields(s, st, st->parser, &out_pkt);
        if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
            out_pkt.flags & AV_PKT_FLAG_KEY) {
            ff_reduce_index(s, st->index);
            av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
                               0, 0, AVINDEX_KEYFRAME);
        /* If the parser returned the whole input unchanged, reuse the
         * input packet's buffer instead of copying. */
        if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
            out_pkt.buf = pkt->buf;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
            out_pkt.destruct = pkt->destruct;
            pkt->destruct = NULL;
FF_ENABLE_DEPRECATION_WARNINGS
        if ((ret = av_dup_packet(&out_pkt)) < 0)
        if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
            av_free_packet(&out_pkt);
            ret = AVERROR(ENOMEM);
    /* end of the stream => close and free the parser */
    if (pkt == &flush_pkt) {
        av_parser_close(st->parser);
858 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
859 AVPacketList **pkt_buffer_end,
863 av_assert0(*pkt_buffer);
866 *pkt_buffer = pktl->next;
868 *pkt_buffer_end = NULL;
/* Produce the next fully-parsed frame: pull raw packets, lazily create
 * parsers, run parse_packet(), then return from the parse queue. Also
 * propagates demuxer metadata updates into the context.
 * NOTE(review): extraction dropped interior lines (error returns, braces,
 * some conditions); code tokens are kept verbatim. */
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
    int ret = 0, i, got_packet = 0;
    AVDictionary *metadata = NULL;
    while (!got_packet && !s->parse_queue) {
        /* read next packet */
        ret = ff_read_packet(s, &cur_pkt);
        if (ret == AVERROR(EAGAIN))
        /* flush the parsers */
        for (i = 0; i < s->nb_streams; i++) {
            if (st->parser && st->need_parsing)
                parse_packet(s, NULL, st->index);
        /* all remaining packets are now in parse_queue =>
         * really terminate parsing */
        st = s->streams[cur_pkt.stream_index];
        /* Warn about obviously broken pts < dts from the demuxer. */
        if (cur_pkt.pts != AV_NOPTS_VALUE &&
            cur_pkt.dts != AV_NOPTS_VALUE &&
            cur_pkt.pts < cur_pkt.dts) {
            av_log(s, AV_LOG_WARNING,
                   "Invalid timestamps stream=%d, pts=%"PRId64", "
                   "dts=%"PRId64", size=%d\n",
                   cur_pkt.stream_index, cur_pkt.pts,
                   cur_pkt.dts, cur_pkt.size);
        if (s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG,
                   "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", "
                   "size=%d, duration=%d, flags=%d\n",
                   cur_pkt.stream_index, cur_pkt.pts, cur_pkt.dts,
                   cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
        /* Lazily create the parser on first use for this stream. */
        if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codec->codec_id);
            /* no parser available: just output the raw packets */
                st->need_parsing = AVSTREAM_PARSE_NONE;
            else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
                st->parser->flags |= PARSER_FLAG_ONCE;
        if (!st->need_parsing || !st->parser) {
            /* no parsing needed: we just output the packet as is */
            compute_pkt_fields(s, st, NULL, pkt);
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts,
                                   0, 0, AVINDEX_KEYFRAME);
        } else if (st->discard < AVDISCARD_ALL) {
            if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
            av_free_packet(&cur_pkt);
    if (!got_packet && s->parse_queue)
        ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
    /* Surface metadata updated by the demuxer via the "metadata" option. */
    av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
        s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
        av_dict_copy(&s->metadata, metadata, 0);
        av_dict_free(&metadata);
        av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG,
               "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", "
               "size=%d, duration=%d, flags=%d\n",
               pkt->stream_index, pkt->pts, pkt->dts,
               pkt->size, pkt->duration, pkt->flags);
/* Public frame reader. Without AVFMT_FLAG_GENPTS it is a thin wrapper
 * over the packet buffer / read_frame_internal(); with it, packets are
 * buffered until a pts can be generated from a following dts.
 * NOTE(review): extraction dropped interior lines (genpts loop skeleton,
 * eof handling); code tokens are kept verbatim. */
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
    const int genpts = s->flags & AVFMT_FLAG_GENPTS;
    /* Fast path: no pts generation requested. */
    return s->packet_buffer
           ? read_from_packet_buffer(&s->packet_buffer,
                                     &s->packet_buffer_end, pkt)
           : read_frame_internal(s, pkt);
    AVPacketList *pktl = s->packet_buffer;
    AVPacket *next_pkt = &pktl->pkt;
    if (next_pkt->dts != AV_NOPTS_VALUE) {
        int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
        /* Borrow a later packet's dts as this packet's pts, modulo
         * timestamp wraparound. */
        while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
            if (pktl->pkt.stream_index == next_pkt->stream_index &&
                (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
                av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
                next_pkt->pts = pktl->pkt.dts;
        pktl = s->packet_buffer;
    /* read packet from packet buffer, if there is data */
    if (!(next_pkt->pts == AV_NOPTS_VALUE &&
          next_pkt->dts != AV_NOPTS_VALUE && !eof))
        return read_from_packet_buffer(&s->packet_buffer,
                                       &s->packet_buffer_end, pkt);
    ret = read_frame_internal(s, pkt);
    if (pktl && ret != AVERROR(EAGAIN)) {
    /* Buffer the new packet and loop to retry pts generation. */
    if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
                                    &s->packet_buffer_end)) < 0)
        return AVERROR(ENOMEM);
1024 /* XXX: suppress the packet queue */
1025 static void flush_packet_queue(AVFormatContext *s)
1027 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1028 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1029 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1031 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1034 /*******************************************************/
1037 int av_find_default_stream_index(AVFormatContext *s)
1039 int first_audio_index = -1;
1043 if (s->nb_streams <= 0)
1045 for (i = 0; i < s->nb_streams; i++) {
1047 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1048 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1051 if (first_audio_index < 0 &&
1052 st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1053 first_audio_index = i;
1055 return first_audio_index >= 0 ? first_audio_index : 0;
1058 /** Flush the frame reader. */
1059 void ff_read_frame_flush(AVFormatContext *s)
1064 flush_packet_queue(s);
1066 /* Reset read state for each stream. */
1067 for (i = 0; i < s->nb_streams; i++) {
1071 av_parser_close(st->parser);
1074 st->last_IP_pts = AV_NOPTS_VALUE;
1075 /* We set the current DTS to an unspecified origin. */
1076 st->cur_dts = AV_NOPTS_VALUE;
1078 st->probe_packets = MAX_PROBE_PACKETS;
1080 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1081 st->pts_buffer[j] = AV_NOPTS_VALUE;
1085 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1089 for (i = 0; i < s->nb_streams; i++) {
1090 AVStream *st = s->streams[i];
1093 av_rescale(timestamp,
1094 st->time_base.den * (int64_t) ref_st->time_base.num,
1095 st->time_base.num * (int64_t) ref_st->time_base.den);
1099 void ff_reduce_index(AVFormatContext *s, int stream_index)
1101 AVStream *st = s->streams[stream_index];
1102 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1104 if ((unsigned) st->nb_index_entries >= max_entries) {
1106 for (i = 0; 2 * i < st->nb_index_entries; i++)
1107 st->index_entries[i] = st->index_entries[2 * i];
1108 st->nb_index_entries = i;
1112 int ff_add_index_entry(AVIndexEntry **index_entries,
1113 int *nb_index_entries,
1114 unsigned int *index_entries_allocated_size,
1115 int64_t pos, int64_t timestamp,
1116 int size, int distance, int flags)
1118 AVIndexEntry *entries, *ie;
1121 if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1124 entries = av_fast_realloc(*index_entries,
1125 index_entries_allocated_size,
1126 (*nb_index_entries + 1) *
1127 sizeof(AVIndexEntry));
1131 *index_entries = entries;
1133 index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1134 timestamp, AVSEEK_FLAG_ANY);
1137 index = (*nb_index_entries)++;
1138 ie = &entries[index];
1139 assert(index == 0 || ie[-1].timestamp < timestamp);
1141 ie = &entries[index];
1142 if (ie->timestamp != timestamp) {
1143 if (ie->timestamp <= timestamp)
1145 memmove(entries + index + 1, entries + index,
1146 sizeof(AVIndexEntry) * (*nb_index_entries - index));
1147 (*nb_index_entries)++;
1148 } else if (ie->pos == pos && distance < ie->min_distance)
1149 // do not reduce the distance
1150 distance = ie->min_distance;
1154 ie->timestamp = timestamp;
1155 ie->min_distance = distance;
1162 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1163 int size, int distance, int flags)
1165 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1166 &st->index_entries_allocated_size, pos,
1167 timestamp, size, distance, flags);
1170 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1171 int64_t wanted_timestamp, int flags)
1179 // Optimize appending index entries at the end.
1180 if (b && entries[b - 1].timestamp < wanted_timestamp)
1185 timestamp = entries[m].timestamp;
1186 if (timestamp >= wanted_timestamp)
1188 if (timestamp <= wanted_timestamp)
1191 m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1193 if (!(flags & AVSEEK_FLAG_ANY))
1194 while (m >= 0 && m < nb_entries &&
1195 !(entries[m].flags & AVINDEX_KEYFRAME))
1196 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1198 if (m == nb_entries)
1203 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1205 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1206 wanted_timestamp, flags);
/* Seek using the demuxer's read_timestamp() via binary search, seeding the
 * search bounds from cached index entries when available.
 * NOTE(review): extraction dropped interior lines (declarations, error
 * returns, closing braces); code tokens are kept verbatim. */
int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
                         int64_t target_ts, int flags)
    AVInputFormat *avif = s->iformat;
    int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
    int64_t ts_min, ts_max, ts;
    if (stream_index < 0)
    av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
    ts_min = AV_NOPTS_VALUE;
    pos_limit = -1; // GCC falsely says it may be uninitialized.
    st = s->streams[stream_index];
    if (st->index_entries) {
        /* FIXME: Whole function must be checked for non-keyframe entries in
         * index case, especially read_timestamp(). */
        /* Lower bound: last index entry at or before the target. */
        index = av_index_search_timestamp(st, target_ts,
                                          flags | AVSEEK_FLAG_BACKWARD);
        index = FFMAX(index, 0);
        e = &st->index_entries[index];
        if (e->timestamp <= target_ts || e->pos == e->min_distance) {
            ts_min = e->timestamp;
            av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
        /* Upper bound: first index entry at or after the target. */
        index = av_index_search_timestamp(st, target_ts,
                                          flags & ~AVSEEK_FLAG_BACKWARD);
        assert(index < st->nb_index_entries);
        e = &st->index_entries[index];
        assert(e->timestamp >= target_ts);
        ts_max = e->timestamp;
        pos_limit = pos_max - e->min_distance;
        av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
                " dts_max=%"PRId64"\n", pos_max, pos_limit, ts_max);
    pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
                        ts_min, ts_max, flags, &ts, avif->read_timestamp);
    if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
    /* Propagate the reached timestamp to every stream's cur_dts. */
    ff_update_cur_dts(s, st, ts);
/* Generic timestamp binary search over a byte range, using the demuxer's
 * read_timestamp() callback; interpolates first, bisects on stagnation,
 * then falls back to linear search. Returns the byte position to seek to
 * and the reached timestamp in *ts_ret.
 * NOTE(review): extraction dropped interior lines (returns, loop bodies,
 * no_change bookkeeping); code tokens are kept verbatim. */
int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
                      int64_t pos_min, int64_t pos_max, int64_t pos_limit,
                      int64_t ts_min, int64_t ts_max,
                      int flags, int64_t *ts_ret,
                      int64_t (*read_timestamp)(struct AVFormatContext *, int,
                                                int64_t *, int64_t))
    int64_t start_pos, filesize;
    av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
    /* Establish the lower bound from the start of data if unknown. */
    if (ts_min == AV_NOPTS_VALUE) {
        pos_min = s->data_offset;
        ts_min  = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
        if (ts_min == AV_NOPTS_VALUE)
    /* Establish the upper bound by stepping back from end of file. */
    if (ts_max == AV_NOPTS_VALUE) {
        filesize = avio_size(s->pb);
        pos_max  = filesize - 1;
            ts_max = read_timestamp(s, stream_index, &pos_max,
        } while (ts_max == AV_NOPTS_VALUE && pos_max >= step);
        if (ts_max == AV_NOPTS_VALUE)
            /* Confirm pos_max really is the last timestamped position. */
            int64_t tmp_pos = pos_max + 1;
            int64_t tmp_ts  = read_timestamp(s, stream_index,
                                             &tmp_pos, INT64_MAX);
            if (tmp_ts == AV_NOPTS_VALUE)
            if (tmp_pos >= filesize)
        pos_limit = pos_max;
    if (ts_min > ts_max)
    else if (ts_min == ts_max)
        pos_limit = pos_min;
    while (pos_min < pos_limit) {
        av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64
                " dts_max=%"PRId64"\n", pos_min, pos_max, ts_min, ts_max);
        assert(pos_limit <= pos_max);
        if (no_change == 0) {
            int64_t approximate_keyframe_distance = pos_max - pos_limit;
            // interpolate position (better than dichotomy)
            pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
                  pos_min - approximate_keyframe_distance;
        } else if (no_change == 1) {
            // bisection if interpolation did not change min / max pos last time
            pos = (pos_min + pos_limit) >> 1;
            /* linear search if bisection failed, can only happen if there
             * are very few or no keyframes between min/max */
        else if (pos > pos_limit)
        // May pass pos_limit instead of -1.
        ts = read_timestamp(s, stream_index, &pos, INT64_MAX);
        av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64
                " target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
                pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
                pos_limit, start_pos, no_change);
        if (ts == AV_NOPTS_VALUE) {
            av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
        assert(ts != AV_NOPTS_VALUE);
        /* Narrow the interval on the appropriate side. */
        if (target_ts <= ts) {
            pos_limit = start_pos - 1;
        if (target_ts >= ts) {
    /* Pick the boundary matching the seek direction. */
    pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
    ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min  : ts_max;
    ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
    av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
            pos, ts_min, target_ts, ts_max);
/* Seek to an absolute byte position, clamped to [data_offset, filesize-1].
 * NOTE(review): numbering gaps show some lines of this listing are elided. */
1392 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1393 int64_t pos, int flags)
1395 int64_t pos_min, pos_max;
1397 pos_min = s->data_offset;
1398 pos_max = avio_size(s->pb) - 1;
1402 else if (pos > pos_max)
1405 avio_seek(s->pb, pos, SEEK_SET);
/* Index-based seek: look the timestamp up in the stream's index; if it lies
 * beyond the last index entry, read frames forward (building the index) until
 * a keyframe past the target is seen, then retry the lookup and seek to the
 * matching entry.  NOTE(review): numbering gaps show elided lines. */
1410 static int seek_frame_generic(AVFormatContext *s, int stream_index,
1411 int64_t timestamp, int flags)
1418 st = s->streams[stream_index];
1420 index = av_index_search_timestamp(st, timestamp, flags);
1422 if (index < 0 && st->nb_index_entries &&
1423 timestamp < st->index_entries[0].timestamp)
/* Target is past the indexed region: position at the last entry (or the
 * start of data) and scan forward. */
1426 if (index < 0 || index == st->nb_index_entries - 1) {
1429 if (st->nb_index_entries) {
1430 assert(st->index_entries);
1431 ie = &st->index_entries[st->nb_index_entries - 1];
1432 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1434 ff_update_cur_dts(s, st, ie->timestamp);
1436 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
/* Read (and discard) frames until a keyframe with dts > timestamp shows up
 * on the wanted stream; EAGAIN from av_read_frame is retried. */
1442 read_status = av_read_frame(s, &pkt);
1443 } while (read_status == AVERROR(EAGAIN));
1444 if (read_status < 0)
1446 av_free_packet(&pkt);
1447 if (stream_index == pkt.stream_index)
1448 if ((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1451 index = av_index_search_timestamp(st, timestamp, flags);
1456 ff_read_frame_flush(s);
/* Prefer the demuxer's own read_seek when it succeeds. */
1457 if (s->iformat->read_seek)
1458 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1460 ie = &st->index_entries[index];
1461 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1463 ff_update_cur_dts(s, st, ie->timestamp);
/* Dispatch a seek request: byte seek, then the demuxer's read_seek, then
 * binary search via read_timestamp, then the generic index-based seek.
 * A negative stream_index selects a default stream and rescales the
 * AV_TIME_BASE timestamp into that stream's time base.
 * NOTE(review): numbering gaps show elided lines. */
1468 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1469 int64_t timestamp, int flags)
1474 if (flags & AVSEEK_FLAG_BYTE) {
1475 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1477 ff_read_frame_flush(s);
1478 return seek_frame_byte(s, stream_index, timestamp, flags);
1481 if (stream_index < 0) {
1482 stream_index = av_find_default_stream_index(s);
1483 if (stream_index < 0)
1486 st = s->streams[stream_index];
1487 /* timestamp for default must be expressed in AV_TIME_BASE units */
1488 timestamp = av_rescale(timestamp, st->time_base.den,
1489 AV_TIME_BASE * (int64_t) st->time_base.num);
1492 /* first, we try the format specific seek */
1493 if (s->iformat->read_seek) {
1494 ff_read_frame_flush(s);
1495 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
/* Fallbacks, gated by the demuxer's capability flags. */
1501 if (s->iformat->read_timestamp &&
1502 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1503 ff_read_frame_flush(s);
1504 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1505 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1506 ff_read_frame_flush(s);
1507 return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public seek entry point: delegate to seek_frame_internal(), then re-queue
 * attached pictures so they are returned again after the seek.
 * NOTE(review): numbering gaps show elided lines (e.g. the return path). */
1512 int av_seek_frame(AVFormatContext *s, int stream_index,
1513 int64_t timestamp, int flags)
1515 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1518 ret = queue_attached_pictures(s);
/* New-style seek API with a [min_ts, max_ts] tolerance window around ts.
 * Uses the demuxer's read_seek2 when available; otherwise falls back to the
 * old av_seek_frame() API, choosing AVSEEK_FLAG_BACKWARD by whichever side
 * of the window is wider.  NOTE(review): numbering gaps show elided lines. */
1523 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
1524 int64_t ts, int64_t max_ts, int flags)
/* Reject an inconsistent window. */
1526 if (min_ts > ts || max_ts < ts)
1529 if (s->iformat->read_seek2) {
1531 ff_read_frame_flush(s);
1532 ret = s->iformat->read_seek2(s, stream_index, min_ts,
1536 ret = queue_attached_pictures(s);
1540 if (s->iformat->read_timestamp) {
1541 // try to seek via read_timestamp()
1544 // Fall back on old API if new is not implemented but old is.
1545 // Note the old API has somewhat different semantics.
1546 if (s->iformat->read_seek || 1)
1547 return av_seek_frame(s, stream_index, ts,
1548 flags | ((uint64_t) ts - min_ts >
1549 (uint64_t) max_ts - ts
1550 ? AVSEEK_FLAG_BACKWARD : 0));
1552 // try some generic seek like seek_frame_generic() but with new ts semantics
1555 /*******************************************************/
1558 * Return TRUE if the stream has accurate duration in any stream.
1560 * @return TRUE if the stream has accurate duration for at least one component.
/* True when any individual stream, or the container itself, carries a
 * known duration.  NOTE(review): numbering gaps show elided lines. */
1562 static int has_duration(AVFormatContext *ic)
1567 for (i = 0; i < ic->nb_streams; i++) {
1568 st = ic->streams[i];
1569 if (st->duration != AV_NOPTS_VALUE)
1572 if (ic->duration != AV_NOPTS_VALUE)
1578 * Estimate the stream timings from the one of each components.
1580 * Also computes the global bitrate if possible.
/* Derive the container's start_time, duration and (if filesize is known)
 * bit_rate from the per-stream values, rescaled into AV_TIME_BASE units.
 * NOTE(review): numbering gaps show elided lines. */
1582 static void update_stream_timings(AVFormatContext *ic)
1584 int64_t start_time, start_time1, end_time, end_time1;
1585 int64_t duration, duration1, filesize;
/* Sentinels: MAX/MIN so FFMIN/FFMAX below converge on real values. */
1589 start_time = INT64_MAX;
1590 end_time = INT64_MIN;
1591 duration = INT64_MIN;
1592 for (i = 0; i < ic->nb_streams; i++) {
1593 st = ic->streams[i];
1594 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1595 start_time1 = av_rescale_q(st->start_time, st->time_base,
1597 start_time = FFMIN(start_time, start_time1);
1598 if (st->duration != AV_NOPTS_VALUE) {
1599 end_time1 = start_time1 +
1600 av_rescale_q(st->duration, st->time_base,
1602 end_time = FFMAX(end_time, end_time1);
1605 if (st->duration != AV_NOPTS_VALUE) {
1606 duration1 = av_rescale_q(st->duration, st->time_base,
1608 duration = FFMAX(duration, duration1);
1611 if (start_time != INT64_MAX) {
1612 ic->start_time = start_time;
1613 if (end_time != INT64_MIN)
1614 duration = FFMAX(duration, end_time - start_time);
1616 if (duration != INT64_MIN) {
1617 ic->duration = duration;
1618 if (ic->pb && (filesize = avio_size(ic->pb)) > 0)
1619 /* compute the bitrate */
1620 ic->bit_rate = (double) filesize * 8.0 * AV_TIME_BASE /
1621 (double) ic->duration;
/* Propagate container-level start_time/duration down to any stream that has
 * no start_time of its own, rescaling from AV_TIME_BASE_Q to the stream's
 * time base.  NOTE(review): numbering gaps show elided lines. */
1625 static void fill_all_stream_timings(AVFormatContext *ic)
1630 update_stream_timings(ic);
1631 for (i = 0; i < ic->nb_streams; i++) {
1632 st = ic->streams[i];
1633 if (st->start_time == AV_NOPTS_VALUE) {
1634 if (ic->start_time != AV_NOPTS_VALUE)
1635 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
1637 if (ic->duration != AV_NOPTS_VALUE)
1638 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
/* Least-precise timing estimate: sum per-stream bitrates into the container
 * bitrate (with overflow guard), then derive missing stream durations from
 * filesize * 8 / bitrate.  NOTE(review): numbering gaps show elided lines. */
1644 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1646 int64_t filesize, duration;
1650 /* if bit_rate is already set, we believe it */
1651 if (ic->bit_rate <= 0) {
1653 for (i = 0; i < ic->nb_streams; i++) {
1654 st = ic->streams[i];
1655 if (st->codec->bit_rate > 0) {
/* Guard against int overflow when accumulating bitrates. */
1656 if (INT_MAX - st->codec->bit_rate < bit_rate) {
1660 bit_rate += st->codec->bit_rate;
1663 ic->bit_rate = bit_rate;
1666 /* if duration is already set, we believe it */
1667 if (ic->duration == AV_NOPTS_VALUE &&
1668 ic->bit_rate != 0) {
1669 filesize = ic->pb ? avio_size(ic->pb) : 0;
1671 for (i = 0; i < ic->nb_streams; i++) {
1672 st = ic->streams[i];
1673 duration = av_rescale(8 * filesize, st->time_base.den,
1675 (int64_t) st->time_base.num);
1676 if (st->duration == AV_NOPTS_VALUE)
1677 st->duration = duration;
1683 #define DURATION_MAX_READ_SIZE 250000
1684 #define DURATION_MAX_RETRY 3
1686 /* only usable for MPEG-PS streams */
/* Estimate stream durations by seeking near the end of the file and reading
 * PTS values from the last packets, retrying with a larger window
 * (DURATION_MAX_READ_SIZE << retry) up to DURATION_MAX_RETRY times.
 * Restores the read position to old_offset afterwards.
 * NOTE(review): numbering gaps show elided lines. */
1687 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1689 AVPacket pkt1, *pkt = &pkt1;
1691 int read_size, i, ret;
1693 int64_t filesize, offset, duration;
1696 /* flush packet queue */
1697 flush_packet_queue(ic);
1699 for (i = 0; i < ic->nb_streams; i++) {
1700 st = ic->streams[i];
1701 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1702 av_log(st->codec, AV_LOG_WARNING,
1703 "start time is not set in estimate_timings_from_pts\n");
1706 av_parser_close(st->parser);
1711 /* estimate the end time (duration) */
1712 /* XXX: may need to support wrapping */
1713 filesize = ic->pb ? avio_size(ic->pb) : 0;
1714 end_time = AV_NOPTS_VALUE;
/* Seek to a window of DURATION_MAX_READ_SIZE << retry bytes before EOF. */
1716 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
1720 avio_seek(ic->pb, offset, SEEK_SET);
1723 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
1727 ret = ff_read_packet(ic, pkt);
1728 } while (ret == AVERROR(EAGAIN));
1731 read_size += pkt->size;
1732 st = ic->streams[pkt->stream_index];
/* duration = last pts minus the stream's start (or first_dts), with a
 * pts_wrap_bits correction when it comes out negative (elided here). */
1733 if (pkt->pts != AV_NOPTS_VALUE &&
1734 (st->start_time != AV_NOPTS_VALUE ||
1735 st->first_dts != AV_NOPTS_VALUE)) {
1736 duration = end_time = pkt->pts;
1737 if (st->start_time != AV_NOPTS_VALUE)
1738 duration -= st->start_time;
1740 duration -= st->first_dts;
1742 duration += 1LL << st->pts_wrap_bits;
1744 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1745 st->duration = duration;
1748 av_free_packet(pkt);
1750 } while (end_time == AV_NOPTS_VALUE &&
1751 filesize > (DURATION_MAX_READ_SIZE << retry) &&
1752 ++retry <= DURATION_MAX_RETRY);
1754 fill_all_stream_timings(ic);
/* Restore the original file position and reset per-stream decode state. */
1756 avio_seek(ic->pb, old_offset, SEEK_SET);
1757 for (i = 0; i < ic->nb_streams; i++) {
1758 st = ic->streams[i];
1759 st->cur_dts = st->first_dts;
1760 st->last_IP_pts = AV_NOPTS_VALUE;
/* Choose a timing-estimation strategy: exact PTS scan for seekable MPEG-PS/TS,
 * per-stream timings when any are present, else the bitrate heuristic; then
 * dump the result at debug level.  NOTE(review): numbering gaps show elided
 * lines. */
1764 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1768 /* get the file size, if possible */
1769 if (ic->iformat->flags & AVFMT_NOFILE) {
1772 file_size = avio_size(ic->pb);
1773 file_size = FFMAX(0, file_size);
1776 if ((!strcmp(ic->iformat->name, "mpeg") ||
1777 !strcmp(ic->iformat->name, "mpegts")) &&
1778 file_size && ic->pb->seekable) {
1779 /* get accurate estimate from the PTSes */
1780 estimate_timings_from_pts(ic, old_offset);
1781 } else if (has_duration(ic)) {
1782 /* at least one component has timings - we use them for all
1784 fill_all_stream_timings(ic);
1786 av_log(ic, AV_LOG_WARNING,
1787 "Estimating duration from bitrate, this may be inaccurate\n");
1788 /* less precise: use bitrate info */
1789 estimate_timings_from_bit_rate(ic);
1791 update_stream_timings(ic);
/* Debug dump of the computed timings (compiled out unless debugging). */
1795 AVStream av_unused *st;
1796 for (i = 0; i < ic->nb_streams; i++) {
1797 st = ic->streams[i];
1798 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
1799 (double) st->start_time / AV_TIME_BASE,
1800 (double) st->duration / AV_TIME_BASE);
1803 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1804 (double) ic->start_time / AV_TIME_BASE,
1805 (double) ic->duration / AV_TIME_BASE,
1806 ic->bit_rate / 1000);
/* Return nonzero when the stream's codec context carries enough parameters
 * to be usable: a known codec_id plus per-media-type essentials (sample rate
 * and channels for audio, plus sample_fmt/pix_fmt once a decoder was probed).
 * NOTE(review): numbering gaps show elided lines. */
1810 static int has_codec_parameters(AVStream *st)
1812 AVCodecContext *avctx = st->codec;
1815 switch (avctx->codec_type) {
1816 case AVMEDIA_TYPE_AUDIO:
1817 val = avctx->sample_rate && avctx->channels;
1818 if (st->info->found_decoder >= 0 &&
1819 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1822 case AVMEDIA_TYPE_VIDEO:
1824 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1831 return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
/* H.264 has reordering delay; require at least 6 decoded frames before
 * trusting the guessed delay.  Other codecs need no such warm-up. */
1834 static int has_decode_delay_been_guessed(AVStream *st)
1836 return st->codec->codec_id != AV_CODEC_ID_H264 ||
1837 st->info->nb_decoded_frames >= 6;
1840 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/* Open the stream's decoder if needed (single-threaded, see comment below)
 * and decode packets until the codec parameters are filled in.
 * st->info->found_decoder: 0 = not tried, 1 = opened, -1 = open failed.
 * NOTE(review): numbering gaps show elided lines. */
1841 static int try_decode_frame(AVStream *st, AVPacket *avpkt,
1842 AVDictionary **options)
1844 const AVCodec *codec;
1845 int got_picture = 1, ret = 0;
1846 AVFrame *frame = av_frame_alloc();
1847 AVPacket pkt = *avpkt;
1850 return AVERROR(ENOMEM);
1852 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
1853 AVDictionary *thread_opt = NULL;
1855 codec = st->codec->codec ? st->codec->codec
1856 : avcodec_find_decoder(st->codec->codec_id);
1859 st->info->found_decoder = -1;
1864 /* Force thread count to 1 since the H.264 decoder will not extract
1865 * SPS and PPS to extradata during multi-threaded decoding. */
1866 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
1867 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
1869 av_dict_free(&thread_opt);
1871 st->info->found_decoder = -1;
1874 st->info->found_decoder = 1;
1875 } else if (!st->info->found_decoder)
1876 st->info->found_decoder = 1;
1878 if (st->info->found_decoder < 0) {
/* Keep decoding while data remains (or the decoder is being flushed with a
 * NULL packet) and parameters/delay are still unknown. */
1883 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
1885 (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
1886 (!st->codec_info_nb_frames &&
1887 st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
1889 switch (st->codec->codec_type) {
1890 case AVMEDIA_TYPE_VIDEO:
1891 ret = avcodec_decode_video2(st->codec, frame,
1892 &got_picture, &pkt);
1894 case AVMEDIA_TYPE_AUDIO:
1895 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
1902 st->info->nb_decoded_frames++;
1910 av_frame_free(&frame);
/* Map a codec id to its container tag by scanning the AV_CODEC_ID_NONE-
 * terminated table.  NOTE(review): loop body elided in this listing. */
1914 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
1916 while (tags->id != AV_CODEC_ID_NONE) {
/* Reverse lookup: tag -> codec id.  First pass matches the tag exactly;
 * second pass retries case-insensitively via avpriv_toupper4().
 * NOTE(review): the return statements inside the loops are elided here. */
1924 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1927 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1928 if (tag == tags[i].tag)
1930 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1931 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
1933 return AV_CODEC_ID_NONE;
/* Select a PCM codec id from bits-per-sample, float flag, endianness and a
 * signedness bitmask (bit bps-1 of sflags selects signed).
 * NOTE(review): numbering gaps show the case labels are elided. */
1936 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
/* Float formats: only 32- and 64-bit variants exist. */
1941 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
1943 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
1945 return AV_CODEC_ID_NONE;
/* Integer formats: signed set first, then unsigned. */
1949 if (sflags & (1 << (bps - 1))) {
1952 return AV_CODEC_ID_PCM_S8;
1954 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
1956 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
1958 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
1960 return AV_CODEC_ID_NONE;
1965 return AV_CODEC_ID_PCM_U8;
1967 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
1969 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
1971 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
1973 return AV_CODEC_ID_NONE;
/* Search each table in the NULL-terminated list of tag tables for id.
 * NOTE(review): the success-return inside the loop is elided here. */
1979 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
1982 for (i = 0; tags && tags[i]; i++) {
1983 int tag = ff_codec_get_tag(tags[i], id);
/* Search each table in the NULL-terminated list of tag tables for tag,
 * returning the first non-NONE codec id found. */
1990 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
1993 for (i = 0; tags && tags[i]; i++) {
1994 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
1995 if (id != AV_CODEC_ID_NONE)
1998 return AV_CODEC_ID_NONE;
/* Fill in the end time of chapters that lack one: default to the container's
 * end (start_time + duration), clipped to the start of the next chapter.
 * NOTE(review): numbering gaps show elided lines. */
2001 static void compute_chapters_end(AVFormatContext *s)
2004 int64_t max_time = s->duration +
2005 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2007 for (i = 0; i < s->nb_chapters; i++)
2008 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2009 AVChapter *ch = s->chapters[i];
2010 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
/* Clip the candidate end to the nearest following chapter start. */
2014 for (j = 0; j < s->nb_chapters; j++) {
2015 AVChapter *ch1 = s->chapters[j];
2016 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2018 if (j != i && next_start > ch->start && next_start < end)
/* INT64_MAX means "no bound found"; collapse the chapter to zero length. */
2021 ch->end = (end == INT64_MAX) ? ch->start : end;
/* Enumerate "standard" framerates over the 12*1001 timebase: first the
 * (i+1)*1001 family (23.976, 47.952, ...), then 24/30/60/12/15 * 1000.
 * NOTE(review): the branch condition between the two families is elided. */
2025 static int get_std_framerate(int i)
2028 return (i + 1) * 1001;
2030 return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
/* Probe the input: open decoders, read packets (buffering them unless
 * AVFMT_FLAG_NOBUFFER), decode as needed until every stream has usable codec
 * parameters or a probe limit (probesize / max_analyze_duration) is hit, then
 * derive framerates, dispositions, timings and chapter ends.
 * NOTE(review): numbering gaps show this listing elides many lines; comments
 * describe only the visible code. */
2033 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2035 int i, count, ret, read_size, j;
2037 AVPacket pkt1, *pkt;
2038 int64_t old_offset = avio_tell(ic->pb);
2039 // new streams might appear, no options for those
2040 int orig_nb_streams = ic->nb_streams;
/* Pass 1: set up parsers and try opening a decoder per stream. */
2042 for (i = 0; i < ic->nb_streams; i++) {
2043 const AVCodec *codec;
2044 AVDictionary *thread_opt = NULL;
2045 st = ic->streams[i];
2047 // only for the split stuff
2048 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2049 st->parser = av_parser_init(st->codec->codec_id);
2050 if (st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser)
2051 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2053 codec = st->codec->codec ? st->codec->codec
2054 : avcodec_find_decoder(st->codec->codec_id);
2056 /* Force thread count to 1 since the H.264 decoder will not extract
2057 * SPS and PPS to extradata during multi-threaded decoding. */
2058 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2060 /* Ensure that subtitle_header is properly set. */
2061 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2062 && codec && !st->codec->codec)
2063 avcodec_open2(st->codec, codec,
2064 options ? &options[i] : &thread_opt);
2066 // Try to just open decoders, in case this is enough to get parameters.
2067 if (!has_codec_parameters(st)) {
2068 if (codec && !st->codec->codec)
2069 avcodec_open2(st->codec, codec,
2070 options ? &options[i] : &thread_opt);
2073 av_dict_free(&thread_opt);
2076 for (i = 0; i < ic->nb_streams; i++) {
2077 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2078 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
/* Main probe loop (loop header elided in this listing). */
2084 if (ff_check_interrupt(&ic->interrupt_callback)) {
2086 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2090 /* check if one codec still needs to be handled */
2091 for (i = 0; i < ic->nb_streams; i++) {
2092 int fps_analyze_framecount = 20;
2094 st = ic->streams[i];
2095 if (!has_codec_parameters(st))
2097 /* If the timebase is coarse (like the usual millisecond precision
2098 * of mkv), we need to analyze more frames to reliably arrive at
2099 * the correct fps. */
2100 if (av_q2d(st->time_base) > 0.0005)
2101 fps_analyze_framecount *= 2;
2102 if (ic->fps_probe_size >= 0)
2103 fps_analyze_framecount = ic->fps_probe_size;
2104 /* variable fps and no guess at the real fps */
2105 if (!st->avg_frame_rate.num &&
2106 st->codec_info_nb_frames < fps_analyze_framecount &&
2107 st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2109 if (st->parser && st->parser->parser->split &&
2110 !st->codec->extradata)
2112 if (st->first_dts == AV_NOPTS_VALUE &&
2113 st->codec_info_nb_frames < ic->max_ts_probe &&
2114 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2115 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2118 if (i == ic->nb_streams) {
2119 /* NOTE: If the format has no header, then we need to read some
2120 * packets to get most of the streams, so we cannot stop here. */
2121 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2122 /* If we found the info for all the codecs, we can stop. */
2124 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2128 /* We did not get all the codec info, but we read too much data. */
2129 if (read_size >= ic->probesize) {
2131 av_log(ic, AV_LOG_DEBUG,
2132 "Probe buffer size limit %d reached\n", ic->probesize);
2136 /* NOTE: A new stream can be added there if no header in file
2137 * (AVFMTCTX_NOHEADER). */
2138 ret = read_frame_internal(ic, &pkt1);
2139 if (ret == AVERROR(EAGAIN))
/* EOF path: flush each opened decoder with an empty packet until the
 * parameters are known, and warn about streams that stay incomplete. */
2144 AVPacket empty_pkt = { 0 };
2146 av_init_packet(&empty_pkt);
2148 /* We could not have all the codec parameters before EOF. */
2150 for (i = 0; i < ic->nb_streams; i++) {
2151 st = ic->streams[i];
2153 /* flush the decoders */
2154 if (st->info->found_decoder == 1) {
2156 err = try_decode_frame(st, &empty_pkt,
2157 (options && i < orig_nb_streams)
2158 ? &options[i] : NULL);
2159 } while (err > 0 && !has_codec_parameters(st));
2163 av_log(ic, AV_LOG_WARNING,
2164 "decoding for stream %d failed\n", st->index);
2165 } else if (!has_codec_parameters(st)) {
2167 avcodec_string(buf, sizeof(buf), st->codec, 0);
2168 av_log(ic, AV_LOG_WARNING,
2169 "Could not find codec parameters (%s)\n", buf);
/* Buffer the packet (unless NOBUFFER) so it can be returned later. */
2177 if (ic->flags & AVFMT_FLAG_NOBUFFER) {
2180 pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
2181 &ic->packet_buffer_end);
2182 if ((ret = av_dup_packet(pkt)) < 0)
2183 goto find_stream_info_err;
2186 read_size += pkt->size;
2188 st = ic->streams[pkt->stream_index];
2189 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2190 /* check for non-increasing dts */
2191 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2192 st->info->fps_last_dts >= pkt->dts) {
2193 av_log(ic, AV_LOG_WARNING,
2194 "Non-increasing DTS in stream %d: packet %d with DTS "
2195 "%"PRId64", packet %d with DTS %"PRId64"\n",
2196 st->index, st->info->fps_last_dts_idx,
2197 st->info->fps_last_dts, st->codec_info_nb_frames,
2199 st->info->fps_first_dts =
2200 st->info->fps_last_dts = AV_NOPTS_VALUE;
2202 /* Check for a discontinuity in dts. If the difference in dts
2203 * is more than 1000 times the average packet duration in the
2204 * sequence, we treat it as a discontinuity. */
2205 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2206 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2207 (pkt->dts - st->info->fps_last_dts) / 1000 >
2208 (st->info->fps_last_dts - st->info->fps_first_dts) /
2209 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2210 av_log(ic, AV_LOG_WARNING,
2211 "DTS discontinuity in stream %d: packet %d with DTS "
2212 "%"PRId64", packet %d with DTS %"PRId64"\n",
2213 st->index, st->info->fps_last_dts_idx,
2214 st->info->fps_last_dts, st->codec_info_nb_frames,
2216 st->info->fps_first_dts =
2217 st->info->fps_last_dts = AV_NOPTS_VALUE;
2220 /* update stored dts values */
2221 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2222 st->info->fps_first_dts = pkt->dts;
2223 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2225 st->info->fps_last_dts = pkt->dts;
2226 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2228 /* check max_analyze_duration */
2229 if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2230 AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2231 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached\n",
2232 ic->max_analyze_duration);
2233 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2234 av_packet_unref(pkt);
/* Extract extradata via the parser's split callback when missing. */
2238 if (st->parser && st->parser->parser->split && !st->codec->extradata) {
2239 int i = st->parser->parser->split(st->codec, pkt->data, pkt->size);
2240 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2241 st->codec->extradata_size = i;
2242 st->codec->extradata = av_mallocz(st->codec->extradata_size +
2243 FF_INPUT_BUFFER_PADDING_SIZE);
2244 if (!st->codec->extradata)
2245 return AVERROR(ENOMEM);
2246 memcpy(st->codec->extradata, pkt->data,
2247 st->codec->extradata_size);
2251 /* If still no information, we try to open the codec and to
2252 * decompress the frame. We try to avoid that in most cases as
2253 * it takes longer and uses more memory. For MPEG-4, we need to
2254 * decompress for QuickTime.
2256 * If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2257 * least one frame of codec data, this makes sure the codec initializes
2258 * the channel configuration and does not only trust the values from
2260 try_decode_frame(st, pkt,
2261 (options && i < orig_nb_streams) ? &options[i] : NULL);
2263 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2264 av_packet_unref(pkt);
2266 st->codec_info_nb_frames++;
2270 // close codecs which were opened in try_decode_frame()
2271 for (i = 0; i < ic->nb_streams; i++) {
2272 st = ic->streams[i];
2273 avcodec_close(st->codec);
/* Post-processing: framerate estimation for video, disposition for audio. */
2275 for (i = 0; i < ic->nb_streams; i++) {
2276 st = ic->streams[i];
2277 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2278 /* estimate average framerate if not set by demuxer */
2279 if (!st->avg_frame_rate.num &&
2280 st->info->fps_last_dts != st->info->fps_first_dts) {
2281 int64_t delta_dts = st->info->fps_last_dts -
2282 st->info->fps_first_dts;
2283 int delta_packets = st->info->fps_last_dts_idx -
2284 st->info->fps_first_dts_idx;
2286 double best_error = 0.01;
/* Overflow guard before the av_reduce() multiply below. */
2288 if (delta_dts >= INT64_MAX / st->time_base.num ||
2289 delta_packets >= INT64_MAX / st->time_base.den ||
2292 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2293 delta_packets * (int64_t) st->time_base.den,
2294 delta_dts * (int64_t) st->time_base.num, 60000);
2296 /* Round guessed framerate to a "standard" framerate if it's
2297 * within 1% of the original estimate. */
2298 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
2299 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
2300 double error = fabs(av_q2d(st->avg_frame_rate) /
2301 av_q2d(std_fps) - 1);
2303 if (error < best_error) {
2305 best_fps = std_fps.num;
2309 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2310 best_fps, 12 * 1001, INT_MAX);
2312 } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2313 if (!st->codec->bits_per_coded_sample)
2314 st->codec->bits_per_coded_sample =
2315 av_get_bits_per_sample(st->codec->codec_id);
2316 // set stream disposition based on audio service type
2317 switch (st->codec->audio_service_type) {
2318 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2319 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
2321 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2322 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
2324 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2325 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
2327 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2328 st->disposition = AV_DISPOSITION_COMMENT;
2330 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2331 st->disposition = AV_DISPOSITION_KARAOKE;
2337 estimate_timings(ic, old_offset);
2339 compute_chapters_end(ic);
/* Cleanup: reset thread counts and release per-stream probe state. */
2341 find_stream_info_err:
2342 for (i = 0; i < ic->nb_streams; i++) {
2343 ic->streams[i]->codec->thread_count = 0;
2344 av_freep(&ic->streams[i]->info);
/* Return the first program whose stream_index list contains stream s,
 * or (in the elided tail) NULL when none does. */
2349 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2353 for (i = 0; i < ic->nb_programs; i++)
2354 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2355 if (ic->programs[i]->stream_index[j] == s)
2356 return ic->programs[i];
/* Pick the "best" stream of the requested media type: restrict to the related
 * stream's program when given, skip impaired-audience dispositions, prefer the
 * stream with the most probed frames, and optionally return its decoder.
 * NOTE(review): numbering gaps show elided lines. */
2360 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
2361 int wanted_stream_nb, int related_stream,
2362 AVCodec **decoder_ret, int flags)
2364 int i, nb_streams = ic->nb_streams;
2365 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2366 unsigned *program = NULL;
2367 AVCodec *decoder = NULL, *best_decoder = NULL;
/* When a related stream is given, search only within its program. */
2369 if (related_stream >= 0 && wanted_stream_nb < 0) {
2370 AVProgram *p = find_program_from_stream(ic, related_stream);
2372 program = p->stream_index;
2373 nb_streams = p->nb_stream_indexes;
2376 for (i = 0; i < nb_streams; i++) {
2377 int real_stream_index = program ? program[i] : i;
2378 AVStream *st = ic->streams[real_stream_index];
2379 AVCodecContext *avctx = st->codec;
2380 if (avctx->codec_type != type)
2382 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2384 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
2385 AV_DISPOSITION_VISUAL_IMPAIRED))
2388 decoder = avcodec_find_decoder(st->codec->codec_id);
2391 ret = AVERROR_DECODER_NOT_FOUND;
/* Keep the candidate with the highest codec_info_nb_frames. */
2395 if (best_count >= st->codec_info_nb_frames)
2397 best_count = st->codec_info_nb_frames;
2398 ret = real_stream_index;
2399 best_decoder = decoder;
2400 if (program && i == nb_streams - 1 && ret < 0) {
2402 nb_streams = ic->nb_streams;
2403 /* no related stream found, try again with everything */
2408 *decoder_ret = best_decoder;
2412 /*******************************************************/
/* Resume a paused stream: prefer the demuxer's read_play callback, fall back
 * to avio_pause(pb, 0), else report ENOSYS. */
2414 int av_read_play(AVFormatContext *s)
2416 if (s->iformat->read_play)
2417 return s->iformat->read_play(s);
2419 return avio_pause(s->pb, 0);
2420 return AVERROR(ENOSYS);
/* Pause a stream: prefer the demuxer's read_pause callback, fall back to
 * avio_pause(pb, 1), else report ENOSYS. */
2423 int av_read_pause(AVFormatContext *s)
2425 if (s->iformat->read_pause)
2426 return s->iformat->read_pause(s);
2428 return avio_pause(s->pb, 1);
2429 return AVERROR(ENOSYS);
/* Release every allocation owned by the context: per-stream data (side data,
 * parser, attached picture, metadata, index, codec extradata), programs,
 * chapters, private data and metadata.  NOTE(review): numbering gaps show
 * elided lines (e.g. freeing the stream objects themselves). */
2432 void avformat_free_context(AVFormatContext *s)
2441 if (s->iformat && s->iformat->priv_class && s->priv_data)
2442 av_opt_free(s->priv_data);
2444 for (i = 0; i < s->nb_streams; i++) {
2445 /* free all data in a stream component */
2448 for (j = 0; j < st->nb_side_data; j++)
2449 av_freep(&st->side_data[j].data);
2450 av_freep(&st->side_data);
2451 st->nb_side_data = 0;
2454 av_parser_close(st->parser);
2456 if (st->attached_pic.data)
2457 av_free_packet(&st->attached_pic);
2458 av_dict_free(&st->metadata);
2459 av_freep(&st->probe_data.buf);
2460 av_free(st->index_entries);
2461 av_free(st->codec->extradata);
2462 av_free(st->codec->subtitle_header);
2464 av_free(st->priv_data);
2468 for (i = s->nb_programs - 1; i >= 0; i--) {
2469 av_dict_free(&s->programs[i]->metadata);
2470 av_freep(&s->programs[i]->stream_index);
2471 av_freep(&s->programs[i]);
2473 av_freep(&s->programs);
2474 av_freep(&s->priv_data);
2475 while (s->nb_chapters--) {
2476 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2477 av_free(s->chapters[s->nb_chapters]);
2479 av_freep(&s->chapters);
2480 av_dict_free(&s->metadata);
2481 av_freep(&s->streams);
2482 av_freep(&s->internal);
/* Close an input context: call the demuxer's read_close, free the context,
 * and (in elided lines) close pb unless AVFMT_NOFILE / custom I/O applies.
 * NOTE(review): numbering gaps show elided lines, including *ps = NULL. */
2486 void avformat_close_input(AVFormatContext **ps)
2488 AVFormatContext *s = *ps;
2489 AVIOContext *pb = s->pb;
2491 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2492 (s->flags & AVFMT_FLAG_CUSTOM_IO))
2495 flush_packet_queue(s);
2498 if (s->iformat->read_close)
2499 s->iformat->read_close(s);
2501 avformat_free_context(s);
/* Append a new AVStream to the context: grow the streams array, allocate the
 * stream, its probe info and codec context, and initialize timestamps and
 * defaults.  NOTE(review): numbering gaps show elided lines (error paths,
 * return value). */
2508 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
2513 if (av_reallocp_array(&s->streams, s->nb_streams + 1,
2514 sizeof(*s->streams)) < 0) {
2519 st = av_mallocz(sizeof(AVStream));
2522 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2527 st->codec = avcodec_alloc_context3(c);
2529 /* no default bitrate if decoding */
2530 st->codec->bit_rate = 0;
2532 /* default pts setting is MPEG-like */
2533 avpriv_set_pts_info(st, 33, 1, 90000);
2536 st->index = s->nb_streams;
2537 st->start_time = AV_NOPTS_VALUE;
2538 st->duration = AV_NOPTS_VALUE;
2539 /* we set the current DTS to 0 so that formats without any timestamps
2540 * but durations get some timestamps, formats with some unknown
2541 * timestamps have their first few packets buffered and the
2542 * timestamps corrected before they are returned to the user */
2544 st->first_dts = AV_NOPTS_VALUE;
2545 st->probe_packets = MAX_PROBE_PACKETS;
2547 st->last_IP_pts = AV_NOPTS_VALUE;
2548 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
2549 st->pts_buffer[i] = AV_NOPTS_VALUE;
2551 st->sample_aspect_ratio = (AVRational) { 0, 1 };
2553 st->info->fps_first_dts = AV_NOPTS_VALUE;
2554 st->info->fps_last_dts = AV_NOPTS_VALUE;
2556 s->streams[s->nb_streams++] = st;
/* Find the program with the given id, or create and register a new one with
 * discard defaulted to AVDISCARD_NONE.  NOTE(review): the return and some
 * error handling are elided in this listing. */
2560 AVProgram *av_new_program(AVFormatContext *ac, int id)
2562 AVProgram *program = NULL;
2565 av_dlog(ac, "new_program: id=0x%04x\n", id);
2567 for (i = 0; i < ac->nb_programs; i++)
2568 if (ac->programs[i]->id == id)
2569 program = ac->programs[i];
2572 program = av_mallocz(sizeof(AVProgram));
2575 dynarray_add(&ac->programs, &ac->nb_programs, program);
2576 program->discard = AVDISCARD_NONE;
/* Find the chapter with the given id, or create and register a new one; then
 * (re)set its title metadata, time base and start.  NOTE(review): the end
 * assignment and return are elided in this listing. */
2583 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
2584 int64_t start, int64_t end, const char *title)
2586 AVChapter *chapter = NULL;
2589 for (i = 0; i < s->nb_chapters; i++)
2590 if (s->chapters[i]->id == id)
2591 chapter = s->chapters[i];
2594 chapter = av_mallocz(sizeof(AVChapter));
2597 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2599 av_dict_set(&chapter->metadata, "title", title, 0);
2601 chapter->time_base = time_base;
2602 chapter->start = start;
/* Add stream idx to the program with the given progid, validating the index,
 * skipping duplicates, and growing the program's stream_index array.
 * NOTE(review): numbering gaps show elided lines. */
2608 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
2611 AVProgram *program = NULL;
2613 if (idx >= ac->nb_streams) {
2614 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
2618 for (i = 0; i < ac->nb_programs; i++) {
2619 if (ac->programs[i]->id != progid)
2621 program = ac->programs[i];
/* Ignore the request when the stream is already listed. */
2622 for (j = 0; j < program->nb_stream_indexes; j++)
2623 if (program->stream_index[j] == idx)
2626 if (av_reallocp_array(&program->stream_index,
2627 program->nb_stream_indexes + 1,
2628 sizeof(*program->stream_index)) < 0) {
/* On allocation failure the index list is dropped entirely. */
2629 program->nb_stream_indexes = 0;
2632 program->stream_index[program->nb_stream_indexes++] = idx;
2637 uint64_t ff_ntp_time(void)
2639 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/**
 * Build a frame filename from a template containing exactly one "%d"
 * (optionally zero-padded, e.g. "%05d") by substituting number.
 *
 * "%%" in the template produces a literal '%'.  More than one "%d", any
 * other conversion, or a result that does not fit in buf_size fails.
 *
 * @param buf      output buffer; always NUL-terminated on return
 * @param buf_size size of buf in bytes
 * @param path     filename template
 * @param number   frame number to substitute
 * @return 0 on success, -1 on failure (bad template or truncation)
 */
int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q = buf;
    p = path;
    percentd_found = 0;
    for (;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            do {
                /* Collect an optional zero-pad width, e.g. the 5 in %05d. */
                nd = 0;
                while (av_isdigit(*p))
                    nd = nd * 10 + *p++ - '0';
                c = *p++;
            } while (av_isdigit(c));

            switch (c) {
            case '%':
                goto addchar;
            case 'd':
                /* Only a single %d substitution is allowed. */
                if (percentd_found)
                    goto fail;
                percentd_found = 1;
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;
            }
        } else {
addchar:
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;

fail:
    *q = '\0';
    return -1;
}
/**
 * Split a URL of the form proto://user:pass@host:port/path into its
 * components.  Any output may be disabled by passing size 0 (or NULL for
 * port_ptr); missing components are returned as empty strings, a missing
 * port as -1.  IPv6 literals in brackets ("[::1]:80") are supported.
 *
 * @param proto/proto_size                 output for the scheme
 * @param authorization/authorization_size output for "user[:pass]"
 * @param hostname/hostname_size           output for the host
 * @param port_ptr                         output for the port, or NULL
 * @param path/path_size                   output for path (incl. query)
 * @param url                              URL to parse
 */
void av_url_split(char *proto, int proto_size,
                  char *authorization, int authorization_size,
                  char *hostname, int hostname_size,
                  int *port_ptr, char *path, int path_size, const char *url)
{
    const char *p, *ls, *at, *col, *brk;

    if (port_ptr)
        *port_ptr = -1;
    if (proto_size > 0)
        proto[0] = 0;
    if (authorization_size > 0)
        authorization[0] = 0;
    if (hostname_size > 0)
        hostname[0] = 0;
    if (path_size > 0)
        path[0] = 0;

    /* parse protocol */
    if ((p = strchr(url, ':'))) {
        av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
        p++; /* skip ':' */
        if (*p == '/')
            p++;
        if (*p == '/')
            p++;
    } else {
        /* no protocol means plain filename */
        av_strlcpy(path, url, path_size);
        return;
    }

    /* separate path from hostname */
    ls = strchr(p, '/');
    if (!ls)
        ls = strchr(p, '?');
    if (ls)
        av_strlcpy(path, ls, path_size);
    else
        ls = &p[strlen(p)]; // XXX

    /* the rest is hostname, use that to parse auth/port */
    if (ls != p) {
        /* authorization (user[:pass]@hostname) */
        if ((at = strchr(p, '@')) && at < ls) {
            av_strlcpy(authorization, p,
                       FFMIN(authorization_size, at + 1 - p));
            p = at + 1; /* skip '@' */
        }

        if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
            /* [host]:port — bracketed IPv6 literal */
            av_strlcpy(hostname, p + 1,
                       FFMIN(hostname_size, brk - p));
            if (brk[1] == ':' && port_ptr)
                *port_ptr = atoi(brk + 2);
        } else if ((col = strchr(p, ':')) && col < ls) {
            av_strlcpy(hostname, p,
                       FFMIN(col + 1 - p, hostname_size));
            if (port_ptr)
                *port_ptr = atoi(col + 1);
        } else
            av_strlcpy(hostname, p,
                       FFMIN(ls + 1 - p, hostname_size));
    }
}
/**
 * Write the hexadecimal representation of s bytes from src into buff.
 *
 * Exactly 2*s characters are written; the output is NOT NUL-terminated,
 * the caller must terminate (or size) the buffer itself.
 *
 * @param buff      output buffer, at least 2*s bytes
 * @param src       input bytes
 * @param s         number of input bytes
 * @param lowercase nonzero to emit a-f instead of A-F
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    int i;
    static const char hex_table_uc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'A', 'B',
                                           'C', 'D', 'E', 'F' };
    static const char hex_table_lc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'a', 'b',
                                           'c', 'd', 'e', 'f' };
    const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;

    for (i = 0; i < s; i++) {
        buff[i * 2]     = hex_table[src[i] >> 4];
        buff[i * 2 + 1] = hex_table[src[i] & 0xF];
    }

    return buff;
}
2783 int ff_hex_to_data(uint8_t *data, const char *p)
2790 p += strspn(p, SPACE_CHARS);
2793 c = av_toupper((unsigned char) *p++);
2794 if (c >= '0' && c <= '9')
2796 else if (c >= 'A' && c <= 'F')
2811 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
2812 unsigned int pts_num, unsigned int pts_den)
2815 if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
2816 if (new_tb.num != pts_num)
2817 av_log(NULL, AV_LOG_DEBUG,
2818 "st:%d removing common factor %d from timebase\n",
2819 s->index, pts_num / new_tb.num);
2821 av_log(NULL, AV_LOG_WARNING,
2822 "st:%d has too large timebase, reducing\n", s->index);
2824 if (new_tb.num <= 0 || new_tb.den <= 0) {
2825 av_log(NULL, AV_LOG_ERROR,
2826 "Ignoring attempt to set invalid timebase for st:%d\n",
2830 s->time_base = new_tb;
2831 s->pts_wrap_bits = pts_wrap_bits;
2834 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
2837 const char *ptr = str;
2839 /* Parse key=value pairs. */
2842 char *dest = NULL, *dest_end;
2843 int key_len, dest_len = 0;
2845 /* Skip whitespace and potential commas. */
2846 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
2853 if (!(ptr = strchr(key, '=')))
2856 key_len = ptr - key;
2858 callback_get_buf(context, key, key_len, &dest, &dest_len);
2859 dest_end = dest + dest_len - 1;
2863 while (*ptr && *ptr != '\"') {
2867 if (dest && dest < dest_end)
2871 if (dest && dest < dest_end)
2879 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
2880 if (dest && dest < dest_end)
2888 int ff_find_stream_index(AVFormatContext *s, int id)
2891 for (i = 0; i < s->nb_streams; i++)
2892 if (s->streams[i]->id == id)
2897 int64_t ff_iso8601_to_unix_time(const char *datestr)
2900 struct tm time1 = { 0 }, time2 = { 0 };
2902 ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
2903 ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
2905 return av_timegm(&time2);
2907 return av_timegm(&time1);
2909 av_log(NULL, AV_LOG_WARNING,
2910 "strptime() unavailable on this system, cannot convert "
2911 "the date string.\n");
2916 int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
2920 if (ofmt->query_codec)
2921 return ofmt->query_codec(codec_id, std_compliance);
2922 else if (ofmt->codec_tag)
2923 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
2924 else if (codec_id == ofmt->video_codec ||
2925 codec_id == ofmt->audio_codec ||
2926 codec_id == ofmt->subtitle_codec)
2929 return AVERROR_PATCHWELCOME;
/**
 * Perform global network-layer initialization (sockets, TLS).
 *
 * A no-op when libavformat is built without network support.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
int avformat_network_init(void)
{
#if CONFIG_NETWORK
    int ret;
    ff_network_inited_globally = 1;
    if ((ret = ff_network_init()) < 0)
        return ret;
    ff_tls_init();
#endif
    return 0;
}
/**
 * Undo the global initialization done by avformat_network_init().
 *
 * A no-op when libavformat is built without network support.
 *
 * @return 0
 */
int avformat_network_deinit(void)
{
#if CONFIG_NETWORK
    ff_network_close();
    ff_tls_deinit();
#endif
    return 0;
}
2953 int ff_add_param_change(AVPacket *pkt, int32_t channels,
2954 uint64_t channel_layout, int32_t sample_rate,
2955 int32_t width, int32_t height)
2961 return AVERROR(EINVAL);
2964 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
2966 if (channel_layout) {
2968 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
2972 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
2974 if (width || height) {
2976 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
2978 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
2980 return AVERROR(ENOMEM);
2981 bytestream_put_le32(&data, flags);
2983 bytestream_put_le32(&data, channels);
2985 bytestream_put_le64(&data, channel_layout);
2987 bytestream_put_le32(&data, sample_rate);
2988 if (width || height) {
2989 bytestream_put_le32(&data, width);
2990 bytestream_put_le32(&data, height);
/* Install canned H.264 SPS/PPS extradata for AVC-Intra streams, selected by
 * frame width (1920/1440/1280) and field order.  AVC-Intra bitstreams do not
 * carry parameter sets in-band, so decoders need these pre-baked tables.
 *
 * NOTE(review): this chunk appears to have lost lines during extraction
 * (the embedded original line numbers are non-contiguous), so the byte
 * tables below are likely missing rows and some control-flow lines (the
 * else branches, the `int size = 0;` declaration, the early return when no
 * table matches) are absent — verify against the upstream file before
 * relying on this text. */
2995 int ff_generate_avci_extradata(AVStream *st)
/* AVC-Intra 100, 1080 progressive: SPS followed by PPS, each with an
 * Annex B start code (00 00 00 01). */
2997 static const uint8_t avci100_1080p_extradata[] = {
2999 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3000 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3001 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3002 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
3003 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
3004 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
3005 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
3006 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
3007 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3009 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
/* AVC-Intra 100, 1080 interlaced. */
3012 static const uint8_t avci100_1080i_extradata[] = {
3014 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3015 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3016 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3017 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
3018 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
3019 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
3020 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
3021 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
3022 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
3023 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
3024 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
3026 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
/* AVC-Intra 50, 1080 interlaced (1440 coded width). */
3029 static const uint8_t avci50_1080i_extradata[] = {
3031 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
3032 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
3033 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
3034 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
3035 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
3036 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
3037 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
3038 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
3039 0x81, 0x13, 0xf7, 0xff, 0x80, 0x01, 0x80, 0x02,
3040 0x71, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
3041 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
3043 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
/* AVC-Intra 100, 720 progressive. */
3046 static const uint8_t avci100_720p_extradata[] = {
3048 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3049 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
3050 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
3051 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
3052 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
3053 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
3054 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
3055 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
3056 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
3057 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
3059 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
3063 const uint8_t *data = NULL;
/* Select the table matching the stream's coded width / field order. */
3066 if (st->codec->width == 1920) {
3067 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
3068 data = avci100_1080p_extradata;
3069 size = sizeof(avci100_1080p_extradata);
3071 data = avci100_1080i_extradata;
3072 size = sizeof(avci100_1080i_extradata);
3074 } else if (st->codec->width == 1440) {
3075 data = avci50_1080i_extradata;
3076 size = sizeof(avci50_1080i_extradata);
3077 } else if (st->codec->width == 1280) {
3078 data = avci100_720p_extradata;
3079 size = sizeof(avci100_720p_extradata);
/* Replace any existing extradata with a zero-padded copy of the table. */
3085 av_freep(&st->codec->extradata);
3086 st->codec->extradata_size = 0;
3087 st->codec->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
3088 if (!st->codec->extradata)
3089 return AVERROR(ENOMEM);
3091 memcpy(st->codec->extradata, data, size);
3092 st->codec->extradata_size = size;
3097 uint8_t *av_stream_get_side_data(AVStream *st, enum AVPacketSideDataType type,
3102 for (i = 0; i < st->nb_side_data; i++) {
3103 if (st->side_data[i].type == type) {
3105 *size = st->side_data[i].size;
3106 return st->side_data[i].data;