2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
42 #include "audiointerleave.h"
55 * various utility functions for use within Libav
58 unsigned avformat_version(void)
60 return LIBAVFORMAT_VERSION_INT;
63 const char *avformat_configuration(void)
65 return LIBAV_CONFIGURATION;
/* Return the license name of this libavformat build.
 * The string literal is "libavformat license: <LICENSE>"; adding
 * sizeof(LICENSE_PREFIX) - 1 to the concatenated literal skips past the
 * prefix so the caller receives only the license part. */
68 const char *avformat_license(void)
70 #define LICENSE_PREFIX "libavformat license: "
71 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
74 /* an arbitrarily chosen "sane" max packet size -- 50M */
75 #define SANE_CHUNK_SIZE (50000000)
77 /* Read the data in sane-sized chunks and append to pkt.
78 * Return the number of bytes read or an error. */
/* Reads up to 'size' bytes from s, growing pkt as it goes, so a huge
 * requested size cannot force one enormous up-front allocation.
 * NOTE(review): the read loop around lines below is elided in this
 * listing; the chunking logic must be read in the full source. */
79 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
81 int64_t chunk_size = size;
82 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
83 int orig_size = pkt->size;
87 int prev_size = pkt->size;
90 /* When the caller requests a lot of data, limit it to the amount
91 * left in file or SANE_CHUNK_SIZE when it is not known. */
92 if (size > SANE_CHUNK_SIZE) {
93 int64_t filesize = avio_size(s) - avio_tell(s);
94 chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
96 read_size = FFMIN(size, chunk_size);
98 ret = av_grow_packet(pkt, read_size);
/* Append the new chunk directly after the data already in the packet. */
102 ret = avio_read(s, pkt->data + prev_size, read_size);
/* Short read: trim the packet back to only the bytes actually read
 * (FFMAX clamps a negative error return to 0 extra bytes). */
103 if (ret != read_size) {
104 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
/* Return bytes appended if any were, otherwise propagate the error. */
114 return pkt->size > orig_size ? pkt->size - orig_size : ret;
/* Read 'size' bytes from s into a fresh packet, recording the stream
 * position so demuxers can later seek back to this packet's data. */
117 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
122 pkt->pos = avio_tell(s);
124 return append_packet_chunked(s, pkt, size);
/* Append up to 'size' bytes from s to an existing packet.
 * NOTE(review): the first return is presumably guarded by an elided
 * "empty packet" check (fall back to av_get_packet when pkt has no
 * data yet) — confirm against the full source. */
127 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
130 return av_get_packet(s, pkt, size);
131 return append_packet_chunked(s, pkt, size);
/* Check whether 'filename' contains a frame-number pattern (e.g. %03d)
 * by asking av_get_frame_filename() to expand it with number 1. */
134 int av_filename_number_test(const char *filename)
138 (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
/* Run format probing on the accumulated probe buffer of a stream whose
 * codec is still AV_CODEC_ID_PROBE, and map the detected raw format
 * name to a concrete codec id / media type via the table below. */
141 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
142 AVProbeData *pd, int score)
/* Static lookup table: probe-format name -> (codec id, media type). */
144 static const struct {
147 enum AVMediaType type;
149 { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO },
150 { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO },
151 { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO },
152 { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO },
153 { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO },
154 { "latm", AV_CODEC_ID_AAC_LATM, AVMEDIA_TYPE_AUDIO },
155 { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO },
156 { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO },
157 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
/* Probe with is_opened=1 so AVFMT_NOFILE-style checks are bypassed. */
160 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
164 av_log(s, AV_LOG_DEBUG,
165 "Probe with size=%d, packets=%d detected %s with score=%d\n",
166 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
/* Table terminated by a NULL name entry (terminator elided here). */
168 for (i = 0; fmt_id_type[i].name; i++) {
169 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
170 st->codec->codec_id = fmt_id_type[i].id;
171 st->codec->codec_type = fmt_id_type[i].type;
179 /************************************************************/
180 /* input media file */
182 /* Open input file and probe the format if necessary. */
/* Three cases: (1) caller supplied a custom AVIOContext (s->pb) —
 * mark it custom I/O and probe through it; (2) the chosen/probed
 * format is AVFMT_NOFILE — no file to open; (3) otherwise open the
 * file with avio_open2() and probe the buffer. */
183 static int init_input(AVFormatContext *s, const char *filename,
184 AVDictionary **options)
187 AVProbeData pd = { filename, NULL, 0 };
190 s->flags |= AVFMT_FLAG_CUSTOM_IO;
192 return av_probe_input_buffer(s->pb, &s->iformat, filename,
/* A caller-supplied pb combined with a NOFILE format is contradictory. */
194 else if (s->iformat->flags & AVFMT_NOFILE)
195 return AVERROR(EINVAL);
/* Try filename-only probing first; if it identifies a NOFILE format,
 * no I/O is needed at all. */
199 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
200 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
203 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
204 &s->interrupt_callback, options)) < 0)
208 return av_probe_input_buffer(s->pb, &s->iformat, filename,
/* Append 'pkt' to a singly linked packet list, tracking the tail via
 * *plast_pktl for O(1) appends. Returns the stored packet (NULL on
 * allocation failure — allocation check elided in this listing). */
212 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
213 AVPacketList **plast_pktl)
215 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
/* Non-empty list: link after the current tail... */
220 (*plast_pktl)->next = pktl;
/* ...empty list: the new node becomes the head. */
222 *packet_buffer = pktl;
224 /* Add the packet in the buffered packet list. */
/* Push each stream's attached picture (e.g. cover art) into the raw
 * packet buffer so it is delivered once by the read loop, unless the
 * stream is being discarded. */
230 static int queue_attached_pictures(AVFormatContext *s)
233 for (i = 0; i < s->nb_streams; i++)
234 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
235 s->streams[i]->discard < AVDISCARD_ALL) {
/* Shallow copy plus a new buffer reference so the queued packet owns
 * its data independently of the stream's attached_pic. */
236 AVPacket copy = s->streams[i]->attached_pic;
237 copy.buf = av_buffer_ref(copy.buf);
239 return AVERROR(ENOMEM);
/* NOTE(review): '©' below looks like a mangled '&copy' (address of
 * the local AVPacket) — verify against the pristine source. */
241 add_to_pktbuf(&s->raw_packet_buffer, ©,
242 &s->raw_packet_buffer_end);
/* Open an input stream and read its header.
 * Allocates *ps if NULL, applies user options, opens/probes the input
 * (init_input), allocates the demuxer's private data, reads ID3v2
 * metadata, calls the demuxer's read_header, and queues attached
 * pictures. On failure the context and I/O are torn down (fail path
 * partly elided in this listing). */
247 int avformat_open_input(AVFormatContext **ps, const char *filename,
248 AVInputFormat *fmt, AVDictionary **options)
250 AVFormatContext *s = *ps;
252 AVDictionary *tmp = NULL;
253 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
255 if (!s && !(s = avformat_alloc_context()))
256 return AVERROR(ENOMEM);
/* Work on a copy of the options so consumed entries can be removed
 * and the remainder returned to the caller. */
261 av_dict_copy(&tmp, *options, 0);
263 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
266 if ((ret = init_input(s, filename, &tmp)) < 0)
269 /* Check filename in case an image number is expected. */
270 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
271 if (!av_filename_number_test(filename)) {
272 ret = AVERROR(EINVAL);
277 s->duration = s->start_time = AV_NOPTS_VALUE;
278 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
280 /* Allocate private data. */
281 if (s->iformat->priv_data_size > 0) {
282 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
283 ret = AVERROR(ENOMEM);
/* Demuxers with an AVClass take AVOptions on their private context. */
286 if (s->iformat->priv_class) {
287 *(const AVClass **) s->priv_data = s->iformat->priv_class;
288 av_opt_set_defaults(s->priv_data);
289 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
294 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
296 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
298 if (s->iformat->read_header)
299 if ((ret = s->iformat->read_header(s)) < 0)
/* Attached pictures found in ID3v2 tags become attached_pic streams. */
302 if (id3v2_extra_meta &&
303 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
305 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
307 if ((ret = queue_attached_pictures(s)) < 0)
/* Remember where packet data starts, for later byte-based seeking. */
310 if (s->pb && !s->data_offset)
311 s->data_offset = avio_tell(s->pb);
313 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
316 av_dict_free(options);
/* Failure path: free partial state. */
323 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
325 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
327 avformat_free_context(s);
332 /*******************************************************/
/* Accumulate packet data into st->probe_data for a stream whose codec
 * is still unknown (AV_CODEC_ID_PROBE), and retry codec detection when
 * either probing is exhausted or the buffer crossed a power-of-two
 * size boundary. pkt == NULL flushes: probe with whatever was buffered. */
334 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
336 if (st->codec->codec_id == AV_CODEC_ID_PROBE) {
337 AVProbeData *pd = &st->probe_data;
338 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
/* Grow the probe buffer and append this packet, keeping the required
 * zeroed padding after the data. */
343 if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
344 AVPROBE_PADDING_SIZE)) < 0)
346 memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
347 pd->buf_size += pkt->size;
348 memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* Flush call (pkt == NULL path, guard elided): stop collecting. */
350 st->probe_packets = 0;
352 av_log(s, AV_LOG_ERROR,
353 "nothing to probe for stream %d\n", st->index);
/* Re-probe when out of packets, or when buf_size crossed a power of
 * two (cheap way to probe at exponentially growing sizes). */
358 if (!st->probe_packets ||
359 av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
/* Demand a higher score while packets remain; accept anything at EOF. */
360 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0
361 ? AVPROBE_SCORE_MAX / 4 : 0);
362 if (st->codec->codec_id != AV_CODEC_ID_PROBE) {
365 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
/* Low-level packet read: return a buffered raw packet if one is ready,
 * otherwise pull a packet from the demuxer, apply forced codec ids,
 * and feed still-probing streams through probe_codec(). */
372 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
378 AVPacketList *pktl = s->raw_packet_buffer;
/* Buffered path: a packet is deliverable once its stream is no longer
 * probing, probing ran out of packets, or the probe budget is full. */
382 st = s->streams[pkt->stream_index];
383 if (st->codec->codec_id != AV_CODEC_ID_PROBE ||
384 !st->probe_packets ||
385 s->raw_packet_buffer_remaining_size < pkt->size) {
/* Budget exhausted: force a final probe (NULL packet = flush). */
387 if (st->probe_packets)
388 if ((err = probe_codec(s, st, NULL)) < 0)
390 pd = &st->probe_data;
/* Unlink from the raw buffer and return the budget it consumed. */
393 s->raw_packet_buffer = pktl->next;
394 s->raw_packet_buffer_remaining_size += pkt->size;
/* Demuxer path. */
403 ret = s->iformat->read_packet(s, pkt);
/* On EOF/error flush every stream still probing. */
405 if (!pktl || ret == AVERROR(EAGAIN))
407 for (i = 0; i < s->nb_streams; i++) {
409 if (st->probe_packets)
410 if ((err = probe_codec(s, st, NULL)) < 0)
416 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
417 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
418 av_log(s, AV_LOG_WARNING,
419 "Dropped corrupted packet (stream = %d)\n",
425 st = s->streams[pkt->stream_index];
/* Honor user-forced codec ids per media type. */
427 switch (st->codec->codec_type) {
428 case AVMEDIA_TYPE_VIDEO:
429 if (s->video_codec_id)
430 st->codec->codec_id = s->video_codec_id;
432 case AVMEDIA_TYPE_AUDIO:
433 if (s->audio_codec_id)
434 st->codec->codec_id = s->audio_codec_id;
436 case AVMEDIA_TYPE_SUBTITLE:
437 if (s->subtitle_codec_id)
438 st->codec->codec_id = s->subtitle_codec_id;
/* Fast path out when the stream needs no further probing. */
442 if (!pktl && (st->codec->codec_id != AV_CODEC_ID_PROBE ||
/* Otherwise buffer the packet and charge it to the probe budget. */
446 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
447 s->raw_packet_buffer_remaining_size -= pkt->size;
449 if ((err = probe_codec(s, st, pkt)) < 0)
454 /**********************************************************/
457 * Get the number of samples of an audio frame. Return -1 on error.
/* Demuxing (mux == 0) trusts the codec's fixed frame_size first; the
 * generic duration lookup is the common path; when muxing, frame_size
 * is only a fallback. */
459 int ff_get_audio_frame_size(AVCodecContext *enc, int size, int mux)
463 /* give frame_size priority if demuxing */
464 if (!mux && enc->frame_size > 1)
465 return enc->frame_size;
467 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
470 /* Fall back on using frame_size if muxing. */
471 if (enc->frame_size > 1)
472 return enc->frame_size;
478 * Return the frame duration in seconds. Return 0 if not available.
/* Outputs the duration as a rational *pnum / *pden in seconds.
 * Video: prefer avg_frame_rate, then a sane stream time_base, then the
 * codec time_base adjusted for repeated fields. Audio: derive from the
 * packet's sample count and the sample rate. */
480 void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st,
481 AVCodecParserContext *pc, AVPacket *pkt)
487 switch (st->codec->codec_type) {
488 case AVMEDIA_TYPE_VIDEO:
489 if (st->avg_frame_rate.num) {
/* Duration is the reciprocal of the frame rate. */
490 *pnum = st->avg_frame_rate.den;
491 *pden = st->avg_frame_rate.num;
/* The 1000LL checks reject absurd (sub-millihertz) time bases. */
492 } else if (st->time_base.num * 1000LL > st->time_base.den) {
493 *pnum = st->time_base.num;
494 *pden = st->time_base.den;
495 } else if (st->codec->time_base.num * 1000LL > st->codec->time_base.den) {
496 *pnum = st->codec->time_base.num;
497 *pden = st->codec->time_base.den;
/* repeat_pict stretches the frame (e.g. 3:2 pulldown); scale whichever
 * of num/den avoids integer overflow. */
498 if (pc && pc->repeat_pict) {
499 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
500 *pden /= 1 + pc->repeat_pict;
502 *pnum *= 1 + pc->repeat_pict;
504 /* If this codec can be interlaced or progressive then we need
505 * a parser to compute duration of a packet. Thus if we have
506 * no parser in such case leave duration undefined. */
507 if (st->codec->ticks_per_frame > 1 && !pc)
511 case AVMEDIA_TYPE_AUDIO:
512 frame_size = ff_get_audio_frame_size(st->codec, pkt->size, 0);
513 if (frame_size <= 0 || st->codec->sample_rate <= 0)
516 *pden = st->codec->sample_rate;
/* Return whether every frame of the codec is a keyframe: true unless
 * the descriptor says it is a video codec without the INTRA_ONLY
 * property (remaining returns elided in this listing). */
523 static int is_intra_only(enum AVCodecID id)
525 const AVCodecDescriptor *d = avcodec_descriptor_get(id);
528 if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
/* Once the first DTS of a stream becomes known, shift the timestamps
 * of all packets of that stream already sitting in the packet buffer
 * by the stream's first_dts offset, and establish start_time. */
533 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
534 int64_t dts, int64_t pts)
536 AVStream *st = s->streams[stream_index];
537 AVPacketList *pktl = s->packet_buffer;
/* Only run once, and only when both dts and a current reference dts
 * are available. */
539 if (st->first_dts != AV_NOPTS_VALUE ||
540 dts == AV_NOPTS_VALUE ||
541 st->cur_dts == AV_NOPTS_VALUE)
/* Offset that maps the stream's running cur_dts onto real dts values. */
544 st->first_dts = dts - st->cur_dts;
547 for (; pktl; pktl = pktl->next) {
548 if (pktl->pkt.stream_index != stream_index)
550 // FIXME: think more about this check
551 if (pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
552 pktl->pkt.pts += st->first_dts;
554 if (pktl->pkt.dts != AV_NOPTS_VALUE)
555 pktl->pkt.dts += st->first_dts;
557 if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
558 st->start_time = pktl->pkt.pts;
/* No buffered pts found: fall back to this packet's pts. */
560 if (st->start_time == AV_NOPTS_VALUE)
561 st->start_time = pts;
/* Fill in missing dts (and pts when no B-frames) of already-buffered
 * packets of a stream by extrapolating from first_dts with a constant
 * per-packet 'duration'. */
564 static void update_initial_durations(AVFormatContext *s, AVStream *st,
565 int stream_index, int duration)
567 AVPacketList *pktl = s->packet_buffer;
/* First pass: bail out if any buffered packet of this stream already
 * carries real timestamps (extrapolation would conflict). */
570 if (st->first_dts != AV_NOPTS_VALUE) {
571 cur_dts = st->first_dts;
572 for (; pktl; pktl = pktl->next) {
573 if (pktl->pkt.stream_index == stream_index) {
574 if (pktl->pkt.pts != pktl->pkt.dts ||
575 pktl->pkt.dts != AV_NOPTS_VALUE ||
/* Restart from the head for the fill-in pass. */
581 pktl = s->packet_buffer;
582 st->first_dts = cur_dts;
583 } else if (st->cur_dts)
/* Second pass: stamp packets that have no timestamps at all. */
586 for (; pktl; pktl = pktl->next) {
587 if (pktl->pkt.stream_index != stream_index)
589 if (pktl->pkt.pts == pktl->pkt.dts &&
590 pktl->pkt.dts == AV_NOPTS_VALUE &&
591 !pktl->pkt.duration) {
592 pktl->pkt.dts = cur_dts;
593 if (!st->codec->has_b_frames)
594 pktl->pkt.pts = cur_dts;
/* NOTE(review): audio keeps its own per-packet duration — verify the
 * elided surrounding condition in the full source. */
596 if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
597 pktl->pkt.duration = duration;
601 if (st->first_dts == AV_NOPTS_VALUE)
602 st->cur_dts = cur_dts;
/* Fill in missing pts/dts/duration of a demuxed packet using the
 * stream's time base, the parser state (if any), and heuristics for
 * B-frame reordering delay. Core timestamp-fix-up routine; the exact
 * statement order matters — treat as read-only reference. */
605 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
606 AVCodecParserContext *pc, AVPacket *pkt)
608 int num, den, presentation_delayed, delay, i;
/* User opted out of timestamp fix-ups entirely. */
611 if (s->flags & AVFMT_FLAG_NOFILLIN)
614 if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
615 pkt->dts = AV_NOPTS_VALUE;
617 /* do we have a video B-frame ? */
618 delay = st->codec->has_b_frames;
619 presentation_delayed = 0;
621 /* XXX: need has_b_frame, but cannot get it if the codec is
624 pc && pc->pict_type != AV_PICTURE_TYPE_B)
625 presentation_delayed = 1;
/* Undo a dts wraparound: if dts is more than half the wrap range ahead
 * of pts, it wrapped one period early. */
627 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
628 st->pts_wrap_bits < 63 &&
629 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
630 pkt->dts -= 1LL << st->pts_wrap_bits;
633 /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
634 * We take the conservative approach and discard both.
635 * Note: If this is misbehaving for an H.264 file, then possibly
636 * presentation_delayed is not set correctly. */
637 if (delay == 1 && pkt->dts == pkt->pts &&
638 pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
639 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
640 pkt->dts = pkt->pts = AV_NOPTS_VALUE;
/* Derive a duration for video packets lacking one, converting the
 * frame duration rational into stream time_base ticks. */
643 if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
644 ff_compute_frame_duration(&num, &den, st, pc, pkt);
646 pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
647 den * (int64_t) st->time_base.num,
650 if (pkt->duration != 0 && s->packet_buffer)
651 update_initial_durations(s, st, pkt->stream_index,
656 /* Correct timestamps with byte offset if demuxers only have timestamps
657 * on packet boundaries */
658 if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
659 /* this will estimate bitrate based on this frame's duration and size */
660 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
661 if (pkt->pts != AV_NOPTS_VALUE)
663 if (pkt->dts != AV_NOPTS_VALUE)
667 /* This may be redundant, but it should not hurt. */
668 if (pkt->dts != AV_NOPTS_VALUE &&
669 pkt->pts != AV_NOPTS_VALUE &&
671 presentation_delayed = 1;
674 "IN delayed:%d pts:%"PRId64", dts:%"PRId64" "
675 "cur_dts:%"PRId64" st:%d pc:%p\n",
676 presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
677 pkt->stream_index, pc);
678 /* Interpolate PTS and DTS if they are not present. We skip H.264
679 * currently because delay and has_b_frames are not reliably set. */
680 if ((delay == 0 || (delay == 1 && pc)) &&
681 st->codec->codec_id != AV_CODEC_ID_H264) {
682 if (presentation_delayed) {
683 /* DTS = decompression timestamp */
684 /* PTS = presentation timestamp */
685 if (pkt->dts == AV_NOPTS_VALUE)
686 pkt->dts = st->last_IP_pts;
687 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
688 if (pkt->dts == AV_NOPTS_VALUE)
689 pkt->dts = st->cur_dts;
691 /* This is tricky: the dts must be incremented by the duration
692 * of the frame we are displaying, i.e. the last I- or P-frame. */
693 if (st->last_IP_duration == 0)
694 st->last_IP_duration = pkt->duration;
695 if (pkt->dts != AV_NOPTS_VALUE)
696 st->cur_dts = pkt->dts + st->last_IP_duration;
697 st->last_IP_duration = pkt->duration;
698 st->last_IP_pts = pkt->pts;
699 /* Cannot compute PTS if not present (we can compute it only
700 * by knowing the future. */
701 } else if (pkt->pts != AV_NOPTS_VALUE ||
702 pkt->dts != AV_NOPTS_VALUE ||
704 st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
/* Non-delayed path: derive an audio duration if missing, ... */
705 int duration = pkt->duration;
706 if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
707 ff_compute_frame_duration(&num, &den, st, pc, pkt);
709 duration = av_rescale_rnd(1,
710 num * (int64_t) st->time_base.den,
711 den * (int64_t) st->time_base.num,
713 if (duration != 0 && s->packet_buffer)
714 update_initial_durations(s, st, pkt->stream_index,
719 if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
721 /* presentation is not delayed : PTS and DTS are the same */
722 if (pkt->pts == AV_NOPTS_VALUE)
724 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
726 if (pkt->pts == AV_NOPTS_VALUE)
727 pkt->pts = st->cur_dts;
729 if (pkt->pts != AV_NOPTS_VALUE)
730 st->cur_dts = pkt->pts + duration;
/* Reorder recent pts values through a small insertion-sorted buffer to
 * recover dts for streams with B-frame delay. */
735 if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
736 st->pts_buffer[0] = pkt->pts;
737 for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
738 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
739 if (pkt->dts == AV_NOPTS_VALUE)
740 pkt->dts = st->pts_buffer[0];
741 // We skipped it above so we try here.
742 if (st->codec->codec_id == AV_CODEC_ID_H264)
743 // This should happen on the first packet
744 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
745 if (pkt->dts > st->cur_dts)
746 st->cur_dts = pkt->dts;
750 "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
751 presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
/* Every frame of an intra-only codec is a keyframe by definition. */
754 if (is_intra_only(st->codec->codec_id))
755 pkt->flags |= AV_PKT_FLAG_KEY;
757 pkt->convergence_duration = pc->convergence_duration;
/* Drain a packet list: unlink each node, free its packet, and (in the
 * elided remainder) free the node and reset the tail pointer. */
760 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
763 AVPacketList *pktl = *pkt_buf;
764 *pkt_buf = pktl->next;
765 av_free_packet(&pktl->pkt);
772 * Parse a packet, add all split parts to parse_queue.
774 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
/* Runs the stream's AVCodecParser over pkt's bytes, producing zero or
 * more output packets that are timestamped, flagged, indexed, and
 * queued on s->parse_queue. */
776 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
778 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
779 AVStream *st = s->streams[stream_index];
780 uint8_t *data = pkt ? pkt->data : NULL;
781 int size = pkt ? pkt->size : 0;
782 int ret = 0, got_output = 0;
/* NULL input is replaced by an empty flush packet (setup elided). */
785 av_init_packet(&flush_pkt);
/* Keep parsing while input bytes remain, or while flushing still
 * yields output frames. */
790 while (size > 0 || (pkt == &flush_pkt && got_output)) {
793 av_init_packet(&out_pkt);
794 len = av_parser_parse2(st->parser, st->codec,
795 &out_pkt.data, &out_pkt.size, data, size,
796 pkt->pts, pkt->dts, pkt->pos);
/* Timestamps are consumed by the first parse call only. */
798 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
799 /* increment read pointer */
803 got_output = !!out_pkt.size;
/* Side data travels with the first output packet. */
808 if (pkt->side_data) {
809 out_pkt.side_data = pkt->side_data;
810 out_pkt.side_data_elems = pkt->side_data_elems;
811 pkt->side_data = NULL;
812 pkt->side_data_elems = 0;
815 /* set the duration */
816 out_pkt.duration = 0;
817 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
818 if (st->codec->sample_rate > 0) {
/* Convert parser duration from samples to stream time_base units. */
820 av_rescale_q_rnd(st->parser->duration,
821 (AVRational) { 1, st->codec->sample_rate },
827 out_pkt.stream_index = st->index;
828 out_pkt.pts = st->parser->pts;
829 out_pkt.dts = st->parser->dts;
830 out_pkt.pos = st->parser->pos;
/* key_frame == -1 means "parser does not know"; fall back on I-frame. */
832 if (st->parser->key_frame == 1 ||
833 (st->parser->key_frame == -1 &&
834 st->parser->pict_type == AV_PICTURE_TYPE_I))
835 out_pkt.flags |= AV_PKT_FLAG_KEY;
837 compute_pkt_fields(s, st, st->parser, &out_pkt);
839 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
840 out_pkt.flags & AV_PKT_FLAG_KEY) {
841 ff_reduce_index(s, st->index);
842 av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
843 0, 0, AVINDEX_KEYFRAME);
/* If the parser returned the whole input unchanged, transfer buffer
 * ownership instead of copying. */
846 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
847 out_pkt.buf = pkt->buf;
849 #if FF_API_DESTRUCT_PACKET
850 FF_DISABLE_DEPRECATION_WARNINGS
851 out_pkt.destruct = pkt->destruct;
852 pkt->destruct = NULL;
853 FF_ENABLE_DEPRECATION_WARNINGS
/* Otherwise duplicate so the queued packet owns its data. */
856 if ((ret = av_dup_packet(&out_pkt)) < 0)
859 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
860 av_free_packet(&out_pkt);
861 ret = AVERROR(ENOMEM);
866 /* end of the stream => close and free the parser */
867 if (pkt == &flush_pkt) {
868 av_parser_close(st->parser);
/* Pop the head packet of a (non-empty) packet list into the caller's
 * packet, clearing the tail pointer when the list becomes empty. */
877 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
878 AVPacketList **pkt_buffer_end,
/* The list must be non-empty; callers check before calling. */
882 av_assert0(*pkt_buffer);
885 *pkt_buffer = pktl->next;
887 *pkt_buffer_end = NULL;
/* Produce the next frame-level packet: pull raw packets via
 * ff_read_packet(), run them through the stream parser when needed,
 * and serve from parse_queue; flushes all parsers at EOF. */
892 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
894 int ret = 0, i, got_packet = 0;
898 while (!got_packet && !s->parse_queue) {
902 /* read next packet */
903 ret = ff_read_packet(s, &cur_pkt);
905 if (ret == AVERROR(EAGAIN))
907 /* flush the parsers */
908 for (i = 0; i < s->nb_streams; i++) {
910 if (st->parser && st->need_parsing)
911 parse_packet(s, NULL, st->index);
913 /* all remaining packets are now in parse_queue =>
914 * really terminate parsing */
918 st = s->streams[cur_pkt.stream_index];
/* Warn (but do not fix) pts < dts, which is never valid. */
920 if (cur_pkt.pts != AV_NOPTS_VALUE &&
921 cur_pkt.dts != AV_NOPTS_VALUE &&
922 cur_pkt.pts < cur_pkt.dts) {
923 av_log(s, AV_LOG_WARNING,
924 "Invalid timestamps stream=%d, pts=%"PRId64", "
925 "dts=%"PRId64", size=%d\n",
926 cur_pkt.stream_index, cur_pkt.pts,
927 cur_pkt.dts, cur_pkt.size);
929 if (s->debug & FF_FDEBUG_TS)
930 av_log(s, AV_LOG_DEBUG,
931 "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", "
932 "size=%d, duration=%d, flags=%d\n",
933 cur_pkt.stream_index, cur_pkt.pts, cur_pkt.dts,
934 cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
/* Lazily create the parser on first use; downgrade need_parsing when
 * no parser exists for the codec. */
936 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
937 st->parser = av_parser_init(st->codec->codec_id);
939 /* no parser available: just output the raw packets */
940 st->need_parsing = AVSTREAM_PARSE_NONE;
941 else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
942 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
943 else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
944 st->parser->flags |= PARSER_FLAG_ONCE;
947 if (!st->need_parsing || !st->parser) {
948 /* no parsing needed: we just output the packet as is */
950 compute_pkt_fields(s, st, NULL, pkt);
951 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
952 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
953 ff_reduce_index(s, st->index);
954 av_add_index_entry(st, pkt->pos, pkt->dts,
955 0, 0, AVINDEX_KEYFRAME);
958 } else if (st->discard < AVDISCARD_ALL) {
959 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
/* Discarded stream or parsed input: raw packet no longer needed. */
963 av_free_packet(&cur_pkt);
967 if (!got_packet && s->parse_queue)
968 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
970 if (s->debug & FF_FDEBUG_TS)
971 av_log(s, AV_LOG_DEBUG,
972 "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", "
973 "size=%d, duration=%d, flags=%d\n",
974 pkt->stream_index, pkt->pts, pkt->dts,
975 pkt->size, pkt->duration, pkt->flags);
/* Public read: return the next packet. Without AVFMT_FLAG_GENPTS this
 * is a thin wrapper over the internal reader; with it, packets are
 * buffered until a missing pts can be inferred from a later dts of the
 * same stream. */
980 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
982 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
/* Fast path: no pts generation requested. */
986 return s->packet_buffer
987 ? read_from_packet_buffer(&s->packet_buffer,
988 &s->packet_buffer_end, pkt)
989 : read_frame_internal(s, pkt);
993 AVPacketList *pktl = s->packet_buffer;
996 AVPacket *next_pkt = &pktl->pkt;
998 if (next_pkt->dts != AV_NOPTS_VALUE) {
999 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
/* Scan later buffered packets of the same stream: a following dts
 * (modulo wraparound) equals the head packet's missing pts. */
1000 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1001 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1002 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
1003 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
1005 next_pkt->pts = pktl->pkt.dts;
1009 pktl = s->packet_buffer;
1012 /* read packet from packet buffer, if there is data */
1013 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1014 next_pkt->dts != AV_NOPTS_VALUE && !eof))
1015 return read_from_packet_buffer(&s->packet_buffer,
1016 &s->packet_buffer_end, pkt);
/* Need more data: read another packet and buffer it for the scan. */
1019 ret = read_frame_internal(s, pkt);
1021 if (pktl && ret != AVERROR(EAGAIN)) {
1028 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1029 &s->packet_buffer_end)) < 0)
1030 return AVERROR(ENOMEM);
1034 /* XXX: suppress the packet queue */
/* Drop every buffered packet (parsed, reordered, and raw queues) and
 * reset the raw-probe byte budget; used when seeking/flushing. */
1035 static void flush_packet_queue(AVFormatContext *s)
1037 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1038 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1039 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1041 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1044 /*******************************************************/
/* Pick a default stream: the first real video stream (not an attached
 * picture) wins; otherwise the first audio stream; otherwise stream 0. */
1047 int av_find_default_stream_index(AVFormatContext *s)
1049 int first_audio_index = -1;
1053 if (s->nb_streams <= 0)
1055 for (i = 0; i < s->nb_streams; i++) {
1057 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1058 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1061 if (first_audio_index < 0 &&
1062 st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1063 first_audio_index = i;
1065 return first_audio_index >= 0 ? first_audio_index : 0;
1068 /** Flush the frame reader. */
/* Drop all buffered packets and reset per-stream parser and timestamp
 * state so reading can restart cleanly (e.g. after a seek). */
1069 void ff_read_frame_flush(AVFormatContext *s)
1074 flush_packet_queue(s);
1076 /* Reset read state for each stream. */
1077 for (i = 0; i < s->nb_streams; i++) {
1081 av_parser_close(st->parser);
1084 st->last_IP_pts = AV_NOPTS_VALUE;
1085 /* We set the current DTS to an unspecified origin. */
1086 st->cur_dts = AV_NOPTS_VALUE;
1088 st->probe_packets = MAX_PROBE_PACKETS;
/* Clear the reordering buffer used by compute_pkt_fields(). */
1090 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1091 st->pts_buffer[j] = AV_NOPTS_VALUE;
/* After a seek, propagate 'timestamp' (in ref_st's time base) to every
 * stream's cur_dts, rescaled into each stream's own time base. */
1095 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1099 for (i = 0; i < s->nb_streams; i++) {
1100 AVStream *st = s->streams[i];
/* Rescale: timestamp * (st tb den * ref tb num) / (st tb num * ref tb den). */
1103 av_rescale(timestamp,
1104 st->time_base.den * (int64_t) ref_st->time_base.num,
1105 st->time_base.num * (int64_t) ref_st->time_base.den);
/* Keep the seek index within the configured memory budget by halving
 * it (keeping every second entry) once it reaches the limit. */
1109 void ff_reduce_index(AVFormatContext *s, int stream_index)
1111 AVStream *st = s->streams[stream_index];
1112 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1114 if ((unsigned) st->nb_index_entries >= max_entries) {
1116 for (i = 0; 2 * i < st->nb_index_entries; i++)
1117 st->index_entries[i] = st->index_entries[2 * i];
1118 st->nb_index_entries = i;
/* Insert an index entry, keeping the array sorted by timestamp.
 * Grows the array as needed; appending at the end is the fast path,
 * mid-array insertion shifts entries; a duplicate timestamp updates
 * the existing entry instead of inserting. */
1122 int ff_add_index_entry(AVIndexEntry **index_entries,
1123 int *nb_index_entries,
1124 unsigned int *index_entries_allocated_size,
1125 int64_t pos, int64_t timestamp,
1126 int size, int distance, int flags)
1128 AVIndexEntry *entries, *ie;
/* Guard the (count+1)*sizeof multiplication against overflow. */
1131 if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1134 entries = av_fast_realloc(*index_entries,
1135 index_entries_allocated_size,
1136 (*nb_index_entries + 1) *
1137 sizeof(AVIndexEntry));
1141 *index_entries = entries;
1143 index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1144 timestamp, AVSEEK_FLAG_ANY);
/* Not found => append at the end; the assert documents sortedness. */
1147 index = (*nb_index_entries)++;
1148 ie = &entries[index];
1149 assert(index == 0 || ie[-1].timestamp < timestamp);
1151 ie = &entries[index];
1152 if (ie->timestamp != timestamp) {
1153 if (ie->timestamp <= timestamp)
/* Shift the tail up by one slot to make room for the new entry. */
1155 memmove(entries + index + 1, entries + index,
1156 sizeof(AVIndexEntry) * (*nb_index_entries - index));
1157 (*nb_index_entries)++;
1158 } else if (ie->pos == pos && distance < ie->min_distance)
1159 // do not reduce the distance
1160 distance = ie->min_distance;
1164 ie->timestamp = timestamp;
1165 ie->min_distance = distance;
1172 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1173 int size, int distance, int flags)
1175 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1176 &st->index_entries_allocated_size, pos,
1177 timestamp, size, distance, flags);
/* Binary-search a sorted index for 'wanted_timestamp'. Flags select
 * the nearest entry at-or-before (BACKWARD) or at-or-after; without
 * AVSEEK_FLAG_ANY the result is additionally walked to a keyframe.
 * Returns -1 style sentinel handling is elided in this listing. */
1180 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1181 int64_t wanted_timestamp, int flags)
1189 // Optimize appending index entries at the end.
1190 if (b && entries[b - 1].timestamp < wanted_timestamp)
/* Standard bisection: narrow [a, b] around the wanted timestamp. */
1195 timestamp = entries[m].timestamp;
1196 if (timestamp >= wanted_timestamp)
1198 if (timestamp <= wanted_timestamp)
1201 m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
/* Walk toward the nearest keyframe in the requested direction. */
1203 if (!(flags & AVSEEK_FLAG_ANY))
1204 while (m >= 0 && m < nb_entries &&
1205 !(entries[m].flags & AVINDEX_KEYFRAME))
1206 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1208 if (m == nb_entries)
1213 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1215 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1216 wanted_timestamp, flags);
/* Seek to target_ts using binary search over the file via the
 * demuxer's read_timestamp callback, seeding the search bounds from
 * any cached index entries, then repositioning the AVIOContext. */
1219 int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
1220 int64_t target_ts, int flags)
1222 AVInputFormat *avif = s->iformat;
1223 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1224 int64_t ts_min, ts_max, ts;
1229 if (stream_index < 0)
1232 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1235 ts_min = AV_NOPTS_VALUE;
1236 pos_limit = -1; // GCC falsely says it may be uninitialized.
1238 st = s->streams[stream_index];
/* Seed lower bound from the nearest index entry at or before target. */
1239 if (st->index_entries) {
1242 /* FIXME: Whole function must be checked for non-keyframe entries in
1243 * index case, especially read_timestamp(). */
1244 index = av_index_search_timestamp(st, target_ts,
1245 flags | AVSEEK_FLAG_BACKWARD);
1246 index = FFMAX(index, 0);
1247 e = &st->index_entries[index];
1249 if (e->timestamp <= target_ts || e->pos == e->min_distance) {
1251 ts_min = e->timestamp;
1252 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* Seed upper bound from the nearest entry at or after target. */
1258 index = av_index_search_timestamp(st, target_ts,
1259 flags & ~AVSEEK_FLAG_BACKWARD);
1260 assert(index < st->nb_index_entries);
1262 e = &st->index_entries[index];
1263 assert(e->timestamp >= target_ts);
1265 ts_max = e->timestamp;
1266 pos_limit = pos_max - e->min_distance;
1267 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
1268 " dts_max=%"PRId64"\n", pos_max, pos_limit, ts_max);
1272 pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
1273 ts_min, ts_max, flags, &ts, avif->read_timestamp);
/* Move the I/O layer to the found position and sync all streams. */
1278 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1281 ff_update_cur_dts(s, st, ts);
/* Generic timestamp-based binary search over a byte stream.
 * Bounds (ts_min/ts_max, pos_min/pos_max) are established if unknown,
 * then the target position is narrowed by interpolation, falling back
 * to bisection and finally linear search; returns the byte position
 * and the found timestamp via *ts_ret. */
1286 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1287 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1288 int64_t ts_min, int64_t ts_max,
1289 int flags, int64_t *ts_ret,
1290 int64_t (*read_timestamp)(struct AVFormatContext *, int,
1291 int64_t *, int64_t))
1294 int64_t start_pos, filesize;
1297 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* Establish the lower bound at the start of the data if not given. */
1299 if (ts_min == AV_NOPTS_VALUE) {
1300 pos_min = s->data_offset;
1301 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1302 if (ts_min == AV_NOPTS_VALUE)
/* Establish the upper bound by stepping back from EOF until a
 * timestamp is found. */
1306 if (ts_max == AV_NOPTS_VALUE) {
1308 filesize = avio_size(s->pb);
1309 pos_max = filesize - 1;
1312 ts_max = read_timestamp(s, stream_index, &pos_max,
1315 } while (ts_max == AV_NOPTS_VALUE && pos_max >= step);
1316 if (ts_max == AV_NOPTS_VALUE)
/* Extend ts_max to cover any timestamps after pos_max. */
1320 int64_t tmp_pos = pos_max + 1;
1321 int64_t tmp_ts = read_timestamp(s, stream_index,
1322 &tmp_pos, INT64_MAX);
1323 if (tmp_ts == AV_NOPTS_VALUE)
1327 if (tmp_pos >= filesize)
1330 pos_limit = pos_max;
/* Degenerate bounds: target outside or bounds already touching. */
1333 if (ts_min > ts_max)
1335 else if (ts_min == ts_max)
1336 pos_limit = pos_min;
1339 while (pos_min < pos_limit) {
1340 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64
1341 " dts_max=%"PRId64"\n", pos_min, pos_max, ts_min, ts_max);
1342 assert(pos_limit <= pos_max);
/* no_change counts consecutive iterations without progress:
 * 0 => interpolate, 1 => bisect, >1 => linear scan. */
1344 if (no_change == 0) {
1345 int64_t approximate_keyframe_distance = pos_max - pos_limit;
1346 // interpolate position (better than dichotomy)
1347 pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
1349 pos_min - approximate_keyframe_distance;
1350 } else if (no_change == 1) {
1351 // bisection if interpolation did not change min / max pos last time
1352 pos = (pos_min + pos_limit) >> 1;
1354 /* linear search if bisection failed, can only happen if there
1355 * are very few or no keyframes between min/max */
1360 else if (pos > pos_limit)
1364 // May pass pos_limit instead of -1.
1365 ts = read_timestamp(s, stream_index, &pos, INT64_MAX);
1370 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64
1371 " target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1372 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1373 pos_limit, start_pos, no_change);
1374 if (ts == AV_NOPTS_VALUE) {
1375 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1378 assert(ts != AV_NOPTS_VALUE);
/* Shrink whichever bound the probed timestamp allows. */
1379 if (target_ts <= ts) {
1380 pos_limit = start_pos - 1;
1384 if (target_ts >= ts) {
/* Pick the bound matching the seek direction, then refresh timestamps
 * for the caller. */
1390 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1391 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1393 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1395 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1396 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1397 pos, ts_min, target_ts, ts_max);
/*
 * Seek to an absolute byte position, clamped to [data_offset, file size - 1].
 * NOTE(review): excerpt is elided; the clamping branches are only partially
 * visible below.
 */
1402 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1403 int64_t pos, int flags)
1405 int64_t pos_min, pos_max;
1407 pos_min = s->data_offset;
1408 pos_max = avio_size(s->pb) - 1;
1412 else if (pos > pos_max)
1415 avio_seek(s->pb, pos, SEEK_SET);
/*
 * Generic timestamp seek using the stream's index entries.  If the target is
 * past the last index entry, reads packets forward from the last known
 * position to extend the index, then seeks to the matching entry.
 * NOTE(review): excerpt is elided — declarations and several branches are
 * missing between the numbered lines.
 */
1420 static int seek_frame_generic(AVFormatContext *s, int stream_index,
1421 int64_t timestamp, int flags)
1428 st = s->streams[stream_index];
1430 index = av_index_search_timestamp(st, timestamp, flags);
1432 if (index < 0 && st->nb_index_entries &&
1433 timestamp < st->index_entries[0].timestamp)
1436 if (index < 0 || index == st->nb_index_entries - 1) {
1439 if (st->nb_index_entries) {
1440 assert(st->index_entries);
1441 ie = &st->index_entries[st->nb_index_entries - 1];
1442 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1444 ff_update_cur_dts(s, st, ie->timestamp);
1446 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
/* Read frames until a keyframe past the target is seen; EAGAIN is retried. */
1452 read_status = av_read_frame(s, &pkt);
1453 } while (read_status == AVERROR(EAGAIN));
1454 if (read_status < 0)
1456 av_free_packet(&pkt);
1457 if (stream_index == pkt.stream_index)
1458 if ((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1461 index = av_index_search_timestamp(st, timestamp, flags);
1466 ff_read_frame_flush(s);
/* Prefer the demuxer's own read_seek if it succeeds. */
1467 if (s->iformat->read_seek)
1468 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1470 ie = &st->index_entries[index];
1471 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1473 ff_update_cur_dts(s, st, ie->timestamp);
/*
 * Dispatch a seek request: byte seek, then the demuxer's read_seek, then
 * binary search via read_timestamp, then the generic index-based seek.
 * NOTE(review): excerpt is elided between the numbered lines.
 */
1478 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1479 int64_t timestamp, int flags)
1484 if (flags & AVSEEK_FLAG_BYTE) {
1485 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1487 ff_read_frame_flush(s);
1488 return seek_frame_byte(s, stream_index, timestamp, flags);
1491 if (stream_index < 0) {
1492 stream_index = av_find_default_stream_index(s);
1493 if (stream_index < 0)
1496 st = s->streams[stream_index];
1497 /* timestamp for default must be expressed in AV_TIME_BASE units */
1498 timestamp = av_rescale(timestamp, st->time_base.den,
1499 AV_TIME_BASE * (int64_t) st->time_base.num);
1502 /* first, we try the format specific seek */
1503 if (s->iformat->read_seek) {
1504 ff_read_frame_flush(s);
1505 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1511 if (s->iformat->read_timestamp &&
1512 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1513 ff_read_frame_flush(s);
1514 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1515 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1516 ff_read_frame_flush(s);
1517 return seek_frame_generic(s, stream_index, timestamp, flags);
/*
 * Public seek entry point: delegates to seek_frame_internal() and, on
 * success (lines elided), re-queues attached pictures.
 */
1522 int av_seek_frame(AVFormatContext *s, int stream_index,
1523 int64_t timestamp, int flags)
1525 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1528 ret = queue_attached_pictures(s);
/*
 * New-style seek API with a [min_ts, max_ts] window around ts.  Uses the
 * demuxer's read_seek2 when available; otherwise falls back on the old
 * av_seek_frame() API with a direction flag derived from which bound is
 * closer.  NOTE(review): excerpt is elided between the numbered lines.
 */
1533 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
1534 int64_t ts, int64_t max_ts, int flags)
/* Reject an inconsistent window. */
1536 if (min_ts > ts || max_ts < ts)
1539 if (s->iformat->read_seek2) {
1541 ff_read_frame_flush(s);
1542 ret = s->iformat->read_seek2(s, stream_index, min_ts,
1546 ret = queue_attached_pictures(s);
1550 if (s->iformat->read_timestamp) {
1551 // try to seek via read_timestamp()
1554 // Fall back on old API if new is not implemented but old is.
1555 // Note the old API has somewhat different semantics.
1556 if (s->iformat->read_seek || 1)
1557 return av_seek_frame(s, stream_index, ts,
1558 flags | ((uint64_t) ts - min_ts >
1559 (uint64_t) max_ts - ts
1560 ? AVSEEK_FLAG_BACKWARD : 0));
1562 // try some generic seek like seek_frame_generic() but with new ts semantics
1565 /*******************************************************/
1568 * Return TRUE if the stream has accurate duration in any stream.
1570 * @return TRUE if the stream has accurate duration for at least one component.
/* Scans per-stream durations first, then the container-level duration.
 * NOTE(review): braces/returns are elided in this excerpt. */
1572 static int has_duration(AVFormatContext *ic)
1577 for (i = 0; i < ic->nb_streams; i++) {
1578 st = ic->streams[i];
1579 if (st->duration != AV_NOPTS_VALUE)
1582 if (ic->duration != AV_NOPTS_VALUE)
1588 * Estimate the stream timings from the one of each components.
1590 * Also computes the global bitrate if possible.
1592 static void update_stream_timings(AVFormatContext *ic)
1594 int64_t start_time, start_time1, end_time, end_time1;
1595 int64_t duration, duration1, filesize;
/* Sentinels: INT64_MAX/INT64_MIN mark "not yet seen". */
1599 start_time = INT64_MAX;
1600 end_time = INT64_MIN;
1601 duration = INT64_MIN;
1602 for (i = 0; i < ic->nb_streams; i++) {
1603 st = ic->streams[i];
/* Per-stream start time and end time, rescaled to AV_TIME_BASE units
 * (rescale target elided in this excerpt). */
1604 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1605 start_time1 = av_rescale_q(st->start_time, st->time_base,
1607 start_time = FFMIN(start_time, start_time1);
1608 if (st->duration != AV_NOPTS_VALUE) {
1609 end_time1 = start_time1 +
1610 av_rescale_q(st->duration, st->time_base,
1612 end_time = FFMAX(end_time, end_time1);
1615 if (st->duration != AV_NOPTS_VALUE) {
1616 duration1 = av_rescale_q(st->duration, st->time_base,
1618 duration = FFMAX(duration, duration1);
1621 if (start_time != INT64_MAX) {
1622 ic->start_time = start_time;
1623 if (end_time != INT64_MIN)
1624 duration = FFMAX(duration, end_time - start_time);
1626 if (duration != INT64_MIN) {
1627 ic->duration = duration;
/* Derive the global bitrate from file size and total duration. */
1628 if (ic->pb && (filesize = avio_size(ic->pb)) > 0)
1629 /* compute the bitrate */
1630 ic->bit_rate = (double) filesize * 8.0 * AV_TIME_BASE /
1631 (double) ic->duration;
/*
 * Propagate the container-level start_time/duration to any stream that has
 * no start_time of its own, rescaling from AV_TIME_BASE_Q to the stream
 * timebase (rescale target elided in this excerpt).
 */
1635 static void fill_all_stream_timings(AVFormatContext *ic)
1640 update_stream_timings(ic);
1641 for (i = 0; i < ic->nb_streams; i++) {
1642 st = ic->streams[i];
1643 if (st->start_time == AV_NOPTS_VALUE) {
1644 if (ic->start_time != AV_NOPTS_VALUE)
1645 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
1647 if (ic->duration != AV_NOPTS_VALUE)
1648 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
/*
 * Last-resort timing estimate: sum the per-stream codec bitrates (with
 * overflow guard) and derive each stream's duration from file size and
 * total bitrate.  NOTE(review): excerpt is elided between numbered lines.
 */
1654 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1656 int64_t filesize, duration;
1660 /* if bit_rate is already set, we believe it */
1661 if (ic->bit_rate <= 0) {
1663 for (i = 0; i < ic->nb_streams; i++) {
1664 st = ic->streams[i];
1665 if (st->codec->bit_rate > 0) {
/* Guard against signed int overflow when accumulating. */
1666 if (INT_MAX - st->codec->bit_rate < bit_rate) {
1670 bit_rate += st->codec->bit_rate;
1673 ic->bit_rate = bit_rate;
1676 /* if duration is already set, we believe it */
1677 if (ic->duration == AV_NOPTS_VALUE &&
1678 ic->bit_rate != 0) {
1679 filesize = ic->pb ? avio_size(ic->pb) : 0;
1681 for (i = 0; i < ic->nb_streams; i++) {
1682 st = ic->streams[i];
1683 duration = av_rescale(8 * filesize, st->time_base.den,
1685 (int64_t) st->time_base.num);
1686 if (st->duration == AV_NOPTS_VALUE)
1687 st->duration = duration;
/* Chunk size read from near end-of-file per attempt, and how many times the
 * window is doubled (via << retry) before giving up. */
1693 #define DURATION_MAX_READ_SIZE 250000
1694 #define DURATION_MAX_RETRY 3
1696 /* only usable for MPEG-PS streams */
/*
 * Estimate stream durations by reading packets near the end of the file and
 * taking the last PTS seen, relative to each stream's start.  Restores the
 * original file offset afterwards.  NOTE(review): excerpt is elided.
 */
1697 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1699 AVPacket pkt1, *pkt = &pkt1;
1701 int read_size, i, ret;
1703 int64_t filesize, offset, duration;
1706 /* flush packet queue */
1707 flush_packet_queue(ic);
1709 for (i = 0; i < ic->nb_streams; i++) {
1710 st = ic->streams[i];
1711 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1712 av_log(st->codec, AV_LOG_WARNING,
1713 "start time is not set in estimate_timings_from_pts\n");
1716 av_parser_close(st->parser);
1721 /* estimate the end time (duration) */
1722 /* XXX: may need to support wrapping */
1723 filesize = ic->pb ? avio_size(ic->pb) : 0;
1724 end_time = AV_NOPTS_VALUE;
/* Seek to a window DURATION_MAX_READ_SIZE << retry before EOF and scan it. */
1726 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
1730 avio_seek(ic->pb, offset, SEEK_SET);
1733 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
1737 ret = ff_read_packet(ic, pkt);
1738 } while (ret == AVERROR(EAGAIN));
1741 read_size += pkt->size;
1742 st = ic->streams[pkt->stream_index];
1743 if (pkt->pts != AV_NOPTS_VALUE &&
1744 (st->start_time != AV_NOPTS_VALUE ||
1745 st->first_dts != AV_NOPTS_VALUE)) {
1746 duration = end_time = pkt->pts;
1747 if (st->start_time != AV_NOPTS_VALUE)
1748 duration -= st->start_time;
1750 duration -= st->first_dts;
/* Compensate for timestamp wraparound (condition elided). */
1752 duration += 1LL << st->pts_wrap_bits;
1754 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1755 st->duration = duration;
1758 av_free_packet(pkt);
1760 } while (end_time == AV_NOPTS_VALUE &&
1761 filesize > (DURATION_MAX_READ_SIZE << retry) &&
1762 ++retry <= DURATION_MAX_RETRY);
1764 fill_all_stream_timings(ic);
/* Restore the caller's file position and reset per-stream decode state. */
1766 avio_seek(ic->pb, old_offset, SEEK_SET);
1767 for (i = 0; i < ic->nb_streams; i++) {
1768 st = ic->streams[i];
1769 st->cur_dts = st->first_dts;
1770 st->last_IP_pts = AV_NOPTS_VALUE;
/*
 * Choose a timing-estimation strategy: PTS scan for seekable MPEG-PS/TS,
 * per-stream timings when any are known, else bitrate-based guessing.
 * Ends with debug logging of the results.
 */
1774 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1778 /* get the file size, if possible */
1779 if (ic->iformat->flags & AVFMT_NOFILE) {
1782 file_size = avio_size(ic->pb);
1783 file_size = FFMAX(0, file_size);
1786 if ((!strcmp(ic->iformat->name, "mpeg") ||
1787 !strcmp(ic->iformat->name, "mpegts")) &&
1788 file_size && ic->pb->seekable) {
1789 /* get accurate estimate from the PTSes */
1790 estimate_timings_from_pts(ic, old_offset);
1791 } else if (has_duration(ic)) {
1792 /* at least one component has timings - we use them for all
1794 fill_all_stream_timings(ic);
1796 av_log(ic, AV_LOG_WARNING,
1797 "Estimating duration from bitrate, this may be inaccurate\n");
1798 /* less precise: use bitrate info */
1799 estimate_timings_from_bit_rate(ic);
1801 update_stream_timings(ic);
1805 AVStream av_unused *st;
1806 for (i = 0; i < ic->nb_streams; i++) {
1807 st = ic->streams[i];
1808 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
1809 (double) st->start_time / AV_TIME_BASE,
1810 (double) st->duration / AV_TIME_BASE);
1813 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1814 (double) ic->start_time / AV_TIME_BASE,
1815 (double) ic->duration / AV_TIME_BASE,
1816 ic->bit_rate / 1000);
/*
 * Return nonzero when enough codec parameters are known for this stream:
 * audio needs sample_rate+channels (and sample_fmt once a decoder was
 * found), video needs pix_fmt once a decoder was found; in all cases a
 * codec_id must be set.  NOTE(review): some branches are elided.
 */
1820 static int has_codec_parameters(AVStream *st)
1822 AVCodecContext *avctx = st->codec;
1825 switch (avctx->codec_type) {
1826 case AVMEDIA_TYPE_AUDIO:
1827 val = avctx->sample_rate && avctx->channels;
1828 if (st->info->found_decoder >= 0 &&
1829 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1832 case AVMEDIA_TYPE_VIDEO:
1834 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1841 return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
/* H.264 has a variable decode delay; require at least 6 decoded frames
 * before trusting it.  All other codecs are considered settled at once. */
1844 static int has_decode_delay_been_guessed(AVStream *st)
1846 return st->codec->codec_id != AV_CODEC_ID_H264 ||
1847 st->info->nb_decoded_frames >= 6;
1850 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/*
 * Open the stream's decoder if needed (threads forced to 1 so H.264 SPS/PPS
 * extraction works) and decode packets until codec parameters and decode
 * delay are known.  st->info->found_decoder caches the open result:
 * 1 = opened, -1 = failed.  NOTE(review): excerpt is elided.
 */
1851 static int try_decode_frame(AVStream *st, AVPacket *avpkt,
1852 AVDictionary **options)
1854 const AVCodec *codec;
1855 int got_picture = 1, ret = 0;
1856 AVFrame *frame = av_frame_alloc();
1857 AVPacket pkt = *avpkt;
1860 return AVERROR(ENOMEM);
1862 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
1863 AVDictionary *thread_opt = NULL;
1865 codec = st->codec->codec ? st->codec->codec
1866 : avcodec_find_decoder(st->codec->codec_id);
1869 st->info->found_decoder = -1;
1874 /* Force thread count to 1 since the H.264 decoder will not extract
1875 * SPS and PPS to extradata during multi-threaded decoding. */
1876 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
1877 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
1879 av_dict_free(&thread_opt);
1881 st->info->found_decoder = -1;
1884 st->info->found_decoder = 1;
1885 } else if (!st->info->found_decoder)
1886 st->info->found_decoder = 1;
1888 if (st->info->found_decoder < 0) {
/* Keep decoding while there is input (or the decoder is still flushing)
 * and parameters/delay are not yet established. */
1893 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
1895 (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
1896 (!st->codec_info_nb_frames &&
1897 st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
1899 switch (st->codec->codec_type) {
1900 case AVMEDIA_TYPE_VIDEO:
1901 ret = avcodec_decode_video2(st->codec, frame,
1902 &got_picture, &pkt);
1904 case AVMEDIA_TYPE_AUDIO:
1905 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
1912 st->info->nb_decoded_frames++;
1920 av_frame_free(&frame);
/* Linear lookup of the container tag for a codec id; returns the tag on a
 * match (return lines elided in this excerpt). */
1924 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
1926 while (tags->id != AV_CODEC_ID_NONE) {
/* Reverse lookup: codec id for a container tag.  First pass matches the tag
 * exactly; second pass retries case-insensitively via avpriv_toupper4(). */
1934 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1937 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1938 if (tag == tags[i].tag)
1940 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1941 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
1943 return AV_CODEC_ID_NONE;
/*
 * Map PCM properties (bits per sample, float flag, big-endian flag, and a
 * signedness bitmask indexed by bps) to the corresponding PCM codec id.
 * NOTE(review): the bps switch labels are elided in this excerpt.
 */
1946 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
1951 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
1953 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
1955 return AV_CODEC_ID_NONE;
/* Signed formats when the bit for this width is set in sflags. */
1959 if (sflags & (1 << (bps - 1))) {
1962 return AV_CODEC_ID_PCM_S8;
1964 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
1966 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
1968 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
1970 return AV_CODEC_ID_NONE;
/* Unsigned formats otherwise. */
1975 return AV_CODEC_ID_PCM_U8;
1977 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
1979 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
1981 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
1983 return AV_CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for the first matching tag
 * (return statements elided in this excerpt). */
1989 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
1992 for (i = 0; tags && tags[i]; i++) {
1993 int tag = ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for the codec id matching a
 * container tag; AV_CODEC_ID_NONE when no table has it. */
2000 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
2003 for (i = 0; tags && tags[i]; i++) {
2004 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
2005 if (id != AV_CODEC_ID_NONE)
2008 return AV_CODEC_ID_NONE;
/*
 * Fill in missing chapter end times: each open-ended chapter ends at the
 * start of the next chapter that begins after it, or at the file's end
 * (start_time + duration).  NOTE(review): excerpt is elided.
 */
2011 static void compute_chapters_end(AVFormatContext *s)
2014 int64_t max_time = s->duration +
2015 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2017 for (i = 0; i < s->nb_chapters; i++)
2018 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2019 AVChapter *ch = s->chapters[i];
2020 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
/* Clamp to the earliest later-starting chapter. */
2024 for (j = 0; j < s->nb_chapters; j++) {
2025 AVChapter *ch1 = s->chapters[j];
2026 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2028 if (j != i && next_start > ch->start && next_start < end)
/* INT64_MAX means no bound was found; fall back to a zero-length chapter. */
2031 ch->end = (end == INT64_MAX) ? ch->start : end;
/* Return candidate standard framerate numerators (denominator 12*1001):
 * for small i, (i+1)*1001 covers 23.976..60 in 1/1001 steps; above that a
 * fixed table of 24/30/60/12/15 scaled by 1000*12 (threshold elided). */
2035 static int get_std_framerate(int i)
2038 return (i + 1) * 1001;
2040 return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
/*
 * Probe the input by reading and (when needed) decoding packets until every
 * stream has usable codec parameters, a framerate estimate, and timing
 * info, or until probesize/max_analyze_duration is exhausted.
 * NOTE(review): this excerpt is heavily elided — many original lines are
 * missing between the numbered lines below.
 */
2043 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2045 int i, count, ret, read_size, j;
2047 AVPacket pkt1, *pkt;
2048 int64_t old_offset = avio_tell(ic->pb);
2049 // new streams might appear, no options for those
2050 int orig_nb_streams = ic->nb_streams;
/* Pass 1: set up parsers and try opening decoders, which is often enough
 * to fill the codec parameters without decoding. */
2052 for (i = 0; i < ic->nb_streams; i++) {
2053 const AVCodec *codec;
2054 AVDictionary *thread_opt = NULL;
2055 st = ic->streams[i];
2057 // only for the split stuff
2058 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2059 st->parser = av_parser_init(st->codec->codec_id);
2060 if (st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser)
2061 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2063 codec = st->codec->codec ? st->codec->codec
2064 : avcodec_find_decoder(st->codec->codec_id);
2066 /* Force thread count to 1 since the H.264 decoder will not extract
2067 * SPS and PPS to extradata during multi-threaded decoding. */
2068 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2070 /* Ensure that subtitle_header is properly set. */
2071 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2072 && codec && !st->codec->codec)
2073 avcodec_open2(st->codec, codec,
2074 options ? &options[i] : &thread_opt);
2076 // Try to just open decoders, in case this is enough to get parameters.
2077 if (!has_codec_parameters(st)) {
2078 if (codec && !st->codec->codec)
2079 avcodec_open2(st->codec, codec,
2080 options ? &options[i] : &thread_opt);
2083 av_dict_free(&thread_opt);
2086 for (i = 0; i < ic->nb_streams; i++) {
2087 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2088 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
/* Main probe loop (loop header elided): read packets until all info is
 * found or a limit is reached; abortable via the interrupt callback. */
2094 if (ff_check_interrupt(&ic->interrupt_callback)) {
2096 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2100 /* check if one codec still needs to be handled */
2101 for (i = 0; i < ic->nb_streams; i++) {
2102 int fps_analyze_framecount = 20;
2104 st = ic->streams[i];
2105 if (!has_codec_parameters(st))
2107 /* If the timebase is coarse (like the usual millisecond precision
2108 * of mkv), we need to analyze more frames to reliably arrive at
2109 * the correct fps. */
2110 if (av_q2d(st->time_base) > 0.0005)
2111 fps_analyze_framecount *= 2;
2112 if (ic->fps_probe_size >= 0)
2113 fps_analyze_framecount = ic->fps_probe_size;
2114 /* variable fps and no guess at the real fps */
2115 if (!st->avg_frame_rate.num &&
2116 st->codec_info_nb_frames < fps_analyze_framecount &&
2117 st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2119 if (st->parser && st->parser->parser->split &&
2120 !st->codec->extradata)
2122 if (st->first_dts == AV_NOPTS_VALUE &&
2123 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2124 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2127 if (i == ic->nb_streams) {
2128 /* NOTE: If the format has no header, then we need to read some
2129 * packets to get most of the streams, so we cannot stop here. */
2130 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2131 /* If we found the info for all the codecs, we can stop. */
2133 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2137 /* We did not get all the codec info, but we read too much data. */
2138 if (read_size >= ic->probesize) {
2140 av_log(ic, AV_LOG_DEBUG,
2141 "Probe buffer size limit %d reached\n", ic->probesize);
2145 /* NOTE: A new stream can be added there if no header in file
2146 * (AVFMTCTX_NOHEADER). */
2147 ret = read_frame_internal(ic, &pkt1);
2148 if (ret == AVERROR(EAGAIN))
/* EOF path (condition elided): flush decoders with empty packets so
 * delayed frames still contribute parameters. */
2153 AVPacket empty_pkt = { 0 };
2155 av_init_packet(&empty_pkt);
2157 /* We could not have all the codec parameters before EOF. */
2159 for (i = 0; i < ic->nb_streams; i++) {
2160 st = ic->streams[i];
2162 /* flush the decoders */
2163 if (st->info->found_decoder == 1) {
2165 err = try_decode_frame(st, &empty_pkt,
2166 (options && i < orig_nb_streams)
2167 ? &options[i] : NULL);
2168 } while (err > 0 && !has_codec_parameters(st));
2172 av_log(ic, AV_LOG_WARNING,
2173 "decoding for stream %d failed\n", st->index);
2174 } else if (!has_codec_parameters(st)) {
2176 avcodec_string(buf, sizeof(buf), st->codec, 0);
2177 av_log(ic, AV_LOG_WARNING,
2178 "Could not find codec parameters (%s)\n", buf);
2186 if (ic->flags & AVFMT_FLAG_NOBUFFER) {
2189 pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
2190 &ic->packet_buffer_end);
2191 if ((ret = av_dup_packet(pkt)) < 0)
2192 goto find_stream_info_err;
2195 read_size += pkt->size;
2197 st = ic->streams[pkt->stream_index];
2198 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2199 /* check for non-increasing dts */
2200 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2201 st->info->fps_last_dts >= pkt->dts) {
2202 av_log(ic, AV_LOG_WARNING,
2203 "Non-increasing DTS in stream %d: packet %d with DTS "
2204 "%"PRId64", packet %d with DTS %"PRId64"\n",
2205 st->index, st->info->fps_last_dts_idx,
2206 st->info->fps_last_dts, st->codec_info_nb_frames,
2208 st->info->fps_first_dts =
2209 st->info->fps_last_dts = AV_NOPTS_VALUE;
2211 /* Check for a discontinuity in dts. If the difference in dts
2212 * is more than 1000 times the average packet duration in the
2213 * sequence, we treat it as a discontinuity. */
2214 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2215 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2216 (pkt->dts - st->info->fps_last_dts) / 1000 >
2217 (st->info->fps_last_dts - st->info->fps_first_dts) /
2218 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2219 av_log(ic, AV_LOG_WARNING,
2220 "DTS discontinuity in stream %d: packet %d with DTS "
2221 "%"PRId64", packet %d with DTS %"PRId64"\n",
2222 st->index, st->info->fps_last_dts_idx,
2223 st->info->fps_last_dts, st->codec_info_nb_frames,
2225 st->info->fps_first_dts =
2226 st->info->fps_last_dts = AV_NOPTS_VALUE;
2229 /* update stored dts values */
2230 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2231 st->info->fps_first_dts = pkt->dts;
2232 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2234 st->info->fps_last_dts = pkt->dts;
2235 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2237 /* check max_analyze_duration */
2238 if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2239 AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2240 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached\n",
2241 ic->max_analyze_duration);
2245 if (st->parser && st->parser->parser->split && !st->codec->extradata) {
2246 int i = st->parser->parser->split(st->codec, pkt->data, pkt->size);
2247 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2248 st->codec->extradata_size = i;
2249 st->codec->extradata = av_mallocz(st->codec->extradata_size +
2250 FF_INPUT_BUFFER_PADDING_SIZE);
2251 if (!st->codec->extradata)
2252 return AVERROR(ENOMEM);
2253 memcpy(st->codec->extradata, pkt->data,
2254 st->codec->extradata_size);
2258 /* If still no information, we try to open the codec and to
2259 * decompress the frame. We try to avoid that in most cases as
2260 * it takes longer and uses more memory. For MPEG-4, we need to
2261 * decompress for QuickTime.
2263 * If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2264 * least one frame of codec data, this makes sure the codec initializes
2265 * the channel configuration and does not only trust the values from
2267 try_decode_frame(st, pkt,
2268 (options && i < orig_nb_streams) ? &options[i] : NULL);
2270 st->codec_info_nb_frames++;
2274 // close codecs which were opened in try_decode_frame()
2275 for (i = 0; i < ic->nb_streams; i++) {
2276 st = ic->streams[i];
2277 avcodec_close(st->codec);
/* Post-processing: estimate avg_frame_rate for video, fill audio
 * bits_per_coded_sample and disposition from audio_service_type. */
2279 for (i = 0; i < ic->nb_streams; i++) {
2280 st = ic->streams[i];
2281 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2282 /* estimate average framerate if not set by demuxer */
2283 if (!st->avg_frame_rate.num &&
2284 st->info->fps_last_dts != st->info->fps_first_dts) {
2285 int64_t delta_dts = st->info->fps_last_dts -
2286 st->info->fps_first_dts;
2287 int delta_packets = st->info->fps_last_dts_idx -
2288 st->info->fps_first_dts_idx;
2290 double best_error = 0.01;
2292 if (delta_dts >= INT64_MAX / st->time_base.num ||
2293 delta_packets >= INT64_MAX / st->time_base.den ||
2296 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2297 delta_packets * (int64_t) st->time_base.den,
2298 delta_dts * (int64_t) st->time_base.num, 60000);
2300 /* Round guessed framerate to a "standard" framerate if it's
2301 * within 1% of the original estimate. */
2302 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
2303 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
2304 double error = fabs(av_q2d(st->avg_frame_rate) /
2305 av_q2d(std_fps) - 1);
2307 if (error < best_error) {
2309 best_fps = std_fps.num;
2313 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2314 best_fps, 12 * 1001, INT_MAX);
2316 } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2317 if (!st->codec->bits_per_coded_sample)
2318 st->codec->bits_per_coded_sample =
2319 av_get_bits_per_sample(st->codec->codec_id);
2320 // set stream disposition based on audio service type
2321 switch (st->codec->audio_service_type) {
2322 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2323 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
2325 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2326 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
2328 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2329 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
2331 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2332 st->disposition = AV_DISPOSITION_COMMENT;
2334 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2335 st->disposition = AV_DISPOSITION_KARAOKE;
2341 estimate_timings(ic, old_offset);
2343 compute_chapters_end(ic);
2345 find_stream_info_err:
2346 for (i = 0; i < ic->nb_streams; i++) {
2347 ic->streams[i]->codec->thread_count = 0;
2348 av_freep(&ic->streams[i]->info);
/* Return the first program containing stream index s, or (fallthrough,
 * elided) NULL when no program lists it. */
2353 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2357 for (i = 0; i < ic->nb_programs; i++)
2358 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2359 if (ic->programs[i]->stream_index[j] == s)
2360 return ic->programs[i];
/*
 * Pick the "best" stream of a given media type: restrict the search to the
 * related stream's program when asked, skip impaired-audience streams,
 * prefer the stream with the most probed frames, optionally require and
 * return a decoder.  NOTE(review): excerpt is elided.
 */
2364 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
2365 int wanted_stream_nb, int related_stream,
2366 AVCodec **decoder_ret, int flags)
2368 int i, nb_streams = ic->nb_streams;
2369 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2370 unsigned *program = NULL;
2371 AVCodec *decoder = NULL, *best_decoder = NULL;
2373 if (related_stream >= 0 && wanted_stream_nb < 0) {
2374 AVProgram *p = find_program_from_stream(ic, related_stream);
2376 program = p->stream_index;
2377 nb_streams = p->nb_stream_indexes;
2380 for (i = 0; i < nb_streams; i++) {
2381 int real_stream_index = program ? program[i] : i;
2382 AVStream *st = ic->streams[real_stream_index];
2383 AVCodecContext *avctx = st->codec;
2384 if (avctx->codec_type != type)
2386 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2388 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
2389 AV_DISPOSITION_VISUAL_IMPAIRED))
2392 decoder = avcodec_find_decoder(st->codec->codec_id);
2395 ret = AVERROR_DECODER_NOT_FOUND;
/* Keep the stream with the highest codec_info_nb_frames seen so far. */
2399 if (best_count >= st->codec_info_nb_frames)
2401 best_count = st->codec_info_nb_frames;
2402 ret = real_stream_index;
2403 best_decoder = decoder;
2404 if (program && i == nb_streams - 1 && ret < 0) {
2406 nb_streams = ic->nb_streams;
2407 /* no related stream found, try again with everything */
2412 *decoder_ret = best_decoder;
2416 /*******************************************************/
/* Resume a paused network stream: demuxer read_play first, else un-pause
 * the I/O layer (the intervening s->pb check is elided), else ENOSYS. */
2418 int av_read_play(AVFormatContext *s)
2420 if (s->iformat->read_play)
2421 return s->iformat->read_play(s);
2423 return avio_pause(s->pb, 0);
2424 return AVERROR(ENOSYS);
/* Pause a network stream: mirror of av_read_play() with pause = 1. */
2427 int av_read_pause(AVFormatContext *s)
2429 if (s->iformat->read_pause)
2430 return s->iformat->read_pause(s);
2432 return avio_pause(s->pb, 1);
2433 return AVERROR(ENOSYS);
/*
 * Free an AVFormatContext and everything it owns: per-stream side data,
 * parser, attached picture, metadata, index, codec extradata; then
 * programs, chapters, global metadata, the stream array and internals.
 * NOTE(review): excerpt is elided between the numbered lines.
 */
2436 void avformat_free_context(AVFormatContext *s)
2442 if (s->iformat && s->iformat->priv_class && s->priv_data)
2443 av_opt_free(s->priv_data);
2445 for (i = 0; i < s->nb_streams; i++) {
2446 /* free all data in a stream component */
2449 for (j = 0; j < st->nb_side_data; j++)
2450 av_freep(&st->side_data[j].data);
2451 av_freep(&st->side_data);
2452 st->nb_side_data = 0;
2455 av_parser_close(st->parser);
2457 if (st->attached_pic.data)
2458 av_free_packet(&st->attached_pic);
2459 av_dict_free(&st->metadata);
2460 av_freep(&st->probe_data.buf);
2461 av_free(st->index_entries);
2462 av_free(st->codec->extradata);
2463 av_free(st->codec->subtitle_header);
2465 av_free(st->priv_data);
2469 for (i = s->nb_programs - 1; i >= 0; i--) {
2470 av_dict_free(&s->programs[i]->metadata);
2471 av_freep(&s->programs[i]->stream_index);
2472 av_freep(&s->programs[i]);
2474 av_freep(&s->programs);
2475 av_freep(&s->priv_data);
2476 while (s->nb_chapters--) {
2477 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2478 av_free(s->chapters[s->nb_chapters]);
2480 av_freep(&s->chapters);
2481 av_dict_free(&s->metadata);
2482 av_freep(&s->streams);
2483 av_freep(&s->internal);
/*
 * Close an input context opened with avformat_open_input(): drop the pb
 * reference when the format owns no file or the I/O is user-supplied,
 * flush queued packets, call the demuxer's read_close, then free the
 * context.  NOTE(review): excerpt is elided (pb close/NULL-out missing).
 */
2487 void avformat_close_input(AVFormatContext **ps)
2489 AVFormatContext *s = *ps;
2490 AVIOContext *pb = s->pb;
2492 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2493 (s->flags & AVFMT_FLAG_CUSTOM_IO))
2496 flush_packet_queue(s);
2499 if (s->iformat->read_close)
2500 s->iformat->read_close(s);
2502 avformat_free_context(s);
/*
 * Allocate a new AVStream, append it to s->streams and initialize its
 * codec context and timestamp bookkeeping to "unknown" defaults.
 * NOTE(review): excerpt is elided (error-path frees are missing here).
 */
2509 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
2514 if (av_reallocp_array(&s->streams, s->nb_streams + 1,
2515 sizeof(*s->streams)) < 0) {
2520 st = av_mallocz(sizeof(AVStream));
2523 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2528 st->codec = avcodec_alloc_context3(c);
2530 /* no default bitrate if decoding */
2531 st->codec->bit_rate = 0;
2533 /* default pts setting is MPEG-like */
2534 avpriv_set_pts_info(st, 33, 1, 90000);
2537 st->index = s->nb_streams;
2538 st->start_time = AV_NOPTS_VALUE;
2539 st->duration = AV_NOPTS_VALUE;
2540 /* we set the current DTS to 0 so that formats without any timestamps
2541 * but durations get some timestamps, formats with some unknown
2542 * timestamps have their first few packets buffered and the
2543 * timestamps corrected before they are returned to the user */
2545 st->first_dts = AV_NOPTS_VALUE;
2546 st->probe_packets = MAX_PROBE_PACKETS;
2548 st->last_IP_pts = AV_NOPTS_VALUE;
2549 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
2550 st->pts_buffer[i] = AV_NOPTS_VALUE;
2552 st->sample_aspect_ratio = (AVRational) { 0, 1 };
2554 st->info->fps_first_dts = AV_NOPTS_VALUE;
2555 st->info->fps_last_dts = AV_NOPTS_VALUE;
2557 s->streams[s->nb_streams++] = st;
/*
 * Return the program with the given id, creating and registering a new one
 * when none exists.  NOTE(review): allocation-failure handling and the
 * return are elided in this excerpt.
 */
2561 AVProgram *av_new_program(AVFormatContext *ac, int id)
2563 AVProgram *program = NULL;
2566 av_dlog(ac, "new_program: id=0x%04x\n", id);
2568 for (i = 0; i < ac->nb_programs; i++)
2569 if (ac->programs[i]->id == id)
2570 program = ac->programs[i];
2573 program = av_mallocz(sizeof(AVProgram));
2576 dynarray_add(&ac->programs, &ac->nb_programs, program);
2577 program->discard = AVDISCARD_NONE;
/*
 * Return the chapter with the given id, creating it when absent, and set
 * its title/time_base/start (end assignment elided in this excerpt).
 */
2584 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
2585 int64_t start, int64_t end, const char *title)
2587 AVChapter *chapter = NULL;
2590 for (i = 0; i < s->nb_chapters; i++)
2591 if (s->chapters[i]->id == id)
2592 chapter = s->chapters[i];
2595 chapter = av_mallocz(sizeof(AVChapter));
2598 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2600 av_dict_set(&chapter->metadata, "title", title, 0);
2602 chapter->time_base = time_base;
2603 chapter->start = start;
/*
 * Add a stream index to the program with the given id, skipping invalid
 * indices and duplicates; grows the index array, zeroing the count when
 * reallocation fails.  NOTE(review): excerpt is elided.
 */
2609 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
2612 AVProgram *program = NULL;
2614 if (idx >= ac->nb_streams) {
2615 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
2619 for (i = 0; i < ac->nb_programs; i++) {
2620 if (ac->programs[i]->id != progid)
2622 program = ac->programs[i];
2623 for (j = 0; j < program->nb_stream_indexes; j++)
2624 if (program->stream_index[j] == idx)
2627 if (av_reallocp_array(&program->stream_index,
2628 program->nb_stream_indexes + 1,
2629 sizeof(*program->stream_index)) < 0) {
2630 program->nb_stream_indexes = 0;
2633 program->stream_index[program->nb_stream_indexes++] = idx;
2638 uint64_t ff_ntp_time(void)
2640 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/**
 * Expand a numbered-sequence pattern ("img%03d.png") into a concrete
 * filename for the given frame number.
 *
 * Exactly one "%d" (optionally zero-padded, e.g. "%05d") must appear in
 * path; "%%" emits a literal '%'. Any other conversion, a missing "%d",
 * a duplicate "%d", or an overflowing buffer is an error.
 *
 * NOTE(review): restored from a garbled listing; the switch/goto state
 * machine was partially dropped and is reconstructed from the canonical
 * implementation.
 *
 * @param buf      output buffer, always NUL-terminated on return
 * @param buf_size size of buf in bytes
 * @param path     printf-like numbered sequence pattern
 * @param number   frame number to substitute
 * @return 0 on success, -1 on malformed pattern or overflow
 */
int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q = buf;
    p = path;
    percentd_found = 0;
    for (;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            do {
                /* Parse an optional zero-pad width, e.g. the 3 in "%03d". */
                nd = 0;
                while (av_isdigit(*p))
                    nd = nd * 10 + *p++ - '0';
                c = *p++;
            } while (av_isdigit(c));

            switch (c) {
            case '%':
                goto addchar;
            case 'd':
                if (percentd_found)
                    goto fail;
                percentd_found = 1;
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;
            }
        } else {
addchar:
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;
fail:
    *q = '\0';
    return -1;
}
/**
 * Split a URL string into components:
 *   proto://authorization@hostname:port/path
 *
 * Any output pointer may describe a zero-sized buffer; missing components
 * yield empty strings and *port_ptr stays -1. A URL without "proto:" is
 * treated as a plain filename and copied entirely into path.
 *
 * NOTE(review): restored from a garbled listing; output initialization,
 * protocol skipping and the `ls != p` host guard were missing and are
 * reconstructed from the canonical implementation.
 */
void av_url_split(char *proto, int proto_size,
                  char *authorization, int authorization_size,
                  char *hostname, int hostname_size,
                  int *port_ptr, char *path, int path_size, const char *url)
{
    const char *p, *ls, *at, *col, *brk;

    if (port_ptr)
        *port_ptr = -1;
    if (proto_size > 0)
        proto[0] = 0;
    if (authorization_size > 0)
        authorization[0] = 0;
    if (hostname_size > 0)
        hostname[0] = 0;
    if (path_size > 0)
        path[0] = 0;

    /* parse protocol */
    if ((p = strchr(url, ':'))) {
        av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
        p++; /* skip ':' */
        if (*p == '/')
            p++;
        if (*p == '/')
            p++;
    } else {
        /* no protocol means plain filename */
        av_strlcpy(path, url, path_size);
        return;
    }

    /* separate path from hostname */
    ls = strchr(p, '/');
    if (!ls)
        ls = strchr(p, '?');
    if (ls)
        av_strlcpy(path, ls, path_size);
    else
        ls = &p[strlen(p)]; // XXX

    /* the rest is hostname, use that to parse auth/port */
    if (ls != p) {
        /* authorization (user[:pass]@hostname) */
        if ((at = strchr(p, '@')) && at < ls) {
            av_strlcpy(authorization, p,
                       FFMIN(authorization_size, at + 1 - p));
            p = at + 1; /* skip '@' */
        }

        if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
            /* [host]:port — bracketed IPv6 literal */
            av_strlcpy(hostname, p + 1,
                       FFMIN(hostname_size, brk - p));
            if (brk[1] == ':' && port_ptr)
                *port_ptr = atoi(brk + 2);
        } else if ((col = strchr(p, ':')) && col < ls) {
            av_strlcpy(hostname, p,
                       FFMIN(col + 1 - p, hostname_size));
            if (port_ptr)
                *port_ptr = atoi(col + 1);
        } else
            av_strlcpy(hostname, p,
                       FFMIN(ls + 1 - p, hostname_size));
    }
}
/**
 * Write the hexadecimal representation of s bytes from src into buff.
 *
 * buff must hold at least 2*s bytes; no terminating NUL is written.
 *
 * NOTE(review): restored from a garbled listing (missing loop variable
 * declaration, closing braces and return).
 *
 * @param buff      output buffer (2*s chars, not NUL-terminated)
 * @param src       bytes to encode
 * @param s         number of input bytes
 * @param lowercase nonzero to emit a-f instead of A-F
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    int i;
    static const char hex_table_uc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'A', 'B',
                                           'C', 'D', 'E', 'F' };
    static const char hex_table_lc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'a', 'b',
                                           'c', 'd', 'e', 'f' };
    const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;

    for (i = 0; i < s; i++) {
        buff[i * 2]     = hex_table[src[i] >> 4];
        buff[i * 2 + 1] = hex_table[src[i] & 0xF];
    }

    return buff;
}
2784 int ff_hex_to_data(uint8_t *data, const char *p)
2791 p += strspn(p, SPACE_CHARS);
2794 c = av_toupper((unsigned char) *p++);
2795 if (c >= '0' && c <= '9')
2797 else if (c >= 'A' && c <= 'F')
2812 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
2813 unsigned int pts_num, unsigned int pts_den)
2816 if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
2817 if (new_tb.num != pts_num)
2818 av_log(NULL, AV_LOG_DEBUG,
2819 "st:%d removing common factor %d from timebase\n",
2820 s->index, pts_num / new_tb.num);
2822 av_log(NULL, AV_LOG_WARNING,
2823 "st:%d has too large timebase, reducing\n", s->index);
2825 if (new_tb.num <= 0 || new_tb.den <= 0) {
2826 av_log(NULL, AV_LOG_ERROR,
2827 "Ignoring attempt to set invalid timebase for st:%d\n",
2831 s->time_base = new_tb;
2832 s->pts_wrap_bits = pts_wrap_bits;
2835 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
2838 const char *ptr = str;
2840 /* Parse key=value pairs. */
2843 char *dest = NULL, *dest_end;
2844 int key_len, dest_len = 0;
2846 /* Skip whitespace and potential commas. */
2847 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
2854 if (!(ptr = strchr(key, '=')))
2857 key_len = ptr - key;
2859 callback_get_buf(context, key, key_len, &dest, &dest_len);
2860 dest_end = dest + dest_len - 1;
2864 while (*ptr && *ptr != '\"') {
2868 if (dest && dest < dest_end)
2872 if (dest && dest < dest_end)
2880 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
2881 if (dest && dest < dest_end)
2889 int ff_find_stream_index(AVFormatContext *s, int id)
2892 for (i = 0; i < s->nb_streams; i++)
2893 if (s->streams[i]->id == id)
2898 int64_t ff_iso8601_to_unix_time(const char *datestr)
2901 struct tm time1 = { 0 }, time2 = { 0 };
2903 ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
2904 ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
2906 return av_timegm(&time2);
2908 return av_timegm(&time1);
2910 av_log(NULL, AV_LOG_WARNING,
2911 "strptime() unavailable on this system, cannot convert "
2912 "the date string.\n");
2917 int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
2921 if (ofmt->query_codec)
2922 return ofmt->query_codec(codec_id, std_compliance);
2923 else if (ofmt->codec_tag)
2924 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
2925 else if (codec_id == ofmt->video_codec ||
2926 codec_id == ofmt->audio_codec ||
2927 codec_id == ofmt->subtitle_codec)
2930 return AVERROR_PATCHWELCOME;
/**
 * Do global network-layer initialization (sockets, TLS).
 *
 * NOTE(review): only three interior lines of this function survived the
 * extraction; the CONFIG_NETWORK guard and ff_tls_init() call are
 * reconstructed from the canonical implementation — verify against the
 * upstream tree.
 *
 * @return 0 on success, a negative AVERROR on failure
 */
int avformat_network_init(void)
{
#if CONFIG_NETWORK
    int ret;
    ff_network_inited_globally = 1;
    if ((ret = ff_network_init()) < 0)
        return ret;
    ff_tls_init();
#endif
    return 0;
}
/**
 * Undo the initialization done by avformat_network_init().
 *
 * NOTE(review): only the signature line survived the extraction; the body
 * is reconstructed from the canonical implementation — verify against the
 * upstream tree.
 *
 * @return 0
 */
int avformat_network_deinit(void)
{
#if CONFIG_NETWORK
    ff_network_close();
    ff_tls_deinit();
#endif
    return 0;
}
2954 int ff_add_param_change(AVPacket *pkt, int32_t channels,
2955 uint64_t channel_layout, int32_t sample_rate,
2956 int32_t width, int32_t height)
2962 return AVERROR(EINVAL);
2965 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
2967 if (channel_layout) {
2969 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
2973 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
2975 if (width || height) {
2977 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
2979 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
2981 return AVERROR(ENOMEM);
2982 bytestream_put_le32(&data, flags);
2984 bytestream_put_le32(&data, channels);
2986 bytestream_put_le64(&data, channel_layout);
2988 bytestream_put_le32(&data, sample_rate);
2989 if (width || height) {
2990 bytestream_put_le32(&data, width);
2991 bytestream_put_le32(&data, height);
/* Install canned H.264 SPS/PPS extradata for AVC-Intra streams, selected
 * by coded width (1920/1440/1280) and field order.
 * NOTE(review): this listing is a garbled extraction — each line carries a
 * stray original line number and several rows of the hex tables (and the
 * function's braces/declarations) are missing. Do NOT transcribe these
 * tables as-is; recover the full byte arrays from the upstream source. */
2996 int ff_generate_avci_extradata(AVStream *st)
/* AVC-Intra 100, 1080p: SPS NAL (starts 00 00 00 01 67) + PPS NAL. */
2998 static const uint8_t avci100_1080p_extradata[] = {
3000 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3001 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3002 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3003 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
3004 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
3005 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
3006 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
3007 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
3008 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* NOTE(review): at least one table row is missing here (inner numbering
 * jumps 3008 -> 3010); the PPS below starts at the next start code. */
3010 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
/* AVC-Intra 100, 1080i. */
3013 static const uint8_t avci100_1080i_extradata[] = {
3015 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3016 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3017 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3018 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
3019 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
3020 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
3021 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
3022 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
3023 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
3024 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
3025 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
/* NOTE(review): row(s) missing (3026), then the PPS NAL. */
3027 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
/* AVC-Intra 50, 1080i. */
3030 static const uint8_t avci50_1080i_extradata[] = {
3032 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
3033 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
3034 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
3035 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
3036 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
3037 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
3038 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
3039 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
3040 0x81, 0x13, 0xf7, 0xff, 0x80, 0x01, 0x80, 0x02,
3041 0x71, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
3042 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
/* NOTE(review): row(s) missing (3043), then the PPS NAL. */
3044 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
/* AVC-Intra 100, 720p. */
3047 static const uint8_t avci100_720p_extradata[] = {
3049 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3050 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
3051 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
3052 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
3053 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
3054 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
3055 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
3056 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
3057 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
3058 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
/* NOTE(review): row(s) missing (3059), then the PPS NAL. */
3060 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
/* Table selection: width picks the format, field order picks p vs i
 * for 1920-wide streams. */
3064 const uint8_t *data = NULL;
3067 if (st->codec->width == 1920) {
3068 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
3069 data = avci100_1080p_extradata;
3070 size = sizeof(avci100_1080p_extradata);
/* (interlaced 1920 branch — the `} else {` line was dropped) */
3072 data = avci100_1080i_extradata;
3073 size = sizeof(avci100_1080i_extradata);
3075 } else if (st->codec->width == 1440) {
3076 data = avci50_1080i_extradata;
3077 size = sizeof(avci50_1080i_extradata);
3078 } else if (st->codec->width == 1280) {
3079 data = avci100_720p_extradata;
3080 size = sizeof(avci100_720p_extradata);
/* Replace any previous extradata with a zero-padded copy of the table. */
3086 av_freep(&st->codec->extradata);
3087 st->codec->extradata_size = 0;
3088 st->codec->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
3089 if (!st->codec->extradata)
3090 return AVERROR(ENOMEM);
3092 memcpy(st->codec->extradata, data, size);
3093 st->codec->extradata_size = size;
3098 uint8_t *av_stream_get_side_data(AVStream *st, enum AVPacketSideDataType type,
3103 for (i = 0; i < st->nb_side_data; i++) {
3104 if (st->side_data[i].type == type) {
3106 *size = st->side_data[i].size;
3107 return st->side_data[i].data;