2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
42 #include "audiointerleave.h"
55 * various utility functions for use within Libav
/* Return the libavformat version the library was compiled with
 * (LIBAVFORMAT_VERSION_INT). Closing braces elided in this extract. */
58 unsigned avformat_version(void)
60 return LIBAVFORMAT_VERSION_INT;
/* Return the build-time configure line of the library. */
63 const char *avformat_configuration(void)
65 return LIBAV_CONFIGURATION;
/* Return the library's license string. LIBAV_LICENSE is concatenated after
 * LICENSE_PREFIX at compile time; the pointer arithmetic skips the prefix
 * (sizeof includes the NUL, hence the -1). */
68 const char *avformat_license(void)
70 #define LICENSE_PREFIX "libavformat license: "
71 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
74 /* an arbitrarily chosen "sane" max packet size -- 50M */
75 #define SANE_CHUNK_SIZE (50000000)
77 /* Read the data in sane-sized chunks and append to pkt.
78 * Return the number of bytes read or an error. */
79 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
/* NOTE(review): the read loop header and the declarations of ret/read_size
 * are elided in this extract; code left byte-identical. */
81 int64_t chunk_size = size;
82 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
83 int orig_size = pkt->size;
87 int prev_size = pkt->size;
90 /* When the caller requests a lot of data, limit it to the amount
91 * left in file or SANE_CHUNK_SIZE when it is not known. */
92 if (size > SANE_CHUNK_SIZE) {
93 int64_t filesize = avio_size(s) - avio_tell(s);
94 chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
96 read_size = FFMIN(size, chunk_size);
/* Grow the packet, read the next chunk after the data already present,
 * and shrink back if the read came up short (ret may be negative). */
98 ret = av_grow_packet(pkt, read_size);
102 ret = avio_read(s, pkt->data + prev_size, read_size);
103 if (ret != read_size) {
104 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
/* Report the number of bytes actually appended, else propagate the error. */
114 return pkt->size > orig_size ? pkt->size - orig_size : ret;
/* Read up to size bytes into a fresh packet, recording the current file
 * position in pkt->pos; chunked reading is done by append_packet_chunked(). */
117 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
122 pkt->pos = avio_tell(s);
124 return append_packet_chunked(s, pkt, size);
/* Append up to size bytes to an existing packet; the elided condition before
 * the first return presumably tests for an empty packet (pkt->size == 0), in
 * which case this degrades to av_get_packet() — TODO confirm against full
 * source. */
127 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
130 return av_get_packet(s, pkt, size);
131 return append_packet_chunked(s, pkt, size);
/* Return non-zero if filename contains a frame-number pattern that
 * av_get_frame_filename() can expand (used for image-sequence demuxers). */
134 int av_filename_number_test(const char *filename)
138 (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
/* Run format probing on buffered stream data and, if a raw-ES demuxer is
 * detected, map its name to a codec id/type for the stream via the static
 * name table below. */
141 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
142 AVProbeData *pd, int score)
/* Table mapping probed demuxer names to codec id + media type. */
144 static const struct {
147 enum AVMediaType type;
149 { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO },
150 { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO },
151 { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO },
152 { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO },
153 { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO },
154 { "latm", AV_CODEC_ID_AAC_LATM, AVMEDIA_TYPE_AUDIO },
155 { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO },
156 { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO },
157 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
160 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
164 av_log(s, AV_LOG_DEBUG,
165 "Probe with size=%d, packets=%d detected %s with score=%d\n",
166 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
/* Linear search; the table is NULL-name terminated. */
168 for (i = 0; fmt_id_type[i].name; i++) {
169 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
170 st->codec->codec_id = fmt_id_type[i].id;
171 st->codec->codec_type = fmt_id_type[i].type;
179 /************************************************************/
180 /* input media file */
182 /* Open input file and probe the format if necessary. */
/* Three paths: a caller-supplied AVIOContext (custom I/O), an AVFMT_NOFILE
 * format that opens its own resource, or a regular file opened with
 * avio_open2() and then probed. */
183 static int init_input(AVFormatContext *s, const char *filename,
184 AVDictionary **options)
187 AVProbeData pd = { filename, NULL, 0 };
/* Caller provided s->pb: mark custom I/O and probe through it. */
190 s->flags |= AVFMT_FLAG_CUSTOM_IO;
192 return av_probe_input_buffer(s->pb, &s->iformat, filename,
/* A NOFILE format combined with caller-supplied I/O is contradictory. */
194 else if (s->iformat->flags & AVFMT_NOFILE)
195 return AVERROR(EINVAL);
/* Format known to be NOFILE, or identifiable from the filename alone:
 * nothing to open here. */
199 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
200 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
203 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ,
204 &s->interrupt_callback, options)) < 0)
208 return av_probe_input_buffer(s->pb, &s->iformat, filename,
/* Append pkt to a singly linked AVPacketList, updating the tail pointer
 * *plast_pktl; returns a pointer to the stored packet (NULL on alloc
 * failure — allocation-check line elided in this extract). */
212 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
213 AVPacketList **plast_pktl)
215 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
/* Link after the current tail, or install as the new head when empty. */
220 (*plast_pktl)->next = pktl;
222 *packet_buffer = pktl;
224 /* Add the packet in the buffered packet list. */
230 static int queue_attached_pictures(AVFormatContext *s)
233 for (i = 0; i < s->nb_streams; i++)
234 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
235 s->streams[i]->discard < AVDISCARD_ALL) {
236 AVPacket copy = s->streams[i]->attached_pic;
237 copy.buf = av_buffer_ref(copy.buf);
239 return AVERROR(ENOMEM);
241 add_to_pktbuf(&s->internal->raw_packet_buffer, ©,
242 &s->internal->raw_packet_buffer_end);
/* Open an input stream, read the header, and export stream information.
 * On failure the context is freed unless the caller supplied custom I/O.
 * NOTE(review): many lines (goto fail targets, braces) are elided in this
 * extract; code left byte-identical. */
247 int avformat_open_input(AVFormatContext **ps, const char *filename,
248 AVInputFormat *fmt, AVDictionary **options)
250 AVFormatContext *s = *ps;
252 AVDictionary *tmp = NULL;
253 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
/* Allocate a context when the caller passed *ps == NULL. */
255 if (!s && !(s = avformat_alloc_context()))
256 return AVERROR(ENOMEM);
/* Work on a copy of the options so unrecognized entries can be returned. */
261 av_dict_copy(&tmp, *options, 0);
263 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
266 if ((ret = init_input(s, filename, &tmp)) < 0)
269 /* Check filename in case an image number is expected. */
270 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
271 if (!av_filename_number_test(filename)) {
272 ret = AVERROR(EINVAL);
277 s->duration = s->start_time = AV_NOPTS_VALUE;
278 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
280 /* Allocate private data. */
281 if (s->iformat->priv_data_size > 0) {
282 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
283 ret = AVERROR(ENOMEM);
/* Demuxers with an AVClass get option defaults + caller options applied. */
286 if (s->iformat->priv_class) {
287 *(const AVClass **) s->priv_data = s->iformat->priv_class;
288 av_opt_set_defaults(s->priv_data);
289 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
294 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
296 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
298 if (s->iformat->read_header)
299 if ((ret = s->iformat->read_header(s)) < 0)
/* Attached pictures from ID3v2 APIC frames become per-stream pictures. */
302 if (id3v2_extra_meta &&
303 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
305 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
307 if ((ret = queue_attached_pictures(s)) < 0)
/* Remember where the demuxed data starts, after any header. */
310 if (s->pb && !s->internal->data_offset)
311 s->internal->data_offset = avio_tell(s->pb);
313 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
316 av_dict_free(options);
/* Failure path: release ID3 metadata, close I/O we opened, free context. */
323 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
325 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
327 avformat_free_context(s);
332 /*******************************************************/
/* Accumulate packet data for a stream whose codec is still AV_CODEC_ID_PROBE
 * and attempt detection. Called with pkt == NULL to flush (forces a final
 * probe attempt with whatever was buffered). */
334 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
336 if (st->codec->codec_id == AV_CODEC_ID_PROBE) {
337 AVProbeData *pd = &st->probe_data;
338 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
/* Append the new packet plus probing padding to the probe buffer. */
343 if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
344 AVPROBE_PADDING_SIZE)) < 0)
346 memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
347 pd->buf_size += pkt->size;
348 memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
/* Flush path (pkt == NULL): no more packets will arrive. */
350 st->probe_packets = 0;
352 av_log(s, AV_LOG_ERROR,
353 "nothing to probe for stream %d\n", st->index);
/* Re-probe when out of packets or when the buffer crossed a power-of-two
 * size boundary (avoids probing after every single packet). */
358 if (!st->probe_packets ||
359 av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
360 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0
361 ? AVPROBE_SCORE_MAX / 4 : 0);
362 if (st->codec->codec_id != AV_CODEC_ID_PROBE) {
365 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
/* Low-level packet read: return a buffered raw packet if one is ready,
 * otherwise pull from the demuxer, apply forced codec ids, optionally drop
 * corrupt packets, and feed codec probing for streams still marked
 * AV_CODEC_ID_PROBE. Several lines (loop header, error paths) are elided
 * in this extract. */
372 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
378 AVPacketList *pktl = s->internal->raw_packet_buffer;
/* A buffered packet is returned once its stream no longer needs probing
 * or the probe budget/buffer space is exhausted. */
382 st = s->streams[pkt->stream_index];
383 if (st->codec->codec_id != AV_CODEC_ID_PROBE ||
384 !st->probe_packets ||
385 s->internal->raw_packet_buffer_remaining_size < pkt->size) {
387 if (st->probe_packets)
388 if ((err = probe_codec(s, st, NULL)) < 0)
390 pd = &st->probe_data;
/* Unlink from the raw buffer and release its accounted size. */
393 s->internal->raw_packet_buffer = pktl->next;
394 s->internal->raw_packet_buffer_remaining_size += pkt->size;
403 ret = s->iformat->read_packet(s, pkt);
/* On EOF (not EAGAIN) flush probing for every stream still pending. */
405 if (!pktl || ret == AVERROR(EAGAIN))
407 for (i = 0; i < s->nb_streams; i++) {
409 if (st->probe_packets)
410 if ((err = probe_codec(s, st, NULL)) < 0)
416 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
417 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
418 av_log(s, AV_LOG_WARNING,
419 "Dropped corrupted packet (stream = %d)\n",
425 st = s->streams[pkt->stream_index];
/* Caller-forced codec ids override whatever the demuxer reported. */
427 switch (st->codec->codec_type) {
428 case AVMEDIA_TYPE_VIDEO:
429 if (s->video_codec_id)
430 st->codec->codec_id = s->video_codec_id;
432 case AVMEDIA_TYPE_AUDIO:
433 if (s->audio_codec_id)
434 st->codec->codec_id = s->audio_codec_id;
436 case AVMEDIA_TYPE_SUBTITLE:
437 if (s->subtitle_codec_id)
438 st->codec->codec_id = s->subtitle_codec_id;
/* Packet needs probing: stash it in the raw buffer and feed the prober. */
442 if (!pktl && (st->codec->codec_id != AV_CODEC_ID_PROBE ||
446 add_to_pktbuf(&s->internal->raw_packet_buffer, pkt,
447 &s->internal->raw_packet_buffer_end);
448 s->internal->raw_packet_buffer_remaining_size -= pkt->size;
450 if ((err = probe_codec(s, st, pkt)) < 0)
455 /**********************************************************/
458 * Compute the frame duration as the fraction *pnum / *pden seconds;
 * both are set to 0 if the duration is not available.
460 void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st,
461 AVCodecParserContext *pc, AVPacket *pkt)
/* For demuxing use the codec frame rate; for muxing derive it from the
 * codec time base. */
463 AVRational codec_framerate = s->iformat ? st->codec->framerate :
464 av_inv_q(st->codec->time_base);
469 switch (st->codec->codec_type) {
470 case AVMEDIA_TYPE_VIDEO:
/* Preference order: average frame rate, then a sane stream time base
 * (num*1000 > den guards against bogus very fine time bases), then the
 * codec frame rate adjusted for repeated pictures. */
471 if (st->avg_frame_rate.num) {
472 *pnum = st->avg_frame_rate.den;
473 *pden = st->avg_frame_rate.num;
474 } else if (st->time_base.num * 1000LL > st->time_base.den) {
475 *pnum = st->time_base.num;
476 *pden = st->time_base.den;
477 } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
478 *pnum = codec_framerate.den;
479 *pden = codec_framerate.num;
/* repeat_pict lengthens the frame; divide pden if pnum would overflow. */
480 if (pc && pc->repeat_pict) {
481 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
482 *pden /= 1 + pc->repeat_pict;
484 *pnum *= 1 + pc->repeat_pict;
486 /* If this codec can be interlaced or progressive then we need
487 * a parser to compute duration of a packet. Thus if we have
488 * no parser in such case leave duration undefined. */
489 if (st->codec->ticks_per_frame > 1 && !pc)
493 case AVMEDIA_TYPE_AUDIO:
494 frame_size = av_get_audio_frame_duration(st->codec, pkt->size);
495 if (frame_size <= 0 || st->codec->sample_rate <= 0)
498 *pden = st->codec->sample_rate;
/* Return whether the codec produces only intra-coded frames. Video codecs
 * without AV_CODEC_PROP_INTRA_ONLY report 0; the elided lines presumably
 * return 1 otherwise — TODO confirm against full source. */
505 static int is_intra_only(enum AVCodecID id)
507 const AVCodecDescriptor *d = avcodec_descriptor_get(id);
510 if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
/* Once the first valid dts for a stream is known, shift the timestamps of
 * all packets already buffered for that stream so they start at first_dts,
 * and record the stream's start time. */
515 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
516 int64_t dts, int64_t pts)
518 AVStream *st = s->streams[stream_index];
519 AVPacketList *pktl = s->internal->packet_buffer;
/* Nothing to do if first_dts is already set or we lack the inputs. */
521 if (st->first_dts != AV_NOPTS_VALUE ||
522 dts == AV_NOPTS_VALUE ||
523 st->cur_dts == AV_NOPTS_VALUE)
526 st->first_dts = dts - st->cur_dts;
529 for (; pktl; pktl = pktl->next) {
530 if (pktl->pkt.stream_index != stream_index)
532 // FIXME: think more about this check
/* Only shift pts when it equals dts (i.e. was derived from it). */
533 if (pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
534 pktl->pkt.pts += st->first_dts;
536 if (pktl->pkt.dts != AV_NOPTS_VALUE)
537 pktl->pkt.dts += st->first_dts;
539 if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
540 st->start_time = pktl->pkt.pts;
/* Fall back to the current packet's pts if no buffered pts was usable. */
542 if (st->start_time == AV_NOPTS_VALUE)
543 st->start_time = pts;
/* Fill in missing dts/pts/duration on already-buffered packets of a stream
 * once a per-packet duration becomes known, advancing a running cur_dts. */
546 static void update_initial_durations(AVFormatContext *s, AVStream *st,
547 int stream_index, int duration)
549 AVPacketList *pktl = s->internal->packet_buffer;
/* If first_dts is known, verify the buffered packets are still "blank"
 * (pts==dts==NOPTS, no duration); bail out otherwise. */
552 if (st->first_dts != AV_NOPTS_VALUE) {
553 cur_dts = st->first_dts;
554 for (; pktl; pktl = pktl->next) {
555 if (pktl->pkt.stream_index == stream_index) {
556 if (pktl->pkt.pts != pktl->pkt.dts ||
557 pktl->pkt.dts != AV_NOPTS_VALUE ||
/* Rewind and walk the buffer again, this time assigning timestamps. */
563 pktl = s->internal->packet_buffer;
564 st->first_dts = cur_dts;
565 } else if (st->cur_dts)
568 for (; pktl; pktl = pktl->next) {
569 if (pktl->pkt.stream_index != stream_index)
571 if (pktl->pkt.pts == pktl->pkt.dts &&
572 pktl->pkt.dts == AV_NOPTS_VALUE &&
573 !pktl->pkt.duration) {
574 pktl->pkt.dts = cur_dts;
/* pts can only mirror dts when the codec has no B-frames. */
575 if (!st->codec->has_b_frames)
576 pktl->pkt.pts = cur_dts;
/* NOTE(review): audio exclusion here — duration applied only to
 * non-audio streams per the visible condition. */
578 if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
579 pktl->pkt.duration = duration;
583 if (st->first_dts == AV_NOPTS_VALUE)
584 st->cur_dts = cur_dts;
/* Fill in missing pts/dts/duration on a freshly demuxed (or parsed) packet
 * using stream state, the parser, and heuristics for B-frame reordering and
 * timestamp wrapping. Heavily elided in this extract; code left
 * byte-identical. */
587 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
588 AVCodecParserContext *pc, AVPacket *pkt)
590 int num, den, presentation_delayed, delay, i;
593 if (s->flags & AVFMT_FLAG_NOFILLIN)
596 if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
597 pkt->dts = AV_NOPTS_VALUE;
599 /* do we have a video B-frame ? */
600 delay = st->codec->has_b_frames;
601 presentation_delayed = 0;
603 /* XXX: need has_b_frame, but cannot get it if the codec is
606 pc && pc->pict_type != AV_PICTURE_TYPE_B)
607 presentation_delayed = 1;
/* Undo a dts wrap: if dts is more than half the wrap range above pts,
 * it wrapped around while pts did not. */
609 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
610 st->pts_wrap_bits < 63 &&
611 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
612 pkt->dts -= 1LL << st->pts_wrap_bits;
615 /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
616 * We take the conservative approach and discard both.
617 * Note: If this is misbehaving for an H.264 file, then possibly
618 * presentation_delayed is not set correctly. */
619 if (delay == 1 && pkt->dts == pkt->pts &&
620 pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
621 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
622 pkt->dts = AV_NOPTS_VALUE;
/* Derive a duration from the stream/codec frame rate when absent. */
625 if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
626 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
628 pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
629 den * (int64_t) st->time_base.num,
632 if (pkt->duration != 0 && s->internal->packet_buffer)
633 update_initial_durations(s, st, pkt->stream_index,
638 /* Correct timestamps with byte offset if demuxers only have timestamps
639 * on packet boundaries */
640 if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
641 /* this will estimate bitrate based on this frame's duration and size */
642 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
643 if (pkt->pts != AV_NOPTS_VALUE)
645 if (pkt->dts != AV_NOPTS_VALUE)
649 /* This may be redundant, but it should not hurt. */
650 if (pkt->dts != AV_NOPTS_VALUE &&
651 pkt->pts != AV_NOPTS_VALUE &&
653 presentation_delayed = 1;
656 "IN delayed:%d pts:%"PRId64", dts:%"PRId64" "
657 "cur_dts:%"PRId64" st:%d pc:%p\n",
658 presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
659 pkt->stream_index, pc);
660 /* Interpolate PTS and DTS if they are not present. We skip H.264
661 * currently because delay and has_b_frames are not reliably set. */
662 if ((delay == 0 || (delay == 1 && pc)) &&
663 st->codec->codec_id != AV_CODEC_ID_H264) {
664 if (presentation_delayed) {
665 /* DTS = decompression timestamp */
666 /* PTS = presentation timestamp */
667 if (pkt->dts == AV_NOPTS_VALUE)
668 pkt->dts = st->last_IP_pts;
669 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
670 if (pkt->dts == AV_NOPTS_VALUE)
671 pkt->dts = st->cur_dts;
673 /* This is tricky: the dts must be incremented by the duration
674 * of the frame we are displaying, i.e. the last I- or P-frame. */
675 if (st->last_IP_duration == 0)
676 st->last_IP_duration = pkt->duration;
677 if (pkt->dts != AV_NOPTS_VALUE)
678 st->cur_dts = pkt->dts + st->last_IP_duration;
679 st->last_IP_duration = pkt->duration;
680 st->last_IP_pts = pkt->pts;
681 /* Cannot compute PTS if not present (we can compute it only
682 * by knowing the future. */
683 } else if (pkt->pts != AV_NOPTS_VALUE ||
684 pkt->dts != AV_NOPTS_VALUE ||
686 st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
/* Audio packets may need an on-the-fly duration estimate. */
687 int duration = pkt->duration;
688 if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
689 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
691 duration = av_rescale_rnd(1,
692 num * (int64_t) st->time_base.den,
693 den * (int64_t) st->time_base.num,
695 if (duration != 0 && s->internal->packet_buffer)
696 update_initial_durations(s, st, pkt->stream_index,
701 if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
703 /* presentation is not delayed : PTS and DTS are the same */
704 if (pkt->pts == AV_NOPTS_VALUE)
706 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
708 if (pkt->pts == AV_NOPTS_VALUE)
709 pkt->pts = st->cur_dts;
711 if (pkt->pts != AV_NOPTS_VALUE)
712 st->cur_dts = pkt->pts + duration;
/* Reorder-buffer insertion sort: smallest pending pts becomes the dts. */
717 if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
718 st->pts_buffer[0] = pkt->pts;
719 for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
720 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
721 if (pkt->dts == AV_NOPTS_VALUE)
722 pkt->dts = st->pts_buffer[0];
723 // We skipped it above so we try here.
724 if (st->codec->codec_id == AV_CODEC_ID_H264)
725 // This should happen on the first packet
726 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
727 if (pkt->dts > st->cur_dts)
728 st->cur_dts = pkt->dts;
732 "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
733 presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
736 if (is_intra_only(st->codec->codec_id))
737 pkt->flags |= AV_PKT_FLAG_KEY;
739 pkt->convergence_duration = pc->convergence_duration;
/* Drain an AVPacketList: free every queued packet and node (loop header
 * and tail-pointer reset elided in this extract). */
742 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
745 AVPacketList *pktl = *pkt_buf;
746 *pkt_buf = pktl->next;
747 av_free_packet(&pktl->pkt);
754 * Parse a packet, add all split parts to parse_queue.
756 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
758 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
760 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
761 AVStream *st = s->streams[stream_index];
762 uint8_t *data = pkt ? pkt->data : NULL;
763 int size = pkt ? pkt->size : 0;
764 int ret = 0, got_output = 0;
/* When pkt == NULL, an empty flush packet drives the parser to emit its
 * remaining buffered frames. */
767 av_init_packet(&flush_pkt);
772 while (size > 0 || (pkt == &flush_pkt && got_output)) {
775 av_init_packet(&out_pkt);
776 len = av_parser_parse2(st->parser, st->codec,
777 &out_pkt.data, &out_pkt.size, data, size,
778 pkt->pts, pkt->dts, pkt->pos);
/* Timestamps must only be attached to the first output frame. */
780 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
781 /* increment read pointer */
785 got_output = !!out_pkt.size;
/* Transfer side data ownership to the first emitted frame. */
790 if (pkt->side_data) {
791 out_pkt.side_data = pkt->side_data;
792 out_pkt.side_data_elems = pkt->side_data_elems;
793 pkt->side_data = NULL;
794 pkt->side_data_elems = 0;
797 /* set the duration */
798 out_pkt.duration = 0;
799 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
800 if (st->codec->sample_rate > 0) {
802 av_rescale_q_rnd(st->parser->duration,
803 (AVRational) { 1, st->codec->sample_rate },
809 out_pkt.stream_index = st->index;
810 out_pkt.pts = st->parser->pts;
811 out_pkt.dts = st->parser->dts;
812 out_pkt.pos = st->parser->pos;
/* Keyframe: explicit from the parser, or inferred from an I picture. */
814 if (st->parser->key_frame == 1 ||
815 (st->parser->key_frame == -1 &&
816 st->parser->pict_type == AV_PICTURE_TYPE_I))
817 out_pkt.flags |= AV_PKT_FLAG_KEY;
819 compute_pkt_fields(s, st, st->parser, &out_pkt);
821 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
822 out_pkt.flags & AV_PKT_FLAG_KEY) {
823 ff_reduce_index(s, st->index);
824 av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
825 0, 0, AVINDEX_KEYFRAME);
/* If the parser returned the input untouched, reuse its buffer instead
 * of copying. */
828 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
829 out_pkt.buf = pkt->buf;
831 #if FF_API_DESTRUCT_PACKET
832 FF_DISABLE_DEPRECATION_WARNINGS
833 out_pkt.destruct = pkt->destruct;
834 pkt->destruct = NULL;
835 FF_ENABLE_DEPRECATION_WARNINGS
838 if ((ret = av_dup_packet(&out_pkt)) < 0)
841 if (!add_to_pktbuf(&s->internal->parse_queue, &out_pkt, &s->internal->parse_queue_end)) {
842 av_free_packet(&out_pkt);
843 ret = AVERROR(ENOMEM);
848 /* end of the stream => close and free the parser */
849 if (pkt == &flush_pkt) {
850 av_parser_close(st->parser);
/* Pop the head packet of an AVPacketList into *pkt; clears the tail pointer
 * when the list becomes empty. The buffer must be non-empty. */
859 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
860 AVPacketList **pkt_buffer_end,
864 av_assert0(*pkt_buffer);
867 *pkt_buffer = pktl->next;
869 *pkt_buffer_end = NULL;
/* Core demux loop: read raw packets, run them through the per-stream parser
 * when needed, fill in timestamps, maintain the generic index, and surface
 * demuxer metadata updates. Elided lines left byte-identical. */
874 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
876 int ret = 0, i, got_packet = 0;
877 AVDictionary *metadata = NULL;
881 while (!got_packet && !s->internal->parse_queue) {
885 /* read next packet */
886 ret = ff_read_packet(s, &cur_pkt);
888 if (ret == AVERROR(EAGAIN))
890 /* flush the parsers */
891 for (i = 0; i < s->nb_streams; i++) {
893 if (st->parser && st->need_parsing)
894 parse_packet(s, NULL, st->index);
896 /* all remaining packets are now in parse_queue =>
897 * really terminate parsing */
901 st = s->streams[cur_pkt.stream_index];
/* Diagnose demuxers emitting pts < dts (never valid). */
903 if (cur_pkt.pts != AV_NOPTS_VALUE &&
904 cur_pkt.dts != AV_NOPTS_VALUE &&
905 cur_pkt.pts < cur_pkt.dts) {
906 av_log(s, AV_LOG_WARNING,
907 "Invalid timestamps stream=%d, pts=%"PRId64", "
908 "dts=%"PRId64", size=%d\n",
909 cur_pkt.stream_index, cur_pkt.pts,
910 cur_pkt.dts, cur_pkt.size);
912 if (s->debug & FF_FDEBUG_TS)
913 av_log(s, AV_LOG_DEBUG,
914 "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", "
915 "size=%d, duration=%d, flags=%d\n",
916 cur_pkt.stream_index, cur_pkt.pts, cur_pkt.dts,
917 cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
/* Lazily create the parser the first time a stream needs parsing. */
919 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
920 st->parser = av_parser_init(st->codec->codec_id);
922 /* no parser available: just output the raw packets */
923 st->need_parsing = AVSTREAM_PARSE_NONE;
924 else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
925 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
926 else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
927 st->parser->flags |= PARSER_FLAG_ONCE;
930 if (!st->need_parsing || !st->parser) {
931 /* no parsing needed: we just output the packet as is */
933 compute_pkt_fields(s, st, NULL, pkt);
934 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
935 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
936 ff_reduce_index(s, st->index);
937 av_add_index_entry(st, pkt->pos, pkt->dts,
938 0, 0, AVINDEX_KEYFRAME);
941 } else if (st->discard < AVDISCARD_ALL) {
942 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
946 av_free_packet(&cur_pkt);
950 if (!got_packet && s->internal->parse_queue)
951 ret = read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);
/* Propagate demuxer-updated metadata to the context and flag the event. */
953 av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
955 s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
956 av_dict_copy(&s->metadata, metadata, 0);
957 av_dict_free(&metadata);
958 av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
961 if (s->debug & FF_FDEBUG_TS)
962 av_log(s, AV_LOG_DEBUG,
963 "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", "
964 "size=%d, duration=%d, flags=%d\n",
965 pkt->stream_index, pkt->pts, pkt->dts,
966 pkt->size, pkt->duration, pkt->flags);
/* Public read API. Without AVFMT_FLAG_GENPTS this is a thin wrapper over
 * the internal buffer / read_frame_internal(); with it, packets are buffered
 * and missing pts values are generated from later dts values on the same
 * stream before a packet is released. */
971 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
973 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
977 return s->internal->packet_buffer
978 ? read_from_packet_buffer(&s->internal->packet_buffer,
979 &s->internal->packet_buffer_end, pkt)
980 : read_frame_internal(s, pkt);
984 AVPacketList *pktl = s->internal->packet_buffer;
987 AVPacket *next_pkt = &pktl->pkt;
989 if (next_pkt->dts != AV_NOPTS_VALUE) {
990 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
/* Scan the buffer for a later same-stream packet whose dts (modulo
 * timestamp wrap) supplies the missing pts. */
991 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
992 if (pktl->pkt.stream_index == next_pkt->stream_index &&
993 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
994 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
996 next_pkt->pts = pktl->pkt.dts;
1000 pktl = s->internal->packet_buffer;
1003 /* read packet from packet buffer, if there is data */
1004 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1005 next_pkt->dts != AV_NOPTS_VALUE && !eof))
1006 return read_from_packet_buffer(&s->internal->packet_buffer,
1007 &s->internal->packet_buffer_end, pkt)
1010 ret = read_frame_internal(s, pkt);
1012 if (pktl && ret != AVERROR(EAGAIN)) {
/* Buffer the freshly read packet for the genpts pass above. */
1019 if (av_dup_packet(add_to_pktbuf(&s->internal->packet_buffer, pkt,
1020 &s->internal->packet_buffer_end)) < 0)
1021 return AVERROR(ENOMEM);
1025 /* XXX: suppress the packet queue */
/* Drop every buffered packet (parse queue, reordered buffer, raw buffer)
 * and reset the raw-buffer size budget; used on flush/seek. */
1026 static void flush_packet_queue(AVFormatContext *s)
1028 free_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end);
1029 free_packet_buffer(&s->internal->packet_buffer, &s->internal->packet_buffer_end);
1030 free_packet_buffer(&s->internal->raw_packet_buffer, &s->internal->raw_packet_buffer_end);
1032 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1035 /*******************************************************/
/* Pick a default stream: the first real video stream (attached pictures
 * excluded) if any, otherwise the first audio stream, otherwise stream 0. */
1038 int av_find_default_stream_index(AVFormatContext *s)
1040 int first_audio_index = -1;
1044 if (s->nb_streams <= 0)
1046 for (i = 0; i < s->nb_streams; i++) {
1048 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1049 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1052 if (first_audio_index < 0 &&
1053 st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1054 first_audio_index = i;
1056 return first_audio_index >= 0 ? first_audio_index : 0;
1059 /** Flush the frame reader. */
/* Discards all buffered packets and resets per-stream parse/timestamp state
 * so reading can restart cleanly (e.g. after a seek). */
1060 void ff_read_frame_flush(AVFormatContext *s)
1065 flush_packet_queue(s);
1067 /* Reset read state for each stream. */
1068 for (i = 0; i < s->nb_streams; i++) {
1072 av_parser_close(st->parser);
1075 st->last_IP_pts = AV_NOPTS_VALUE;
1076 /* We set the current DTS to an unspecified origin. */
1077 st->cur_dts = AV_NOPTS_VALUE;
1079 st->probe_packets = MAX_PROBE_PACKETS;
/* Clear the pts reorder buffer. */
1081 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1082 st->pts_buffer[j] = AV_NOPTS_VALUE;
/* After a seek, set every stream's cur_dts to the given timestamp, rescaled
 * from ref_st's time base into each stream's own time base. */
1086 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1090 for (i = 0; i < s->nb_streams; i++) {
1091 AVStream *st = s->streams[i];
1094 av_rescale(timestamp,
1095 st->time_base.den * (int64_t) ref_st->time_base.num,
1096 st->time_base.num * (int64_t) ref_st->time_base.den);
/* Keep the seek index within s->max_index_size bytes by discarding every
 * second entry once the limit is reached (halves granularity, not range). */
1100 void ff_reduce_index(AVFormatContext *s, int stream_index)
1102 AVStream *st = s->streams[stream_index];
1103 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1105 if ((unsigned) st->nb_index_entries >= max_entries) {
1107 for (i = 0; 2 * i < st->nb_index_entries; i++)
1108 st->index_entries[i] = st->index_entries[2 * i];
1109 st->nb_index_entries = i;
/* Insert (or update) an index entry, keeping the entry array sorted by
 * timestamp. Returns the entry's index (error-return lines elided in this
 * extract). */
1113 int ff_add_index_entry(AVIndexEntry **index_entries,
1114 int *nb_index_entries,
1115 unsigned int *index_entries_allocated_size,
1116 int64_t pos, int64_t timestamp,
1117 int size, int distance, int flags)
1119 AVIndexEntry *entries, *ie;
/* Guard against overflow of the reallocation size computation. */
1122 if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1125 entries = av_fast_realloc(*index_entries,
1126 index_entries_allocated_size,
1127 (*nb_index_entries + 1) *
1128 sizeof(AVIndexEntry))
1132 *index_entries = entries;
1134 index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1135 timestamp, AVSEEK_FLAG_ANY);
/* Not found: append at the end (must keep sort order). */
1138 index = (*nb_index_entries)++;
1139 ie = &entries[index];
1140 assert(index == 0 || ie[-1].timestamp < timestamp);
1142 ie = &entries[index];
/* Found a neighbor: either shift to make room for a distinct timestamp,
 * or merge with an existing same-timestamp entry. */
1143 if (ie->timestamp != timestamp) {
1144 if (ie->timestamp <= timestamp)
1146 memmove(entries + index + 1, entries + index,
1147 sizeof(AVIndexEntry) * (*nb_index_entries - index));
1148 (*nb_index_entries)++;
1149 } else if (ie->pos == pos && distance < ie->min_distance)
1150 // do not reduce the distance
1151 distance = ie->min_distance;
1155 ie->timestamp = timestamp;
1156 ie->min_distance = distance;
/* Public wrapper: add an index entry to a stream's own index arrays. */
1163 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1164 int size, int distance, int flags)
1166 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1167 &st->index_entries_allocated_size, pos,
1168 timestamp, size, distance, flags);
/* Binary-search a sorted index for wanted_timestamp. AVSEEK_FLAG_BACKWARD
 * picks the entry at or before the timestamp, otherwise at or after; unless
 * AVSEEK_FLAG_ANY is set the result is then walked to the nearest keyframe
 * entry. Returns -1 when out of range (elided lines handle that). */
1171 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1172 int64_t wanted_timestamp, int flags)
1180 // Optimize appending index entries at the end.
1181 if (b && entries[b - 1].timestamp < wanted_timestamp)
1186 timestamp = entries[m].timestamp;
1187 if (timestamp >= wanted_timestamp)
1189 if (timestamp <= wanted_timestamp)
1192 m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1194 if (!(flags & AVSEEK_FLAG_ANY))
1195 while (m >= 0 && m < nb_entries &&
1196 !(entries[m].flags & AVINDEX_KEYFRAME))
1197 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1199 if (m == nb_entries)
/* Public wrapper over ff_index_search_timestamp() for a stream's index. */
1204 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1206 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1207 wanted_timestamp, flags);
/* Seek using a binary search over read_timestamp(), seeding the search
 * bounds from the stream's index when entries are available, then perform
 * the byte seek and update all streams' cur_dts. */
1210 int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
1211 int64_t target_ts, int flags)
1213 AVInputFormat *avif = s->iformat;
1214 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1215 int64_t ts_min, ts_max, ts;
1220 if (stream_index < 0)
1223 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1226 ts_min = AV_NOPTS_VALUE;
1227 pos_limit = -1; // GCC falsely says it may be uninitialized.
1229 st = s->streams[stream_index];
1230 if (st->index_entries) {
1233 /* FIXME: Whole function must be checked for non-keyframe entries in
1234 * index case, especially read_timestamp(). */
/* Lower bound: closest index entry at or before the target. */
1235 index = av_index_search_timestamp(st, target_ts,
1236 flags | AVSEEK_FLAG_BACKWARD);
1237 index = FFMAX(index, 0);
1238 e = &st->index_entries[index];
1240 if (e->timestamp <= target_ts || e->pos == e->min_distance) {
1242 ts_min = e->timestamp;
1243 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* Upper bound: closest index entry at or after the target. */
1249 index = av_index_search_timestamp(st, target_ts,
1250 flags & ~AVSEEK_FLAG_BACKWARD);
1251 assert(index < st->nb_index_entries);
1253 e = &st->index_entries[index];
1254 assert(e->timestamp >= target_ts);
1256 ts_max = e->timestamp;
1257 pos_limit = pos_max - e->min_distance;
1258 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
1259 " dts_max=%"PRId64"\n", pos_max, pos_limit, ts_max);
1263 pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
1264 ts_min, ts_max, flags, &ts, avif->read_timestamp);
/* Do the actual byte seek, then align every stream's dts to the result. */
1269 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1272 ff_update_cur_dts(s, st, ts);
/* Generic timestamp search: given (possibly unknown) min/max bounds, locate
 * the byte position whose timestamp brackets target_ts, using interpolation,
 * bisection, then linear search. Returns the chosen position and stores the
 * matched timestamp in *ts_ret. Elided lines left byte-identical. */
1277 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1278 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1279 int64_t ts_min, int64_t ts_max,
1280 int flags, int64_t *ts_ret,
1281 int64_t (*read_timestamp)(struct AVFormatContext *, int,
1282 int64_t *, int64_t))
1285 int64_t start_pos, filesize;
1288 av_dlog(s, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* Establish the lower bound at the start of demuxed data when unknown. */
1290 if (ts_min == AV_NOPTS_VALUE) {
1291 pos_min = s->internal->data_offset;
1292 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1293 if (ts_min == AV_NOPTS_VALUE)
/* Establish the upper bound by stepping back from end of file until a
 * timestamp is found. */
1297 if (ts_max == AV_NOPTS_VALUE) {
1299 filesize = avio_size(s->pb);
1300 pos_max = filesize - 1;
1303 ts_max = read_timestamp(s, stream_index, &pos_max,
1306 } while (ts_max == AV_NOPTS_VALUE && pos_max >= step);
1307 if (ts_max == AV_NOPTS_VALUE)
/* Extend ts_max to cover any trailing frames after pos_max. */
1311 int64_t tmp_pos = pos_max + 1;
1312 int64_t tmp_ts = read_timestamp(s, stream_index,
1313 &tmp_pos, INT64_MAX);
1314 if (tmp_ts == AV_NOPTS_VALUE)
1318 if (tmp_pos >= filesize)
1321 pos_limit = pos_max;
1324 if (ts_min > ts_max)
1326 else if (ts_min == ts_max)
1327 pos_limit = pos_min;
/* Main refinement loop: shrink [pos_min, pos_limit] around the target. */
1330 while (pos_min < pos_limit) {
1331 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64
1332 " dts_max=%"PRId64"\n", pos_min, pos_max, ts_min, ts_max);
1333 assert(pos_limit <= pos_max);
1335 if (no_change == 0) {
1336 int64_t approximate_keyframe_distance = pos_max - pos_limit;
1337 // interpolate position (better than dichotomy)
1338 pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
1340 pos_min - approximate_keyframe_distance;
1341 } else if (no_change == 1) {
1342 // bisection if interpolation did not change min / max pos last time
1343 pos = (pos_min + pos_limit) >> 1;
1345 /* linear search if bisection failed, can only happen if there
1346 * are very few or no keyframes between min/max */
1351 else if (pos > pos_limit)
1355 // May pass pos_limit instead of -1.
1356 ts = read_timestamp(s, stream_index, &pos, INT64_MAX);
1361 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64
1362 " target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1363 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1364 pos_limit, start_pos, no_change);
1365 if (ts == AV_NOPTS_VALUE) {
1366 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1369 assert(ts != AV_NOPTS_VALUE);
/* Narrow whichever bound the probed timestamp falls on. */
1370 if (target_ts <= ts) {
1371 pos_limit = start_pos - 1;
1375 if (target_ts >= ts) {
/* Direction flag decides which bound is the final answer. */
1381 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1382 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1384 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
/* NOTE(review): this re-reads &pos_min where upstream reads the max bound;
 * adjacent lines are elided here, so left untouched — verify against full
 * source. */
1386 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1387 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1388 pos, ts_min, target_ts, ts_max);
/* Seek to an absolute byte position, clamped to [data_offset, filesize-1].
 * (Listing elides the clamp's first branch and the return.) */
1393 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1394 int64_t pos, int flags)
1396 int64_t pos_min, pos_max;
1398 pos_min = s->internal->data_offset;
1399 pos_max = avio_size(s->pb) - 1;
1403 else if (pos > pos_max)
1406 avio_seek(s->pb, pos, SEEK_SET);
/* Generic index-based seek: look the timestamp up in the stream's index; if
 * it is past the end of the index, read packets forward (building the index
 * as a side effect) until the target is reached, then retry the lookup.
 * NOTE(review): listing has elided lines; gaps in the embedded numbering
 * mark missing source lines. */
1411 static int seek_frame_generic(AVFormatContext *s, int stream_index,
1412 int64_t timestamp, int flags)
1419 st = s->streams[stream_index];
1421 index = av_index_search_timestamp(st, timestamp, flags);
/* Target precedes the first indexed entry: treat as index miss. */
1423 if (index < 0 && st->nb_index_entries &&
1424 timestamp < st->index_entries[0].timestamp)
1427 if (index < 0 || index == st->nb_index_entries - 1) {
/* Resume reading from the last indexed position (or the start of data if
 * the index is empty) and scan forward for a keyframe past the target. */
1430 if (st->nb_index_entries) {
1431 assert(st->index_entries);
1432 ie = &st->index_entries[st->nb_index_entries - 1];
1433 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1435 ff_update_cur_dts(s, st, ie->timestamp);
1437 if ((ret = avio_seek(s->pb, s->internal->data_offset, SEEK_SET)) < 0)
1443 read_status = av_read_frame(s, &pkt);
1444 } while (read_status == AVERROR(EAGAIN));
1445 if (read_status < 0)
1447 av_free_packet(&pkt);
1448 if (stream_index == pkt.stream_index)
1449 if ((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
/* Index may have grown while scanning; look the timestamp up again. */
1452 index = av_index_search_timestamp(st, timestamp, flags);
1457 ff_read_frame_flush(s);
1458 if (s->iformat->read_seek)
1459 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1461 ie = &st->index_entries[index];
1462 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1464 ff_update_cur_dts(s, st, ie->timestamp);
/* Dispatch a seek request: byte seek, then the demuxer's own read_seek(),
 * then binary search via read_timestamp(), then the generic index-based
 * seek — whichever the format supports first.  (Listing elides lines.) */
1469 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1470 int64_t timestamp, int flags)
1475 if (flags & AVSEEK_FLAG_BYTE) {
1476 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1478 ff_read_frame_flush(s);
1479 return seek_frame_byte(s, stream_index, timestamp, flags);
/* No stream given: pick a default one and rescale the AV_TIME_BASE
 * timestamp into that stream's time base. */
1482 if (stream_index < 0) {
1483 stream_index = av_find_default_stream_index(s);
1484 if (stream_index < 0)
1487 st = s->streams[stream_index];
1488 /* timestamp for default must be expressed in AV_TIME_BASE units */
1489 timestamp = av_rescale(timestamp, st->time_base.den,
1490 AV_TIME_BASE * (int64_t) st->time_base.num);
1493 /* first, we try the format specific seek */
1494 if (s->iformat->read_seek) {
1495 ff_read_frame_flush(s);
1496 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1502 if (s->iformat->read_timestamp &&
1503 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1504 ff_read_frame_flush(s);
1505 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1506 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1507 ff_read_frame_flush(s);
1508 return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public seek entry point: perform the seek, then (on success, per the
 * elided condition) re-queue attached pictures so they are delivered again. */
1513 int av_seek_frame(AVFormatContext *s, int stream_index,
1514 int64_t timestamp, int flags)
1516 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1519 ret = queue_attached_pictures(s);
/* New-style seek API: seek to ts within [min_ts, max_ts].  Prefers the
 * demuxer's read_seek2(); otherwise falls back to av_seek_frame() with a
 * direction flag chosen by which bound ts is closer to.
 * (Listing elides lines.) */
1524 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
1525 int64_t ts, int64_t max_ts, int flags)
1527 if (min_ts > ts || max_ts < ts)
1530 if (s->iformat->read_seek2) {
1532 ff_read_frame_flush(s);
1533 ret = s->iformat->read_seek2(s, stream_index, min_ts,
1537 ret = queue_attached_pictures(s);
1541 if (s->iformat->read_timestamp) {
1542 // try to seek via read_timestamp()
1545 // Fall back on old API if new is not implemented but old is.
1546 // Note the old API has somewhat different semantics.
/* Unsigned arithmetic here is deliberate: it tolerates AV_NOPTS-like
 * extreme bounds without signed overflow. */
1547 if (s->iformat->read_seek || 1)
1548 return av_seek_frame(s, stream_index, ts,
1549 flags | ((uint64_t) ts - min_ts >
1550 (uint64_t) max_ts - ts
1551 ? AVSEEK_FLAG_BACKWARD : 0));
1553 // try some generic seek like seek_frame_generic() but with new ts semantics
1556 /*******************************************************/
1559 * Return TRUE if the container has an accurate duration in any stream.
1561 * @return TRUE if at least one stream (or the container itself) has a
 *         known duration.
1563 static int has_duration(AVFormatContext *ic)
1568 for (i = 0; i < ic->nb_streams; i++) {
1569 st = ic->streams[i];
1570 if (st->duration != AV_NOPTS_VALUE)
1573 if (ic->duration != AV_NOPTS_VALUE)
1579 * Estimate the global timings (start_time, duration) from those of the
 *   individual streams, converted to AV_TIME_BASE units.
1581 * Also computes the global bitrate from filesize/duration if possible.
1583 static void update_stream_timings(AVFormatContext *ic)
1585 int64_t start_time, start_time1, end_time, end_time1;
1586 int64_t duration, duration1, filesize;
1590 start_time = INT64_MAX;
1591 end_time = INT64_MIN;
1592 duration = INT64_MIN;
1593 for (i = 0; i < ic->nb_streams; i++) {
1594 st = ic->streams[i];
/* Only streams with a valid start time and time base contribute. */
1595 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1596 start_time1 = av_rescale_q(st->start_time, st->time_base,
1598 start_time = FFMIN(start_time, start_time1);
1599 if (st->duration != AV_NOPTS_VALUE) {
1600 end_time1 = start_time1 +
1601 av_rescale_q(st->duration, st->time_base,
1603 end_time = FFMAX(end_time, end_time1);
1606 if (st->duration != AV_NOPTS_VALUE) {
1607 duration1 = av_rescale_q(st->duration, st->time_base,
1609 duration = FFMAX(duration, duration1);
1612 if (start_time != INT64_MAX) {
1613 ic->start_time = start_time;
1614 if (end_time != INT64_MIN)
1615 duration = FFMAX(duration, end_time - start_time);
1617 if (duration != INT64_MIN) {
1618 ic->duration = duration;
1619 if (ic->pb && (filesize = avio_size(ic->pb)) > 0)
1620 /* compute the bitrate */
1621 ic->bit_rate = (double) filesize * 8.0 * AV_TIME_BASE /
1622 (double) ic->duration;
/* Propagate the container-level start_time/duration to any stream that has
 * no start_time of its own, rescaling from AV_TIME_BASE to the stream's
 * time base.  (Listing elides lines.) */
1626 static void fill_all_stream_timings(AVFormatContext *ic)
1631 update_stream_timings(ic);
1632 for (i = 0; i < ic->nb_streams; i++) {
1633 st = ic->streams[i];
1634 if (st->start_time == AV_NOPTS_VALUE) {
1635 if (ic->start_time != AV_NOPTS_VALUE)
1636 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
1638 if (ic->duration != AV_NOPTS_VALUE)
1639 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
/* Least precise duration estimate: derive the total bitrate by summing the
 * per-stream codec bitrates (with overflow guard), then compute duration as
 * filesize * 8 / bit_rate for each stream that lacks one.
 * (Listing elides lines.) */
1645 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1647 int64_t filesize, duration;
1651 /* if bit_rate is already set, we believe it */
1652 if (ic->bit_rate <= 0) {
1654 for (i = 0; i < ic->nb_streams; i++) {
1655 st = ic->streams[i];
1656 if (st->codec->bit_rate > 0) {
/* Guard against int overflow while accumulating. */
1657 if (INT_MAX - st->codec->bit_rate < bit_rate) {
1661 bit_rate += st->codec->bit_rate;
1664 ic->bit_rate = bit_rate;
1667 /* if duration is already set, we believe it */
1668 if (ic->duration == AV_NOPTS_VALUE &&
1669 ic->bit_rate != 0) {
1670 filesize = ic->pb ? avio_size(ic->pb) : 0;
1672 for (i = 0; i < ic->nb_streams; i++) {
1673 st = ic->streams[i];
1674 duration = av_rescale(8 * filesize, st->time_base.den,
1676 (int64_t) st->time_base.num);
1677 if (st->duration == AV_NOPTS_VALUE)
1678 st->duration = duration;
/* Chunk size read from the end of file per attempt, and how many times the
 * window is doubled before giving up. */
1684 #define DURATION_MAX_READ_SIZE 250000
1685 #define DURATION_MAX_RETRY 3
1687 /* only usable for MPEG-PS streams */
/* Estimate per-stream durations by reading packets from near the end of the
 * file and subtracting each stream's start time (or first DTS) from the last
 * PTS seen.  Restores the original file position afterwards.
 * NOTE(review): listing has elided lines; gaps in the embedded numbering
 * mark missing source lines. */
1688 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1690 AVPacket pkt1, *pkt = &pkt1;
1692 int read_size, i, ret;
1694 int64_t filesize, offset, duration;
1697 /* flush packet queue */
1698 flush_packet_queue(ic);
1700 for (i = 0; i < ic->nb_streams; i++) {
1701 st = ic->streams[i];
1702 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1703 av_log(st->codec, AV_LOG_WARNING,
1704 "start time is not set in estimate_timings_from_pts\n");
1707 av_parser_close(st->parser);
1712 /* estimate the end time (duration) */
1713 /* XXX: may need to support wrapping */
1714 filesize = ic->pb ? avio_size(ic->pb) : 0;
1715 end_time = AV_NOPTS_VALUE;
/* Read a window of DURATION_MAX_READ_SIZE << retry bytes from the tail. */
1717 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
1721 avio_seek(ic->pb, offset, SEEK_SET);
1724 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
1728 ret = ff_read_packet(ic, pkt);
1729 } while (ret == AVERROR(EAGAIN));
1732 read_size += pkt->size;
1733 st = ic->streams[pkt->stream_index];
1734 if (pkt->pts != AV_NOPTS_VALUE &&
1735 (st->start_time != AV_NOPTS_VALUE ||
1736 st->first_dts != AV_NOPTS_VALUE)) {
1737 duration = end_time = pkt->pts;
1738 if (st->start_time != AV_NOPTS_VALUE)
1739 duration -= st->start_time;
1741 duration -= st->first_dts;
/* Negative duration implies PTS wrapped; compensate by one wrap period. */
1743 duration += 1LL << st->pts_wrap_bits;
1745 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1746 st->duration = duration;
1749 av_free_packet(pkt);
/* Retry with a doubled window until an end time is found or limits hit. */
1751 } while (end_time == AV_NOPTS_VALUE &&
1752 filesize > (DURATION_MAX_READ_SIZE << retry) &&
1753 ++retry <= DURATION_MAX_RETRY);
1755 fill_all_stream_timings(ic);
/* Restore the stream state disturbed by the tail reads. */
1757 avio_seek(ic->pb, old_offset, SEEK_SET);
1758 for (i = 0; i < ic->nb_streams; i++) {
1759 st = ic->streams[i];
1760 st->cur_dts = st->first_dts;
1761 st->last_IP_pts = AV_NOPTS_VALUE;
/* Choose the best available timing-estimation strategy: PTS scan for
 * seekable MPEG-PS/TS, per-stream timings when any exist, otherwise the
 * bitrate heuristic; then fold results into the container timings and dump
 * them at debug level.  (Listing elides lines.) */
1765 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1769 /* get the file size, if possible */
1770 if (ic->iformat->flags & AVFMT_NOFILE) {
1773 file_size = avio_size(ic->pb);
1774 file_size = FFMAX(0, file_size);
1777 if ((!strcmp(ic->iformat->name, "mpeg") ||
1778 !strcmp(ic->iformat->name, "mpegts")) &&
1779 file_size && ic->pb->seekable) {
1780 /* get accurate estimate from the PTSes */
1781 estimate_timings_from_pts(ic, old_offset);
1782 } else if (has_duration(ic)) {
1783 /* at least one component has timings - we use them for all
1785 fill_all_stream_timings(ic);
1787 av_log(ic, AV_LOG_WARNING,
1788 "Estimating duration from bitrate, this may be inaccurate\n");
1789 /* less precise: use bitrate info */
1790 estimate_timings_from_bit_rate(ic);
1792 update_stream_timings(ic);
1796 AVStream av_unused *st;
1797 for (i = 0; i < ic->nb_streams; i++) {
1798 st = ic->streams[i];
1799 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
1800 (double) st->start_time / AV_TIME_BASE,
1801 (double) st->duration / AV_TIME_BASE);
1804 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1805 (double) ic->start_time / AV_TIME_BASE,
1806 (double) ic->duration / AV_TIME_BASE,
1807 ic->bit_rate / 1000);
/* Return nonzero when the stream's codec context carries enough parameters
 * (per media type) for the stream to be considered fully probed.
 * (Listing elides lines.) */
1811 static int has_codec_parameters(AVStream *st)
1813 AVCodecContext *avctx = st->codec;
1816 switch (avctx->codec_type) {
1817 case AVMEDIA_TYPE_AUDIO:
1818 val = avctx->sample_rate && avctx->channels;
/* If a decoder ran, also require a known sample format. */
1819 if (st->info->found_decoder >= 0 &&
1820 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1823 case AVMEDIA_TYPE_VIDEO:
1825 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1832 return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
/* H.264 can have substantial reorder delay; require several decoded frames
 * before trusting the guessed delay.  Other codecs need no such wait. */
1835 static int has_decode_delay_been_guessed(AVStream *st)
1837 return st->codec->codec_id != AV_CODEC_ID_H264 ||
1838 st->info->nb_decoded_frames >= 6;
1841 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/* Open the stream's decoder if needed (single-threaded, so H.264 SPS/PPS
 * land in extradata) and decode packets from avpkt until the codec
 * parameters and decode delay are established.
 * NOTE(review): listing has elided lines; comments describe only what is
 * visible. */
1842 static int try_decode_frame(AVStream *st, AVPacket *avpkt,
1843 AVDictionary **options)
1845 const AVCodec *codec;
1846 int got_picture = 1, ret = 0;
1847 AVFrame *frame = av_frame_alloc();
1848 AVPacket pkt = *avpkt;
1851 return AVERROR(ENOMEM);
1853 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
1854 AVDictionary *thread_opt = NULL;
1856 codec = st->codec->codec ? st->codec->codec
1857 : avcodec_find_decoder(st->codec->codec_id);
/* found_decoder == -1 records a permanent failure to find/open a decoder. */
1860 st->info->found_decoder = -1;
1865 /* Force thread count to 1 since the H.264 decoder will not extract
1866 * SPS and PPS to extradata during multi-threaded decoding. */
1867 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
1868 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
1870 av_dict_free(&thread_opt);
1872 st->info->found_decoder = -1;
1875 st->info->found_decoder = 1;
1876 } else if (!st->info->found_decoder)
1877 st->info->found_decoder = 1;
1879 if (st->info->found_decoder < 0) {
/* Keep decoding while data remains (or the decoder is being flushed with
 * an empty packet) and parameters/delay are still unknown. */
1884 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
1886 (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
1887 (!st->codec_info_nb_frames &&
1888 st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
1890 switch (st->codec->codec_type) {
1891 case AVMEDIA_TYPE_VIDEO:
1892 ret = avcodec_decode_video2(st->codec, frame,
1893 &got_picture, &pkt);
1895 case AVMEDIA_TYPE_AUDIO:
1896 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
1903 st->info->nb_decoded_frames++;
1911 av_frame_free(&frame);
/* Linear scan of a codec-tag table for the given codec id; the matching
 * return and the fallback are in elided lines. */
1915 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
1917 while (tags->id != AV_CODEC_ID_NONE) {
/* Map a fourcc tag to a codec id: first an exact match, then a
 * case-insensitive match via avpriv_toupper4(). */
1928 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1931 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1932 if (tag == tags[i].tag)
1934 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1935 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
1937 return AV_CODEC_ID_NONE;
/* Select a PCM codec id from bits-per-sample, float/int, endianness and a
 * signedness bitmask (bit bps-1 set => signed).  (Listing elides the
 * switch statements' case labels.) */
1937 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
1942 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
1944 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
1946 return AV_CODEC_ID_NONE;
/* Signed integer formats. */
1950 if (sflags & (1 << (bps - 1))) {
1953 return AV_CODEC_ID_PCM_S8;
1955 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
1957 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
1959 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
1961 return AV_CODEC_ID_NONE;
/* Unsigned integer formats. */
1966 return AV_CODEC_ID_PCM_U8;
1968 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
1970 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
1972 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
1974 return AV_CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for the codec id; the hit
 * return and final fallback are in elided lines. */
1980 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
1983 for (i = 0; tags && tags[i]; i++) {
1984 int tag = ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for a fourcc tag; returns the
 * first matching codec id or AV_CODEC_ID_NONE. */
1991 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
1994 for (i = 0; tags && tags[i]; i++) {
1995 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
1996 if (id != AV_CODEC_ID_NONE)
1999 return AV_CODEC_ID_NONE;
/* Assign an end time to every chapter that lacks one: cap at the container's
 * end (start_time + duration) and clip at the start of the next chapter.
 * (Listing elides lines.) */
2002 static void compute_chapters_end(AVFormatContext *s)
2005 int64_t max_time = s->duration +
2006 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2008 for (i = 0; i < s->nb_chapters; i++)
2009 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2010 AVChapter *ch = s->chapters[i];
2011 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
/* Shrink end to the earliest later-starting chapter. */
2015 for (j = 0; j < s->nb_chapters; j++) {
2016 AVChapter *ch1 = s->chapters[j];
2017 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2019 if (j != i && next_start > ch->start && next_start < end)
/* INT64_MAX means no usable bound was found; fall back to zero length. */
2022 ch->end = (end == INT64_MAX) ? ch->start : end;
/* Return the i-th "standard" frame rate expressed in units of 1/(12*1001)
 * of a second: the first entries enumerate n*1001, the rest are common
 * rates (24/30/60/12/15).  (Listing elides the index test.) */
2026 static int get_std_framerate(int i)
2029 return (i + 1) * 1001;
2031 return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
/* Probe the input by reading packets until every stream has known codec
 * parameters (or a probe limit is hit): opens decoders single-threaded,
 * tracks DTS continuity to guess frame rates, extracts extradata via the
 * parser's split(), decodes frames when parameters are still missing, and
 * finally estimates timings and chapter ends.
 * NOTE(review): this listing has many elided lines (gaps in the embedded
 * numbering); comments describe only what is visible. */
2034 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2036 int i, count, ret, read_size, j;
2038 AVPacket pkt1, *pkt;
2039 int64_t old_offset = avio_tell(ic->pb);
2040 // new streams might appear, no options for those
2041 int orig_nb_streams = ic->nb_streams;
/* Pass 1: set up parsers and (tentatively) open decoders per stream. */
2043 for (i = 0; i < ic->nb_streams; i++) {
2044 const AVCodec *codec;
2045 AVDictionary *thread_opt = NULL;
2046 st = ic->streams[i];
2048 // only for the split stuff
2049 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2050 st->parser = av_parser_init(st->codec->codec_id);
2051 if (st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser)
2052 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2054 codec = st->codec->codec ? st->codec->codec
2055 : avcodec_find_decoder(st->codec->codec_id);
2057 /* Force thread count to 1 since the H.264 decoder will not extract
2058 * SPS and PPS to extradata during multi-threaded decoding. */
2059 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2061 /* Ensure that subtitle_header is properly set. */
2062 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2063 && codec && !st->codec->codec)
2064 avcodec_open2(st->codec, codec,
2065 options ? &options[i] : &thread_opt);
2067 // Try to just open decoders, in case this is enough to get parameters.
2068 if (!has_codec_parameters(st)) {
2069 if (codec && !st->codec->codec)
2070 avcodec_open2(st->codec, codec,
2071 options ? &options[i] : &thread_opt);
2074 av_dict_free(&thread_opt);
2077 for (i = 0; i < ic->nb_streams; i++) {
2078 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2079 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
/* Main probe loop (loop head in elided lines). */
2085 if (ff_check_interrupt(&ic->interrupt_callback)) {
2087 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2091 /* check if one codec still needs to be handled */
2092 for (i = 0; i < ic->nb_streams; i++) {
2093 int fps_analyze_framecount = 20;
2095 st = ic->streams[i];
2096 if (!has_codec_parameters(st))
2098 /* If the timebase is coarse (like the usual millisecond precision
2099 * of mkv), we need to analyze more frames to reliably arrive at
2100 * the correct fps. */
2101 if (av_q2d(st->time_base) > 0.0005)
2102 fps_analyze_framecount *= 2;
2103 if (ic->fps_probe_size >= 0)
2104 fps_analyze_framecount = ic->fps_probe_size;
2105 /* variable fps and no guess at the real fps */
2106 if (!st->avg_frame_rate.num &&
2107 st->codec_info_nb_frames < fps_analyze_framecount &&
2108 st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2110 if (st->parser && st->parser->parser->split &&
2111 !st->codec->extradata)
2113 if (st->first_dts == AV_NOPTS_VALUE &&
2114 st->codec_info_nb_frames < ic->max_ts_probe &&
2115 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2116 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2119 if (i == ic->nb_streams) {
2120 /* NOTE: If the format has no header, then we need to read some
2121 * packets to get most of the streams, so we cannot stop here. */
2122 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2123 /* If we found the info for all the codecs, we can stop. */
2125 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2129 /* We did not get all the codec info, but we read too much data. */
2130 if (read_size >= ic->probesize) {
2132 av_log(ic, AV_LOG_DEBUG,
2133 "Probe buffer size limit %d reached\n", ic->probesize);
2137 /* NOTE: A new stream can be added there if no header in file
2138 * (AVFMTCTX_NOHEADER). */
2139 ret = read_frame_internal(ic, &pkt1);
2140 if (ret == AVERROR(EAGAIN))
/* EOF path: flush decoders with an empty packet to drain remaining info. */
2145 AVPacket empty_pkt = { 0 };
2147 av_init_packet(&empty_pkt);
2149 /* We could not have all the codec parameters before EOF. */
2151 for (i = 0; i < ic->nb_streams; i++) {
2152 st = ic->streams[i];
2154 /* flush the decoders */
2155 if (st->info->found_decoder == 1) {
2157 err = try_decode_frame(st, &empty_pkt,
2158 (options && i < orig_nb_streams)
2159 ? &options[i] : NULL);
2160 } while (err > 0 && !has_codec_parameters(st));
2164 av_log(ic, AV_LOG_WARNING,
2165 "decoding for stream %d failed\n", st->index);
2166 } else if (!has_codec_parameters(st)) {
2168 avcodec_string(buf, sizeof(buf), st->codec, 0);
2169 av_log(ic, AV_LOG_WARNING,
2170 "Could not find codec parameters (%s)\n", buf);
/* Buffer the packet (unless NOBUFFER) so it can be returned later. */
2178 if (ic->flags & AVFMT_FLAG_NOBUFFER) {
2181 pkt = add_to_pktbuf(&ic->internal->packet_buffer, &pkt1,
2182 &ic->internal->packet_buffer_end);
2183 if ((ret = av_dup_packet(pkt)) < 0)
2184 goto find_stream_info_err;
2187 read_size += pkt->size;
2189 st = ic->streams[pkt->stream_index];
2190 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2191 /* check for non-increasing dts */
2192 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2193 st->info->fps_last_dts >= pkt->dts) {
2194 av_log(ic, AV_LOG_WARNING,
2195 "Non-increasing DTS in stream %d: packet %d with DTS "
2196 "%"PRId64", packet %d with DTS %"PRId64"\n",
2197 st->index, st->info->fps_last_dts_idx,
2198 st->info->fps_last_dts, st->codec_info_nb_frames,
/* Reset the fps measurement window after a DTS anomaly. */
2200 st->info->fps_first_dts =
2201 st->info->fps_last_dts = AV_NOPTS_VALUE;
2203 /* Check for a discontinuity in dts. If the difference in dts
2204 * is more than 1000 times the average packet duration in the
2205 * sequence, we treat it as a discontinuity. */
2206 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2207 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2208 (pkt->dts - st->info->fps_last_dts) / 1000 >
2209 (st->info->fps_last_dts - st->info->fps_first_dts) /
2210 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2211 av_log(ic, AV_LOG_WARNING,
2212 "DTS discontinuity in stream %d: packet %d with DTS "
2213 "%"PRId64", packet %d with DTS %"PRId64"\n",
2214 st->index, st->info->fps_last_dts_idx,
2215 st->info->fps_last_dts, st->codec_info_nb_frames,
2217 st->info->fps_first_dts =
2218 st->info->fps_last_dts = AV_NOPTS_VALUE;
2221 /* update stored dts values */
2222 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2223 st->info->fps_first_dts = pkt->dts;
2224 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2226 st->info->fps_last_dts = pkt->dts;
2227 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2229 /* check max_analyze_duration */
2230 if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2231 AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2232 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached\n",
2233 ic->max_analyze_duration);
2234 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2235 av_packet_unref(pkt);
/* Extract extradata (e.g. SPS/PPS) via the parser's split() callback. */
2239 if (st->parser && st->parser->parser->split && !st->codec->extradata) {
2240 int i = st->parser->parser->split(st->codec, pkt->data, pkt->size);
2241 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2242 st->codec->extradata_size = i;
2243 st->codec->extradata = av_mallocz(st->codec->extradata_size +
2244 FF_INPUT_BUFFER_PADDING_SIZE);
2245 if (!st->codec->extradata)
2246 return AVERROR(ENOMEM);
2247 memcpy(st->codec->extradata, pkt->data,
2248 st->codec->extradata_size);
2252 /* If still no information, we try to open the codec and to
2253 * decompress the frame. We try to avoid that in most cases as
2254 * it takes longer and uses more memory. For MPEG-4, we need to
2255 * decompress for QuickTime.
2257 * If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2258 * least one frame of codec data, this makes sure the codec initializes
2259 * the channel configuration and does not only trust the values from
2261 try_decode_frame(st, pkt,
2262 (options && i < orig_nb_streams) ? &options[i] : NULL);
2264 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2265 av_packet_unref(pkt);
2267 st->codec_info_nb_frames++;
2271 // close codecs which were opened in try_decode_frame()
2272 for (i = 0; i < ic->nb_streams; i++) {
2273 st = ic->streams[i];
2274 avcodec_close(st->codec);
/* Post-processing: derive avg_frame_rate for video, fill audio defaults
 * and dispositions. */
2276 for (i = 0; i < ic->nb_streams; i++) {
2277 st = ic->streams[i];
2278 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2279 /* estimate average framerate if not set by demuxer */
2280 if (!st->avg_frame_rate.num &&
2281 st->info->fps_last_dts != st->info->fps_first_dts) {
2282 int64_t delta_dts = st->info->fps_last_dts -
2283 st->info->fps_first_dts;
2284 int delta_packets = st->info->fps_last_dts_idx -
2285 st->info->fps_first_dts_idx;
2287 double best_error = 0.01;
/* Guard the av_reduce() inputs against 64-bit overflow. */
2289 if (delta_dts >= INT64_MAX / st->time_base.num ||
2290 delta_packets >= INT64_MAX / st->time_base.den ||
2293 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2294 delta_packets * (int64_t) st->time_base.den,
2295 delta_dts * (int64_t) st->time_base.num, 60000);
2297 /* Round guessed framerate to a "standard" framerate if it's
2298 * within 1% of the original estimate. */
2299 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
2300 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
2301 double error = fabs(av_q2d(st->avg_frame_rate) /
2302 av_q2d(std_fps) - 1);
2304 if (error < best_error) {
2306 best_fps = std_fps.num;
2310 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2311 best_fps, 12 * 1001, INT_MAX);
2313 } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2314 if (!st->codec->bits_per_coded_sample)
2315 st->codec->bits_per_coded_sample =
2316 av_get_bits_per_sample(st->codec->codec_id);
2317 // set stream disposition based on audio service type
2318 switch (st->codec->audio_service_type) {
2319 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2320 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
2322 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2323 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
2325 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2326 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
2328 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2329 st->disposition = AV_DISPOSITION_COMMENT;
2331 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2332 st->disposition = AV_DISPOSITION_KARAOKE;
2338 estimate_timings(ic, old_offset);
2340 compute_chapters_end(ic);
2342 find_stream_info_err:
/* Cleanup: reset thread counts and free per-stream probe info. */
2343 for (i = 0; i < ic->nb_streams; i++) {
2344 ic->streams[i]->codec->thread_count = 0;
2345 av_freep(&ic->streams[i]->info);
/* Return the first program containing stream index s, or (per the elided
 * tail) NULL if none does. */
2350 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2354 for (i = 0; i < ic->nb_programs; i++)
2355 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2356 if (ic->programs[i]->stream_index[j] == s)
2357 return ic->programs[i];
/* Pick the "best" stream of the given media type: restrict to the related
 * stream's program when asked, skip impaired-audience streams, prefer the
 * stream with the most probed frames, optionally requiring a decoder.
 * Returns the stream index or a negative AVERROR.  (Listing elides lines.) */
2361 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
2362 int wanted_stream_nb, int related_stream,
2363 AVCodec **decoder_ret, int flags)
2365 int i, nb_streams = ic->nb_streams;
2366 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2367 unsigned *program = NULL;
2368 AVCodec *decoder = NULL, *best_decoder = NULL;
/* Limit the search to the program containing related_stream, if any. */
2370 if (related_stream >= 0 && wanted_stream_nb < 0) {
2371 AVProgram *p = find_program_from_stream(ic, related_stream);
2373 program = p->stream_index;
2374 nb_streams = p->nb_stream_indexes;
2377 for (i = 0; i < nb_streams; i++) {
2378 int real_stream_index = program ? program[i] : i;
2379 AVStream *st = ic->streams[real_stream_index];
2380 AVCodecContext *avctx = st->codec;
2381 if (avctx->codec_type != type)
2383 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2385 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
2386 AV_DISPOSITION_VISUAL_IMPAIRED))
2389 decoder = avcodec_find_decoder(st->codec->codec_id);
2392 ret = AVERROR_DECODER_NOT_FOUND;
2396 if (best_count >= st->codec_info_nb_frames)
2398 best_count = st->codec_info_nb_frames;
2399 ret = real_stream_index;
2400 best_decoder = decoder;
/* Program-restricted search found nothing: widen to all streams. */
2401 if (program && i == nb_streams - 1 && ret < 0) {
2403 nb_streams = ic->nb_streams;
2404 /* no related stream found, try again with everything */
2409 *decoder_ret = best_decoder;
2413 /*******************************************************/
/* Resume a paused network stream: demuxer callback first, then the protocol
 * pause(0) hook; ENOSYS if neither exists. */
2415 int av_read_play(AVFormatContext *s)
2417 if (s->iformat->read_play)
2418 return s->iformat->read_play(s);
2420 return avio_pause(s->pb, 0);
2421 return AVERROR(ENOSYS);
/* Pause a network stream: demuxer callback first, then the protocol
 * pause(1) hook; ENOSYS if neither exists. */
2424 int av_read_pause(AVFormatContext *s)
2426 if (s->iformat->read_pause)
2427 return s->iformat->read_pause(s);
2429 return avio_pause(s->pb, 1);
2430 return AVERROR(ENOSYS);
/* Free an AVFormatContext and everything it owns: private options, every
 * stream (side data, parser, attached picture, metadata, index, codec
 * buffers), programs, chapters and container metadata.
 * (Listing elides lines.) */
2433 void avformat_free_context(AVFormatContext *s)
2442 if (s->iformat && s->iformat->priv_class && s->priv_data)
2443 av_opt_free(s->priv_data);
2445 for (i = 0; i < s->nb_streams; i++) {
2446 /* free all data in a stream component */
2449 for (j = 0; j < st->nb_side_data; j++)
2450 av_freep(&st->side_data[j].data);
2451 av_freep(&st->side_data);
2452 st->nb_side_data = 0;
2455 av_parser_close(st->parser);
2457 if (st->attached_pic.data)
2458 av_free_packet(&st->attached_pic);
2459 av_dict_free(&st->metadata);
2460 av_freep(&st->probe_data.buf);
2461 av_free(st->index_entries);
2462 av_free(st->codec->extradata);
2463 av_free(st->codec->subtitle_header);
2465 av_free(st->priv_data);
/* Free programs in reverse so the array can shrink safely. */
2469 for (i = s->nb_programs - 1; i >= 0; i--) {
2470 av_dict_free(&s->programs[i]->metadata);
2471 av_freep(&s->programs[i]->stream_index);
2472 av_freep(&s->programs[i]);
2474 av_freep(&s->programs);
2475 av_freep(&s->priv_data);
2476 while (s->nb_chapters--) {
2477 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2478 av_free(s->chapters[s->nb_chapters]);
2480 av_freep(&s->chapters);
2481 av_dict_free(&s->metadata);
2482 av_freep(&s->streams);
2483 av_freep(&s->internal);
/* Close an input context: flush queued packets, call the demuxer's
 * read_close(), free the context, and (in elided lines) close the AVIO
 * context unless it is caller-owned (AVFMT_NOFILE / CUSTOM_IO). */
2489 void avformat_close_input(AVFormatContext **ps)
2491 AVFormatContext *s = *ps;
2492 AVIOContext *pb = s->pb;
2494 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2495 (s->flags & AVFMT_FLAG_CUSTOM_IO))
2496 flush_packet_queue(s);
2499 if (s->iformat->read_close)
2500 s->iformat->read_close(s);
2502 avformat_free_context(s);
/* Allocate a new AVStream, append it to s->streams, and initialize its
 * codec context and timestamp-related fields to "unknown" defaults.
 * Returns the stream or (per elided lines) NULL on allocation failure. */
2509 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
2514 if (av_reallocp_array(&s->streams, s->nb_streams + 1,
2515 sizeof(*s->streams)) < 0) {
2520 st = av_mallocz(sizeof(AVStream));
2523 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2528 st->codec = avcodec_alloc_context3(c);
2535 /* no default bitrate if decoding */
2536 st->codec->bit_rate = 0;
2538 /* default pts setting is MPEG-like */
2539 avpriv_set_pts_info(st, 33, 1, 90000);
2542 st->index = s->nb_streams;
2543 st->start_time = AV_NOPTS_VALUE;
2544 st->duration = AV_NOPTS_VALUE;
2545 /* we set the current DTS to 0 so that formats without any timestamps
2546 * but durations get some timestamps, formats with some unknown
2547 * timestamps have their first few packets buffered and the
2548 * timestamps corrected before they are returned to the user */
2550 st->first_dts = AV_NOPTS_VALUE;
2551 st->probe_packets = MAX_PROBE_PACKETS;
2553 st->last_IP_pts = AV_NOPTS_VALUE;
2554 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
2555 st->pts_buffer[i] = AV_NOPTS_VALUE;
2557 st->sample_aspect_ratio = (AVRational) { 0, 1 };
2559 st->info->fps_first_dts = AV_NOPTS_VALUE;
2560 st->info->fps_last_dts = AV_NOPTS_VALUE;
2562 s->streams[s->nb_streams++] = st;
/* Return the program with the given id, creating and registering it if it
 * does not exist yet.  (Listing elides lines.) */
2566 AVProgram *av_new_program(AVFormatContext *ac, int id)
2568 AVProgram *program = NULL;
2571 av_dlog(ac, "new_program: id=0x%04x\n", id);
/* Reuse an existing program with the same id. */
2573 for (i = 0; i < ac->nb_programs; i++)
2574 if (ac->programs[i]->id == id)
2575 program = ac->programs[i];
2578 program = av_mallocz(sizeof(AVProgram));
2581 dynarray_add(&ac->programs, &ac->nb_programs, program);
2582 program->discard = AVDISCARD_NONE;
/* Return the chapter with the given id, creating it if needed, and set its
 * title metadata, time base and start/end times.  (Listing elides lines.) */
2589 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
2590 int64_t start, int64_t end, const char *title)
2592 AVChapter *chapter = NULL;
/* Reuse an existing chapter with the same id. */
2595 for (i = 0; i < s->nb_chapters; i++)
2596 if (s->chapters[i]->id == id)
2597 chapter = s->chapters[i];
2600 chapter = av_mallocz(sizeof(AVChapter));
2603 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2605 av_dict_set(&chapter->metadata, "title", title, 0);
2607 chapter->time_base = time_base;
2608 chapter->start = start;
2614 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
2617 AVProgram *program = NULL;
2619 if (idx >= ac->nb_streams) {
2620 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
2624 for (i = 0; i < ac->nb_programs; i++) {
2625 if (ac->programs[i]->id != progid)
2627 program = ac->programs[i];
2628 for (j = 0; j < program->nb_stream_indexes; j++)
2629 if (program->stream_index[j] == idx)
2632 if (av_reallocp_array(&program->stream_index,
2633 program->nb_stream_indexes + 1,
2634 sizeof(*program->stream_index)) < 0) {
2635 program->nb_stream_indexes = 0;
2638 program->stream_index[program->nb_stream_indexes++] = idx;
2643 uint64_t ff_ntp_time(void)
2645 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/**
 * Build a frame filename from a printf-like pattern.
 * Exactly one "%d" (optionally with a zero-padding width, e.g. "%05d")
 * in path is replaced by number; "%%" emits a literal '%'.
 *
 * @param buf      output buffer
 * @param buf_size size of buf in bytes
 * @param path     pattern containing exactly one %d specifier
 * @param number   frame number substituted for the %d
 * @return 0 on success, -1 if the pattern is invalid (no %d, more than
 *         one %d, an unsupported conversion, or the result is too long)
 */
int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q = buf;
    p = path;
    percentd_found = 0;
    for (;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            do {
                /* parse optional zero-padding width digits */
                nd = 0;
                while (av_isdigit(*p))
                    nd = nd * 10 + *p++ - '0';
                c = *p++;
            } while (av_isdigit(c));

            switch (c) {
            case '%':
                goto addchar;
            case 'd':
                if (percentd_found)
                    goto fail;   /* only one %d allowed */
                percentd_found = 1;
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;       /* unsupported conversion */
            }
        } else {
addchar:
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;

fail:
    *q = '\0';
    return -1;
}
/**
 * Split a URL string into its components:
 * proto://authorization@hostname:port/path
 * Any output pointer may describe a zero-sized buffer; components that
 * are absent from the URL are returned as empty strings, and the port
 * as -1.
 *
 * @param proto              buffer for the protocol
 * @param proto_size         size of proto
 * @param authorization      buffer for the user[:pass] part
 * @param authorization_size size of authorization
 * @param hostname           buffer for the host name
 * @param hostname_size      size of hostname
 * @param port_ptr           where to store the port number (or NULL)
 * @param path               buffer for the path (including query)
 * @param path_size          size of path
 * @param url                URL to split
 */
void av_url_split(char *proto, int proto_size,
                  char *authorization, int authorization_size,
                  char *hostname, int hostname_size,
                  int *port_ptr, char *path, int path_size, const char *url)
{
    const char *p, *ls, *at, *col, *brk;

    if (port_ptr)
        *port_ptr = -1;
    if (proto_size > 0)
        proto[0] = 0;
    if (authorization_size > 0)
        authorization[0] = 0;
    if (hostname_size > 0)
        hostname[0] = 0;
    if (path_size > 0)
        path[0] = 0;

    /* parse protocol */
    if ((p = strchr(url, ':'))) {
        av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
        p++; /* skip ':' */
        if (*p == '/')
            p++;
        if (*p == '/')
            p++;
    } else {
        /* no protocol means plain filename */
        av_strlcpy(path, url, path_size);
        return;
    }

    /* separate path from hostname */
    ls = strchr(p, '/');
    if (!ls)
        ls = strchr(p, '?');
    if (ls)
        av_strlcpy(path, ls, path_size);
    else
        ls = &p[strlen(p)]; // XXX

    /* the rest is hostname, use that to parse auth/port */
    if (ls != p) {
        /* authorization (user[:pass]@hostname) */
        if ((at = strchr(p, '@')) && at < ls) {
            av_strlcpy(authorization, p,
                       FFMIN(authorization_size, at + 1 - p));
            p = at + 1; /* skip '@' */
        }

        if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
            /* [host]:port — numeric IPv6 style */
            av_strlcpy(hostname, p + 1,
                       FFMIN(hostname_size, brk - p));
            if (brk[1] == ':' && port_ptr)
                *port_ptr = atoi(brk + 2);
        } else if ((col = strchr(p, ':')) && col < ls) {
            av_strlcpy(hostname, p,
                       FFMIN(col + 1 - p, hostname_size));
            if (port_ptr)
                *port_ptr = atoi(col + 1);
        } else
            av_strlcpy(hostname, p,
                       FFMIN(ls + 1 - p, hostname_size));
    }
}
/**
 * Write a hexadecimal string representation of src into buff.
 * Exactly 2*s characters are written; the output is NOT
 * NUL-terminated — the caller must terminate or size buff accordingly.
 *
 * @param buff      output buffer, at least 2*s bytes
 * @param src       bytes to convert
 * @param s         number of bytes in src
 * @param lowercase if nonzero, use lowercase hex digits
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    int i;
    static const char hex_table_uc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'A', 'B',
                                           'C', 'D', 'E', 'F' };
    static const char hex_table_lc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'a', 'b',
                                           'c', 'd', 'e', 'f' };
    const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;

    for (i = 0; i < s; i++) {
        buff[i * 2]     = hex_table[src[i] >> 4];
        buff[i * 2 + 1] = hex_table[src[i] & 0xF];
    }

    return buff;
}
2789 int ff_hex_to_data(uint8_t *data, const char *p)
2796 p += strspn(p, SPACE_CHARS);
2799 c = av_toupper((unsigned char) *p++);
2800 if (c >= '0' && c <= '9')
2802 else if (c >= 'A' && c <= 'F')
2817 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
2818 unsigned int pts_num, unsigned int pts_den)
2821 if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
2822 if (new_tb.num != pts_num)
2823 av_log(NULL, AV_LOG_DEBUG,
2824 "st:%d removing common factor %d from timebase\n",
2825 s->index, pts_num / new_tb.num);
2827 av_log(NULL, AV_LOG_WARNING,
2828 "st:%d has too large timebase, reducing\n", s->index);
2830 if (new_tb.num <= 0 || new_tb.den <= 0) {
2831 av_log(NULL, AV_LOG_ERROR,
2832 "Ignoring attempt to set invalid timebase for st:%d\n",
2836 s->time_base = new_tb;
2837 s->pts_wrap_bits = pts_wrap_bits;
2840 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
2843 const char *ptr = str;
2845 /* Parse key=value pairs. */
2848 char *dest = NULL, *dest_end;
2849 int key_len, dest_len = 0;
2851 /* Skip whitespace and potential commas. */
2852 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
2859 if (!(ptr = strchr(key, '=')))
2862 key_len = ptr - key;
2864 callback_get_buf(context, key, key_len, &dest, &dest_len);
2865 dest_end = dest + dest_len - 1;
2869 while (*ptr && *ptr != '\"') {
2873 if (dest && dest < dest_end)
2877 if (dest && dest < dest_end)
2885 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
2886 if (dest && dest < dest_end)
2894 int ff_find_stream_index(AVFormatContext *s, int id)
2897 for (i = 0; i < s->nb_streams; i++)
2898 if (s->streams[i]->id == id)
2903 int64_t ff_iso8601_to_unix_time(const char *datestr)
2906 struct tm time1 = { 0 }, time2 = { 0 };
2908 ret1 = strptime(datestr, "%Y - %m - %d %T", &time1);
2909 ret2 = strptime(datestr, "%Y - %m - %dT%T", &time2);
2911 return av_timegm(&time2);
2913 return av_timegm(&time1);
2915 av_log(NULL, AV_LOG_WARNING,
2916 "strptime() unavailable on this system, cannot convert "
2917 "the date string.\n");
2922 int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
2926 if (ofmt->query_codec)
2927 return ofmt->query_codec(codec_id, std_compliance);
2928 else if (ofmt->codec_tag)
2929 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
2930 else if (codec_id == ofmt->video_codec ||
2931 codec_id == ofmt->audio_codec ||
2932 codec_id == ofmt->subtitle_codec)
2935 return AVERROR_PATCHWELCOME;
/**
 * Do global network-layer initialization (sockets, TLS).
 * Compiled to a no-op when networking support is disabled.
 *
 * @return 0 on success, a negative AVERROR on failure
 */
int avformat_network_init(void)
{
#if CONFIG_NETWORK
    int ret;

    /* remember that the user initialized explicitly, so the network
     * helpers do not warn about implicit initialization */
    ff_network_inited_globally = 1;
    if ((ret = ff_network_init()) < 0)
        return ret;
    ff_tls_init();
#endif
    return 0;
}
/**
 * Undo the initialization done by avformat_network_init().
 * Compiled to a no-op when networking support is disabled.
 *
 * @return 0
 */
int avformat_network_deinit(void)
{
#if CONFIG_NETWORK
    ff_network_close();
    ff_tls_deinit();
#endif
    return 0;
}
2959 int ff_add_param_change(AVPacket *pkt, int32_t channels,
2960 uint64_t channel_layout, int32_t sample_rate,
2961 int32_t width, int32_t height)
2967 return AVERROR(EINVAL);
2970 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
2972 if (channel_layout) {
2974 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
2978 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
2980 if (width || height) {
2982 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
2984 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
2986 return AVERROR(ENOMEM);
2987 bytestream_put_le32(&data, flags);
2989 bytestream_put_le32(&data, channels);
2991 bytestream_put_le64(&data, channel_layout);
2993 bytestream_put_le32(&data, sample_rate);
2994 if (width || height) {
2995 bytestream_put_le32(&data, width);
2996 bytestream_put_le32(&data, height);
/* Install canned H.264 extradata for AVC-Intra streams, which do not
 * carry parameter sets in-band. Each table below appears to hold an
 * Annex B SPS (start code + 0x67) followed by a PPS (start code + 0x68)
 * for one AVC-Intra flavor, selected by coded width and field order.
 * NOTE(review): this chunk is a decimated extraction -- interior rows of
 * the byte tables and parts of the control flow are missing, and each
 * line carries a stray original line number; restore the exact bytes
 * from the upstream source rather than re-typing them. */
3001 int ff_generate_avci_extradata(AVStream *st)
/* AVC-Intra 100, 1080 progressive (table truncated by extraction) */
3003 static const uint8_t avci100_1080p_extradata[] = {
3005 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3006 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3007 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3008 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
3009 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
3010 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
3011 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
3012 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
3013 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* PPS (0x68 after the start code) */
3015 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
/* AVC-Intra 100, 1080 interlaced (table truncated by extraction) */
3018 static const uint8_t avci100_1080i_extradata[] = {
3020 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3021 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3022 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3023 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
3024 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
3025 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
3026 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
3027 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
3028 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
3029 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
3030 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
/* PPS */
3032 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
/* AVC-Intra 50, 1080 interlaced (table truncated by extraction) */
3035 static const uint8_t avci50_1080i_extradata[] = {
3037 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
3038 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
3039 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
3040 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
3041 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
3042 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
3043 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
3044 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
3045 0x81, 0x13, 0xf7, 0xff, 0x80, 0x01, 0x80, 0x02,
3046 0x71, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
3047 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
/* PPS */
3049 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
/* AVC-Intra 100, 720p (table truncated by extraction) */
3052 static const uint8_t avci100_720p_extradata[] = {
3054 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3055 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
3056 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
3057 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
3058 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
3059 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
3060 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
3061 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
3062 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
3063 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
/* PPS */
3065 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
3069 const uint8_t *data = NULL;
/* pick a table by coded width; 1920 further split by field order */
3072 if (st->codec->width == 1920) {
3073 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
3074 data = avci100_1080p_extradata;
3075 size = sizeof(avci100_1080p_extradata);
/* presumably the else branch for interlaced 1920 -- confirm upstream */
3077 data = avci100_1080i_extradata;
3078 size = sizeof(avci100_1080i_extradata);
3080 } else if (st->codec->width == 1440) {
3081 data = avci50_1080i_extradata;
3082 size = sizeof(avci50_1080i_extradata);
3083 } else if (st->codec->width == 1280) {
3084 data = avci100_720p_extradata;
3085 size = sizeof(avci100_720p_extradata);
/* replace any existing extradata with the selected table */
3091 av_freep(&st->codec->extradata);
3092 st->codec->extradata_size = 0;
3093 st->codec->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
3094 if (!st->codec->extradata)
3095 return AVERROR(ENOMEM);
3097 memcpy(st->codec->extradata, data, size);
3098 st->codec->extradata_size = size;
3103 uint8_t *av_stream_get_side_data(AVStream *st, enum AVPacketSideDataType type,
3108 for (i = 0; i < st->nb_side_data; i++) {
3109 if (st->side_data[i].type == type) {
3111 *size = st->side_data[i].size;
3112 return st->side_data[i].data;
3118 uint8_t *ff_stream_new_side_data(AVStream *st, enum AVPacketSideDataType type,
3121 AVPacketSideData *sd, *tmp;
3123 uint8_t *data = av_malloc(size);
3128 for (i = 0; i < st->nb_side_data; i++) {
3129 sd = &st->side_data[i];
3131 if (sd->type == type) {
3132 av_freep(&sd->data);
3139 tmp = av_realloc_array(st->side_data, st->nb_side_data + 1, sizeof(*tmp));
3145 st->side_data = tmp;
3148 sd = &st->side_data[st->nb_side_data - 1];