2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
42 #include "audiointerleave.h"
55 * various utility functions for use within Libav
/* Report the libavformat version the library was compiled with.
 * NOTE(review): excerpt is elided — function braces are not visible here. */
unsigned avformat_version(void)
    return LIBAVFORMAT_VERSION_INT;
/* Return the build-time configuration string (static storage; do not free). */
const char *avformat_configuration(void)
    return LIBAV_CONFIGURATION;
/* Return the library license name. The string literal is built with a
 * "libavformat license: " prefix; pointer arithmetic skips past it
 * (sizeof includes the NUL, hence the -1). */
const char *avformat_license(void)
#define LICENSE_PREFIX "libavformat license: "
    return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
74 /* an arbitrarily chosen "sane" max packet size -- 50M */
75 #define SANE_CHUNK_SIZE (50000000)
77 /* Read the data in sane-sized chunks and append to pkt.
78 * Return the number of bytes read or an error. */
/* Read up to 'size' bytes from s in sane-sized chunks, appending to pkt.
 * Returns the number of bytes appended, or a negative error code when
 * nothing could be appended.
 * NOTE(review): excerpt is elided — the read loop and several closing
 * braces are not visible here. */
static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
    int64_t chunk_size = size;
    int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
    int orig_size = pkt->size;
    int prev_size = pkt->size;
    /* When the caller requests a lot of data, limit it to the amount
     * left in file or SANE_CHUNK_SIZE when it is not known. */
    if (size > SANE_CHUNK_SIZE) {
        int64_t filesize = avio_size(s) - avio_tell(s);
        chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
    read_size = FFMIN(size, chunk_size);
    ret = av_grow_packet(pkt, read_size);
    ret = avio_read(s, pkt->data + prev_size, read_size);
    if (ret != read_size) {
        /* Short read: trim the unread tail that av_grow_packet reserved. */
        av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
    av_packet_unref(pkt);
    /* Report bytes actually appended; otherwise propagate the error. */
    return pkt->size > orig_size ? pkt->size - orig_size : ret;
/* Read up to 'size' bytes into a fresh packet, recording the current
 * byte position so pkt->pos points at the packet's start in the file. */
int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
    pkt->pos = avio_tell(s);
    return append_packet_chunked(s, pkt, size);
/* Append up to 'size' bytes to an existing packet's data.
 * NOTE(review): the condition selecting between the two returns is elided
 * in this excerpt — presumably an empty packet falls back to av_get_packet;
 * confirm against the full source. */
int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
    return av_get_packet(s, pkt, size);
    return append_packet_chunked(s, pkt, size);
/* Check whether 'filename' contains a valid frame-number pattern by
 * attempting to expand it (e.g. for image sequences).
 * NOTE(review): the return expression's left-hand operand and the local
 * 'buf' declaration are elided in this excerpt. */
int av_filename_number_test(const char *filename)
    (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
/* Run format probing on accumulated stream data and, when the detected
 * format maps to a known raw codec (table below), set the stream's
 * codec id/type accordingly.
 * NOTE(review): excerpt is elided — the table's name/id fields, loop
 * variable declarations, and closing braces are not all visible. */
static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
                                     AVProbeData *pd, int score)
    /* Mapping from raw-demuxer short name to codec id + media type. */
    static const struct {
        enum AVMediaType type;
        { "aac",       AV_CODEC_ID_AAC,        AVMEDIA_TYPE_AUDIO },
        { "ac3",       AV_CODEC_ID_AC3,        AVMEDIA_TYPE_AUDIO },
        { "dts",       AV_CODEC_ID_DTS,        AVMEDIA_TYPE_AUDIO },
        { "eac3",      AV_CODEC_ID_EAC3,       AVMEDIA_TYPE_AUDIO },
        { "h264",      AV_CODEC_ID_H264,       AVMEDIA_TYPE_VIDEO },
        { "latm",      AV_CODEC_ID_AAC_LATM,   AVMEDIA_TYPE_AUDIO },
        { "m4v",       AV_CODEC_ID_MPEG4,      AVMEDIA_TYPE_VIDEO },
        { "mp3",       AV_CODEC_ID_MP3,        AVMEDIA_TYPE_AUDIO },
        { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
    AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
    av_log(s, AV_LOG_DEBUG,
        "Probe with size=%d, packets=%d detected %s with score=%d\n",
        pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
    for (i = 0; fmt_id_type[i].name; i++) {
        if (!strcmp(fmt->name, fmt_id_type[i].name)) {
            st->codecpar->codec_id = fmt_id_type[i].id;
            st->codecpar->codec_type = fmt_id_type[i].type;
#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
            /* Mirror into the deprecated st->codec for legacy callers. */
            st->codec->codec_type = st->codecpar->codec_type;
            st->codec->codec_id = st->codecpar->codec_id;
FF_ENABLE_DEPRECATION_WARNINGS
185 /************************************************************/
186 /* input media file */
188 /* Open input file and probe the format if necessary. */
/* Open the input file (unless a custom pb or a NOFILE format is in use)
 * and probe the container format when the caller did not force one.
 * NOTE(review): excerpt is elided — several branch bodies and the
 * trailing arguments of the av_probe_input_buffer calls are missing. */
static int init_input(AVFormatContext *s, const char *filename,
                      AVDictionary **options)
    AVProbeData pd = { filename, NULL, 0 };
    /* Caller supplied its own AVIOContext. */
    s->flags |= AVFMT_FLAG_CUSTOM_IO;
    return av_probe_input_buffer(s->pb, &s->iformat, filename,
    else if (s->iformat->flags & AVFMT_NOFILE)
        /* A NOFILE format must not be combined with a caller-provided pb. */
        return AVERROR(EINVAL);
    if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
        (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
    ret = s->io_open(s, &s->pb, filename, AVIO_FLAG_READ, options);
    return av_probe_input_buffer(s->pb, &s->iformat, filename,
/* Append 'pkt' to the tail of a packet list. When 'ref' is nonzero the
 * packet is referenced into the new node (av_packet_ref); the list head
 * and tail pointers are updated.
 * Returns 0 on success or AVERROR(ENOMEM)/ref error.
 * NOTE(review): excerpt is elided — the non-ref move path and error
 * cleanup are not visible here. */
static int add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
                         AVPacketList **plast_pktl, int ref)
    AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
        return AVERROR(ENOMEM);
    if ((ret = av_packet_ref(&pktl->pkt, pkt)) < 0) {
    (*plast_pktl)->next = pktl;
        *packet_buffer = pktl;
    /* Add the packet in the buffered packet list. */
/* Queue every non-discarded attached picture (e.g. cover art) into the
 * raw packet buffer so callers receive it from the normal read path. */
static int queue_attached_pictures(AVFormatContext *s)
    for (i = 0; i < s->nb_streams; i++)
        if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
            s->streams[i]->discard < AVDISCARD_ALL) {
            /* ref=1: the stream keeps ownership of attached_pic. */
            ret = add_to_pktbuf(&s->internal->raw_packet_buffer,
                                &s->streams[i]->attached_pic,
                                &s->internal->raw_packet_buffer_end, 1);
#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
/* Compatibility shim: copy each stream's codecpar into the deprecated
 * st->codec context when flagged as needing an update. */
static int update_stream_avctx(AVFormatContext *s)
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        if (!st->internal->need_codec_update)
        ret = avcodec_parameters_to_context(st->codec, st->codecpar);
        st->internal->need_codec_update = 0;
FF_ENABLE_DEPRECATION_WARNINGS
/* Open an input stream and read its header.
 *
 * Allocates a context when *ps is NULL, applies caller options, opens and
 * probes the input (init_input), allocates demuxer private data, reads
 * ID3v2 metadata and the container header, queues attached pictures, and
 * records the post-header data offset. On failure the context is freed
 * and *ps is reset (cleanup path at the bottom).
 * NOTE(review): excerpt is elided — 'goto fail' lines, several closing
 * braces and the final returns are not visible. */
int avformat_open_input(AVFormatContext **ps, const char *filename,
                        AVInputFormat *fmt, AVDictionary **options)
    AVFormatContext *s = *ps;
    AVDictionary *tmp = NULL;
    ID3v2ExtraMeta *id3v2_extra_meta = NULL;
    if (!s && !(s = avformat_alloc_context()))
        return AVERROR(ENOMEM);
    /* Work on a copy so unconsumed options can be returned to the caller. */
    av_dict_copy(&tmp, *options, 0);
    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
    if ((ret = init_input(s, filename, &tmp)) < 0)
    /* Check filename in case an image number is expected. */
    if (s->iformat->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            ret = AVERROR(EINVAL);
    s->duration = s->start_time = AV_NOPTS_VALUE;
    av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
    /* Allocate private data. */
    if (s->iformat->priv_data_size > 0) {
        if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
            ret = AVERROR(ENOMEM);
        if (s->iformat->priv_class) {
            *(const AVClass **) s->priv_data = s->iformat->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
    /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
    ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
    if (s->iformat->read_header)
        if ((ret = s->iformat->read_header(s)) < 0)
    if (id3v2_extra_meta &&
        (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    if ((ret = queue_attached_pictures(s)) < 0)
    /* Remember where packet data begins, after the container header. */
    if (s->pb && !s->internal->data_offset)
        s->internal->data_offset = avio_tell(s->pb);
    s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
#if FF_API_LAVF_AVCTX
    update_stream_avctx(s);
    for (i = 0; i < s->nb_streams; i++)
        s->streams[i]->internal->orig_codec_id = s->streams[i]->codecpar->codec_id;
    av_dict_free(options);
    /* Failure cleanup: release metadata, close IO we opened, free context. */
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
    avformat_free_context(s);
376 /*******************************************************/
/* Accumulate packet data for a stream whose codec is still AV_CODEC_ID_PROBE
 * and attempt to identify the codec once enough data has been gathered.
 * A NULL 'pkt' flushes: probing is forced with whatever was collected.
 * NOTE(review): excerpt is elided — the pkt==NULL branch structure and
 * several braces/returns are not visible. */
static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
    if (st->codecpar->codec_id == AV_CODEC_ID_PROBE) {
        AVProbeData *pd = &st->probe_data;
        av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
        /* Grow the probe buffer and keep it zero-padded as probers require. */
        if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
                               AVPROBE_PADDING_SIZE)) < 0)
        memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
        pd->buf_size += pkt->size;
        memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
        st->probe_packets = 0;
        av_log(s, AV_LOG_ERROR,
               "nothing to probe for stream %d\n", st->index);
        /* Probe when flushing or when the buffer size crossed a power of two
         * (avoids re-probing on every packet). */
        if (!st->probe_packets ||
            av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
            set_codec_from_probe_data(s, st, pd, st->probe_packets > 0
                                                 ? AVPROBE_SCORE_MAX / 4 : 0);
            if (st->codecpar->codec_id != AV_CODEC_ID_PROBE) {
                av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
/* Low-level packet read: return a queued raw packet when available,
 * otherwise call the demuxer's read_packet. Applies corrupt-packet
 * discarding, forced codec-id overrides, and feeds still-probing streams
 * through probe_codec / the raw packet buffer.
 * NOTE(review): excerpt is heavily elided — the outer retry loop, error
 * paths and many braces are not visible; comments below describe only
 * what the visible lines show. */
int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
    AVPacketList *pktl = s->internal->raw_packet_buffer;
    st = s->streams[pkt->stream_index];
    /* Hand out a buffered packet once its stream no longer needs probing
     * or the probe buffer budget is exhausted. */
    if (st->codecpar->codec_id != AV_CODEC_ID_PROBE ||
        !st->probe_packets ||
        s->internal->raw_packet_buffer_remaining_size < pkt->size) {
        if (st->probe_packets)
            if ((err = probe_codec(s, st, NULL)) < 0)
        pd = &st->probe_data;
        s->internal->raw_packet_buffer = pktl->next;
        s->internal->raw_packet_buffer_remaining_size += pkt->size;
    ret = s->iformat->read_packet(s, pkt);
    if (!pktl || ret == AVERROR(EAGAIN))
    /* End of input: flush probing on all streams still undecided. */
    for (i = 0; i < s->nb_streams; i++) {
        if (st->probe_packets)
            if ((err = probe_codec(s, st, NULL)) < 0)
    AVPacket tmp = { 0 };
    ret = av_packet_ref(&tmp, pkt);
    if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
        (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
        av_log(s, AV_LOG_WARNING,
               "Dropped corrupted packet (stream = %d)\n",
        av_packet_unref(pkt);
    st = s->streams[pkt->stream_index];
    /* Apply caller-forced codec ids (note: fallthrough structure elided). */
    switch (st->codecpar->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        if (s->video_codec_id)
            st->codecpar->codec_id = s->video_codec_id;
    case AVMEDIA_TYPE_AUDIO:
        if (s->audio_codec_id)
            st->codecpar->codec_id = s->audio_codec_id;
    case AVMEDIA_TYPE_SUBTITLE:
        if (s->subtitle_codec_id)
            st->codecpar->codec_id = s->subtitle_codec_id;
    if (!pktl && (st->codecpar->codec_id != AV_CODEC_ID_PROBE ||
    /* Stream still probing: stash the packet and account its size. */
    err = add_to_pktbuf(&s->internal->raw_packet_buffer, pkt,
                        &s->internal->raw_packet_buffer_end, 0);
    s->internal->raw_packet_buffer_remaining_size -= pkt->size;
    if ((err = probe_codec(s, st, pkt)) < 0)
509 /**********************************************************/
512 * Return the frame duration in seconds. Return 0 if not available.
/* Compute the duration of one frame as the rational *pnum / *pden in
 * seconds. Video prefers avg_frame_rate, then a plausible time_base
 * (num*1000 > den heuristic rejects bogus bases), then the codec
 * framerate adjusted for parser repeat_pict; audio derives duration from
 * the packet size and sample rate.
 * NOTE(review): excerpt is elided — default *pnum/*pden initialization,
 * break statements and some braces are not visible. */
void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
    AVRational codec_framerate = s->iformat ? st->internal->avctx->framerate :
                                              (AVRational){ 0, 1 };
    switch (st->codecpar->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        if (st->avg_frame_rate.num) {
            /* Frame duration is the inverse of the frame rate. */
            *pnum = st->avg_frame_rate.den;
            *pden = st->avg_frame_rate.num;
        } else if (st->time_base.num * 1000LL > st->time_base.den) {
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
            *pnum = codec_framerate.den;
            *pden = codec_framerate.num;
            if (pc && pc->repeat_pict) {
                /* Scale duration by (1 + repeat_pict), guarding overflow. */
                if (*pnum > INT_MAX / (1 + pc->repeat_pict))
                    *pden /= 1 + pc->repeat_pict;
                *pnum *= 1 + pc->repeat_pict;
            /* If this codec can be interlaced or progressive then we need
             * a parser to compute duration of a packet. Thus if we have
             * no parser in such case leave duration undefined. */
            if (st->internal->avctx->ticks_per_frame > 1 && !pc)
    case AVMEDIA_TYPE_AUDIO:
        frame_size = av_get_audio_frame_duration2(st->codecpar, pkt->size);
        if (frame_size <= 0 || st->codecpar->sample_rate <= 0)
        *pden = st->codecpar->sample_rate;
/* Return whether codec 'id' is intra-only (every frame a keyframe),
 * based on the codec descriptor's AV_CODEC_PROP_INTRA_ONLY property.
 * NOTE(review): the return statements are elided in this excerpt. */
static int is_intra_only(enum AVCodecID id)
    const AVCodecDescriptor *d = avcodec_descriptor_get(id);
    if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
/* Once the stream's first DTS becomes known, compute the offset
 * (dts - cur_dts) and shift the timestamps of already-buffered packets
 * for this stream by it; also establish st->start_time.
 * NOTE(review): excerpt is elided — the early return after the guard and
 * some braces/continues are not visible. */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts)
    AVStream *st = s->streams[stream_index];
    AVPacketList *pktl = s->internal->packet_buffer;
    /* Nothing to do when first_dts is already set or inputs are unusable. */
    if (st->first_dts != AV_NOPTS_VALUE ||
        dts == AV_NOPTS_VALUE ||
        st->cur_dts == AV_NOPTS_VALUE)
    st->first_dts = dts - st->cur_dts;
    for (; pktl; pktl = pktl->next) {
        if (pktl->pkt.stream_index != stream_index)
        // FIXME: think more about this check
        if (pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
            pktl->pkt.pts += st->first_dts;
        if (pktl->pkt.dts != AV_NOPTS_VALUE)
            pktl->pkt.dts += st->first_dts;
        if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time = pktl->pkt.pts;
    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
/* Fill in DTS (and PTS when no B-frames) plus duration for buffered
 * packets of 'stream_index' that have no timestamps, extrapolating from
 * first_dts in steps of 'duration'.
 * NOTE(review): excerpt is elided — loop exits, the cur_dts arithmetic
 * between passes and several braces are not visible. */
static void update_initial_durations(AVFormatContext *s, AVStream *st,
                                     int stream_index, int duration)
    AVPacketList *pktl = s->internal->packet_buffer;
    if (st->first_dts != AV_NOPTS_VALUE) {
        cur_dts = st->first_dts;
        /* First pass: walk back from an already-timestamped packet. */
        for (; pktl; pktl = pktl->next) {
            if (pktl->pkt.stream_index == stream_index) {
                if (pktl->pkt.pts != pktl->pkt.dts ||
                    pktl->pkt.dts != AV_NOPTS_VALUE ||
        pktl = s->internal->packet_buffer;
        st->first_dts = cur_dts;
    } else if (st->cur_dts)
    /* Second pass: assign extrapolated timestamps to untimed packets. */
    for (; pktl; pktl = pktl->next) {
        if (pktl->pkt.stream_index != stream_index)
        if (pktl->pkt.pts == pktl->pkt.dts &&
            pktl->pkt.dts == AV_NOPTS_VALUE &&
            !pktl->pkt.duration) {
            pktl->pkt.dts = cur_dts;
            if (!st->internal->avctx->has_b_frames)
                pktl->pkt.pts = cur_dts;
            if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
                pktl->pkt.duration = duration;
    if (st->first_dts == AV_NOPTS_VALUE)
        st->cur_dts = cur_dts;
/* Fill in missing packet fields (pts, dts, duration, key flag) using the
 * stream state, the optional parser context and heuristics for B-frame
 * presentation delay and pts wraparound. This is the central timestamp
 * fix-up routine of the read path; it also advances st->cur_dts.
 * NOTE(review): excerpt is heavily elided — early returns, several
 * condition halves, rounding-mode arguments and braces are missing; the
 * comments below annotate only the visible lines. */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
    int num, den, presentation_delayed, delay, i;
    if (s->flags & AVFMT_FLAG_NOFILLIN)
    if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
        pkt->dts = AV_NOPTS_VALUE;
    /* do we have a video B-frame ? */
    delay = st->internal->avctx->has_b_frames;
    presentation_delayed = 0;
    /* XXX: need has_b_frame, but cannot get it if the codec is
        pc && pc->pict_type != AV_PICTURE_TYPE_B)
        presentation_delayed = 1;
    /* Undo pts wraparound when dts is more than half the wrap range ahead. */
    if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
        st->pts_wrap_bits < 63 &&
        pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
        pkt->dts -= 1LL << st->pts_wrap_bits;
    /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
     * We take the conservative approach and discard both.
     * Note: If this is misbehaving for an H.264 file, then possibly
     * presentation_delayed is not set correctly. */
    if (delay == 1 && pkt->dts == pkt->pts &&
        pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
        av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
        pkt->dts = AV_NOPTS_VALUE;
    if (pkt->duration == 0 && st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
        ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
        /* Rescale frame duration (num/den seconds) into stream time base. */
        pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
                                       den * (int64_t) st->time_base.num,
        if (pkt->duration != 0 && s->internal->packet_buffer)
            update_initial_durations(s, st, pkt->stream_index,
    /* Correct timestamps with byte offset if demuxers only have timestamps
     * on packet boundaries */
    if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if (pkt->pts != AV_NOPTS_VALUE)
        if (pkt->dts != AV_NOPTS_VALUE)
    /* This may be redundant, but it should not hurt. */
    if (pkt->dts != AV_NOPTS_VALUE &&
        pkt->pts != AV_NOPTS_VALUE &&
        presentation_delayed = 1;
    av_log(NULL, AV_LOG_TRACE,
           "IN delayed:%d pts:%"PRId64", dts:%"PRId64" "
           "cur_dts:%"PRId64" st:%d pc:%p\n",
           presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
           pkt->stream_index, pc);
    /* Interpolate PTS and DTS if they are not present. We skip H.264
     * currently because delay and has_b_frames are not reliably set. */
    if ((delay == 0 || (delay == 1 && pc)) &&
        st->codecpar->codec_id != AV_CODEC_ID_H264) {
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->cur_dts;
            /* This is tricky: the dts must be incremented by the duration
             * of the frame we are displaying, i.e. the last I- or P-frame. */
            if (st->last_IP_duration == 0)
                st->last_IP_duration = pkt->duration;
            if (pkt->dts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts + st->last_IP_duration;
            st->last_IP_duration = pkt->duration;
            st->last_IP_pts = pkt->pts;
            /* Cannot compute PTS if not present (we can compute it only
             * by knowing the future. */
        } else if (pkt->pts != AV_NOPTS_VALUE ||
                   pkt->dts != AV_NOPTS_VALUE ||
                   st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            int duration = pkt->duration;
            if (!duration && st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
                ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
                duration = av_rescale_rnd(1,
                                          num * (int64_t) st->time_base.den,
                                          den * (int64_t) st->time_base.num,
                if (duration != 0 && s->internal->packet_buffer)
                    update_initial_durations(s, st, pkt->stream_index,
            if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
                /* presentation is not delayed : PTS and DTS are the same */
                if (pkt->pts == AV_NOPTS_VALUE)
                update_initial_timestamps(s, pkt->stream_index, pkt->pts,
                if (pkt->pts == AV_NOPTS_VALUE)
                    pkt->pts = st->cur_dts;
                if (pkt->pts != AV_NOPTS_VALUE)
                    st->cur_dts = pkt->pts + duration;
    /* Reorder buffer: the smallest buffered pts becomes the dts. */
    if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
        st->pts_buffer[0] = pkt->pts;
        for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
        if (pkt->dts == AV_NOPTS_VALUE)
            pkt->dts = st->pts_buffer[0];
        // We skipped it above so we try here.
        if (st->codecpar->codec_id == AV_CODEC_ID_H264)
            // This should happen on the first packet
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
        if (pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;
    av_log(NULL, AV_LOG_TRACE,
           "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
           presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
    /* Intra-only codecs: every packet is a keyframe. */
    if (is_intra_only(st->codecpar->codec_id))
        pkt->flags |= AV_PKT_FLAG_KEY;
#if FF_API_CONVERGENCE_DURATION
FF_DISABLE_DEPRECATION_WARNINGS
    pkt->convergence_duration = pc->convergence_duration;
FF_ENABLE_DEPRECATION_WARNINGS
/* Drain a packet list: unref every packet, free every node, and reset the
 * head (and, presumably, the tail pointer — elided here) to NULL.
 * NOTE(review): the loop construct, av_freep of the node and the tail
 * reset are not visible in this excerpt. */
static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
    AVPacketList *pktl = *pkt_buf;
    *pkt_buf = pktl->next;
    av_packet_unref(&pktl->pkt);
812 * Parse a packet, add all split parts to parse_queue.
814 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
/* Run a stream's parser over 'pkt' (NULL flushes at EOF via a dummy
 * flush_pkt) and append every produced frame to s->internal->parse_queue,
 * carrying over side data, timestamps, duration and the key flag.
 * NOTE(review): excerpt is elided — the pkt=&flush_pkt substitution,
 * data/size advancing after av_parser_parse2, duration rescale target
 * base and several braces are not visible. */
static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
    AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
    AVStream *st = s->streams[stream_index];
    uint8_t *data = pkt ? pkt->data : NULL;
    int size = pkt ? pkt->size : 0;
    int ret = 0, got_output = 0;
    av_init_packet(&flush_pkt);
    while (size > 0 || (pkt == &flush_pkt && got_output)) {
        av_init_packet(&out_pkt);
        len = av_parser_parse2(st->parser, st->internal->avctx,
                               &out_pkt.data, &out_pkt.size, data, size,
                               pkt->pts, pkt->dts, pkt->pos);
        /* Timestamps are consumed by the first parse call only. */
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
        /* increment read pointer */
        got_output = !!out_pkt.size;
        /* Transfer side data ownership to the first output packet. */
        if (pkt->side_data) {
            out_pkt.side_data = pkt->side_data;
            out_pkt.side_data_elems = pkt->side_data_elems;
            pkt->side_data = NULL;
            pkt->side_data_elems = 0;
        /* set the duration */
        out_pkt.duration = 0;
        if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (st->internal->avctx->sample_rate > 0) {
                av_rescale_q_rnd(st->parser->duration,
                                 (AVRational) { 1, st->internal->avctx->sample_rate },
        out_pkt.stream_index = st->index;
        out_pkt.pts = st->parser->pts;
        out_pkt.dts = st->parser->dts;
        out_pkt.pos = st->parser->pos;
        if (st->parser->key_frame == 1 ||
            (st->parser->key_frame == -1 &&
             st->parser->pict_type == AV_PICTURE_TYPE_I))
            out_pkt.flags |= AV_PKT_FLAG_KEY;
        compute_pkt_fields(s, st, st->parser, &out_pkt);
        if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
            out_pkt.flags & AV_PKT_FLAG_KEY) {
            ff_reduce_index(s, st->index);
            av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
                               0, 0, AVINDEX_KEYFRAME);
        if ((ret = add_to_pktbuf(&s->internal->parse_queue, &out_pkt,
                                 &s->internal->parse_queue_end,
            av_packet_unref(&out_pkt);
    /* end of the stream => close and free the parser */
    if (pkt == &flush_pkt) {
        av_parser_close(st->parser);
    av_packet_unref(pkt);
/* Pop the head packet off a non-empty packet list into the caller's
 * packet, advancing the head and clearing the tail pointer when the list
 * becomes empty.
 * NOTE(review): the pkt move, node free and return are elided here. */
static int read_from_packet_buffer(AVPacketList **pkt_buffer,
                                   AVPacketList **pkt_buffer_end,
    av_assert0(*pkt_buffer);
    *pkt_buffer = pktl->next;
        *pkt_buffer_end = NULL;
/* Produce the next demuxed frame: loop over ff_read_packet, lazily create
 * parsers for streams that need them, either emit the raw packet directly
 * (with timestamp fix-up and generic indexing) or feed it through
 * parse_packet into the parse queue; finally propagate demuxer metadata
 * updates and, under FF_API_LAVF_AVCTX, refresh the legacy codec contexts.
 * NOTE(review): excerpt is heavily elided — EOF handling, 'got_packet'
 * assignment, error returns and many braces are not visible. */
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
    int ret = 0, i, got_packet = 0;
    AVDictionary *metadata = NULL;
    while (!got_packet && !s->internal->parse_queue) {
        /* read next packet */
        ret = ff_read_packet(s, &cur_pkt);
        if (ret == AVERROR(EAGAIN))
        /* flush the parsers */
        for (i = 0; i < s->nb_streams; i++) {
            if (st->parser && st->need_parsing)
                parse_packet(s, NULL, st->index);
        /* all remaining packets are now in parse_queue =>
         * really terminate parsing */
        st = s->streams[cur_pkt.stream_index];
        /* Sanity-check demuxer output: pts must not precede dts. */
        if (cur_pkt.pts != AV_NOPTS_VALUE &&
            cur_pkt.dts != AV_NOPTS_VALUE &&
            cur_pkt.pts < cur_pkt.dts) {
            av_log(s, AV_LOG_WARNING,
                   "Invalid timestamps stream=%d, pts=%"PRId64", "
                   "dts=%"PRId64", size=%d\n",
                   cur_pkt.stream_index, cur_pkt.pts,
                   cur_pkt.dts, cur_pkt.size);
        if (s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG,
                   "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", "
                   "size=%d, duration=%"PRId64", flags=%d\n",
                   cur_pkt.stream_index, cur_pkt.pts, cur_pkt.dts,
                   cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
        /* Lazily instantiate the parser the first time it is needed. */
        if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codecpar->codec_id);
            /* no parser available: just output the raw packets */
            st->need_parsing = AVSTREAM_PARSE_NONE;
            else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
                st->parser->flags |= PARSER_FLAG_ONCE;
        if (!st->need_parsing || !st->parser) {
            /* no parsing needed: we just output the packet as is */
            compute_pkt_fields(s, st, NULL, pkt);
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts,
                                   0, 0, AVINDEX_KEYFRAME);
        } else if (st->discard < AVDISCARD_ALL) {
            if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
        av_packet_unref(&cur_pkt);
    if (!got_packet && s->internal->parse_queue)
        ret = read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);
    /* Surface demuxer-side metadata changes through the event flag. */
    av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
    s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
    av_dict_copy(&s->metadata, metadata, 0);
    av_dict_free(&metadata);
    av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
#if FF_API_LAVF_AVCTX
    update_stream_avctx(s);
    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG,
               "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", "
               "size=%d, duration=%"PRId64", flags=%d\n",
               pkt->stream_index, pkt->pts, pkt->dts,
               pkt->size, pkt->duration, pkt->flags);
/* Public read entry point. Without AVFMT_FLAG_GENPTS it simply drains the
 * packet buffer or calls read_frame_internal. With GENPTS it buffers
 * packets and infers a missing pts from a later packet's dts (modulo
 * wraparound via av_compare_mod) before releasing the head packet.
 * NOTE(review): excerpt is elided — the GENPTS outer loop, eof handling
 * and several braces are not visible. */
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
    const int genpts = s->flags & AVFMT_FLAG_GENPTS;
    /* Fast path (no pts generation requested). */
    return s->internal->packet_buffer
           ? read_from_packet_buffer(&s->internal->packet_buffer,
                                     &s->internal->packet_buffer_end, pkt)
           : read_frame_internal(s, pkt);
    AVPacketList *pktl = s->internal->packet_buffer;
    AVPacket *next_pkt = &pktl->pkt;
    if (next_pkt->dts != AV_NOPTS_VALUE) {
        int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
        /* Search a later same-stream packet whose dts yields next_pkt's pts. */
        while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
            if (pktl->pkt.stream_index == next_pkt->stream_index &&
                (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
                av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
                next_pkt->pts = pktl->pkt.dts;
        pktl = s->internal->packet_buffer;
    /* read packet from packet buffer, if there is data */
    if (!(next_pkt->pts == AV_NOPTS_VALUE &&
          next_pkt->dts != AV_NOPTS_VALUE && !eof))
        return read_from_packet_buffer(&s->internal->packet_buffer,
                                       &s->internal->packet_buffer_end, pkt);
    ret = read_frame_internal(s, pkt);
    if (pktl && ret != AVERROR(EAGAIN)) {
    /* Buffer the new packet (ref'd copy) for later pts inference. */
    ret = add_to_pktbuf(&s->internal->packet_buffer, pkt,
                        &s->internal->packet_buffer_end, 1);
1076 /* XXX: suppress the packet queue */
/* Drop all buffered packets (parse queue, reordered packet buffer, raw
 * packet buffer) and restore the raw-buffer probing budget. */
static void flush_packet_queue(AVFormatContext *s)
    free_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end);
    free_packet_buffer(&s->internal->packet_buffer, &s->internal->packet_buffer_end);
    free_packet_buffer(&s->internal->raw_packet_buffer, &s->internal->raw_packet_buffer_end);
    s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1086 /*******************************************************/
/* Choose a default stream: prefer the first real video stream (attached
 * pictures excluded), falling back to the first audio stream, then 0.
 * NOTE(review): the video-match return and the no-streams return value
 * are elided in this excerpt. */
int av_find_default_stream_index(AVFormatContext *s)
    int first_audio_index = -1;
    if (s->nb_streams <= 0)
    for (i = 0; i < s->nb_streams; i++) {
        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
            !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
        if (first_audio_index < 0 &&
            st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
            first_audio_index = i;
    return first_audio_index >= 0 ? first_audio_index : 0;
1110 /** Flush the frame reader. */
/** Flush the frame reader: drop all buffered packets and reset per-stream
 * parsing/timestamp state (used around seeks).
 * NOTE(review): the parser-pointer reset and some braces are elided. */
void ff_read_frame_flush(AVFormatContext *s)
    flush_packet_queue(s);
    /* Reset read state for each stream. */
    for (i = 0; i < s->nb_streams; i++) {
        av_parser_close(st->parser);
        st->last_IP_pts = AV_NOPTS_VALUE;
        /* We set the current DTS to an unspecified origin. */
        st->cur_dts = AV_NOPTS_VALUE;
        st->probe_packets = MAX_PROBE_PACKETS;
        /* Clear the pts reorder buffer as well. */
        for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
            st->pts_buffer[j] = AV_NOPTS_VALUE;
/* After a seek, set every stream's cur_dts to 'timestamp' (expressed in
 * ref_st's time base) rescaled into that stream's own time base.
 * NOTE(review): the assignment target (st->cur_dts =) is elided here. */
void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        av_rescale(timestamp,
                   st->time_base.den * (int64_t) ref_st->time_base.num,
                   st->time_base.num * (int64_t) ref_st->time_base.den);
/* Keep the seek index within s->max_index_size bytes: when full, halve it
 * by keeping every second entry (coarser but bounded memory). */
void ff_reduce_index(AVFormatContext *s, int stream_index)
    AVStream *st = s->streams[stream_index];
    unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
    if ((unsigned) st->nb_index_entries >= max_entries) {
        for (i = 0; 2 * i < st->nb_index_entries; i++)
            st->index_entries[i] = st->index_entries[2 * i];
        st->nb_index_entries = i;
/* Insert an index entry keeping the array sorted by timestamp: grow the
 * array, binary-search the insertion point, shift later entries when a
 * new timestamp lands in the middle, or update the existing entry for a
 * duplicate timestamp (keeping the larger min_distance).
 * NOTE(review): excerpt is elided — overflow/alloc-failure returns, the
 * index<0 append branch structure and pos/size/flags assignments are
 * partially missing. */
int ff_add_index_entry(AVIndexEntry **index_entries,
                       int *nb_index_entries,
                       unsigned int *index_entries_allocated_size,
                       int64_t pos, int64_t timestamp,
                       int size, int distance, int flags)
    AVIndexEntry *entries, *ie;
    /* Guard the (n+1)*sizeof multiplication against overflow. */
    if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
    entries = av_fast_realloc(*index_entries,
                              index_entries_allocated_size,
                              (*nb_index_entries + 1) *
                              sizeof(AVIndexEntry));
    *index_entries = entries;
    index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
                                      timestamp, AVSEEK_FLAG_ANY);
    /* Not found: append at the end (must stay sorted). */
    index = (*nb_index_entries)++;
    ie = &entries[index];
    assert(index == 0 || ie[-1].timestamp < timestamp);
    ie = &entries[index];
    if (ie->timestamp != timestamp) {
        if (ie->timestamp <= timestamp)
        /* Make room for the new entry at 'index'. */
        memmove(entries + index + 1, entries + index,
                sizeof(AVIndexEntry) * (*nb_index_entries - index));
        (*nb_index_entries)++;
    } else if (ie->pos == pos && distance < ie->min_distance)
        // do not reduce the distance
        distance = ie->min_distance;
    ie->timestamp = timestamp;
    ie->min_distance = distance;
/* Public wrapper: add an index entry to a stream's own index arrays. */
int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
                       int size, int distance, int flags)
    return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
                              &st->index_entries_allocated_size, pos,
                              timestamp, size, distance, flags);
/* Binary-search a sorted index for 'wanted_timestamp'. BACKWARD picks the
 * last entry <= wanted, otherwise the first entry >= wanted; unless
 * AVSEEK_FLAG_ANY is set, the result is then walked to the nearest
 * keyframe entry in the search direction. Returns -1 when out of range.
 * NOTE(review): the a/b/m declarations, the binary-search loop header and
 * the returns are elided in this excerpt. */
int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
                              int64_t wanted_timestamp, int flags)
    // Optimize appending index entries at the end.
    if (b && entries[b - 1].timestamp < wanted_timestamp)
    timestamp = entries[m].timestamp;
    if (timestamp >= wanted_timestamp)
    if (timestamp <= wanted_timestamp)
    m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
    if (!(flags & AVSEEK_FLAG_ANY))
        /* Step to the nearest keyframe in the requested direction. */
        while (m >= 0 && m < nb_entries &&
               !(entries[m].flags & AVINDEX_KEYFRAME))
            m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
    if (m == nb_entries)
/* Public wrapper: search a stream's own index for 'wanted_timestamp'. */
int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
    return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
                                     wanted_timestamp, flags);
/* Seek to 'target_ts' using binary search over read_timestamp. Seeds the
 * [ts_min, ts_max] / [pos_min, pos_max] window from cached index entries
 * when available, delegates the search to ff_gen_search, then seeks the
 * IO context and updates all streams' cur_dts.
 * NOTE(review): excerpt is elided — the read_timestamp capability check,
 * ts_max initialization and several braces/returns are not visible. */
int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
                         int64_t target_ts, int flags)
    AVInputFormat *avif = s->iformat;
    int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
    int64_t ts_min, ts_max, ts;
    if (stream_index < 0)
    av_log(s, AV_LOG_TRACE, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
    ts_min = AV_NOPTS_VALUE;
    pos_limit = -1; // GCC falsely says it may be uninitialized.
    st = s->streams[stream_index];
    if (st->index_entries) {
        /* FIXME: Whole function must be checked for non-keyframe entries in
         * index case, especially read_timestamp(). */
        index = av_index_search_timestamp(st, target_ts,
                                          flags | AVSEEK_FLAG_BACKWARD);
        index = FFMAX(index, 0);
        e = &st->index_entries[index];
        if (e->timestamp <= target_ts || e->pos == e->min_distance) {
            /* Lower bound of the search window from the cached entry. */
            ts_min = e->timestamp;
            av_log(s, AV_LOG_TRACE, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
        index = av_index_search_timestamp(st, target_ts,
                                          flags & ~AVSEEK_FLAG_BACKWARD);
        assert(index < st->nb_index_entries);
        e = &st->index_entries[index];
        assert(e->timestamp >= target_ts);
        /* Upper bound of the search window from the cached entry. */
        ts_max = e->timestamp;
        pos_limit = pos_max - e->min_distance;
        av_log(s, AV_LOG_TRACE, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
               " dts_max=%"PRId64"\n", pos_max, pos_limit, ts_max);
    pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
                        ts_min, ts_max, flags, &ts, avif->read_timestamp);
    if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
    ff_update_cur_dts(s, st, ts);
/* Generic timestamp binary search: find the byte position whose timestamp
 * (as reported by read_timestamp()) brackets target_ts, given optional
 * initial bounds.  Unknown bounds are probed from the start of data and
 * from the end of the file.  The loop interpolates first, then bisects,
 * then falls back to linear stepping (tracked via no_change).
 * Returns the chosen position (also honouring AVSEEK_FLAG_BACKWARD) and
 * stores the matching timestamp in *ts_ret.
 * NOTE(review): this listing is elided (missing braces, the step/retry
 * loop header around 1352-1356, and some early-return bodies). */
1328 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1329 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1330 int64_t ts_min, int64_t ts_max,
1331 int flags, int64_t *ts_ret,
1332 int64_t (*read_timestamp)(struct AVFormatContext *, int,
1333 int64_t *, int64_t))
1336 int64_t start_pos, filesize;
1339 av_log(s, AV_LOG_TRACE, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* Probe the lower bound if the caller did not supply one. */
1341 if (ts_min == AV_NOPTS_VALUE) {
1342 pos_min = s->internal->data_offset;
1343 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1344 if (ts_min == AV_NOPTS_VALUE)
/* Probe the upper bound near end-of-file, stepping backwards until a
 * timestamp is found. */
1348 if (ts_max == AV_NOPTS_VALUE) {
1350 filesize = avio_size(s->pb);
1351 pos_max = filesize - 1;
1354 ts_max = read_timestamp(s, stream_index, &pos_max,
1357 } while (ts_max == AV_NOPTS_VALUE && pos_max >= step)
1358 if (ts_max == AV_NOPTS_VALUE)
/* Advance pos_max to the last position that still yields a timestamp. */
1362 int64_t tmp_pos = pos_max + 1;
1363 int64_t tmp_ts = read_timestamp(s, stream_index,
1364 &tmp_pos, INT64_MAX);
1365 if (tmp_ts == AV_NOPTS_VALUE)
1369 if (tmp_pos >= filesize)
1372 pos_limit = pos_max;
1375 if (ts_min > ts_max)
1377 else if (ts_min == ts_max)
1378 pos_limit = pos_min;
/* Main search loop: narrow [pos_min, pos_limit] until it collapses. */
1381 while (pos_min < pos_limit) {
1382 av_log(s, AV_LOG_TRACE, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64
1383 " dts_max=%"PRId64"\n", pos_min, pos_max, ts_min, ts_max);
1384 assert(pos_limit <= pos_max);
1386 if (no_change == 0) {
1387 int64_t approximate_keyframe_distance = pos_max - pos_limit;
1388 // interpolate position (better than dichotomy)
1389 pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
1391 pos_min - approximate_keyframe_distance;
1392 } else if (no_change == 1) {
1393 // bisection if interpolation did not change min / max pos last time
1394 pos = (pos_min + pos_limit) >> 1;
1396 /* linear search if bisection failed, can only happen if there
1397 * are very few or no keyframes between min/max */
1402 else if (pos > pos_limit)
1406 // May pass pos_limit instead of -1.
1407 ts = read_timestamp(s, stream_index, &pos, INT64_MAX);
1412 av_log(s, AV_LOG_TRACE, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64
1413 " target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1414 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1415 pos_limit, start_pos, no_change);
1416 if (ts == AV_NOPTS_VALUE) {
1417 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1420 assert(ts != AV_NOPTS_VALUE);
1421 if (target_ts <= ts) {
1422 pos_limit = start_pos - 1;
1426 if (target_ts >= ts) {
/* Search collapsed: pick the side requested by AVSEEK_FLAG_BACKWARD. */
1432 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1433 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1435 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
/* NOTE(review): reading ts_max via &pos_min looks suspicious, but the
 * surrounding lines are elided — confirm against the full source before
 * judging it a bug. */
1437 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1438 av_log(s, AV_LOG_TRACE, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1439 pos, ts_min, target_ts, ts_max);
/* Seek by raw byte position: clamp pos to [data_offset, filesize-1] and
 * reposition the IO context.  Used for AVSEEK_FLAG_BYTE seeks.
 * NOTE(review): the clamping branches are partially elided here. */
1444 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1445 int64_t pos, int flags)
1447 int64_t pos_min, pos_max;
1449 pos_min = s->internal->data_offset;
1450 pos_max = avio_size(s->pb) - 1;
1454 else if (pos > pos_max)
1457 avio_seek(s->pb, pos, SEEK_SET);
/* Generic index-based seek: look up timestamp in the stream's index; if it
 * lies beyond the last indexed entry, read packets forward (growing the
 * index as a side effect of av_read_frame) until a keyframe past the
 * target is seen, then retry the lookup and seek to the entry's position.
 * NOTE(review): several control-flow lines are elided from this listing. */
1462 static int seek_frame_generic(AVFormatContext *s, int stream_index,
1463 int64_t timestamp, int flags)
1470 st = s->streams[stream_index];
1472 index = av_index_search_timestamp(st, timestamp, flags);
1474 if (index < 0 && st->nb_index_entries &&
1475 timestamp < st->index_entries[0].timestamp)
1478 if (index < 0 || index == st->nb_index_entries - 1) {
1481 if (st->nb_index_entries) {
1482 assert(st->index_entries);
/* Resume reading from the last known indexed position. */
1483 ie = &st->index_entries[st->nb_index_entries - 1];
1484 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1486 ff_update_cur_dts(s, st, ie->timestamp);
1488 if ((ret = avio_seek(s->pb, s->internal->data_offset, SEEK_SET)) < 0)
/* Scan forward until a keyframe with dts > timestamp appears. */
1494 read_status = av_read_frame(s, &pkt);
1495 } while (read_status == AVERROR(EAGAIN));
1496 if (read_status < 0)
1498 av_packet_unref(&pkt);
1499 if (stream_index == pkt.stream_index)
1500 if ((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1503 index = av_index_search_timestamp(st, timestamp, flags);
1508 ff_read_frame_flush(s);
/* Prefer the demuxer's own read_seek if it can satisfy the request. */
1509 if (s->iformat->read_seek)
1510 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1512 ie = &st->index_entries[index];
1513 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1515 ff_update_cur_dts(s, st, ie->timestamp);
/* Dispatch a seek request: byte seek, demuxer read_seek, binary search via
 * read_timestamp, or the generic index-based seek, in that order of
 * preference.  A negative stream_index is resolved to the default stream
 * and the timestamp rescaled from AV_TIME_BASE to that stream's timebase. */
1520 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1521 int64_t timestamp, int flags)
1526 if (flags & AVSEEK_FLAG_BYTE) {
1527 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1529 ff_read_frame_flush(s);
1530 return seek_frame_byte(s, stream_index, timestamp, flags);
1533 if (stream_index < 0) {
1534 stream_index = av_find_default_stream_index(s);
1535 if (stream_index < 0)
1538 st = s->streams[stream_index];
1539 /* timestamp for default must be expressed in AV_TIME_BASE units */
1540 timestamp = av_rescale(timestamp, st->time_base.den,
1541 AV_TIME_BASE * (int64_t) st->time_base.num);
1544 /* first, we try the format specific seek */
1545 if (s->iformat->read_seek) {
1546 ff_read_frame_flush(s);
1547 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1553 if (s->iformat->read_timestamp &&
1554 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1555 ff_read_frame_flush(s);
1556 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1557 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1558 ff_read_frame_flush(s);
1559 return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public seek entry point: perform the seek, then re-queue attached
 * pictures so callers see them again after repositioning. */
1564 int av_seek_frame(AVFormatContext *s, int stream_index,
1565 int64_t timestamp, int flags)
1567 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1570 ret = queue_attached_pictures(s);
/* New-style seek API: seek to ts constrained to [min_ts, max_ts].  Uses the
 * demuxer's read_seek2 when available; otherwise falls back to the old
 * av_seek_frame() API, choosing AVSEEK_FLAG_BACKWARD when ts sits closer
 * to max_ts than to min_ts (unsigned arithmetic avoids overflow on the
 * distance comparison). */
1575 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
1576 int64_t ts, int64_t max_ts, int flags)
1578 if (min_ts > ts || max_ts < ts)
1581 if (s->iformat->read_seek2) {
1583 ff_read_frame_flush(s);
1584 ret = s->iformat->read_seek2(s, stream_index, min_ts,
1588 ret = queue_attached_pictures(s);
1592 if (s->iformat->read_timestamp) {
1593 // try to seek via read_timestamp()
1596 // Fall back on old API if new is not implemented but old is.
1597 // Note the old API has somewhat different semantics.
1598 if (s->iformat->read_seek || 1)
1599 return av_seek_frame(s, stream_index, ts,
1600 flags | ((uint64_t) ts - min_ts >
1601 (uint64_t) max_ts - ts
1602 ? AVSEEK_FLAG_BACKWARD : 0));
1604 // try some generic seek like seek_frame_generic() but with new ts semantics
1607 /*******************************************************/
1610 * Return TRUE if the stream has accurate duration in any stream.
1612 * @return TRUE if the stream has accurate duration for at least one component.
/* Checks per-stream durations first, then the container-level duration. */
1614 static int has_duration(AVFormatContext *ic)
1619 for (i = 0; i < ic->nb_streams; i++) {
1620 st = ic->streams[i];
1621 if (st->duration != AV_NOPTS_VALUE)
1624 if (ic->duration != AV_NOPTS_VALUE)
1630 * Estimate the stream timings from the one of each components.
1632 * Also computes the global bitrate if possible.
/* Aggregates per-stream start_time/duration (rescaled to AV_TIME_BASE)
 * into the container's start_time, duration and — when the file size is
 * known — a derived average bit_rate. */
1634 static void update_stream_timings(AVFormatContext *ic)
1636 int64_t start_time, start_time1, end_time, end_time1;
1637 int64_t duration, duration1, filesize;
1641 start_time = INT64_MAX;
1642 end_time = INT64_MIN;
1643 duration = INT64_MIN;
1644 for (i = 0; i < ic->nb_streams; i++) {
1645 st = ic->streams[i];
1646 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1647 start_time1 = av_rescale_q(st->start_time, st->time_base,
1649 start_time = FFMIN(start_time, start_time1);
1650 if (st->duration != AV_NOPTS_VALUE) {
1651 end_time1 = start_time1 +
1652 av_rescale_q(st->duration, st->time_base,
1654 end_time = FFMAX(end_time, end_time1);
1657 if (st->duration != AV_NOPTS_VALUE) {
1658 duration1 = av_rescale_q(st->duration, st->time_base,
1660 duration = FFMAX(duration, duration1);
1663 if (start_time != INT64_MAX) {
1664 ic->start_time = start_time;
1665 if (end_time != INT64_MIN)
1666 duration = FFMAX(duration, end_time - start_time);
1668 if (duration != INT64_MIN) {
1669 ic->duration = duration;
1670 if (ic->pb && (filesize = avio_size(ic->pb)) > 0)
1671 /* compute the bitrate */
1672 ic->bit_rate = (double) filesize * 8.0 * AV_TIME_BASE /
1673 (double) ic->duration;
/* Recompute container timings, then back-fill any stream that lacks a
 * start_time/duration from the container values (rescaled into the
 * stream's own timebase). */
1677 static void fill_all_stream_timings(AVFormatContext *ic)
1682 update_stream_timings(ic);
1683 for (i = 0; i < ic->nb_streams; i++) {
1684 st = ic->streams[i];
1685 if (st->start_time == AV_NOPTS_VALUE) {
1686 if (ic->start_time != AV_NOPTS_VALUE)
1687 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
1689 if (ic->duration != AV_NOPTS_VALUE)
1690 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
/* Last-resort duration estimate: derive the container bit_rate by summing
 * per-stream bitrates (with overflow protection), then compute each
 * stream's duration as filesize * 8 / bit_rate in its own timebase.
 * Existing bit_rate/duration values are trusted and left alone. */
1696 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1698 int64_t filesize, duration;
1702 /* if bit_rate is already set, we believe it */
1703 if (ic->bit_rate <= 0) {
1705 for (i = 0; i < ic->nb_streams; i++) {
1706 st = ic->streams[i];
1707 if (st->codecpar->bit_rate > 0) {
/* Guard against int overflow when accumulating stream bitrates. */
1708 if (INT_MAX - st->codecpar->bit_rate < bit_rate) {
1712 bit_rate += st->codecpar->bit_rate;
1715 ic->bit_rate = bit_rate;
1718 /* if duration is already set, we believe it */
1719 if (ic->duration == AV_NOPTS_VALUE &&
1720 ic->bit_rate != 0) {
1721 filesize = ic->pb ? avio_size(ic->pb) : 0;
1723 for (i = 0; i < ic->nb_streams; i++) {
1724 st = ic->streams[i];
1725 duration = av_rescale(8 * filesize, st->time_base.den,
1727 (int64_t) st->time_base.num);
1728 if (st->duration == AV_NOPTS_VALUE)
1729 st->duration = duration;
1735 #define DURATION_MAX_READ_SIZE 250000
1736 #define DURATION_MAX_RETRY 3
1738 /* only usable for MPEG-PS streams */
/* Estimate stream durations by reading packets near the end of the file:
 * seek to filesize - (DURATION_MAX_READ_SIZE << retry), read packets and
 * take the largest pts-derived duration per stream, retrying with a larger
 * window (up to DURATION_MAX_RETRY) if no end timestamp was found.
 * Restores the original file offset and per-stream dts state afterwards. */
1739 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1741 AVPacket pkt1, *pkt = &pkt1;
1743 int read_size, i, ret;
1745 int64_t filesize, offset, duration;
1748 /* flush packet queue */
1749 flush_packet_queue(ic);
1751 for (i = 0; i < ic->nb_streams; i++) {
1752 st = ic->streams[i];
1753 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1754 av_log(ic, AV_LOG_WARNING,
1755 "start time is not set in estimate_timings_from_pts\n");
1758 av_parser_close(st->parser);
1763 /* estimate the end time (duration) */
1764 /* XXX: may need to support wrapping */
1765 filesize = ic->pb ? avio_size(ic->pb) : 0;
1766 end_time = AV_NOPTS_VALUE;
1768 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
1772 avio_seek(ic->pb, offset, SEEK_SET);
1775 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
1779 ret = ff_read_packet(ic, pkt);
1780 } while (ret == AVERROR(EAGAIN));
1783 read_size += pkt->size;
1784 st = ic->streams[pkt->stream_index];
1785 if (pkt->pts != AV_NOPTS_VALUE &&
1786 (st->start_time != AV_NOPTS_VALUE ||
1787 st->first_dts != AV_NOPTS_VALUE)) {
1788 duration = end_time = pkt->pts;
1789 if (st->start_time != AV_NOPTS_VALUE)
1790 duration -= st->start_time;
1792 duration -= st->first_dts;
/* Compensate for timestamp wraparound within pts_wrap_bits. */
1794 duration += 1LL << st->pts_wrap_bits;
1796 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1797 st->duration = duration;
1800 av_packet_unref(pkt);
1802 } while (end_time == AV_NOPTS_VALUE &&
1803 filesize > (DURATION_MAX_READ_SIZE << retry) &&
1804 ++retry <= DURATION_MAX_RETRY);
1806 fill_all_stream_timings(ic);
/* Restore the caller's file position and reset per-stream dts state. */
1808 avio_seek(ic->pb, old_offset, SEEK_SET);
1809 for (i = 0; i < ic->nb_streams; i++) {
1810 st = ic->streams[i];
1811 st->cur_dts = st->first_dts;
1812 st->last_IP_pts = AV_NOPTS_VALUE;
/* Top-level timing estimation: pick the best strategy — accurate PTS-based
 * scan for seekable MPEG-PS/TS, per-stream timings when any exist, or the
 * imprecise bitrate-based fallback — then log the results at trace level. */
1816 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1820 /* get the file size, if possible */
1821 if (ic->iformat->flags & AVFMT_NOFILE) {
1824 file_size = avio_size(ic->pb);
1825 file_size = FFMAX(0, file_size);
1828 if ((!strcmp(ic->iformat->name, "mpeg") ||
1829 !strcmp(ic->iformat->name, "mpegts")) &&
1830 file_size && (ic->pb->seekable & AVIO_SEEKABLE_NORMAL)) {
1831 /* get accurate estimate from the PTSes */
1832 estimate_timings_from_pts(ic, old_offset);
1833 } else if (has_duration(ic)) {
1834 /* at least one component has timings - we use them for all
1836 fill_all_stream_timings(ic);
1838 av_log(ic, AV_LOG_WARNING,
1839 "Estimating duration from bitrate, this may be inaccurate\n");
1840 /* less precise: use bitrate info */
1841 estimate_timings_from_bit_rate(ic);
1843 update_stream_timings(ic);
1847 AVStream av_unused *st;
1848 for (i = 0; i < ic->nb_streams; i++) {
1849 st = ic->streams[i];
1850 av_log(ic, AV_LOG_TRACE, "%d: start_time: %0.3f duration: %0.3f\n", i,
1851 (double) st->start_time / AV_TIME_BASE,
1852 (double) st->duration / AV_TIME_BASE);
1854 av_log(ic, AV_LOG_TRACE,
1855 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1856 (double) ic->start_time / AV_TIME_BASE,
1857 (double) ic->duration / AV_TIME_BASE,
1858 ic->bit_rate / 1000);
/* Return nonzero when enough codec parameters are known for this stream:
 * audio needs sample_rate/channels (and sample_fmt once a decoder ran),
 * video needs pix_fmt once a decoder ran; a codec_id is always required.
 * NOTE(review): some case bodies are elided in this listing. */
1862 static int has_codec_parameters(AVStream *st)
1864 AVCodecContext *avctx = st->internal->avctx;
1867 switch (avctx->codec_type) {
1868 case AVMEDIA_TYPE_AUDIO:
1869 val = avctx->sample_rate && avctx->channels;
1870 if (st->info->found_decoder >= 0 &&
1871 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1874 case AVMEDIA_TYPE_VIDEO:
1876 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1883 return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
/* H.264 may reorder frames, so its decode delay is only considered known
 * after at least 6 frames were decoded; other codecs need no guessing. */
1886 static int has_decode_delay_been_guessed(AVStream *st)
1888 return st->internal->avctx->codec_id != AV_CODEC_ID_H264 ||
1889 st->info->nb_decoded_frames >= 6;
1892 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/* Open a decoder for the stream if necessary (single-threaded, so H.264
 * extracts SPS/PPS into extradata) and decode avpkt until the stream's
 * codec parameters and decode delay are established.  found_decoder tracks
 * decoder state: 0 untried, 1 opened, -1 unavailable/failed. */
1893 static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt,
1894 AVDictionary **options)
1896 AVCodecContext *avctx = st->internal->avctx;
1897 const AVCodec *codec;
1898 int got_picture = 1, ret = 0;
1899 AVFrame *frame = av_frame_alloc();
1900 AVPacket pkt = *avpkt;
1903 return AVERROR(ENOMEM);
1905 if (!avcodec_is_open(avctx) && !st->info->found_decoder) {
1906 AVDictionary *thread_opt = NULL;
1908 #if FF_API_LAVF_AVCTX
1909 FF_DISABLE_DEPRECATION_WARNINGS
1910 codec = st->codec->codec ? st->codec->codec
1911 : avcodec_find_decoder(st->codecpar->codec_id);
1912 FF_ENABLE_DEPRECATION_WARNINGS
1914 codec = avcodec_find_decoder(st->codecpar->codec_id);
1918 st->info->found_decoder = -1;
1923 /* Force thread count to 1 since the H.264 decoder will not extract
1924 * SPS and PPS to extradata during multi-threaded decoding. */
1925 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
1926 ret = avcodec_open2(avctx, codec, options ? options : &thread_opt);
1928 av_dict_free(&thread_opt);
1930 st->info->found_decoder = -1;
1933 st->info->found_decoder = 1;
1934 } else if (!st->info->found_decoder)
1935 st->info->found_decoder = 1;
1937 if (st->info->found_decoder < 0) {
/* Keep decoding while data remains (or while draining) and parameters are
 * still incomplete. */
1942 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
1944 (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
1945 (!st->codec_info_nb_frames &&
1946 (avctx->codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF)))) {
1948 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO ||
1949 avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1950 ret = avcodec_send_packet(avctx, &pkt);
1951 if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1955 ret = avcodec_receive_frame(avctx, frame);
1958 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
1963 st->info->nb_decoded_frames++;
1969 av_frame_free(&frame);
/* Look up the container tag (fourcc) for a codec id in a tag table
 * terminated by AV_CODEC_ID_NONE.
 * NOTE(review): the loop body and return statements are elided here. */
1973 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
1975 while (tags->id != AV_CODEC_ID_NONE) {
/* Reverse lookup: map a container tag back to a codec id.  Tries an exact
 * match first, then a case-insensitive (toupper4) match as a fallback. */
1983 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1986 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1987 if (tag == tags[i].tag)
1989 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1990 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
1992 return AV_CODEC_ID_NONE;
/* Choose a PCM codec id from bits-per-sample, float flag, endianness and
 * a signedness bitmask (bit bps-1 of sflags selects signed).
 * NOTE(review): the switch headers are elided; cases are keyed on bps. */
1995 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
2000 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
2002 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
2004 return AV_CODEC_ID_NONE;
2008 if (sflags & (1 << (bps - 1))) {
2011 return AV_CODEC_ID_PCM_S8;
2013 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
2015 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
2017 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
2019 return AV_CODEC_ID_NONE;
2024 return AV_CODEC_ID_PCM_U8;
2026 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
2028 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
2030 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
2032 return AV_CODEC_ID_NONE;
/* Search a NULL-terminated list of tag tables for the codec id's tag.
 * NOTE(review): the return statements are elided in this listing. */
2038 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
2041 for (i = 0; tags && tags[i]; i++) {
2042 int tag = ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of tag tables for the tag's codec id;
 * AV_CODEC_ID_NONE when no table matches. */
2049 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
2052 for (i = 0; tags && tags[i]; i++) {
2053 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
2054 if (id != AV_CODEC_ID_NONE)
2057 return AV_CODEC_ID_NONE;
/* Fill in missing chapter end times: each open-ended chapter ends at the
 * start of the next chapter that begins after it, bounded above by the
 * container's end time (start_time + duration) rescaled to the chapter's
 * timebase; falls back to the chapter's own start if nothing bounds it. */
2060 static void compute_chapters_end(AVFormatContext *s)
2063 int64_t max_time = s->duration +
2064 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2066 for (i = 0; i < s->nb_chapters; i++)
2067 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2068 AVChapter *ch = s->chapters[i];
2069 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
2073 for (j = 0; j < s->nb_chapters; j++) {
2074 AVChapter *ch1 = s->chapters[j];
2075 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2077 if (j != i && next_start > ch->start && next_start < end)
2080 ch->end = (end == INT64_MAX) ? ch->start : end;
/**
 * Return the numerator of the i-th "standard" frame rate candidate, over a
 * fixed denominator of 12 * 1001 (see the std_fps construction in
 * avformat_find_stream_info()).
 *
 * Index 0..719 covers (i + 1) * 1001, i.e. rates 1/1.001 up to 720/1.001;
 * indices 720..724 cover the exact rates 24, 30, 60, 12 and 15 fps
 * expressed as n * 1000 * 12 over the same denominator.
 *
 * Fix: this listing had lost the `i < 60 * 12` guard, leaving two
 * unconditional returns; the guard is restored so both ranges are
 * reachable as intended.
 *
 * @param i candidate index, 0 <= i < MAX_STD_TIMEBASES (60 * 12 + 5)
 * @return  frame rate numerator for candidate i
 */
static int get_std_framerate(int i)
{
    if (i < 60 * 12)
        return (i + 1) * 1001;
    else
        return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
}
/* Prepare the "extract_extradata" bitstream filter for this stream: verify
 * the codec id is supported by the BSF, allocate a scratch packet, copy in
 * the stream parameters and initialize the filter.  An init failure is
 * treated as "extradata extraction unsupported" and returns success with
 * the BSF freed; inited is set either way so this runs only once. */
2092 static int extract_extradata_init(AVStream *st)
2094 AVStreamInternal *i = st->internal;
2095 const AVBitStreamFilter *f;
2098 f = av_bsf_get_by_name("extract_extradata");
2102 /* check that the codec id is supported */
2104 const enum AVCodecID *ids;
2105 for (ids = f->codec_ids; *ids != AV_CODEC_ID_NONE; ids++)
2106 if (*ids == st->codecpar->codec_id)
2108 if (*ids == AV_CODEC_ID_NONE)
2112 i->extract_extradata.pkt = av_packet_alloc();
2113 if (!i->extract_extradata.pkt)
2114 return AVERROR(ENOMEM);
2116 ret = av_bsf_alloc(f, &i->extract_extradata.bsf);
2120 ret = avcodec_parameters_copy(i->extract_extradata.bsf->par_in,
2125 i->extract_extradata.bsf->time_base_in = st->time_base;
2127 /* if init fails here, we assume extracting extradata is just not
2128 * supported for this codec, so we return success */
2129 ret = av_bsf_init(i->extract_extradata.bsf);
2131 av_bsf_free(&i->extract_extradata.bsf);
2136 i->extract_extradata.inited = 1;
/* Error path: release the BSF and scratch packet. */
2140 av_bsf_free(&i->extract_extradata.bsf);
2141 av_packet_free(&i->extract_extradata.pkt);
/* Feed one packet through the extract_extradata BSF and, if it emits a
 * AV_PKT_DATA_NEW_EXTRADATA side-data blob, copy it (padded) into the
 * stream's internal codec context.  Lazily initializes the BSF on first
 * use; a missing BSF after init means extraction is unsupported. */
2145 static int extract_extradata(AVStream *st, AVPacket *pkt)
2147 AVStreamInternal *i = st->internal;
2151 if (!i->extract_extradata.inited) {
2152 ret = extract_extradata_init(st);
2157 if (i->extract_extradata.inited && !i->extract_extradata.bsf)
2160 pkt_ref = i->extract_extradata.pkt;
2161 ret = av_packet_ref(pkt_ref, pkt);
2165 ret = av_bsf_send_packet(i->extract_extradata.bsf, pkt_ref);
2167 av_packet_unref(pkt_ref);
2171 while (ret >= 0 && !i->avctx->extradata) {
2175 ret = av_bsf_receive_packet(i->extract_extradata.bsf, pkt_ref);
2177 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
2182 extradata = av_packet_get_side_data(pkt_ref, AV_PKT_DATA_NEW_EXTRADATA,
/* Copy with the required zeroed input-buffer padding. */
2186 i->avctx->extradata = av_mallocz(extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
2187 if (!i->avctx->extradata) {
2188 av_packet_unref(pkt_ref);
2189 return AVERROR(ENOMEM);
2191 memcpy(i->avctx->extradata, extradata, extradata_size);
2192 i->avctx->extradata_size = extradata_size;
2194 av_packet_unref(pkt_ref);
/* Probe the input until codec parameters for all streams are known (or
 * limits are hit): set up parsers and decoder contexts, read packets
 * (optionally buffering them for later demuxing), track DTS statistics
 * for frame-rate estimation, extract extradata via BSF, and decode frames
 * when parameters are still missing.  Finally estimates timings, computes
 * chapter ends and copies the probed parameters back into st->codecpar.
 * NOTE(review): this listing is heavily elided — braces, some loop
 * headers and early-exit bodies are missing. */
2200 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2202 int i, count, ret, read_size, j;
2204 AVCodecContext *avctx;
2205 AVPacket pkt1, *pkt;
2206 int64_t old_offset = avio_tell(ic->pb);
2207 // new streams might appear, no options for those
2208 int orig_nb_streams = ic->nb_streams;
/* Pass 1: per-stream setup — parser, codec context, decoder open. */
2210 for (i = 0; i < ic->nb_streams; i++) {
2211 const AVCodec *codec;
2212 AVDictionary *thread_opt = NULL;
2213 st = ic->streams[i];
2214 avctx = st->internal->avctx;
2216 // only for the split stuff
2217 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2218 st->parser = av_parser_init(st->codecpar->codec_id);
2219 if (st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser)
2220 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2223 /* check if the caller has overridden the codec id */
2224 #if FF_API_LAVF_AVCTX
2225 FF_DISABLE_DEPRECATION_WARNINGS
2226 if (st->codec->codec_id != st->internal->orig_codec_id) {
2227 st->codecpar->codec_id = st->codec->codec_id;
2228 st->codecpar->codec_type = st->codec->codec_type;
2229 st->internal->orig_codec_id = st->codec->codec_id;
2231 FF_ENABLE_DEPRECATION_WARNINGS
2233 if (st->codecpar->codec_id != st->internal->orig_codec_id)
2234 st->internal->orig_codec_id = st->codecpar->codec_id;
2236 ret = avcodec_parameters_to_context(avctx, st->codecpar);
2238 goto find_stream_info_err;
2239 if (st->codecpar->codec_id != AV_CODEC_ID_PROBE &&
2240 st->codecpar->codec_id != AV_CODEC_ID_NONE)
2241 st->internal->avctx_inited = 1;
2243 #if FF_API_LAVF_AVCTX
2244 FF_DISABLE_DEPRECATION_WARNINGS
2245 codec = st->codec->codec ? st->codec->codec
2246 : avcodec_find_decoder(st->codecpar->codec_id);
2247 FF_ENABLE_DEPRECATION_WARNINGS
2249 codec = avcodec_find_decoder(st->codecpar->codec_id);
2252 /* Force thread count to 1 since the H.264 decoder will not extract
2253 * SPS and PPS to extradata during multi-threaded decoding. */
2254 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2256 /* Ensure that subtitle_header is properly set. */
2257 if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE
2258 && codec && !avctx->codec)
2259 avcodec_open2(avctx, codec,
2260 options ? &options[i] : &thread_opt);
2262 // Try to just open decoders, in case this is enough to get parameters.
2263 if (!has_codec_parameters(st)) {
2264 if (codec && !avctx->codec)
2265 avcodec_open2(avctx, codec,
2266 options ? &options[i] : &thread_opt);
2269 av_dict_free(&thread_opt);
2272 for (i = 0; i < ic->nb_streams; i++) {
2273 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2274 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
/* Pass 2: main probing loop — read packets until all info is found or a
 * limit (probesize, max_analyze_duration, interrupt) is reached. */
2280 if (ff_check_interrupt(&ic->interrupt_callback)) {
2282 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2286 /* check if one codec still needs to be handled */
2287 for (i = 0; i < ic->nb_streams; i++) {
2288 int fps_analyze_framecount = 20;
2290 st = ic->streams[i];
2291 if (!has_codec_parameters(st))
2293 /* If the timebase is coarse (like the usual millisecond precision
2294 * of mkv), we need to analyze more frames to reliably arrive at
2295 * the correct fps. */
2296 if (av_q2d(st->time_base) > 0.0005)
2297 fps_analyze_framecount *= 2;
2298 if (ic->fps_probe_size >= 0)
2299 fps_analyze_framecount = ic->fps_probe_size;
2300 /* variable fps and no guess at the real fps */
2301 if (!st->avg_frame_rate.num &&
2302 st->codec_info_nb_frames < fps_analyze_framecount &&
2303 st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
2305 if (!st->codecpar->extradata &&
2306 !st->internal->avctx->extradata &&
2307 (!st->internal->extract_extradata.inited ||
2308 st->internal->extract_extradata.bsf))
2310 if (st->first_dts == AV_NOPTS_VALUE &&
2311 st->codec_info_nb_frames < ic->max_ts_probe &&
2312 (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
2313 st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO))
2316 if (i == ic->nb_streams) {
2317 /* NOTE: If the format has no header, then we need to read some
2318 * packets to get most of the streams, so we cannot stop here. */
2319 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2320 /* If we found the info for all the codecs, we can stop. */
2322 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2326 /* We did not get all the codec info, but we read too much data. */
2327 if (read_size >= ic->probesize) {
2329 av_log(ic, AV_LOG_DEBUG,
2330 "Probe buffer size limit %d reached\n", ic->probesize);
2334 /* NOTE: A new stream can be added there if no header in file
2335 * (AVFMTCTX_NOHEADER). */
2336 ret = read_frame_internal(ic, &pkt1);
2337 if (ret == AVERROR(EAGAIN))
/* EOF path: flush decoders with an empty packet to drain delayed frames. */
2342 AVPacket empty_pkt = { 0 };
2344 av_init_packet(&empty_pkt);
2346 /* We could not have all the codec parameters before EOF. */
2348 for (i = 0; i < ic->nb_streams; i++) {
2349 st = ic->streams[i];
2351 /* flush the decoders */
2352 if (st->info->found_decoder == 1) {
2354 err = try_decode_frame(ic, st, &empty_pkt,
2355 (options && i < orig_nb_streams)
2356 ? &options[i] : NULL);
2357 } while (err > 0 && !has_codec_parameters(st));
2361 av_log(ic, AV_LOG_WARNING,
2362 "decoding for stream %d failed\n", st->index);
2363 } else if (!has_codec_parameters(st)) {
2365 avcodec_string(buf, sizeof(buf), st->internal->avctx, 0);
2366 av_log(ic, AV_LOG_WARNING,
2367 "Could not find codec parameters (%s)\n", buf);
2377 if (!(ic->flags & AVFMT_FLAG_NOBUFFER)) {
2378 ret = add_to_pktbuf(&ic->internal->packet_buffer, pkt,
2379 &ic->internal->packet_buffer_end, 0);
2381 goto find_stream_info_err;
2384 read_size += pkt->size;
2386 st = ic->streams[pkt->stream_index];
2387 avctx = st->internal->avctx;
2388 if (!st->internal->avctx_inited) {
2389 ret = avcodec_parameters_to_context(avctx, st->codecpar);
2391 goto find_stream_info_err;
2392 st->internal->avctx_inited = 1;
/* DTS bookkeeping for frame-rate estimation. */
2395 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2396 /* check for non-increasing dts */
2397 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2398 st->info->fps_last_dts >= pkt->dts) {
2399 av_log(ic, AV_LOG_WARNING,
2400 "Non-increasing DTS in stream %d: packet %d with DTS "
2401 "%"PRId64", packet %d with DTS %"PRId64"\n",
2402 st->index, st->info->fps_last_dts_idx,
2403 st->info->fps_last_dts, st->codec_info_nb_frames,
2405 st->info->fps_first_dts =
2406 st->info->fps_last_dts = AV_NOPTS_VALUE;
2408 /* Check for a discontinuity in dts. If the difference in dts
2409 * is more than 1000 times the average packet duration in the
2410 * sequence, we treat it as a discontinuity. */
2411 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2412 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2413 (pkt->dts - st->info->fps_last_dts) / 1000 >
2414 (st->info->fps_last_dts - st->info->fps_first_dts) /
2415 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2416 av_log(ic, AV_LOG_WARNING,
2417 "DTS discontinuity in stream %d: packet %d with DTS "
2418 "%"PRId64", packet %d with DTS %"PRId64"\n",
2419 st->index, st->info->fps_last_dts_idx,
2420 st->info->fps_last_dts, st->codec_info_nb_frames,
2422 st->info->fps_first_dts =
2423 st->info->fps_last_dts = AV_NOPTS_VALUE;
2426 /* update stored dts values */
2427 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2428 st->info->fps_first_dts = pkt->dts;
2429 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2431 st->info->fps_last_dts = pkt->dts;
2432 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2434 /* check max_analyze_duration */
2435 if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2436 AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2437 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached\n",
2438 ic->max_analyze_duration);
2439 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2440 av_packet_unref(pkt);
2444 if (!st->internal->avctx->extradata) {
2445 ret = extract_extradata(st, pkt);
2447 goto find_stream_info_err;
2450 /* If still no information, we try to open the codec and to
2451 * decompress the frame. We try to avoid that in most cases as
2452 * it takes longer and uses more memory. For MPEG-4, we need to
2453 * decompress for QuickTime.
2455 * If AV_CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2456 * least one frame of codec data, this makes sure the codec initializes
2457 * the channel configuration and does not only trust the values from
2459 try_decode_frame(ic, st, pkt,
2460 (options && i < orig_nb_streams) ? &options[i] : NULL);
2462 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2463 av_packet_unref(pkt);
2465 st->codec_info_nb_frames++;
2469 // close codecs which were opened in try_decode_frame()
2470 for (i = 0; i < ic->nb_streams; i++) {
2471 st = ic->streams[i];
2472 avcodec_close(st->internal->avctx);
/* Pass 3: post-processing — frame-rate estimation and audio disposition. */
2474 for (i = 0; i < ic->nb_streams; i++) {
2475 st = ic->streams[i];
2476 avctx = st->internal->avctx;
2477 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2478 /* estimate average framerate if not set by demuxer */
2479 if (!st->avg_frame_rate.num &&
2480 st->info->fps_last_dts != st->info->fps_first_dts) {
2481 int64_t delta_dts = st->info->fps_last_dts -
2482 st->info->fps_first_dts;
2483 int delta_packets = st->info->fps_last_dts_idx -
2484 st->info->fps_first_dts_idx;
2486 double best_error = 0.01;
2488 if (delta_dts >= INT64_MAX / st->time_base.num ||
2489 delta_packets >= INT64_MAX / st->time_base.den ||
2492 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2493 delta_packets * (int64_t) st->time_base.den,
2494 delta_dts * (int64_t) st->time_base.num, 60000);
2496 /* Round guessed framerate to a "standard" framerate if it's
2497 * within 1% of the original estimate. */
2498 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
2499 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
2500 double error = fabs(av_q2d(st->avg_frame_rate) /
2501 av_q2d(std_fps) - 1);
2503 if (error < best_error) {
2505 best_fps = std_fps.num;
2509 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2510 best_fps, 12 * 1001, INT_MAX);
2512 } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2513 if (!avctx->bits_per_coded_sample)
2514 avctx->bits_per_coded_sample =
2515 av_get_bits_per_sample(avctx->codec_id);
2516 // set stream disposition based on audio service type
2517 switch (avctx->audio_service_type) {
2518 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2519 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
2521 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2522 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
2524 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2525 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
2527 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2528 st->disposition = AV_DISPOSITION_COMMENT;
2530 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2531 st->disposition = AV_DISPOSITION_KARAOKE;
2537 compute_chapters_end(ic);
2539 /* update the stream parameters from the internal codec contexts */
2540 for (i = 0; i < ic->nb_streams; i++) {
2541 st = ic->streams[i];
2542 if (!st->internal->avctx_inited)
2545 ret = avcodec_parameters_from_context(st->codecpar, st->internal->avctx);
2547 goto find_stream_info_err;
2549 #if FF_API_LAVF_AVCTX
2550 FF_DISABLE_DEPRECATION_WARNINGS
2551 ret = avcodec_parameters_to_context(st->codec, st->codecpar);
2553 goto find_stream_info_err;
2555 if (st->internal->avctx->subtitle_header) {
2556 st->codec->subtitle_header = av_malloc(st->internal->avctx->subtitle_header_size);
2557 if (!st->codec->subtitle_header)
2558 goto find_stream_info_err;
2559 st->codec->subtitle_header_size = st->internal->avctx->subtitle_header_size;
2560 memcpy(st->codec->subtitle_header, st->internal->avctx->subtitle_header,
2561 st->codec->subtitle_header_size);
2563 FF_ENABLE_DEPRECATION_WARNINGS
2566 st->internal->avctx_inited = 0;
2569 estimate_timings(ic, old_offset);
/* Cleanup shared by success and error paths. */
2571 find_stream_info_err:
2572 for (i = 0; i < ic->nb_streams; i++) {
2573 av_freep(&ic->streams[i]->info);
2574 av_bsf_free(&ic->streams[i]->internal->extract_extradata.bsf);
2575 av_packet_free(&ic->streams[i]->internal->extract_extradata.pkt);
/* Return the program containing stream index s, or NULL when the stream
 * belongs to no program (NULL return is elided from this listing). */
2580 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2584 for (i = 0; i < ic->nb_programs; i++)
2585 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2586 if (ic->programs[i]->stream_index[j] == s)
2587 return ic->programs[i];
/* Pick the "best" stream of the requested media type: restrict the search
 * to the related stream's program when one is given, skip impaired-
 * audience dispositions, require a decoder when decoder_ret is wanted,
 * and prefer the stream with the most probed frames.  If the program
 * search finds nothing, it is retried over all streams. */
2591 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
2592 int wanted_stream_nb, int related_stream,
2593 AVCodec **decoder_ret, int flags)
2595 int i, nb_streams = ic->nb_streams;
2596 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2597 unsigned *program = NULL;
2598 AVCodec *decoder = NULL, *best_decoder = NULL;
2600 if (related_stream >= 0 && wanted_stream_nb < 0) {
2601 AVProgram *p = find_program_from_stream(ic, related_stream);
2603 program = p->stream_index;
2604 nb_streams = p->nb_stream_indexes;
2607 for (i = 0; i < nb_streams; i++) {
2608 int real_stream_index = program ? program[i] : i;
2609 AVStream *st = ic->streams[real_stream_index];
2610 AVCodecParameters *par = st->codecpar;
2611 if (par->codec_type != type)
2613 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2615 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
2616 AV_DISPOSITION_VISUAL_IMPAIRED))
2619 decoder = avcodec_find_decoder(par->codec_id);
2622 ret = AVERROR_DECODER_NOT_FOUND;
2626 if (best_count >= st->codec_info_nb_frames)
2628 best_count = st->codec_info_nb_frames;
2629 ret = real_stream_index;
2630 best_decoder = decoder;
2631 if (program && i == nb_streams - 1 && ret < 0) {
2633 nb_streams = ic->nb_streams;
2634 /* no related stream found, try again with everything */
2639 *decoder_ret = best_decoder;
2643 /*******************************************************/
2645 int av_read_play(AVFormatContext *s)
2647 if (s->iformat->read_play)
2648 return s->iformat->read_play(s);
2650 return avio_pause(s->pb, 0);
2651 return AVERROR(ENOSYS);
2654 int av_read_pause(AVFormatContext *s)
2656 if (s->iformat->read_pause)
2657 return s->iformat->read_pause(s);
2659 return avio_pause(s->pb, 1);
2660 return AVERROR(ENOSYS);
2663 static void free_stream(AVStream **pst)
2665 AVStream *st = *pst;
2671 for (i = 0; i < st->nb_side_data; i++)
2672 av_freep(&st->side_data[i].data);
2673 av_freep(&st->side_data);
2676 av_parser_close(st->parser);
2678 if (st->attached_pic.data)
2679 av_packet_unref(&st->attached_pic);
2682 avcodec_free_context(&st->internal->avctx);
2683 av_bsf_free(&st->internal->extract_extradata.bsf);
2684 av_packet_free(&st->internal->extract_extradata.pkt);
2686 av_freep(&st->internal);
2688 av_dict_free(&st->metadata);
2689 avcodec_parameters_free(&st->codecpar);
2690 av_freep(&st->probe_data.buf);
2691 av_free(st->index_entries);
2692 #if FF_API_LAVF_AVCTX
2693 FF_DISABLE_DEPRECATION_WARNINGS
2694 av_free(st->codec->extradata);
2695 av_free(st->codec->subtitle_header);
2697 FF_ENABLE_DEPRECATION_WARNINGS
2699 av_free(st->priv_data);
2705 void avformat_free_context(AVFormatContext *s)
2713 if (s->iformat && s->iformat->priv_class && s->priv_data)
2714 av_opt_free(s->priv_data);
2716 for (i = 0; i < s->nb_streams; i++)
2717 free_stream(&s->streams[i]);
2719 for (i = s->nb_programs - 1; i >= 0; i--) {
2720 av_dict_free(&s->programs[i]->metadata);
2721 av_freep(&s->programs[i]->stream_index);
2722 av_freep(&s->programs[i]);
2724 av_freep(&s->programs);
2725 av_freep(&s->priv_data);
2726 while (s->nb_chapters--) {
2727 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2728 av_free(s->chapters[s->nb_chapters]);
2730 av_freep(&s->chapters);
2731 av_dict_free(&s->metadata);
2732 av_freep(&s->streams);
2733 av_freep(&s->internal);
2737 void avformat_close_input(AVFormatContext **ps)
2739 AVFormatContext *s = *ps;
2740 AVIOContext *pb = s->pb;
2742 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2743 (s->flags & AVFMT_FLAG_CUSTOM_IO))
2746 flush_packet_queue(s);
2749 if (s->iformat->read_close)
2750 s->iformat->read_close(s);
2752 avformat_free_context(s);
2759 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
2764 if (av_reallocp_array(&s->streams, s->nb_streams + 1,
2765 sizeof(*s->streams)) < 0) {
2770 st = av_mallocz(sizeof(AVStream));
2773 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2778 #if FF_API_LAVF_AVCTX
2779 FF_DISABLE_DEPRECATION_WARNINGS
2780 st->codec = avcodec_alloc_context3(c);
2786 FF_ENABLE_DEPRECATION_WARNINGS
2789 st->internal = av_mallocz(sizeof(*st->internal));
2794 #if FF_API_LAVF_AVCTX
2795 FF_DISABLE_DEPRECATION_WARNINGS
2796 /* no default bitrate if decoding */
2797 st->codec->bit_rate = 0;
2798 FF_ENABLE_DEPRECATION_WARNINGS
2801 /* default pts setting is MPEG-like */
2802 avpriv_set_pts_info(st, 33, 1, 90000);
2803 /* we set the current DTS to 0 so that formats without any timestamps
2804 * but durations get some timestamps, formats with some unknown
2805 * timestamps have their first few packets buffered and the
2806 * timestamps corrected before they are returned to the user */
2809 st->cur_dts = AV_NOPTS_VALUE;
2812 st->codecpar = avcodec_parameters_alloc();
2816 st->internal->avctx = avcodec_alloc_context3(NULL);
2817 if (!st->internal->avctx)
2820 st->index = s->nb_streams;
2821 st->start_time = AV_NOPTS_VALUE;
2822 st->duration = AV_NOPTS_VALUE;
2823 st->first_dts = AV_NOPTS_VALUE;
2824 st->probe_packets = MAX_PROBE_PACKETS;
2826 st->last_IP_pts = AV_NOPTS_VALUE;
2827 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
2828 st->pts_buffer[i] = AV_NOPTS_VALUE;
2830 st->sample_aspect_ratio = (AVRational) { 0, 1 };
2832 st->info->fps_first_dts = AV_NOPTS_VALUE;
2833 st->info->fps_last_dts = AV_NOPTS_VALUE;
2835 #if FF_API_LAVF_AVCTX
2836 st->internal->need_codec_update = 1;
2839 s->streams[s->nb_streams++] = st;
2846 AVProgram *av_new_program(AVFormatContext *ac, int id)
2848 AVProgram *program = NULL;
2851 av_log(ac, AV_LOG_TRACE, "new_program: id=0x%04x\n", id);
2853 for (i = 0; i < ac->nb_programs; i++)
2854 if (ac->programs[i]->id == id)
2855 program = ac->programs[i];
2858 program = av_mallocz(sizeof(AVProgram));
2861 dynarray_add(&ac->programs, &ac->nb_programs, program);
2862 program->discard = AVDISCARD_NONE;
2869 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
2870 int64_t start, int64_t end, const char *title)
2872 AVChapter *chapter = NULL;
2875 for (i = 0; i < s->nb_chapters; i++)
2876 if (s->chapters[i]->id == id)
2877 chapter = s->chapters[i];
2880 chapter = av_mallocz(sizeof(AVChapter));
2883 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2885 av_dict_set(&chapter->metadata, "title", title, 0);
2887 chapter->time_base = time_base;
2888 chapter->start = start;
2894 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
2897 AVProgram *program = NULL;
2899 if (idx >= ac->nb_streams) {
2900 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
2904 for (i = 0; i < ac->nb_programs; i++) {
2905 if (ac->programs[i]->id != progid)
2907 program = ac->programs[i];
2908 for (j = 0; j < program->nb_stream_indexes; j++)
2909 if (program->stream_index[j] == idx)
2912 if (av_reallocp_array(&program->stream_index,
2913 program->nb_stream_indexes + 1,
2914 sizeof(*program->stream_index)) < 0) {
2915 program->nb_stream_indexes = 0;
2918 program->stream_index[program->nb_stream_indexes++] = idx;
2923 uint64_t ff_ntp_time(void)
2925 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/**
 * Build a frame filename from a printf-like pattern.
 *
 * Exactly one "%d" (optionally "%0Nd") in path is replaced by number;
 * "%%" emits a literal '%'. Any other %-directive, a second %d, a
 * missing %d, or output exceeding buf_size causes failure.
 *
 * @return 0 on success, -1 on failure (buf holds a truncated string)
 */
int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q = buf;
    p = path;
    percentd_found = 0;
    for (;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            /* collect an optional zero-padding width, e.g. %05d */
            do {
                nd = 0;
                while (av_isdigit(*p))
                    nd = nd * 10 + *p++ - '0';
                c = *p++;
            } while (av_isdigit(c));

            switch (c) {
            case '%':
                goto addchar;
            case 'd':
                if (percentd_found)
                    goto fail; /* only one %d allowed */
                percentd_found = 1;
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;
            }
        } else {
addchar:
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;
fail:
    *q = '\0';
    return -1;
}
/**
 * Split a URL of the form proto://user:pass@host:port/path into its
 * components. Any output buffer may be sized 0 to skip that component;
 * *port_ptr is set to -1 when no port is present. A string without
 * "proto:" is treated as a plain path.
 */
void av_url_split(char *proto, int proto_size,
                  char *authorization, int authorization_size,
                  char *hostname, int hostname_size,
                  int *port_ptr, char *path, int path_size, const char *url)
{
    const char *p, *ls, *at, *col, *brk;

    /* reset all outputs first */
    if (port_ptr)
        *port_ptr = -1;
    if (proto_size > 0)
        proto[0] = 0;
    if (authorization_size > 0)
        authorization[0] = 0;
    if (hostname_size > 0)
        hostname[0] = 0;
    if (path_size > 0)
        path[0] = 0;

    /* parse protocol */
    if ((p = strchr(url, ':'))) {
        av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
        p++; /* skip ':' */
        if (*p == '/')
            p++;
        if (*p == '/')
            p++;
    } else {
        /* no protocol means plain filename */
        av_strlcpy(path, url, path_size);
        return;
    }

    /* separate path from hostname */
    ls = strchr(p, '/');
    if (!ls)
        ls = strchr(p, '?');
    if (ls)
        av_strlcpy(path, ls, path_size);
    else
        ls = &p[strlen(p)]; // XXX

    /* the rest is hostname, use that to parse auth/port */
    if (ls != p) {
        /* authorization (user[:pass]@hostname) */
        if ((at = strchr(p, '@')) && at < ls) {
            av_strlcpy(authorization, p,
                       FFMIN(authorization_size, at + 1 - p));
            p = at + 1; /* skip '@' */
        }

        if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
            /* [host]:port — bracketed IPv6 literal */
            av_strlcpy(hostname, p + 1,
                       FFMIN(hostname_size, brk - p));
            if (brk[1] == ':' && port_ptr)
                *port_ptr = atoi(brk + 2);
        } else if ((col = strchr(p, ':')) && col < ls) {
            av_strlcpy(hostname, p,
                       FFMIN(col + 1 - p, hostname_size));
            if (port_ptr)
                *port_ptr = atoi(col + 1);
        } else {
            av_strlcpy(hostname, p,
                       FFMIN(ls + 1 - p, hostname_size));
        }
    }
}
/**
 * Write the hexadecimal representation of s bytes from src into buff
 * (2 * s characters, NOT NUL-terminated).
 *
 * @param lowercase nonzero selects lowercase digits
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    static const char digits_uc[] = "0123456789ABCDEF";
    static const char digits_lc[] = "0123456789abcdef";
    const char *digits = lowercase ? digits_lc : digits_uc;
    int i;

    for (i = 0; i < s; i++) {
        buff[2 * i]     = digits[src[i] >> 4];
        buff[2 * i + 1] = digits[src[i] & 0x0F];
    }

    return buff;
}
3069 int ff_hex_to_data(uint8_t *data, const char *p)
3076 p += strspn(p, SPACE_CHARS);
3079 c = av_toupper((unsigned char) *p++);
3080 if (c >= '0' && c <= '9')
3082 else if (c >= 'A' && c <= 'F')
3097 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
3098 unsigned int pts_num, unsigned int pts_den)
3101 if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
3102 if (new_tb.num != pts_num)
3103 av_log(NULL, AV_LOG_DEBUG,
3104 "st:%d removing common factor %d from timebase\n",
3105 s->index, pts_num / new_tb.num);
3107 av_log(NULL, AV_LOG_WARNING,
3108 "st:%d has too large timebase, reducing\n", s->index);
3110 if (new_tb.num <= 0 || new_tb.den <= 0) {
3111 av_log(NULL, AV_LOG_ERROR,
3112 "Ignoring attempt to set invalid timebase for st:%d\n",
3116 s->time_base = new_tb;
3117 s->pts_wrap_bits = pts_wrap_bits;
3120 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3123 const char *ptr = str;
3125 /* Parse key=value pairs. */
3128 char *dest = NULL, *dest_end;
3129 int key_len, dest_len = 0;
3131 /* Skip whitespace and potential commas. */
3132 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
3139 if (!(ptr = strchr(key, '=')))
3142 key_len = ptr - key;
3144 callback_get_buf(context, key, key_len, &dest, &dest_len);
3145 dest_end = dest + dest_len - 1;
3149 while (*ptr && *ptr != '\"') {
3153 if (dest && dest < dest_end)
3157 if (dest && dest < dest_end)
3165 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
3166 if (dest && dest < dest_end)
3174 int ff_find_stream_index(AVFormatContext *s, int id)
3177 for (i = 0; i < s->nb_streams; i++)
3178 if (s->streams[i]->id == id)
/**
 * Convert an ISO-8601-style date string to a Unix timestamp (UTC).
 * Both the space-separated and the 'T'-separated forms are accepted;
 * the 'T' form is used only when the space form does not match.
 */
int64_t ff_iso8601_to_unix_time(const char *datestr)
{
    struct tm time1 = { 0 }, time2 = { 0 };
    const char *ret1, *ret2;

    ret1 = av_small_strptime(datestr, "%Y - %m - %d %T", &time1);
    ret2 = av_small_strptime(datestr, "%Y - %m - %dT%T", &time2);
    if (!ret1 && ret2)
        return av_timegm(&time2);
    return av_timegm(&time1);
}
3195 int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
3199 if (ofmt->query_codec)
3200 return ofmt->query_codec(codec_id, std_compliance);
3201 else if (ofmt->codec_tag)
3202 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
3203 else if (codec_id == ofmt->video_codec ||
3204 codec_id == ofmt->audio_codec ||
3205 codec_id == ofmt->subtitle_codec)
3208 return AVERROR_PATCHWELCOME;
/**
 * Global network-layer initialization (sockets, TLS).
 * A no-op returning 0 when networking is not compiled in.
 */
int avformat_network_init(void)
{
#if CONFIG_NETWORK
    int ret;
    ff_network_inited_globally = 1;
    if ((ret = ff_network_init()) < 0)
        return ret;
    ff_tls_init();
#endif
    return 0;
}
3223 int avformat_network_deinit(void)
3232 int ff_add_param_change(AVPacket *pkt, int32_t channels,
3233 uint64_t channel_layout, int32_t sample_rate,
3234 int32_t width, int32_t height)
3240 return AVERROR(EINVAL);
3243 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
3245 if (channel_layout) {
3247 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
3251 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
3253 if (width || height) {
3255 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
3257 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
3259 return AVERROR(ENOMEM);
3260 bytestream_put_le32(&data, flags);
3262 bytestream_put_le32(&data, channels);
3264 bytestream_put_le64(&data, channel_layout);
3266 bytestream_put_le32(&data, sample_rate);
3267 if (width || height) {
3268 bytestream_put_le32(&data, width);
3269 bytestream_put_le32(&data, height);
/**
 * ff_generate_avci_extradata(): install canned H.264 parameter-set
 * extradata for an AVC-Intra stream, selected by coded width (1920 /
 * 1440 / 1280) and, for 1920, by field order. Any previous extradata on
 * st->codecpar is freed and replaced with a zero-padded copy of the
 * selected table; returns AVERROR(ENOMEM) on allocation failure.
 *
 * NOTE(review): this listing is partially elided (the embedded original
 * line numbers jump, e.g. 3286 -> 3288), so the byte tables below are
 * incomplete fragments of the original arrays — do not use them as-is.
 */
3274 int ff_generate_avci_extradata(AVStream *st)
/* AVC-Intra class 100, 1080 progressive.
 * Looks like Annex-B start codes with 0x67 (SPS) / 0x68 (PPS) NAL
 * headers — verify against the original table. */
3276 static const uint8_t avci100_1080p_extradata[] = {
3278 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3279 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3280 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3281 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
3282 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
3283 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
3284 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
3285 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
3286 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3288 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
/* AVC-Intra class 100, 1080 interlaced */
3291 static const uint8_t avci100_1080i_extradata[] = {
3293 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3294 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3295 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3296 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
3297 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
3298 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
3299 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
3300 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
3301 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
3302 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
3303 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
3305 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
/* AVC-Intra class 50, 1080 interlaced (1440 coded width) */
3308 static const uint8_t avci50_1080i_extradata[] = {
3310 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
3311 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
3312 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
3313 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
3314 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
3315 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
3316 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
3317 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
3318 0x81, 0x13, 0xf7, 0xff, 0x80, 0x01, 0x80, 0x02,
3319 0x71, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
3320 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
3322 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
/* AVC-Intra class 100, 720p */
3325 static const uint8_t avci100_720p_extradata[] = {
3327 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3328 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
3329 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
3330 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
3331 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
3332 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
3333 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
3334 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
3335 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
3336 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
3338 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
/* Select the table matching the stream's coded size and field order;
 * streams with other widths fall through with data == NULL. */
3342 const uint8_t *data = NULL;
3345 if (st->codecpar->width == 1920) {
3346 if (st->codecpar->field_order == AV_FIELD_PROGRESSIVE) {
3347 data = avci100_1080p_extradata;
3348 size = sizeof(avci100_1080p_extradata);
3350 data = avci100_1080i_extradata;
3351 size = sizeof(avci100_1080i_extradata);
3353 } else if (st->codecpar->width == 1440) {
3354 data = avci50_1080i_extradata;
3355 size = sizeof(avci50_1080i_extradata);
3356 } else if (st->codecpar->width == 1280) {
3357 data = avci100_720p_extradata;
3358 size = sizeof(avci100_720p_extradata);
/* Replace any existing extradata with a zero-padded copy of the table. */
3364 av_freep(&st->codecpar->extradata);
3365 st->codecpar->extradata_size = 0;
3366 st->codecpar->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
3367 if (!st->codecpar->extradata)
3368 return AVERROR(ENOMEM);
3370 memcpy(st->codecpar->extradata, data, size);
3371 st->codecpar->extradata_size = size;
3376 uint8_t *av_stream_get_side_data(AVStream *st, enum AVPacketSideDataType type,
3381 for (i = 0; i < st->nb_side_data; i++) {
3382 if (st->side_data[i].type == type) {
3384 *size = st->side_data[i].size;
3385 return st->side_data[i].data;
3391 uint8_t *av_stream_new_side_data(AVStream *st, enum AVPacketSideDataType type,
3394 AVPacketSideData *sd, *tmp;
3396 uint8_t *data = av_malloc(size);
3401 for (i = 0; i < st->nb_side_data; i++) {
3402 sd = &st->side_data[i];
3404 if (sd->type == type) {
3405 av_freep(&sd->data);
3412 tmp = av_realloc_array(st->side_data, st->nb_side_data + 1, sizeof(*tmp));
3418 st->side_data = tmp;
3421 sd = &st->side_data[st->nb_side_data - 1];
3428 void ff_format_io_close(AVFormatContext *s, AVIOContext **pb)
3431 s->io_close(s, *pb);