2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
42 #include "audiointerleave.h"
55 * various utility functions for use within Libav
58 unsigned avformat_version(void)
60 return LIBAVFORMAT_VERSION_INT;
63 const char *avformat_configuration(void)
65 return LIBAV_CONFIGURATION;
68 const char *avformat_license(void)
70 #define LICENSE_PREFIX "libavformat license: "
71 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
74 /* an arbitrarily chosen "sane" max packet size -- 50M */
75 #define SANE_CHUNK_SIZE (50000000)
77 /* Read the data in sane-sized chunks and append to pkt.
78 * Return the number of bytes read or an error. */
79 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
81 int64_t chunk_size = size;
82 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
83 int orig_size = pkt->size;
87 int prev_size = pkt->size;
90 /* When the caller requests a lot of data, limit it to the amount
91 * left in file or SANE_CHUNK_SIZE when it is not known. */
92 if (size > SANE_CHUNK_SIZE) {
93 int64_t filesize = avio_size(s) - avio_tell(s);
94 chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
96 read_size = FFMIN(size, chunk_size);
98 ret = av_grow_packet(pkt, read_size);
102 ret = avio_read(s, pkt->data + prev_size, read_size);
103 if (ret != read_size) {
104 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
113 av_packet_unref(pkt);
114 return pkt->size > orig_size ? pkt->size - orig_size : ret;
117 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
122 pkt->pos = avio_tell(s);
124 return append_packet_chunked(s, pkt, size);
127 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
130 return av_get_packet(s, pkt, size);
131 return append_packet_chunked(s, pkt, size);
/**
 * Check whether filename contains a frame-number pattern usable for image
 * sequences, by trying to format frame number 1 into it.
 *
 * @return 1 if a number pattern is present, 0 otherwise
 */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename &&
           (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
}
/* Run format probing on the data accumulated in 'pd' for stream 'st' and,
 * if a raw-ES demuxer matches, map its name to a codec id/type for the
 * stream via the table below.
 * NOTE(review): extraction dropped several original lines here (struct
 * fields, braces, table terminator) — reconcile against upstream utils.c. */
141 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
142 AVProbeData *pd, int score)
144 static const struct {
147 enum AVMediaType type;
149 { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO },
150 { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO },
151 { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO },
152 { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO },
153 { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO },
154 { "latm", AV_CODEC_ID_AAC_LATM, AVMEDIA_TYPE_AUDIO },
155 { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO },
156 { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO },
157 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
160 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
164 av_log(s, AV_LOG_DEBUG,
165 "Probe with size=%d, packets=%d detected %s with score=%d\n",
166 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
/* Match the probed demuxer name against the table and copy id/type into
 * the stream's codec parameters. */
168 for (i = 0; fmt_id_type[i].name; i++) {
169 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
170 st->codecpar->codec_id = fmt_id_type[i].id;
171 st->codecpar->codec_type = fmt_id_type[i].type;
/* Keep the deprecated public AVStream.codec in sync while it still exists. */
172 #if FF_API_LAVF_AVCTX
173 FF_DISABLE_DEPRECATION_WARNINGS
174 st->codec->codec_type = st->codecpar->codec_type;
175 st->codec->codec_id = st->codecpar->codec_id;
176 FF_ENABLE_DEPRECATION_WARNINGS
185 /************************************************************/
186 /* input media file */
188 /* Open input file and probe the format if necessary. */
189 static int init_input(AVFormatContext *s, const char *filename,
190 AVDictionary **options)
193 AVProbeData pd = { filename, NULL, 0 };
196 s->flags |= AVFMT_FLAG_CUSTOM_IO;
198 return av_probe_input_buffer(s->pb, &s->iformat, filename,
200 else if (s->iformat->flags & AVFMT_NOFILE)
201 return AVERROR(EINVAL);
205 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
206 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
209 ret = s->io_open(s, &s->pb, filename, AVIO_FLAG_READ, options);
214 return av_probe_input_buffer(s->pb, &s->iformat, filename,
218 static int add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
219 AVPacketList **plast_pktl, int ref)
221 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
225 return AVERROR(ENOMEM);
228 if ((ret = av_packet_ref(&pktl->pkt, pkt)) < 0) {
237 (*plast_pktl)->next = pktl;
239 *packet_buffer = pktl;
241 /* Add the packet in the buffered packet list. */
246 static int queue_attached_pictures(AVFormatContext *s)
249 for (i = 0; i < s->nb_streams; i++)
250 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
251 s->streams[i]->discard < AVDISCARD_ALL) {
253 ret = add_to_pktbuf(&s->internal->raw_packet_buffer,
254 &s->streams[i]->attached_pic,
255 &s->internal->raw_packet_buffer_end, 1);
262 #if FF_API_LAVF_AVCTX
263 FF_DISABLE_DEPRECATION_WARNINGS
264 static int update_stream_avctx(AVFormatContext *s)
267 for (i = 0; i < s->nb_streams; i++) {
268 AVStream *st = s->streams[i];
270 if (!st->internal->need_codec_update)
273 ret = avcodec_parameters_to_context(st->codec, st->codecpar);
277 st->internal->need_codec_update = 0;
281 FF_ENABLE_DEPRECATION_WARNINGS
/* Open an input stream and read the header: allocates the context if the
 * caller passed none, applies options, opens/probes the input, allocates
 * demuxer private data, reads ID3v2 metadata and the container header,
 * queues attached pictures and records the data offset.
 * NOTE(review): extraction dropped many original lines here (declarations,
 * braces, goto fail paths, 'fail:' label) — the visible lines include both
 * the success path and the tail of the error-cleanup path. */
284 int avformat_open_input(AVFormatContext **ps, const char *filename,
285 AVInputFormat *fmt, AVDictionary **options)
287 AVFormatContext *s = *ps;
289 AVDictionary *tmp = NULL;
290 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
292 if (!s && !(s = avformat_alloc_context()))
293 return AVERROR(ENOMEM);
298 av_dict_copy(&tmp, *options, 0);
300 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
303 if ((ret = init_input(s, filename, &tmp)) < 0)
306 /* Check filename in case an image number is expected. */
307 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
308 if (!av_filename_number_test(filename)) {
309 ret = AVERROR(EINVAL);
314 s->duration = s->start_time = AV_NOPTS_VALUE;
315 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
317 /* Allocate private data. */
318 if (s->iformat->priv_data_size > 0) {
319 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
320 ret = AVERROR(ENOMEM);
323 if (s->iformat->priv_class) {
324 *(const AVClass **) s->priv_data = s->iformat->priv_class;
325 av_opt_set_defaults(s->priv_data);
326 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
331 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
333 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
335 if (s->iformat->read_header)
336 if ((ret = s->iformat->read_header(s)) < 0)
339 if (id3v2_extra_meta &&
340 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
342 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
344 if ((ret = queue_attached_pictures(s)) < 0)
/* Remember where packet data starts, for later seeking. */
347 if (s->pb && !s->internal->data_offset)
348 s->internal->data_offset = avio_tell(s->pb);
350 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
352 #if FF_API_LAVF_AVCTX
353 update_stream_avctx(s);
356 for (i = 0; i < s->nb_streams; i++)
357 s->streams[i]->internal->orig_codec_id = s->streams[i]->codecpar->codec_id;
360 av_dict_free(options);
/* Error path: release everything allocated so far. */
367 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
369 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
371 avformat_free_context(s);
376 /*******************************************************/
/* Accumulate packet data into st->probe_data while the stream's codec id is
 * AV_CODEC_ID_PROBE, and attempt codec detection once enough data arrived
 * (buffer crossed a power-of-two boundary) or probing is exhausted.
 * A NULL pkt flushes: probing is forced with whatever was collected.
 * NOTE(review): extraction dropped several original lines (declarations,
 * braces, early returns) from this block. */
378 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
380 if (st->codecpar->codec_id == AV_CODEC_ID_PROBE) {
381 AVProbeData *pd = &st->probe_data;
382 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
/* Grow the probe buffer and append the new payload, zero-padded. */
387 if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
388 AVPROBE_PADDING_SIZE)) < 0)
390 memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
391 pd->buf_size += pkt->size;
392 memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
394 st->probe_packets = 0;
396 av_log(s, AV_LOG_ERROR,
397 "nothing to probe for stream %d\n", st->index);
/* Probe when flushing, or each time the buffer doubles in size. */
402 if (!st->probe_packets ||
403 av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
404 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0
405 ? AVPROBE_SCORE_MAX / 4 : 0);
406 if (st->codecpar->codec_id != AV_CODEC_ID_PROBE) {
409 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
/* Low-level packet read: return a buffered raw packet if one is eligible,
 * otherwise call the demuxer's read_packet(), apply forced codec ids,
 * optionally drop corrupt packets, and feed probing streams. Packets for
 * streams still being probed are kept in the raw packet buffer.
 * NOTE(review): extraction dropped many original lines here (declarations,
 * braces, loop structure, error paths). */
416 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
/* First serve packets already queued in the raw packet buffer. */
422 AVPacketList *pktl = s->internal->raw_packet_buffer;
426 st = s->streams[pkt->stream_index];
427 if (st->codecpar->codec_id != AV_CODEC_ID_PROBE ||
428 !st->probe_packets ||
429 s->internal->raw_packet_buffer_remaining_size < pkt->size) {
431 if (st->probe_packets)
432 if ((err = probe_codec(s, st, NULL)) < 0)
434 pd = &st->probe_data;
437 s->internal->raw_packet_buffer = pktl->next;
438 s->internal->raw_packet_buffer_remaining_size += pkt->size;
/* Nothing buffered: pull a fresh packet from the demuxer. */
447 ret = s->iformat->read_packet(s, pkt);
449 if (!pktl || ret == AVERROR(EAGAIN))
/* On EOF/error, flush probing for all streams. */
451 for (i = 0; i < s->nb_streams; i++) {
453 if (st->probe_packets)
454 if ((err = probe_codec(s, st, NULL)) < 0)
461 AVPacket tmp = { 0 };
462 ret = av_packet_ref(&tmp, pkt);
468 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
469 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
470 av_log(s, AV_LOG_WARNING,
471 "Dropped corrupted packet (stream = %d)\n",
473 av_packet_unref(pkt);
477 st = s->streams[pkt->stream_index];
/* Honor user-forced codec ids per media type. */
479 switch (st->codecpar->codec_type) {
480 case AVMEDIA_TYPE_VIDEO:
481 if (s->video_codec_id)
482 st->codecpar->codec_id = s->video_codec_id;
484 case AVMEDIA_TYPE_AUDIO:
485 if (s->audio_codec_id)
486 st->codecpar->codec_id = s->audio_codec_id;
488 case AVMEDIA_TYPE_SUBTITLE:
489 if (s->subtitle_codec_id)
490 st->codecpar->codec_id = s->subtitle_codec_id;
494 if (!pktl && (st->codecpar->codec_id != AV_CODEC_ID_PROBE ||
/* Stream still probing: keep the packet buffered and account its size. */
498 err = add_to_pktbuf(&s->internal->raw_packet_buffer, pkt,
499 &s->internal->raw_packet_buffer_end, 0);
502 s->internal->raw_packet_buffer_remaining_size -= pkt->size;
504 if ((err = probe_codec(s, st, pkt)) < 0)
509 /**********************************************************/
512 * Return the frame duration in seconds. Return 0 if not available.
/* Compute the frame duration of 'pkt' as the rational *pnum / *pden.
 * Video: prefer avg_frame_rate, then a plausible time_base, then the codec
 * framerate (adjusted by the parser's repeat_pict); audio: derive from the
 * audio frame size and sample rate.
 * NOTE(review): extraction dropped several original lines (initial
 * *pnum/*pden reset, break statements, closing braces). */
514 void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st,
515 AVCodecParserContext *pc, AVPacket *pkt)
517 AVRational codec_framerate = s->iformat ? st->internal->avctx->framerate :
518 (AVRational){ 0, 1 };
523 switch (st->codecpar->codec_type) {
524 case AVMEDIA_TYPE_VIDEO:
525 if (st->avg_frame_rate.num) {
526 *pnum = st->avg_frame_rate.den;
527 *pden = st->avg_frame_rate.num;
528 } else if (st->time_base.num * 1000LL > st->time_base.den) {
529 *pnum = st->time_base.num;
530 *pden = st->time_base.den;
531 } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
532 *pnum = codec_framerate.den;
533 *pden = codec_framerate.num;
/* Fields repeated by the parser stretch the duration; guard against
 * integer overflow of the numerator. */
534 if (pc && pc->repeat_pict) {
535 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
536 *pden /= 1 + pc->repeat_pict;
538 *pnum *= 1 + pc->repeat_pict;
540 /* If this codec can be interlaced or progressive then we need
541 * a parser to compute duration of a packet. Thus if we have
542 * no parser in such case leave duration undefined. */
543 if (st->internal->avctx->ticks_per_frame > 1 && !pc)
547 case AVMEDIA_TYPE_AUDIO:
548 frame_size = av_get_audio_frame_duration2(st->codecpar, pkt->size);
549 if (frame_size <= 0 || st->codecpar->sample_rate <= 0)
552 *pden = st->codecpar->sample_rate;
559 static int is_intra_only(enum AVCodecID id)
561 const AVCodecDescriptor *d = avcodec_descriptor_get(id);
564 if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
569 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
570 int64_t dts, int64_t pts)
572 AVStream *st = s->streams[stream_index];
573 AVPacketList *pktl = s->internal->packet_buffer;
575 if (st->first_dts != AV_NOPTS_VALUE ||
576 dts == AV_NOPTS_VALUE ||
577 st->cur_dts == AV_NOPTS_VALUE)
580 st->first_dts = dts - st->cur_dts;
583 for (; pktl; pktl = pktl->next) {
584 if (pktl->pkt.stream_index != stream_index)
586 // FIXME: think more about this check
587 if (pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
588 pktl->pkt.pts += st->first_dts;
590 if (pktl->pkt.dts != AV_NOPTS_VALUE)
591 pktl->pkt.dts += st->first_dts;
593 if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
594 st->start_time = pktl->pkt.pts;
596 if (st->start_time == AV_NOPTS_VALUE)
597 st->start_time = pts;
/* Fill in missing dts/pts/duration on packets already buffered for
 * 'stream_index', extrapolating from first_dts (or cur_dts) using the
 * given per-packet 'duration'.
 * NOTE(review): extraction dropped several original lines (cur_dts
 * declaration, loop-exit conditions, cur_dts advancement, braces). */
600 static void update_initial_durations(AVFormatContext *s, AVStream *st,
601 int stream_index, int duration)
603 AVPacketList *pktl = s->internal->packet_buffer;
/* Walk back from first_dts over the buffered packets of this stream. */
606 if (st->first_dts != AV_NOPTS_VALUE) {
607 cur_dts = st->first_dts;
608 for (; pktl; pktl = pktl->next) {
609 if (pktl->pkt.stream_index == stream_index) {
610 if (pktl->pkt.pts != pktl->pkt.dts ||
611 pktl->pkt.dts != AV_NOPTS_VALUE ||
617 pktl = s->internal->packet_buffer;
618 st->first_dts = cur_dts;
619 } else if (st->cur_dts)
/* Second pass: assign the extrapolated timestamps and durations. */
622 for (; pktl; pktl = pktl->next) {
623 if (pktl->pkt.stream_index != stream_index)
625 if (pktl->pkt.pts == pktl->pkt.dts &&
626 pktl->pkt.dts == AV_NOPTS_VALUE &&
627 !pktl->pkt.duration) {
628 pktl->pkt.dts = cur_dts;
629 if (!st->internal->avctx->has_b_frames)
630 pktl->pkt.pts = cur_dts;
632 if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
633 pktl->pkt.duration = duration;
637 if (st->first_dts == AV_NOPTS_VALUE)
638 st->cur_dts = cur_dts;
/* Fill in missing packet fields (pts, dts, duration, key flag) using the
 * stream state, the optional parser context and heuristics for B-frame
 * delay, timestamp wrapping and byte-offset-based demuxers.
 * NOTE(review): extraction dropped many original lines from this function
 * (early returns, braces, several assignments) — timestamp logic here is
 * intricate; verify against upstream before modifying. */
641 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
642 AVCodecParserContext *pc, AVPacket *pkt)
644 int num, den, presentation_delayed, delay, i;
647 if (s->flags & AVFMT_FLAG_NOFILLIN)
650 if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
651 pkt->dts = AV_NOPTS_VALUE;
653 /* do we have a video B-frame ? */
654 delay = st->internal->avctx->has_b_frames;
655 presentation_delayed = 0;
657 /* XXX: need has_b_frame, but cannot get it if the codec is
660 pc && pc->pict_type != AV_PICTURE_TYPE_B)
661 presentation_delayed = 1;
/* Undo timestamp wrap-around when dts jumped far above pts. */
663 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
664 st->pts_wrap_bits < 63 &&
665 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
666 pkt->dts -= 1LL << st->pts_wrap_bits;
669 /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
670 * We take the conservative approach and discard both.
671 * Note: If this is misbehaving for an H.264 file, then possibly
672 * presentation_delayed is not set correctly. */
673 if (delay == 1 && pkt->dts == pkt->pts &&
674 pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
675 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
676 pkt->dts = AV_NOPTS_VALUE;
/* Derive a duration from the frame rate when the demuxer gave none. */
679 if (pkt->duration == 0 && st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
680 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
682 pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
683 den * (int64_t) st->time_base.num,
686 if (pkt->duration != 0 && s->internal->packet_buffer)
687 update_initial_durations(s, st, pkt->stream_index,
692 /* Correct timestamps with byte offset if demuxers only have timestamps
693 * on packet boundaries */
694 if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
695 /* this will estimate bitrate based on this frame's duration and size */
696 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
697 if (pkt->pts != AV_NOPTS_VALUE)
699 if (pkt->dts != AV_NOPTS_VALUE)
703 /* This may be redundant, but it should not hurt. */
704 if (pkt->dts != AV_NOPTS_VALUE &&
705 pkt->pts != AV_NOPTS_VALUE &&
707 presentation_delayed = 1;
709 av_log(NULL, AV_LOG_TRACE,
710 "IN delayed:%d pts:%"PRId64", dts:%"PRId64" "
711 "cur_dts:%"PRId64" st:%d pc:%p\n",
712 presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
713 pkt->stream_index, pc);
714 /* Interpolate PTS and DTS if they are not present. We skip H.264
715 * currently because delay and has_b_frames are not reliably set. */
716 if ((delay == 0 || (delay == 1 && pc)) &&
717 st->codecpar->codec_id != AV_CODEC_ID_H264) {
718 if (presentation_delayed) {
719 /* DTS = decompression timestamp */
720 /* PTS = presentation timestamp */
721 if (pkt->dts == AV_NOPTS_VALUE)
722 pkt->dts = st->last_IP_pts;
723 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
724 if (pkt->dts == AV_NOPTS_VALUE)
725 pkt->dts = st->cur_dts;
727 /* This is tricky: the dts must be incremented by the duration
728 * of the frame we are displaying, i.e. the last I- or P-frame. */
729 if (st->last_IP_duration == 0)
730 st->last_IP_duration = pkt->duration;
731 if (pkt->dts != AV_NOPTS_VALUE)
732 st->cur_dts = pkt->dts + st->last_IP_duration;
733 st->last_IP_duration = pkt->duration;
734 st->last_IP_pts = pkt->pts;
735 /* Cannot compute PTS if not present (we can compute it only
736 * by knowing the future. */
737 } else if (pkt->pts != AV_NOPTS_VALUE ||
738 pkt->dts != AV_NOPTS_VALUE ||
740 st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
741 int duration = pkt->duration;
742 if (!duration && st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
743 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
745 duration = av_rescale_rnd(1,
746 num * (int64_t) st->time_base.den,
747 den * (int64_t) st->time_base.num,
749 if (duration != 0 && s->internal->packet_buffer)
750 update_initial_durations(s, st, pkt->stream_index,
755 if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
757 /* presentation is not delayed : PTS and DTS are the same */
758 if (pkt->pts == AV_NOPTS_VALUE)
760 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
762 if (pkt->pts == AV_NOPTS_VALUE)
763 pkt->pts = st->cur_dts;
765 if (pkt->pts != AV_NOPTS_VALUE)
766 st->cur_dts = pkt->pts + duration;
/* Insertion-sort the pts into the reorder buffer to recover a dts. */
771 if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
772 st->pts_buffer[0] = pkt->pts;
773 for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
774 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
775 if (pkt->dts == AV_NOPTS_VALUE)
776 pkt->dts = st->pts_buffer[0];
777 // We skipped it above so we try here.
778 if (st->codecpar->codec_id == AV_CODEC_ID_H264)
779 // This should happen on the first packet
780 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
781 if (pkt->dts > st->cur_dts)
782 st->cur_dts = pkt->dts;
785 av_log(NULL, AV_LOG_TRACE,
786 "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
787 presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
/* Intra-only codecs: every packet is a keyframe. */
790 if (is_intra_only(st->codecpar->codec_id))
791 pkt->flags |= AV_PKT_FLAG_KEY;
792 #if FF_API_CONVERGENCE_DURATION
793 FF_DISABLE_DEPRECATION_WARNINGS
795 pkt->convergence_duration = pc->convergence_duration;
796 FF_ENABLE_DEPRECATION_WARNINGS
800 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
803 AVPacketList *pktl = *pkt_buf;
804 *pkt_buf = pktl->next;
805 av_packet_unref(&pktl->pkt);
812 * Parse a packet, add all split parts to parse_queue.
814 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
/* Run the stream's parser over 'pkt' (or flush it when pkt is NULL) and
 * append every complete frame produced to the parse_queue. Side data is
 * transferred to the first output packet; timestamps/durations come from
 * the parser state.
 * NOTE(review): extraction dropped many original lines here (the NULL->
 * flush_pkt substitution, data/size advancement, braces, error labels). */
816 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
818 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
819 AVStream *st = s->streams[stream_index];
820 uint8_t *data = pkt ? pkt->data : NULL;
821 int size = pkt ? pkt->size : 0;
822 int ret = 0, got_output = 0;
825 av_init_packet(&flush_pkt);
830 while (size > 0 || (pkt == &flush_pkt && got_output)) {
833 av_init_packet(&out_pkt);
834 len = av_parser_parse2(st->parser, st->internal->avctx,
835 &out_pkt.data, &out_pkt.size, data, size,
836 pkt->pts, pkt->dts, pkt->pos);
/* Timestamps are consumed by the first parse call only. */
838 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
839 /* increment read pointer */
843 got_output = !!out_pkt.size;
/* Hand the input packet's side data over to the first parsed packet. */
848 if (pkt->side_data) {
849 out_pkt.side_data = pkt->side_data;
850 out_pkt.side_data_elems = pkt->side_data_elems;
851 pkt->side_data = NULL;
852 pkt->side_data_elems = 0;
855 /* set the duration */
856 out_pkt.duration = 0;
857 if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
858 if (st->internal->avctx->sample_rate > 0) {
860 av_rescale_q_rnd(st->parser->duration,
861 (AVRational) { 1, st->internal->avctx->sample_rate },
867 out_pkt.stream_index = st->index;
868 out_pkt.pts = st->parser->pts;
869 out_pkt.dts = st->parser->dts;
870 out_pkt.pos = st->parser->pos;
872 if (st->parser->key_frame == 1 ||
873 (st->parser->key_frame == -1 &&
874 st->parser->pict_type == AV_PICTURE_TYPE_I))
875 out_pkt.flags |= AV_PKT_FLAG_KEY;
877 compute_pkt_fields(s, st, st->parser, &out_pkt);
/* Record keyframes in the generic index when the demuxer requests it. */
879 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
880 out_pkt.flags & AV_PKT_FLAG_KEY) {
881 ff_reduce_index(s, st->index);
882 av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
883 0, 0, AVINDEX_KEYFRAME);
886 if ((ret = add_to_pktbuf(&s->internal->parse_queue, &out_pkt,
887 &s->internal->parse_queue_end,
889 av_packet_unref(&out_pkt);
894 /* end of the stream => close and free the parser */
895 if (pkt == &flush_pkt) {
896 av_parser_close(st->parser);
901 av_packet_unref(pkt);
905 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
906 AVPacketList **pkt_buffer_end,
910 av_assert0(*pkt_buffer);
913 *pkt_buffer = pktl->next;
915 *pkt_buffer_end = NULL;
/* Core of av_read_frame(): pull raw packets via ff_read_packet(), lazily
 * create parsers, either emit packets as-is or push them through
 * parse_packet(), drain the parse_queue, and surface demuxer metadata
 * updates via event flags.
 * NOTE(review): extraction dropped many original lines here (cur_pkt and
 * st declarations, braces, EOF handling, return statements). */
920 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
922 int ret = 0, i, got_packet = 0;
923 AVDictionary *metadata = NULL;
927 while (!got_packet && !s->internal->parse_queue) {
931 /* read next packet */
932 ret = ff_read_packet(s, &cur_pkt);
934 if (ret == AVERROR(EAGAIN))
936 /* flush the parsers */
937 for (i = 0; i < s->nb_streams; i++) {
939 if (st->parser && st->need_parsing)
940 parse_packet(s, NULL, st->index);
942 /* all remaining packets are now in parse_queue =>
943 * really terminate parsing */
947 st = s->streams[cur_pkt.stream_index];
/* Diagnose demuxer bugs: pts must never precede dts. */
949 if (cur_pkt.pts != AV_NOPTS_VALUE &&
950 cur_pkt.dts != AV_NOPTS_VALUE &&
951 cur_pkt.pts < cur_pkt.dts) {
952 av_log(s, AV_LOG_WARNING,
953 "Invalid timestamps stream=%d, pts=%"PRId64", "
954 "dts=%"PRId64", size=%d\n",
955 cur_pkt.stream_index, cur_pkt.pts,
956 cur_pkt.dts, cur_pkt.size);
958 if (s->debug & FF_FDEBUG_TS)
959 av_log(s, AV_LOG_DEBUG,
960 "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", "
961 "size=%d, duration=%"PRId64", flags=%d\n",
962 cur_pkt.stream_index, cur_pkt.pts, cur_pkt.dts,
963 cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
/* Lazily instantiate the parser on first use, honoring NOPARSE. */
965 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
966 st->parser = av_parser_init(st->codecpar->codec_id);
968 /* no parser available: just output the raw packets */
969 st->need_parsing = AVSTREAM_PARSE_NONE;
970 else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
971 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
972 else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
973 st->parser->flags |= PARSER_FLAG_ONCE;
976 if (!st->need_parsing || !st->parser) {
977 /* no parsing needed: we just output the packet as is */
979 compute_pkt_fields(s, st, NULL, pkt);
980 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
981 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
982 ff_reduce_index(s, st->index);
983 av_add_index_entry(st, pkt->pos, pkt->dts,
984 0, 0, AVINDEX_KEYFRAME);
987 } else if (st->discard < AVDISCARD_ALL) {
988 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
992 av_packet_unref(&cur_pkt);
996 if (!got_packet && s->internal->parse_queue)
997 ret = read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);
/* Publish metadata updates pushed by the demuxer through its options. */
999 av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
1001 s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
1002 av_dict_copy(&s->metadata, metadata, 0);
1003 av_dict_free(&metadata);
1004 av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
1007 #if FF_API_LAVF_AVCTX
1008 update_stream_avctx(s);
1011 if (s->debug & FF_FDEBUG_TS)
1012 av_log(s, AV_LOG_DEBUG,
1013 "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", "
1014 "size=%d, duration=%"PRId64", flags=%d\n",
1015 pkt->stream_index, pkt->pts, pkt->dts,
1016 pkt->size, pkt->duration, pkt->flags);
/* Public packet-reading entry point. Without AVFMT_FLAG_GENPTS it simply
 * drains the packet buffer or calls read_frame_internal(); with GENPTS it
 * buffers packets and back-fills missing pts values from later dts before
 * releasing packets to the caller.
 * NOTE(review): extraction dropped several original lines (eof/ret
 * declarations, loop braces, EOF handling). */
1021 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1023 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
/* Fast path: no pts generation requested. */
1027 return s->internal->packet_buffer
1028 ? read_from_packet_buffer(&s->internal->packet_buffer,
1029 &s->internal->packet_buffer_end, pkt)
1030 : read_frame_internal(s, pkt);
1034 AVPacketList *pktl = s->internal->packet_buffer;
1037 AVPacket *next_pkt = &pktl->pkt;
/* Try to infer the head packet's pts from a later packet's dts, modulo
 * timestamp wrap. */
1039 if (next_pkt->dts != AV_NOPTS_VALUE) {
1040 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1041 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1042 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1043 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
1044 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
1046 next_pkt->pts = pktl->pkt.dts;
1050 pktl = s->internal->packet_buffer;
1053 /* read packet from packet buffer, if there is data */
1054 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1055 next_pkt->dts != AV_NOPTS_VALUE && !eof))
1056 return read_from_packet_buffer(&s->internal->packet_buffer,
1057 &s->internal->packet_buffer_end, pkt);
/* Need more input: fetch another packet and buffer it. */
1060 ret = read_frame_internal(s, pkt);
1062 if (pktl && ret != AVERROR(EAGAIN)) {
1069 ret = add_to_pktbuf(&s->internal->packet_buffer, pkt,
1070 &s->internal->packet_buffer_end, 1);
1076 /* XXX: suppress the packet queue */
1077 static void flush_packet_queue(AVFormatContext *s)
1079 free_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end);
1080 free_packet_buffer(&s->internal->packet_buffer, &s->internal->packet_buffer_end);
1081 free_packet_buffer(&s->internal->raw_packet_buffer, &s->internal->raw_packet_buffer_end);
1083 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1086 /*******************************************************/
1089 int av_find_default_stream_index(AVFormatContext *s)
1091 int first_audio_index = -1;
1095 if (s->nb_streams <= 0)
1097 for (i = 0; i < s->nb_streams; i++) {
1099 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1100 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1103 if (first_audio_index < 0 &&
1104 st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
1105 first_audio_index = i;
1107 return first_audio_index >= 0 ? first_audio_index : 0;
1110 /** Flush the frame reader. */
1111 void ff_read_frame_flush(AVFormatContext *s)
1116 flush_packet_queue(s);
1118 /* Reset read state for each stream. */
1119 for (i = 0; i < s->nb_streams; i++) {
1123 av_parser_close(st->parser);
1126 st->last_IP_pts = AV_NOPTS_VALUE;
1127 /* We set the current DTS to an unspecified origin. */
1128 st->cur_dts = AV_NOPTS_VALUE;
1130 st->probe_packets = MAX_PROBE_PACKETS;
1132 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1133 st->pts_buffer[j] = AV_NOPTS_VALUE;
1137 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1141 for (i = 0; i < s->nb_streams; i++) {
1142 AVStream *st = s->streams[i];
1145 av_rescale(timestamp,
1146 st->time_base.den * (int64_t) ref_st->time_base.num,
1147 st->time_base.num * (int64_t) ref_st->time_base.den);
1151 void ff_reduce_index(AVFormatContext *s, int stream_index)
1153 AVStream *st = s->streams[stream_index];
1154 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1156 if ((unsigned) st->nb_index_entries >= max_entries) {
1158 for (i = 0; 2 * i < st->nb_index_entries; i++)
1159 st->index_entries[i] = st->index_entries[2 * i];
1160 st->nb_index_entries = i;
1164 int ff_add_index_entry(AVIndexEntry **index_entries,
1165 int *nb_index_entries,
1166 unsigned int *index_entries_allocated_size,
1167 int64_t pos, int64_t timestamp,
1168 int size, int distance, int flags)
1170 AVIndexEntry *entries, *ie;
1173 if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1176 entries = av_fast_realloc(*index_entries,
1177 index_entries_allocated_size,
1178 (*nb_index_entries + 1) *
1179 sizeof(AVIndexEntry));
1183 *index_entries = entries;
1185 index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1186 timestamp, AVSEEK_FLAG_ANY);
1189 index = (*nb_index_entries)++;
1190 ie = &entries[index];
1191 assert(index == 0 || ie[-1].timestamp < timestamp);
1193 ie = &entries[index];
1194 if (ie->timestamp != timestamp) {
1195 if (ie->timestamp <= timestamp)
1197 memmove(entries + index + 1, entries + index,
1198 sizeof(AVIndexEntry) * (*nb_index_entries - index));
1199 (*nb_index_entries)++;
1200 } else if (ie->pos == pos && distance < ie->min_distance)
1201 // do not reduce the distance
1202 distance = ie->min_distance;
1206 ie->timestamp = timestamp;
1207 ie->min_distance = distance;
1214 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1215 int size, int distance, int flags)
1217 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1218 &st->index_entries_allocated_size, pos,
1219 timestamp, size, distance, flags);
1222 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1223 int64_t wanted_timestamp, int flags)
1231 // Optimize appending index entries at the end.
1232 if (b && entries[b - 1].timestamp < wanted_timestamp)
1237 timestamp = entries[m].timestamp;
1238 if (timestamp >= wanted_timestamp)
1240 if (timestamp <= wanted_timestamp)
1243 m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1245 if (!(flags & AVSEEK_FLAG_ANY))
1246 while (m >= 0 && m < nb_entries &&
1247 !(entries[m].flags & AVINDEX_KEYFRAME))
1248 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1250 if (m == nb_entries)
1255 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1257 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1258 wanted_timestamp, flags);
/* Seek to target_ts using the demuxer's read_timestamp() via binary search
 * (ff_gen_search). Cached index entries, when available, seed pos/ts bounds
 * to narrow the search range before seeking the IO context.
 * NOTE(review): extraction dropped several original lines (declarations of
 * st/e/index/ret, braces, error returns). */
1261 int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
1262 int64_t target_ts, int flags)
1264 AVInputFormat *avif = s->iformat;
1265 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1266 int64_t ts_min, ts_max, ts;
1271 if (stream_index < 0)
1274 av_log(s, AV_LOG_TRACE, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1277 ts_min = AV_NOPTS_VALUE;
1278 pos_limit = -1; // GCC falsely says it may be uninitialized.
1280 st = s->streams[stream_index];
1281 if (st->index_entries) {
1284 /* FIXME: Whole function must be checked for non-keyframe entries in
1285 * index case, especially read_timestamp(). */
/* Lower bound: nearest cached entry at or before the target. */
1286 index = av_index_search_timestamp(st, target_ts,
1287 flags | AVSEEK_FLAG_BACKWARD);
1288 index = FFMAX(index, 0);
1289 e = &st->index_entries[index];
1291 if (e->timestamp <= target_ts || e->pos == e->min_distance) {
1293 ts_min = e->timestamp;
1294 av_log(s, AV_LOG_TRACE, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* Upper bound: nearest cached entry at or after the target. */
1300 index = av_index_search_timestamp(st, target_ts,
1301 flags & ~AVSEEK_FLAG_BACKWARD);
1302 assert(index < st->nb_index_entries);
1304 e = &st->index_entries[index];
1305 assert(e->timestamp >= target_ts);
1307 ts_max = e->timestamp;
1308 pos_limit = pos_max - e->min_distance;
1309 av_log(s, AV_LOG_TRACE, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
1310 " dts_max=%"PRId64"\n", pos_max, pos_limit, ts_max);
1314 pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
1315 ts_min, ts_max, flags, &ts, avif->read_timestamp);
1320 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1323 ff_update_cur_dts(s, st, ts);
/* Generic timestamp search: locate the byte position whose timestamp brackets
 * target_ts, given optional initial bounds (pos_min/ts_min, pos_max/ts_max;
 * AV_NOPTS_VALUE means "probe it here"). Uses interpolation first, then
 * bisection, then linear stepping when the window stops shrinking
 * (no_change escalates the strategy). The resolved timestamp is stored in
 * *ts_ret and the position returned.
 * NOTE(review): elided listing — the do/while that probes backwards from
 * EOF, the no_change bookkeeping, and several braces are not visible. */
1328 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1329                       int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1330                       int64_t ts_min, int64_t ts_max,
1331                       int flags, int64_t *ts_ret,
1332                       int64_t (*read_timestamp)(struct AVFormatContext *, int,
1333                                                 int64_t *, int64_t))
1336     int64_t start_pos, filesize;
1339     av_log(s, AV_LOG_TRACE, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* Establish a lower bound at the start of the data if none was supplied. */
1341     if (ts_min == AV_NOPTS_VALUE) {
1342         pos_min = s->internal->data_offset;
1343         ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1344         if (ts_min == AV_NOPTS_VALUE)
/* Establish an upper bound by probing near the end of the file. */
1348     if (ts_max == AV_NOPTS_VALUE) {
1350         filesize = avio_size(s->pb);
1351         pos_max = filesize - 1;
1354             ts_max = read_timestamp(s, stream_index, &pos_max,
1357         } while (ts_max == AV_NOPTS_VALUE && pos_max >= step);
1358         if (ts_max == AV_NOPTS_VALUE)
1362             int64_t tmp_pos = pos_max + 1;
1363             int64_t tmp_ts = read_timestamp(s, stream_index,
1364                                             &tmp_pos, INT64_MAX);
1365             if (tmp_ts == AV_NOPTS_VALUE)
1369             if (tmp_pos >= filesize)
1372         pos_limit = pos_max;
1375     if (ts_min > ts_max)
1377     else if (ts_min == ts_max)
1378         pos_limit = pos_min;
1381     while (pos_min < pos_limit) {
1382         av_log(s, AV_LOG_TRACE, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64
1383                " dts_max=%"PRId64"\n", pos_min, pos_max, ts_min, ts_max);
1384         assert(pos_limit <= pos_max);
1386         if (no_change == 0) {
1387             int64_t approximate_keyframe_distance = pos_max - pos_limit;
1388             // interpolate position (better than dichotomy)
1389             pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
1391                   pos_min - approximate_keyframe_distance;
1392         } else if (no_change == 1) {
1393             // bisection if interpolation did not change min / max pos last time
1394             pos = (pos_min + pos_limit) >> 1;
1396             /* linear search if bisection failed, can only happen if there
1397              * are very few or no keyframes between min/max */
1402         else if (pos > pos_limit)
1406         // May pass pos_limit instead of -1.
1407         ts = read_timestamp(s, stream_index, &pos, INT64_MAX);
1412         av_log(s, AV_LOG_TRACE, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64
1413                " target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1414                pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1415                pos_limit, start_pos, no_change);
1416         if (ts == AV_NOPTS_VALUE) {
1417             av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1420         assert(ts != AV_NOPTS_VALUE);
/* Shrink the window: target at or before ts -> move upper bound down;
 * target at or after ts -> move lower bound up. */
1421         if (target_ts <= ts) {
1422             pos_limit = start_pos - 1;
1426         if (target_ts >= ts) {
1432     pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1433     ts  = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
/* NOTE(review): both calls below pass &pos_min; the second presumably
 * belongs to a debug/disabled path in the original — verify upstream. */
1435     ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1437     ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1438     av_log(s, AV_LOG_TRACE, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1439            pos, ts_min, target_ts, ts_max);
/* Byte-offset seek: clamp pos into [data_offset, filesize-1] and reposition
 * the IO context there. Used for AVSEEK_FLAG_BYTE seeks.
 * NOTE(review): elided listing — the clamping assignments and return are
 * not visible here. */
1444 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1445                            int64_t pos, int flags)
1447     int64_t pos_min, pos_max;
1449     pos_min = s->internal->data_offset;
1450     pos_max = avio_size(s->pb) - 1;
1454     else if (pos > pos_max)
1457     avio_seek(s->pb, pos, SEEK_SET);
/* Generic seek: look the timestamp up in the stream index; if it falls past
 * the last entry, read packets forward (building the index as a side effect)
 * until a keyframe beyond the target is seen, then retry the index lookup.
 * Finally seek the IO context to the chosen entry and update the current DTS.
 * NOTE(review): elided listing — declarations (index, ie, st, ret, pkt,
 * read_status), loop braces and returns are not visible here. */
1462 static int seek_frame_generic(AVFormatContext *s, int stream_index,
1463                               int64_t timestamp, int flags)
1470     st = s->streams[stream_index];
1472     index = av_index_search_timestamp(st, timestamp, flags);
1474     if (index < 0 && st->nb_index_entries &&
1475         timestamp < st->index_entries[0].timestamp)
1478     if (index < 0 || index == st->nb_index_entries - 1) {
/* Position at the last indexed packet (or at data start) before scanning. */
1481         if (st->nb_index_entries) {
1482             assert(st->index_entries);
1483             ie = &st->index_entries[st->nb_index_entries - 1];
1484             if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1486             ff_update_cur_dts(s, st, ie->timestamp);
1488             if ((ret = avio_seek(s->pb, s->internal->data_offset, SEEK_SET)) < 0)
/* Scan forward; av_read_frame() populates the index as packets are read. */
1494             read_status = av_read_frame(s, &pkt);
1495         } while (read_status == AVERROR(EAGAIN));
1496         if (read_status < 0)
1498         av_packet_unref(&pkt);
1499         if (stream_index == pkt.stream_index)
1500             if ((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1503         index = av_index_search_timestamp(st, timestamp, flags);
1508     ff_read_frame_flush(s);
1509     if (s->iformat->read_seek)
1510         if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1512     ie = &st->index_entries[index];
1513     if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1515     ff_update_cur_dts(s, st, ie->timestamp);
/* Dispatch a seek request to the best available strategy, in order:
 * byte seek (AVSEEK_FLAG_BYTE), the demuxer's own read_seek(), binary
 * search over read_timestamp(), and finally the generic index-based seek.
 * When stream_index < 0 a default stream is picked and the timestamp is
 * rescaled from AV_TIME_BASE units into that stream's time base. */
1520 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1521                                int64_t timestamp, int flags)
1526     if (flags & AVSEEK_FLAG_BYTE) {
1527         if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1529         ff_read_frame_flush(s);
1530         return seek_frame_byte(s, stream_index, timestamp, flags);
1533     if (stream_index < 0) {
1534         stream_index = av_find_default_stream_index(s);
1535         if (stream_index < 0)
1538         st = s->streams[stream_index];
1539         /* timestamp for default must be expressed in AV_TIME_BASE units */
1540         timestamp = av_rescale(timestamp, st->time_base.den,
1541                                AV_TIME_BASE * (int64_t) st->time_base.num);
1544     /* first, we try the format specific seek */
1545     if (s->iformat->read_seek) {
1546         ff_read_frame_flush(s);
1547         ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1553     if (s->iformat->read_timestamp &&
1554         !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1555         ff_read_frame_flush(s);
1556         return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1557     } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1558         ff_read_frame_flush(s);
1559         return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public seek entry point: perform the seek, then re-queue attached
 * pictures so they are delivered again after the position change. */
1564 int av_seek_frame(AVFormatContext *s, int stream_index,
1565                   int64_t timestamp, int flags)
1567     int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1570         ret = queue_attached_pictures(s);
/* New-style seek API: seek to ts constrained to [min_ts, max_ts].
 * Prefers the demuxer's read_seek2(); otherwise falls back to
 * av_seek_frame(), choosing AVSEEK_FLAG_BACKWARD when ts is closer to
 * min_ts than to max_ts (the old API cannot express the range exactly). */
1575 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
1576                        int64_t ts, int64_t max_ts, int flags)
/* Reject an inconsistent range up front. */
1578     if (min_ts > ts || max_ts < ts)
1581     if (s->iformat->read_seek2) {
1583         ff_read_frame_flush(s);
1584         ret = s->iformat->read_seek2(s, stream_index, min_ts,
1588             ret = queue_attached_pictures(s);
1592     if (s->iformat->read_timestamp) {
1593         // try to seek via read_timestamp()
1596     // Fall back on old API if new is not implemented but old is.
1597     // Note the old API has somewhat different semantics.
1598     if (s->iformat->read_seek || 1)
1599         return av_seek_frame(s, stream_index, ts,
1600                              flags | ((uint64_t) ts - min_ts >
1601                                       (uint64_t) max_ts - ts
1602                                       ? AVSEEK_FLAG_BACKWARD : 0));
1604     // try some generic seek like seek_frame_generic() but with new ts semantics
1607 /*******************************************************/
1610 * Return TRUE if any stream in the file has an accurate duration.
1612 * @return TRUE if at least one stream (or the file itself) has an accurate duration.
/* Returns nonzero when any stream, or the container itself, carries a
 * known duration (st->duration / ic->duration != AV_NOPTS_VALUE).
 * NOTE(review): elided listing — the return statements are not visible. */
1614 static int has_duration(AVFormatContext *ic)
1619     for (i = 0; i < ic->nb_streams; i++) {
1620         st = ic->streams[i];
1621         if (st->duration != AV_NOPTS_VALUE)
1624     if (ic->duration != AV_NOPTS_VALUE)
1630 * Estimate the global stream timings from those of the individual streams.
1632 * Also computes the global bitrate if possible.
/* Derive the container's start_time, duration and (when the file size is
 * known) average bit_rate from the per-stream start times and durations,
 * all rescaled into AV_TIME_BASE units. */
1634 static void update_stream_timings(AVFormatContext *ic)
1636     int64_t start_time, start_time1, end_time, end_time1;
1637     int64_t duration, duration1, filesize;
1641     start_time = INT64_MAX;
1642     end_time   = INT64_MIN;
1643     duration   = INT64_MIN;
1644     for (i = 0; i < ic->nb_streams; i++) {
1645         st = ic->streams[i];
/* Earliest stream start becomes the container start time. */
1646         if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1647             start_time1 = av_rescale_q(st->start_time, st->time_base,
1649             start_time  = FFMIN(start_time, start_time1);
1650             if (st->duration != AV_NOPTS_VALUE) {
1651                 end_time1 = start_time1 +
1652                             av_rescale_q(st->duration, st->time_base,
1654                 end_time = FFMAX(end_time, end_time1);
1657         if (st->duration != AV_NOPTS_VALUE) {
1658             duration1 = av_rescale_q(st->duration, st->time_base,
1660             duration  = FFMAX(duration, duration1);
1663     if (start_time != INT64_MAX) {
1664         ic->start_time = start_time;
1665         if (end_time != INT64_MIN)
1666             duration = FFMAX(duration, end_time - start_time);
1668     if (duration != INT64_MIN) {
1669         ic->duration = duration;
1670         if (ic->pb && (filesize = avio_size(ic->pb)) > 0)
1671             /* compute the bitrate */
1672             ic->bit_rate = (double) filesize * 8.0 * AV_TIME_BASE /
1673                            (double) ic->duration;
/* After computing container-level timings, copy them (rescaled into each
 * stream's time base) into any stream that still lacks start_time/duration. */
1677 static void fill_all_stream_timings(AVFormatContext *ic)
1682     update_stream_timings(ic);
1683     for (i = 0; i < ic->nb_streams; i++) {
1684         st = ic->streams[i];
1685         if (st->start_time == AV_NOPTS_VALUE) {
1686             if (ic->start_time != AV_NOPTS_VALUE)
1687                 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
1689             if (ic->duration != AV_NOPTS_VALUE)
1690                 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
/* Last-resort duration estimate: sum the per-stream bitrates (guarding
 * against int overflow) to obtain a container bitrate, then derive each
 * missing stream duration from filesize * 8 / bit_rate.
 * NOTE(review): elided listing — declarations (bit_rate accumulator) and
 * several braces are not visible here. */
1696 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1698     int64_t filesize, duration;
1702     /* if bit_rate is already set, we believe it */
1703     if (ic->bit_rate <= 0) {
1705         for (i = 0; i < ic->nb_streams; i++) {
1706             st = ic->streams[i];
1707             if (st->codecpar->bit_rate > 0) {
/* Overflow guard before accumulating into an int-sized total. */
1708                 if (INT_MAX - st->codecpar->bit_rate < bit_rate) {
1712                 bit_rate += st->codecpar->bit_rate;
1715         ic->bit_rate = bit_rate;
1718     /* if duration is already set, we believe it */
1719     if (ic->duration == AV_NOPTS_VALUE &&
1720         ic->bit_rate != 0) {
1721         filesize = ic->pb ? avio_size(ic->pb) : 0;
1723             for (i = 0; i < ic->nb_streams; i++) {
1724                 st       = ic->streams[i];
1725                 duration = av_rescale(8 * filesize, st->time_base.den,
1727                                       (int64_t) st->time_base.num);
1728                 if (st->duration == AV_NOPTS_VALUE)
1729                     st->duration = duration;
1735 #define DURATION_MAX_READ_SIZE 250000
1736 #define DURATION_MAX_RETRY 3
1738 /* only usable for MPEG-PS streams */
/* Estimate durations by reading PTS near the end of the file: seek to
 * filesize - DURATION_MAX_READ_SIZE (widening the window up to
 * DURATION_MAX_RETRY times), read packets, and take the last usable PTS
 * minus each stream's start as its duration, compensating one pts_wrap.
 * Afterwards restore the original file position and reset per-stream
 * decode state. Only usable for MPEG-PS style streams (see caller).
 * NOTE(review): elided listing — retry/end_time declarations and the loop
 * braces are not visible here. */
1739 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1741     AVPacket pkt1, *pkt = &pkt1;
1743     int read_size, i, ret;
1745     int64_t filesize, offset, duration;
1748     /* flush packet queue */
1749     flush_packet_queue(ic);
1751     for (i = 0; i < ic->nb_streams; i++) {
1752         st = ic->streams[i];
1753         if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1754             av_log(ic, AV_LOG_WARNING,
1755                    "start time is not set in estimate_timings_from_pts\n");
1758             av_parser_close(st->parser);
1763     /* estimate the end time (duration) */
1764     /* XXX: may need to support wrapping */
1765     filesize = ic->pb ? avio_size(ic->pb) : 0;
1766     end_time = AV_NOPTS_VALUE;
1768         offset = filesize - (DURATION_MAX_READ_SIZE << retry);
1772         avio_seek(ic->pb, offset, SEEK_SET);
1775             if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
1779                 ret = ff_read_packet(ic, pkt);
1780             } while (ret == AVERROR(EAGAIN));
1783             read_size += pkt->size;
1784             st         = ic->streams[pkt->stream_index];
1785             if (pkt->pts != AV_NOPTS_VALUE &&
1786                 (st->start_time != AV_NOPTS_VALUE ||
1787                  st->first_dts  != AV_NOPTS_VALUE)) {
1788                 duration = end_time = pkt->pts;
1789                 if (st->start_time != AV_NOPTS_VALUE)
1790                     duration -= st->start_time;
1792                     duration -= st->first_dts;
/* Negative delta means the PTS wrapped; add one wrap period. */
1794                     duration += 1LL << st->pts_wrap_bits;
1796                     if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1797                         st->duration = duration;
1800             av_packet_unref(pkt);
1802     } while (end_time == AV_NOPTS_VALUE &&
1803              filesize > (DURATION_MAX_READ_SIZE << retry) &&
1804              ++retry <= DURATION_MAX_RETRY);
1806     fill_all_stream_timings(ic);
1808     avio_seek(ic->pb, old_offset, SEEK_SET);
1809     for (i = 0; i < ic->nb_streams; i++) {
1810         st              = ic->streams[i];
1811         st->cur_dts     = st->first_dts;
1812         st->last_IP_pts = AV_NOPTS_VALUE;
/* Choose a timing-estimation strategy: PTS probing at EOF for seekable
 * MPEG-PS/TS files, per-stream timings when at least one stream has them,
 * else the imprecise bitrate-based estimate. Finishes with a trace dump of
 * the resulting timings. */
1816 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1820     /* get the file size, if possible */
1821     if (ic->iformat->flags & AVFMT_NOFILE) {
1824         file_size = avio_size(ic->pb);
1825         file_size = FFMAX(0, file_size);
1828     if ((!strcmp(ic->iformat->name, "mpeg") ||
1829          !strcmp(ic->iformat->name, "mpegts")) &&
1830         file_size && (ic->pb->seekable & AVIO_SEEKABLE_NORMAL)) {
1831         /* get accurate estimate from the PTSes */
1832         estimate_timings_from_pts(ic, old_offset);
1833     } else if (has_duration(ic)) {
1834         /* at least one component has timings - we use them for all
1836         fill_all_stream_timings(ic);
1838         av_log(ic, AV_LOG_WARNING,
1839                "Estimating duration from bitrate, this may be inaccurate\n");
1840         /* less precise: use bitrate info */
1841         estimate_timings_from_bit_rate(ic);
1843     update_stream_timings(ic);
/* Trace the final per-stream and global timings. */
1847         AVStream av_unused *st;
1848         for (i = 0; i < ic->nb_streams; i++) {
1849             st = ic->streams[i];
1850             av_log(ic, AV_LOG_TRACE, "%d: start_time: %0.3f duration: %0.3f\n", i,
1851                    (double) st->start_time / AV_TIME_BASE,
1852                    (double) st->duration / AV_TIME_BASE);
1854         av_log(ic, AV_LOG_TRACE,
1855                "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1856                (double) ic->start_time / AV_TIME_BASE,
1857                (double) ic->duration / AV_TIME_BASE,
1858                ic->bit_rate / 1000);
/* Returns nonzero when the stream's internal codec context has the essential
 * parameters for its media type (sample rate/channels and, once a decoder
 * has been tried, sample_fmt for audio; pix_fmt for video) and a known
 * codec id. */
1862 static int has_codec_parameters(AVStream *st)
1864     AVCodecContext *avctx = st->internal->avctx;
1867     switch (avctx->codec_type) {
1868     case AVMEDIA_TYPE_AUDIO:
1869         val = avctx->sample_rate && avctx->channels;
1870         if (st->info->found_decoder >= 0 &&
1871             avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1874     case AVMEDIA_TYPE_VIDEO:
1876         if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1883     return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
/* H.264 has decode delay (frame reordering); require several decoded frames
 * before trusting timing guesses. All other codecs pass immediately. */
1886 static int has_decode_delay_been_guessed(AVStream *st)
1888     return st->internal->avctx->codec_id != AV_CODEC_ID_H264 ||
1889            st->info->nb_decoded_frames >= 6;
1892 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/* Probe helper: open a decoder for the stream if needed (single-threaded,
 * see comment below) and feed it avpkt via the send/receive API until the
 * stream's codec parameters are known or the packet is consumed. Records
 * the attempt in st->info->found_decoder (1 = opened, -1 = failed).
 * Returns >0 if decoded data was produced, 0 if not, negative on error.
 * NOTE(review): elided listing — some early-return branches and loop
 * braces are not visible here. */
1893 static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt,
1894                             AVDictionary **options)
1896     AVCodecContext *avctx = st->internal->avctx;
1897     const AVCodec *codec;
1898     int got_picture = 1, ret = 0;
1899     AVFrame *frame = av_frame_alloc();
1900     AVPacket pkt = *avpkt;
1903         return AVERROR(ENOMEM);
1905     if (!avcodec_is_open(avctx) && !st->info->found_decoder) {
1906         AVDictionary *thread_opt = NULL;
1908 #if FF_API_LAVF_AVCTX
1909 FF_DISABLE_DEPRECATION_WARNINGS
1910         codec = st->codec->codec ? st->codec->codec
1911                                  : avcodec_find_decoder(st->codecpar->codec_id);
1912 FF_ENABLE_DEPRECATION_WARNINGS
1914         codec = avcodec_find_decoder(st->codecpar->codec_id);
1918             st->info->found_decoder = -1;
1923         /* Force thread count to 1 since the H.264 decoder will not extract
1924          * SPS and PPS to extradata during multi-threaded decoding. */
1925         av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
1926         ret = avcodec_open2(avctx, codec, options ? options : &thread_opt);
1928             av_dict_free(&thread_opt);
1930             st->info->found_decoder = -1;
1933         st->info->found_decoder = 1;
1934     } else if (!st->info->found_decoder)
1935         st->info->found_decoder = 1;
1937     if (st->info->found_decoder < 0) {
/* Keep decoding while there is input (or a flush in progress) and some
 * parameter is still unknown, or channel config requires a real frame. */
1942     while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
1944            (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
1945             (!st->codec_info_nb_frames &&
1946              (avctx->codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF)))) {
1948         if (avctx->codec_type == AVMEDIA_TYPE_VIDEO ||
1949             avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1950             ret = avcodec_send_packet(avctx, &pkt);
1951             if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1955             ret = avcodec_receive_frame(avctx, frame);
1958             if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
1963             st->info->nb_decoded_frames++;
1969     av_frame_free(&frame);
/* Linear scan of a codec-tag table for the given codec id.
 * NOTE(review): body almost entirely elided in this listing. */
1973 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
1975     while (tags->id != AV_CODEC_ID_NONE) {
/* Map a container tag to a codec id: first an exact match, then a
 * case-insensitive (toupper4) match; AV_CODEC_ID_NONE if neither hits. */
1983 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1986     for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1987         if (tag == tags[i].tag)
1989     for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1990         if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
1992     return AV_CODEC_ID_NONE;
/* Select a PCM codec id from bits-per-sample, float flag, endianness and a
 * signedness bitmask (bit bps-1 of sflags => signed). Unsupported widths
 * return AV_CODEC_ID_NONE.
 * NOTE(review): elided listing — the switch statements on bps are not
 * visible, only the per-case returns. */
1995 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
2000             return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
2002             return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
2004             return AV_CODEC_ID_NONE;
2008         if (sflags & (1 << (bps - 1))) {
2011                 return AV_CODEC_ID_PCM_S8;
2013                 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
2015                 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
2017                 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
2019                 return AV_CODEC_ID_NONE;
2024                 return AV_CODEC_ID_PCM_U8;
2026                 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
2028                 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
2030                 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
2032                 return AV_CODEC_ID_NONE;
/* Search a NULL-terminated list of codec-tag tables for id; returns the
 * first matching tag. NOTE(review): tail of the function elided. */
2038 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
2041     for (i = 0; tags && tags[i]; i++) {
2042         int tag = ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of codec-tag tables for the codec id
 * matching tag; AV_CODEC_ID_NONE when no table matches. */
2052 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
2052     for (i = 0; tags && tags[i]; i++) {
2053         enum AVCodecID id = ff_codec_get_id(tags[i], tag);
2054         if (id != AV_CODEC_ID_NONE)
2057     return AV_CODEC_ID_NONE;
/* Fill in missing chapter end times: each open-ended chapter ends at the
 * start of the next chapter that begins after it, bounded above by the
 * file's total time (start_time + duration); a degenerate end collapses to
 * the chapter's own start. */
2060 static void compute_chapters_end(AVFormatContext *s)
2063     int64_t max_time = s->duration +
2064                        ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2066     for (i = 0; i < s->nb_chapters; i++)
2067         if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2068             AVChapter *ch = s->chapters[i];
2069             int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
2073             for (j = 0; j < s->nb_chapters; j++) {
2074                 AVChapter *ch1     = s->chapters[j];
2075                 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2077                 if (j != i && next_start > ch->start && next_start < end)
2080             ch->end = (end == INT64_MAX) ? ch->start : end;
/* Enumerate "standard" framerate numerators over the 12*1001 timebase:
 * first a dense 1001-spaced range, then 24/30/60/12/15 * 1000*12.
 * NOTE(review): the threshold condition between the two ranges is elided. */
2084 static int get_std_framerate(int i)
2087         return (i + 1) * 1001;
2089     return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
/* Set up the "extract_extradata" bitstream filter for this stream: allocate
 * a scratch packet and a bsf context, copy in the stream parameters, and
 * init the filter. An init failure is treated as "extradata extraction not
 * supported for this codec" and still returns success (with the bsf freed);
 * inited is set either way so we only try once. */
2092 static int extract_extradata_init(AVStream *st)
2094     AVStreamInternal *i = st->internal;
2095     const AVBitStreamFilter *f;
2098     f = av_bsf_get_by_name("extract_extradata");
2102     i->extract_extradata.pkt = av_packet_alloc();
2103     if (!i->extract_extradata.pkt)
2104         return AVERROR(ENOMEM);
2106     ret = av_bsf_alloc(f, &i->extract_extradata.bsf);
2110     ret = avcodec_parameters_copy(i->extract_extradata.bsf->par_in,
2115     i->extract_extradata.bsf->time_base_in = st->time_base;
2117     /* if init fails here, we assume extracting extradata is just not
2118      * supported for this codec, so we return success */
2119     ret = av_bsf_init(i->extract_extradata.bsf);
2121         av_bsf_free(&i->extract_extradata.bsf);
2126     i->extract_extradata.inited = 1;
/* Error path: release whatever was allocated. */
2130     av_bsf_free(&i->extract_extradata.bsf);
2131     av_packet_free(&i->extract_extradata.pkt);
/* Run pkt through the extract_extradata bsf (lazily initialized) and, when
 * the filter attaches AV_PKT_DATA_NEW_EXTRADATA side data, copy it (with
 * input padding) into the stream's internal codec context. A stream whose
 * init determined extraction is unsupported (inited but no bsf) is a no-op. */
2135 static int extract_extradata(AVStream *st, AVPacket *pkt)
2137     AVStreamInternal *i = st->internal;
2141     if (!i->extract_extradata.inited) {
2142         ret = extract_extradata_init(st);
2147     if (i->extract_extradata.inited && !i->extract_extradata.bsf)
2150     pkt_ref = i->extract_extradata.pkt;
2151     ret = av_packet_ref(pkt_ref, pkt);
2155     ret = av_bsf_send_packet(i->extract_extradata.bsf, pkt_ref);
2157         av_packet_unref(pkt_ref);
2161     while (ret >= 0 && !i->avctx->extradata) {
2165         ret = av_bsf_receive_packet(i->extract_extradata.bsf, pkt_ref);
2167             if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
2172         extradata = av_packet_get_side_data(pkt_ref, AV_PKT_DATA_NEW_EXTRADATA,
2176             i->avctx->extradata = av_mallocz(extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
2177             if (!i->avctx->extradata) {
2178                 av_packet_unref(pkt_ref);
2179                 return AVERROR(ENOMEM);
2181             memcpy(i->avctx->extradata, extradata, extradata_size);
2182             i->avctx->extradata_size = extradata_size;
2184         av_packet_unref(pkt_ref);
/* Probe the input until every stream's codec parameters are known (or the
 * probesize / max_analyze_duration limits are hit). Phases, as visible in
 * this elided listing: (1) per-stream setup — parser, codec-id override
 * sync, decoder open with threads forced to 1; (2) the main read loop —
 * read packets, buffer them unless AVFMT_FLAG_NOBUFFER, track DTS
 * monotonicity/discontinuities for fps estimation, extract extradata, and
 * decode frames only when parameters are still missing; (3) post-pass —
 * close probe decoders, estimate avg_frame_rate (snapping to standard
 * rates within 1%), set audio dispositions, compute chapter ends, and copy
 * internal codec contexts back into codecpar (and the deprecated
 * st->codec). On error jumps to find_stream_info_err which frees per-stream
 * probe state.
 * NOTE(review): many lines (braces, else branches, count/err declarations,
 * break/continue statements) are elided from this listing. */
2190 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2192     int i, count, ret, read_size, j;
2194     AVCodecContext *avctx;
2195     AVPacket pkt1, *pkt;
2196     int64_t old_offset = avio_tell(ic->pb);
2197     // new streams might appear, no options for those
2198     int orig_nb_streams = ic->nb_streams;
/* Phase 1: per-stream setup. */
2200     for (i = 0; i < ic->nb_streams; i++) {
2201         const AVCodec *codec;
2202         AVDictionary *thread_opt = NULL;
2203         st = ic->streams[i];
2204         avctx = st->internal->avctx;
2206         // only for the split stuff
2207         if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2208             st->parser = av_parser_init(st->codecpar->codec_id);
2209             if (st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser)
2210                 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2213         /* check if the caller has overridden the codec id */
2214 #if FF_API_LAVF_AVCTX
2215 FF_DISABLE_DEPRECATION_WARNINGS
2216         if (st->codec->codec_id != st->internal->orig_codec_id) {
2217             st->codecpar->codec_id   = st->codec->codec_id;
2218             st->codecpar->codec_type = st->codec->codec_type;
2219             st->internal->orig_codec_id = st->codec->codec_id;
2221 FF_ENABLE_DEPRECATION_WARNINGS
2223         if (st->codecpar->codec_id != st->internal->orig_codec_id)
2224             st->internal->orig_codec_id = st->codecpar->codec_id;
2226         ret = avcodec_parameters_to_context(avctx, st->codecpar);
2228             goto find_stream_info_err;
2229         if (st->codecpar->codec_id != AV_CODEC_ID_PROBE &&
2230             st->codecpar->codec_id != AV_CODEC_ID_NONE)
2231             st->internal->avctx_inited = 1;
2233 #if FF_API_LAVF_AVCTX
2234 FF_DISABLE_DEPRECATION_WARNINGS
2235         codec = st->codec->codec ? st->codec->codec
2236                                  : avcodec_find_decoder(st->codecpar->codec_id);
2237 FF_ENABLE_DEPRECATION_WARNINGS
2239         codec = avcodec_find_decoder(st->codecpar->codec_id);
2242         /* Force thread count to 1 since the H.264 decoder will not extract
2243          * SPS and PPS to extradata during multi-threaded decoding. */
2244         av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2246         /* Ensure that subtitle_header is properly set. */
2247         if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE
2248             && codec && !avctx->codec)
2249             avcodec_open2(avctx, codec,
2250                           options ? &options[i] : &thread_opt);
2252         // Try to just open decoders, in case this is enough to get parameters.
2253         if (!has_codec_parameters(st)) {
2254             if (codec && !avctx->codec)
2255                 avcodec_open2(avctx, codec,
2256                               options ? &options[i] : &thread_opt);
2259             av_dict_free(&thread_opt);
2262     for (i = 0; i < ic->nb_streams; i++) {
2263         ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2264         ic->streams[i]->info->fps_last_dts  = AV_NOPTS_VALUE;
/* Phase 2: main probing loop. */
2270         if (ff_check_interrupt(&ic->interrupt_callback)) {
2272             av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2276         /* check if one codec still needs to be handled */
2277         for (i = 0; i < ic->nb_streams; i++) {
2278             int fps_analyze_framecount = 20;
2280             st = ic->streams[i];
2281             if (!has_codec_parameters(st))
2283             /* If the timebase is coarse (like the usual millisecond precision
2284              * of mkv), we need to analyze more frames to reliably arrive at
2285              * the correct fps. */
2286             if (av_q2d(st->time_base) > 0.0005)
2287                 fps_analyze_framecount *= 2;
2288             if (ic->fps_probe_size >= 0)
2289                 fps_analyze_framecount = ic->fps_probe_size;
2290             /* variable fps and no guess at the real fps */
2291             if (!st->avg_frame_rate.num &&
2292                 st->codec_info_nb_frames < fps_analyze_framecount &&
2293                 st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
2295             if (!st->codecpar->extradata &&
2296                 !st->internal->avctx->extradata &&
2297                 (!st->internal->extract_extradata.inited ||
2298                  st->internal->extract_extradata.bsf))
2300             if (st->first_dts == AV_NOPTS_VALUE &&
2301                 st->codec_info_nb_frames < ic->max_ts_probe &&
2302                 (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
2303                  st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO))
2306         if (i == ic->nb_streams) {
2307             /* NOTE: If the format has no header, then we need to read some
2308              * packets to get most of the streams, so we cannot stop here. */
2309             if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2310                 /* If we found the info for all the codecs, we can stop. */
2312                 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2316         /* We did not get all the codec info, but we read too much data. */
2317         if (read_size >= ic->probesize) {
2319             av_log(ic, AV_LOG_DEBUG,
2320                    "Probe buffer size limit %d reached\n", ic->probesize);
2324         /* NOTE: A new stream can be added there if no header in file
2325          * (AVFMTCTX_NOHEADER). */
2326         ret = read_frame_internal(ic, &pkt1);
2327         if (ret == AVERROR(EAGAIN))
/* EOF before all parameters were found: flush the opened decoders. */
2332             AVPacket empty_pkt = { 0 };
2334             av_init_packet(&empty_pkt);
2336             /* We could not have all the codec parameters before EOF. */
2338             for (i = 0; i < ic->nb_streams; i++) {
2339                 st = ic->streams[i];
2341                 /* flush the decoders */
2342                 if (st->info->found_decoder == 1) {
2344                         err = try_decode_frame(ic, st, &empty_pkt,
2345                                                 (options && i < orig_nb_streams)
2346                                                 ? &options[i] : NULL);
2347                     } while (err > 0 && !has_codec_parameters(st));
2351                     av_log(ic, AV_LOG_WARNING,
2352                            "decoding for stream %d failed\n", st->index);
2353                 } else if (!has_codec_parameters(st)) {
2355                     avcodec_string(buf, sizeof(buf), st->internal->avctx, 0);
2356                     av_log(ic, AV_LOG_WARNING,
2357                            "Could not find codec parameters (%s)\n", buf);
2367         if (!(ic->flags & AVFMT_FLAG_NOBUFFER)) {
2368             ret = add_to_pktbuf(&ic->internal->packet_buffer, pkt,
2369                                 &ic->internal->packet_buffer_end, 0);
2371                 goto find_stream_info_err;
2374         read_size += pkt->size;
2376         st = ic->streams[pkt->stream_index];
2377         avctx = st->internal->avctx;
2378         if (!st->internal->avctx_inited) {
2379             ret = avcodec_parameters_to_context(avctx, st->codecpar);
2381                 goto find_stream_info_err;
2382             st->internal->avctx_inited = 1;
2385         if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2386             /* check for non-increasing dts */
2387             if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2388                 st->info->fps_last_dts >= pkt->dts) {
2389                 av_log(ic, AV_LOG_WARNING,
2390                        "Non-increasing DTS in stream %d: packet %d with DTS "
2391                        "%"PRId64", packet %d with DTS %"PRId64"\n",
2392                        st->index, st->info->fps_last_dts_idx,
2393                        st->info->fps_last_dts, st->codec_info_nb_frames,
2395                 st->info->fps_first_dts =
2396                 st->info->fps_last_dts  = AV_NOPTS_VALUE;
2398             /* Check for a discontinuity in dts. If the difference in dts
2399              * is more than 1000 times the average packet duration in the
2400              * sequence, we treat it as a discontinuity. */
2401             if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2402                 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2403                 (pkt->dts - st->info->fps_last_dts) / 1000 >
2404                 (st->info->fps_last_dts - st->info->fps_first_dts) /
2405                 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2406                 av_log(ic, AV_LOG_WARNING,
2407                        "DTS discontinuity in stream %d: packet %d with DTS "
2408                        "%"PRId64", packet %d with DTS %"PRId64"\n",
2409                        st->index, st->info->fps_last_dts_idx,
2410                        st->info->fps_last_dts, st->codec_info_nb_frames,
2412                 st->info->fps_first_dts =
2413                 st->info->fps_last_dts  = AV_NOPTS_VALUE;
2416             /* update stored dts values */
2417             if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2418                 st->info->fps_first_dts     = pkt->dts;
2419                 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2421             st->info->fps_last_dts     = pkt->dts;
2422             st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2424             /* check max_analyze_duration */
2425             if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2426                              AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2427                 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached\n",
2428                        ic->max_analyze_duration);
2429                 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2430                     av_packet_unref(pkt);
2434         if (!st->internal->avctx->extradata) {
2435             ret = extract_extradata(st, pkt);
2437                 goto find_stream_info_err;
2440         /* If still no information, we try to open the codec and to
2441          * decompress the frame. We try to avoid that in most cases as
2442          * it takes longer and uses more memory. For MPEG-4, we need to
2443          * decompress for QuickTime.
2445          * If AV_CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2446          * least one frame of codec data, this makes sure the codec initializes
2447          * the channel configuration and does not only trust the values from
2449         try_decode_frame(ic, st, pkt,
2450                          (options && i < orig_nb_streams) ? &options[i] : NULL);
2452         if (ic->flags & AVFMT_FLAG_NOBUFFER)
2453             av_packet_unref(pkt);
2455         st->codec_info_nb_frames++;
/* Phase 3: post-processing of the gathered information. */
2459     // close codecs which were opened in try_decode_frame()
2460     for (i = 0; i < ic->nb_streams; i++) {
2461         st = ic->streams[i];
2462         avcodec_close(st->internal->avctx);
2464     for (i = 0; i < ic->nb_streams; i++) {
2465         st = ic->streams[i];
2466         avctx = st->internal->avctx;
2467         if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2468             /* estimate average framerate if not set by demuxer */
2469             if (!st->avg_frame_rate.num &&
2470                 st->info->fps_last_dts != st->info->fps_first_dts) {
2471                 int64_t delta_dts = st->info->fps_last_dts -
2472                                     st->info->fps_first_dts;
2473                 int delta_packets = st->info->fps_last_dts_idx -
2474                                     st->info->fps_first_dts_idx;
2476                 double best_error = 0.01;
/* Overflow guard before av_reduce() below. */
2478                 if (delta_dts >= INT64_MAX / st->time_base.num ||
2479                     delta_packets >= INT64_MAX / st->time_base.den ||
2482                 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2483                           delta_packets * (int64_t) st->time_base.den,
2484                           delta_dts * (int64_t) st->time_base.num, 60000);
2486                 /* Round guessed framerate to a "standard" framerate if it's
2487                  * within 1% of the original estimate. */
2488                 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
2489                     AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
2490                     double error       = fabs(av_q2d(st->avg_frame_rate) /
2491                                               av_q2d(std_fps) - 1);
2493                     if (error < best_error) {
2495                         best_fps   = std_fps.num;
2499                     av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2500                               best_fps, 12 * 1001, INT_MAX);
2502         } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2503             if (!avctx->bits_per_coded_sample)
2504                 avctx->bits_per_coded_sample =
2505                     av_get_bits_per_sample(avctx->codec_id);
2506             // set stream disposition based on audio service type
2507             switch (avctx->audio_service_type) {
2508             case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2509                 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
2511             case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2512                 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
2514             case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2515                 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
2517             case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2518                 st->disposition = AV_DISPOSITION_COMMENT;
2520             case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2521                 st->disposition = AV_DISPOSITION_KARAOKE;
2527     compute_chapters_end(ic);
2529     /* update the stream parameters from the internal codec contexts */
2530     for (i = 0; i < ic->nb_streams; i++) {
2531         st = ic->streams[i];
2532         if (!st->internal->avctx_inited)
2535         ret = avcodec_parameters_from_context(st->codecpar, st->internal->avctx);
2537             goto find_stream_info_err;
2539 #if FF_API_LAVF_AVCTX
2540 FF_DISABLE_DEPRECATION_WARNINGS
2541         ret = avcodec_parameters_to_context(st->codec, st->codecpar);
2543             goto find_stream_info_err;
2545         if (st->internal->avctx->subtitle_header) {
2546             st->codec->subtitle_header = av_malloc(st->internal->avctx->subtitle_header_size);
2547             if (!st->codec->subtitle_header)
2548                 goto find_stream_info_err;
2549             st->codec->subtitle_header_size = st->internal->avctx->subtitle_header_size;
2550             memcpy(st->codec->subtitle_header, st->internal->avctx->subtitle_header,
2551                    st->codec->subtitle_header_size);
2553 FF_ENABLE_DEPRECATION_WARNINGS
2556         st->internal->avctx_inited = 0;
2559     estimate_timings(ic, old_offset);
2561 find_stream_info_err:
2562     for (i = 0; i < ic->nb_streams; i++) {
2563         av_freep(&ic->streams[i]->info);
2564         av_bsf_free(&ic->streams[i]->internal->extract_extradata.bsf);
2565         av_packet_free(&ic->streams[i]->internal->extract_extradata.pkt);
/* Return the AVProgram containing stream index s, or (presumably, in the
 * elided tail) NULL when the stream belongs to no program. */
2570 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2574     for (i = 0; i < ic->nb_programs; i++)
2575         for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2576             if (ic->programs[i]->stream_index[j] == s)
2577                 return ic->programs[i];
/* Pick the "best" stream of the requested media type: restrict the search
 * to related_stream's program when given, skip hearing/visually-impaired
 * dispositions, prefer streams with the most probed frames
 * (codec_info_nb_frames), and optionally return the matched decoder.
 * Returns the stream index, AVERROR_STREAM_NOT_FOUND, or
 * AVERROR_DECODER_NOT_FOUND when decoder_ret was requested but none exists.
 * NOTE(review): elided listing — loop braces, continue statements and the
 * final return are not visible here. */
2581 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
2582                         int wanted_stream_nb, int related_stream,
2583                         AVCodec **decoder_ret, int flags)
2585     int i, nb_streams = ic->nb_streams;
2586     int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2587     unsigned *program = NULL;
2588     AVCodec *decoder = NULL, *best_decoder = NULL;
2590     if (related_stream >= 0 && wanted_stream_nb < 0) {
2591         AVProgram *p = find_program_from_stream(ic, related_stream);
2593             program    = p->stream_index;
2594             nb_streams = p->nb_stream_indexes;
2597     for (i = 0; i < nb_streams; i++) {
2598         int real_stream_index = program ? program[i] : i;
2599         AVStream *st          = ic->streams[real_stream_index];
2600         AVCodecParameters *par = st->codecpar;
2601         if (par->codec_type != type)
2603         if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2605         if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
2606                                AV_DISPOSITION_VISUAL_IMPAIRED))
2609             decoder = avcodec_find_decoder(par->codec_id);
2612                     ret = AVERROR_DECODER_NOT_FOUND;
2616         if (best_count >= st->codec_info_nb_frames)
2618         best_count   = st->codec_info_nb_frames;
2619         ret          = real_stream_index;
2620         best_decoder = decoder;
2621         if (program && i == nb_streams - 1 && ret < 0) {
2623             nb_streams = ic->nb_streams;
2624             /* no related stream found, try again with everything */
2629         *decoder_ret = best_decoder;
2633 /*******************************************************/
2635 int av_read_play(AVFormatContext *s)
2637 if (s->iformat->read_play)
2638 return s->iformat->read_play(s);
2640 return avio_pause(s->pb, 0);
2641 return AVERROR(ENOSYS);
2644 int av_read_pause(AVFormatContext *s)
2646 if (s->iformat->read_pause)
2647 return s->iformat->read_pause(s);
2649 return avio_pause(s->pb, 1);
2650 return AVERROR(ENOSYS);
2653 static void free_stream(AVStream **pst)
2655 AVStream *st = *pst;
2661 for (i = 0; i < st->nb_side_data; i++)
2662 av_freep(&st->side_data[i].data);
2663 av_freep(&st->side_data);
2666 av_parser_close(st->parser);
2668 if (st->attached_pic.data)
2669 av_packet_unref(&st->attached_pic);
2672 avcodec_free_context(&st->internal->avctx);
2673 av_bsf_free(&st->internal->extract_extradata.bsf);
2674 av_packet_free(&st->internal->extract_extradata.pkt);
2676 av_freep(&st->internal);
2678 av_dict_free(&st->metadata);
2679 avcodec_parameters_free(&st->codecpar);
2680 av_freep(&st->probe_data.buf);
2681 av_free(st->index_entries);
2682 #if FF_API_LAVF_AVCTX
2683 FF_DISABLE_DEPRECATION_WARNINGS
2684 av_free(st->codec->extradata);
2685 av_free(st->codec->subtitle_header);
2687 FF_ENABLE_DEPRECATION_WARNINGS
2689 av_free(st->priv_data);
2695 void avformat_free_context(AVFormatContext *s)
2703 if (s->iformat && s->iformat->priv_class && s->priv_data)
2704 av_opt_free(s->priv_data);
2706 for (i = 0; i < s->nb_streams; i++)
2707 free_stream(&s->streams[i]);
2709 for (i = s->nb_programs - 1; i >= 0; i--) {
2710 av_dict_free(&s->programs[i]->metadata);
2711 av_freep(&s->programs[i]->stream_index);
2712 av_freep(&s->programs[i]);
2714 av_freep(&s->programs);
2715 av_freep(&s->priv_data);
2716 while (s->nb_chapters--) {
2717 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2718 av_free(s->chapters[s->nb_chapters]);
2720 av_freep(&s->chapters);
2721 av_dict_free(&s->metadata);
2722 av_freep(&s->streams);
2723 av_freep(&s->internal);
2727 void avformat_close_input(AVFormatContext **ps)
2729 AVFormatContext *s = *ps;
2730 AVIOContext *pb = s->pb;
2732 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2733 (s->flags & AVFMT_FLAG_CUSTOM_IO))
2736 flush_packet_queue(s);
2739 if (s->iformat->read_close)
2740 s->iformat->read_close(s);
2742 avformat_free_context(s);
2749 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
2754 if (av_reallocp_array(&s->streams, s->nb_streams + 1,
2755 sizeof(*s->streams)) < 0) {
2760 st = av_mallocz(sizeof(AVStream));
2763 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2768 #if FF_API_LAVF_AVCTX
2769 FF_DISABLE_DEPRECATION_WARNINGS
2770 st->codec = avcodec_alloc_context3(c);
2776 FF_ENABLE_DEPRECATION_WARNINGS
2779 st->internal = av_mallocz(sizeof(*st->internal));
2784 #if FF_API_LAVF_AVCTX
2785 FF_DISABLE_DEPRECATION_WARNINGS
2786 /* no default bitrate if decoding */
2787 st->codec->bit_rate = 0;
2788 FF_ENABLE_DEPRECATION_WARNINGS
2791 /* default pts setting is MPEG-like */
2792 avpriv_set_pts_info(st, 33, 1, 90000);
2793 /* we set the current DTS to 0 so that formats without any timestamps
2794 * but durations get some timestamps, formats with some unknown
2795 * timestamps have their first few packets buffered and the
2796 * timestamps corrected before they are returned to the user */
2799 st->cur_dts = AV_NOPTS_VALUE;
2802 st->codecpar = avcodec_parameters_alloc();
2806 st->internal->avctx = avcodec_alloc_context3(NULL);
2807 if (!st->internal->avctx)
2810 st->index = s->nb_streams;
2811 st->start_time = AV_NOPTS_VALUE;
2812 st->duration = AV_NOPTS_VALUE;
2813 st->first_dts = AV_NOPTS_VALUE;
2814 st->probe_packets = MAX_PROBE_PACKETS;
2816 st->last_IP_pts = AV_NOPTS_VALUE;
2817 for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
2818 st->pts_buffer[i] = AV_NOPTS_VALUE;
2820 st->sample_aspect_ratio = (AVRational) { 0, 1 };
2822 st->info->fps_first_dts = AV_NOPTS_VALUE;
2823 st->info->fps_last_dts = AV_NOPTS_VALUE;
2825 #if FF_API_LAVF_AVCTX
2826 st->internal->need_codec_update = 1;
2829 s->streams[s->nb_streams++] = st;
2836 AVProgram *av_new_program(AVFormatContext *ac, int id)
2838 AVProgram *program = NULL;
2841 av_log(ac, AV_LOG_TRACE, "new_program: id=0x%04x\n", id);
2843 for (i = 0; i < ac->nb_programs; i++)
2844 if (ac->programs[i]->id == id)
2845 program = ac->programs[i];
2848 program = av_mallocz(sizeof(AVProgram));
2851 dynarray_add(&ac->programs, &ac->nb_programs, program);
2852 program->discard = AVDISCARD_NONE;
2859 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
2860 int64_t start, int64_t end, const char *title)
2862 AVChapter *chapter = NULL;
2865 for (i = 0; i < s->nb_chapters; i++)
2866 if (s->chapters[i]->id == id)
2867 chapter = s->chapters[i];
2870 chapter = av_mallocz(sizeof(AVChapter));
2873 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2875 av_dict_set(&chapter->metadata, "title", title, 0);
2877 chapter->time_base = time_base;
2878 chapter->start = start;
2884 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
2887 AVProgram *program = NULL;
2889 if (idx >= ac->nb_streams) {
2890 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
2894 for (i = 0; i < ac->nb_programs; i++) {
2895 if (ac->programs[i]->id != progid)
2897 program = ac->programs[i];
2898 for (j = 0; j < program->nb_stream_indexes; j++)
2899 if (program->stream_index[j] == idx)
2902 if (av_reallocp_array(&program->stream_index,
2903 program->nb_stream_indexes + 1,
2904 sizeof(*program->stream_index)) < 0) {
2905 program->nb_stream_indexes = 0;
2908 program->stream_index[program->nb_stream_indexes++] = idx;
2913 uint64_t ff_ntp_time(void)
2915 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/* Expand a single "%[0N]d" in path with number into buf. "%%" emits a
 * literal '%'. Fails (-1) on overflow, on any other conversion, on a
 * second %d, or when no %d is present at all. */
int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q = buf;
    p = path;
    percentd_found = 0;
    for (;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            do {
                nd = 0;
                while (av_isdigit(*p))
                    nd = nd * 10 + *p++ - '0';
                c = *p++;
            } while (av_isdigit(c));

            switch (c) {
            case '%':
                goto addchar;
            case 'd':
                if (percentd_found)
                    goto fail;
                percentd_found = 1;
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;
            }
        } else {
addchar:
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;

fail:
    *q = '\0';
    return -1;
}
/* Split url into proto, authorization, hostname, port and path
 * components. Any output buffer may be sized 0; *port_ptr is set to -1
 * when no port is present. */
void av_url_split(char *proto, int proto_size,
                  char *authorization, int authorization_size,
                  char *hostname, int hostname_size,
                  int *port_ptr, char *path, int path_size, const char *url)
{
    const char *p, *ls, *at, *col, *brk;

    if (port_ptr)
        *port_ptr = -1;
    if (proto_size > 0)
        proto[0] = 0;
    if (authorization_size > 0)
        authorization[0] = 0;
    if (hostname_size > 0)
        hostname[0] = 0;
    if (path_size > 0)
        path[0] = 0;

    /* parse protocol */
    if ((p = strchr(url, ':'))) {
        av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
        p++; /* skip ':' */
        if (*p == '/')
            p++;
        if (*p == '/')
            p++;
    } else {
        /* no protocol means plain filename */
        av_strlcpy(path, url, path_size);
        return;
    }

    /* separate path from hostname */
    ls = strchr(p, '/');
    if (!ls)
        ls = strchr(p, '?');
    if (ls)
        av_strlcpy(path, ls, path_size);
    else
        ls = &p[strlen(p)]; // XXX

    /* the rest is hostname, use that to parse auth/port */
    if (ls != p) {
        /* authorization (user[:pass]@hostname) */
        if ((at = strchr(p, '@')) && at < ls) {
            av_strlcpy(authorization, p,
                       FFMIN(authorization_size, at + 1 - p));
            p = at + 1; /* skip '@' */
        }

        if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
            /* [host]:port — numeric IPv6 literal */
            av_strlcpy(hostname, p + 1,
                       FFMIN(hostname_size, brk - p));
            if (brk[1] == ':' && port_ptr)
                *port_ptr = atoi(brk + 2);
        } else if ((col = strchr(p, ':')) && col < ls) {
            av_strlcpy(hostname, p,
                       FFMIN(col + 1 - p, hostname_size));
            if (port_ptr)
                *port_ptr = atoi(col + 1);
        } else
            av_strlcpy(hostname, p,
                       FFMIN(ls + 1 - p, hostname_size));
    }
}
/* Encode s bytes of src as 2*s hex digits into buff (upper- or
 * lowercase). No NUL terminator is written; returns buff. */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    static const char hex_table_uc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'A', 'B',
                                           'C', 'D', 'E', 'F' };
    static const char hex_table_lc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'a', 'b',
                                           'c', 'd', 'e', 'f' };
    const char *tab = lowercase ? hex_table_lc : hex_table_uc;
    char *out = buff;
    int i;

    for (i = 0; i < s; i++) {
        *out++ = tab[src[i] >> 4];
        *out++ = tab[src[i] & 0xF];
    }

    return buff;
}
3059 int ff_hex_to_data(uint8_t *data, const char *p)
3066 p += strspn(p, SPACE_CHARS);
3069 c = av_toupper((unsigned char) *p++);
3070 if (c >= '0' && c <= '9')
3072 else if (c >= 'A' && c <= 'F')
3087 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
3088 unsigned int pts_num, unsigned int pts_den)
3091 if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
3092 if (new_tb.num != pts_num)
3093 av_log(NULL, AV_LOG_DEBUG,
3094 "st:%d removing common factor %d from timebase\n",
3095 s->index, pts_num / new_tb.num);
3097 av_log(NULL, AV_LOG_WARNING,
3098 "st:%d has too large timebase, reducing\n", s->index);
3100 if (new_tb.num <= 0 || new_tb.den <= 0) {
3101 av_log(NULL, AV_LOG_ERROR,
3102 "Ignoring attempt to set invalid timebase for st:%d\n",
3106 s->time_base = new_tb;
3107 s->pts_wrap_bits = pts_wrap_bits;
3110 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3113 const char *ptr = str;
3115 /* Parse key=value pairs. */
3118 char *dest = NULL, *dest_end;
3119 int key_len, dest_len = 0;
3121 /* Skip whitespace and potential commas. */
3122 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
3129 if (!(ptr = strchr(key, '=')))
3132 key_len = ptr - key;
3134 callback_get_buf(context, key, key_len, &dest, &dest_len);
3135 dest_end = dest + dest_len - 1;
3139 while (*ptr && *ptr != '\"') {
3143 if (dest && dest < dest_end)
3147 if (dest && dest < dest_end)
3155 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
3156 if (dest && dest < dest_end)
3164 int ff_find_stream_index(AVFormatContext *s, int id)
3167 for (i = 0; i < s->nb_streams; i++)
3168 if (s->streams[i]->id == id)
3173 int64_t ff_iso8601_to_unix_time(const char *datestr)
3175 struct tm time1 = { 0 }, time2 = { 0 };
3176 const char *ret1, *ret2;
3177 ret1 = av_small_strptime(datestr, "%Y - %m - %d %T", &time1);
3178 ret2 = av_small_strptime(datestr, "%Y - %m - %dT%T", &time2);
3180 return av_timegm(&time2);
3182 return av_timegm(&time1);
3185 int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
3189 if (ofmt->query_codec)
3190 return ofmt->query_codec(codec_id, std_compliance);
3191 else if (ofmt->codec_tag)
3192 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
3193 else if (codec_id == ofmt->video_codec ||
3194 codec_id == ofmt->audio_codec ||
3195 codec_id == ofmt->subtitle_codec)
3198 return AVERROR_PATCHWELCOME;
/* Initialize global network state (no-op when built without network
 * support). Returns 0 on success or a negative AVERROR code. */
int avformat_network_init(void)
{
#if CONFIG_NETWORK
    int ret;
    ff_network_inited_globally = 1;
    if ((ret = ff_network_init()) < 0)
        return ret;
    ff_tls_init();
#endif
    return 0;
}
3213 int avformat_network_deinit(void)
3222 int ff_add_param_change(AVPacket *pkt, int32_t channels,
3223 uint64_t channel_layout, int32_t sample_rate,
3224 int32_t width, int32_t height)
3230 return AVERROR(EINVAL);
3233 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
3235 if (channel_layout) {
3237 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
3241 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
3243 if (width || height) {
3245 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
3247 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
3249 return AVERROR(ENOMEM);
3250 bytestream_put_le32(&data, flags);
3252 bytestream_put_le32(&data, channels);
3254 bytestream_put_le64(&data, channel_layout);
3256 bytestream_put_le32(&data, sample_rate);
3257 if (width || height) {
3258 bytestream_put_le32(&data, width);
3259 bytestream_put_le32(&data, height);
3264 int ff_generate_avci_extradata(AVStream *st)
3266 static const uint8_t avci100_1080p_extradata[] = {
3268 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3269 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3270 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3271 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
3272 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
3273 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
3274 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
3275 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
3276 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3278 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3281 static const uint8_t avci100_1080i_extradata[] = {
3283 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3284 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3285 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3286 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
3287 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
3288 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
3289 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
3290 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
3291 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
3292 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
3293 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
3295 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3298 static const uint8_t avci50_1080i_extradata[] = {
3300 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
3301 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
3302 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
3303 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
3304 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
3305 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
3306 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
3307 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
3308 0x81, 0x13, 0xf7, 0xff, 0x80, 0x01, 0x80, 0x02,
3309 0x71, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
3310 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
3312 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
3315 static const uint8_t avci100_720p_extradata[] = {
3317 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3318 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
3319 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
3320 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
3321 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
3322 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
3323 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
3324 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
3325 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
3326 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
3328 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
3332 const uint8_t *data = NULL;
3335 if (st->codecpar->width == 1920) {
3336 if (st->codecpar->field_order == AV_FIELD_PROGRESSIVE) {
3337 data = avci100_1080p_extradata;
3338 size = sizeof(avci100_1080p_extradata);
3340 data = avci100_1080i_extradata;
3341 size = sizeof(avci100_1080i_extradata);
3343 } else if (st->codecpar->width == 1440) {
3344 data = avci50_1080i_extradata;
3345 size = sizeof(avci50_1080i_extradata);
3346 } else if (st->codecpar->width == 1280) {
3347 data = avci100_720p_extradata;
3348 size = sizeof(avci100_720p_extradata);
3354 av_freep(&st->codecpar->extradata);
3355 st->codecpar->extradata_size = 0;
3356 st->codecpar->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
3357 if (!st->codecpar->extradata)
3358 return AVERROR(ENOMEM);
3360 memcpy(st->codecpar->extradata, data, size);
3361 st->codecpar->extradata_size = size;
3366 uint8_t *av_stream_get_side_data(AVStream *st, enum AVPacketSideDataType type,
3371 for (i = 0; i < st->nb_side_data; i++) {
3372 if (st->side_data[i].type == type) {
3374 *size = st->side_data[i].size;
3375 return st->side_data[i].data;
3381 uint8_t *av_stream_new_side_data(AVStream *st, enum AVPacketSideDataType type,
3384 AVPacketSideData *sd, *tmp;
3386 uint8_t *data = av_malloc(size);
3391 for (i = 0; i < st->nb_side_data; i++) {
3392 sd = &st->side_data[i];
3394 if (sd->type == type) {
3395 av_freep(&sd->data);
3402 tmp = av_realloc_array(st->side_data, st->nb_side_data + 1, sizeof(*tmp));
3408 st->side_data = tmp;
3411 sd = &st->side_data[st->nb_side_data - 1];
3418 void ff_format_io_close(AVFormatContext *s, AVIOContext **pb)
3421 s->io_close(s, *pb);