2 * various utility functions for use within Libav
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/parseutils.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/time.h"
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
42 #include "audiointerleave.h"
55 * various utility functions for use within Libav
58 unsigned avformat_version(void)
60 return LIBAVFORMAT_VERSION_INT;
63 const char *avformat_configuration(void)
65 return LIBAV_CONFIGURATION;
68 const char *avformat_license(void)
70 #define LICENSE_PREFIX "libavformat license: "
71 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
74 /* an arbitrarily chosen "sane" max packet size -- 50M */
75 #define SANE_CHUNK_SIZE (50000000)
77 /* Read the data in sane-sized chunks and append to pkt.
78 * Return the number of bytes read or an error. */
79 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
81 int64_t chunk_size = size;
82 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
83 int orig_size = pkt->size;
87 int prev_size = pkt->size;
90 /* When the caller requests a lot of data, limit it to the amount
91 * left in file or SANE_CHUNK_SIZE when it is not known. */
92 if (size > SANE_CHUNK_SIZE) {
93 int64_t filesize = avio_size(s) - avio_tell(s);
94 chunk_size = FFMAX(filesize, SANE_CHUNK_SIZE);
96 read_size = FFMIN(size, chunk_size);
98 ret = av_grow_packet(pkt, read_size);
102 ret = avio_read(s, pkt->data + prev_size, read_size);
103 if (ret != read_size) {
104 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
113 av_packet_unref(pkt);
114 return pkt->size > orig_size ? pkt->size - orig_size : ret;
117 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
122 pkt->pos = avio_tell(s);
124 return append_packet_chunked(s, pkt, size);
127 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
130 return av_get_packet(s, pkt, size);
131 return append_packet_chunked(s, pkt, size);
/* Return 1 if the filename template contains a valid frame-number
 * directive (e.g. "%d"), 0 otherwise. */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename &&
           (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
}
141 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
142 AVProbeData *pd, int score)
144 static const struct {
147 enum AVMediaType type;
149 { "aac", AV_CODEC_ID_AAC, AVMEDIA_TYPE_AUDIO },
150 { "ac3", AV_CODEC_ID_AC3, AVMEDIA_TYPE_AUDIO },
151 { "dts", AV_CODEC_ID_DTS, AVMEDIA_TYPE_AUDIO },
152 { "eac3", AV_CODEC_ID_EAC3, AVMEDIA_TYPE_AUDIO },
153 { "h264", AV_CODEC_ID_H264, AVMEDIA_TYPE_VIDEO },
154 { "latm", AV_CODEC_ID_AAC_LATM, AVMEDIA_TYPE_AUDIO },
155 { "m4v", AV_CODEC_ID_MPEG4, AVMEDIA_TYPE_VIDEO },
156 { "mp3", AV_CODEC_ID_MP3, AVMEDIA_TYPE_AUDIO },
157 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
160 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
164 av_log(s, AV_LOG_DEBUG,
165 "Probe with size=%d, packets=%d detected %s with score=%d\n",
166 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
168 for (i = 0; fmt_id_type[i].name; i++) {
169 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
170 st->codecpar->codec_id = fmt_id_type[i].id;
171 st->codecpar->codec_type = fmt_id_type[i].type;
172 #if FF_API_LAVF_AVCTX
173 FF_DISABLE_DEPRECATION_WARNINGS
174 st->codec->codec_type = st->codecpar->codec_type;
175 st->codec->codec_id = st->codecpar->codec_id;
176 FF_ENABLE_DEPRECATION_WARNINGS
185 /************************************************************/
186 /* input media file */
188 /* Open input file and probe the format if necessary. */
189 static int init_input(AVFormatContext *s, const char *filename,
190 AVDictionary **options)
193 AVProbeData pd = { filename, NULL, 0 };
196 s->flags |= AVFMT_FLAG_CUSTOM_IO;
198 return av_probe_input_buffer(s->pb, &s->iformat, filename,
200 else if (s->iformat->flags & AVFMT_NOFILE)
201 return AVERROR(EINVAL);
205 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
206 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0))))
209 ret = s->io_open(s, &s->pb, filename, AVIO_FLAG_READ, options);
214 return av_probe_input_buffer(s->pb, &s->iformat, filename,
218 static int add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
219 AVPacketList **plast_pktl, int ref)
221 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
225 return AVERROR(ENOMEM);
228 if ((ret = av_packet_ref(&pktl->pkt, pkt)) < 0) {
237 (*plast_pktl)->next = pktl;
239 *packet_buffer = pktl;
241 /* Add the packet in the buffered packet list. */
246 static int queue_attached_pictures(AVFormatContext *s)
249 for (i = 0; i < s->nb_streams; i++)
250 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
251 s->streams[i]->discard < AVDISCARD_ALL) {
253 ret = add_to_pktbuf(&s->internal->raw_packet_buffer,
254 &s->streams[i]->attached_pic,
255 &s->internal->raw_packet_buffer_end, 1);
#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
/* Sync the deprecated per-stream AVCodecContext from codecpar for streams
 * flagged as needing an update. */
static int update_stream_avctx(AVFormatContext *s)
{
    int i, ret;
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];

        if (!st->internal->need_codec_update)
            continue;

        ret = avcodec_parameters_to_context(st->codec, st->codecpar);
        if (ret < 0)
            return ret;

        st->internal->need_codec_update = 0;
    }
    return 0;
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
284 int avformat_open_input(AVFormatContext **ps, const char *filename,
285 AVInputFormat *fmt, AVDictionary **options)
287 AVFormatContext *s = *ps;
289 AVDictionary *tmp = NULL;
290 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
292 if (!s && !(s = avformat_alloc_context()))
293 return AVERROR(ENOMEM);
298 av_dict_copy(&tmp, *options, 0);
300 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
303 if ((ret = init_input(s, filename, &tmp)) < 0)
306 /* Check filename in case an image number is expected. */
307 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
308 if (!av_filename_number_test(filename)) {
309 ret = AVERROR(EINVAL);
314 s->duration = s->start_time = AV_NOPTS_VALUE;
315 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
317 /* Allocate private data. */
318 if (s->iformat->priv_data_size > 0) {
319 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
320 ret = AVERROR(ENOMEM);
323 if (s->iformat->priv_class) {
324 *(const AVClass **) s->priv_data = s->iformat->priv_class;
325 av_opt_set_defaults(s->priv_data);
326 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
331 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
333 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
335 if (s->iformat->read_header)
336 if ((ret = s->iformat->read_header(s)) < 0)
339 if (id3v2_extra_meta &&
340 (ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
342 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
344 if ((ret = queue_attached_pictures(s)) < 0)
347 if (s->pb && !s->internal->data_offset)
348 s->internal->data_offset = avio_tell(s->pb);
350 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
352 #if FF_API_LAVF_AVCTX
353 update_stream_avctx(s);
356 for (i = 0; i < s->nb_streams; i++)
357 s->streams[i]->internal->orig_codec_id = s->streams[i]->codecpar->codec_id;
360 av_dict_free(options);
367 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
369 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
371 avformat_free_context(s);
376 /*******************************************************/
378 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
380 if (st->codecpar->codec_id == AV_CODEC_ID_PROBE) {
381 AVProbeData *pd = &st->probe_data;
382 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
387 if ((err = av_reallocp(&pd->buf, pd->buf_size + pkt->size +
388 AVPROBE_PADDING_SIZE)) < 0)
390 memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
391 pd->buf_size += pkt->size;
392 memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
394 st->probe_packets = 0;
396 av_log(s, AV_LOG_ERROR,
397 "nothing to probe for stream %d\n", st->index);
402 if (!st->probe_packets ||
403 av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
404 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0
405 ? AVPROBE_SCORE_MAX / 4 : 0);
406 if (st->codecpar->codec_id != AV_CODEC_ID_PROBE) {
409 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
416 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
422 AVPacketList *pktl = s->internal->raw_packet_buffer;
426 st = s->streams[pkt->stream_index];
427 if (st->codecpar->codec_id != AV_CODEC_ID_PROBE ||
428 !st->probe_packets ||
429 s->internal->raw_packet_buffer_remaining_size < pkt->size) {
431 if (st->probe_packets)
432 if ((err = probe_codec(s, st, NULL)) < 0)
434 pd = &st->probe_data;
437 s->internal->raw_packet_buffer = pktl->next;
438 s->internal->raw_packet_buffer_remaining_size += pkt->size;
447 ret = s->iformat->read_packet(s, pkt);
449 if (!pktl || ret == AVERROR(EAGAIN))
451 for (i = 0; i < s->nb_streams; i++) {
453 if (st->probe_packets)
454 if ((err = probe_codec(s, st, NULL)) < 0)
461 AVPacket tmp = { 0 };
462 ret = av_packet_ref(&tmp, pkt);
468 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
469 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
470 av_log(s, AV_LOG_WARNING,
471 "Dropped corrupted packet (stream = %d)\n",
473 av_packet_unref(pkt);
477 st = s->streams[pkt->stream_index];
479 switch (st->codecpar->codec_type) {
480 case AVMEDIA_TYPE_VIDEO:
481 if (s->video_codec_id)
482 st->codecpar->codec_id = s->video_codec_id;
484 case AVMEDIA_TYPE_AUDIO:
485 if (s->audio_codec_id)
486 st->codecpar->codec_id = s->audio_codec_id;
488 case AVMEDIA_TYPE_SUBTITLE:
489 if (s->subtitle_codec_id)
490 st->codecpar->codec_id = s->subtitle_codec_id;
494 if (!pktl && (st->codecpar->codec_id != AV_CODEC_ID_PROBE ||
498 err = add_to_pktbuf(&s->internal->raw_packet_buffer, pkt,
499 &s->internal->raw_packet_buffer_end, 0);
502 s->internal->raw_packet_buffer_remaining_size -= pkt->size;
504 if ((err = probe_codec(s, st, pkt)) < 0)
509 /**********************************************************/
512 * Return the frame duration in seconds. Return 0 if not available.
514 void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st,
515 AVCodecParserContext *pc, AVPacket *pkt)
517 AVRational codec_framerate = s->iformat ? st->internal->avctx->framerate :
518 (AVRational){ 0, 1 };
523 switch (st->codecpar->codec_type) {
524 case AVMEDIA_TYPE_VIDEO:
525 if (st->avg_frame_rate.num) {
526 *pnum = st->avg_frame_rate.den;
527 *pden = st->avg_frame_rate.num;
528 } else if (st->time_base.num * 1000LL > st->time_base.den) {
529 *pnum = st->time_base.num;
530 *pden = st->time_base.den;
531 } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
532 *pnum = codec_framerate.den;
533 *pden = codec_framerate.num;
534 if (pc && pc->repeat_pict) {
535 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
536 *pden /= 1 + pc->repeat_pict;
538 *pnum *= 1 + pc->repeat_pict;
540 /* If this codec can be interlaced or progressive then we need
541 * a parser to compute duration of a packet. Thus if we have
542 * no parser in such case leave duration undefined. */
543 if (st->internal->avctx->ticks_per_frame > 1 && !pc)
547 case AVMEDIA_TYPE_AUDIO:
548 frame_size = av_get_audio_frame_duration2(st->codecpar, pkt->size);
549 if (frame_size <= 0 || st->codecpar->sample_rate <= 0)
552 *pden = st->codecpar->sample_rate;
559 static int is_intra_only(enum AVCodecID id)
561 const AVCodecDescriptor *d = avcodec_descriptor_get(id);
564 if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
569 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
570 int64_t dts, int64_t pts)
572 AVStream *st = s->streams[stream_index];
573 AVPacketList *pktl = s->internal->packet_buffer;
575 if (st->first_dts != AV_NOPTS_VALUE ||
576 dts == AV_NOPTS_VALUE ||
577 st->cur_dts == AV_NOPTS_VALUE)
580 st->first_dts = dts - st->cur_dts;
583 for (; pktl; pktl = pktl->next) {
584 if (pktl->pkt.stream_index != stream_index)
586 // FIXME: think more about this check
587 if (pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
588 pktl->pkt.pts += st->first_dts;
590 if (pktl->pkt.dts != AV_NOPTS_VALUE)
591 pktl->pkt.dts += st->first_dts;
593 if (st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
594 st->start_time = pktl->pkt.pts;
596 if (st->start_time == AV_NOPTS_VALUE)
597 st->start_time = pts;
600 static void update_initial_durations(AVFormatContext *s, AVStream *st,
601 int stream_index, int duration)
603 AVPacketList *pktl = s->internal->packet_buffer;
606 if (st->first_dts != AV_NOPTS_VALUE) {
607 cur_dts = st->first_dts;
608 for (; pktl; pktl = pktl->next) {
609 if (pktl->pkt.stream_index == stream_index) {
610 if (pktl->pkt.pts != pktl->pkt.dts ||
611 pktl->pkt.dts != AV_NOPTS_VALUE ||
617 pktl = s->internal->packet_buffer;
618 st->first_dts = cur_dts;
619 } else if (st->cur_dts)
622 for (; pktl; pktl = pktl->next) {
623 if (pktl->pkt.stream_index != stream_index)
625 if (pktl->pkt.pts == pktl->pkt.dts &&
626 pktl->pkt.dts == AV_NOPTS_VALUE &&
627 !pktl->pkt.duration) {
628 pktl->pkt.dts = cur_dts;
629 if (!st->internal->avctx->has_b_frames)
630 pktl->pkt.pts = cur_dts;
632 if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
633 pktl->pkt.duration = duration;
637 if (st->first_dts == AV_NOPTS_VALUE)
638 st->cur_dts = cur_dts;
641 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
642 AVCodecParserContext *pc, AVPacket *pkt)
644 int num, den, presentation_delayed, delay, i;
647 if (s->flags & AVFMT_FLAG_NOFILLIN)
650 if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
651 pkt->dts = AV_NOPTS_VALUE;
653 /* do we have a video B-frame ? */
654 delay = st->internal->avctx->has_b_frames;
655 presentation_delayed = 0;
657 /* XXX: need has_b_frame, but cannot get it if the codec is
660 pc && pc->pict_type != AV_PICTURE_TYPE_B)
661 presentation_delayed = 1;
663 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
664 st->pts_wrap_bits < 63 &&
665 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
666 pkt->dts -= 1LL << st->pts_wrap_bits;
669 /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
670 * We take the conservative approach and discard both.
671 * Note: If this is misbehaving for an H.264 file, then possibly
672 * presentation_delayed is not set correctly. */
673 if (delay == 1 && pkt->dts == pkt->pts &&
674 pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
675 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
676 pkt->dts = AV_NOPTS_VALUE;
679 if (pkt->duration == 0 && st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
680 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
682 pkt->duration = av_rescale_rnd(1, num * (int64_t) st->time_base.den,
683 den * (int64_t) st->time_base.num,
686 if (pkt->duration != 0 && s->internal->packet_buffer)
687 update_initial_durations(s, st, pkt->stream_index,
692 /* Correct timestamps with byte offset if demuxers only have timestamps
693 * on packet boundaries */
694 if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
695 /* this will estimate bitrate based on this frame's duration and size */
696 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
697 if (pkt->pts != AV_NOPTS_VALUE)
699 if (pkt->dts != AV_NOPTS_VALUE)
703 /* This may be redundant, but it should not hurt. */
704 if (pkt->dts != AV_NOPTS_VALUE &&
705 pkt->pts != AV_NOPTS_VALUE &&
707 presentation_delayed = 1;
709 av_log(NULL, AV_LOG_TRACE,
710 "IN delayed:%d pts:%"PRId64", dts:%"PRId64" "
711 "cur_dts:%"PRId64" st:%d pc:%p\n",
712 presentation_delayed, pkt->pts, pkt->dts, st->cur_dts,
713 pkt->stream_index, pc);
714 /* Interpolate PTS and DTS if they are not present. We skip H.264
715 * currently because delay and has_b_frames are not reliably set. */
716 if ((delay == 0 || (delay == 1 && pc)) &&
717 st->codecpar->codec_id != AV_CODEC_ID_H264) {
718 if (presentation_delayed) {
719 /* DTS = decompression timestamp */
720 /* PTS = presentation timestamp */
721 if (pkt->dts == AV_NOPTS_VALUE)
722 pkt->dts = st->last_IP_pts;
723 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
724 if (pkt->dts == AV_NOPTS_VALUE)
725 pkt->dts = st->cur_dts;
727 /* This is tricky: the dts must be incremented by the duration
728 * of the frame we are displaying, i.e. the last I- or P-frame. */
729 if (st->last_IP_duration == 0)
730 st->last_IP_duration = pkt->duration;
731 if (pkt->dts != AV_NOPTS_VALUE)
732 st->cur_dts = pkt->dts + st->last_IP_duration;
733 st->last_IP_duration = pkt->duration;
734 st->last_IP_pts = pkt->pts;
735 /* Cannot compute PTS if not present (we can compute it only
736 * by knowing the future. */
737 } else if (pkt->pts != AV_NOPTS_VALUE ||
738 pkt->dts != AV_NOPTS_VALUE ||
740 st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
741 int duration = pkt->duration;
742 if (!duration && st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
743 ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
745 duration = av_rescale_rnd(1,
746 num * (int64_t) st->time_base.den,
747 den * (int64_t) st->time_base.num,
749 if (duration != 0 && s->internal->packet_buffer)
750 update_initial_durations(s, st, pkt->stream_index,
755 if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
757 /* presentation is not delayed : PTS and DTS are the same */
758 if (pkt->pts == AV_NOPTS_VALUE)
760 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
762 if (pkt->pts == AV_NOPTS_VALUE)
763 pkt->pts = st->cur_dts;
765 if (pkt->pts != AV_NOPTS_VALUE)
766 st->cur_dts = pkt->pts + duration;
771 if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
772 st->pts_buffer[0] = pkt->pts;
773 for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
774 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
775 if (pkt->dts == AV_NOPTS_VALUE)
776 pkt->dts = st->pts_buffer[0];
777 // We skipped it above so we try here.
778 if (st->codecpar->codec_id == AV_CODEC_ID_H264)
779 // This should happen on the first packet
780 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
781 if (pkt->dts > st->cur_dts)
782 st->cur_dts = pkt->dts;
785 av_log(NULL, AV_LOG_TRACE,
786 "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n",
787 presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);
790 if (is_intra_only(st->codecpar->codec_id))
791 pkt->flags |= AV_PKT_FLAG_KEY;
792 #if FF_API_CONVERGENCE_DURATION
793 FF_DISABLE_DEPRECATION_WARNINGS
795 pkt->convergence_duration = pc->convergence_duration;
796 FF_ENABLE_DEPRECATION_WARNINGS
800 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
803 AVPacketList *pktl = *pkt_buf;
804 *pkt_buf = pktl->next;
805 av_packet_unref(&pktl->pkt);
812 * Parse a packet, add all split parts to parse_queue.
814 * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
816 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
818 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
819 AVStream *st = s->streams[stream_index];
820 uint8_t *data = pkt ? pkt->data : NULL;
821 int size = pkt ? pkt->size : 0;
822 int ret = 0, got_output = 0;
825 av_init_packet(&flush_pkt);
830 while (size > 0 || (pkt == &flush_pkt && got_output)) {
833 av_init_packet(&out_pkt);
834 len = av_parser_parse2(st->parser, st->internal->avctx,
835 &out_pkt.data, &out_pkt.size, data, size,
836 pkt->pts, pkt->dts, pkt->pos);
838 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
839 /* increment read pointer */
843 got_output = !!out_pkt.size;
848 if (pkt->side_data) {
849 out_pkt.side_data = pkt->side_data;
850 out_pkt.side_data_elems = pkt->side_data_elems;
851 pkt->side_data = NULL;
852 pkt->side_data_elems = 0;
855 /* set the duration */
856 out_pkt.duration = 0;
857 if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
858 if (st->internal->avctx->sample_rate > 0) {
860 av_rescale_q_rnd(st->parser->duration,
861 (AVRational) { 1, st->internal->avctx->sample_rate },
867 out_pkt.stream_index = st->index;
868 out_pkt.pts = st->parser->pts;
869 out_pkt.dts = st->parser->dts;
870 out_pkt.pos = st->parser->pos;
872 if (st->parser->key_frame == 1 ||
873 (st->parser->key_frame == -1 &&
874 st->parser->pict_type == AV_PICTURE_TYPE_I))
875 out_pkt.flags |= AV_PKT_FLAG_KEY;
877 compute_pkt_fields(s, st, st->parser, &out_pkt);
879 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
880 out_pkt.flags & AV_PKT_FLAG_KEY) {
881 ff_reduce_index(s, st->index);
882 av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
883 0, 0, AVINDEX_KEYFRAME);
886 if ((ret = add_to_pktbuf(&s->internal->parse_queue, &out_pkt,
887 &s->internal->parse_queue_end,
889 av_packet_unref(&out_pkt);
894 /* end of the stream => close and free the parser */
895 if (pkt == &flush_pkt) {
896 av_parser_close(st->parser);
901 av_packet_unref(pkt);
905 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
906 AVPacketList **pkt_buffer_end,
910 av_assert0(*pkt_buffer);
913 *pkt_buffer = pktl->next;
915 *pkt_buffer_end = NULL;
920 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
922 int ret = 0, i, got_packet = 0;
923 AVDictionary *metadata = NULL;
927 while (!got_packet && !s->internal->parse_queue) {
931 /* read next packet */
932 ret = ff_read_packet(s, &cur_pkt);
934 if (ret == AVERROR(EAGAIN))
936 /* flush the parsers */
937 for (i = 0; i < s->nb_streams; i++) {
939 if (st->parser && st->need_parsing)
940 parse_packet(s, NULL, st->index);
942 /* all remaining packets are now in parse_queue =>
943 * really terminate parsing */
947 st = s->streams[cur_pkt.stream_index];
949 if (cur_pkt.pts != AV_NOPTS_VALUE &&
950 cur_pkt.dts != AV_NOPTS_VALUE &&
951 cur_pkt.pts < cur_pkt.dts) {
952 av_log(s, AV_LOG_WARNING,
953 "Invalid timestamps stream=%d, pts=%"PRId64", "
954 "dts=%"PRId64", size=%d\n",
955 cur_pkt.stream_index, cur_pkt.pts,
956 cur_pkt.dts, cur_pkt.size);
958 if (s->debug & FF_FDEBUG_TS)
959 av_log(s, AV_LOG_DEBUG,
960 "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", "
961 "size=%d, duration=%"PRId64", flags=%d\n",
962 cur_pkt.stream_index, cur_pkt.pts, cur_pkt.dts,
963 cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
965 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
966 st->parser = av_parser_init(st->codecpar->codec_id);
968 /* no parser available: just output the raw packets */
969 st->need_parsing = AVSTREAM_PARSE_NONE;
970 else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
971 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
972 else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
973 st->parser->flags |= PARSER_FLAG_ONCE;
976 if (!st->need_parsing || !st->parser) {
977 /* no parsing needed: we just output the packet as is */
979 compute_pkt_fields(s, st, NULL, pkt);
980 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
981 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
982 ff_reduce_index(s, st->index);
983 av_add_index_entry(st, pkt->pos, pkt->dts,
984 0, 0, AVINDEX_KEYFRAME);
987 } else if (st->discard < AVDISCARD_ALL) {
988 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
992 av_packet_unref(&cur_pkt);
996 if (!got_packet && s->internal->parse_queue)
997 ret = read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);
999 av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
1001 s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
1002 av_dict_copy(&s->metadata, metadata, 0);
1003 av_dict_free(&metadata);
1004 av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
1007 #if FF_API_LAVF_AVCTX
1008 update_stream_avctx(s);
1011 if (s->debug & FF_FDEBUG_TS)
1012 av_log(s, AV_LOG_DEBUG,
1013 "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", "
1014 "size=%d, duration=%"PRId64", flags=%d\n",
1015 pkt->stream_index, pkt->pts, pkt->dts,
1016 pkt->size, pkt->duration, pkt->flags);
1021 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1023 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1027 return s->internal->packet_buffer
1028 ? read_from_packet_buffer(&s->internal->packet_buffer,
1029 &s->internal->packet_buffer_end, pkt)
1030 : read_frame_internal(s, pkt);
1034 AVPacketList *pktl = s->internal->packet_buffer;
1037 AVPacket *next_pkt = &pktl->pkt;
1039 if (next_pkt->dts != AV_NOPTS_VALUE) {
1040 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1041 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1042 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1043 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0) &&
1044 av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
1046 next_pkt->pts = pktl->pkt.dts;
1050 pktl = s->internal->packet_buffer;
1053 /* read packet from packet buffer, if there is data */
1054 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1055 next_pkt->dts != AV_NOPTS_VALUE && !eof))
1056 return read_from_packet_buffer(&s->internal->packet_buffer,
1057 &s->internal->packet_buffer_end, pkt);
1060 ret = read_frame_internal(s, pkt);
1062 if (pktl && ret != AVERROR(EAGAIN)) {
1069 ret = add_to_pktbuf(&s->internal->packet_buffer, pkt,
1070 &s->internal->packet_buffer_end, 1);
1076 /* XXX: suppress the packet queue */
1077 static void flush_packet_queue(AVFormatContext *s)
1079 free_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end);
1080 free_packet_buffer(&s->internal->packet_buffer, &s->internal->packet_buffer_end);
1081 free_packet_buffer(&s->internal->raw_packet_buffer, &s->internal->raw_packet_buffer_end);
1083 s->internal->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1086 /*******************************************************/
1089 int av_find_default_stream_index(AVFormatContext *s)
1091 int first_audio_index = -1;
1095 if (s->nb_streams <= 0)
1097 for (i = 0; i < s->nb_streams; i++) {
1099 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1100 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1103 if (first_audio_index < 0 &&
1104 st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
1105 first_audio_index = i;
1107 return first_audio_index >= 0 ? first_audio_index : 0;
1110 /** Flush the frame reader. */
1111 void ff_read_frame_flush(AVFormatContext *s)
1116 flush_packet_queue(s);
1118 /* Reset read state for each stream. */
1119 for (i = 0; i < s->nb_streams; i++) {
1123 av_parser_close(st->parser);
1126 st->last_IP_pts = AV_NOPTS_VALUE;
1127 /* We set the current DTS to an unspecified origin. */
1128 st->cur_dts = AV_NOPTS_VALUE;
1130 st->probe_packets = MAX_PROBE_PACKETS;
1132 for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1133 st->pts_buffer[j] = AV_NOPTS_VALUE;
1137 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1141 for (i = 0; i < s->nb_streams; i++) {
1142 AVStream *st = s->streams[i];
1145 av_rescale(timestamp,
1146 st->time_base.den * (int64_t) ref_st->time_base.num,
1147 st->time_base.num * (int64_t) ref_st->time_base.den);
1151 void ff_reduce_index(AVFormatContext *s, int stream_index)
1153 AVStream *st = s->streams[stream_index];
1154 unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1156 if ((unsigned) st->nb_index_entries >= max_entries) {
1158 for (i = 0; 2 * i < st->nb_index_entries; i++)
1159 st->index_entries[i] = st->index_entries[2 * i];
1160 st->nb_index_entries = i;
1164 int ff_add_index_entry(AVIndexEntry **index_entries,
1165 int *nb_index_entries,
1166 unsigned int *index_entries_allocated_size,
1167 int64_t pos, int64_t timestamp,
1168 int size, int distance, int flags)
1170 AVIndexEntry *entries, *ie;
1173 if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1176 entries = av_fast_realloc(*index_entries,
1177 index_entries_allocated_size,
1178 (*nb_index_entries + 1) *
1179 sizeof(AVIndexEntry));
1183 *index_entries = entries;
1185 index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1186 timestamp, AVSEEK_FLAG_ANY);
1189 index = (*nb_index_entries)++;
1190 ie = &entries[index];
1191 assert(index == 0 || ie[-1].timestamp < timestamp);
1193 ie = &entries[index];
1194 if (ie->timestamp != timestamp) {
1195 if (ie->timestamp <= timestamp)
1197 memmove(entries + index + 1, entries + index,
1198 sizeof(AVIndexEntry) * (*nb_index_entries - index));
1199 (*nb_index_entries)++;
1200 } else if (ie->pos == pos && distance < ie->min_distance)
1201 // do not reduce the distance
1202 distance = ie->min_distance;
1206 ie->timestamp = timestamp;
1207 ie->min_distance = distance;
1214 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1215 int size, int distance, int flags)
1217 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1218 &st->index_entries_allocated_size, pos,
1219 timestamp, size, distance, flags);
1222 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1223 int64_t wanted_timestamp, int flags)
1231 // Optimize appending index entries at the end.
1232 if (b && entries[b - 1].timestamp < wanted_timestamp)
1237 timestamp = entries[m].timestamp;
1238 if (timestamp >= wanted_timestamp)
1240 if (timestamp <= wanted_timestamp)
1243 m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1245 if (!(flags & AVSEEK_FLAG_ANY))
1246 while (m >= 0 && m < nb_entries &&
1247 !(entries[m].flags & AVINDEX_KEYFRAME))
1248 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1250 if (m == nb_entries)
1255 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
1257 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1258 wanted_timestamp, flags);
/* Seek to target_ts on stream_index via binary search driven by the
 * demuxer's read_timestamp() callback (ff_gen_search below).  Cached
 * index entries, when present, seed the [pos_min, pos_max] bracket so
 * the search starts close to the target.  On success the AVIOContext
 * is repositioned and the stream's current dts is updated.
 * NOTE(review): extraction dropped interior lines (braces, some
 * declarations/returns); tokens below are byte-identical. */
1261 int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
1262 int64_t target_ts, int flags)
1264 AVInputFormat *avif = s->iformat;
1265 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1266 int64_t ts_min, ts_max, ts;
1271 if (stream_index < 0)
1274 av_log(s, AV_LOG_TRACE, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1277 ts_min = AV_NOPTS_VALUE;
1278 pos_limit = -1; // GCC falsely says it may be uninitialized.
1280 st = s->streams[stream_index];
1281 if (st->index_entries) {
1284 /* FIXME: Whole function must be checked for non-keyframe entries in
1285 * index case, especially read_timestamp(). */
/* Lower bound: last index entry at or before target_ts. */
1286 index = av_index_search_timestamp(st, target_ts,
1287 flags | AVSEEK_FLAG_BACKWARD);
1288 index = FFMAX(index, 0);
1289 e = &st->index_entries[index];
1291 if (e->timestamp <= target_ts || e->pos == e->min_distance) {
1293 ts_min = e->timestamp;
1294 av_log(s, AV_LOG_TRACE, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* Upper bound: first index entry at or after target_ts. */
1300 index = av_index_search_timestamp(st, target_ts,
1301 flags & ~AVSEEK_FLAG_BACKWARD);
1302 assert(index < st->nb_index_entries);
1304 e = &st->index_entries[index];
1305 assert(e->timestamp >= target_ts);
1307 ts_max = e->timestamp;
/* min_distance keeps the search from landing inside the bounding packet. */
1308 pos_limit = pos_max - e->min_distance;
1309 av_log(s, AV_LOG_TRACE, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
1310 " dts_max=%"PRId64"\n", pos_max, pos_limit, ts_max);
1314 pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
1315 ts_min, ts_max, flags, &ts, avif->read_timestamp);
1320 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1323 ff_update_cur_dts(s, st, ts);
/* Generic timestamp binary search.  Narrows [pos_min, pos_max] (with
 * matching [ts_min, ts_max]) around target_ts by repeatedly probing
 * read_timestamp().  Unknown bounds (AV_NOPTS_VALUE) are first
 * resolved by probing the start of data and the end of the file.
 * Interpolation is tried first, then bisection, then linear search
 * (the no_change counter tracks which stage we are in).  On success
 * returns the byte position to seek to and stores the found timestamp
 * in *ts_ret.
 * NOTE(review): extraction dropped interior lines (loop headers,
 * braces, returns); tokens below are byte-identical. */
1328 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1329 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1330 int64_t ts_min, int64_t ts_max,
1331 int flags, int64_t *ts_ret,
1332 int64_t (*read_timestamp)(struct AVFormatContext *, int,
1333 int64_t *, int64_t))
1336 int64_t start_pos, filesize;
1339 av_log(s, AV_LOG_TRACE, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* Resolve an unknown lower bound from the first packet in the file. */
1341 if (ts_min == AV_NOPTS_VALUE) {
1342 pos_min = s->internal->data_offset;
1343 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1344 if (ts_min == AV_NOPTS_VALUE)
/* Resolve an unknown upper bound by probing backwards from EOF. */
1348 if (ts_max == AV_NOPTS_VALUE) {
1350 filesize = avio_size(s->pb);
1351 pos_max = filesize - 1;
1354 ts_max = read_timestamp(s, stream_index, &pos_max,
1357 } while (ts_max == AV_NOPTS_VALUE && pos_max >= step);
1358 if (ts_max == AV_NOPTS_VALUE)
1362 int64_t tmp_pos = pos_max + 1;
1363 int64_t tmp_ts = read_timestamp(s, stream_index,
1364 &tmp_pos, INT64_MAX);
1365 if (tmp_ts == AV_NOPTS_VALUE)
1369 if (tmp_pos >= filesize)
1372 pos_limit = pos_max;
1375 if (ts_min > ts_max)
1377 else if (ts_min == ts_max)
1378 pos_limit = pos_min;
1381 while (pos_min < pos_limit) {
1382 av_log(s, AV_LOG_TRACE, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64
1383 " dts_max=%"PRId64"\n", pos_min, pos_max, ts_min, ts_max);
1384 assert(pos_limit <= pos_max);
1386 if (no_change == 0) {
1387 int64_t approximate_keyframe_distance = pos_max - pos_limit;
1388 // interpolate position (better than dichotomy)
1389 pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
1391 pos_min - approximate_keyframe_distance;
1392 } else if (no_change == 1) {
1393 // bisection if interpolation did not change min / max pos last time
1394 pos = (pos_min + pos_limit) >> 1;
1396 /* linear search if bisection failed, can only happen if there
1397 * are very few or no keyframes between min/max */
1402 else if (pos > pos_limit)
1406 // May pass pos_limit instead of -1.
1407 ts = read_timestamp(s, stream_index, &pos, INT64_MAX);
1412 av_log(s, AV_LOG_TRACE, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64
1413 " target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1414 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts,
1415 pos_limit, start_pos, no_change);
1416 if (ts == AV_NOPTS_VALUE) {
1417 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1420 assert(ts != AV_NOPTS_VALUE);
/* Shrink the bracket toward the probed position. */
1421 if (target_ts <= ts) {
1422 pos_limit = start_pos - 1;
1426 if (target_ts >= ts) {
/* Pick which end of the final bracket to return, per seek direction. */
1432 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1433 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1435 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
/* NOTE(review): &pos_min here (not &pos_max) matches the original
 * bytes; suspicious, but kept byte-identical — confirm upstream. */
1437 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1438 av_log(s, AV_LOG_TRACE, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1439 pos, ts_min, target_ts, ts_max);
/* Byte-position seek: clamp pos into [data_offset, filesize - 1] and
 * reposition the AVIOContext directly.  Used for AVSEEK_FLAG_BYTE.
 * NOTE(review): extraction dropped the clamping branches' bodies;
 * tokens below are byte-identical. */
1444 static int seek_frame_byte(AVFormatContext *s, int stream_index,
1445 int64_t pos, int flags)
1447 int64_t pos_min, pos_max;
1449 pos_min = s->internal->data_offset;
1450 pos_max = avio_size(s->pb) - 1;
1454 else if (pos > pos_max)
1457 avio_seek(s->pb, pos, SEEK_SET);
/* Generic index-based seek.  Looks up timestamp in the stream's index;
 * when the target lies beyond the last index entry, reads packets
 * forward from the end of the index (or from data_offset if the index
 * is empty) until a keyframe past the target is seen, growing the
 * index as a side effect, then retries the lookup and finally seeks
 * to the chosen entry's byte position.
 * NOTE(review): extraction dropped interior lines (do/while headers,
 * returns, braces); tokens below are byte-identical. */
1462 static int seek_frame_generic(AVFormatContext *s, int stream_index,
1463 int64_t timestamp, int flags)
1470 st = s->streams[stream_index];
1472 index = av_index_search_timestamp(st, timestamp, flags);
1474 if (index < 0 && st->nb_index_entries &&
1475 timestamp < st->index_entries[0].timestamp)
1478 if (index < 0 || index == st->nb_index_entries - 1) {
1481 if (st->nb_index_entries) {
1482 assert(st->index_entries);
1483 ie = &st->index_entries[st->nb_index_entries - 1];
1484 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1486 ff_update_cur_dts(s, st, ie->timestamp);
1488 if ((ret = avio_seek(s->pb, s->internal->data_offset, SEEK_SET)) < 0)
/* Read forward (retrying on EAGAIN) until a suitable keyframe shows up. */
1494 read_status = av_read_frame(s, &pkt);
1495 } while (read_status == AVERROR(EAGAIN));
1496 if (read_status < 0)
1498 av_packet_unref(&pkt);
1499 if (stream_index == pkt.stream_index)
1500 if ((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1503 index = av_index_search_timestamp(st, timestamp, flags);
1508 ff_read_frame_flush(s);
1509 if (s->iformat->read_seek)
1510 if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1512 ie = &st->index_entries[index];
1513 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1515 ff_update_cur_dts(s, st, ie->timestamp);
/* Seek dispatcher: byte seek first (if allowed), otherwise pick a
 * default stream when none was given, rescale the AV_TIME_BASE
 * timestamp into that stream's time base, then try in order the
 * demuxer's read_seek(), binary search via read_timestamp(), and the
 * generic index-based seek — each gated by the format's capability
 * flags.  The packet queue is flushed before every strategy.
 * NOTE(review): extraction dropped some returns/braces; tokens below
 * are byte-identical. */
1520 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1521 int64_t timestamp, int flags)
1526 if (flags & AVSEEK_FLAG_BYTE) {
1527 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1529 ff_read_frame_flush(s);
1530 return seek_frame_byte(s, stream_index, timestamp, flags);
1533 if (stream_index < 0) {
1534 stream_index = av_find_default_stream_index(s);
1535 if (stream_index < 0)
1538 st = s->streams[stream_index];
1539 /* timestamp for default must be expressed in AV_TIME_BASE units */
1540 timestamp = av_rescale(timestamp, st->time_base.den,
1541 AV_TIME_BASE * (int64_t) st->time_base.num);
1544 /* first, we try the format specific seek */
1545 if (s->iformat->read_seek) {
1546 ff_read_frame_flush(s);
1547 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1553 if (s->iformat->read_timestamp &&
1554 !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1555 ff_read_frame_flush(s);
1556 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1557 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1558 ff_read_frame_flush(s);
1559 return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public seek entry point: delegate to seek_frame_internal(), then (on
 * success, per the original control flow) requeue attached pictures so
 * they are redelivered after the seek. */
1564 int av_seek_frame(AVFormatContext *s, int stream_index,
1565 int64_t timestamp, int flags)
1567 int ret = seek_frame_internal(s, stream_index, timestamp, flags);
1570 ret = queue_attached_pictures(s);
/* New-style seek API with a [min_ts, max_ts] window around ts.
 * Prefers the demuxer's read_seek2(); otherwise falls back on the old
 * av_seek_frame() API, choosing AVSEEK_FLAG_BACKWARD when ts sits
 * closer to max_ts than to min_ts (unsigned arithmetic guards against
 * overflow of the differences).
 * NOTE(review): extraction dropped interior lines; tokens below are
 * byte-identical. */
1575 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
1576 int64_t ts, int64_t max_ts, int flags)
/* Reject an inconsistent window. */
1578 if (min_ts > ts || max_ts < ts)
1581 if (s->iformat->read_seek2) {
1583 ff_read_frame_flush(s);
1584 ret = s->iformat->read_seek2(s, stream_index, min_ts,
1588 ret = queue_attached_pictures(s);
1592 if (s->iformat->read_timestamp) {
1593 // try to seek via read_timestamp()
1596 // Fall back on old API if new is not implemented but old is.
1597 // Note the old API has somewhat different semantics.
1598 if (s->iformat->read_seek || 1)
1599 return av_seek_frame(s, stream_index, ts,
1600 flags | ((uint64_t) ts - min_ts >
1601 (uint64_t) max_ts - ts
1602 ? AVSEEK_FLAG_BACKWARD : 0));
1604 // try some generic seek like seek_frame_generic() but with new ts semantics
1607 /*******************************************************/
1610 * Return TRUE if the stream has accurate duration in any stream.
1612 * @return TRUE if the stream has accurate duration for at least one component.
/* Scans every stream, then the container itself, for a known duration.
 * NOTE(review): extraction dropped the return statements/braces;
 * tokens below are byte-identical. */
1614 static int has_duration(AVFormatContext *ic)
1619 for (i = 0; i < ic->nb_streams; i++) {
1620 st = ic->streams[i];
1621 if (st->duration != AV_NOPTS_VALUE)
1624 if (ic->duration != AV_NOPTS_VALUE)
1630 * Estimate the stream timings from the one of each components.
1632 * Also computes the global bitrate if possible.
/* Derives container-level start_time, duration and (if the file size
 * is known) bit_rate by folding every stream's timings, rescaled to
 * AV_TIME_BASE. */
1634 static void update_stream_timings(AVFormatContext *ic)
1636 int64_t start_time, start_time1, end_time, end_time1;
1637 int64_t duration, duration1, filesize;
1641 start_time = INT64_MAX;
1642 end_time = INT64_MIN;
1643 duration = INT64_MIN;
1644 for (i = 0; i < ic->nb_streams; i++) {
1645 st = ic->streams[i];
1646 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1647 start_time1 = av_rescale_q(st->start_time, st->time_base,
/* Earliest stream start wins. */
1649 start_time = FFMIN(start_time, start_time1);
1650 if (st->duration != AV_NOPTS_VALUE) {
1651 end_time1 = start_time1 +
1652 av_rescale_q(st->duration, st->time_base,
/* Latest stream end wins. */
1654 end_time = FFMAX(end_time, end_time1);
1657 if (st->duration != AV_NOPTS_VALUE) {
1658 duration1 = av_rescale_q(st->duration, st->time_base,
1660 duration = FFMAX(duration, duration1);
1663 if (start_time != INT64_MAX) {
1664 ic->start_time = start_time;
1665 if (end_time != INT64_MIN)
1666 duration = FFMAX(duration, end_time - start_time);
1668 if (duration != INT64_MIN) {
1669 ic->duration = duration;
1670 if (ic->pb && (filesize = avio_size(ic->pb)) > 0)
1671 /* compute the bitrate */
1672 ic->bit_rate = (double) filesize * 8.0 * AV_TIME_BASE /
1673 (double) ic->duration;
/* Recompute container timings, then copy them down onto any stream
 * that still lacks a start_time/duration, rescaling from AV_TIME_BASE
 * into each stream's own time base. */
1677 static void fill_all_stream_timings(AVFormatContext *ic)
1682 update_stream_timings(ic);
1683 for (i = 0; i < ic->nb_streams; i++) {
1684 st = ic->streams[i];
1685 if (st->start_time == AV_NOPTS_VALUE) {
1686 if (ic->start_time != AV_NOPTS_VALUE)
1687 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q,
1689 if (ic->duration != AV_NOPTS_VALUE)
1690 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q,
/* Last-resort duration estimate: sum the per-stream bitrates (with an
 * explicit INT_MAX overflow guard) when the container bitrate is
 * unset, then derive each stream's duration from file size / bitrate.
 * NOTE(review): extraction dropped interior lines; tokens below are
 * byte-identical. */
1696 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
1698 int64_t filesize, duration;
1702 /* if bit_rate is already set, we believe it */
1703 if (ic->bit_rate <= 0) {
1705 for (i = 0; i < ic->nb_streams; i++) {
1706 st = ic->streams[i];
1707 if (st->codecpar->bit_rate > 0) {
/* Guard against signed overflow of the running sum. */
1708 if (INT_MAX - st->codecpar->bit_rate < bit_rate) {
1712 bit_rate += st->codecpar->bit_rate;
1715 ic->bit_rate = bit_rate;
1718 /* if duration is already set, we believe it */
1719 if (ic->duration == AV_NOPTS_VALUE &&
1720 ic->bit_rate != 0) {
1721 filesize = ic->pb ? avio_size(ic->pb) : 0;
1723 for (i = 0; i < ic->nb_streams; i++) {
1724 st = ic->streams[i];
1725 duration = av_rescale(8 * filesize, st->time_base.den,
1727 (int64_t) st->time_base.num);
1728 if (st->duration == AV_NOPTS_VALUE)
1729 st->duration = duration;
/* Chunk size and retry budget for scanning the file tail for PTSes. */
1735 #define DURATION_MAX_READ_SIZE 250000
1736 #define DURATION_MAX_RETRY 3
1738 /* only usable for MPEG-PS streams */
/* Estimate durations by reading packets near the end of the file and
 * taking the last PTS seen per stream, doubling the scanned window up
 * to DURATION_MAX_RETRY times.  Restores the original file position
 * and resets per-stream decode state afterwards.
 * NOTE(review): extraction dropped interior lines (do/while headers,
 * braces); tokens below are byte-identical. */
1739 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1741 AVPacket pkt1, *pkt = &pkt1;
1743 int read_size, i, ret;
1745 int64_t filesize, offset, duration;
1748 /* flush packet queue */
1749 flush_packet_queue(ic);
1751 for (i = 0; i < ic->nb_streams; i++) {
1752 st = ic->streams[i];
1753 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
1754 av_log(ic, AV_LOG_WARNING,
1755 "start time is not set in estimate_timings_from_pts\n");
1758 av_parser_close(st->parser);
1763 /* estimate the end time (duration) */
1764 /* XXX: may need to support wrapping */
1765 filesize = ic->pb ? avio_size(ic->pb) : 0;
1766 end_time = AV_NOPTS_VALUE;
1768 offset = filesize - (DURATION_MAX_READ_SIZE << retry);
1772 avio_seek(ic->pb, offset, SEEK_SET);
1775 if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
1779 ret = ff_read_packet(ic, pkt);
1780 } while (ret == AVERROR(EAGAIN));
1783 read_size += pkt->size;
1784 st = ic->streams[pkt->stream_index];
1785 if (pkt->pts != AV_NOPTS_VALUE &&
1786 (st->start_time != AV_NOPTS_VALUE ||
1787 st->first_dts != AV_NOPTS_VALUE)) {
1788 duration = end_time = pkt->pts;
1789 if (st->start_time != AV_NOPTS_VALUE)
1790 duration -= st->start_time;
1792 duration -= st->first_dts;
/* Compensate a wrapped timestamp by one full wrap period. */
1794 duration += 1LL << st->pts_wrap_bits;
1796 if (st->duration == AV_NOPTS_VALUE || st->duration < duration)
1797 st->duration = duration;
1800 av_packet_unref(pkt);
1802 } while (end_time == AV_NOPTS_VALUE &&
1803 filesize > (DURATION_MAX_READ_SIZE << retry) &&
1804 ++retry <= DURATION_MAX_RETRY);
1806 fill_all_stream_timings(ic);
/* Restore the caller's read position and reset per-stream state. */
1808 avio_seek(ic->pb, old_offset, SEEK_SET);
1809 for (i = 0; i < ic->nb_streams; i++) {
1810 st = ic->streams[i];
1811 st->cur_dts = st->first_dts;
1812 st->last_IP_pts = AV_NOPTS_VALUE;
/* Top-level timing estimation: pick the best available strategy —
 * tail-PTS scan for seekable MPEG-PS/TS, per-stream timings when any
 * stream has a duration, otherwise the bitrate heuristic — then fold
 * results into the container and trace the outcome. */
1816 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
1820 /* get the file size, if possible */
1821 if (ic->iformat->flags & AVFMT_NOFILE) {
1824 file_size = avio_size(ic->pb);
1825 file_size = FFMAX(0, file_size);
1828 if ((!strcmp(ic->iformat->name, "mpeg") ||
1829 !strcmp(ic->iformat->name, "mpegts")) &&
1830 file_size && ic->pb->seekable) {
1831 /* get accurate estimate from the PTSes */
1832 estimate_timings_from_pts(ic, old_offset);
1833 } else if (has_duration(ic)) {
1834 /* at least one component has timings - we use them for all
1836 fill_all_stream_timings(ic);
1838 av_log(ic, AV_LOG_WARNING,
1839 "Estimating duration from bitrate, this may be inaccurate\n");
1840 /* less precise: use bitrate info */
1841 estimate_timings_from_bit_rate(ic);
1843 update_stream_timings(ic);
1847 AVStream av_unused *st;
1848 for (i = 0; i < ic->nb_streams; i++) {
1849 st = ic->streams[i];
1850 av_log(ic, AV_LOG_TRACE, "%d: start_time: %0.3f duration: %0.3f\n", i,
1851 (double) st->start_time / AV_TIME_BASE,
1852 (double) st->duration / AV_TIME_BASE);
1854 av_log(ic, AV_LOG_TRACE,
1855 "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1856 (double) ic->start_time / AV_TIME_BASE,
1857 (double) ic->duration / AV_TIME_BASE,
1858 ic->bit_rate / 1000);
/* Returns nonzero when the stream's internal codec context carries
 * enough parameters for its media type: sample rate + channels (and,
 * once a decoder ran, a known sample format) for audio; a known pixel
 * format for video once a decoder ran.  A known codec_id is required
 * in all cases. */
1862 static int has_codec_parameters(AVStream *st)
1864 AVCodecContext *avctx = st->internal->avctx;
1867 switch (avctx->codec_type) {
1868 case AVMEDIA_TYPE_AUDIO:
1869 val = avctx->sample_rate && avctx->channels;
1870 if (st->info->found_decoder >= 0 &&
1871 avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
1874 case AVMEDIA_TYPE_VIDEO:
1876 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
1883 return avctx->codec_id != AV_CODEC_ID_NONE && val != 0;
/* H.264 can reorder frames heavily, so keep decoding until at least 6
 * frames came out; every other codec is trusted immediately. */
1886 static int has_decode_delay_been_guessed(AVStream *st)
1888 return st->internal->avctx->codec_id != AV_CODEC_ID_H264 ||
1889 st->info->nb_decoded_frames >= 6;
1892 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/* Probe-decode avpkt on st to fill in missing codec parameters.
 * Opens the decoder on first use (single-threaded, since the H.264
 * decoder only extracts SPS/PPS when not threading) and keeps feeding
 * the packet until parameters are known and the decode delay has been
 * guessed.  found_decoder records the open attempt: 1 ok, -1 failed.
 * NOTE(review): extraction dropped interior lines (returns, loop
 * bodies, cleanup); tokens below are byte-identical. */
1893 static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt,
1894 AVDictionary **options)
1896 AVCodecContext *avctx = st->internal->avctx;
1897 const AVCodec *codec;
1898 int got_picture = 1, ret = 0;
1899 AVFrame *frame = av_frame_alloc();
1900 AVPacket pkt = *avpkt;
1903 return AVERROR(ENOMEM);
1905 if (!avcodec_is_open(avctx) && !st->info->found_decoder) {
1906 AVDictionary *thread_opt = NULL;
1908 #if FF_API_LAVF_AVCTX
1909 FF_DISABLE_DEPRECATION_WARNINGS
1910 codec = st->codec->codec ? st->codec->codec
1911 : avcodec_find_decoder(st->codecpar->codec_id);
1912 FF_ENABLE_DEPRECATION_WARNINGS
1914 codec = avcodec_find_decoder(st->codecpar->codec_id);
1918 st->info->found_decoder = -1;
1923 /* Force thread count to 1 since the H.264 decoder will not extract
1924 * SPS and PPS to extradata during multi-threaded decoding. */
1925 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
1926 ret = avcodec_open2(avctx, codec, options ? options : &thread_opt);
1928 av_dict_free(&thread_opt);
1930 st->info->found_decoder = -1;
1933 st->info->found_decoder = 1;
1934 } else if (!st->info->found_decoder)
1935 st->info->found_decoder = 1;
1937 if (st->info->found_decoder < 0) {
/* Keep decoding while data (or a flush with pending output) remains
 * and parameters are still incomplete. */
1942 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
1944 (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
1945 (!st->codec_info_nb_frames &&
1946 (avctx->codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF)))) {
1948 switch (avctx->codec_type) {
1949 case AVMEDIA_TYPE_VIDEO:
1950 ret = avcodec_decode_video2(avctx, frame,
1951 &got_picture, &pkt);
1953 case AVMEDIA_TYPE_AUDIO:
1954 ret = avcodec_decode_audio4(avctx, frame, &got_picture, &pkt);
1961 st->info->nb_decoded_frames++;
1969 av_frame_free(&frame);
/* Walk a codec-tag table looking up the tag for a codec id.
 * NOTE(review): the loop body and returns were lost in extraction —
 * presumably it returns the matching tag or 0; confirm upstream.
 * Tokens below are byte-identical. */
1973 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
1975 while (tags->id != AV_CODEC_ID_NONE) {
/* Reverse lookup: find the codec id for a tag.  First pass matches the
 * tag exactly; second pass matches case-insensitively via
 * avpriv_toupper4().  Returns AV_CODEC_ID_NONE when not found. */
1986 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
1986 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1987 if (tag == tags[i].tag)
1989 for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
1990 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
1992 return AV_CODEC_ID_NONE;
/* Map PCM properties (bits per sample, float flag, big-endian flag,
 * per-width signedness flags) to the matching AV_CODEC_ID_PCM_*.
 * NOTE(review): the float/int dispatch and the switch headers were
 * lost in extraction; tokens below are byte-identical. */
1995 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
2000 return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
2002 return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
2004 return AV_CODEC_ID_NONE;
/* Signed variants: bit (bps - 1) of sflags selects signedness. */
2008 if (sflags & (1 << (bps - 1))) {
2011 return AV_CODEC_ID_PCM_S8;
2013 return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
2015 return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
2017 return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
2019 return AV_CODEC_ID_NONE;
/* Unsigned variants. */
2024 return AV_CODEC_ID_PCM_U8;
2026 return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
2028 return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
2030 return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
2032 return AV_CODEC_ID_NONE;
/* Search a NULL-terminated list of codec-tag tables for id; delegates
 * each table to ff_codec_get_tag().
 * NOTE(review): the return statements were lost in extraction; tokens
 * below are byte-identical. */
2038 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
2041 for (i = 0; tags && tags[i]; i++) {
2042 int tag = ff_codec_get_tag(tags[i], id);
/* Search a NULL-terminated list of codec-tag tables for tag; returns
 * the first non-NONE id found, else AV_CODEC_ID_NONE. */
2052 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
2052 for (i = 0; tags && tags[i]; i++) {
2053 enum AVCodecID id = ff_codec_get_id(tags[i], tag);
2054 if (id != AV_CODEC_ID_NONE)
2057 return AV_CODEC_ID_NONE;
/* Fill in missing chapter end times: a chapter without an end gets the
 * start of the next chapter that begins after it (or the container's
 * end time as an upper bound); if no bound exists it degenerates to
 * the chapter's own start. */
2060 static void compute_chapters_end(AVFormatContext *s)
2063 int64_t max_time = s->duration +
2064 ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2066 for (i = 0; i < s->nb_chapters; i++)
2067 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2068 AVChapter *ch = s->chapters[i];
2069 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
2073 for (j = 0; j < s->nb_chapters; j++) {
2074 AVChapter *ch1 = s->chapters[j];
2075 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
2077 if (j != i && next_start > ch->start && next_start < end)
2080 ch->end = (end == INT64_MAX) ? ch->start : end;
/*
 * Return the numerator of the i-th "standard" frame rate candidate,
 * over the implied denominator 12 * 1001 (see the AVRational built in
 * avformat_find_stream_info()):
 *   - i in [0, 60*12): (i + 1) * 1001, i.e. multiples of 1/12 fps up
 *     to 60 fps with NTSC-style 1001 scaling (23.976, 29.97, ...);
 *   - remaining indices: exact 24 / 30 / 60 / 12 / 15 fps, scaled by
 *     1000 * 12 so that dividing by 12 * 1001 yields the integer rate
 *     times 1000/1001... adjusted to the exact rate when reduced.
 * The extraction of this file had dropped the range guard; it is
 * restored here to match the visible array indexing (i - 60 * 12).
 */
static int get_std_framerate(int i)
{
    if (i < 60 * 12)
        return (i + 1) * 1001;
    else
        return ((const int[]) { 24, 30, 60, 12, 15 })[i - 60 * 12] * 1000 * 12;
}
/* Read packets from the input to fill in codec parameters, frame
 * rates, timings and dispositions for every stream.  Phases: (1) set
 * up parsers/decoder contexts per stream; (2) read packets until all
 * streams have parameters or probesize / max_analyze_duration is hit,
 * probe-decoding when needed; (3) on EOF flush decoders; (4) derive
 * average frame rates, audio dispositions, timings and chapter ends;
 * (5) publish the internal contexts back into st->codecpar.
 * NOTE(review): this extraction is missing many interior lines
 * (braces, loop headers, error paths); all non-comment tokens below
 * are byte-identical to the original — do not treat this span as
 * complete control flow. */
2092 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2094 int i, count, ret, read_size, j;
2096 AVCodecContext *avctx;
2097 AVPacket pkt1, *pkt;
2098 int64_t old_offset = avio_tell(ic->pb);
2099 // new streams might appear, no options for those
2100 int orig_nb_streams = ic->nb_streams;
/* Phase 1: per-stream setup (parser, codec context, trial decoder). */
2102 for (i = 0; i < ic->nb_streams; i++) {
2103 const AVCodec *codec;
2104 AVDictionary *thread_opt = NULL;
2105 st = ic->streams[i];
2106 avctx = st->internal->avctx;
2108 // only for the split stuff
2109 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2110 st->parser = av_parser_init(st->codecpar->codec_id);
2111 if (st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser)
2112 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2115 /* check if the caller has overridden the codec id */
2116 #if FF_API_LAVF_AVCTX
2117 FF_DISABLE_DEPRECATION_WARNINGS
2118 if (st->codec->codec_id != st->internal->orig_codec_id) {
2119 st->codecpar->codec_id = st->codec->codec_id;
2120 st->codecpar->codec_type = st->codec->codec_type;
2121 st->internal->orig_codec_id = st->codec->codec_id;
2123 FF_ENABLE_DEPRECATION_WARNINGS
2125 if (st->codecpar->codec_id != st->internal->orig_codec_id)
2126 st->internal->orig_codec_id = st->codecpar->codec_id;
2128 ret = avcodec_parameters_to_context(avctx, st->codecpar);
2130 goto find_stream_info_err;
2131 if (st->codecpar->codec_id != AV_CODEC_ID_PROBE &&
2132 st->codecpar->codec_id != AV_CODEC_ID_NONE)
2133 st->internal->avctx_inited = 1;
2135 #if FF_API_LAVF_AVCTX
2136 FF_DISABLE_DEPRECATION_WARNINGS
2137 codec = st->codec->codec ? st->codec->codec
2138 : avcodec_find_decoder(st->codecpar->codec_id);
2139 FF_ENABLE_DEPRECATION_WARNINGS
2141 codec = avcodec_find_decoder(st->codecpar->codec_id);
2144 /* Force thread count to 1 since the H.264 decoder will not extract
2145 * SPS and PPS to extradata during multi-threaded decoding. */
2146 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2148 /* Ensure that subtitle_header is properly set. */
2149 if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE
2150 && codec && !avctx->codec)
2151 avcodec_open2(avctx, codec,
2152 options ? &options[i] : &thread_opt);
2154 // Try to just open decoders, in case this is enough to get parameters.
2155 if (!has_codec_parameters(st)) {
2156 if (codec && !avctx->codec)
2157 avcodec_open2(avctx, codec,
2158 options ? &options[i] : &thread_opt);
2161 av_dict_free(&thread_opt);
2164 for (i = 0; i < ic->nb_streams; i++) {
2165 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2166 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
/* Phase 2: the main packet-reading loop. */
2172 if (ff_check_interrupt(&ic->interrupt_callback)) {
2174 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2178 /* check if one codec still needs to be handled */
2179 for (i = 0; i < ic->nb_streams; i++) {
2180 int fps_analyze_framecount = 20;
2182 st = ic->streams[i];
2183 if (!has_codec_parameters(st))
2185 /* If the timebase is coarse (like the usual millisecond precision
2186 * of mkv), we need to analyze more frames to reliably arrive at
2187 * the correct fps. */
2188 if (av_q2d(st->time_base) > 0.0005)
2189 fps_analyze_framecount *= 2;
2190 if (ic->fps_probe_size >= 0)
2191 fps_analyze_framecount = ic->fps_probe_size;
2192 /* variable fps and no guess at the real fps */
2193 if (!st->avg_frame_rate.num &&
2194 st->codec_info_nb_frames < fps_analyze_framecount &&
2195 st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
2197 if (st->parser && st->parser->parser->split &&
2198 !st->codecpar->extradata)
2200 if (st->first_dts == AV_NOPTS_VALUE &&
2201 st->codec_info_nb_frames < ic->max_ts_probe &&
2202 (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
2203 st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO))
2206 if (i == ic->nb_streams) {
2207 /* NOTE: If the format has no header, then we need to read some
2208 * packets to get most of the streams, so we cannot stop here. */
2209 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2210 /* If we found the info for all the codecs, we can stop. */
2212 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2216 /* We did not get all the codec info, but we read too much data. */
2217 if (read_size >= ic->probesize) {
2219 av_log(ic, AV_LOG_DEBUG,
2220 "Probe buffer size limit %d reached\n", ic->probesize);
2224 /* NOTE: A new stream can be added there if no header in file
2225 * (AVFMTCTX_NOHEADER). */
2226 ret = read_frame_internal(ic, &pkt1);
2227 if (ret == AVERROR(EAGAIN))
/* Phase 3: EOF — flush decoders with an empty packet until they dry up. */
2232 AVPacket empty_pkt = { 0 };
2234 av_init_packet(&empty_pkt);
2236 /* We could not have all the codec parameters before EOF. */
2238 for (i = 0; i < ic->nb_streams; i++) {
2239 st = ic->streams[i];
2241 /* flush the decoders */
2242 if (st->info->found_decoder == 1) {
2244 err = try_decode_frame(ic, st, &empty_pkt,
2245 (options && i < orig_nb_streams)
2246 ? &options[i] : NULL);
2247 } while (err > 0 && !has_codec_parameters(st));
2251 av_log(ic, AV_LOG_WARNING,
2252 "decoding for stream %d failed\n", st->index);
2253 } else if (!has_codec_parameters(st)) {
2255 avcodec_string(buf, sizeof(buf), st->internal->avctx, 0);
2256 av_log(ic, AV_LOG_WARNING,
2257 "Could not find codec parameters (%s)\n", buf);
2267 if (!(ic->flags & AVFMT_FLAG_NOBUFFER)) {
2268 ret = add_to_pktbuf(&ic->internal->packet_buffer, pkt,
2269 &ic->internal->packet_buffer_end, 0);
2271 goto find_stream_info_err;
2274 read_size += pkt->size;
2276 st = ic->streams[pkt->stream_index];
2277 avctx = st->internal->avctx;
2278 if (!st->internal->avctx_inited) {
2279 ret = avcodec_parameters_to_context(avctx, st->codecpar);
2281 goto find_stream_info_err;
2282 st->internal->avctx_inited = 1;
/* DTS bookkeeping for the frame-rate estimate further below. */
2285 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2286 /* check for non-increasing dts */
2287 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2288 st->info->fps_last_dts >= pkt->dts) {
2289 av_log(ic, AV_LOG_WARNING,
2290 "Non-increasing DTS in stream %d: packet %d with DTS "
2291 "%"PRId64", packet %d with DTS %"PRId64"\n",
2292 st->index, st->info->fps_last_dts_idx,
2293 st->info->fps_last_dts, st->codec_info_nb_frames,
2295 st->info->fps_first_dts =
2296 st->info->fps_last_dts = AV_NOPTS_VALUE;
2298 /* Check for a discontinuity in dts. If the difference in dts
2299 * is more than 1000 times the average packet duration in the
2300 * sequence, we treat it as a discontinuity. */
2301 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2302 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2303 (pkt->dts - st->info->fps_last_dts) / 1000 >
2304 (st->info->fps_last_dts - st->info->fps_first_dts) /
2305 (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2306 av_log(ic, AV_LOG_WARNING,
2307 "DTS discontinuity in stream %d: packet %d with DTS "
2308 "%"PRId64", packet %d with DTS %"PRId64"\n",
2309 st->index, st->info->fps_last_dts_idx,
2310 st->info->fps_last_dts, st->codec_info_nb_frames,
2312 st->info->fps_first_dts =
2313 st->info->fps_last_dts = AV_NOPTS_VALUE;
2316 /* update stored dts values */
2317 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2318 st->info->fps_first_dts = pkt->dts;
2319 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2321 st->info->fps_last_dts = pkt->dts;
2322 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2324 /* check max_analyze_duration */
2325 if (av_rescale_q(pkt->dts - st->info->fps_first_dts, st->time_base,
2326 AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
2327 av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached\n",
2328 ic->max_analyze_duration);
2329 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2330 av_packet_unref(pkt);
/* Extract extradata (e.g. SPS/PPS) via the parser's split callback. */
2334 if (st->parser && st->parser->parser->split && !avctx->extradata) {
2335 int i = st->parser->parser->split(avctx, pkt->data, pkt->size);
2336 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2337 avctx->extradata_size = i;
2338 avctx->extradata = av_mallocz(avctx->extradata_size +
2339 AV_INPUT_BUFFER_PADDING_SIZE);
2340 if (!avctx->extradata)
2341 return AVERROR(ENOMEM);
2342 memcpy(avctx->extradata, pkt->data,
2343 avctx->extradata_size);
2347 /* If still no information, we try to open the codec and to
2348 * decompress the frame. We try to avoid that in most cases as
2349 * it takes longer and uses more memory. For MPEG-4, we need to
2350 * decompress for QuickTime.
2352 * If AV_CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2353 * least one frame of codec data, this makes sure the codec initializes
2354 * the channel configuration and does not only trust the values from
2356 try_decode_frame(ic, st, pkt,
2357 (options && i < orig_nb_streams) ? &options[i] : NULL);
2359 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2360 av_packet_unref(pkt);
2362 st->codec_info_nb_frames++;
2366 // close codecs which were opened in try_decode_frame()
2367 for (i = 0; i < ic->nb_streams; i++) {
2368 st = ic->streams[i];
2369 avcodec_close(st->internal->avctx);
/* Phase 4: derive frame rates and audio dispositions. */
2371 for (i = 0; i < ic->nb_streams; i++) {
2372 st = ic->streams[i];
2373 avctx = st->internal->avctx;
2374 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2375 /* estimate average framerate if not set by demuxer */
2376 if (!st->avg_frame_rate.num &&
2377 st->info->fps_last_dts != st->info->fps_first_dts) {
2378 int64_t delta_dts = st->info->fps_last_dts -
2379 st->info->fps_first_dts;
2380 int delta_packets = st->info->fps_last_dts_idx -
2381 st->info->fps_first_dts_idx;
2383 double best_error = 0.01;
2385 if (delta_dts >= INT64_MAX / st->time_base.num ||
2386 delta_packets >= INT64_MAX / st->time_base.den ||
2389 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2390 delta_packets * (int64_t) st->time_base.den,
2391 delta_dts * (int64_t) st->time_base.num, 60000);
2393 /* Round guessed framerate to a "standard" framerate if it's
2394 * within 1% of the original estimate. */
2395 for (j = 0; j < MAX_STD_TIMEBASES; j++) {
2396 AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
2397 double error = fabs(av_q2d(st->avg_frame_rate) /
2398 av_q2d(std_fps) - 1);
2400 if (error < best_error) {
2402 best_fps = std_fps.num;
2406 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2407 best_fps, 12 * 1001, INT_MAX);
2409 } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2410 if (!avctx->bits_per_coded_sample)
2411 avctx->bits_per_coded_sample =
2412 av_get_bits_per_sample(avctx->codec_id);
2413 // set stream disposition based on audio service type
2414 switch (avctx->audio_service_type) {
2415 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
2416 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS;
2418 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
2419 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED;
2421 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
2422 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED;
2424 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
2425 st->disposition = AV_DISPOSITION_COMMENT;
2427 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
2428 st->disposition = AV_DISPOSITION_KARAOKE;
2434 estimate_timings(ic, old_offset);
2436 compute_chapters_end(ic);
/* Phase 5: publish the internal codec contexts back to codecpar (and,
 * under FF_API_LAVF_AVCTX, the deprecated st->codec). */
2438 /* update the stream parameters from the internal codec contexts */
2439 for (i = 0; i < ic->nb_streams; i++) {
2440 st = ic->streams[i];
2441 if (!st->internal->avctx_inited)
2444 ret = avcodec_parameters_from_context(st->codecpar, st->internal->avctx);
2446 goto find_stream_info_err;
2448 #if FF_API_LAVF_AVCTX
2449 FF_DISABLE_DEPRECATION_WARNINGS
2450 ret = avcodec_parameters_to_context(st->codec, st->codecpar);
2452 goto find_stream_info_err;
2454 if (st->internal->avctx->subtitle_header) {
2455 st->codec->subtitle_header = av_malloc(st->internal->avctx->subtitle_header_size);
2456 if (!st->codec->subtitle_header)
2457 goto find_stream_info_err;
2458 st->codec->subtitle_header_size = st->internal->avctx->subtitle_header_size;
2459 memcpy(st->codec->subtitle_header, st->internal->avctx->subtitle_header,
2460 st->codec->subtitle_header_size);
2462 FF_ENABLE_DEPRECATION_WARNINGS
2465 st->internal->avctx_inited = 0;
2468 find_stream_info_err:
2469 for (i = 0; i < ic->nb_streams; i++) {
2470 av_freep(&ic->streams[i]->info);
/* Return the AVProgram containing stream index s, or (per the original
 * control flow, whose final return line was lost in extraction)
 * presumably NULL when no program lists it. */
2475 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s)
2479 for (i = 0; i < ic->nb_programs; i++)
2480 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
2481 if (ic->programs[i]->stream_index[j] == s)
2482 return ic->programs[i];
/* Pick the "best" stream of the requested media type: restrict to the
 * related stream's program when asked, skip impaired-audience
 * dispositions, require a decoder when decoder_ret is wanted, and
 * prefer the candidate with the most probed frames
 * (codec_info_nb_frames).  Falls back to all streams when the program
 * yielded nothing.  Returns the stream index or an AVERROR_* code.
 * NOTE(review): extraction dropped interior lines (continues, braces);
 * tokens below are byte-identical. */
2486 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
2487 int wanted_stream_nb, int related_stream,
2488 AVCodec **decoder_ret, int flags)
2490 int i, nb_streams = ic->nb_streams;
2491 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1;
2492 unsigned *program = NULL;
2493 AVCodec *decoder = NULL, *best_decoder = NULL;
2495 if (related_stream >= 0 && wanted_stream_nb < 0) {
2496 AVProgram *p = find_program_from_stream(ic, related_stream);
2498 program = p->stream_index;
2499 nb_streams = p->nb_stream_indexes;
2502 for (i = 0; i < nb_streams; i++) {
2503 int real_stream_index = program ? program[i] : i;
2504 AVStream *st = ic->streams[real_stream_index];
2505 AVCodecParameters *par = st->codecpar;
2506 if (par->codec_type != type)
2508 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
2510 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED |
2511 AV_DISPOSITION_VISUAL_IMPAIRED))
2514 decoder = avcodec_find_decoder(par->codec_id);
2517 ret = AVERROR_DECODER_NOT_FOUND;
2521 if (best_count >= st->codec_info_nb_frames)
2523 best_count = st->codec_info_nb_frames;
2524 ret = real_stream_index;
2525 best_decoder = decoder;
2526 if (program && i == nb_streams - 1 && ret < 0) {
2528 nb_streams = ic->nb_streams;
2529 /* no related stream found, try again with everything */
2534 *decoder_ret = best_decoder;
2538 /*******************************************************/
/* Resume a paused network stream: prefer the demuxer's read_play(),
 * otherwise un-pause the AVIOContext; ENOSYS when neither applies. */
2540 int av_read_play(AVFormatContext *s)
2542 if (s->iformat->read_play)
2543 return s->iformat->read_play(s);
2545 return avio_pause(s->pb, 0);
2546 return AVERROR(ENOSYS);
/* Pause a network stream: mirror of av_read_play() — demuxer
 * read_pause() first, then avio_pause(pb, 1), else ENOSYS. */
2549 int av_read_pause(AVFormatContext *s)
2551 if (s->iformat->read_pause)
2552 return s->iformat->read_pause(s);
2554 return avio_pause(s->pb, 1);
2555 return AVERROR(ENOSYS);
/* Free one AVStream and everything it owns: side data, parser,
 * attached picture, internal codec context, metadata, codecpar, probe
 * buffer, index entries, the deprecated st->codec buffers, and private
 * data.  (The extraction dropped the final free/NULL-out lines.) */
2558 static void free_stream(AVStream **pst)
2560 AVStream *st = *pst;
2566 for (i = 0; i < st->nb_side_data; i++)
2567 av_freep(&st->side_data[i].data);
2568 av_freep(&st->side_data);
2571 av_parser_close(st->parser);
2573 if (st->attached_pic.data)
2574 av_packet_unref(&st->attached_pic);
2577 avcodec_free_context(&st->internal->avctx);
2579 av_freep(&st->internal);
2581 av_dict_free(&st->metadata);
2582 avcodec_parameters_free(&st->codecpar);
2583 av_freep(&st->probe_data.buf);
2584 av_free(st->index_entries);
2585 #if FF_API_LAVF_AVCTX
2586 FF_DISABLE_DEPRECATION_WARNINGS
2587 av_free(st->codec->extradata);
2588 av_free(st->codec->subtitle_header);
2590 FF_ENABLE_DEPRECATION_WARNINGS
2592 av_free(st->priv_data);
/* Free an AVFormatContext and everything it owns: demuxer private
 * options, all streams, programs, chapters, metadata and the internal
 * state. NOTE(review): the loop-index declaration, NULL check on s and
 * the final av_free(s) are not visible here — presumably elided by
 * extraction. */
2598 void avformat_free_context(AVFormatContext *s)
2606     if (s->iformat && s->iformat->priv_class && s->priv_data)
2607         av_opt_free(s->priv_data);
/* free_stream() releases everything each stream owns */
2609     for (i = 0; i < s->nb_streams; i++)
2610         free_stream(&s->streams[i]);
2612     for (i = s->nb_programs - 1; i >= 0; i--) {
2613         av_dict_free(&s->programs[i]->metadata);
2614         av_freep(&s->programs[i]->stream_index);
2615         av_freep(&s->programs[i]);
2617     av_freep(&s->programs);
2618     av_freep(&s->priv_data);
2619     while (s->nb_chapters--) {
2620         av_dict_free(&s->chapters[s->nb_chapters]->metadata);
2621         av_free(s->chapters[s->nb_chapters]);
2623     av_freep(&s->chapters);
2624     av_dict_free(&s->metadata);
2625     av_freep(&s->streams);
2626     av_freep(&s->internal);
/* Close an opened input AVFormatContext: flush queued packets, let the
 * demuxer clean up, free the context and close the AVIOContext unless
 * the demuxer is AVFMT_NOFILE or the caller supplied custom I/O.
 * NOTE(review): the lines that skip/perform avio_close(pb) and NULL
 * out *ps are not visible in this extraction — confirm against
 * upstream. */
2630 void avformat_close_input(AVFormatContext **ps)
2632     AVFormatContext *s = *ps;
2633     AVIOContext *pb = s->pb;
/* NOFILE demuxers and custom I/O: the library must not close pb */
2635     if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
2636         (s->flags & AVFMT_FLAG_CUSTOM_IO))
2639     flush_packet_queue(s);
2642     if (s->iformat->read_close)
2643         s->iformat->read_close(s);
2645     avformat_free_context(s);
/* Allocate a new AVStream, initialize all timing fields to "unknown"
 * (AV_NOPTS_VALUE) and append it to s->streams. c optionally selects
 * the codec used to preset the (deprecated) AVStream.codec context.
 * Returns the new stream; error paths (allocation failures) appear
 * elided in this extraction. */
2652 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
/* grow the stream array first so the append below cannot fail */
2657     if (av_reallocp_array(&s->streams, s->nb_streams + 1,
2658 sizeof(*s->streams)) < 0) {
2663     st = av_mallocz(sizeof(AVStream));
2666     if (!(st->info = av_mallocz(sizeof(*st->info)))) {
2671 #if FF_API_LAVF_AVCTX
2672 FF_DISABLE_DEPRECATION_WARNINGS
2673     st->codec = avcodec_alloc_context3(c);
2679 FF_ENABLE_DEPRECATION_WARNINGS
2682     st->internal = av_mallocz(sizeof(*st->internal));
2687 #if FF_API_LAVF_AVCTX
2688 FF_DISABLE_DEPRECATION_WARNINGS
2689     /* no default bitrate if decoding */
2690     st->codec->bit_rate = 0;
2691 FF_ENABLE_DEPRECATION_WARNINGS
2694     /* default pts setting is MPEG-like */
2695     avpriv_set_pts_info(st, 33, 1, 90000);
2696     /* we set the current DTS to 0 so that formats without any timestamps
2697      * but durations get some timestamps, formats with some unknown
2698      * timestamps have their first few packets buffered and the
2699      * timestamps corrected before they are returned to the user */
2702     st->cur_dts = AV_NOPTS_VALUE;
2705     st->codecpar = avcodec_parameters_alloc();
2709     st->internal->avctx = avcodec_alloc_context3(NULL);
2710     if (!st->internal->avctx)
2713     st->index = s->nb_streams;
2714     st->start_time = AV_NOPTS_VALUE;
2715     st->duration = AV_NOPTS_VALUE;
2716     st->first_dts = AV_NOPTS_VALUE;
2717     st->probe_packets = MAX_PROBE_PACKETS;
2719     st->last_IP_pts = AV_NOPTS_VALUE;
/* reorder buffer starts out with no known timestamps */
2720     for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
2721         st->pts_buffer[i] = AV_NOPTS_VALUE;
2723     st->sample_aspect_ratio = (AVRational) { 0, 1 };
2725     st->info->fps_first_dts = AV_NOPTS_VALUE;
2726     st->info->fps_last_dts = AV_NOPTS_VALUE;
2728 #if FF_API_LAVF_AVCTX
2729     st->internal->need_codec_update = 1;
2732     s->streams[s->nb_streams++] = st;
/* Return the AVProgram with the given id, creating and registering a
 * fresh one (discard defaulting to AVDISCARD_NONE) if it does not
 * exist yet. NOTE(review): the loop-index declaration, NULL checks and
 * the return statement are not visible in this extraction. */
2739 AVProgram *av_new_program(AVFormatContext *ac, int id)
2741     AVProgram *program = NULL;
2744     av_log(ac, AV_LOG_TRACE, "new_program: id=0x%04x\n", id);
/* reuse an existing program with a matching id */
2746     for (i = 0; i < ac->nb_programs; i++)
2747         if (ac->programs[i]->id == id)
2748             program = ac->programs[i];
2751         program = av_mallocz(sizeof(AVProgram));
2754         dynarray_add(&ac->programs, &ac->nb_programs, program);
2755         program->discard = AVDISCARD_NONE;
/* Return the AVChapter with the given id, creating and registering it
 * if needed, then (re)set its title metadata, time base and start.
 * NOTE(review): the assignment of 'end' and the return statement are
 * not visible in this extraction. */
2762 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
2763 int64_t start, int64_t end, const char *title)
2765     AVChapter *chapter = NULL;
/* reuse an existing chapter with a matching id */
2768     for (i = 0; i < s->nb_chapters; i++)
2769         if (s->chapters[i]->id == id)
2770             chapter = s->chapters[i];
2773         chapter = av_mallocz(sizeof(AVChapter));
2776         dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2778     av_dict_set(&chapter->metadata, "title", title, 0);
2780     chapter->time_base = time_base;
2781     chapter->start = start;
/* Add stream index idx to the stream_index array of the program with
 * id progid; out-of-range indices are rejected with a log message and
 * duplicates are skipped. On realloc failure the program's index list
 * is emptied rather than left inconsistent. */
2787 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
2790     AVProgram *program = NULL;
2792     if (idx >= ac->nb_streams) {
2793         av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
2797     for (i = 0; i < ac->nb_programs; i++) {
2798         if (ac->programs[i]->id != progid)
2800         program = ac->programs[i];
/* skip streams already registered with this program */
2801         for (j = 0; j < program->nb_stream_indexes; j++)
2802             if (program->stream_index[j] == idx)
2805         if (av_reallocp_array(&program->stream_index,
2806 program->nb_stream_indexes + 1,
2807 sizeof(*program->stream_index)) < 0) {
/* realloc failed: drop the whole list rather than keep a stale count */
2808             program->nb_stream_indexes = 0;
2811         program->stream_index[program->nb_stream_indexes++] = idx;
2816 uint64_t ff_ntp_time(void)
2818 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/* Expand a printf-like "%d" / "%0Nd" directive in path with the frame
 * number and write the result into buf (size buf_size). Exactly one
 * such directive is expected; "%%" escapes a literal percent.
 * NOTE(review): most of the scanner (the do/while over the pattern,
 * the '%'-dispatch switch and the error returns) is not visible in
 * this extraction — the surviving statements are kept verbatim. */
2821 int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
2824     char *q, buf1[20], c;
2825     int nd, len, percentd_found;
/* parse an optional zero-padded width after '%' */
2837             while (av_isdigit(*p))
2838                 nd = nd * 10 + *p++ - '0';
2840             } while (av_isdigit(c));
/* render the frame number with the requested padding */
2849                 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2851                 if ((q - buf + len) > buf_size - 1)
2853                 memcpy(q, buf1, len);
2861         if ((q - buf) < buf_size - 1)
2865     if (!percentd_found)
/* Split a URL into protocol, authorization (user[:pass]), hostname,
 * port and path components. Any output buffer may be sized 0 to skip
 * that component; outputs are plain truncating copies (av_strlcpy).
 * NOTE(review): several guard lines (initializing proto/path, the
 * hostname[0] = 0 branch, port reset, closing braces) are not visible
 * in this extraction. */
2874 void av_url_split(char *proto, int proto_size,
2875 char *authorization, int authorization_size,
2876 char *hostname, int hostname_size,
2877 int *port_ptr, char *path, int path_size, const char *url)
2879     const char *p, *ls, *at, *col, *brk;
2885     if (authorization_size > 0)
2886         authorization[0] = 0;
2887     if (hostname_size > 0)
2892     /* parse protocol */
2893     if ((p = strchr(url, ':'))) {
2894         av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
2901         /* no protocol means plain filename */
2902         av_strlcpy(path, url, path_size);
2906     /* separate path from hostname */
2907     ls = strchr(p, '/');
2909         ls = strchr(p, '?');
2911         av_strlcpy(path, ls, path_size);
2913         ls = &p[strlen(p)]; // XXX
2915     /* the rest is hostname, use that to parse auth/port */
2917         /* authorization (user[:pass]@hostname) */
2918         if ((at = strchr(p, '@')) && at < ls) {
2919             av_strlcpy(authorization, p,
2920 FFMIN(authorization_size, at + 1 - p));
2921             p = at + 1; /* skip '@' */
/* RFC 2732 bracketed IPv6 literal: [addr]:port */
2924         if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
2926             av_strlcpy(hostname, p + 1,
2927 FFMIN(hostname_size, brk - p));
2928             if (brk[1] == ':' && port_ptr)
2929                 *port_ptr = atoi(brk + 2);
2930         } else if ((col = strchr(p, ':')) && col < ls) {
2931             av_strlcpy(hostname, p,
2932 FFMIN(col + 1 - p, hostname_size));
2934                 *port_ptr = atoi(col + 1);
/* no port: everything up to ls is the hostname */
2936             av_strlcpy(hostname, p,
2937 FFMIN(ls + 1 - p, hostname_size));
/**
 * Write a hexadecimal string representation of the binary data src
 * into buff.
 *
 * @param buff      destination buffer; must hold at least 2*s bytes
 *                  (no trailing NUL is written by this function)
 * @param src       input bytes to convert
 * @param s         number of input bytes
 * @param lowercase if nonzero, use a-f instead of A-F
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    int i;
    /* full 16-entry digit tables (the extracted copy had only 8 of the
     * 16 initializers and no return statement) */
    static const char hex_table_uc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'A', 'B',
                                           'C', 'D', 'E', 'F' };
    static const char hex_table_lc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'a', 'b',
                                           'c', 'd', 'e', 'f' };
    const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;

    for (i = 0; i < s; i++) {
        buff[i * 2]     = hex_table[src[i] >> 4];
        buff[i * 2 + 1] = hex_table[src[i] & 0xF];
    }

    return buff;
}
/* Parse a string of hexadecimal digits (whitespace tolerated) into
 * binary data; presumably returns the number of bytes produced, as in
 * upstream — the accumulation and return lines are not visible in
 * this extraction. */
2962 int ff_hex_to_data(uint8_t *data, const char *p)
/* skip leading whitespace before each digit pair */
2969         p += strspn(p, SPACE_CHARS);
2972         c = av_toupper((unsigned char) *p++);
2973         if (c >= '0' && c <= '9')
2975         else if (c >= 'A' && c <= 'F')
/* Set the timestamp base (pts_num/pts_den) and wrap-bit count for a
 * stream, reducing the fraction first and rejecting invalid (non-
 * positive) time bases. NOTE(review): the AVRational declaration and
 * some branch/return lines are not visible in this extraction. */
2990 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
2991 unsigned int pts_num, unsigned int pts_den)
/* reduce the fraction, capping both terms at INT_MAX */
2994     if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
2995         if (new_tb.num != pts_num)
2996             av_log(NULL, AV_LOG_DEBUG,
2997 "st:%d removing common factor %d from timebase\n",
2998 s->index, pts_num / new_tb.num);
3000         av_log(NULL, AV_LOG_WARNING,
3001 "st:%d has too large timebase, reducing\n", s->index);
3003     if (new_tb.num <= 0 || new_tb.den <= 0) {
3004         av_log(NULL, AV_LOG_ERROR,
3005 "Ignoring attempt to set invalid timebase for st:%d\n",
3009     s->time_base = new_tb;
3010     s->pts_wrap_bits = pts_wrap_bits;
/* Parse a comma/whitespace-separated list of key=value pairs, where a
 * value may be double-quoted (with backslash escapes, per upstream).
 * For every key the callback supplies the destination buffer for the
 * value. NOTE(review): the quote/escape branches and loop structure
 * are only partially visible in this extraction. */
3013 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3016     const char *ptr = str;
3018     /* Parse key=value pairs. */
3021         char *dest = NULL, *dest_end;
3022         int key_len, dest_len = 0;
3024         /* Skip whitespace and potential commas. */
3025         while (*ptr && (av_isspace(*ptr) || *ptr == ','))
3032         if (!(ptr = strchr(key, '=')))
3035         key_len = ptr - key;
/* the callback decides where (and whether) the value is stored */
3037         callback_get_buf(context, key, key_len, &dest, &dest_len);
3038         dest_end = dest + dest_len - 1;
/* quoted value: copy until the closing '"' */
3042             while (*ptr && *ptr != '\"') {
3046                     if (dest && dest < dest_end)
3050                     if (dest && dest < dest_end)
/* unquoted value: copy until whitespace or ',' */
3058             for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
3059                 if (dest && dest < dest_end)
3067 int ff_find_stream_index(AVFormatContext *s, int id)
3070 for (i = 0; i < s->nb_streams; i++)
3071 if (s->streams[i]->id == id)
/* Convert an ISO 8601 date string to a Unix timestamp (UTC, via
 * av_timegm). Two layouts are attempted: with a space separator and
 * with the 'T' separator. NOTE(review): the condition choosing between
 * time1 and time2 is not visible in this extraction — presumably the
 * 'T' variant wins only when the space variant failed to parse. */
3076 int64_t ff_iso8601_to_unix_time(const char *datestr)
3078     struct tm time1 = { 0 }, time2 = { 0 };
3079     const char *ret1, *ret2;
/* av_small_strptime returns NULL on parse failure */
3080     ret1 = av_small_strptime(datestr, "%Y - %m - %d %T", &time1);
3081     ret2 = av_small_strptime(datestr, "%Y - %m - %dT%T", &time2);
3083         return av_timegm(&time2);
3085     return av_timegm(&time1);
/* Ask whether the muxer can store codec_id: delegate to the muxer's
 * query_codec callback if present, otherwise consult its codec_tag
 * tables, otherwise accept only the muxer's default video/audio/
 * subtitle codecs. NOTE(review): the std_compliance parameter line,
 * the NULL check on ofmt, the "return 1" line and the closing braces
 * are not visible in this extraction. */
3088 int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
3092         if (ofmt->query_codec)
3093             return ofmt->query_codec(codec_id, std_compliance);
3094         else if (ofmt->codec_tag)
3095             return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
3096         else if (codec_id == ofmt->video_codec ||
3097 codec_id == ofmt->audio_codec ||
3098 codec_id == ofmt->subtitle_codec)
3101     return AVERROR_PATCHWELCOME;
/* Globally initialize network components (sets the "initialized
 * globally" flag, then performs the actual network init).
 * NOTE(review): the declaration of ret, the error return and the
 * success return are not visible in this extraction. */
3104 int avformat_network_init(void)
3108     ff_network_inited_globally = 1;
3109     if ((ret = ff_network_init()) < 0)
/* Undo avformat_network_init(). NOTE(review): the entire body is
 * missing from this extraction — presumably it calls
 * ff_network_close()/returns 0 as upstream does; verify before use. */
3116 int avformat_network_deinit(void)
/* Attach an AV_PKT_DATA_PARAM_CHANGE side-data blob to pkt describing
 * which stream parameters changed (channel count/layout, sample rate,
 * dimensions). Layout: a little-endian flags word followed by one
 * field per set flag, in the fixed order written below.
 * NOTE(review): the size computation, the flags/data declarations and
 * the per-field "if" guards are partially elided in this extraction. */
3125 int ff_add_param_change(AVPacket *pkt, int32_t channels,
3126 uint64_t channel_layout, int32_t sample_rate,
3127 int32_t width, int32_t height)
3133         return AVERROR(EINVAL);
3136         flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
3138     if (channel_layout) {
3140         flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
3144         flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
3146     if (width || height) {
3148         flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
3150     data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
3152         return AVERROR(ENOMEM);
/* serialize: flags first, then each present field, all little-endian */
3153     bytestream_put_le32(&data, flags);
3155         bytestream_put_le32(&data, channels);
3157         bytestream_put_le64(&data, channel_layout);
3159         bytestream_put_le32(&data, sample_rate);
3160     if (width || height) {
3161         bytestream_put_le32(&data, width);
3162         bytestream_put_le32(&data, height);
/* Install canned H.264 SPS/PPS extradata for AVC-Intra streams, which
 * carry no parameter sets in-band. The table is picked by coded width
 * (1920 = AVC-Intra 100 1080-line, 1440 = AVC-Intra 50 1080i,
 * 1280 = AVC-Intra 100 720p) and, at 1920, by field order.
 * NOTE(review): some table rows, the size declaration and the "no
 * match -> return 0" path are elided in this extraction; the byte
 * values below are kept verbatim. */
3167 int ff_generate_avci_extradata(AVStream *st)
3169     static const uint8_t avci100_1080p_extradata[] = {
3171         0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3172         0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3173         0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3174         0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
3175         0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
3176         0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
3177         0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
3178         0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
3179         0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
3181         0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3184     static const uint8_t avci100_1080i_extradata[] = {
3186         0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3187         0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
3188         0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
3189         0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
3190         0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
3191         0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
3192         0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
3193         0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
3194         0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
3195         0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
3196         0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
3198         0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
3201     static const uint8_t avci50_1080i_extradata[] = {
3203         0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
3204         0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
3205         0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
3206         0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
3207         0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
3208         0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
3209         0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
3210         0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
3211         0x81, 0x13, 0xf7, 0xff, 0x80, 0x01, 0x80, 0x02,
3212         0x71, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
3213         0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
3215         0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
3218     static const uint8_t avci100_720p_extradata[] = {
3220         0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
3221         0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
3222         0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
3223         0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
3224         0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
3225         0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
3226         0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
3227         0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
3228         0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
3229         0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
3231         0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
3235     const uint8_t *data = NULL;
/* select the parameter-set table by coded width / field order */
3238     if (st->codecpar->width == 1920) {
3239         if (st->codecpar->field_order == AV_FIELD_PROGRESSIVE) {
3240             data = avci100_1080p_extradata;
3241             size = sizeof(avci100_1080p_extradata);
3243             data = avci100_1080i_extradata;
3244             size = sizeof(avci100_1080i_extradata);
3246     } else if (st->codecpar->width == 1440) {
3247         data = avci50_1080i_extradata;
3248         size = sizeof(avci50_1080i_extradata);
3249     } else if (st->codecpar->width == 1280) {
3250         data = avci100_720p_extradata;
3251         size = sizeof(avci100_720p_extradata);
/* replace any existing extradata with a zero-padded copy of the table */
3257     av_freep(&st->codecpar->extradata);
3258     st->codecpar->extradata_size = 0;
3259     st->codecpar->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
3260     if (!st->codecpar->extradata)
3261         return AVERROR(ENOMEM);
3263     memcpy(st->codecpar->extradata, data, size);
3264     st->codecpar->extradata_size = size;
/* Look up stream-level side data of the given type; on a hit return
 * its payload and (when the size out-pointer is non-NULL) its size.
 * NOTE(review): the NULL-miss return and the size-pointer guard are
 * not visible in this extraction. */
3269 uint8_t *av_stream_get_side_data(AVStream *st, enum AVPacketSideDataType type,
3274     for (i = 0; i < st->nb_side_data; i++) {
3275         if (st->side_data[i].type == type) {
3277                 *size = st->side_data[i].size;
3278             return st->side_data[i].data;
/* Allocate a buffer of the given size and register it as stream-level
 * side data of the given type, replacing (freeing) any existing entry
 * of that type; otherwise the side_data array is grown by one.
 * NOTE(review): the allocation-failure paths, the sd field
 * assignments and the return of the new buffer are not visible in
 * this extraction. */
3284 uint8_t *av_stream_new_side_data(AVStream *st, enum AVPacketSideDataType type,
3287     AVPacketSideData *sd, *tmp;
3289     uint8_t *data = av_malloc(size);
/* reuse an existing slot of the same type, dropping its old payload */
3294     for (i = 0; i < st->nb_side_data; i++) {
3295         sd = &st->side_data[i];
3297         if (sd->type == type) {
3298             av_freep(&sd->data);
/* no slot of this type yet: grow the array by one entry */
3305     tmp = av_realloc_array(st->side_data, st->nb_side_data + 1, sizeof(*tmp));
3311     st->side_data = tmp;
3314     sd = &st->side_data[st->nb_side_data - 1];
3321 void ff_format_io_close(AVFormatContext *s, AVIOContext **pb)
3324 s->io_close(s, *pb);