 * various utility functions for use within FFmpeg
 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "avio_internal.h"
25 #include "libavcodec/internal.h"
26 #include "libavcodec/raw.h"
27 #include "libavcodec/bytestream.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/opt.h"
30 #include "libavutil/dict.h"
31 #include "libavutil/pixdesc.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/avstring.h"
36 #include "libavutil/mathematics.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/time.h"
39 #include "libavutil/timestamp.h"
41 #include "audiointerleave.h"
 * various utility functions for use within FFmpeg
56 unsigned avformat_version(void)
58 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
59 return LIBAVFORMAT_VERSION_INT;
62 const char *avformat_configuration(void)
64 return FFMPEG_CONFIGURATION;
67 const char *avformat_license(void)
69 #define LICENSE_PREFIX "libavformat license: "
70 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Base for internally generated (relative) timestamps; chosen close to
 * INT64_MAX so real timestamps are extremely unlikely to collide with it. */
#define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))

/**
 * Check whether a timestamp lies in the internal "relative" range,
 * i.e. within 2^48 ticks below RELATIVE_TS_BASE or above it.
 */
static int is_relative(int64_t ts) {
    return ts > (RELATIVE_TS_BASE - (1LL<<48));
}
80 * Wrap a given time stamp, if there is an indication for an overflow
83 * @param timestamp the time stamp to wrap
84 * @return resulting time stamp
86 static int64_t wrap_timestamp(AVStream *st, int64_t timestamp)
88 if (st->pts_wrap_behavior != AV_PTS_WRAP_IGNORE &&
89 st->pts_wrap_reference != AV_NOPTS_VALUE && timestamp != AV_NOPTS_VALUE) {
90 if (st->pts_wrap_behavior == AV_PTS_WRAP_ADD_OFFSET &&
91 timestamp < st->pts_wrap_reference)
92 return timestamp + (1ULL<<st->pts_wrap_bits);
93 else if (st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET &&
94 timestamp >= st->pts_wrap_reference)
95 return timestamp - (1ULL<<st->pts_wrap_bits);
100 #define MAKE_ACCESSORS(str, name, type, field) \
101 type av_##name##_get_##field(const str *s) { return s->field; } \
102 void av_##name##_set_##field(str *s, type v) { s->field = v; }
104 MAKE_ACCESSORS(AVStream, stream, AVRational, r_frame_rate)
106 /* an arbitrarily chosen "sane" max packet size -- 50M */
107 #define SANE_CHUNK_SIZE (50000000)
109 int ffio_limit(AVIOContext *s, int size)
112 int64_t remaining= s->maxsize - avio_tell(s);
113 if(remaining < size){
114 int64_t newsize= avio_size(s);
115 if(!s->maxsize || s->maxsize<newsize)
116 s->maxsize= newsize - !newsize;
117 remaining= s->maxsize - avio_tell(s);
118 remaining= FFMAX(remaining, 0);
121 if(s->maxsize>=0 && remaining+1 < size){
122 av_log(NULL, remaining ? AV_LOG_ERROR : AV_LOG_DEBUG, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
130 * Read the data in sane-sized chunks and append to pkt.
131 * Return the number of bytes read or an error.
133 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
135 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
136 int orig_size = pkt->size;
140 int prev_size = pkt->size;
144 * When the caller requests a lot of data, limit it to the amount left
145 * in file or SANE_CHUNK_SIZE when it is not known
148 if (read_size > SANE_CHUNK_SIZE/10) {
149 read_size = ffio_limit(s, read_size);
150 // If filesize/maxsize is unknown, limit to SANE_CHUNK_SIZE
152 read_size = FFMIN(read_size, SANE_CHUNK_SIZE);
155 ret = av_grow_packet(pkt, read_size);
159 ret = avio_read(s, pkt->data + prev_size, read_size);
160 if (ret != read_size) {
161 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
168 pkt->flags |= AV_PKT_FLAG_CORRUPT;
173 return pkt->size > orig_size ? pkt->size - orig_size : ret;
176 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
181 pkt->pos = avio_tell(s);
183 return append_packet_chunked(s, pkt, size);
186 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
189 return av_get_packet(s, pkt, size);
190 return append_packet_chunked(s, pkt, size);
/**
 * Check whether filename contains a pattern (e.g. "%d") that
 * av_get_frame_filename() can expand with a frame number.
 *
 * @return 1 if a number can be substituted, 0 otherwise
 */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
}
200 AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
202 AVProbeData lpd = *pd;
203 AVInputFormat *fmt1 = NULL, *fmt;
204 int score, nodat = 0, score_max=0;
205 const static uint8_t zerobuffer[AVPROBE_PADDING_SIZE];
208 lpd.buf = zerobuffer;
210 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
211 int id3len = ff_id3v2_tag_len(lpd.buf);
212 if (lpd.buf_size > id3len + 16) {
214 lpd.buf_size -= id3len;
220 while ((fmt1 = av_iformat_next(fmt1))) {
221 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
224 if (fmt1->read_probe) {
225 score = fmt1->read_probe(&lpd);
226 if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
227 score = FFMAX(score, nodat ? AVPROBE_SCORE_EXTENSION / 2 - 1 : 1);
228 } else if (fmt1->extensions) {
229 if (av_match_ext(lpd.filename, fmt1->extensions)) {
230 score = AVPROBE_SCORE_EXTENSION;
233 if (score > score_max) {
236 }else if (score == score_max)
240 score_max = FFMIN(AVPROBE_SCORE_EXTENSION / 2 - 1, score_max);
241 *score_ret= score_max;
246 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
249 AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
250 if(score_ret > *score_max){
251 *score_max= score_ret;
257 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
259 return av_probe_input_format2(pd, is_opened, &score);
262 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
264 static const struct {
265 const char *name; enum AVCodecID id; enum AVMediaType type;
267 { "aac" , AV_CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
268 { "ac3" , AV_CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
269 { "dts" , AV_CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
270 { "eac3" , AV_CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
271 { "h264" , AV_CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
272 { "loas" , AV_CODEC_ID_AAC_LATM , AVMEDIA_TYPE_AUDIO },
273 { "m4v" , AV_CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
274 { "mp3" , AV_CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
275 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
279 AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
281 if (fmt && st->request_probe <= score) {
283 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
284 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
285 for (i = 0; fmt_id_type[i].name; i++) {
286 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
287 st->codec->codec_id = fmt_id_type[i].id;
288 st->codec->codec_type = fmt_id_type[i].type;
/************************************************************/
/* input media file */
299 int av_demuxer_open(AVFormatContext *ic){
302 if (ic->iformat->read_header) {
303 err = ic->iformat->read_header(ic);
308 if (ic->pb && !ic->data_offset)
309 ic->data_offset = avio_tell(ic->pb);
315 /** size of probe buffer, for guessing file type from file contents */
316 #define PROBE_BUF_MIN 2048
317 #define PROBE_BUF_MAX (1<<20)
319 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
320 const char *filename, void *logctx,
321 unsigned int offset, unsigned int max_probe_size)
323 AVProbeData pd = { filename ? filename : "", NULL, -offset };
324 unsigned char *buf = NULL;
326 int ret = 0, probe_size, buf_offset = 0;
328 if (!max_probe_size) {
329 max_probe_size = PROBE_BUF_MAX;
330 } else if (max_probe_size > PROBE_BUF_MAX) {
331 max_probe_size = PROBE_BUF_MAX;
332 } else if (max_probe_size < PROBE_BUF_MIN) {
333 av_log(logctx, AV_LOG_ERROR,
334 "Specified probe size value %u cannot be < %u\n", max_probe_size, PROBE_BUF_MIN);
335 return AVERROR(EINVAL);
338 if (offset >= max_probe_size) {
339 return AVERROR(EINVAL);
342 if (!*fmt && pb->av_class && av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type) >= 0 && mime_type) {
343 if (!av_strcasecmp(mime_type, "audio/aacp")) {
344 *fmt = av_find_input_format("aac");
346 av_freep(&mime_type);
349 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
350 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
351 int score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0;
354 if (probe_size < offset) {
358 /* read probe data */
359 buftmp = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
362 return AVERROR(ENOMEM);
365 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
366 /* fail if error was not end of file, otherwise, lower score */
367 if (ret != AVERROR_EOF) {
372 ret = 0; /* error was end of file, nothing read */
374 pd.buf_size = buf_offset += ret;
375 pd.buf = &buf[offset];
377 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
379 /* guess file format */
380 *fmt = av_probe_input_format2(&pd, 1, &score);
382 if(score <= AVPROBE_SCORE_RETRY){ //this can only be true in the last iteration
383 av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
385 av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
391 return AVERROR_INVALIDDATA;
394 /* rewind. reuse probe buffer to avoid seeking */
395 ret = ffio_rewind_with_probe_data(pb, &buf, pd.buf_size);
400 /* open input file and probe the format if necessary */
401 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
404 AVProbeData pd = {filename, NULL, 0};
405 int score = AVPROBE_SCORE_RETRY;
408 s->flags |= AVFMT_FLAG_CUSTOM_IO;
410 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
411 else if (s->iformat->flags & AVFMT_NOFILE)
412 av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
413 "will be ignored with AVFMT_NOFILE format.\n");
417 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
418 (!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score))))
421 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
422 &s->interrupt_callback, options)) < 0)
426 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
429 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
430 AVPacketList **plast_pktl){
431 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
436 (*plast_pktl)->next = pktl;
438 *packet_buffer = pktl;
440 /* add the packet in the buffered packet list */
446 int avformat_queue_attached_pictures(AVFormatContext *s)
449 for (i = 0; i < s->nb_streams; i++)
450 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
451 s->streams[i]->discard < AVDISCARD_ALL) {
452 AVPacket copy = s->streams[i]->attached_pic;
453 copy.buf = av_buffer_ref(copy.buf);
455 return AVERROR(ENOMEM);
457 add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end);
462 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
464 AVFormatContext *s = *ps;
466 AVDictionary *tmp = NULL;
467 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
469 if (!s && !(s = avformat_alloc_context()))
470 return AVERROR(ENOMEM);
472 av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
473 return AVERROR(EINVAL);
479 av_dict_copy(&tmp, *options, 0);
481 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
484 if ((ret = init_input(s, filename, &tmp)) < 0)
486 avio_skip(s->pb, s->skip_initial_bytes);
488 /* check filename in case an image number is expected */
489 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
490 if (!av_filename_number_test(filename)) {
491 ret = AVERROR(EINVAL);
496 s->duration = s->start_time = AV_NOPTS_VALUE;
497 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
499 /* allocate private data */
500 if (s->iformat->priv_data_size > 0) {
501 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
502 ret = AVERROR(ENOMEM);
505 if (s->iformat->priv_class) {
506 *(const AVClass**)s->priv_data = s->iformat->priv_class;
507 av_opt_set_defaults(s->priv_data);
508 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
513 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
515 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
517 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
518 if ((ret = s->iformat->read_header(s)) < 0)
521 if (id3v2_extra_meta) {
522 if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") ||
523 !strcmp(s->iformat->name, "tta")) {
524 if((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
527 av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n");
529 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
531 if ((ret = avformat_queue_attached_pictures(s)) < 0)
534 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
535 s->data_offset = avio_tell(s->pb);
537 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
540 av_dict_free(options);
547 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
549 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
551 avformat_free_context(s);
556 /*******************************************************/
558 static void force_codec_ids(AVFormatContext *s, AVStream *st)
560 switch(st->codec->codec_type){
561 case AVMEDIA_TYPE_VIDEO:
562 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
564 case AVMEDIA_TYPE_AUDIO:
565 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
567 case AVMEDIA_TYPE_SUBTITLE:
568 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
573 static void probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
575 if(st->request_probe>0){
576 AVProbeData *pd = &st->probe_data;
578 av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
582 uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
586 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
587 pd->buf_size += pkt->size;
588 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
591 st->probe_packets = 0;
593 av_log(s, AV_LOG_WARNING, "nothing to probe for stream %d\n",
598 end= s->raw_packet_buffer_remaining_size <= 0
599 || st->probe_packets<=0;
601 if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
602 int score= set_codec_from_probe_data(s, st, pd);
603 if( (st->codec->codec_id != AV_CODEC_ID_NONE && score > AVPROBE_SCORE_RETRY)
607 st->request_probe= -1;
608 if(st->codec->codec_id != AV_CODEC_ID_NONE){
609 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
611 av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
613 force_codec_ids(s, st);
618 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
624 AVPacketList *pktl = s->raw_packet_buffer;
628 st = s->streams[pkt->stream_index];
629 if (s->raw_packet_buffer_remaining_size <= 0)
630 probe_codec(s, st, NULL);
631 if(st->request_probe <= 0){
632 s->raw_packet_buffer = pktl->next;
633 s->raw_packet_buffer_remaining_size += pkt->size;
642 ret= s->iformat->read_packet(s, pkt);
644 if (!pktl || ret == AVERROR(EAGAIN))
646 for (i = 0; i < s->nb_streams; i++) {
648 if (st->probe_packets) {
649 probe_codec(s, st, NULL);
651 av_assert0(st->request_probe <= 0);
656 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
657 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
658 av_log(s, AV_LOG_WARNING,
659 "Dropped corrupted packet (stream = %d)\n",
665 if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
666 av_packet_merge_side_data(pkt);
668 if(pkt->stream_index >= (unsigned)s->nb_streams){
669 av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
673 st= s->streams[pkt->stream_index];
674 pkt->dts = wrap_timestamp(st, pkt->dts);
675 pkt->pts = wrap_timestamp(st, pkt->pts);
677 force_codec_ids(s, st);
679 /* TODO: audio: time filter; video: frame reordering (pts != dts) */
680 if (s->use_wallclock_as_timestamps)
681 pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);
683 if(!pktl && st->request_probe <= 0)
686 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
687 s->raw_packet_buffer_remaining_size -= pkt->size;
689 probe_codec(s, st, pkt);
#if FF_API_READ_PACKET
/* Deprecated public wrapper kept for ABI compatibility. */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    return ff_read_packet(s, pkt);
}
#endif
701 /**********************************************************/
703 static int determinable_frame_size(AVCodecContext *avctx)
705 if (/*avctx->codec_id == AV_CODEC_ID_AAC ||*/
706 avctx->codec_id == AV_CODEC_ID_MP1 ||
707 avctx->codec_id == AV_CODEC_ID_MP2 ||
708 avctx->codec_id == AV_CODEC_ID_MP3/* ||
709 avctx->codec_id == AV_CODEC_ID_CELT*/)
715 * Get the number of samples of an audio frame. Return -1 on error.
717 int ff_get_audio_frame_size(AVCodecContext *enc, int size, int mux)
721 /* give frame_size priority if demuxing */
722 if (!mux && enc->frame_size > 1)
723 return enc->frame_size;
725 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
728 /* Fall back on using frame_size if muxing. */
729 if (enc->frame_size > 1)
730 return enc->frame_size;
732 //For WMA we currently have no other means to calculate duration thus we
733 //do it here by assuming CBR, which is true for all known cases.
734 if(!mux && enc->bit_rate>0 && size>0 && enc->sample_rate>0 && enc->block_align>1) {
735 if (enc->codec_id == AV_CODEC_ID_WMAV1 || enc->codec_id == AV_CODEC_ID_WMAV2)
736 return ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
744 * Return the frame duration in seconds. Return 0 if not available.
746 void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st,
747 AVCodecParserContext *pc, AVPacket *pkt)
753 switch(st->codec->codec_type) {
754 case AVMEDIA_TYPE_VIDEO:
755 if (st->r_frame_rate.num && !pc) {
756 *pnum = st->r_frame_rate.den;
757 *pden = st->r_frame_rate.num;
758 } else if(st->time_base.num*1000LL > st->time_base.den) {
759 *pnum = st->time_base.num;
760 *pden = st->time_base.den;
761 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
762 *pnum = st->codec->time_base.num;
763 *pden = st->codec->time_base.den;
764 if (pc && pc->repeat_pict) {
765 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
766 *pden /= 1 + pc->repeat_pict;
768 *pnum *= 1 + pc->repeat_pict;
770 //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
771 //Thus if we have no parser in such case leave duration undefined.
772 if(st->codec->ticks_per_frame>1 && !pc){
777 case AVMEDIA_TYPE_AUDIO:
778 frame_size = ff_get_audio_frame_size(st->codec, pkt->size, 0);
779 if (frame_size <= 0 || st->codec->sample_rate <= 0)
782 *pden = st->codec->sample_rate;
789 static int is_intra_only(AVCodecContext *enc){
790 const AVCodecDescriptor *desc;
792 if(enc->codec_type != AVMEDIA_TYPE_VIDEO)
795 desc = av_codec_get_codec_descriptor(enc);
797 desc = avcodec_descriptor_get(enc->codec_id);
798 av_codec_set_codec_descriptor(enc, desc);
801 return !!(desc->props & AV_CODEC_PROP_INTRA_ONLY);
805 static int has_decode_delay_been_guessed(AVStream *st)
807 if(st->codec->codec_id != AV_CODEC_ID_H264) return 1;
808 if(!st->info) // if we have left find_stream_info then nb_decoded_frames wont increase anymore for stream copy
810 #if CONFIG_H264_DECODER
811 if(st->codec->has_b_frames &&
812 avpriv_h264_has_num_reorder_frames(st->codec) == st->codec->has_b_frames)
815 if(st->codec->has_b_frames<3)
816 return st->nb_decoded_frames >= 7;
817 else if(st->codec->has_b_frames<4)
818 return st->nb_decoded_frames >= 18;
820 return st->nb_decoded_frames >= 20;
823 static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
827 if (pktl == s->parse_queue_end)
828 return s->packet_buffer;
832 static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_index)
834 if (s->correct_ts_overflow && st->pts_wrap_bits < 63 &&
835 st->pts_wrap_reference == AV_NOPTS_VALUE && st->first_dts != AV_NOPTS_VALUE) {
838 // reference time stamp should be 60 s before first time stamp
839 int64_t pts_wrap_reference = st->first_dts - av_rescale(60, st->time_base.den, st->time_base.num);
840 // if first time stamp is not more than 1/8 and 60s before the wrap point, subtract rather than add wrap offset
841 int pts_wrap_behavior = (st->first_dts < (1LL<<st->pts_wrap_bits) - (1LL<<st->pts_wrap_bits-3)) ||
842 (st->first_dts < (1LL<<st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ?
843 AV_PTS_WRAP_ADD_OFFSET : AV_PTS_WRAP_SUB_OFFSET;
845 AVProgram *first_program = av_find_program_from_stream(s, NULL, stream_index);
847 if (!first_program) {
848 int default_stream_index = av_find_default_stream_index(s);
849 if (s->streams[default_stream_index]->pts_wrap_reference == AV_NOPTS_VALUE) {
850 for (i=0; i<s->nb_streams; i++) {
851 s->streams[i]->pts_wrap_reference = pts_wrap_reference;
852 s->streams[i]->pts_wrap_behavior = pts_wrap_behavior;
856 st->pts_wrap_reference = s->streams[default_stream_index]->pts_wrap_reference;
857 st->pts_wrap_behavior = s->streams[default_stream_index]->pts_wrap_behavior;
861 AVProgram *program = first_program;
863 if (program->pts_wrap_reference != AV_NOPTS_VALUE) {
864 pts_wrap_reference = program->pts_wrap_reference;
865 pts_wrap_behavior = program->pts_wrap_behavior;
868 program = av_find_program_from_stream(s, program, stream_index);
871 // update every program with differing pts_wrap_reference
872 program = first_program;
874 if (program->pts_wrap_reference != pts_wrap_reference) {
875 for (i=0; i<program->nb_stream_indexes; i++) {
876 s->streams[program->stream_index[i]]->pts_wrap_reference = pts_wrap_reference;
877 s->streams[program->stream_index[i]]->pts_wrap_behavior = pts_wrap_behavior;
880 program->pts_wrap_reference = pts_wrap_reference;
881 program->pts_wrap_behavior = pts_wrap_behavior;
883 program = av_find_program_from_stream(s, program, stream_index);
891 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
892 int64_t dts, int64_t pts, AVPacket *pkt)
894 AVStream *st= s->streams[stream_index];
895 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
896 int64_t pts_buffer[MAX_REORDER_DELAY+1];
900 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))
903 delay = st->codec->has_b_frames;
904 st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
906 shift = st->first_dts - RELATIVE_TS_BASE;
908 for (i=0; i<MAX_REORDER_DELAY+1; i++)
909 pts_buffer[i] = AV_NOPTS_VALUE;
911 if (is_relative(pts))
914 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
915 if(pktl->pkt.stream_index != stream_index)
917 if(is_relative(pktl->pkt.pts))
918 pktl->pkt.pts += shift;
920 if(is_relative(pktl->pkt.dts))
921 pktl->pkt.dts += shift;
923 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
924 st->start_time= pktl->pkt.pts;
926 if(pktl->pkt.pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
927 pts_buffer[0]= pktl->pkt.pts;
928 for(i=0; i<delay && pts_buffer[i] > pts_buffer[i+1]; i++)
929 FFSWAP(int64_t, pts_buffer[i], pts_buffer[i+1]);
930 if(pktl->pkt.dts == AV_NOPTS_VALUE)
931 pktl->pkt.dts= pts_buffer[0];
935 if (update_wrap_reference(s, st, stream_index) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
936 // correct first time stamps to negative values
937 st->first_dts = wrap_timestamp(st, st->first_dts);
938 st->cur_dts = wrap_timestamp(st, st->cur_dts);
939 pkt->dts = wrap_timestamp(st, pkt->dts);
940 pkt->pts = wrap_timestamp(st, pkt->pts);
941 pts = wrap_timestamp(st, pts);
944 if (st->start_time == AV_NOPTS_VALUE)
945 st->start_time = pts;
948 static void update_initial_durations(AVFormatContext *s, AVStream *st,
949 int stream_index, int duration)
951 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
952 int64_t cur_dts= RELATIVE_TS_BASE;
954 if(st->first_dts != AV_NOPTS_VALUE){
955 cur_dts= st->first_dts;
956 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
957 if(pktl->pkt.stream_index == stream_index){
958 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
963 if(pktl && pktl->pkt.dts != st->first_dts) {
964 av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s in the queue\n", av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts));
968 av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(st->first_dts));
971 pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
972 st->first_dts = cur_dts;
973 }else if(st->cur_dts != RELATIVE_TS_BASE)
976 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
977 if(pktl->pkt.stream_index != stream_index)
979 if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
980 && !pktl->pkt.duration){
981 pktl->pkt.dts= cur_dts;
982 if(!st->codec->has_b_frames)
983 pktl->pkt.pts= cur_dts;
984 // if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
985 pktl->pkt.duration = duration;
988 cur_dts = pktl->pkt.dts + pktl->pkt.duration;
991 st->cur_dts= cur_dts;
994 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
995 AVCodecParserContext *pc, AVPacket *pkt)
997 int num, den, presentation_delayed, delay, i;
1000 if (s->flags & AVFMT_FLAG_NOFILLIN)
1003 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
1004 pkt->dts= AV_NOPTS_VALUE;
1006 if (st->codec->codec_id != AV_CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
1007 //FIXME Set low_delay = 0 when has_b_frames = 1
1008 st->codec->has_b_frames = 1;
1010 /* do we have a video B-frame ? */
1011 delay= st->codec->has_b_frames;
1012 presentation_delayed = 0;
1014 /* XXX: need has_b_frame, but cannot get it if the codec is
1017 pc && pc->pict_type != AV_PICTURE_TYPE_B)
1018 presentation_delayed = 1;
1020 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && st->pts_wrap_bits<63 && pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > pkt->pts){
1021 if(is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > st->cur_dts) {
1022 pkt->dts -= 1LL<<st->pts_wrap_bits;
1024 pkt->pts += 1LL<<st->pts_wrap_bits;
1027 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
1028 // we take the conservative approach and discard both
1029 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
1030 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
1031 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1032 if(strcmp(s->iformat->name, "mov,mp4,m4a,3gp,3g2,mj2")) // otherwise we discard correct timestamps for vc1-wmapro.ism
1033 pkt->dts= AV_NOPTS_VALUE;
1036 if (pkt->duration == 0) {
1037 ff_compute_frame_duration(&num, &den, st, pc, pkt);
1039 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
1042 if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
1043 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1045 /* correct timestamps with byte offset if demuxers only have timestamps
1046 on packet boundaries */
1047 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1048 /* this will estimate bitrate based on this frame's duration and size */
1049 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1050 if(pkt->pts != AV_NOPTS_VALUE)
1052 if(pkt->dts != AV_NOPTS_VALUE)
1056 if (pc && pc->dts_sync_point >= 0) {
1057 // we have synchronization info from the parser
1058 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1060 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1061 if (pkt->dts != AV_NOPTS_VALUE) {
1062 // got DTS from the stream, update reference timestamp
1063 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1064 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1065 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1066 // compute DTS based on reference timestamp
1067 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1068 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1070 if (pc->dts_sync_point > 0)
1071 st->reference_dts = pkt->dts; // new reference
1075 /* This may be redundant, but it should not hurt. */
1076 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1077 presentation_delayed = 1;
1079 av_dlog(NULL, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
1080 presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
1081 /* interpolate PTS and DTS if they are not present */
1082 //We skip H264 currently because delay and has_b_frames are not reliably set
1083 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != AV_CODEC_ID_H264){
1084 if (presentation_delayed) {
1085 /* DTS = decompression timestamp */
1086 /* PTS = presentation timestamp */
1087 if (pkt->dts == AV_NOPTS_VALUE)
1088 pkt->dts = st->last_IP_pts;
1089 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
1090 if (pkt->dts == AV_NOPTS_VALUE)
1091 pkt->dts = st->cur_dts;
1093 /* this is tricky: the dts must be incremented by the duration
1094 of the frame we are displaying, i.e. the last I- or P-frame */
1095 if (st->last_IP_duration == 0)
1096 st->last_IP_duration = pkt->duration;
1097 if(pkt->dts != AV_NOPTS_VALUE)
1098 st->cur_dts = pkt->dts + st->last_IP_duration;
1099 st->last_IP_duration = pkt->duration;
1100 st->last_IP_pts= pkt->pts;
1101 /* cannot compute PTS if not present (we can compute it only
1102 by knowing the future */
1103 } else if (pkt->pts != AV_NOPTS_VALUE ||
1104 pkt->dts != AV_NOPTS_VALUE ||
1106 int duration = pkt->duration;
1108 /* presentation is not delayed : PTS and DTS are the same */
1109 if (pkt->pts == AV_NOPTS_VALUE)
1110 pkt->pts = pkt->dts;
1111 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
1113 if (pkt->pts == AV_NOPTS_VALUE)
1114 pkt->pts = st->cur_dts;
1115 pkt->dts = pkt->pts;
1116 if (pkt->pts != AV_NOPTS_VALUE)
1117 st->cur_dts = pkt->pts + duration;
1121 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
1122 st->pts_buffer[0]= pkt->pts;
1123 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1124 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1125 if(pkt->dts == AV_NOPTS_VALUE)
1126 pkt->dts= st->pts_buffer[0];
1128 if(st->codec->codec_id == AV_CODEC_ID_H264){ // we skipped it above so we try here
1129 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt); // this should happen on the first packet
1131 if(pkt->dts > st->cur_dts)
1132 st->cur_dts = pkt->dts;
1134 av_dlog(NULL, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1135 presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1138 if (is_intra_only(st->codec))
1139 pkt->flags |= AV_PKT_FLAG_KEY;
1141 pkt->convergence_duration = pc->convergence_duration;
1144 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1147 AVPacketList *pktl = *pkt_buf;
1148 *pkt_buf = pktl->next;
1149 av_free_packet(&pktl->pkt);
1152 *pkt_buf_end = NULL;
1156 * Parse a packet, add all split parts to parse_queue
1158 * @param pkt packet to parse, NULL when flushing the parser at end of stream
/* Run the stream's parser over @pkt, splitting it into one or more output
 * packets that are appended to s->parse_queue. A NULL @pkt flushes the
 * parser at end of stream (a zero-sized flush_pkt is used internally).
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): dump omits original source lines (numbering gaps), e.g.
 * the data/size advance after av_parser_parse2 and several closing braces. */
1160 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1162 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1163 AVStream *st = s->streams[stream_index];
1164 uint8_t *data = pkt ? pkt->data : NULL;
1165 int size = pkt ? pkt->size : 0;
1166 int ret = 0, got_output = 0;
1169 av_init_packet(&flush_pkt);
1172 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1173 // preserve 0-size sync packets
1174 compute_pkt_fields(s, st, st->parser, pkt);
1177 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1180 av_init_packet(&out_pkt);
1181 len = av_parser_parse2(st->parser, st->codec,
1182 &out_pkt.data, &out_pkt.size, data, size,
1183 pkt->pts, pkt->dts, pkt->pos);
/* pts/dts are consumed by the first parse call only; clear them so later
 * iterations do not reuse the same timestamps for subsequent frames. */
1185 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1187 /* increment read pointer */
1191 got_output = !!out_pkt.size;
/* transfer side data ownership to the first output packet */
1196 if (pkt->side_data) {
1197 out_pkt.side_data = pkt->side_data;
1198 out_pkt.side_data_elems = pkt->side_data_elems;
1199 pkt->side_data = NULL;
1200 pkt->side_data_elems = 0;
1203 /* set the duration */
1204 out_pkt.duration = 0;
1205 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1206 if (st->codec->sample_rate > 0) {
1207 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1208 (AVRational){ 1, st->codec->sample_rate },
1212 } else if (st->codec->time_base.num != 0 &&
1213 st->codec->time_base.den != 0) {
1214 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1215 st->codec->time_base,
1220 out_pkt.stream_index = st->index;
1221 out_pkt.pts = st->parser->pts;
1222 out_pkt.dts = st->parser->dts;
1223 out_pkt.pos = st->parser->pos;
1225 if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
1226 out_pkt.pos = st->parser->frame_offset;
1228 if (st->parser->key_frame == 1 ||
1229 (st->parser->key_frame == -1 &&
1230 st->parser->pict_type == AV_PICTURE_TYPE_I))
1231 out_pkt.flags |= AV_PKT_FLAG_KEY;
1233 if(st->parser->key_frame == -1 && st->parser->pict_type==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
1234 out_pkt.flags |= AV_PKT_FLAG_KEY;
1236 compute_pkt_fields(s, st, st->parser, &out_pkt);
/* parser returned the input buffer unchanged: share the input's buffer
 * instead of copying */
1238 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1239 out_pkt.buf = pkt->buf;
1241 #if FF_API_DESTRUCT_PACKET
1242 out_pkt.destruct = pkt->destruct;
1243 pkt->destruct = NULL;
1246 if ((ret = av_dup_packet(&out_pkt)) < 0)
1249 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1250 av_free_packet(&out_pkt);
1251 ret = AVERROR(ENOMEM);
1257 /* end of the stream => close and free the parser */
1258 if (pkt == &flush_pkt) {
1259 av_parser_close(st->parser);
1264 av_free_packet(pkt);
/* Pop the first packet from the given packet list into the caller-supplied
 * packet and clear the tail pointer when the list becomes empty.
 * NOTE(review): dump omits original lines (numbering gaps) — the packet
 * copy, free of the list node, and return are not visible here. */
1268 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1269 AVPacketList **pkt_buffer_end,
1273 av_assert0(*pkt_buffer);
1276 *pkt_buffer = pktl->next;
1278 *pkt_buffer_end = NULL;
/* Core demux loop: read raw packets via ff_read_packet(), lazily create a
 * parser per stream when needed, either emit the packet as-is (with
 * timestamp fixups from compute_pkt_fields) or feed it through
 * parse_packet(), and finally return one packet from the parse queue.
 * NOTE(review): dump omits original lines (numbering gaps) — several error
 * paths, the EOF handling, and closing braces are not visible here. */
1283 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1285 int ret = 0, i, got_packet = 0;
1287 av_init_packet(pkt);
1289 while (!got_packet && !s->parse_queue) {
1293 /* read next packet */
1294 ret = ff_read_packet(s, &cur_pkt);
1296 if (ret == AVERROR(EAGAIN))
1298 /* flush the parsers */
1299 for(i = 0; i < s->nb_streams; i++) {
1301 if (st->parser && st->need_parsing)
1302 parse_packet(s, NULL, st->index);
1304 /* all remaining packets are now in parse_queue =>
1305 * really terminate parsing */
1309 st = s->streams[cur_pkt.stream_index];
/* pts < dts is invalid for a single packet; warn but keep going */
1311 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1312 cur_pkt.dts != AV_NOPTS_VALUE &&
1313 cur_pkt.pts < cur_pkt.dts) {
1314 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1315 cur_pkt.stream_index,
1316 av_ts2str(cur_pkt.pts),
1317 av_ts2str(cur_pkt.dts),
1320 if (s->debug & FF_FDEBUG_TS)
1321 av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1322 cur_pkt.stream_index,
1323 av_ts2str(cur_pkt.pts),
1324 av_ts2str(cur_pkt.dts),
/* lazily open a parser the first time this stream needs one */
1329 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1330 st->parser = av_parser_init(st->codec->codec_id);
1332 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1333 "%s, packets or times may be invalid.\n",
1334 avcodec_get_name(st->codec->codec_id));
1335 /* no parser available: just output the raw packets */
1336 st->need_parsing = AVSTREAM_PARSE_NONE;
1337 } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1338 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1339 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1340 st->parser->flags |= PARSER_FLAG_ONCE;
1341 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
1342 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
1346 if (!st->need_parsing || !st->parser) {
1347 /* no parsing needed: we just output the packet as is */
1349 compute_pkt_fields(s, st, NULL, pkt);
1350 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1351 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1352 ff_reduce_index(s, st->index);
1353 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1356 } else if (st->discard < AVDISCARD_ALL) {
1357 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1361 av_free_packet(&cur_pkt);
/* drop packets until the next keyframe when skip_to_keyframe is set */
1363 if (pkt->flags & AV_PKT_FLAG_KEY)
1364 st->skip_to_keyframe = 0;
1365 if (st->skip_to_keyframe) {
1366 av_free_packet(&cur_pkt);
1374 if (!got_packet && s->parse_queue)
1375 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1377 if(s->debug & FF_FDEBUG_TS)
1378 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1380 av_ts2str(pkt->pts),
1381 av_ts2str(pkt->dts),
/* Public read API. Without AVFMT_FLAG_GENPTS it simply drains the packet
 * buffer or calls read_frame_internal(). With GENPTS it buffers packets and
 * scans ahead to synthesize missing pts values from later dts values before
 * returning a packet; relative timestamps are rebased at the end.
 * NOTE(review): dump omits original lines (numbering gaps) — the genpts
 * loop entry, several braces and return paths are not visible here. */
1389 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1391 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1397 ret = s->packet_buffer ?
1398 read_from_packet_buffer(&s->packet_buffer, &s->packet_buffer_end, pkt) :
1399 read_frame_internal(s, pkt);
1406 AVPacketList *pktl = s->packet_buffer;
1409 AVPacket *next_pkt = &pktl->pkt;
1411 if (next_pkt->dts != AV_NOPTS_VALUE) {
1412 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1413 // last dts seen for this stream. if any of packets following
1414 // current one had no dts, we will set this to AV_NOPTS_VALUE.
1415 int64_t last_dts = next_pkt->dts;
1416 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1417 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1418 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1419 if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1420 next_pkt->pts = pktl->pkt.dts;
1422 if (last_dts != AV_NOPTS_VALUE) {
1423 // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1424 last_dts = pktl->pkt.dts;
1429 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1430 // Fixing the last reference frame had none pts issue (For MXF etc).
1431 // We only do this when
1433 // 2. we are not able to resolve a pts value for current packet.
1434 // 3. the packets for this stream at the end of the files had valid dts.
1435 next_pkt->pts = last_dts + next_pkt->duration;
1437 pktl = s->packet_buffer;
1440 /* read packet from packet buffer, if there is data */
1441 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1442 next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1443 ret = read_from_packet_buffer(&s->packet_buffer,
1444 &s->packet_buffer_end, pkt);
1449 ret = read_frame_internal(s, pkt);
1451 if (pktl && ret != AVERROR(EAGAIN)) {
1458 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1459 &s->packet_buffer_end)) < 0)
1460 return AVERROR(ENOMEM);
1465 st = s->streams[pkt->stream_index];
/* inject the pending skip_samples count as packet side data (10 bytes;
 * only the first 4 are written here) */
1466 if (st->skip_samples) {
1467 uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
1468 AV_WL32(p, st->skip_samples);
1469 av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d\n", st->skip_samples);
1470 st->skip_samples = 0;
1473 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
1474 ff_reduce_index(s, st->index);
1475 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
/* convert internal relative timestamps back to absolute ones */
1478 if (is_relative(pkt->dts))
1479 pkt->dts -= RELATIVE_TS_BASE;
1480 if (is_relative(pkt->pts))
1481 pkt->pts -= RELATIVE_TS_BASE;
1486 /* XXX: suppress the packet queue */
/* Drop all buffered packets (parse queue, packet buffer, raw buffer) and
 * reset the raw-buffer byte budget. */
1487 static void flush_packet_queue(AVFormatContext *s)
1489 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1490 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1491 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1493 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1496 /*******************************************************/
/* Pick a default stream: prefer a video stream that is not an attached
 * picture; otherwise fall back to the first audio stream, then stream 0.
 * NOTE(review): dump omits lines (numbering gaps) — the return on finding
 * a video stream and the error return for nb_streams <= 0 are not visible. */
1499 int av_find_default_stream_index(AVFormatContext *s)
1501 int first_audio_index = -1;
1505 if (s->nb_streams <= 0)
1507 for(i = 0; i < s->nb_streams; i++) {
1509 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1510 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1513 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1514 first_audio_index = i;
1516 return first_audio_index >= 0 ? first_audio_index : 0;
1520 * Flush the frame reader.
/* Flush all queued packets and reset per-stream read state (parser, last
 * pts/dts bookkeeping, probe counters, reorder buffer) — used around seeks. */
1522 void ff_read_frame_flush(AVFormatContext *s)
1527 flush_packet_queue(s);
1529 /* for each stream, reset read state */
1530 for(i = 0; i < s->nb_streams; i++) {
1534 av_parser_close(st->parser);
1537 st->last_IP_pts = AV_NOPTS_VALUE;
/* streams with no known first_dts fall back to the relative-timestamp base */
1538 if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1539 else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1540 st->reference_dts = AV_NOPTS_VALUE;
1542 st->probe_packets = MAX_PROBE_PACKETS;
1544 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1545 st->pts_buffer[j]= AV_NOPTS_VALUE;
/* After a seek on ref_st to @timestamp, set every stream's cur_dts to the
 * equivalent value rescaled from ref_st's time base into each stream's own. */
1549 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1553 for(i = 0; i < s->nb_streams; i++) {
1554 AVStream *st = s->streams[i];
1556 st->cur_dts = av_rescale(timestamp,
1557 st->time_base.den * (int64_t)ref_st->time_base.num,
1558 st->time_base.num * (int64_t)ref_st->time_base.den);
/* Keep the seek index within s->max_index_size bytes: when full, halve it
 * by keeping every second entry. */
1562 void ff_reduce_index(AVFormatContext *s, int stream_index)
1564 AVStream *st= s->streams[stream_index];
1565 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1567 if((unsigned)st->nb_index_entries >= max_entries){
1569 for(i=0; 2*i<st->nb_index_entries; i++)
1570 st->index_entries[i]= st->index_entries[2*i];
1571 st->nb_index_entries= i;
/* Insert an index entry (pos/timestamp/size/distance/flags) into a sorted
 * index array, growing it with av_fast_realloc. If an entry with the same
 * timestamp exists it is updated in place; otherwise the entry is inserted
 * at the sorted position found by ff_index_search_timestamp.
 * NOTE(review): dump omits original lines (numbering gaps) — some branch
 * bodies, the ENOMEM path and the final field assignments are not visible. */
1575 int ff_add_index_entry(AVIndexEntry **index_entries,
1576 int *nb_index_entries,
1577 unsigned int *index_entries_allocated_size,
1578 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1580 AVIndexEntry *entries, *ie;
/* guard against overflow of the (count+1)*sizeof computation below */
1583 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1586 if(timestamp == AV_NOPTS_VALUE)
1587 return AVERROR(EINVAL);
1589 if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1590 timestamp -= RELATIVE_TS_BASE;
1592 entries = av_fast_realloc(*index_entries,
1593 index_entries_allocated_size,
1594 (*nb_index_entries + 1) *
1595 sizeof(AVIndexEntry));
1599 *index_entries= entries;
1601 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1604 index= (*nb_index_entries)++;
1605 ie= &entries[index];
1606 av_assert0(index==0 || ie[-1].timestamp < timestamp);
1608 ie= &entries[index];
1609 if(ie->timestamp != timestamp){
1610 if(ie->timestamp <= timestamp)
/* shift later entries up to make room at the insertion point */
1612 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1613 (*nb_index_entries)++;
1614 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1615 distance= ie->min_distance;
1619 ie->timestamp = timestamp;
1620 ie->min_distance= distance;
/* Public wrapper: wrap the timestamp for overflow handling, then delegate
 * to ff_add_index_entry on the stream's own index arrays. */
1627 int av_add_index_entry(AVStream *st,
1628 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1630 timestamp = wrap_timestamp(st, timestamp);
1631 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1632 &st->index_entries_allocated_size, pos,
1633 timestamp, size, distance, flags);
/* Binary-search a sorted index for wanted_timestamp, honoring
 * AVSEEK_FLAG_BACKWARD / AVSEEK_FLAG_ANY; when ANY is not set, step to the
 * nearest keyframe entry in the search direction.
 * NOTE(review): dump omits original lines (numbering gaps) — the a/b
 * initialization, the binary-search loop header and final return are not
 * visible here. */
1636 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1637 int64_t wanted_timestamp, int flags)
1645 //optimize appending index entries at the end
1646 if(b && entries[b-1].timestamp < wanted_timestamp)
1651 timestamp = entries[m].timestamp;
1652 if(timestamp >= wanted_timestamp)
1654 if(timestamp <= wanted_timestamp)
1657 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1659 if(!(flags & AVSEEK_FLAG_ANY)){
1660 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1661 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Public wrapper over ff_index_search_timestamp for a stream's own index. */
1670 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1673 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1674 wanted_timestamp, flags);
/* Call the demuxer's read_timestamp and wrap the result for timestamp
 * overflow when a specific stream was requested. */
1677 static int64_t ff_read_timestamp(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,
1678 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1680 int64_t ts = read_timestamp(s, stream_index, ppos, pos_limit);
1681 if (stream_index >= 0)
1682 ts = wrap_timestamp(s->streams[stream_index], ts);
/* Binary-search seek: seed the min/max position and timestamp bounds from
 * the stream's cached index entries when possible, then call ff_gen_search
 * and finally reposition the I/O context and update all cur_dts values.
 * NOTE(review): dump omits original lines (numbering gaps) — the pos_min/
 * pos_max assignments from the index entries and some braces are missing. */
1688 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1689 AVInputFormat *avif= s->iformat;
1690 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1691 int64_t ts_min, ts_max, ts;
1695 if (stream_index < 0)
1698 av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1701 ts_min= AV_NOPTS_VALUE;
1702 pos_limit= -1; //gcc falsely says it may be uninitialized
1704 st= s->streams[stream_index];
1705 if(st->index_entries){
/* lower bound: closest keyframe at or before the target */
1708 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1709 index= FFMAX(index, 0);
1710 e= &st->index_entries[index];
1712 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1714 ts_min= e->timestamp;
1715 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1716 pos_min, av_ts2str(ts_min));
1718 av_assert1(index==0);
/* upper bound: first entry strictly after the target */
1721 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1722 av_assert0(index < st->nb_index_entries);
1724 e= &st->index_entries[index];
1725 av_assert1(e->timestamp >= target_ts);
1727 ts_max= e->timestamp;
1728 pos_limit= pos_max - e->min_distance;
1729 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
1730 pos_max, pos_limit, av_ts2str(ts_max));
1734 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1739 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1742 ff_read_frame_flush(s);
1743 ff_update_cur_dts(s, st, ts);
/* Generic timestamp search: establish ts_min/ts_max by probing the file
 * start and end with read_timestamp, then narrow [pos_min, pos_limit] via
 * interpolation, bisection and finally linear search until the target
 * timestamp is bracketed. Returns the chosen byte position; *ts_ret gets
 * the timestamp found there.
 * NOTE(review): dump omits original lines (numbering gaps) — loop variable
 * declarations, the step/limit setup, and several branches are missing. */
1748 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1749 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1750 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1751 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1754 int64_t start_pos, filesize;
1757 av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1759 if(ts_min == AV_NOPTS_VALUE){
1760 pos_min = s->data_offset;
1761 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1762 if (ts_min == AV_NOPTS_VALUE)
1766 if(ts_min >= target_ts){
1771 if(ts_max == AV_NOPTS_VALUE){
/* probe backwards from EOF in growing steps until a timestamp is found */
1774 filesize = avio_size(s->pb);
1775 pos_max = filesize - 1;
1778 pos_max = FFMAX(0, pos_max - step);
1779 ts_max = ff_read_timestamp(s, stream_index, &pos_max, limit, read_timestamp);
1781 }while(ts_max == AV_NOPTS_VALUE && 2*limit > step);
1782 if (ts_max == AV_NOPTS_VALUE)
1786 int64_t tmp_pos= pos_max + 1;
1787 int64_t tmp_ts= ff_read_timestamp(s, stream_index, &tmp_pos, INT64_MAX, read_timestamp);
1788 if(tmp_ts == AV_NOPTS_VALUE)
1792 if(tmp_pos >= filesize)
1798 if(ts_max <= target_ts){
1803 if(ts_min > ts_max){
1805 }else if(ts_min == ts_max){
1810 while (pos_min < pos_limit) {
1811 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1812 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1813 assert(pos_limit <= pos_max);
1816 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1817 // interpolate position (better than dichotomy)
1818 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1819 + pos_min - approximate_keyframe_distance;
1820 }else if(no_change==1){
1821 // bisection, if interpolation failed to change min or max pos last time
1822 pos = (pos_min + pos_limit)>>1;
1824 /* linear search if bisection failed, can only happen if there
1825 are very few or no keyframes between min/max */
1830 else if(pos > pos_limit)
1834 ts = ff_read_timestamp(s, stream_index, &pos, INT64_MAX, read_timestamp); //may pass pos_limit instead of -1
1839 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1840 pos_min, pos, pos_max,
1841 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1842 pos_limit, start_pos, no_change);
1843 if(ts == AV_NOPTS_VALUE){
1844 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1847 assert(ts != AV_NOPTS_VALUE);
1848 if (target_ts <= ts) {
1849 pos_limit = start_pos - 1;
1853 if (target_ts >= ts) {
1859 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1860 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1863 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
/* NOTE(review): this re-reads using &pos_min — in the upstream source the
 * second probe uses pos_min+1 for ts_max; lines are missing here, verify */
1865 ts_max = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1866 av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1867 pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
/* Byte-position seek: clamp @pos to [data_offset, filesize-1], seek the
 * I/O context there and mark the stream as repositioned. */
1873 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1874 int64_t pos_min, pos_max;
1876 pos_min = s->data_offset;
1877 pos_max = avio_size(s->pb) - 1;
1879 if (pos < pos_min) pos= pos_min;
1880 else if(pos > pos_max) pos= pos_max;
1882 avio_seek(s->pb, pos, SEEK_SET);
1884 s->io_repositioned = 1;
/* Generic seek using the stream index: if the target lies beyond the last
 * index entry, read packets forward from the last known position to grow
 * the index until the target is covered, then seek to the located entry.
 * NOTE(review): dump omits original lines (numbering gaps) — the forward-
 * reading loop structure and several error/return paths are missing. */
1889 static int seek_frame_generic(AVFormatContext *s,
1890 int stream_index, int64_t timestamp, int flags)
1897 st = s->streams[stream_index];
1899 index = av_index_search_timestamp(st, timestamp, flags);
1901 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1904 if(index < 0 || index==st->nb_index_entries-1){
1908 if(st->nb_index_entries){
1909 av_assert0(st->index_entries);
1910 ie= &st->index_entries[st->nb_index_entries-1];
1911 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1913 ff_update_cur_dts(s, st, ie->timestamp);
1915 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1921 read_status = av_read_frame(s, &pkt);
1922 } while (read_status == AVERROR(EAGAIN));
1923 if (read_status < 0)
1925 av_free_packet(&pkt);
1926 if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1927 if(pkt.flags & AV_PKT_FLAG_KEY)
/* bail out if no keyframe shows up after the target (CDG is exempt) */
1929 if(nonkey++ > 1000 && st->codec->codec_id != AV_CODEC_ID_CDGRAPHICS){
1930 av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1935 index = av_index_search_timestamp(st, timestamp, flags);
1940 ff_read_frame_flush(s);
1941 if (s->iformat->read_seek){
1942 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1945 ie = &st->index_entries[index];
1946 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1948 ff_update_cur_dts(s, st, ie->timestamp);
/* Dispatch a seek request: byte seek, then the demuxer's own read_seek,
 * then binary search via read_timestamp, then the generic index-based seek.
 * A negative stream_index selects the default stream and rescales the
 * AV_TIME_BASE timestamp into that stream's time base.
 * NOTE(review): dump omits original lines (numbering gaps). */
1953 static int seek_frame_internal(AVFormatContext *s, int stream_index,
1954 int64_t timestamp, int flags)
1959 if (flags & AVSEEK_FLAG_BYTE) {
1960 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
1962 ff_read_frame_flush(s);
1963 return seek_frame_byte(s, stream_index, timestamp, flags);
1966 if(stream_index < 0){
1967 stream_index= av_find_default_stream_index(s);
1968 if(stream_index < 0)
1971 st= s->streams[stream_index];
1972 /* timestamp for default must be expressed in AV_TIME_BASE units */
1973 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1976 /* first, we try the format specific seek */
1977 if (s->iformat->read_seek) {
1978 ff_read_frame_flush(s);
1979 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1986 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
1987 ff_read_frame_flush(s);
1988 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
1989 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
1990 ff_read_frame_flush(s);
1991 return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public seek API. If the demuxer only implements read_seek2, translate
 * this call into an avformat_seek_file() range seek; otherwise run
 * seek_frame_internal and re-queue attached pictures on success. */
1997 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
2001 if (s->iformat->read_seek2 && !s->iformat->read_seek) {
2002 int64_t min_ts = INT64_MIN, max_ts = INT64_MAX;
2003 if ((flags & AVSEEK_FLAG_BACKWARD))
2007 return avformat_seek_file(s, stream_index, min_ts, timestamp, max_ts,
2008 flags & ~AVSEEK_FLAG_BACKWARD);
2011 ret = seek_frame_internal(s, stream_index, timestamp, flags);
2014 ret = avformat_queue_attached_pictures(s);
/* Range seek API: validate min_ts <= ts <= max_ts, prefer the demuxer's
 * read_seek2 (rescaling the range into the single stream's time base when
 * stream_index == -1), and otherwise emulate with av_seek_frame(), picking
 * the direction that keeps the result inside [min_ts, max_ts].
 * NOTE(review): dump omits original lines (numbering gaps). */
2019 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
2021 if(min_ts > ts || max_ts < ts)
2023 if (stream_index < -1 || stream_index >= (int)s->nb_streams)
2024 return AVERROR(EINVAL);
2027 flags |= AVSEEK_FLAG_ANY;
2028 flags &= ~AVSEEK_FLAG_BACKWARD;
2030 if (s->iformat->read_seek2) {
2032 ff_read_frame_flush(s);
2034 if (stream_index == -1 && s->nb_streams == 1) {
2035 AVRational time_base = s->streams[0]->time_base;
2036 ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
/* round min up / max down so the converted range never widens */
2037 min_ts = av_rescale_rnd(min_ts, time_base.den,
2038 time_base.num * (int64_t)AV_TIME_BASE,
2039 AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
2040 max_ts = av_rescale_rnd(max_ts, time_base.den,
2041 time_base.num * (int64_t)AV_TIME_BASE,
2042 AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
2045 ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
2048 ret = avformat_queue_attached_pictures(s);
2052 if(s->iformat->read_timestamp){
2053 //try to seek via read_timestamp()
2056 // Fall back on old API if new is not implemented but old is.
2057 // Note the old API has somewhat different semantics.
2058 if (s->iformat->read_seek || 1) {
2059 int dir = (ts - (uint64_t)min_ts > (uint64_t)max_ts - ts ? AVSEEK_FLAG_BACKWARD : 0);
2060 int ret = av_seek_frame(s, stream_index, ts, flags | dir);
2061 if (ret<0 && ts != min_ts && max_ts != ts) {
2062 ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
2064 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
2069 // try some generic seek like seek_frame_generic() but with new ts semantics
2070 return -1; //unreachable
2073 /*******************************************************/
2076 * Return TRUE if the stream has accurate duration in any stream.
2078 * @return TRUE if the stream has accurate duration for at least one component.
/* Return non-zero if the container or any stream carries a known duration.
 * NOTE(review): dump omits original lines (numbering gaps) — the actual
 * return statements inside/after the loop are not visible here. */
2080 static int has_duration(AVFormatContext *ic)
2085 for(i = 0;i < ic->nb_streams; i++) {
2086 st = ic->streams[i];
2087 if (st->duration != AV_NOPTS_VALUE)
2090 if (ic->duration != AV_NOPTS_VALUE)
2096 * Estimate the stream timings from the one of each components.
2098 * Also computes the global bitrate if possible.
/* Compute container-level start_time, duration and (from the file size)
 * bit_rate from the per-stream values, tracking subtitle/data stream start
 * times separately and propagating start/end times into programs.
 * NOTE(review): dump omits original lines (numbering gaps). */
2100 static void update_stream_timings(AVFormatContext *ic)
2102 int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2103 int64_t duration, duration1, filesize;
2108 start_time = INT64_MAX;
2109 start_time_text = INT64_MAX;
2110 end_time = INT64_MIN;
2111 duration = INT64_MIN;
2112 for(i = 0;i < ic->nb_streams; i++) {
2113 st = ic->streams[i];
2114 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2115 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
/* subtitle/data streams may start much earlier; track them separately */
2116 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
2117 if (start_time1 < start_time_text)
2118 start_time_text = start_time1;
2120 start_time = FFMIN(start_time, start_time1);
2121 end_time1 = AV_NOPTS_VALUE;
2122 if (st->duration != AV_NOPTS_VALUE) {
2123 end_time1 = start_time1
2124 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2125 end_time = FFMAX(end_time, end_time1);
2127 for(p = NULL; (p = av_find_program_from_stream(ic, p, i)); ){
2128 if(p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1)
2129 p->start_time = start_time1;
2130 if(p->end_time < end_time1)
2131 p->end_time = end_time1;
2134 if (st->duration != AV_NOPTS_VALUE) {
2135 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2136 duration = FFMAX(duration, duration1);
/* only trust a text-stream start time when it is close to the A/V one */
2139 if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2140 start_time = start_time_text;
2141 else if(start_time > start_time_text)
2142 av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);
2144 if (start_time != INT64_MAX) {
2145 ic->start_time = start_time;
2146 if (end_time != INT64_MIN) {
2147 if (ic->nb_programs) {
2148 for (i=0; i<ic->nb_programs; i++) {
2149 p = ic->programs[i];
2150 if(p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time)
2151 duration = FFMAX(duration, p->end_time - p->start_time);
2154 duration = FFMAX(duration, end_time - start_time);
2157 if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) {
2158 ic->duration = duration;
2160 if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2161 /* compute the bitrate */
2162 double bitrate = (double)filesize * 8.0 * AV_TIME_BASE /
2163 (double)ic->duration;
2164 if (bitrate >= 0 && bitrate <= INT_MAX)
2165 ic->bit_rate = bitrate;
/* Refresh container timings, then copy the container start_time/duration
 * into any stream that lacks its own, rescaled to the stream time base. */
2169 static void fill_all_stream_timings(AVFormatContext *ic)
2174 update_stream_timings(ic);
2175 for(i = 0;i < ic->nb_streams; i++) {
2176 st = ic->streams[i];
2177 if (st->start_time == AV_NOPTS_VALUE) {
2178 if(ic->start_time != AV_NOPTS_VALUE)
2179 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2180 if(ic->duration != AV_NOPTS_VALUE)
2181 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Fallback duration estimate: sum per-stream bit rates if the container
 * has none, then derive a duration from filesize * 8 / bit_rate for streams
 * without one, logging a warning about the inaccuracy.
 * NOTE(review): dump omits original lines (numbering gaps) — the
 * bit_rate initialization and the show_warning trigger are not visible. */
2186 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2188 int64_t filesize, duration;
2189 int bit_rate, i, show_warning = 0;
2192 /* if bit_rate is already set, we believe it */
2193 if (ic->bit_rate <= 0) {
2195 for(i=0;i<ic->nb_streams;i++) {
2196 st = ic->streams[i];
2197 if (st->codec->bit_rate > 0)
2198 bit_rate += st->codec->bit_rate;
2200 ic->bit_rate = bit_rate;
2203 /* if duration is already set, we believe it */
2204 if (ic->duration == AV_NOPTS_VALUE &&
2205 ic->bit_rate != 0) {
2206 filesize = ic->pb ? avio_size(ic->pb) : 0;
2208 for(i = 0; i < ic->nb_streams; i++) {
2209 st = ic->streams[i];
/* guard against overflow of time_base.num * bit_rate below */
2210 if ( st->time_base.num <= INT64_MAX / ic->bit_rate
2211 && st->duration == AV_NOPTS_VALUE) {
2212 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2213 st->duration = duration;
2220 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2223 #define DURATION_MAX_READ_SIZE 250000LL
2224 #define DURATION_MAX_RETRY 4
2226 /* only usable for MPEG-PS streams */
/* MPEG-PS style duration estimation: flush the demuxer, then read packets
 * from near the end of the file (growing the probed window on retries) and
 * take the last pts seen minus the stream start as the duration. Restores
 * the original file offset and per-stream dts state afterwards.
 * NOTE(review): dump omits original lines (numbering gaps) — the retry
 * variable setup, some loop braces and st assignment at 2297 are missing. */
2227 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2229 AVPacket pkt1, *pkt = &pkt1;
2231 int read_size, i, ret;
2233 int64_t filesize, offset, duration;
2236 /* flush packet queue */
2237 flush_packet_queue(ic);
2239 for (i=0; i<ic->nb_streams; i++) {
2240 st = ic->streams[i];
2241 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2242 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2245 av_parser_close(st->parser);
2250 /* estimate the end time (duration) */
2251 /* XXX: may need to support wrapping */
2252 filesize = ic->pb ? avio_size(ic->pb) : 0;
2253 end_time = AV_NOPTS_VALUE;
2255 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2259 avio_seek(ic->pb, offset, SEEK_SET);
2262 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2266 ret = ff_read_packet(ic, pkt);
2267 } while(ret == AVERROR(EAGAIN));
2270 read_size += pkt->size;
2271 st = ic->streams[pkt->stream_index];
2272 if (pkt->pts != AV_NOPTS_VALUE &&
2273 (st->start_time != AV_NOPTS_VALUE ||
2274 st->first_dts != AV_NOPTS_VALUE)) {
2275 duration = end_time = pkt->pts;
2276 if (st->start_time != AV_NOPTS_VALUE)
2277 duration -= st->start_time;
2279 duration -= st->first_dts;
/* accept the new duration unless it looks like an outlier vs last_duration */
2281 if (st->duration == AV_NOPTS_VALUE || st->info->last_duration<=0 ||
2282 (st->duration < duration && FFABS(duration - st->info->last_duration) < 60LL*st->time_base.den / st->time_base.num))
2283 st->duration = duration;
2284 st->info->last_duration = duration;
2287 av_free_packet(pkt);
2289 }while( end_time==AV_NOPTS_VALUE
2290 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2291 && ++retry <= DURATION_MAX_RETRY);
2293 fill_all_stream_timings(ic);
2295 avio_seek(ic->pb, old_offset, SEEK_SET);
2296 for (i=0; i<ic->nb_streams; i++) {
2298 st->cur_dts= st->first_dts;
2299 st->last_IP_pts = AV_NOPTS_VALUE;
2300 st->reference_dts = AV_NOPTS_VALUE;
/* Choose a duration-estimation strategy: PTS-based scan for seekable
 * MPEG-PS/TS files, per-stream timings when available, otherwise the
 * bitrate heuristic; record which method was used and log the results.
 * NOTE(review): dump omits original lines (numbering gaps). */
2304 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2308 /* get the file size, if possible */
2309 if (ic->iformat->flags & AVFMT_NOFILE) {
2312 file_size = avio_size(ic->pb);
2313 file_size = FFMAX(0, file_size);
2316 if ((!strcmp(ic->iformat->name, "mpeg") ||
2317 !strcmp(ic->iformat->name, "mpegts")) &&
2318 file_size && ic->pb->seekable) {
2319 /* get accurate estimate from the PTSes */
2320 estimate_timings_from_pts(ic, old_offset);
2321 ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
2322 } else if (has_duration(ic)) {
2323 /* at least one component has timings - we use them for all
2325 fill_all_stream_timings(ic);
2326 ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
2328 /* less precise: use bitrate info */
2329 estimate_timings_from_bit_rate(ic);
2330 ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE;
2332 update_stream_timings(ic);
2336 AVStream av_unused *st;
2337 for(i = 0;i < ic->nb_streams; i++) {
2338 st = ic->streams[i];
2339 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2340 (double) st->start_time / AV_TIME_BASE,
2341 (double) st->duration / AV_TIME_BASE);
2343 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2344 (double) ic->start_time / AV_TIME_BASE,
2345 (double) ic->duration / AV_TIME_BASE,
2346 ic->bit_rate / 1000);
/* Check whether a stream's codec context has enough parameters to be
 * considered fully probed; on failure, optionally report the missing item
 * through errmsg_ptr via the FAIL macro.
 * NOTE(review): dump omits original lines (numbering gaps) — the FAIL
 * macro's return, several break statements and the final return are
 * not visible here. */
2350 static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
2352 AVCodecContext *avctx = st->codec;
2354 #define FAIL(errmsg) do { \
2356 *errmsg_ptr = errmsg; \
2360 switch (avctx->codec_type) {
2361 case AVMEDIA_TYPE_AUDIO:
2362 if (!avctx->frame_size && determinable_frame_size(avctx))
2363 FAIL("unspecified frame size");
2364 if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2365 FAIL("unspecified sample format");
2366 if (!avctx->sample_rate)
2367 FAIL("unspecified sample rate");
2368 if (!avctx->channels)
2369 FAIL("unspecified number of channels");
2370 if (st->info->found_decoder >= 0 && !st->nb_decoded_frames && avctx->codec_id == AV_CODEC_ID_DTS)
2371 FAIL("no decodable DTS frames");
2373 case AVMEDIA_TYPE_VIDEO:
2375 FAIL("unspecified size");
2376 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
2377 FAIL("unspecified pixel format");
2378 if (st->codec->codec_id == AV_CODEC_ID_RV30 || st->codec->codec_id == AV_CODEC_ID_RV40)
2379 if (!st->sample_aspect_ratio.num && !st->codec->sample_aspect_ratio.num && !st->codec_info_nb_frames)
2380 FAIL("no frame in rv30/40 and no sar");
2382 case AVMEDIA_TYPE_SUBTITLE:
2383 if (avctx->codec_id == AV_CODEC_ID_HDMV_PGS_SUBTITLE && !avctx->width)
2384 FAIL("unspecified size");
2386 case AVMEDIA_TYPE_DATA:
2387 if(avctx->codec_id == AV_CODEC_ID_NONE) return 1;
2390 if (avctx->codec_id == AV_CODEC_ID_NONE)
2391 FAIL("unknown codec");
/* Returns 1 or 0 depending on whether decoded data was returned, or a
 * negative error code. Opens the decoder if necessary and decodes packets
 * from *avpkt until the stream's codec parameters are known or the input
 * is exhausted. */
static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options)
    const AVCodec *codec;
    int got_picture = 1, ret = 0;
    AVFrame *frame = avcodec_alloc_frame();
    AVSubtitle subtitle;
    AVPacket pkt = *avpkt; /* local copy: pkt.data/pkt.size are consumed below */

        return AVERROR(ENOMEM);

    /* Lazily open the decoder, remembering failure via found_decoder = -1. */
    if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
        AVDictionary *thread_opt = NULL;

        codec = st->codec->codec ? st->codec->codec :
                avcodec_find_decoder(st->codec->codec_id);

            st->info->found_decoder = -1;

        /* force thread count to 1 since the h264 decoder will not extract SPS
         * and PPS to extradata during multi-threaded decoding */
        av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
        ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
            av_dict_free(&thread_opt);
            st->info->found_decoder = -1;
        st->info->found_decoder = 1;
    } else if (!st->info->found_decoder)
        st->info->found_decoder = 1;

    if (st->info->found_decoder < 0) {

    /* Keep decoding while data remains (or the decoder is still flushing:
     * !pkt.data && got_picture) and some parameter is still unknown. */
    while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
           (!has_codec_parameters(st, NULL) ||
            !has_decode_delay_been_guessed(st) ||
            (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
        avcodec_get_frame_defaults(frame);
        switch(st->codec->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            ret = avcodec_decode_video2(st->codec, frame,
                                        &got_picture, &pkt);
        case AVMEDIA_TYPE_AUDIO:
            ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
        case AVMEDIA_TYPE_SUBTITLE:
            ret = avcodec_decode_subtitle2(st->codec, &subtitle,
                                           &got_picture, &pkt);
            st->nb_decoded_frames++;
        if(!pkt.data && !got_picture)
    avcodec_free_frame(&frame);
/* Look up the container tag for a codec id in a single tag table;
 * the table is terminated by an AV_CODEC_ID_NONE entry. */
unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
    while (tags->id != AV_CODEC_ID_NONE) {
/* Look up the codec id for a container tag in a single tag table:
 * an exact tag match is preferred, then a case-insensitive one. */
enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
    /* first pass: exact match */
    for(i=0; tags[i].id != AV_CODEC_ID_NONE;i++) {
        if(tag == tags[i].tag)
    /* second pass: case-insensitive match on the 4CC */
    for(i=0; tags[i].id != AV_CODEC_ID_NONE; i++) {
        if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
    return AV_CODEC_ID_NONE;
/**
 * Select a PCM codec id from a sample description.
 *
 * @param bps    bits per sample
 * @param flt    nonzero for floating-point samples
 * @param be     nonzero for big-endian byte order
 * @param sflags bitmask marking which depths use signed samples
 *               (tested as 1 << (bps - 1) after bps is reduced to bytes
 *               — presumably; the reduction line is not visible here, TODO confirm)
 */
enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
    /* floating-point formats */
    case 32: return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
    case 64: return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
    default: return AV_CODEC_ID_NONE;
    /* signed integer formats, keyed by byte depth */
    if (sflags & (1 << (bps - 1))) {
        case 1: return AV_CODEC_ID_PCM_S8;
        case 2: return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
        case 3: return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
        case 4: return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
        default: return AV_CODEC_ID_NONE;
    /* unsigned integer formats, keyed by byte depth */
        case 1: return AV_CODEC_ID_PCM_U8;
        case 2: return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
        case 3: return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
        case 4: return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
        default: return AV_CODEC_ID_NONE;
/* Convenience wrapper around av_codec_get_tag2(): returns the tag for id,
 * discarding the found/not-found status. */
unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum AVCodecID id)
    if (!av_codec_get_tag2(tags, id, &tag))
/* Search a NULL-terminated list of tag tables for id; on success store the
 * container tag in *tag and report it found. */
int av_codec_get_tag2(const AVCodecTag * const *tags, enum AVCodecID id,
    for(i=0; tags && tags[i]; i++){
        const AVCodecTag *codec_tags = tags[i];
        while (codec_tags->id != AV_CODEC_ID_NONE) {
            if (codec_tags->id == id) {
                *tag = codec_tags->tag;
/* Search a NULL-terminated list of tag tables for a container tag,
 * returning the first codec id that matches. */
enum AVCodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
    for(i=0; tags && tags[i]; i++){
        enum AVCodecID id= ff_codec_get_id(tags[i], tag);
        if(id!=AV_CODEC_ID_NONE) return id;
    return AV_CODEC_ID_NONE;
/* Fill in missing chapter end times: a chapter without an end gets the
 * start of the closest following chapter, capped by the total duration. */
static void compute_chapters_end(AVFormatContext *s)
    int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);

    for (i = 0; i < s->nb_chapters; i++)
        if (s->chapters[i]->end == AV_NOPTS_VALUE) {
            AVChapter *ch = s->chapters[i];
            /* candidate end: total stream end, rescaled into this chapter's time base */
            int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
            /* shrink the candidate to the nearest later chapter start */
            for (j = 0; j < s->nb_chapters; j++) {
                AVChapter *ch1 = s->chapters[j];
                int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
                if (j != i && next_start > ch->start && next_start < end)
            /* INT64_MAX means no usable bound was found; fall back to start */
            ch->end = (end == INT64_MAX) ? ch->start : end;
/**
 * Map an index to one of the "standard" frame rates tried during fps
 * estimation, expressed in units of 1/(12*1001) seconds per tick.
 *
 * Indices 0..719 cover integer rates 1..720 fps (value (i+1)*1001);
 * indices 720..725 cover 24, 30, 60, 12, 15 and 48 fps expressed against
 * the 1001 denominator family (value fps*1000*12).
 *
 * @param i index in [0, 60*12 + 6); out-of-range indices are not checked
 * @return the candidate rate numerator for denominator 12*1001
 */
static int get_std_framerate(int i)
{
    if (i < 60 * 12)
        return (i + 1) * 1001;
    /* fix: the excerpt was missing the function's closing brace */
    return ((const int[]) { 24, 30, 60, 12, 15, 48 })[i - 60 * 12] * 1000 * 12;
}
2595 * Is the time base unreliable.
2596 * This is a heuristic to balance between quick acceptance of the values in
2597 * the headers vs. some extra checks.
2598 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2599 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2600 * And there are "variable" fps files this needs to detect as well.
/* Heuristic: nonzero if the codec time base should not be trusted as a
 * frame rate (see the rationale in the comment above this function). */
static int tb_unreliable(AVCodecContext *c){
    /* implausibly slow (< ~1/101 fps ratio) or implausibly fast time bases */
    if( c->time_base.den >= 101L*c->time_base.num
        || c->time_base.den < 5L*c->time_base.num
        /* || c->codec_tag == AV_RL32("DIVX")
        || c->codec_tag == AV_RL32("XVID")*/
        || c->codec_tag == AV_RL32("mp4v")
        || c->codec_id == AV_CODEC_ID_MPEG2VIDEO
        || c->codec_id == AV_CODEC_ID_H264
#if FF_API_FORMAT_PARAMETERS
/* Deprecated thin wrapper kept for API compatibility; forwards to
 * avformat_find_stream_info() with no per-stream options. */
int av_find_stream_info(AVFormatContext *ic)
    return avformat_find_stream_info(ic, NULL);
/**
 * Read packets of a media file to fill in missing stream parameters
 * (possibly decoding a few frames), estimate frame rates and timings,
 * then restore the read position. Options, if given, are applied when
 * decoders are opened for the first orig_nb_streams streams.
 */
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
    int i, count, ret, j;
    AVPacket pkt1, *pkt;
    int64_t old_offset = avio_tell(ic->pb);
    int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
    int flush_codecs = ic->probesize > 0;

    av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));

    /* First pass over the streams: set up parsers and (where cheap) open
     * decoders so the header information alone may already suffice. */
    for(i=0;i<ic->nb_streams;i++) {
        const AVCodec *codec;
        AVDictionary *thread_opt = NULL;
        st = ic->streams[i];

        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
            st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
/*            if(!st->time_base.num)
            if(!st->codec->time_base.num)
                st->codec->time_base= st->time_base;
        //only for the split stuff
        if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codec->codec_id);
                if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
                    st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
                } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
                    st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
            } else if (st->need_parsing) {
                av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
                       "%s, packets or times may be invalid.\n",
                       avcodec_get_name(st->codec->codec_id));

        codec = st->codec->codec ? st->codec->codec :
                avcodec_find_decoder(st->codec->codec_id);

        /* force thread count to 1 since the h264 decoder will not extract SPS
         * and PPS to extradata during multi-threaded decoding */
        av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);

        /* Ensure that subtitle_header is properly set. */
        if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
            && codec && !st->codec->codec)
            avcodec_open2(st->codec, codec, options ? &options[i]

        //try to just open decoders, in case this is enough to get parameters
        if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) {
            if (codec && !st->codec->codec)
                avcodec_open2(st->codec, codec, options ? &options[i]
            av_dict_free(&thread_opt);

    /* Reset per-stream fps-estimation bookkeeping. */
    for (i=0; i<ic->nb_streams; i++) {
#if FF_API_R_FRAME_RATE
        ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
        ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
        ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;

        /* Main probing loop body starts here. */
        if (ff_check_interrupt(&ic->interrupt_callback)){
            av_log(ic, AV_LOG_DEBUG, "interrupted\n");

        /* check if one codec still needs to be handled */
        for(i=0;i<ic->nb_streams;i++) {
            int fps_analyze_framecount = 20;

            st = ic->streams[i];
            if (!has_codec_parameters(st, NULL))
            /* if the timebase is coarse (like the usual millisecond precision
               of mkv), we need to analyze more frames to reliably arrive at
            if (av_q2d(st->time_base) > 0.0005)
                fps_analyze_framecount *= 2;
            if (ic->fps_probe_size >= 0)
                fps_analyze_framecount = ic->fps_probe_size;
            if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
                fps_analyze_framecount = 0;
            /* variable fps and no guess at the real fps */
            if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
                && st->info->duration_count < fps_analyze_framecount
                && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            if(st->parser && st->parser->parser->split && !st->codec->extradata)
            if (st->first_dts == AV_NOPTS_VALUE &&
                (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
                 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))

        if (i == ic->nb_streams) {
            /* NOTE: if the format has no header, then we need to read
               some packets to get most of the streams, so we cannot
            if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
                /* if we found the info for all the codecs, we can stop */
                av_log(ic, AV_LOG_DEBUG, "All info found\n");

        /* we did not get all the codec info, but we read too much data */
        if (read_size >= ic->probesize) {
            av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit of %d bytes reached\n", ic->probesize);
            for (i = 0; i < ic->nb_streams; i++)
                if (!ic->streams[i]->r_frame_rate.num &&
                    ic->streams[i]->info->duration_count <= 1)
                    av_log(ic, AV_LOG_WARNING,
                           "Stream #%d: not enough frames to estimate rate; "
                           "consider increasing probesize\n", i);

        /* NOTE: a new stream can be added there if no header in file
           (AVFMTCTX_NOHEADER) */
        ret = read_frame_internal(ic, &pkt1);
        if (ret == AVERROR(EAGAIN))

        if (ic->flags & AVFMT_FLAG_NOBUFFER) {
            pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
                                &ic->packet_buffer_end);
            if ((ret = av_dup_packet(pkt)) < 0)
                goto find_stream_info_err;

        read_size += pkt->size;

        st = ic->streams[pkt->stream_index];
        if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
            /* check for non-increasing dts */
            if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
                st->info->fps_last_dts >= pkt->dts) {
                av_log(ic, AV_LOG_DEBUG, "Non-increasing DTS in stream %d: "
                       "packet %d with DTS %"PRId64", packet %d with DTS "
                       "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
                       st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
                st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
            /* check for a discontinuity in dts - if the difference in dts
             * is more than 1000 times the average packet duration in the sequence,
             * we treat it as a discontinuity */
            if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
                st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
                (pkt->dts - st->info->fps_last_dts) / 1000 >
                (st->info->fps_last_dts - st->info->fps_first_dts) / (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
                av_log(ic, AV_LOG_WARNING, "DTS discontinuity in stream %d: "
                       "packet %d with DTS %"PRId64", packet %d with DTS "
                       "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
                       st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
                st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;

            /* update stored dts values */
            if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
                st->info->fps_first_dts = pkt->dts;
                st->info->fps_first_dts_idx = st->codec_info_nb_frames;
            st->info->fps_last_dts = pkt->dts;
            st->info->fps_last_dts_idx = st->codec_info_nb_frames;
        if (st->codec_info_nb_frames>1) {
            /* stop analyzing once max_analyze_duration of data was seen */
            if (st->time_base.den > 0)
                t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
            if (st->avg_frame_rate.num > 0)
                t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), AV_TIME_BASE_Q));

            if (t >= ic->max_analyze_duration) {
                av_log(ic, AV_LOG_WARNING, "max_analyze_duration %d reached at %"PRId64" microseconds\n", ic->max_analyze_duration, t);
            if (pkt->duration) {
                st->info->codec_info_duration += pkt->duration;
                st->info->codec_info_duration_fields += st->parser && st->codec->ticks_per_frame==2 ? st->parser->repeat_pict + 1 : 2;
#if FF_API_R_FRAME_RATE
            /* Accumulate per-candidate-framerate timestamp error statistics
             * used later to pick a standard frame rate. */
            int64_t last = st->info->last_dts;

            if( pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last
                && pkt->dts - (uint64_t)last < INT64_MAX){
                double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
                int64_t duration= pkt->dts - last;

                if (!st->info->duration_error)
                    st->info->duration_error = av_mallocz(sizeof(st->info->duration_error[0])*2);

//                 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
//                     av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
                for (i=0; i<MAX_STD_TIMEBASES; i++) {
                    int framerate= get_std_framerate(i);
                    double sdts= dts*framerate/(1001*12);
                        int64_t ticks= llrint(sdts+j*0.5);
                        double error= sdts - ticks + j*0.5;
                        st->info->duration_error[j][0][i] += error;
                        st->info->duration_error[j][1][i] += error*error;
                st->info->duration_count++;
                // ignore the first 4 values, they might have some random jitter
                if (st->info->duration_count > 3 && is_relative(pkt->dts) == is_relative(last))
                    st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
            if (pkt->dts != AV_NOPTS_VALUE)
                st->info->last_dts = pkt->dts;

        /* extract extradata (e.g. SPS/PPS) via the parser's split callback */
        if(st->parser && st->parser->parser->split && !st->codec->extradata){
            int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
            if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
                st->codec->extradata_size= i;
                st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
                if (!st->codec->extradata)
                    return AVERROR(ENOMEM);
                memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
                memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);

        /* if still no information, we try to open the codec and to
           decompress the frame. We try to avoid that in most cases as
           it takes longer and uses more memory. For MPEG-4, we need to
           decompress for QuickTime.

           If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
           least one frame of codec data, this makes sure the codec initializes
           the channel configuration and does not only trust the values from the container.
        try_decode_frame(st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);

        st->codec_info_nb_frames++;

        /* Flush the decoders with an empty packet so delayed frames emerge. */
        AVPacket empty_pkt = { 0 };
        av_init_packet(&empty_pkt);

        ret = -1; /* we could not have all the codec parameters before EOF */
        for(i=0;i<ic->nb_streams;i++) {

            st = ic->streams[i];

            /* flush the decoders */
            if (st->info->found_decoder == 1) {
                    err = try_decode_frame(st, &empty_pkt,
                                            (options && i < orig_nb_streams) ?
                                            &options[i] : NULL);
                } while (err > 0 && !has_codec_parameters(st, NULL));

                    av_log(ic, AV_LOG_INFO,
                           "decoding for stream %d failed\n", st->index);

            if (!has_codec_parameters(st, &errmsg)) {
                avcodec_string(buf, sizeof(buf), st->codec, 0);
                av_log(ic, AV_LOG_WARNING,
                       "Could not find codec parameters for stream %d (%s): %s\n"
                       "Consider increasing the value for the 'analyzeduration' and 'probesize' options\n",

    // close codecs which were opened in try_decode_frame()
    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        avcodec_close(st->codec);

    /* Post-processing pass: derive tags, frame rates and dispositions. */
    for(i=0;i<ic->nb_streams;i++) {
        st = ic->streams[i];
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if(st->codec->codec_id == AV_CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
                uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
                if (avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
                    st->codec->codec_tag= tag;

            /* estimate average framerate if not set by demuxer */
            if (st->info->codec_info_duration_fields && !st->avg_frame_rate.num && st->info->codec_info_duration) {
                double best_error = 0.01;

                av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
                          st->info->codec_info_duration_fields*(int64_t)st->time_base.den,
                          st->info->codec_info_duration*2*(int64_t)st->time_base.num, 60000);

                /* round guessed framerate to a "standard" framerate if it's
                 * within 1% of the original estimate*/
                for (j = 1; j < MAX_STD_TIMEBASES; j++) {
                    AVRational std_fps = { get_std_framerate(j), 12*1001 };
                    double error = fabs(av_q2d(st->avg_frame_rate) / av_q2d(std_fps) - 1);

                    if (error < best_error) {
                        best_fps = std_fps.num;

                    av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
                              best_fps, 12*1001, INT_MAX);

            // the check for tb_unreliable() is not completely correct, since this is not about handling
            // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
            // ipmovie.c produces.
            if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
                av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
            if (st->info->duration_count>1 && !st->r_frame_rate.num
                && tb_unreliable(st->codec)) {
                double best_error= 0.01;

                /* pick the standard frame rate whose accumulated timestamp
                 * error variance is smallest */
                for (j=0; j<MAX_STD_TIMEBASES; j++) {
                    if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
                    if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
                        int n= st->info->duration_count;
                        double a= st->info->duration_error[k][0][j] / n;
                        double error= st->info->duration_error[k][1][j]/n - a*a;

                        if(error < best_error && best_error> 0.000000001){
                            num = get_std_framerate(j);
                        av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);

                // do not increase frame rate by more than 1 % in order to match a standard rate.
                if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
                    av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);

            if (!st->r_frame_rate.num){
                if( st->codec->time_base.den * (int64_t)st->time_base.num
                    <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
                    st->r_frame_rate.num = st->codec->time_base.den;
                    st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
                    st->r_frame_rate.num = st->time_base.den;
                    st->r_frame_rate.den = st->time_base.num;
        }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if(!st->codec->bits_per_coded_sample)
                st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
            // set stream disposition based on audio service type
            switch (st->codec->audio_service_type) {
            case AV_AUDIO_SERVICE_TYPE_EFFECTS:
                st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
            case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
                st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
            case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
                st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
            case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
                st->disposition = AV_DISPOSITION_COMMENT; break;
            case AV_AUDIO_SERVICE_TYPE_KARAOKE:
                st->disposition = AV_DISPOSITION_KARAOKE; break;

    estimate_timings(ic, old_offset);

    compute_chapters_end(ic);

 find_stream_info_err:
    for (i=0; i < ic->nb_streams; i++) {
        st = ic->streams[i];
        /* thread_count was forced to 1 above; restore default (0 = auto) */
        if (ic->streams[i]->codec && ic->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
            ic->streams[i]->codec->thread_count = 0;
            av_freep(&st->info->duration_error);
        av_freep(&ic->streams[i]->info);
    av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
/* Find the next program (after 'last', or the first if last is NULL) that
 * contains stream index s. */
AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
    for (i = 0; i < ic->nb_programs; i++) {
        /* resume scanning after the previously returned program */
        if (ic->programs[i] == last) {
        for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
            if (ic->programs[i]->stream_index[j] == s)
                return ic->programs[i];
/**
 * Find the "best" stream of the given media type, preferring streams with
 * more decoded frames (capped at 5), then higher bitrate, then lower frame
 * count; impaired-audience streams are deprioritized. If related_stream is
 * given, streams of its program are tried first.
 *
 * @return the stream index, or a negative AVERROR on failure
 */
int av_find_best_stream(AVFormatContext *ic,
                        enum AVMediaType type,
                        int wanted_stream_nb,
                        AVCodec **decoder_ret,
    int i, nb_streams = ic->nb_streams;
    int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1, best_bitrate = -1, best_multiframe = -1, count, bitrate, multiframe;
    unsigned *program = NULL;
    AVCodec *decoder = NULL, *best_decoder = NULL;

    /* restrict the search to the related stream's program, if any */
    if (related_stream >= 0 && wanted_stream_nb < 0) {
        AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
            program = p->stream_index;
            nb_streams = p->nb_stream_indexes;
    for (i = 0; i < nb_streams; i++) {
        int real_stream_index = program ? program[i] : i;
        AVStream *st = ic->streams[real_stream_index];
        AVCodecContext *avctx = st->codec;
        if (avctx->codec_type != type)
        if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
        /* deprioritize accessibility variants of the same content */
        if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
            decoder = avcodec_find_decoder(st->codec->codec_id);
                ret = AVERROR_DECODER_NOT_FOUND;
        count = st->codec_info_nb_frames;
        bitrate = avctx->bit_rate;
        multiframe = FFMIN(5, count);
        /* keep the current best if it beats this candidate */
        if ((best_multiframe > multiframe) ||
            (best_multiframe == multiframe && best_bitrate > bitrate) ||
            (best_multiframe == multiframe && best_bitrate == bitrate && best_count >= count))
        best_bitrate = bitrate;
        best_multiframe = multiframe;
        ret = real_stream_index;
        best_decoder = decoder;
        /* no related stream found, try again with everything */
        if (program && i == nb_streams - 1 && ret < 0) {
            nb_streams = ic->nb_streams;
            i = 0; /* no related stream found, try again with everything */
        *decoder_ret = best_decoder;
3123 /*******************************************************/
/* Resume playback: prefer the demuxer's read_play callback, fall back to
 * un-pausing the I/O layer; ENOSYS if neither is available. */
int av_read_play(AVFormatContext *s)
    if (s->iformat->read_play)
        return s->iformat->read_play(s);
        return avio_pause(s->pb, 0);
    return AVERROR(ENOSYS);
/* Pause playback: prefer the demuxer's read_pause callback, fall back to
 * pausing the I/O layer; ENOSYS if neither is available. */
int av_read_pause(AVFormatContext *s)
    if (s->iformat->read_pause)
        return s->iformat->read_pause(s);
        return avio_pause(s->pb, 1);
    return AVERROR(ENOSYS);
/* Free the last stream of s and all data it owns. The stream MUST be the
 * most recently added one (asserted), since the streams array is shrunk. */
void ff_free_stream(AVFormatContext *s, AVStream *st){
    av_assert0(s->nb_streams>0);
    av_assert0(s->streams[ s->nb_streams-1 ] == st);

        av_parser_close(st->parser);
    if (st->attached_pic.data)
        av_free_packet(&st->attached_pic);
    av_dict_free(&st->metadata);
    av_freep(&st->probe_data.buf);
    av_freep(&st->index_entries);
    av_freep(&st->codec->extradata);
    av_freep(&st->codec->subtitle_header);
    av_freep(&st->codec);
    av_freep(&st->priv_data);
        av_freep(&st->info->duration_error);
    av_freep(&st->info);
    /* remove the slot from the array last, after all members are freed */
    av_freep(&s->streams[ --s->nb_streams ]);
/* Free an AVFormatContext and everything it owns: private options, streams
 * (in reverse order), programs, chapters and metadata. */
void avformat_free_context(AVFormatContext *s)
    if (s->iformat && s->iformat->priv_class && s->priv_data)
        av_opt_free(s->priv_data);

    /* free streams back-to-front, as ff_free_stream() requires */
    for(i=s->nb_streams-1; i>=0; i--) {
        ff_free_stream(s, s->streams[i]);
    for(i=s->nb_programs-1; i>=0; i--) {
        av_dict_free(&s->programs[i]->metadata);
        av_freep(&s->programs[i]->stream_index);
        av_freep(&s->programs[i]);
    av_freep(&s->programs);
    av_freep(&s->priv_data);
    while(s->nb_chapters--) {
        av_dict_free(&s->chapters[s->nb_chapters]->metadata);
        av_freep(&s->chapters[s->nb_chapters]);
    av_freep(&s->chapters);
    av_dict_free(&s->metadata);
    av_freep(&s->streams);
#if FF_API_CLOSE_INPUT_FILE
/* Deprecated wrapper kept for API compatibility; forwards to
 * avformat_close_input(). */
void av_close_input_file(AVFormatContext *s)
    avformat_close_input(&s);
/* Close an input context: flush queued packets, invoke the demuxer's
 * read_close, free the context and (unless the I/O is caller-owned or the
 * format is AVFMT_NOFILE) close the underlying AVIOContext. Sets *ps to NULL. */
void avformat_close_input(AVFormatContext **ps)
    AVFormatContext *s = *ps;
    AVIOContext *pb = s->pb;

    /* do not close I/O the caller owns (custom IO / NOFILE formats) */
    if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
        (s->flags & AVFMT_FLAG_CUSTOM_IO))

    flush_packet_queue(s);

        if (s->iformat->read_close)
            s->iformat->read_close(s);

    avformat_free_context(s);
#if FF_API_NEW_STREAM
/* Deprecated wrapper kept for API compatibility; creates the stream via
 * avformat_new_stream() and applies the caller-supplied id. */
AVStream *av_new_stream(AVFormatContext *s, int id)
    AVStream *st = avformat_new_stream(s, NULL);
/**
 * Append a new stream to s, allocating its codec context (optionally
 * pre-configured for codec c) and initializing all timing fields to
 * "unknown". Returns the stream, or NULL on allocation failure.
 */
AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
    /* guard against overflow of the realloc size below */
    if (s->nb_streams >= INT_MAX/sizeof(*streams))
    streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams));
    s->streams = streams;

    st = av_mallocz(sizeof(AVStream));
    if (!(st->info = av_mallocz(sizeof(*st->info)))) {
    st->info->last_dts = AV_NOPTS_VALUE;

    st->codec = avcodec_alloc_context3(c);
        /* no default bitrate if decoding */
        st->codec->bit_rate = 0;
    st->index = s->nb_streams;
    st->start_time = AV_NOPTS_VALUE;
    st->duration = AV_NOPTS_VALUE;
        /* we set the current DTS to 0 so that formats without any timestamps
           but durations get some timestamps, formats with some unknown
           timestamps have their first few packets buffered and the
           timestamps corrected before they are returned to the user */
    st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
    st->first_dts = AV_NOPTS_VALUE;
    st->probe_packets = MAX_PROBE_PACKETS;
    st->pts_wrap_reference = AV_NOPTS_VALUE;
    st->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;

    /* default pts setting is MPEG-like */
    avpriv_set_pts_info(st, 33, 1, 90000);
    st->last_IP_pts = AV_NOPTS_VALUE;
    for(i=0; i<MAX_REORDER_DELAY+1; i++)
        st->pts_buffer[i]= AV_NOPTS_VALUE;
    st->reference_dts = AV_NOPTS_VALUE;

    st->sample_aspect_ratio = (AVRational){0,1};

#if FF_API_R_FRAME_RATE
    st->info->last_dts = AV_NOPTS_VALUE;
    st->info->fps_first_dts = AV_NOPTS_VALUE;
    st->info->fps_last_dts = AV_NOPTS_VALUE;

    s->streams[s->nb_streams++] = st;
/* Return the program with the given id, creating (and registering) it with
 * default discard/timing values if it does not exist yet. */
AVProgram *av_new_program(AVFormatContext *ac, int id)
    AVProgram *program=NULL;

    av_dlog(ac, "new_program: id=0x%04x\n", id);
    /* reuse an existing program with the same id */
    for(i=0; i<ac->nb_programs; i++)
        if(ac->programs[i]->id == id)
            program = ac->programs[i];

        program = av_mallocz(sizeof(AVProgram));
        dynarray_add(&ac->programs, &ac->nb_programs, program);
        program->discard = AVDISCARD_NONE;
    program->pts_wrap_reference = AV_NOPTS_VALUE;
    program->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;

    program->start_time =
    program->end_time = AV_NOPTS_VALUE;
/* Return the chapter with the given id, creating and registering it if it
 * does not exist yet; (re)sets its title, time base and start time. */
AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
    AVChapter *chapter = NULL;

    /* reuse an existing chapter with the same id */
    for(i=0; i<s->nb_chapters; i++)
        if(s->chapters[i]->id == id)
            chapter = s->chapters[i];

        chapter= av_mallocz(sizeof(AVChapter));
        dynarray_add(&s->chapters, &s->nb_chapters, chapter);
    av_dict_set(&chapter->metadata, "title", title, 0);
    chapter->time_base= time_base;
    chapter->start = start;
/* Add stream index idx to the program with id progid, ignoring invalid
 * indices and duplicates. */
void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
    AVProgram *program=NULL;

    if (idx >= ac->nb_streams) {
        av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);

    for(i=0; i<ac->nb_programs; i++){
        if(ac->programs[i]->id != progid)
        program = ac->programs[i];
        /* skip if the stream is already part of this program */
        for(j=0; j<program->nb_stream_indexes; j++)
            if(program->stream_index[j] == idx)

        tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
        program->stream_index = tmp;
        program->stream_index[program->nb_stream_indexes++] = idx;
3375 static void print_fps(double d, const char *postfix){
3376 uint64_t v= lrintf(d*100);
3377 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3378 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3379 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/* Log a metadata dictionary, one "key: value" line per entry, skipping the
 * "language" key (shown elsewhere) and sanitizing control characters. */
static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
    /* skip entirely if the only entry is "language" */
    if(m && !(av_dict_count(m) == 1 && av_dict_get(m, "language", NULL, 0))){
        AVDictionaryEntry *tag=NULL;

        av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
        while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
            if(strcmp("language", tag->key)){
                const char *p = tag->value;
                av_log(ctx, AV_LOG_INFO, "%s  %-16s: ", indent, tag->key);
                    /* print up to the next control char (BS/LF/VT/FF/CR) */
                    size_t len = strcspn(p, "\x8\xa\xb\xc\xd");
                    av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
                    av_log(ctx, AV_LOG_INFO, "%s", tmp);
                    /* CR becomes a space; LF starts a continuation line */
                    if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
                    if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s  %-16s: ", indent, "");
                av_log(ctx, AV_LOG_INFO, "\n");
3408 /* "user interface" functions */
/* Log a one-line (plus metadata) description of stream i of ic:
 * "Stream #index:i[0xid](lang): codec string, SAR/DAR, rates, dispositions". */
static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
    int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
    AVStream *st = ic->streams[i];
    int g = av_gcd(st->time_base.num, st->time_base.den);
    AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
    avcodec_string(buf, sizeof(buf), st->codec, is_output);
    av_log(NULL, AV_LOG_INFO, "    Stream #%d:%d", index, i);
    /* the pid is an important information, so we display it */
    /* XXX: add a generic system */
    if (flags & AVFMT_SHOW_IDS)
        av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
        av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
    av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
    av_log(NULL, AV_LOG_INFO, ": %s", buf);
    /* only show SAR/DAR if the stream-level SAR differs from the codec's */
    if (st->sample_aspect_ratio.num && // default
        av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
        AVRational display_aspect_ratio;
        av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
                  st->codec->width*st->sample_aspect_ratio.num,
                  st->codec->height*st->sample_aspect_ratio.den,
        av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
               st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
               display_aspect_ratio.num, display_aspect_ratio.den);
    if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
        /* fps / tbr / tbn / tbc summary */
        if(st->avg_frame_rate.den && st->avg_frame_rate.num)
            print_fps(av_q2d(st->avg_frame_rate), "fps");
#if FF_API_R_FRAME_RATE
        if(st->r_frame_rate.den && st->r_frame_rate.num)
            print_fps(av_q2d(st->r_frame_rate), "tbr");
        if(st->time_base.den && st->time_base.num)
            print_fps(1/av_q2d(st->time_base), "tbn");
        if(st->codec->time_base.den && st->codec->time_base.num)
            print_fps(1/av_q2d(st->codec->time_base), "tbc");
    /* disposition flags, one tag per set bit */
    if (st->disposition & AV_DISPOSITION_DEFAULT)
        av_log(NULL, AV_LOG_INFO, " (default)");
    if (st->disposition & AV_DISPOSITION_DUB)
        av_log(NULL, AV_LOG_INFO, " (dub)");
    if (st->disposition & AV_DISPOSITION_ORIGINAL)
        av_log(NULL, AV_LOG_INFO, " (original)");
    if (st->disposition & AV_DISPOSITION_COMMENT)
        av_log(NULL, AV_LOG_INFO, " (comment)");
    if (st->disposition & AV_DISPOSITION_LYRICS)
        av_log(NULL, AV_LOG_INFO, " (lyrics)");
    if (st->disposition & AV_DISPOSITION_KARAOKE)
        av_log(NULL, AV_LOG_INFO, " (karaoke)");
    if (st->disposition & AV_DISPOSITION_FORCED)
        av_log(NULL, AV_LOG_INFO, " (forced)");
    if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
        av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
    if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
        av_log(NULL, AV_LOG_INFO, " (visual impaired)");
    if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
        av_log(NULL, AV_LOG_INFO, " (clean effects)");
    av_log(NULL, AV_LOG_INFO, "\n");
    dump_metadata(NULL, st->metadata, "    ");
3473 void av_dump_format(AVFormatContext *ic,
3479 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3480 if (ic->nb_streams && !printed)
3483 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3484 is_output ? "Output" : "Input",
3486 is_output ? ic->oformat->name : ic->iformat->name,
3487 is_output ? "to" : "from", url);
3488 dump_metadata(NULL, ic->metadata, " ");
3490 av_log(NULL, AV_LOG_INFO, " Duration: ");
3491 if (ic->duration != AV_NOPTS_VALUE) {
3492 int hours, mins, secs, us;
3493 int64_t duration = ic->duration + 5000;
3494 secs = duration / AV_TIME_BASE;
3495 us = duration % AV_TIME_BASE;
3500 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3501 (100 * us) / AV_TIME_BASE);
3503 av_log(NULL, AV_LOG_INFO, "N/A");
3505 if (ic->start_time != AV_NOPTS_VALUE) {
3507 av_log(NULL, AV_LOG_INFO, ", start: ");
3508 secs = ic->start_time / AV_TIME_BASE;
3509 us = abs(ic->start_time % AV_TIME_BASE);
3510 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3511 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3513 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3515 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3517 av_log(NULL, AV_LOG_INFO, "N/A");
3519 av_log(NULL, AV_LOG_INFO, "\n");
3521 for (i = 0; i < ic->nb_chapters; i++) {
3522 AVChapter *ch = ic->chapters[i];
3523 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3524 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3525 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3527 dump_metadata(NULL, ch->metadata, " ");
3529 if(ic->nb_programs) {
3530 int j, k, total = 0;
3531 for(j=0; j<ic->nb_programs; j++) {
3532 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3534 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3535 name ? name->value : "");
3536 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3537 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3538 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3539 printed[ic->programs[j]->stream_index[k]] = 1;
3541 total += ic->programs[j]->nb_stream_indexes;
3543 if (total < ic->nb_streams)
3544 av_log(NULL, AV_LOG_INFO, " No Program\n");
3546 for(i=0;i<ic->nb_streams;i++)
3548 dump_stream_format(ic, i, index, is_output);
3553 uint64_t ff_ntp_time(void)
3555 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
3558 int av_get_frame_filename(char *buf, int buf_size,
3559 const char *path, int number)
3562 char *q, buf1[20], c;
3563 int nd, len, percentd_found;
3575 while (av_isdigit(*p)) {
3576 nd = nd * 10 + *p++ - '0';
3579 } while (av_isdigit(c));
3588 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3590 if ((q - buf + len) > buf_size - 1)
3592 memcpy(q, buf1, len);
3600 if ((q - buf) < buf_size - 1)
3604 if (!percentd_found)
3613 static void hex_dump_internal(void *avcl, FILE *f, int level,
3614 const uint8_t *buf, int size)
3617 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3619 for(i=0;i<size;i+=16) {
3626 PRINT(" %02x", buf[i+j]);
3631 for(j=0;j<len;j++) {
3633 if (c < ' ' || c > '~')
3642 void av_hex_dump(FILE *f, const uint8_t *buf, int size)
3644 hex_dump_internal(NULL, f, 0, buf, size);
3647 void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size)
3649 hex_dump_internal(avcl, NULL, level, buf, size);
3652 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3654 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3655 PRINT("stream #%d:\n", pkt->stream_index);
3656 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3657 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3658 /* DTS is _always_ valid after av_read_frame() */
3660 if (pkt->dts == AV_NOPTS_VALUE)
3663 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3664 /* PTS may not be known if B-frames are present. */
3666 if (pkt->pts == AV_NOPTS_VALUE)
3669 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3671 PRINT(" size=%d\n", pkt->size);
3674 av_hex_dump(f, pkt->data, pkt->size);
3678 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3680 AVRational tb = { 1, AV_TIME_BASE };
3681 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb);
3685 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3687 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
3691 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3693 AVRational tb = { 1, AV_TIME_BASE };
3694 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb);
3698 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3701 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
3704 void av_url_split(char *proto, int proto_size,
3705 char *authorization, int authorization_size,
3706 char *hostname, int hostname_size,
3708 char *path, int path_size,
3711 const char *p, *ls, *ls2, *at, *at2, *col, *brk;
3713 if (port_ptr) *port_ptr = -1;
3714 if (proto_size > 0) proto[0] = 0;
3715 if (authorization_size > 0) authorization[0] = 0;
3716 if (hostname_size > 0) hostname[0] = 0;
3717 if (path_size > 0) path[0] = 0;
3719 /* parse protocol */
3720 if ((p = strchr(url, ':'))) {
3721 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3726 /* no protocol means plain filename */
3727 av_strlcpy(path, url, path_size);
3731 /* separate path from hostname */
3732 ls = strchr(p, '/');
3733 ls2 = strchr(p, '?');
3737 ls = FFMIN(ls, ls2);
3739 av_strlcpy(path, ls, path_size);
3741 ls = &p[strlen(p)]; // XXX
3743 /* the rest is hostname, use that to parse auth/port */
3745 /* authorization (user[:pass]@hostname) */
3747 while ((at = strchr(p, '@')) && at < ls) {
3748 av_strlcpy(authorization, at2,
3749 FFMIN(authorization_size, at + 1 - at2));
3750 p = at + 1; /* skip '@' */
3753 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3755 av_strlcpy(hostname, p + 1,
3756 FFMIN(hostname_size, brk - p));
3757 if (brk[1] == ':' && port_ptr)
3758 *port_ptr = atoi(brk + 2);
3759 } else if ((col = strchr(p, ':')) && col < ls) {
3760 av_strlcpy(hostname, p,
3761 FFMIN(col + 1 - p, hostname_size));
3762 if (port_ptr) *port_ptr = atoi(col + 1);
3764 av_strlcpy(hostname, p,
3765 FFMIN(ls + 1 - p, hostname_size));
/**
 * Write the hexadecimal representation of a byte buffer into buff.
 *
 * @param buff      destination; must hold at least 2*s chars.
 *                  NOTE: no NUL terminator is written — callers terminate.
 * @param src       source bytes
 * @param s         number of source bytes
 * @param lowercase nonzero selects lowercase digits a-f
 * @return buff
 *
 * The elided middle rows of the digit tables are restored here: they are
 * unambiguously the full '0'-'9','A'-'F' / '0'-'9','a'-'f' nibble tables.
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    static const char hex_table_uc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'A', 'B',
                                           'C', 'D', 'E', 'F' };
    static const char hex_table_lc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'a', 'b',
                                           'c', 'd', 'e', 'f' };
    const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
    int i;

    for (i = 0; i < s; i++) {
        buff[i * 2]     = hex_table[src[i] >> 4];  /* high nibble */
        buff[i * 2 + 1] = hex_table[src[i] & 0xF]; /* low nibble  */
    }

    return buff;
}
3790 int ff_hex_to_data(uint8_t *data, const char *p)
3797 p += strspn(p, SPACE_CHARS);
3800 c = av_toupper((unsigned char) *p++);
3801 if (c >= '0' && c <= '9')
3803 else if (c >= 'A' && c <= 'F')
#if FF_API_SET_PTS_INFO
/**
 * Deprecated public wrapper around avpriv_set_pts_info(), kept only while
 * FF_API_SET_PTS_INFO is enabled for backward compatibility.
 *
 * Fix: the visible source opened `#if FF_API_SET_PTS_INFO` without a
 * matching `#endif`, leaving the preprocessor conditional unbalanced;
 * the terminator is restored here.
 */
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
                     unsigned int pts_num, unsigned int pts_den)
{
    avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
}
#endif
3826 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
3827 unsigned int pts_num, unsigned int pts_den)
3830 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
3831 if(new_tb.num != pts_num)
3832 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
3834 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
3836 if(new_tb.num <= 0 || new_tb.den <= 0) {
3837 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase %d/%d for st:%d\n", new_tb.num, new_tb.den, s->index);
3840 s->time_base = new_tb;
3841 av_codec_set_pkt_timebase(s->codec, new_tb);
3842 s->pts_wrap_bits = pts_wrap_bits;
3845 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3848 const char *ptr = str;
3850 /* Parse key=value pairs. */
3853 char *dest = NULL, *dest_end;
3854 int key_len, dest_len = 0;
3856 /* Skip whitespace and potential commas. */
3857 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
3864 if (!(ptr = strchr(key, '=')))
3867 key_len = ptr - key;
3869 callback_get_buf(context, key, key_len, &dest, &dest_len);
3870 dest_end = dest + dest_len - 1;
3874 while (*ptr && *ptr != '\"') {
3878 if (dest && dest < dest_end)
3882 if (dest && dest < dest_end)
3890 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
3891 if (dest && dest < dest_end)
3899 int ff_find_stream_index(AVFormatContext *s, int id)
3902 for (i = 0; i < s->nb_streams; i++) {
3903 if (s->streams[i]->id == id)
3909 int64_t ff_iso8601_to_unix_time(const char *datestr)
3911 struct tm time1 = {0}, time2 = {0};
3913 ret1 = av_small_strptime(datestr, "%Y - %m - %d %H:%M:%S", &time1);
3914 ret2 = av_small_strptime(datestr, "%Y - %m - %dT%H:%M:%S", &time2);
3916 return av_timegm(&time2);
3918 return av_timegm(&time1);
3921 int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance)
3924 if (ofmt->query_codec)
3925 return ofmt->query_codec(codec_id, std_compliance);
3926 else if (ofmt->codec_tag)
3927 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
3928 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
3929 codec_id == ofmt->subtitle_codec)
3932 return AVERROR_PATCHWELCOME;
3935 int avformat_network_init(void)
3939 ff_network_inited_globally = 1;
3940 if ((ret = ff_network_init()) < 0)
3947 int avformat_network_deinit(void)
3956 int ff_add_param_change(AVPacket *pkt, int32_t channels,
3957 uint64_t channel_layout, int32_t sample_rate,
3958 int32_t width, int32_t height)
3964 return AVERROR(EINVAL);
3967 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
3969 if (channel_layout) {
3971 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
3975 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
3977 if (width || height) {
3979 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
3981 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
3983 return AVERROR(ENOMEM);
3984 bytestream_put_le32(&data, flags);
3986 bytestream_put_le32(&data, channels);
3988 bytestream_put_le64(&data, channel_layout);
3990 bytestream_put_le32(&data, sample_rate);
3991 if (width || height) {
3992 bytestream_put_le32(&data, width);
3993 bytestream_put_le32(&data, height);
3998 AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4000 AVRational undef = {0, 1};
4001 AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4002 AVRational codec_sample_aspect_ratio = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
4003 AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
4005 av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4006 stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4007 if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4008 stream_sample_aspect_ratio = undef;
4010 av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4011 frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4012 if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4013 frame_sample_aspect_ratio = undef;
4015 if (stream_sample_aspect_ratio.num)
4016 return stream_sample_aspect_ratio;
4018 return frame_sample_aspect_ratio;
4021 AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
4023 AVRational fr = st->r_frame_rate;
4025 if (st->codec->ticks_per_frame > 1) {
4026 AVRational codec_fr = av_inv_q(st->codec->time_base);
4027 AVRational avg_fr = st->avg_frame_rate;
4028 codec_fr.den *= st->codec->ticks_per_frame;
4029 if ( codec_fr.num > 0 && codec_fr.den > 0 && av_q2d(codec_fr) < av_q2d(fr)*0.7
4030 && fabs(1.0 - av_q2d(av_div_q(avg_fr, fr))) > 0.1)
4037 int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,
4040 if (*spec <= '9' && *spec >= '0') /* opt:index */
4041 return strtol(spec, NULL, 0) == st->index;
4042 else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
4043 *spec == 't') { /* opt:[vasdt] */
4044 enum AVMediaType type;
4047 case 'v': type = AVMEDIA_TYPE_VIDEO; break;
4048 case 'a': type = AVMEDIA_TYPE_AUDIO; break;
4049 case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
4050 case 'd': type = AVMEDIA_TYPE_DATA; break;
4051 case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
4052 default: av_assert0(0);
4054 if (type != st->codec->codec_type)
4056 if (*spec++ == ':') { /* possibly followed by :index */
4057 int i, index = strtol(spec, NULL, 0);
4058 for (i = 0; i < s->nb_streams; i++)
4059 if (s->streams[i]->codec->codec_type == type && index-- == 0)
4060 return i == st->index;
4064 } else if (*spec == 'p' && *(spec + 1) == ':') {
4068 prog_id = strtol(spec, &endptr, 0);
4069 for (i = 0; i < s->nb_programs; i++) {
4070 if (s->programs[i]->id != prog_id)
4073 if (*endptr++ == ':') {
4074 int stream_idx = strtol(endptr, NULL, 0);
4075 return stream_idx >= 0 &&
4076 stream_idx < s->programs[i]->nb_stream_indexes &&
4077 st->index == s->programs[i]->stream_index[stream_idx];
4080 for (j = 0; j < s->programs[i]->nb_stream_indexes; j++)
4081 if (st->index == s->programs[i]->stream_index[j])
4085 } else if (*spec == '#') {
4088 sid = strtol(spec + 1, &endptr, 0);
4090 return st->id == sid;
4091 } else if (!*spec) /* empty specifier, matches everything */
4094 av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
4095 return AVERROR(EINVAL);
4098 void ff_generate_avci_extradata(AVStream *st)
4100 static const uint8_t avci100_1080p_extradata[] = {
4102 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4103 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4104 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4105 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
4106 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
4107 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
4108 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
4109 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
4110 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4112 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4115 static const uint8_t avci100_1080i_extradata[] = {
4117 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4118 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4119 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4120 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
4121 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
4122 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
4123 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
4124 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
4125 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
4126 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
4127 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
4129 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4132 static const uint8_t avci50_1080i_extradata[] = {
4134 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
4135 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
4136 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
4137 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
4138 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
4139 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
4140 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
4141 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
4142 0x81, 0x13, 0xf7, 0xff, 0x80, 0x02, 0x00, 0x01,
4143 0xf1, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
4144 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
4146 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
4149 static const uint8_t avci100_720p_extradata[] = {
4151 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4152 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
4153 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
4154 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
4155 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
4156 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
4157 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
4158 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
4159 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
4160 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
4162 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
4166 const uint8_t *data = 0;
4167 if (st->codec->width == 1920) {
4168 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
4169 data = avci100_1080p_extradata;
4170 size = sizeof(avci100_1080p_extradata);
4172 data = avci100_1080i_extradata;
4173 size = sizeof(avci100_1080i_extradata);
4175 } else if (st->codec->width == 1440) {
4176 data = avci50_1080i_extradata;
4177 size = sizeof(avci50_1080i_extradata);
4178 } else if (st->codec->width == 1280) {
4179 data = avci100_720p_extradata;
4180 size = sizeof(avci100_720p_extradata);
4184 av_freep(&st->codec->extradata);
4185 st->codec->extradata_size = 0;
4186 st->codec->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
4187 if (!st->codec->extradata)
4189 memcpy(st->codec->extradata, data, size);
4190 st->codec->extradata_size = size;