2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "avio_internal.h"
25 #include "libavcodec/internal.h"
26 #include "libavcodec/raw.h"
27 #include "libavcodec/bytestream.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/dict.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/pixdesc.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/avstring.h"
36 #include "libavutil/mathematics.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/time.h"
39 #include "libavutil/timestamp.h"
41 #include "audiointerleave.h"
53 * various utility functions for use within FFmpeg
56 unsigned avformat_version(void)
58 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
59 return LIBAVFORMAT_VERSION_INT;
62 const char *avformat_configuration(void)
64 return FFMPEG_CONFIGURATION;
67 const char *avformat_license(void)
69 #define LICENSE_PREFIX "libavformat license: "
70 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Base value for "relative" timestamps: large enough that real timestamps
 * never reach it, leaving a 2^48-wide band below INT64_MAX for internal use. */
#define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))

/**
 * Return non-zero if ts lies in the relative-timestamp band
 * (within 2^48 below RELATIVE_TS_BASE or above it).
 */
static int is_relative(int64_t ts) {
    return ts > (RELATIVE_TS_BASE - (1LL<<48));
}
80 * Wrap a given time stamp, if there is an indication for an overflow
83 * @param timestamp the time stamp to wrap
84 * @return resulting time stamp
86 static int64_t wrap_timestamp(AVStream *st, int64_t timestamp)
88 if (st->pts_wrap_behavior != AV_PTS_WRAP_IGNORE &&
89 st->pts_wrap_reference != AV_NOPTS_VALUE && timestamp != AV_NOPTS_VALUE) {
90 if (st->pts_wrap_behavior == AV_PTS_WRAP_ADD_OFFSET &&
91 timestamp < st->pts_wrap_reference)
92 return timestamp + (1ULL<<st->pts_wrap_bits);
93 else if (st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET &&
94 timestamp >= st->pts_wrap_reference)
95 return timestamp - (1ULL<<st->pts_wrap_bits);
/* Accessor generation for fields kept out of the public ABI layout —
 * presumably expands to av_{stream,format}_get/set_* functions; see the
 * MAKE_ACCESSORS definition in internal.h to confirm. */
MAKE_ACCESSORS(AVStream, stream, AVRational, r_frame_rate)
MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, video_codec)
MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, audio_codec)
MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, subtitle_codec)
105 static AVCodec *find_decoder(AVFormatContext *s, AVStream *st, enum AVCodecID codec_id)
107 if (st->codec->codec)
108 return st->codec->codec;
110 switch(st->codec->codec_type){
111 case AVMEDIA_TYPE_VIDEO:
112 if(s->video_codec) return s->video_codec;
114 case AVMEDIA_TYPE_AUDIO:
115 if(s->audio_codec) return s->audio_codec;
117 case AVMEDIA_TYPE_SUBTITLE:
118 if(s->subtitle_codec) return s->subtitle_codec;
122 return avcodec_find_decoder(codec_id);
125 int av_format_get_probe_score(const AVFormatContext *s)
127 return s->probe_score;
130 /* an arbitrarily chosen "sane" max packet size -- 50M */
131 #define SANE_CHUNK_SIZE (50000000)
133 int ffio_limit(AVIOContext *s, int size)
136 int64_t remaining= s->maxsize - avio_tell(s);
137 if(remaining < size){
138 int64_t newsize= avio_size(s);
139 if(!s->maxsize || s->maxsize<newsize)
140 s->maxsize= newsize - !newsize;
141 remaining= s->maxsize - avio_tell(s);
142 remaining= FFMAX(remaining, 0);
145 if(s->maxsize>=0 && remaining+1 < size){
146 av_log(NULL, remaining ? AV_LOG_ERROR : AV_LOG_DEBUG, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
154 * Read the data in sane-sized chunks and append to pkt.
155 * Return the number of bytes read or an error.
157 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
159 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
160 int orig_size = pkt->size;
164 int prev_size = pkt->size;
168 * When the caller requests a lot of data, limit it to the amount left
169 * in file or SANE_CHUNK_SIZE when it is not known
172 if (read_size > SANE_CHUNK_SIZE/10) {
173 read_size = ffio_limit(s, read_size);
174 // If filesize/maxsize is unknown, limit to SANE_CHUNK_SIZE
176 read_size = FFMIN(read_size, SANE_CHUNK_SIZE);
179 ret = av_grow_packet(pkt, read_size);
183 ret = avio_read(s, pkt->data + prev_size, read_size);
184 if (ret != read_size) {
185 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
192 pkt->flags |= AV_PKT_FLAG_CORRUPT;
197 return pkt->size > orig_size ? pkt->size - orig_size : ret;
200 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
205 pkt->pos = avio_tell(s);
207 return append_packet_chunked(s, pkt, size);
210 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
213 return av_get_packet(s, pkt, size);
214 return append_packet_chunked(s, pkt, size);
/**
 * Check whether filename contains a valid frame-number pattern
 * (e.g. "%03d") understood by av_get_frame_filename().
 * Returns non-zero when it does.
 */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename &&
           (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
}
/* Probe all registered input formats against pd and return the best match,
 * storing its score in *score_ret. An ID3v2 header is skipped before probing
 * (demuxers should not score on the tag itself), and formats matched only by
 * ID3 presence are capped below AVPROBE_SCORE_EXTENSION/2.
 * NOTE(review): this listing is missing several lines (braces, the nodat
 * handling and the fmt assignment inside the score comparison); code text is
 * left untouched, comments only added. */
AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
    AVProbeData lpd = *pd;
    AVInputFormat *fmt1 = NULL, *fmt;
    int score, nodat = 0, score_max=0;
    const static uint8_t zerobuffer[AVPROBE_PADDING_SIZE];
    /* probing with a NULL buffer uses the zero padding instead */
    lpd.buf = zerobuffer;
    /* skip a leading ID3v2 tag so content probing sees real stream data */
    if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
        int id3len = ff_id3v2_tag_len(lpd.buf);
        if (lpd.buf_size > id3len + 16) {
            lpd.buf_size -= id3len;
    while ((fmt1 = av_iformat_next(fmt1))) {
        /* only consider formats matching the opened/NOFILE state */
        if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
        if (fmt1->read_probe) {
            score = fmt1->read_probe(&lpd);
            /* extension match boosts a content probe slightly */
            if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
                score = FFMAX(score, nodat ? AVPROBE_SCORE_EXTENSION / 2 - 1 : 1);
        } else if (fmt1->extensions) {
            /* no content probe: extension-only match */
            if (av_match_ext(lpd.filename, fmt1->extensions)) {
                score = AVPROBE_SCORE_EXTENSION;
        if (score > score_max) {
        }else if (score == score_max)
    /* a match found only because of the ID3 tag is not trustworthy */
    score_max = FFMIN(AVPROBE_SCORE_EXTENSION / 2 - 1, score_max);
    *score_ret= score_max;
270 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
273 AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
274 if(score_ret > *score_max){
275 *score_max= score_ret;
281 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
283 return av_probe_input_format2(pd, is_opened, &score);
/* Run the format probe over a single stream's accumulated probe_data and, if
 * the detected format maps to a known raw codec (table below), force that
 * codec id/type onto the stream. Used for streams flagged with
 * request_probe > 0 (e.g. raw elementary streams inside MPEG-TS).
 * NOTE(review): this listing is missing lines (table terminator, score/i
 * declarations, closing braces); code left byte-identical, comments only. */
static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
    static const struct {
        const char *name; enum AVCodecID id; enum AVMediaType type;
        /* demuxer-name -> codec mapping for probeable raw formats */
        { "aac" , AV_CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
        { "ac3" , AV_CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
        { "dts" , AV_CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
        { "eac3" , AV_CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
        { "h264" , AV_CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
        { "loas" , AV_CODEC_ID_AAC_LATM , AVMEDIA_TYPE_AUDIO },
        { "m4v" , AV_CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
        { "mp3" , AV_CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
        { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
    AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
    /* only accept a detection at least as strong as the requested level */
    if (fmt && st->request_probe <= score) {
        av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
               pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
        for (i = 0; fmt_id_type[i].name; i++) {
            if (!strcmp(fmt->name, fmt_id_type[i].name)) {
                st->codec->codec_id   = fmt_id_type[i].id;
                st->codec->codec_type = fmt_id_type[i].type;
320 /************************************************************/
321 /* input media file */
323 int av_demuxer_open(AVFormatContext *ic){
326 if (ic->iformat->read_header) {
327 err = ic->iformat->read_header(ic);
332 if (ic->pb && !ic->data_offset)
333 ic->data_offset = avio_tell(ic->pb);
/* Probe the format of data available through pb, reading progressively larger
 * buffers (doubling from PROBE_BUF_MIN up to max_probe_size) until a format is
 * detected. On success the read bytes are pushed back into pb so demuxing can
 * start from the beginning; returns the probe score, or a negative error.
 * NOTE(review): this listing is missing lines (loop braces, error-path goto
 * labels, the PROBESTAT block guards); code left byte-identical. */
int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
                           const char *filename, void *logctx,
                           unsigned int offset, unsigned int max_probe_size)
    /* negative buf_size signals the initial offset to skip */
    AVProbeData pd = { filename ? filename : "", NULL, -offset };
    unsigned char *buf = NULL;
    int ret = 0, probe_size, buf_offset = 0;
    if (!max_probe_size) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size > PROBE_BUF_MAX) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size < PROBE_BUF_MIN) {
        av_log(logctx, AV_LOG_ERROR,
               "Specified probe size value %u cannot be < %u\n", max_probe_size, PROBE_BUF_MIN);
        return AVERROR(EINVAL);
    if (offset >= max_probe_size) {
        return AVERROR(EINVAL);
    /* HTTP-style hint: aacp MIME type short-circuits to the aac demuxer */
    if (!*fmt && pb->av_class && av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type) >= 0 && mime_type) {
        if (!av_strcasecmp(mime_type, "audio/aacp")) {
            *fmt = av_find_input_format("aac");
        av_freep(&mime_type);
    for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
        probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
        if (probe_size < offset) {
        /* on the last pass accept any positive score */
        score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0;

        /* read probe data */
        if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0)
        if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
            /* fail if error was not end of file, otherwise, lower score */
            if (ret != AVERROR_EOF) {
            ret = 0;            /* error was end of file, nothing read */
        pd.buf_size = buf_offset += ret;
        pd.buf = &buf[offset];

        /* demuxers may read past buf_size; keep the padding zeroed */
        memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);

        /* guess file format */
        *fmt = av_probe_input_format2(&pd, 1, &score);
        if(score <= AVPROBE_SCORE_RETRY){ //this can only be true in the last iteration
            av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
            av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
        FILE *f = fopen("probestat.tmp", "ab");
        fprintf(f, "probe_size:%d format:%s score:%d filename:%s\n", probe_size, (*fmt)->name, score, filename);
        return AVERROR_INVALIDDATA;

    /* rewind. reuse probe buffer to avoid seeking */
    ret = ffio_rewind_with_probe_data(pb, &buf, pd.buf_size);
    return ret < 0 ? ret : score;
421 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
422 const char *filename, void *logctx,
423 unsigned int offset, unsigned int max_probe_size)
425 int ret = av_probe_input_buffer2(pb, fmt, filename, logctx, offset, max_probe_size);
426 return ret < 0 ? ret : 0;
/* open input file and probe the format if necessary */
/* Three cases: a caller-supplied pb (custom I/O, probed directly), a NOFILE
 * format (no I/O opened at all), or a plain file which is opened with
 * avio_open2 and then probed. Returns the probe score or 0 / negative error.
 * NOTE(review): listing is missing lines (ret declaration, early returns,
 * closing braces); code left byte-identical, comments only added. */
static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
    AVProbeData pd = {filename, NULL, 0};
    int score = AVPROBE_SCORE_RETRY;
        /* caller provided its own AVIOContext */
        s->flags |= AVFMT_FLAG_CUSTOM_IO;
        return av_probe_input_buffer2(s->pb, &s->iformat, filename, s, 0, s->probesize);
    else if (s->iformat->flags & AVFMT_NOFILE)
        av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
                                  "will be ignored with AVFMT_NOFILE format.\n");
    /* NOFILE formats, or a format detected from the filename alone,
     * need no I/O before read_header */
    if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
        (!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score))))
    if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
                          &s->interrupt_callback, options)) < 0)
    return av_probe_input_buffer2(s->pb, &s->iformat, filename, s, 0, s->probesize);
459 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
460 AVPacketList **plast_pktl){
461 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
466 (*plast_pktl)->next = pktl;
468 *packet_buffer = pktl;
470 /* add the packet in the buffered packet list */
476 int avformat_queue_attached_pictures(AVFormatContext *s)
479 for (i = 0; i < s->nb_streams; i++)
480 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
481 s->streams[i]->discard < AVDISCARD_ALL) {
482 AVPacket copy = s->streams[i]->attached_pic;
483 copy.buf = av_buffer_ref(copy.buf);
485 return AVERROR(ENOMEM);
487 add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end);
/* Open an input stream: allocate/validate the context, apply options, open
 * and probe the input (init_input), allocate demuxer private data, read the
 * container header, attach any ID3v2 APIC pictures, and record data_offset.
 * On failure the context is freed and *ps is reset.
 * NOTE(review): listing is missing lines (ret declaration, several closing
 * braces and goto fail statements); code left byte-identical. */
int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
    AVFormatContext *s = *ps;
    AVDictionary *tmp = NULL;
    ID3v2ExtraMeta *id3v2_extra_meta = NULL;
    if (!s && !(s = avformat_alloc_context()))
        return AVERROR(ENOMEM);
        /* a non-NULL *ps without an av_class cannot have come from
         * avformat_alloc_context() */
        av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
        return AVERROR(EINVAL);
    /* work on a copy so caller's dict reports unconsumed options */
    av_dict_copy(&tmp, *options, 0);
    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
    if ((ret = init_input(s, filename, &tmp)) < 0)
    s->probe_score = ret;
    avio_skip(s->pb, s->skip_initial_bytes);

    /* check filename in case an image number is expected */
    if (s->iformat->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            ret = AVERROR(EINVAL);
    s->duration = s->start_time = AV_NOPTS_VALUE;
    av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));

    /* allocate private data */
    if (s->iformat->priv_data_size > 0) {
        if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
            ret = AVERROR(ENOMEM);
        if (s->iformat->priv_class) {
            *(const AVClass**)s->priv_data = s->iformat->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)

    /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
        ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
    if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
        if ((ret = s->iformat->read_header(s)) < 0)
    /* only a few raw-ish demuxers can use the extra ID3 data (APIC) */
    if (id3v2_extra_meta) {
        if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") ||
            !strcmp(s->iformat->name, "tta")) {
            if((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
            av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n");
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    if ((ret = avformat_queue_attached_pictures(s)) < 0)
    if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
        s->data_offset = avio_tell(s->pb);

    s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
        av_dict_free(options);
    /* failure path: release everything we acquired */
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
    avformat_free_context(s);
587 /*******************************************************/
589 static void force_codec_ids(AVFormatContext *s, AVStream *st)
591 switch(st->codec->codec_type){
592 case AVMEDIA_TYPE_VIDEO:
593 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
595 case AVMEDIA_TYPE_AUDIO:
596 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
598 case AVMEDIA_TYPE_SUBTITLE:
599 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
/* Accumulate packet payloads into st->probe_data while request_probe > 0 and,
 * once enough data is buffered (or the stream ends, pkt==NULL), run
 * set_codec_from_probe_data to identify the codec. request_probe is set to -1
 * when probing concludes, successfully or not.
 * NOTE(review): listing is missing lines (the pkt!=NULL branch header, end
 * declaration, braces); code left byte-identical, comments only added. */
static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
    if(st->request_probe>0){
        AVProbeData *pd = &st->probe_data;
        av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
            /* grow the probe buffer and append this packet's payload */
            uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
                av_log(s, AV_LOG_WARNING,
                       "Failed to reallocate probe buffer for stream %d\n",
            memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
            pd->buf_size += pkt->size;
            memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
            /* flush (pkt==NULL): no more data will come */
            st->probe_packets = 0;
                av_log(s, AV_LOG_WARNING, "nothing to probe for stream %d\n",
        end= s->raw_packet_buffer_remaining_size <= 0
                || st->probe_packets<=0;

        /* probe at buffer-size power-of-two boundaries, or when forced */
        if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
            int score= set_codec_from_probe_data(s, st, pd);
            if( (st->codec->codec_id != AV_CODEC_ID_NONE && score > AVPROBE_SCORE_RETRY)
                st->request_probe= -1;
                if(st->codec->codec_id != AV_CODEC_ID_NONE){
                    av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
                    av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
            force_codec_ids(s, st);
/* Fetch the next raw packet: first drain the raw_packet_buffer (packets held
 * back while their stream was still being probed), otherwise call the
 * demuxer's read_packet. Applies wrap correction, forced codec ids and
 * wallclock timestamps; packets from still-probing streams are queued rather
 * than returned.
 * NOTE(review): listing is missing lines (ret/i/st declarations, loop
 * headers, goto/return statements, braces); code left byte-identical. */
int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
        AVPacketList *pktl = s->raw_packet_buffer;
            st = s->streams[pkt->stream_index];
            /* buffer budget exhausted: force-finish probing */
            if (s->raw_packet_buffer_remaining_size <= 0) {
                if ((err = probe_codec(s, st, NULL)) < 0)
            if(st->request_probe <= 0){
                /* stream identified: pop the packet and return it */
                s->raw_packet_buffer = pktl->next;
                s->raw_packet_buffer_remaining_size += pkt->size;
        ret= s->iformat->read_packet(s, pkt);
            if (!pktl || ret == AVERROR(EAGAIN))
            /* demuxer is done: flush probing on all pending streams */
            for (i = 0; i < s->nb_streams; i++) {
                if (st->probe_packets) {
                    if ((err = probe_codec(s, st, NULL)) < 0)
                av_assert0(st->request_probe <= 0);
        if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
            (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
            av_log(s, AV_LOG_WARNING,
                   "Dropped corrupted packet (stream = %d)\n",
        if(pkt->stream_index >= (unsigned)s->nb_streams){
            av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
        st= s->streams[pkt->stream_index];
        pkt->dts = wrap_timestamp(st, pkt->dts);
        pkt->pts = wrap_timestamp(st, pkt->pts);

        force_codec_ids(s, st);

        /* TODO: audio: time filter; video: frame reordering (pts != dts) */
        if (s->use_wallclock_as_timestamps)
            pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);

        if(!pktl && st->request_probe <= 0)
        /* stream still being probed: keep the packet buffered */
        add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
        s->raw_packet_buffer_remaining_size -= pkt->size;

        if ((err = probe_codec(s, st, pkt)) < 0)
#if FF_API_READ_PACKET
/**
 * Deprecated public wrapper kept for ABI compatibility; forwards to
 * ff_read_packet().
 */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    return ff_read_packet(s, pkt);
}
#endif
738 /**********************************************************/
740 static int determinable_frame_size(AVCodecContext *avctx)
742 if (/*avctx->codec_id == AV_CODEC_ID_AAC ||*/
743 avctx->codec_id == AV_CODEC_ID_MP1 ||
744 avctx->codec_id == AV_CODEC_ID_MP2 ||
745 avctx->codec_id == AV_CODEC_ID_MP3/* ||
746 avctx->codec_id == AV_CODEC_ID_CELT*/)
/**
 * Get the number of samples of an audio frame. Return -1 on error.
 *
 * Priority differs by direction: when demuxing (mux==0) the codec's fixed
 * frame_size wins; otherwise duration is derived from the packet bytes, with
 * frame_size as the muxing fallback and a CBR formula as last resort for WMA.
 * NOTE(review): listing is missing lines (frame_size declaration, returns,
 * closing braces); code left byte-identical, comments only added.
 */
int ff_get_audio_frame_size(AVCodecContext *enc, int size, int mux)
    /* give frame_size priority if demuxing */
    if (!mux && enc->frame_size > 1)
        return enc->frame_size;

    if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)

    /* Fall back on using frame_size if muxing. */
    if (enc->frame_size > 1)
        return enc->frame_size;

    //For WMA we currently have no other means to calculate duration thus we
    //do it here by assuming CBR, which is true for all known cases.
    if(!mux && enc->bit_rate>0 && size>0 && enc->sample_rate>0 && enc->block_align>1) {
        if (enc->codec_id == AV_CODEC_ID_WMAV1 || enc->codec_id == AV_CODEC_ID_WMAV2)
            return ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
/**
 * Compute the frame duration as a fraction *pnum / *pden (in units of the
 * stream time base). Both are set to 0 when the duration is not available.
 *
 * NOTE(review): listing is missing lines (pnum/pden zeroing, frame_size
 * declaration, several braces/breaks); code left byte-identical.
 */
void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
    switch(st->codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        if (st->r_frame_rate.num && !pc) {
            /* container frame rate, inverted to get a duration */
            *pnum = st->r_frame_rate.den;
            *pden = st->r_frame_rate.num;
        } else if(st->time_base.num*1000LL > st->time_base.den) {
            /* coarse stream time base: one tick per frame */
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
            *pnum = st->codec->time_base.num;
            *pden = st->codec->time_base.den;
            if (pc && pc->repeat_pict) {
                /* stretch duration for repeated fields/frames,
                 * guarding against pnum overflow */
                if (*pnum > INT_MAX / (1 + pc->repeat_pict))
                    *pden /= 1 + pc->repeat_pict;
                    *pnum *= 1 + pc->repeat_pict;
            //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
            //Thus if we have no parser in such case leave duration undefined.
            if(st->codec->ticks_per_frame>1 && !pc){
    case AVMEDIA_TYPE_AUDIO:
        frame_size = ff_get_audio_frame_size(st->codec, pkt->size, 0);
        if (frame_size <= 0 || st->codec->sample_rate <= 0)
        *pden = st->codec->sample_rate;
/* Return non-zero when the codec produces only intra-coded frames (every
 * packet is a keyframe). Non-video streams count as intra-only. The codec
 * descriptor is looked up lazily and cached on the context.
 * NOTE(review): listing is missing lines (return 1 for non-video, the
 * !desc guard, braces); code left byte-identical, comments only added. */
static int is_intra_only(AVCodecContext *enc){
    const AVCodecDescriptor *desc;

    if(enc->codec_type != AVMEDIA_TYPE_VIDEO)

    desc = av_codec_get_codec_descriptor(enc);
        /* cache the descriptor so the lookup happens once */
        desc = avcodec_descriptor_get(enc->codec_id);
        av_codec_set_codec_descriptor(enc, desc);
    return !!(desc->props & AV_CODEC_PROP_INTRA_ONLY);
/* Return non-zero once enough frames were decoded to trust has_b_frames as
 * the H.264 reorder delay (other codecs always qualify). With the H.264
 * decoder available, an exact match against the SPS num_reorder_frames is
 * accepted immediately; otherwise a frame-count heuristic is used.
 * NOTE(review): listing is missing lines (return after the !st->info check,
 * #endif, return 1); code left byte-identical, comments only added. */
static int has_decode_delay_been_guessed(AVStream *st)
    if(st->codec->codec_id != AV_CODEC_ID_H264) return 1;
    if(!st->info) // if we have left find_stream_info then nb_decoded_frames won't increase anymore for stream copy
#if CONFIG_H264_DECODER
    if(st->codec->has_b_frames &&
       avpriv_h264_has_num_reorder_frames(st->codec) == st->codec->has_b_frames)
    /* heuristic thresholds: more B-frames need more decoded frames
     * before the delay estimate stabilizes */
    if(st->codec->has_b_frames<3)
        return st->nb_decoded_frames >= 7;
    else if(st->codec->has_b_frames<4)
        return st->nb_decoded_frames >= 18;
        return st->nb_decoded_frames >= 20;
860 static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
864 if (pktl == s->parse_queue_end)
865 return s->packet_buffer;
/* Establish the pts wrap reference and behavior for a stream once its
 * first_dts is known, propagating the decision to all streams of the same
 * program (or to every stream when no program exists). Returns non-zero
 * when a reference was set (per the caller's use in update_initial_timestamps).
 * NOTE(review): listing is missing lines (i declaration, braces, return
 * statements, the ADD/SUB branch structure); code left byte-identical. */
static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_index)
    if (s->correct_ts_overflow && st->pts_wrap_bits < 63 &&
        st->pts_wrap_reference == AV_NOPTS_VALUE && st->first_dts != AV_NOPTS_VALUE) {

        // reference time stamp should be 60 s before first time stamp
        int64_t pts_wrap_reference = st->first_dts - av_rescale(60, st->time_base.den, st->time_base.num);
        // if first time stamp is not more than 1/8 and 60s before the wrap point, subtract rather than add wrap offset
        int pts_wrap_behavior = (st->first_dts < (1LL<<st->pts_wrap_bits) - (1LL<<st->pts_wrap_bits-3)) ||
            (st->first_dts < (1LL<<st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ?
            AV_PTS_WRAP_ADD_OFFSET : AV_PTS_WRAP_SUB_OFFSET;

        AVProgram *first_program = av_find_program_from_stream(s, NULL, stream_index);

        if (!first_program) {
            /* no program: adopt (or seed) the default stream's reference */
            int default_stream_index = av_find_default_stream_index(s);
            if (s->streams[default_stream_index]->pts_wrap_reference == AV_NOPTS_VALUE) {
                for (i=0; i<s->nb_streams; i++) {
                    s->streams[i]->pts_wrap_reference = pts_wrap_reference;
                    s->streams[i]->pts_wrap_behavior = pts_wrap_behavior;
                st->pts_wrap_reference = s->streams[default_stream_index]->pts_wrap_reference;
                st->pts_wrap_behavior = s->streams[default_stream_index]->pts_wrap_behavior;
            AVProgram *program = first_program;
            /* reuse a reference already chosen by any program of this stream */
                if (program->pts_wrap_reference != AV_NOPTS_VALUE) {
                    pts_wrap_reference = program->pts_wrap_reference;
                    pts_wrap_behavior = program->pts_wrap_behavior;
                program = av_find_program_from_stream(s, program, stream_index);

            // update every program with differing pts_wrap_reference
            program = first_program;
                if (program->pts_wrap_reference != pts_wrap_reference) {
                    for (i=0; i<program->nb_stream_indexes; i++) {
                        s->streams[program->stream_index[i]]->pts_wrap_reference = pts_wrap_reference;
                        s->streams[program->stream_index[i]]->pts_wrap_behavior = pts_wrap_behavior;
                    program->pts_wrap_reference = pts_wrap_reference;
                    program->pts_wrap_behavior = pts_wrap_behavior;
                program = av_find_program_from_stream(s, program, stream_index);
/* Once the first real dts of a stream is seen, shift all previously queued
 * packets (which carry RELATIVE_TS_BASE-relative timestamps) into absolute
 * time, fill in missing dts values from a sorted pts window, and apply wrap
 * correction to the initial timestamps if a wrap reference was established.
 * NOTE(review): listing is missing lines (i/delay/shift declarations, an
 * early return, pts shift statement, braces); code left byte-identical. */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts, AVPacket *pkt)
    AVStream *st= s->streams[stream_index];
    AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
    int64_t pts_buffer[MAX_REORDER_DELAY+1];

    /* nothing to do unless this is the first absolute dts for the stream */
    if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))

    delay = st->codec->has_b_frames;
    st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
    /* shift converting queued relative timestamps to absolute ones */
    shift = st->first_dts - RELATIVE_TS_BASE;

    for (i=0; i<MAX_REORDER_DELAY+1; i++)
        pts_buffer[i] = AV_NOPTS_VALUE;

    if (is_relative(pts))

    for(; pktl; pktl= get_next_pkt(s, st, pktl)){
        if(pktl->pkt.stream_index != stream_index)
        if(is_relative(pktl->pkt.pts))
            pktl->pkt.pts += shift;

        if(is_relative(pktl->pkt.dts))
            pktl->pkt.dts += shift;

        if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time= pktl->pkt.pts;

        /* insertion-sort pts into a sliding window; the smallest pts is
         * the dts candidate for packets lacking one */
        if(pktl->pkt.pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
            pts_buffer[0]= pktl->pkt.pts;
            for(i=0; i<delay && pts_buffer[i] > pts_buffer[i+1]; i++)
                FFSWAP(int64_t, pts_buffer[i], pts_buffer[i+1]);
            if(pktl->pkt.dts == AV_NOPTS_VALUE)
                pktl->pkt.dts= pts_buffer[0];

    if (update_wrap_reference(s, st, stream_index) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
        // correct first time stamps to negative values
        st->first_dts = wrap_timestamp(st, st->first_dts);
        st->cur_dts = wrap_timestamp(st, st->cur_dts);
        pkt->dts = wrap_timestamp(st, pkt->dts);
        pkt->pts = wrap_timestamp(st, pkt->pts);
        pts = wrap_timestamp(st, pts);

    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
/* Backfill dts/pts/duration on queued packets of a stream once a reliable
 * per-packet duration is known, starting from first_dts (or from
 * RELATIVE_TS_BASE when no first_dts exists yet) and advancing cur_dts.
 * Packets that already carry their own timing are left alone.
 * NOTE(review): listing is missing lines (early returns, loop braces, the
 * assignment resetting first_dts path); code left byte-identical. */
static void update_initial_durations(AVFormatContext *s, AVStream *st,
                                     int stream_index, int duration)
    AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
    int64_t cur_dts= RELATIVE_TS_BASE;

    if(st->first_dts != AV_NOPTS_VALUE){
        cur_dts= st->first_dts;
        /* walk backwards in value from first_dts over leading packets that
         * have no timing of their own */
        for(; pktl; pktl= get_next_pkt(s, st, pktl)){
            if(pktl->pkt.stream_index == stream_index){
                if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
        if(pktl && pktl->pkt.dts != st->first_dts) {
            av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s (pts %s, duration %d) in the queue\n",
                   av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts), av_ts2str(pktl->pkt.pts), pktl->pkt.duration);
            av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(st->first_dts));
        pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
        st->first_dts = cur_dts;
    }else if(st->cur_dts != RELATIVE_TS_BASE)

    for(; pktl; pktl= get_next_pkt(s, st, pktl)){
        if(pktl->pkt.stream_index != stream_index)
        if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
           && !pktl->pkt.duration){
            pktl->pkt.dts= cur_dts;
            if(!st->codec->has_b_frames)
                pktl->pkt.pts= cur_dts;
//            if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
            pktl->pkt.duration = duration;
        cur_dts = pktl->pkt.dts + pktl->pkt.duration;
    st->cur_dts= cur_dts;
/* Fill in missing packet timing fields (pts/dts/duration), correct wrapped
 * and inconsistent timestamps, and maintain the stream's cur_dts /
 * last_IP_* bookkeeping. Core of the demuxer timestamp interpolation.
 * NOTE(review): listing is missing many lines (early returns, offset
 * declaration, condition tails, closing braces); code left byte-identical,
 * comments only added. */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
    int num, den, presentation_delayed, delay, i;

    /* caller opted out of timestamp fill-in */
    if (s->flags & AVFMT_FLAG_NOFILLIN)

    if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
        pkt->dts= AV_NOPTS_VALUE;

    if (pc && pc->pict_type == AV_PICTURE_TYPE_B
        && !st->codec->has_b_frames)
        //FIXME Set low_delay = 0 when has_b_frames = 1
        st->codec->has_b_frames = 1;

    /* do we have a video B-frame ? */
    delay= st->codec->has_b_frames;
    presentation_delayed = 0;

    /* XXX: need has_b_frame, but cannot get it if the codec is
        pc && pc->pict_type != AV_PICTURE_TYPE_B)
        presentation_delayed = 1;

    /* dts far ahead of pts: assume one of them wrapped */
    if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
        st->pts_wrap_bits < 63 &&
        pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
        if(is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > st->cur_dts) {
            pkt->dts -= 1LL<<st->pts_wrap_bits;
            pkt->pts += 1LL<<st->pts_wrap_bits;

    // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
    // we take the conservative approach and discard both
    // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
    if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
        av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
        if(strcmp(s->iformat->name, "mov,mp4,m4a,3gp,3g2,mj2")) // otherwise we discard correct timestamps for vc1-wmapro.ism
            pkt->dts= AV_NOPTS_VALUE;

    /* derive duration from the frame rate / sample rate when absent */
    if (pkt->duration == 0) {
        ff_compute_frame_duration(&num, &den, st, pc, pkt);
            pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
    if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
        update_initial_durations(s, st, pkt->stream_index, pkt->duration);

    /* correct timestamps with byte offset if demuxers only have timestamps
       on packet boundaries */
    if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if(pkt->pts != AV_NOPTS_VALUE)
        if(pkt->dts != AV_NOPTS_VALUE)

    /* This may be redundant, but it should not hurt. */
    if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
        presentation_delayed = 1;

    av_dlog(NULL, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
            presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
    /* interpolate PTS and DTS if they are not present */
    //We skip H264 currently because delay and has_b_frames are not reliably set
    if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != AV_CODEC_ID_H264){
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->cur_dts;

            /* this is tricky: the dts must be incremented by the duration
               of the frame we are displaying, i.e. the last I- or P-frame */
            if (st->last_IP_duration == 0)
                st->last_IP_duration = pkt->duration;
            if(pkt->dts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts + st->last_IP_duration;
            st->last_IP_duration = pkt->duration;
            st->last_IP_pts= pkt->pts;
            /* cannot compute PTS if not present (we can compute it only
               by knowing the future */
        } else if (pkt->pts != AV_NOPTS_VALUE ||
                   pkt->dts != AV_NOPTS_VALUE ||
            int duration = pkt->duration;

            /* presentation is not delayed : PTS and DTS are the same */
            if (pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = pkt->dts;
            update_initial_timestamps(s, pkt->stream_index, pkt->pts,
            if (pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = st->cur_dts;
            pkt->dts = pkt->pts;
            if (pkt->pts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->pts + duration;

    /* sorted sliding window of recent pts: smallest is the dts estimate */
    if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
        st->pts_buffer[0]= pkt->pts;
        for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
        if(pkt->dts == AV_NOPTS_VALUE)
            pkt->dts= st->pts_buffer[0];
        if(st->codec->codec_id == AV_CODEC_ID_H264){ // we skipped it above so we try here
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt); // this should happen on the first packet
        if(pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;

    av_dlog(NULL, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
            presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));

    /* intra-only codecs: every packet is a keyframe */
    if (is_intra_only(st->codec))
        pkt->flags |= AV_PKT_FLAG_KEY;
        pkt->convergence_duration = pc->convergence_duration;
/* Drain a packet list: unref every queued packet and reset both the head and
 * tail pointers to NULL.  NOTE(review): the loop/braces around lines 1169-1171
 * are not visible in this excerpt; presumably each AVPacketList node is also
 * freed per iteration — confirm against the full source. */
1166 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1169 AVPacketList *pktl = *pkt_buf;
1170 *pkt_buf = pktl->next;
1171 av_free_packet(&pktl->pkt);
1174 *pkt_buf_end = NULL;
1178 * Parse a packet, add all split parts to parse_queue
1180 * @param pkt packet to parse, NULL when flushing the parser at end of stream
1182 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1184 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1185 AVStream *st = s->streams[stream_index];
1186 uint8_t *data = pkt ? pkt->data : NULL;
1187 int size = pkt ? pkt->size : 0;
1188 int ret = 0, got_output = 0;
/* NULL pkt means end-of-stream: substitute the zero-sized flush_pkt so the
 * parser loop below can drain buffered frames. */
1191 av_init_packet(&flush_pkt);
1194 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1195 // preserve 0-size sync packets
1196 compute_pkt_fields(s, st, st->parser, pkt);
/* Feed the parser until the input is consumed; on flush, keep going as long
 * as the parser still emits output. */
1199 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1202 av_init_packet(&out_pkt);
1203 len = av_parser_parse2(st->parser, st->codec,
1204 &out_pkt.data, &out_pkt.size, data, size,
1205 pkt->pts, pkt->dts, pkt->pos);
/* Timestamps belong to the first parsed frame only; clear them so later
 * iterations do not reuse them. */
1207 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1209 /* increment read pointer */
1213 got_output = !!out_pkt.size;
/* Transfer side data ownership from the source packet to the first
 * output packet. */
1218 if (pkt->side_data) {
1219 out_pkt.side_data = pkt->side_data;
1220 out_pkt.side_data_elems = pkt->side_data_elems;
1221 pkt->side_data = NULL;
1222 pkt->side_data_elems = 0;
1225 /* set the duration */
1226 out_pkt.duration = 0;
1227 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1228 if (st->codec->sample_rate > 0) {
/* Audio: parser duration is in samples; rescale to the stream time base.
 * NOTE(review): the destination AVRational argument is on a line not
 * visible in this excerpt. */
1229 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1230 (AVRational){ 1, st->codec->sample_rate },
1234 } else if (st->codec->time_base.num != 0 &&
1235 st->codec->time_base.den != 0) {
1236 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1237 st->codec->time_base,
1242 out_pkt.stream_index = st->index;
1243 out_pkt.pts = st->parser->pts;
1244 out_pkt.dts = st->parser->dts;
1245 out_pkt.pos = st->parser->pos;
1247 if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
1248 out_pkt.pos = st->parser->frame_offset;
/* Keyframe flag: trust the parser when it is certain (== 1), or infer it
 * from an I-picture when the parser is unsure (== -1). */
1250 if (st->parser->key_frame == 1 ||
1251 (st->parser->key_frame == -1 &&
1252 st->parser->pict_type == AV_PICTURE_TYPE_I))
1253 out_pkt.flags |= AV_PKT_FLAG_KEY;
1255 if(st->parser->key_frame == -1 && st->parser->pict_type==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
1256 out_pkt.flags |= AV_PKT_FLAG_KEY;
1258 compute_pkt_fields(s, st, st->parser, &out_pkt);
/* If the parser returned the input buffer unchanged, hand over the input
 * packet's buffer reference instead of copying. */
1260 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1261 out_pkt.buf = pkt->buf;
1263 #if FF_API_DESTRUCT_PACKET
1264 FF_DISABLE_DEPRECATION_WARNINGS
1265 out_pkt.destruct = pkt->destruct;
1266 pkt->destruct = NULL;
1267 FF_ENABLE_DEPRECATION_WARNINGS
1270 if ((ret = av_dup_packet(&out_pkt)) < 0)
1273 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1274 av_free_packet(&out_pkt);
1275 ret = AVERROR(ENOMEM);
1281 /* end of the stream => close and free the parser */
1282 if (pkt == &flush_pkt) {
1283 av_parser_close(st->parser);
1288 av_free_packet(pkt);
/* Pop the head packet of a packet list into *pkt (transfer of ownership) and
 * clear the tail pointer when the list becomes empty.  NOTE(review): the pkt
 * parameter, the copy of pktl->pkt and the free of the node are on lines not
 * visible in this excerpt. */
1292 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1293 AVPacketList **pkt_buffer_end,
1297 av_assert0(*pkt_buffer);
1300 *pkt_buffer = pktl->next;
1302 *pkt_buffer_end = NULL;
/* Core demux step: pull raw packets via ff_read_packet(), lazily create and
 * run the per-stream parser, fill in timing fields, and return exactly one
 * output packet (either directly or from s->parse_queue). */
1307 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1309 int ret = 0, i, got_packet = 0;
1311 av_init_packet(pkt);
1313 while (!got_packet && !s->parse_queue) {
1317 /* read next packet */
1318 ret = ff_read_packet(s, &cur_pkt);
1320 if (ret == AVERROR(EAGAIN))
1322 /* flush the parsers */
1323 for(i = 0; i < s->nb_streams; i++) {
1325 if (st->parser && st->need_parsing)
1326 parse_packet(s, NULL, st->index);
1328 /* all remaining packets are now in parse_queue =>
1329 * really terminate parsing */
1333 st = s->streams[cur_pkt.stream_index];
/* pts < dts is invalid by definition; warn but keep the packet. */
1335 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1336 cur_pkt.dts != AV_NOPTS_VALUE &&
1337 cur_pkt.pts < cur_pkt.dts) {
1338 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1339 cur_pkt.stream_index,
1340 av_ts2str(cur_pkt.pts),
1341 av_ts2str(cur_pkt.dts),
1344 if (s->debug & FF_FDEBUG_TS)
1345 av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1346 cur_pkt.stream_index,
1347 av_ts2str(cur_pkt.pts),
1348 av_ts2str(cur_pkt.dts),
/* Lazily create the parser on first use for this stream, then translate
 * the stream's need_parsing mode into parser flags. */
1353 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1354 st->parser = av_parser_init(st->codec->codec_id);
1356 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1357 "%s, packets or times may be invalid.\n",
1358 avcodec_get_name(st->codec->codec_id));
1359 /* no parser available: just output the raw packets */
1360 st->need_parsing = AVSTREAM_PARSE_NONE;
1361 } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1362 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1363 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1364 st->parser->flags |= PARSER_FLAG_ONCE;
1365 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
1366 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
1370 if (!st->need_parsing || !st->parser) {
1371 /* no parsing needed: we just output the packet as is */
1373 compute_pkt_fields(s, st, NULL, pkt);
1374 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1375 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1376 ff_reduce_index(s, st->index);
1377 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1380 } else if (st->discard < AVDISCARD_ALL) {
1381 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1385 av_free_packet(&cur_pkt);
/* A keyframe satisfies any pending skip_to_keyframe request; otherwise
 * drop packets until one arrives. */
1387 if (pkt->flags & AV_PKT_FLAG_KEY)
1388 st->skip_to_keyframe = 0;
1389 if (st->skip_to_keyframe) {
1390 av_free_packet(&cur_pkt);
1398 if (!got_packet && s->parse_queue)
1399 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
/* Inject pending skip-samples side data (10 bytes: 4 start, 4 end, 2
 * reason/padding per AV_PKT_DATA_SKIP_SAMPLES layout). */
1402 AVStream *st = s->streams[pkt->stream_index];
1403 if (st->skip_samples) {
1404 uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
1406 AV_WL32(p, st->skip_samples);
1407 av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d\n", st->skip_samples);
1409 st->skip_samples = 0;
1413 if(ret >= 0 && !(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
1414 av_packet_merge_side_data(pkt);
1416 if(s->debug & FF_FDEBUG_TS)
1417 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1419 av_ts2str(pkt->pts),
1420 av_ts2str(pkt->dts),
/* Public API: return the next frame of a stream.  Without AVFMT_FLAG_GENPTS
 * this is a thin wrapper over read_frame_internal()/packet_buffer; with it,
 * packets are buffered and scanned ahead so that missing pts values can be
 * derived from later packets' dts. */
1428 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1430 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
/* Fast path (no pts generation requested): serve buffered packets first. */
1436 ret = s->packet_buffer ?
1437 read_from_packet_buffer(&s->packet_buffer, &s->packet_buffer_end, pkt) :
1438 read_frame_internal(s, pkt);
1445 AVPacketList *pktl = s->packet_buffer;
1448 AVPacket *next_pkt = &pktl->pkt;
1450 if (next_pkt->dts != AV_NOPTS_VALUE) {
1451 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1452 // last dts seen for this stream. if any of packets following
1453 // current one had no dts, we will set this to AV_NOPTS_VALUE.
1454 int64_t last_dts = next_pkt->dts;
/* Scan buffered packets of the same stream; a later dts (modulo
 * timestamp wraparound) from a non-B frame gives us this packet's pts. */
1455 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1456 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1457 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1458 if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1459 next_pkt->pts = pktl->pkt.dts;
1461 if (last_dts != AV_NOPTS_VALUE) {
1462 // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1463 last_dts = pktl->pkt.dts;
1468 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1469 // Fix missing pts on the last reference frame (seen with MXF etc.).
1470 // We only do this when
1472 // 2. we are not able to resolve a pts value for the current packet.
1473 // 3. the packets for this stream at the end of the file had valid dts.
1474 next_pkt->pts = last_dts + next_pkt->duration;
1476 pktl = s->packet_buffer;
1479 /* read packet from packet buffer, if there is data */
1480 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1481 next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1482 ret = read_from_packet_buffer(&s->packet_buffer,
1483 &s->packet_buffer_end, pkt);
1488 ret = read_frame_internal(s, pkt);
1490 if (pktl && ret != AVERROR(EAGAIN)) {
/* Stash the freshly-read packet at the end of the buffer so the scan
 * above can see it on the next loop iteration. */
1497 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1498 &s->packet_buffer_end)) < 0)
1499 return AVERROR(ENOMEM);
1504 st = s->streams[pkt->stream_index];
1505 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
1506 ff_reduce_index(s, st->index);
1507 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
/* Convert internal relative timestamps back to the caller's scale. */
1510 if (is_relative(pkt->dts))
1511 pkt->dts -= RELATIVE_TS_BASE;
1512 if (is_relative(pkt->pts))
1513 pkt->pts -= RELATIVE_TS_BASE;
1518 /* XXX: suppress the packet queue */
/* Free all three internal packet queues and reset the raw-packet probing
 * budget to its initial value. */
1519 static void flush_packet_queue(AVFormatContext *s)
1521 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1522 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1523 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1525 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1528 /*******************************************************/
/* Pick the stream used as the seeking reference: prefer a video stream that
 * is not an attached picture (the visible branch presumably returns its index
 * on a line not shown here); otherwise fall back to the first audio stream,
 * and finally to stream 0. */
1531 int av_find_default_stream_index(AVFormatContext *s)
1533 int first_audio_index = -1;
1537 if (s->nb_streams <= 0)
1539 for(i = 0; i < s->nb_streams; i++) {
1541 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1542 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1545 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1546 first_audio_index = i;
1548 return first_audio_index >= 0 ? first_audio_index : 0;
1552 * Flush the frame reader.
/* Drop all queued packets and reset the per-stream demux state (parser,
 * timestamps, probe budget, reorder buffer), e.g. after a seek. */
1554 void ff_read_frame_flush(AVFormatContext *s)
1559 flush_packet_queue(s);
1561 /* for each stream, reset read state */
1562 for(i = 0; i < s->nb_streams; i++) {
1566 av_parser_close(st->parser);
1569 st->last_IP_pts = AV_NOPTS_VALUE;
/* If no first_dts was ever seen, restart in the relative-timestamp domain
 * so update_initial_timestamps() can shift later; otherwise leave cur_dts
 * undefined until the next packet. */
1570 if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1571 else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1573 st->probe_packets = MAX_PROBE_PACKETS;
1575 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1576 st->pts_buffer[j]= AV_NOPTS_VALUE;
/* After a seek, propagate the reference stream's timestamp to every stream,
 * rescaling from ref_st's time base into each stream's own time base. */
1580 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1584 for(i = 0; i < s->nb_streams; i++) {
1585 AVStream *st = s->streams[i];
/* Cross-multiplied rescale; 64-bit casts guard the intermediate products
 * against overflow. */
1587 st->cur_dts = av_rescale(timestamp,
1588 st->time_base.den * (int64_t)ref_st->time_base.num,
1589 st->time_base.num * (int64_t)ref_st->time_base.den);
/* Keep the seek index within s->max_index_size bytes: once the entry count
 * reaches the budget, drop every other entry (halving time resolution while
 * preserving coverage of the whole file). */
1593 void ff_reduce_index(AVFormatContext *s, int stream_index)
1595 AVStream *st= s->streams[stream_index];
1596 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1598 if((unsigned)st->nb_index_entries >= max_entries){
1600 for(i=0; 2*i<st->nb_index_entries; i++)
1601 st->index_entries[i]= st->index_entries[2*i];
1602 st->nb_index_entries= i;
/* Insert (or update in place) an index entry keyed by timestamp, keeping the
 * array sorted.  Rejects AV_NOPTS_VALUE and out-of-range sizes.  NOTE(review):
 * the assignments of ie->pos/size/flags and the return fall outside this
 * excerpt. */
1606 int ff_add_index_entry(AVIndexEntry **index_entries,
1607 int *nb_index_entries,
1608 unsigned int *index_entries_allocated_size,
1609 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1611 AVIndexEntry *entries, *ie;
/* Overflow guard for the realloc size computation below. */
1614 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1617 if(timestamp == AV_NOPTS_VALUE)
1618 return AVERROR(EINVAL);
1620 if (size < 0 || size > 0x3FFFFFFF)
1621 return AVERROR(EINVAL);
1623 if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1624 timestamp -= RELATIVE_TS_BASE;
1626 entries = av_fast_realloc(*index_entries,
1627 index_entries_allocated_size,
1628 (*nb_index_entries + 1) *
1629 sizeof(AVIndexEntry));
1633 *index_entries= entries;
/* Locate the insertion point; AVSEEK_FLAG_ANY matches non-keyframes too. */
1635 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1638 index= (*nb_index_entries)++;
1639 ie= &entries[index];
1640 av_assert0(index==0 || ie[-1].timestamp < timestamp);
1642 ie= &entries[index];
1643 if(ie->timestamp != timestamp){
1644 if(ie->timestamp <= timestamp)
/* Shift the tail right to open a slot for the new entry. */
1646 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1647 (*nb_index_entries)++;
1648 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1649 distance= ie->min_distance;
1653 ie->timestamp = timestamp;
1654 ie->min_distance= distance;
/* Public wrapper: apply timestamp wrap correction for the stream, then
 * delegate to ff_add_index_entry() with the stream's own index storage. */
1661 int av_add_index_entry(AVStream *st,
1662 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1664 timestamp = wrap_timestamp(st, timestamp);
1665 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1666 &st->index_entries_allocated_size, pos,
1667 timestamp, size, distance, flags);
/* Binary-search a sorted index for wanted_timestamp.  Direction is chosen by
 * AVSEEK_FLAG_BACKWARD; unless AVSEEK_FLAG_ANY is set, the result is then
 * walked to the nearest keyframe entry.  NOTE(review): the bounds setup and
 * loop header are on lines not visible in this excerpt. */
1670 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1671 int64_t wanted_timestamp, int flags)
1679 //optimize appending index entries at the end
1680 if(b && entries[b-1].timestamp < wanted_timestamp)
1685 timestamp = entries[m].timestamp;
1686 if(timestamp >= wanted_timestamp)
1688 if(timestamp <= wanted_timestamp)
1691 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1693 if(!(flags & AVSEEK_FLAG_ANY)){
1694 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1695 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Public wrapper: search this stream's own index entries. */
1704 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1707 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1708 wanted_timestamp, flags);
/* Call the demuxer's read_timestamp callback and apply the per-stream
 * timestamp wrap correction (skipped for stream_index < 0, i.e. "any
 * stream"). */
1711 static int64_t ff_read_timestamp(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,
1712 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1714 int64_t ts = read_timestamp(s, stream_index, ppos, pos_limit);
1715 if (stream_index >= 0)
1716 ts = wrap_timestamp(s->streams[stream_index], ts);
/* Seek by binary search over read_timestamp(): seed the [pos_min, pos_max]
 * window from cached index entries when available, run ff_gen_search(), then
 * reposition the byte stream and reset decode state. */
1720 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1722 AVInputFormat *avif= s->iformat;
1723 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1724 int64_t ts_min, ts_max, ts;
1729 if (stream_index < 0)
1732 av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1735 ts_min= AV_NOPTS_VALUE;
1736 pos_limit= -1; //gcc falsely says it may be uninitialized
1738 st= s->streams[stream_index];
1739 if(st->index_entries){
/* Lower bound: the last index entry at or before the target. */
1742 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1743 index= FFMAX(index, 0);
1744 e= &st->index_entries[index];
1746 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1748 ts_min= e->timestamp;
1749 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1750 pos_min, av_ts2str(ts_min));
1752 av_assert1(index==0);
/* Upper bound: the first index entry strictly after the target. */
1755 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1756 av_assert0(index < st->nb_index_entries);
1758 e= &st->index_entries[index];
1759 av_assert1(e->timestamp >= target_ts);
1761 ts_max= e->timestamp;
1762 pos_limit= pos_max - e->min_distance;
1763 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
1764 pos_max, pos_limit, av_ts2str(ts_max));
1768 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
/* Do the actual byte-level seek, then flush and resynchronize all streams
 * to the timestamp found. */
1773 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1776 ff_read_frame_flush(s);
1777 ff_update_cur_dts(s, st, ts);
/* Find the last readable timestamp (and its position) near the end of the
 * file: step backwards from EOF in growing strides until a timestamp is
 * found, then walk forward to reach the very last one. */
1782 int ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos,
1783 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1786 int64_t limit, ts_max;
1787 int64_t filesize = avio_size(s->pb);
1788 int64_t pos_max = filesize - 1;
1791 pos_max = FFMAX(0, (pos_max) - step);
1792 ts_max = ff_read_timestamp(s, stream_index, &pos_max, limit, read_timestamp);
1794 }while(ts_max == AV_NOPTS_VALUE && 2*limit > step);
1795 if (ts_max == AV_NOPTS_VALUE)
/* Forward phase: keep reading the next timestamp until it fails or runs
 * off the end of the file, leaving pos_max/ts_max at the last success. */
1799 int64_t tmp_pos = pos_max + 1;
1800 int64_t tmp_ts = ff_read_timestamp(s, stream_index, &tmp_pos, INT64_MAX, read_timestamp);
1801 if(tmp_ts == AV_NOPTS_VALUE)
1803 av_assert0(tmp_pos > pos_max);
1806 if(tmp_pos >= filesize)
/* Generic timestamp search used by ff_seek_frame_binary(): narrow the
 * [pos_min, pos_max] byte window around target_ts using interpolation first,
 * bisection when interpolation stalls, and linear stepping as a last resort.
 * Returns the chosen position; the matched timestamp goes to *ts_ret. */
1818 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1819 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1820 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1821 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1828 av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
/* Establish the lower bound if the caller did not provide one. */
1830 if(ts_min == AV_NOPTS_VALUE){
1831 pos_min = s->data_offset;
1832 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1833 if (ts_min == AV_NOPTS_VALUE)
1837 if(ts_min >= target_ts){
/* Establish the upper bound if the caller did not provide one. */
1842 if(ts_max == AV_NOPTS_VALUE){
1843 if ((ret = ff_find_last_ts(s, stream_index, &ts_max, &pos_max, read_timestamp)) < 0)
1848 if(ts_max <= target_ts){
1853 if(ts_min > ts_max){
1855 }else if(ts_min == ts_max){
1860 while (pos_min < pos_limit) {
1861 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1862 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1863 assert(pos_limit <= pos_max);
1866 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1867 // interpolate position (better than dichotomy)
1868 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1869 + pos_min - approximate_keyframe_distance;
1870 }else if(no_change==1){
1871 // bisection, if interpolation failed to change min or max pos last time
1872 pos = (pos_min + pos_limit)>>1;
1874 /* linear search if bisection failed, can only happen if there
1875 are very few or no keyframes between min/max */
1880 else if(pos > pos_limit)
1884 ts = ff_read_timestamp(s, stream_index, &pos, INT64_MAX, read_timestamp); //may pass pos_limit instead of -1
1889 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1890 pos_min, pos, pos_max,
1891 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1892 pos_limit, start_pos, no_change);
1893 if(ts == AV_NOPTS_VALUE){
1894 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1897 assert(ts != AV_NOPTS_VALUE);
/* Shrink the window: ts >= target narrows the upper side, ts <= target
 * narrows the lower side (both fire when ts == target). */
1898 if (target_ts <= ts) {
1899 pos_limit = start_pos - 1;
1903 if (target_ts >= ts) {
1909 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1910 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1913 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
/* NOTE(review): re-reading via &pos_min here looks intentional — pos_min
 * is presumably advanced on a line missing from this excerpt so this read
 * yields the following timestamp; confirm against the full source. */
1915 ts_max = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1916 av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1917 pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
/* Byte-position seek (AVSEEK_FLAG_BYTE): clamp pos to the payload range
 * [data_offset, filesize-1], move the byte stream there and mark the I/O
 * context as repositioned. */
1923 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1924 int64_t pos_min, pos_max;
1926 pos_min = s->data_offset;
1927 pos_max = avio_size(s->pb) - 1;
1929 if (pos < pos_min) pos= pos_min;
1930 else if(pos > pos_max) pos= pos_max;
1932 avio_seek(s->pb, pos, SEEK_SET);
1934 s->io_repositioned = 1;
/* Index-driven fallback seek: look the timestamp up in the stream's index;
 * if the target lies beyond the indexed region, read forward packet by packet
 * (building index entries as a side effect) until it is covered, then jump to
 * the matching entry's byte position. */
1939 static int seek_frame_generic(AVFormatContext *s,
1940 int stream_index, int64_t timestamp, int flags)
1947 st = s->streams[stream_index];
1949 index = av_index_search_timestamp(st, timestamp, flags);
1951 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1954 if(index < 0 || index==st->nb_index_entries-1){
/* Target is past the known index: position after the last entry (or at
 * the start of data) and scan forward. */
1958 if(st->nb_index_entries){
1959 av_assert0(st->index_entries);
1960 ie= &st->index_entries[st->nb_index_entries-1];
1961 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1963 ff_update_cur_dts(s, st, ie->timestamp);
1965 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1971 read_status = av_read_frame(s, &pkt);
1972 } while (read_status == AVERROR(EAGAIN));
1973 if (read_status < 0)
1975 av_free_packet(&pkt);
1976 if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1977 if(pkt.flags & AV_PKT_FLAG_KEY)
/* Give up after too many non-keyframes past the target (CDG streams
 * legitimately have almost none, hence the exemption). */
1979 if(nonkey++ > 1000 && st->codec->codec_id != AV_CODEC_ID_CDGRAPHICS){
1980 av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1985 index = av_index_search_timestamp(st, timestamp, flags);
1990 ff_read_frame_flush(s);
1991 if (s->iformat->read_seek){
1992 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1995 ie = &st->index_entries[index];
1996 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1998 ff_update_cur_dts(s, st, ie->timestamp);
/* Dispatch a seek request: byte seek, then the demuxer's own read_seek,
 * then binary search over read_timestamp, and finally the generic
 * index-based seek — in that order of preference. */
2003 static int seek_frame_internal(AVFormatContext *s, int stream_index,
2004 int64_t timestamp, int flags)
2009 if (flags & AVSEEK_FLAG_BYTE) {
2010 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
2012 ff_read_frame_flush(s);
2013 return seek_frame_byte(s, stream_index, timestamp, flags);
2016 if(stream_index < 0){
2017 stream_index= av_find_default_stream_index(s);
2018 if(stream_index < 0)
2021 st= s->streams[stream_index];
2022 /* timestamp for default must be expressed in AV_TIME_BASE units */
2023 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
2026 /* first, we try the format specific seek */
2027 if (s->iformat->read_seek) {
2028 ff_read_frame_flush(s);
2029 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
2036 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
2037 ff_read_frame_flush(s);
2038 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
2039 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
2040 ff_read_frame_flush(s);
2041 return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public seek API.  Demuxers that only implement read_seek2 are served by
 * translating the request into an avformat_seek_file() range (BACKWARD
 * becomes "at or before ts", otherwise "at or after ts").  On success,
 * re-queue attached pictures so they are delivered again. */
2047 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
2051 if (s->iformat->read_seek2 && !s->iformat->read_seek) {
2052 int64_t min_ts = INT64_MIN, max_ts = INT64_MAX;
2053 if ((flags & AVSEEK_FLAG_BACKWARD))
2057 return avformat_seek_file(s, stream_index, min_ts, timestamp, max_ts,
2058 flags & ~AVSEEK_FLAG_BACKWARD);
2061 ret = seek_frame_internal(s, stream_index, timestamp, flags);
2064 ret = avformat_queue_attached_pictures(s);
/* Range-based seek API: find a position with min_ts <= ts' <= max_ts.
 * Prefers the demuxer's read_seek2; otherwise emulates the range semantics
 * on top of av_seek_frame(), retrying in the opposite direction when the
 * first attempt fails. */
2069 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
2071 if(min_ts > ts || max_ts < ts)
2073 if (stream_index < -1 || stream_index >= (int)s->nb_streams)
2074 return AVERROR(EINVAL);
2077 flags |= AVSEEK_FLAG_ANY;
2078 flags &= ~AVSEEK_FLAG_BACKWARD;
2080 if (s->iformat->read_seek2) {
2082 ff_read_frame_flush(s);
/* Default stream: convert the AV_TIME_BASE request into the single
 * stream's time base, rounding min up and max down so the target range
 * can only shrink, never grow. */
2084 if (stream_index == -1 && s->nb_streams == 1) {
2085 AVRational time_base = s->streams[0]->time_base;
2086 ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
2087 min_ts = av_rescale_rnd(min_ts, time_base.den,
2088 time_base.num * (int64_t)AV_TIME_BASE,
2089 AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
2090 max_ts = av_rescale_rnd(max_ts, time_base.den,
2091 time_base.num * (int64_t)AV_TIME_BASE,
2092 AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
2095 ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
2098 ret = avformat_queue_attached_pictures(s);
2102 if(s->iformat->read_timestamp){
2103 //try to seek via read_timestamp()
2106 // Fall back on old API if new is not implemented but old is.
2107 // Note the old API has somewhat different semantics.
2108 if (s->iformat->read_seek || 1) {
/* Unsigned subtraction keeps the distance comparison valid even when
 * min_ts/max_ts are INT64_MIN/INT64_MAX. */
2109 int dir = (ts - (uint64_t)min_ts > (uint64_t)max_ts - ts ? AVSEEK_FLAG_BACKWARD : 0);
2110 int ret = av_seek_frame(s, stream_index, ts, flags | dir);
2111 if (ret<0 && ts != min_ts && max_ts != ts) {
2112 ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
2114 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
2119 // try some generic seek like seek_frame_generic() but with new ts semantics
2120 return -1; //unreachable
2123 /*******************************************************/
2126 * Return TRUE if the given format context has an accurate duration available.
2128 * @return TRUE if at least one stream (or the container itself) has an accurate duration.
/* True if any stream, or the container itself, carries a known duration.
 * (The return statements sit on lines not visible in this excerpt.) */
2130 static int has_duration(AVFormatContext *ic)
2135 for(i = 0;i < ic->nb_streams; i++) {
2136 st = ic->streams[i];
2137 if (st->duration != AV_NOPTS_VALUE)
2140 if (ic->duration != AV_NOPTS_VALUE)
2146 * Estimate the stream timings from the one of each components.
2148 * Also computes the global bitrate if possible.
2150 static void update_stream_timings(AVFormatContext *ic)
2152 int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2153 int64_t duration, duration1, filesize;
2158 start_time = INT64_MAX;
2159 start_time_text = INT64_MAX;
2160 end_time = INT64_MIN;
2161 duration = INT64_MIN;
2162 for(i = 0;i < ic->nb_streams; i++) {
2163 st = ic->streams[i];
2164 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2165 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
/* Track subtitle/data start times separately: they often begin late and
 * would otherwise distort the container start time. */
2166 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
2167 if (start_time1 < start_time_text)
2168 start_time_text = start_time1;
2170 start_time = FFMIN(start_time, start_time1);
2171 end_time1 = AV_NOPTS_VALUE;
2172 if (st->duration != AV_NOPTS_VALUE) {
2173 end_time1 = start_time1
2174 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2175 end_time = FFMAX(end_time, end_time1);
/* Widen the time range of every program this stream belongs to. */
2177 for(p = NULL; (p = av_find_program_from_stream(ic, p, i)); ){
2178 if(p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1)
2179 p->start_time = start_time1;
2180 if(p->end_time < end_time1)
2181 p->end_time = end_time1;
2184 if (st->duration != AV_NOPTS_VALUE) {
2185 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2186 duration = FFMAX(duration, duration1);
/* Accept the text-stream start time only if it is close to (within 1s of)
 * the primary streams, or if no primary stream had a start time at all. */
2189 if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2190 start_time = start_time_text;
2191 else if(start_time > start_time_text)
2192 av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);
2194 if (start_time != INT64_MAX) {
2195 ic->start_time = start_time;
2196 if (end_time != INT64_MIN) {
2197 if (ic->nb_programs) {
/* With programs, the container duration is the longest program span. */
2198 for (i=0; i<ic->nb_programs; i++) {
2199 p = ic->programs[i];
2200 if(p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time)
2201 duration = FFMAX(duration, p->end_time - p->start_time);
2204 duration = FFMAX(duration, end_time - start_time);
2207 if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) {
2208 ic->duration = duration;
2210 if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2211 /* compute the bitrate */
2212 double bitrate = (double)filesize * 8.0 * AV_TIME_BASE /
2213 (double)ic->duration;
2214 if (bitrate >= 0 && bitrate <= INT_MAX)
2215 ic->bit_rate = bitrate;
/* Recompute the container timings, then back-fill start_time/duration into
 * any stream that is still missing them (rescaled to the stream time base). */
2219 static void fill_all_stream_timings(AVFormatContext *ic)
2224 update_stream_timings(ic);
2225 for(i = 0;i < ic->nb_streams; i++) {
2226 st = ic->streams[i];
2227 if (st->start_time == AV_NOPTS_VALUE) {
2228 if(ic->start_time != AV_NOPTS_VALUE)
2229 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2230 if(ic->duration != AV_NOPTS_VALUE)
2231 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Last-resort timing estimate: sum the per-stream bitrates into the
 * container bitrate (if unset), then derive stream durations from
 * filesize * 8 / bitrate.  A warning is emitted because this is inherently
 * inaccurate. */
2236 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2238 int64_t filesize, duration;
2239 int i, show_warning = 0;
2242 /* if bit_rate is already set, we believe it */
2243 if (ic->bit_rate <= 0) {
2245 for(i=0;i<ic->nb_streams;i++) {
2246 st = ic->streams[i];
/* Stop summing on int overflow; the partial sum would be meaningless. */
2247 if (st->codec->bit_rate > 0) {
2248 if (INT_MAX - st->codec->bit_rate < bit_rate) {
2252 bit_rate += st->codec->bit_rate;
2255 ic->bit_rate = bit_rate;
2258 /* if duration is already set, we believe it */
2259 if (ic->duration == AV_NOPTS_VALUE &&
2260 ic->bit_rate != 0) {
2261 filesize = ic->pb ? avio_size(ic->pb) : 0;
2263 for(i = 0; i < ic->nb_streams; i++) {
2264 st = ic->streams[i];
/* The time_base guard keeps the av_rescale divisor from overflowing. */
2265 if ( st->time_base.num <= INT64_MAX / ic->bit_rate
2266 && st->duration == AV_NOPTS_VALUE) {
2267 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2268 st->duration = duration;
2275 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
/* Window size read from the tail of the file per attempt, and how many times
 * the window is doubled before giving up. */
2278 #define DURATION_MAX_READ_SIZE 250000LL
2279 #define DURATION_MAX_RETRY 4
2281 /* only usable for MPEG-PS streams */
/* Estimate durations by reading packets near the end of the file and taking
 * last_pts - start_time per stream; retries with a doubled tail window while
 * no end timestamp is found.  Restores the original file position on exit. */
2282 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2284 AVPacket pkt1, *pkt = &pkt1;
2286 int read_size, i, ret;
2288 int64_t filesize, offset, duration;
2291 /* flush packet queue */
2292 flush_packet_queue(ic);
2294 for (i=0; i<ic->nb_streams; i++) {
2295 st = ic->streams[i];
2296 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2297 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2300 av_parser_close(st->parser);
2305 /* estimate the end time (duration) */
2306 /* XXX: may need to support wrapping */
2307 filesize = ic->pb ? avio_size(ic->pb) : 0;
2308 end_time = AV_NOPTS_VALUE;
2310 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2314 avio_seek(ic->pb, offset, SEEK_SET);
2317 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2321 ret = ff_read_packet(ic, pkt);
2322 } while(ret == AVERROR(EAGAIN));
2325 read_size += pkt->size;
2326 st = ic->streams[pkt->stream_index];
2327 if (pkt->pts != AV_NOPTS_VALUE &&
2328 (st->start_time != AV_NOPTS_VALUE ||
2329 st->first_dts != AV_NOPTS_VALUE)) {
2330 duration = end_time = pkt->pts;
2331 if (st->start_time != AV_NOPTS_VALUE)
2332 duration -= st->start_time;
2334 duration -= st->first_dts;
/* Only accept the new duration when it grows the estimate and is
 * consistent (within 60s) with the previously observed one — guards
 * against wild outlier timestamps. */
2336 if (st->duration == AV_NOPTS_VALUE || st->info->last_duration<=0 ||
2337 (st->duration < duration && FFABS(duration - st->info->last_duration) < 60LL*st->time_base.den / st->time_base.num))
2338 st->duration = duration;
2339 st->info->last_duration = duration;
2342 av_free_packet(pkt);
2344 }while( end_time==AV_NOPTS_VALUE
2345 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2346 && ++retry <= DURATION_MAX_RETRY);
2348 fill_all_stream_timings(ic);
/* Restore the byte position and per-stream decode state disturbed by the
 * tail scan. */
2350 avio_seek(ic->pb, old_offset, SEEK_SET);
2351 for (i=0; i<ic->nb_streams; i++) {
2353 st->cur_dts= st->first_dts;
2354 st->last_IP_pts = AV_NOPTS_VALUE;
/* Choose the best available duration-estimation strategy and record which
 * one was used in ic->duration_estimation_method:
 *   1. PTS tail scan (MPEG-PS/TS on a seekable input),
 *   2. per-stream timings when any stream already has them,
 *   3. bitrate-based guess as the last resort. */
2358 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2362 /* get the file size, if possible */
2363 if (ic->iformat->flags & AVFMT_NOFILE) {
2366 file_size = avio_size(ic->pb);
2367 file_size = FFMAX(0, file_size);
2370 if ((!strcmp(ic->iformat->name, "mpeg") ||
2371 !strcmp(ic->iformat->name, "mpegts")) &&
2372 file_size && ic->pb->seekable) {
2373 /* get accurate estimate from the PTSes */
2374 estimate_timings_from_pts(ic, old_offset);
2375 ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
2376 } else if (has_duration(ic)) {
2377 /* at least one component has timings - we use them for all
2379 fill_all_stream_timings(ic);
2380 ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
2382 /* less precise: use bitrate info */
2383 estimate_timings_from_bit_rate(ic);
2384 ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE;
2386 update_stream_timings(ic);
/* Debug dump of the resulting per-stream and container timings. */
2390 AVStream av_unused *st;
2391 for(i = 0;i < ic->nb_streams; i++) {
2392 st = ic->streams[i];
2393 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2394 (double) st->start_time / AV_TIME_BASE,
2395 (double) st->duration / AV_TIME_BASE);
2397 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2398 (double) ic->start_time / AV_TIME_BASE,
2399 (double) ic->duration / AV_TIME_BASE,
2400 ic->bit_rate / 1000);
/* During stream probing: decide whether this stream's codec parameters are
 * complete enough to stop probing.  On failure, *errmsg_ptr is set (via the
 * FAIL macro) to a human-readable reason. */
2404 static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
2406 AVCodecContext *avctx = st->codec;
2408 #define FAIL(errmsg) do { \
2410 *errmsg_ptr = errmsg; \
2414 switch (avctx->codec_type) {
2415 case AVMEDIA_TYPE_AUDIO:
2416 if (!avctx->frame_size && determinable_frame_size(avctx))
2417 FAIL("unspecified frame size");
/* sample_fmt/pix_fmt are only required once a decoder has actually been
 * probed (found_decoder >= 0); container data alone cannot supply them. */
2418 if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2419 FAIL("unspecified sample format");
2420 if (!avctx->sample_rate)
2421 FAIL("unspecified sample rate");
2422 if (!avctx->channels)
2423 FAIL("unspecified number of channels");
2424 if (st->info->found_decoder >= 0 && !st->nb_decoded_frames && avctx->codec_id == AV_CODEC_ID_DTS)
2425 FAIL("no decodable DTS frames");
2427 case AVMEDIA_TYPE_VIDEO:
2429 FAIL("unspecified size");
2430 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
2431 FAIL("unspecified pixel format");
/* RV30/40 store the aspect ratio only in frame data, so require at least
 * one decoded frame when no SAR is known. */
2432 if (st->codec->codec_id == AV_CODEC_ID_RV30 || st->codec->codec_id == AV_CODEC_ID_RV40)
2433 if (!st->sample_aspect_ratio.num && !st->codec->sample_aspect_ratio.num && !st->codec_info_nb_frames)
2434 FAIL("no frame in rv30/40 and no sar");
2436 case AVMEDIA_TYPE_SUBTITLE:
2437 if (avctx->codec_id == AV_CODEC_ID_HDMV_PGS_SUBTITLE && !avctx->width)
2438 FAIL("unspecified size");
2440 case AVMEDIA_TYPE_DATA:
2441 if(avctx->codec_id == AV_CODEC_ID_NONE) return 1;
2444 if (avctx->codec_id == AV_CODEC_ID_NONE)
2445 FAIL("unknown codec");
2449 /* Returns 1 if decoded data was returned, 0 if not, or a negative error code. */
/*
 * Opens the stream's decoder if needed (recording success/failure in
 * st->info->found_decoder) and decodes packets until codec parameters are
 * known. NOTE(review): extract omits some original lines (error paths,
 * loop exit, frame free on some branches); numbering is non-contiguous.
 */
2450 static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt, AVDictionary **options)
2452 const AVCodec *codec;
2453 int got_picture = 1, ret = 0;
2454 AVFrame *frame = av_frame_alloc();
2455 AVSubtitle subtitle;
/* work on a local copy so the caller's packet is left untouched */
2456 AVPacket pkt = *avpkt;
2459 return AVERROR(ENOMEM);
2461 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2462 AVDictionary *thread_opt = NULL;
2464 codec = find_decoder(s, st, st->codec->codec_id);
/* found_decoder == -1 marks "no decoder available"; skip future attempts */
2467 st->info->found_decoder = -1;
2472 /* force thread count to 1 since the h264 decoder will not extract SPS
2473 * and PPS to extradata during multi-threaded decoding */
2474 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2475 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2477 av_dict_free(&thread_opt);
2479 st->info->found_decoder = -1;
2482 st->info->found_decoder = 1;
2483 } else if (!st->info->found_decoder)
2484 st->info->found_decoder = 1;
2486 if (st->info->found_decoder < 0) {
/* keep decoding while data (or flush with a pending picture) remains and
 * parameters/decode delay are still unknown; CODEC_CAP_CHANNEL_CONF codecs
 * must decode at least one frame to learn the channel layout */
2491 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2493 (!has_codec_parameters(st, NULL) ||
2494 !has_decode_delay_been_guessed(st) ||
2495 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2497 avcodec_get_frame_defaults(frame);
2498 switch(st->codec->codec_type) {
2499 case AVMEDIA_TYPE_VIDEO:
2500 ret = avcodec_decode_video2(st->codec, frame,
2501 &got_picture, &pkt);
2503 case AVMEDIA_TYPE_AUDIO:
2504 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
2506 case AVMEDIA_TYPE_SUBTITLE:
2507 ret = avcodec_decode_subtitle2(st->codec, &subtitle,
2508 &got_picture, &pkt);
2516 st->nb_decoded_frames++;
/* flush call (NULL data) that produced nothing: decoder is drained */
2523 if(!pkt.data && !got_picture)
2527 avcodec_free_frame(&frame);
/*
 * Look up the container tag for a codec id in a single AVCodecTag table
 * (linear scan until the AV_CODEC_ID_NONE sentinel).
 * NOTE(review): loop body and return are missing from this extract.
 */
2531 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
2533 while (tags->id != AV_CODEC_ID_NONE) {
/*
 * Reverse lookup: map a container tag to a codec id within one table.
 * First pass matches the tag exactly; second pass retries
 * case-insensitively via avpriv_toupper4() (common for fourccs).
 */
2541 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2544 for(i=0; tags[i].id != AV_CODEC_ID_NONE;i++) {
2545 if(tag == tags[i].tag)
2548 for(i=0; tags[i].id != AV_CODEC_ID_NONE; i++) {
2549 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2552 return AV_CODEC_ID_NONE;
/*
 * Map raw PCM properties to a codec id.
 * bps: bits per sample; flt: nonzero for float formats; be: nonzero for
 * big-endian; sflags: per-width bitmask selecting signed formats.
 * NOTE(review): the float/integer branch header and the conversion of bps
 * to a byte count (the switch operand below) are missing from this extract.
 */
2555 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
/* float formats: only 32- and 64-bit exist */
2559 case 32: return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
2560 case 64: return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
2561 default: return AV_CODEC_ID_NONE;
/* integer formats: bit (1 << (bytes-1)) of sflags selects signed */
2566 if (sflags & (1 << (bps - 1))) {
2568 case 1: return AV_CODEC_ID_PCM_S8;
2569 case 2: return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
2570 case 3: return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
2571 case 4: return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
2572 default: return AV_CODEC_ID_NONE;
/* unsigned variants */
2576 case 1: return AV_CODEC_ID_PCM_U8;
2577 case 2: return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
2578 case 3: return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
2579 case 4: return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
2580 default: return AV_CODEC_ID_NONE;
/*
 * Public wrapper over av_codec_get_tag2(): returns the tag, or (per the
 * visible fall-through) a default when no table contains the id.
 * NOTE(review): the return statements are missing from this extract.
 */
2586 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum AVCodecID id)
2589 if (!av_codec_get_tag2(tags, id, &tag))
/*
 * Search a NULL-terminated list of AVCodecTag tables for a codec id;
 * on a match, store the tag through *tag. Return value (success flag)
 * is set on lines missing from this extract.
 */
2594 int av_codec_get_tag2(const AVCodecTag * const *tags, enum AVCodecID id,
2598 for(i=0; tags && tags[i]; i++){
2599 const AVCodecTag *codec_tags = tags[i];
2600 while (codec_tags->id != AV_CODEC_ID_NONE) {
2601 if (codec_tags->id == id) {
2602 *tag = codec_tags->tag;
/*
 * Reverse lookup across a NULL-terminated list of tag tables: return the
 * first codec id matching the tag, or AV_CODEC_ID_NONE.
 */
2611 enum AVCodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2614 for(i=0; tags && tags[i]; i++){
2615 enum AVCodecID id= ff_codec_get_id(tags[i], tag);
2616 if(id!=AV_CODEC_ID_NONE) return id;
2618 return AV_CODEC_ID_NONE;
/*
 * Fill in missing chapter end times: a chapter with no end runs until the
 * start of the next chapter (in its own time base), capped by the file's
 * total duration. NOTE(review): extract omits some lines (the INT64_MAX
 * fallback for `end` and the assignment inside the inner loop).
 */
2621 static void compute_chapters_end(AVFormatContext *s)
/* absolute end of file in AV_TIME_BASE units (start may be unknown) */
2624 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2626 for (i = 0; i < s->nb_chapters; i++)
2627 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2628 AVChapter *ch = s->chapters[i];
2629 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
/* clamp to the nearest following chapter start */
2632 for (j = 0; j < s->nb_chapters; j++) {
2633 AVChapter *ch1 = s->chapters[j];
2634 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2635 if (j != i && next_start > ch->start && next_start < end)
/* no usable bound found: make the chapter zero-length rather than infinite */
2638 ch->end = (end == INT64_MAX) ? ch->start : end;
/*
 * Return the i-th "standard" frame rate expressed in units of 1/(12*1001)
 * per second: indices below 60*12 map to (i+1)*1001 (i.e. 1..720 fps on the
 * NTSC grid), higher indices select from a fixed table of common rates.
 */
2642 static int get_std_framerate(int i){
2643 if(i<60*12) return (i+1)*1001;
2644 else return ((const int[]){24,30,60,12,15,48})[i-60*12]*1000*12;
/*
2648 * Is the time base unreliable.
2649 * This is a heuristic to balance between quick acceptance of the values in
2650 * the headers vs. some extra checks.
2651 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2652 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2653 * And there are "variable" fps files this needs to detect as well.
 */
2655 static int tb_unreliable(AVCodecContext *c){
/* implausibly fine (>=101 fps equivalent) or coarse (<5 fps) time bases,
 * plus codecs/containers known to misreport, are treated as unreliable */
2656 if( c->time_base.den >= 101L*c->time_base.num
2657 || c->time_base.den < 5L*c->time_base.num
2658 /* || c->codec_tag == AV_RL32("DIVX")
2659 || c->codec_tag == AV_RL32("XVID")*/
2660 || c->codec_tag == AV_RL32("mp4v")
2661 || c->codec_id == AV_CODEC_ID_MPEG2VIDEO
2662 || c->codec_id == AV_CODEC_ID_H264
/* Deprecated public entry point kept for ABI compatibility; simply
 * forwards to avformat_find_stream_info() with no options. */
2668 #if FF_API_FORMAT_PARAMETERS
2669 int av_find_stream_info(AVFormatContext *ic)
2671 return avformat_find_stream_info(ic, NULL);
/*
 * (Re)allocate avctx->extradata to `size` bytes plus the required zeroed
 * input padding. On any failure extradata_size is reset to 0.
 * Returns 0 on success, AVERROR(EINVAL) for bad sizes, AVERROR(ENOMEM) on
 * allocation failure. NOTE(review): the free of any previous extradata and
 * the final return are on lines missing from this extract.
 */
2675 int ff_alloc_extradata(AVCodecContext *avctx, int size)
/* reject sizes whose padded allocation would overflow */
2679 if (size < 0 || size >= INT32_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
2680 avctx->extradata_size = 0;
2681 return AVERROR(EINVAL);
2683 avctx->extradata = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
2684 if (avctx->extradata) {
/* decoders may over-read: zero the padding region */
2685 memset(avctx->extradata + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2686 avctx->extradata_size = size;
2689 avctx->extradata_size = 0;
2690 ret = AVERROR(ENOMEM);
/*
 * Probe the input by reading (and, where necessary, decoding) packets until
 * every stream's codec parameters are known, then estimate frame rates and
 * timings. Read packets are buffered so demuxing can continue afterwards.
 * Returns >=0 on success, a negative AVERROR otherwise.
 * NOTE(review): this extract omits many original source lines (numbering is
 * non-contiguous); comments below describe only the visible logic.
 */
2695 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2697 int i, count, ret = 0, j;
2700 AVPacket pkt1, *pkt;
2701 int64_t old_offset = avio_tell(ic->pb);
2702 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2703 int flush_codecs = ic->probesize > 0;
2706 av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
/* --- phase 1: set up parsers and (where cheap) open decoders ------------ */
2708 for(i=0;i<ic->nb_streams;i++) {
2709 const AVCodec *codec;
2710 AVDictionary *thread_opt = NULL;
2711 st = ic->streams[i];
2713 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2714 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2715 /* if(!st->time_base.num)
2717 if(!st->codec->time_base.num)
2718 st->codec->time_base= st->time_base;
2720 //only for the split stuff
2721 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2722 st->parser = av_parser_init(st->codec->codec_id);
2724 if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
2725 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2726 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
2727 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
2729 } else if (st->need_parsing) {
2730 av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
2731 "%s, packets or times may be invalid.\n",
2732 avcodec_get_name(st->codec->codec_id));
2735 codec = find_decoder(ic, st, st->codec->codec_id);
2737 /* force thread count to 1 since the h264 decoder will not extract SPS
2738 * and PPS to extradata during multi-threaded decoding */
2739 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2741 /* Ensure that subtitle_header is properly set. */
2742 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2743 && codec && !st->codec->codec)
2744 avcodec_open2(st->codec, codec, options ? &options[i]
2747 //try to just open decoders, in case this is enough to get parameters
2748 if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) {
2749 if (codec && !st->codec->codec)
2750 avcodec_open2(st->codec, codec, options ? &options[i]
2754 av_dict_free(&thread_opt);
/* reset per-stream fps/dts bookkeeping before the probing loop */
2757 for (i=0; i<ic->nb_streams; i++) {
2758 #if FF_API_R_FRAME_RATE
2759 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2761 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2762 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
/* --- phase 2: main probing loop (loop header on a missing line) -------- */
2768 if (ff_check_interrupt(&ic->interrupt_callback)){
2770 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2774 /* check if one codec still needs to be handled */
2775 for(i=0;i<ic->nb_streams;i++) {
2776 int fps_analyze_framecount = 20;
2778 st = ic->streams[i];
2779 if (!has_codec_parameters(st, NULL))
2781 /* if the timebase is coarse (like the usual millisecond precision
2782 of mkv), we need to analyze more frames to reliably arrive at
2784 if (av_q2d(st->time_base) > 0.0005)
2785 fps_analyze_framecount *= 2;
2786 if (ic->fps_probe_size >= 0)
2787 fps_analyze_framecount = ic->fps_probe_size;
2788 if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2789 fps_analyze_framecount = 0;
2790 /* variable fps and no guess at the real fps */
2791 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2792 && st->info->duration_count < fps_analyze_framecount
2793 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2795 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2797 if (st->first_dts == AV_NOPTS_VALUE &&
2798 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2799 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2802 if (i == ic->nb_streams) {
2803 /* NOTE: if the format has no header, then we need to read
2804 some packets to get most of the streams, so we cannot
2806 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2807 /* if we found the info for all the codecs, we can stop */
2809 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2814 /* we did not get all the codec info, but we read too much data */
2815 if (read_size >= ic->probesize) {
2817 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit of %d bytes reached\n", ic->probesize);
2818 for (i = 0; i < ic->nb_streams; i++)
2819 if (!ic->streams[i]->r_frame_rate.num &&
2820 ic->streams[i]->info->duration_count <= 1 &&
2821 strcmp(ic->iformat->name, "image2"))
2822 av_log(ic, AV_LOG_WARNING,
2823 "Stream #%d: not enough frames to estimate rate; "
2824 "consider increasing probesize\n", i);
2828 /* NOTE: a new stream can be added there if no header in file
2829 (AVFMTCTX_NOHEADER) */
2830 ret = read_frame_internal(ic, &pkt1);
2831 if (ret == AVERROR(EAGAIN))
/* buffer the packet so it is re-delivered to the user after probing */
2839 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2840 free_packet_buffer(&ic->packet_buffer, &ic->packet_buffer_end);
2842 pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
2843 &ic->packet_buffer_end);
2845 ret = AVERROR(ENOMEM);
2846 goto find_stream_info_err;
2848 if ((ret = av_dup_packet(pkt)) < 0)
2849 goto find_stream_info_err;
2852 st = ic->streams[pkt->stream_index];
2853 if (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC))
2854 read_size += pkt->size;
2856 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2857 /* check for non-increasing dts */
2858 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2859 st->info->fps_last_dts >= pkt->dts) {
2860 av_log(ic, AV_LOG_DEBUG, "Non-increasing DTS in stream %d: "
2861 "packet %d with DTS %"PRId64", packet %d with DTS "
2862 "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2863 st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2864 st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
2866 /* check for a discontinuity in dts - if the difference in dts
2867 * is more than 1000 times the average packet duration in the sequence,
2868 * we treat it as a discontinuity */
2869 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2870 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2871 (pkt->dts - st->info->fps_last_dts) / 1000 >
2872 (st->info->fps_last_dts - st->info->fps_first_dts) / (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2873 av_log(ic, AV_LOG_WARNING, "DTS discontinuity in stream %d: "
2874 "packet %d with DTS %"PRId64", packet %d with DTS "
2875 "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2876 st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2877 st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
2880 /* update stored dts values */
2881 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2882 st->info->fps_first_dts = pkt->dts;
2883 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2885 st->info->fps_last_dts = pkt->dts;
2886 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
/* stop probing this stream once analyzed duration exceeds the limit */
2888 if (st->codec_info_nb_frames>1) {
2890 if (st->time_base.den > 0)
2891 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
2892 if (st->avg_frame_rate.num > 0)
2893 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), AV_TIME_BASE_Q));
2896 && st->codec_info_nb_frames>30
2897 && st->info->fps_first_dts != AV_NOPTS_VALUE
2898 && st->info->fps_last_dts != AV_NOPTS_VALUE)
2899 t = FFMAX(t, av_rescale_q(st->info->fps_last_dts - st->info->fps_first_dts, st->time_base, AV_TIME_BASE_Q));
2901 if (t >= ic->max_analyze_duration) {
2902 av_log(ic, AV_LOG_VERBOSE, "max_analyze_duration %d reached at %"PRId64" microseconds\n", ic->max_analyze_duration, t);
2905 if (pkt->duration) {
2906 st->info->codec_info_duration += pkt->duration;
2907 st->info->codec_info_duration_fields += st->parser && st->need_parsing && st->codec->ticks_per_frame==2 ? st->parser->repeat_pict + 1 : 2;
/* accumulate dts-spacing statistics used later for fps guessing */
2910 #if FF_API_R_FRAME_RATE
2912 int64_t last = st->info->last_dts;
2914 if( pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last
2915 && pkt->dts - (uint64_t)last < INT64_MAX){
2916 double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2917 int64_t duration= pkt->dts - last;
2919 if (!st->info->duration_error)
2920 st->info->duration_error = av_mallocz(sizeof(st->info->duration_error[0])*2);
2921 if (!st->info->duration_error)
2922 return AVERROR(ENOMEM);
2924 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2925 // av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2926 for (i=0; i<MAX_STD_TIMEBASES; i++) {
2927 int framerate= get_std_framerate(i);
2928 double sdts= dts*framerate/(1001*12);
2930 int64_t ticks= llrint(sdts+j*0.5);
2931 double error= sdts - ticks + j*0.5;
2932 st->info->duration_error[j][0][i] += error;
2933 st->info->duration_error[j][1][i] += error*error;
2936 st->info->duration_count++;
2937 // ignore the first 4 values, they might have some random jitter
2938 if (st->info->duration_count > 3 && is_relative(pkt->dts) == is_relative(last))
2939 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2941 if (pkt->dts != AV_NOPTS_VALUE)
2942 st->info->last_dts = pkt->dts;
/* let the parser split out extradata (e.g. SPS/PPS) from the bitstream */
2945 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2946 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2947 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2948 if (ff_alloc_extradata(st->codec, i))
2949 return AVERROR(ENOMEM);
2950 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2954 /* if still no information, we try to open the codec and to
2955 decompress the frame. We try to avoid that in most cases as
2956 it takes longer and uses more memory. For MPEG-4, we need to
2957 decompress for QuickTime.
2959 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2960 least one frame of codec data, this makes sure the codec initializes
2961 the channel configuration and does not only trust the values from the container.
2963 try_decode_frame(ic, st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2965 st->codec_info_nb_frames++;
/* --- phase 3: flush decoders with empty packets to drain delay --------- */
2970 AVPacket empty_pkt = { 0 };
2972 av_init_packet(&empty_pkt);
2974 for(i=0;i<ic->nb_streams;i++) {
2976 st = ic->streams[i];
2978 /* flush the decoders */
2979 if (st->info->found_decoder == 1) {
2981 err = try_decode_frame(ic, st, &empty_pkt,
2982 (options && i < orig_nb_streams) ?
2983 &options[i] : NULL);
2984 } while (err > 0 && !has_codec_parameters(st, NULL));
2987 av_log(ic, AV_LOG_INFO,
2988 "decoding for stream %d failed\n", st->index);
2994 // close codecs which were opened in try_decode_frame()
2995 for(i=0;i<ic->nb_streams;i++) {
2996 st = ic->streams[i];
2997 avcodec_close(st->codec);
/* --- phase 4: derive per-stream frame rates and dispositions ----------- */
2999 for(i=0;i<ic->nb_streams;i++) {
3000 st = ic->streams[i];
3001 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
3002 if(st->codec->codec_id == AV_CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
3003 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
3004 if (avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
3005 st->codec->codec_tag= tag;
3008 /* estimate average framerate if not set by demuxer */
3009 if (st->info->codec_info_duration_fields && !st->avg_frame_rate.num && st->info->codec_info_duration) {
3011 double best_error = 0.01;
/* guard against overflow in the av_reduce() products below */
3013 if (st->info->codec_info_duration >= INT64_MAX / st->time_base.num / 2||
3014 st->info->codec_info_duration_fields >= INT64_MAX / st->time_base.den ||
3015 st->info->codec_info_duration < 0)
3017 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
3018 st->info->codec_info_duration_fields*(int64_t)st->time_base.den,
3019 st->info->codec_info_duration*2*(int64_t)st->time_base.num, 60000);
3021 /* round guessed framerate to a "standard" framerate if it's
3022 * within 1% of the original estimate*/
3023 for (j = 1; j < MAX_STD_TIMEBASES; j++) {
3024 AVRational std_fps = { get_std_framerate(j), 12*1001 };
3025 double error = fabs(av_q2d(st->avg_frame_rate) / av_q2d(std_fps) - 1);
3027 if (error < best_error) {
3029 best_fps = std_fps.num;
3033 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
3034 best_fps, 12*1001, INT_MAX);
3037 // the check for tb_unreliable() is not completely correct, since this is not about handling
3038 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
3039 // ipmovie.c produces.
3040 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
3041 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
3042 if (st->info->duration_count>1 && !st->r_frame_rate.num
3043 && tb_unreliable(st->codec)) {
3045 double best_error= 0.01;
/* pick the standard rate minimizing the dts-spacing error variance */
3047 for (j=0; j<MAX_STD_TIMEBASES; j++) {
3050 if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
3052 if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
3055 int n= st->info->duration_count;
3056 double a= st->info->duration_error[k][0][j] / n;
3057 double error= st->info->duration_error[k][1][j]/n - a*a;
3059 if(error < best_error && best_error> 0.000000001){
3061 num = get_std_framerate(j);
3064 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
3067 // do not increase frame rate by more than 1 % in order to match a standard rate.
3068 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
3069 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
/* last resort: derive r_frame_rate from codec or stream time base */
3072 if (!st->r_frame_rate.num){
3073 if( st->codec->time_base.den * (int64_t)st->time_base.num
3074 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
3075 st->r_frame_rate.num = st->codec->time_base.den;
3076 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
3078 st->r_frame_rate.num = st->time_base.den;
3079 st->r_frame_rate.den = st->time_base.num;
3082 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
3083 if(!st->codec->bits_per_coded_sample)
3084 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
3085 // set stream disposition based on audio service type
3086 switch (st->codec->audio_service_type) {
3087 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
3088 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
3089 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
3090 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
3091 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
3092 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
3093 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
3094 st->disposition = AV_DISPOSITION_COMMENT; break;
3095 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
3096 st->disposition = AV_DISPOSITION_KARAOKE; break;
/* --- phase 5: global timing estimation and final diagnostics ----------- */
3102 estimate_timings(ic, old_offset);
3104 if (ret >= 0 && ic->nb_streams)
3105 ret = -1; /* we could not have all the codec parameters before EOF */
3106 for(i=0;i<ic->nb_streams;i++) {
3108 st = ic->streams[i];
3109 if (!has_codec_parameters(st, &errmsg)) {
3111 avcodec_string(buf, sizeof(buf), st->codec, 0);
3112 av_log(ic, AV_LOG_WARNING,
3113 "Could not find codec parameters for stream %d (%s): %s\n"
3114 "Consider increasing the value for the 'analyzeduration' and 'probesize' options\n",
3121 compute_chapters_end(ic);
3123 find_stream_info_err:
3124 for (i=0; i < ic->nb_streams; i++) {
3125 st = ic->streams[i];
3126 if (ic->streams[i]->codec && ic->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
3127 ic->streams[i]->codec->thread_count = 0;
3129 av_freep(&st->info->duration_error);
3130 av_freep(&ic->streams[i]->info);
3133 av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
/*
 * Find the next program (after `last`, or the first when last is NULL)
 * that contains stream index s; returns the matching AVProgram.
 * NOTE(review): the "advance past `last`" bookkeeping and the final
 * NULL return are on lines missing from this extract.
 */
3137 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
3141 for (i = 0; i < ic->nb_programs; i++) {
3142 if (ic->programs[i] == last) {
3146 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
3147 if (ic->programs[i]->stream_index[j] == s)
3148 return ic->programs[i];
/*
 * Select the "best" stream of a given media type: prefer streams with more
 * decoded probe frames, then higher bitrate, then more total frames; skip
 * hearing/visual-impaired dispositions and (optionally) streams outside the
 * related program. Returns the stream index, AVERROR_STREAM_NOT_FOUND, or
 * AVERROR_DECODER_NOT_FOUND when decoder_ret was requested but none exists.
 * NOTE(review): extract omits some lines (continue statements, the
 * best_count update, the restart bookkeeping); numbering is non-contiguous.
 */
3154 int av_find_best_stream(AVFormatContext *ic,
3155 enum AVMediaType type,
3156 int wanted_stream_nb,
3158 AVCodec **decoder_ret,
3161 int i, nb_streams = ic->nb_streams;
3162 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1, best_bitrate = -1, best_multiframe = -1, count, bitrate, multiframe;
3163 unsigned *program = NULL;
3164 AVCodec *decoder = NULL, *best_decoder = NULL;
/* restrict the search to the program containing related_stream, if any */
3166 if (related_stream >= 0 && wanted_stream_nb < 0) {
3167 AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
3169 program = p->stream_index;
3170 nb_streams = p->nb_stream_indexes;
3173 for (i = 0; i < nb_streams; i++) {
3174 int real_stream_index = program ? program[i] : i;
3175 AVStream *st = ic->streams[real_stream_index];
3176 AVCodecContext *avctx = st->codec;
3177 if (avctx->codec_type != type)
3179 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
3181 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
3184 decoder = find_decoder(ic, st, st->codec->codec_id);
3187 ret = AVERROR_DECODER_NOT_FOUND;
3191 count = st->codec_info_nb_frames;
3192 bitrate = avctx->bit_rate;
/* cap the frame-count criterion at 5 so it mainly separates streams that
 * produced almost no frames during probing */
3193 multiframe = FFMIN(5, count);
3194 if ((best_multiframe > multiframe) ||
3195 (best_multiframe == multiframe && best_bitrate > bitrate) ||
3196 (best_multiframe == multiframe && best_bitrate == bitrate && best_count >= count))
3199 best_bitrate = bitrate;
3200 best_multiframe = multiframe;
3201 ret = real_stream_index;
3202 best_decoder = decoder;
/* nothing found inside the program: retry over all streams */
3203 if (program && i == nb_streams - 1 && ret < 0) {
3205 nb_streams = ic->nb_streams;
3206 i = 0; /* no related stream found, try again with everything */
3210 *decoder_ret = best_decoder;
3214 /*******************************************************/
/*
 * Resume a paused network stream: prefer the demuxer's read_play hook,
 * fall back to avio_pause(pb, 0), else report ENOSYS.
 */
3216 int av_read_play(AVFormatContext *s)
3218 if (s->iformat->read_play)
3219 return s->iformat->read_play(s);
3221 return avio_pause(s->pb, 0);
3222 return AVERROR(ENOSYS);
/*
 * Pause a network stream: mirror of av_read_play() — demuxer hook first,
 * then avio_pause(pb, 1), else ENOSYS.
 */
3225 int av_read_pause(AVFormatContext *s)
3227 if (s->iformat->read_pause)
3228 return s->iformat->read_pause(s);
3230 return avio_pause(s->pb, 1);
3231 return AVERROR(ENOSYS);
/*
 * Free the LAST stream of s (asserted below) and all its owned resources,
 * then shrink nb_streams. Callers must only pass s->streams[nb_streams-1].
 */
3234 void ff_free_stream(AVFormatContext *s, AVStream *st){
3235 av_assert0(s->nb_streams>0);
3236 av_assert0(s->streams[ s->nb_streams-1 ] == st);
3239 av_parser_close(st->parser);
3241 if (st->attached_pic.data)
3242 av_free_packet(&st->attached_pic);
3243 av_dict_free(&st->metadata);
3244 av_freep(&st->probe_data.buf);
3245 av_freep(&st->index_entries);
/* codec context sub-allocations must go before the context itself */
3246 av_freep(&st->codec->extradata);
3247 av_freep(&st->codec->subtitle_header);
3248 av_freep(&st->codec);
3249 av_freep(&st->priv_data);
3251 av_freep(&st->info->duration_error);
3252 av_freep(&st->info);
/* release the stream slot and pop it from the array */
3253 av_freep(&s->streams[ --s->nb_streams ]);
/*
 * Free an AVFormatContext and everything it owns: option-allocated private
 * data, streams (in reverse order via ff_free_stream), programs, chapters
 * and metadata. NOTE(review): the final free of s itself is on a line
 * missing from this extract.
 */
3256 void avformat_free_context(AVFormatContext *s)
3264 if (s->iformat && s->iformat->priv_class && s->priv_data)
3265 av_opt_free(s->priv_data);
/* reverse order: ff_free_stream() requires the last stream each time */
3267 for(i=s->nb_streams-1; i>=0; i--) {
3268 ff_free_stream(s, s->streams[i]);
3270 for(i=s->nb_programs-1; i>=0; i--) {
3271 av_dict_free(&s->programs[i]->metadata);
3272 av_freep(&s->programs[i]->stream_index);
3273 av_freep(&s->programs[i]);
3275 av_freep(&s->programs);
3276 av_freep(&s->priv_data);
3277 while(s->nb_chapters--) {
3278 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
3279 av_freep(&s->chapters[s->nb_chapters]);
3281 av_freep(&s->chapters);
3282 av_dict_free(&s->metadata);
3283 av_freep(&s->streams);
3287 #if FF_API_CLOSE_INPUT_FILE
3288 void av_close_input_file(AVFormatContext *s)
3290 avformat_close_input(&s);
/*
 * Close an input context: flush queued packets, call the demuxer's
 * read_close hook, free the context and (unless AVFMT_NOFILE /
 * AVFMT_FLAG_CUSTOM_IO, checked below) close the underlying AVIOContext.
 * NOTE(review): the NULL-check on *ps, the pb close and the *ps = NULL
 * reset are on lines missing from this extract.
 */
3294 void avformat_close_input(AVFormatContext **ps)
3305 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
3306 (s->flags & AVFMT_FLAG_CUSTOM_IO))
3309 flush_packet_queue(s);
3312 if (s->iformat->read_close)
3313 s->iformat->read_close(s);
3316 avformat_free_context(s);
/* Deprecated wrapper around avformat_new_stream(); the id assignment and
 * return are on lines missing from this extract. */
3323 #if FF_API_NEW_STREAM
3324 AVStream *av_new_stream(AVFormatContext *s, int id)
3326 AVStream *st = avformat_new_stream(s, NULL);
/*
 * Append a new AVStream to s, allocating its info struct and codec context
 * (optionally preset for codec c) and initializing timing fields to sane
 * defaults. Returns the new stream, or NULL on allocation failure (the
 * error-path frees are on lines missing from this extract).
 */
3333 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
/* guard the realloc size computation against overflow */
3339 if (s->nb_streams >= INT_MAX/sizeof(*streams))
3341 streams = av_realloc_array(s->streams, s->nb_streams + 1, sizeof(*streams));
3344 s->streams = streams;
3346 st = av_mallocz(sizeof(AVStream));
3349 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
3353 st->info->last_dts = AV_NOPTS_VALUE;
3355 st->codec = avcodec_alloc_context3(c);
3357 /* no default bitrate if decoding */
3358 st->codec->bit_rate = 0;
3360 st->index = s->nb_streams;
3361 st->start_time = AV_NOPTS_VALUE;
3362 st->duration = AV_NOPTS_VALUE;
3363 /* we set the current DTS to 0 so that formats without any timestamps
3364 but durations get some timestamps, formats with some unknown
3365 timestamps have their first few packets buffered and the
3366 timestamps corrected before they are returned to the user */
3367 st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
3368 st->first_dts = AV_NOPTS_VALUE;
3369 st->probe_packets = MAX_PROBE_PACKETS;
3370 st->pts_wrap_reference = AV_NOPTS_VALUE;
3371 st->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
3373 /* default pts setting is MPEG-like */
3374 avpriv_set_pts_info(st, 33, 1, 90000);
3375 st->last_IP_pts = AV_NOPTS_VALUE;
3376 for(i=0; i<MAX_REORDER_DELAY+1; i++)
3377 st->pts_buffer[i]= AV_NOPTS_VALUE;
3379 st->sample_aspect_ratio = (AVRational){0,1};
3381 #if FF_API_R_FRAME_RATE
3382 st->info->last_dts = AV_NOPTS_VALUE;
3384 st->info->fps_first_dts = AV_NOPTS_VALUE;
3385 st->info->fps_last_dts = AV_NOPTS_VALUE;
3387 s->streams[s->nb_streams++] = st;
/*
 * Return the program with the given id, creating and registering a new one
 * (with default discard/pts-wrap/timing fields) if none exists yet.
 * NOTE(review): the "if (!program)" guard and id assignment are on lines
 * missing from this extract.
 */
3391 AVProgram *av_new_program(AVFormatContext *ac, int id)
3393 AVProgram *program=NULL;
3396 av_dlog(ac, "new_program: id=0x%04x\n", id);
/* reuse an existing program with a matching id */
3398 for(i=0; i<ac->nb_programs; i++)
3399 if(ac->programs[i]->id == id)
3400 program = ac->programs[i];
3403 program = av_mallocz(sizeof(AVProgram));
3406 dynarray_add(&ac->programs, &ac->nb_programs, program);
3407 program->discard = AVDISCARD_NONE;
3410 program->pts_wrap_reference = AV_NOPTS_VALUE;
3411 program->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
3413 program->start_time =
3414 program->end_time = AV_NOPTS_VALUE;
/*
 * Return the chapter with the given id, creating and registering a new one
 * if needed, then (re)set its title metadata, time base and start/end.
 * NOTE(review): the end assignment and return are on lines missing from
 * this extract.
 */
3419 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
3421 AVChapter *chapter = NULL;
/* reuse an existing chapter with a matching id */
3424 for(i=0; i<s->nb_chapters; i++)
3425 if(s->chapters[i]->id == id)
3426 chapter = s->chapters[i];
3429 chapter= av_mallocz(sizeof(AVChapter));
3432 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3434 av_dict_set(&chapter->metadata, "title", title, 0);
3436 chapter->time_base= time_base;
3437 chapter->start = start;
/*
 * Add stream index idx to the program with id progid, validating the index
 * and skipping duplicates; grows the program's stream_index array.
 */
3443 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3446 AVProgram *program=NULL;
3449 if (idx >= ac->nb_streams) {
3450 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3454 for(i=0; i<ac->nb_programs; i++){
3455 if(ac->programs[i]->id != progid)
3457 program = ac->programs[i];
/* already present: nothing to do (skip handled on a missing line) */
3458 for(j=0; j<program->nb_stream_indexes; j++)
3459 if(program->stream_index[j] == idx)
3462 tmp = av_realloc_array(program->stream_index, program->nb_stream_indexes+1, sizeof(unsigned int));
3465 program->stream_index = tmp;
3466 program->stream_index[program->nb_stream_indexes++] = idx;
/*
 * Log a rate with a human-friendly format: two decimals when fractional,
 * whole numbers otherwise, and a "k" suffix for exact multiples of 1000.
 */
3471 static void print_fps(double d, const char *postfix){
3472 uint64_t v= lrintf(d*100);
3473 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3474 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3475 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/*
 * Pretty-print a metadata dictionary at INFO level, skipping the "language"
 * tag (shown elsewhere) and a dictionary that contains ONLY "language".
 * Control characters in values are filtered; CR becomes a space and LF
 * re-indents the continuation line.
 * NOTE(review): the inner copy loop's declarations/advancement are on lines
 * missing from this extract.
 */
3478 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3480 if(m && !(av_dict_count(m) == 1 && av_dict_get(m, "language", NULL, 0))){
3481 AVDictionaryEntry *tag=NULL;
3483 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3484 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3485 if(strcmp("language", tag->key)){
3486 const char *p = tag->value;
3487 av_log(ctx, AV_LOG_INFO, "%s %-16s: ", indent, tag->key);
/* print up to the next control character (BS, LF, VT, FF, CR) */
3490 size_t len = strcspn(p, "\x8\xa\xb\xc\xd");
3491 av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3492 av_log(ctx, AV_LOG_INFO, "%s", tmp);
3494 if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3495 if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s %-16s: ", indent, "");
3498 av_log(ctx, AV_LOG_INFO, "\n");
3504 /* "user interface" functions */
/*
 * Print one stream's description at INFO level: index, optional container
 * id, language, codec string, aspect ratios, frame-rate/time-base figures
 * for video, disposition flags and metadata.
 * NOTE(review): extract omits some lines (buf declaration, lang NULL check,
 * the av_reduce bound argument); numbering is non-contiguous.
 */
3505 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3508 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3509 AVStream *st = ic->streams[i];
/* reduce the time base by its gcd for a compact num/den display */
3510 int g = av_gcd(st->time_base.num, st->time_base.den);
3511 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3512 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3513 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);
3514 /* the pid is an important information, so we display it */
3515 /* XXX: add a generic system */
3516 if (flags & AVFMT_SHOW_IDS)
3517 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3519 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3520 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3521 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* only show SAR/DAR when the stream-level SAR differs from the codec's */
3522 if (st->sample_aspect_ratio.num && // default
3523 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3524 AVRational display_aspect_ratio;
3525 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3526 st->codec->width*st->sample_aspect_ratio.num,
3527 st->codec->height*st->sample_aspect_ratio.den,
3529 av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3530 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3531 display_aspect_ratio.num, display_aspect_ratio.den);
3533 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3534 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3535 print_fps(av_q2d(st->avg_frame_rate), "fps");
3536 #if FF_API_R_FRAME_RATE
3537 if(st->r_frame_rate.den && st->r_frame_rate.num)
3538 print_fps(av_q2d(st->r_frame_rate), "tbr");
3540 if(st->time_base.den && st->time_base.num)
3541 print_fps(1/av_q2d(st->time_base), "tbn");
3542 if(st->codec->time_base.den && st->codec->time_base.num)
3543 print_fps(1/av_q2d(st->codec->time_base), "tbc");
3545 if (st->disposition & AV_DISPOSITION_DEFAULT)
3546 av_log(NULL, AV_LOG_INFO, " (default)");
3547 if (st->disposition & AV_DISPOSITION_DUB)
3548 av_log(NULL, AV_LOG_INFO, " (dub)");
3549 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3550 av_log(NULL, AV_LOG_INFO, " (original)");
3551 if (st->disposition & AV_DISPOSITION_COMMENT)
3552 av_log(NULL, AV_LOG_INFO, " (comment)");
3553 if (st->disposition & AV_DISPOSITION_LYRICS)
3554 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3555 if (st->disposition & AV_DISPOSITION_KARAOKE)
3556 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3557 if (st->disposition & AV_DISPOSITION_FORCED)
3558 av_log(NULL, AV_LOG_INFO, " (forced)");
3559 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3560 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3561 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3562 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3563 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3564 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3565 av_log(NULL, AV_LOG_INFO, "\n");
3566 dump_metadata(NULL, st->metadata, " ");
/* Print detailed information about the input or output format: duration,
 * start time, bitrate, chapters, programs and streams, followed by the
 * per-stream lines via dump_stream_format().  Streams that belong to a
 * program are printed under it; the `printed` array tracks which stream
 * indices have already been shown so the final loop only prints leftovers.
 * NOTE(review): the parameter list (index, url, is_output) and several
 * interior lines are missing from this extraction — verify upstream. */
3569 void av_dump_format(AVFormatContext *ic,
3575 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
/* av_mallocz failure: bail out (allocation of the printed-flags array). */
3576 if (ic->nb_streams && !printed)
3579 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3580 is_output ? "Output" : "Input",
3582 is_output ? ic->oformat->name : ic->iformat->name,
3583 is_output ? "to" : "from", url);
3584 dump_metadata(NULL, ic->metadata, " ");
3586 av_log(NULL, AV_LOG_INFO, " Duration: ");
3587 if (ic->duration != AV_NOPTS_VALUE) {
3588 int hours, mins, secs, us;
/* +5000 rounds to the nearest 1/100th of a second before splitting. */
3589 int64_t duration = ic->duration + 5000;
3590 secs = duration / AV_TIME_BASE;
3591 us = duration % AV_TIME_BASE;
3596 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3597 (100 * us) / AV_TIME_BASE);
3599 av_log(NULL, AV_LOG_INFO, "N/A");
3601 if (ic->start_time != AV_NOPTS_VALUE) {
3603 av_log(NULL, AV_LOG_INFO, ", start: ");
3604 secs = ic->start_time / AV_TIME_BASE;
/* abs() keeps the fractional part positive for negative start times. */
3605 us = abs(ic->start_time % AV_TIME_BASE);
3606 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3607 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3609 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3611 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3613 av_log(NULL, AV_LOG_INFO, "N/A");
3615 av_log(NULL, AV_LOG_INFO, "\n");
/* Chapter listing: start/end expressed in seconds via the chapter tb. */
3617 for (i = 0; i < ic->nb_chapters; i++) {
3618 AVChapter *ch = ic->chapters[i];
3619 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3620 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3621 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3623 dump_metadata(NULL, ch->metadata, " ");
/* Program listing: dump member streams and mark them as printed. */
3625 if(ic->nb_programs) {
3626 int j, k, total = 0;
3627 for(j=0; j<ic->nb_programs; j++) {
3628 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3630 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3631 name ? name->value : "");
3632 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3633 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3634 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3635 printed[ic->programs[j]->stream_index[k]] = 1;
3637 total += ic->programs[j]->nb_stream_indexes;
3639 if (total < ic->nb_streams)
3640 av_log(NULL, AV_LOG_INFO, " No Program\n");
/* Finally print any stream not covered by a program. */
3642 for(i=0;i<ic->nb_streams;i++)
3644 dump_stream_format(ic, i, index, is_output);
/* Current time as an NTP timestamp in microseconds: wall-clock time
 * truncated to millisecond precision, shifted by the 1900->1970 epoch
 * offset NTP_OFFSET_US. */
3649 uint64_t ff_ntp_time(void)
3651 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/* Expand a frame-number pattern: copy `path` into `buf`, replacing a single
 * "%[0-9]*d" specifier with `number` (zero-padded to the requested width).
 * Returns < 0 if buf is too small or the pattern contains no (or more than
 * one) %d — the percentd_found flag enforces exactly one.
 * NOTE(review): most of the scanning loop (the '%' handling, the copy of
 * ordinary characters, the error returns) is missing from this extraction
 * — verify against upstream before touching this parser. */
3654 int av_get_frame_filename(char *buf, int buf_size,
3655 const char *path, int number)
3658 char *q, buf1[20], c;
3659 int nd, len, percentd_found;
/* Parse the optional zero-pad width digits between '%' and 'd'. */
3671 while (av_isdigit(*p)) {
3672 nd = nd * 10 + *p++ - '0';
3675 } while (av_isdigit(c));
/* Format the number with the requested width and splice it in. */
3684 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3686 if ((q - buf + len) > buf_size - 1)
3688 memcpy(q, buf1, len);
3696 if ((q - buf) < buf_size - 1)
3700 if (!percentd_found)
/* Hex-dump `size` bytes of `buf`, 16 per row, each row followed by its
 * printable-ASCII rendering ('.'-substituted outside ' '..'~').  Output
 * goes to FILE *f when given, otherwise to av_log at `level`.
 * NOTE(review): row-offset printing and loop tails are missing from this
 * extraction — verify upstream. */
3709 static void hex_dump_internal(void *avcl, FILE *f, int level,
3710 const uint8_t *buf, int size)
3713 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3715 for(i=0;i<size;i+=16) {
3722 PRINT(" %02x", buf[i+j]);
/* Second pass over the row: printable characters verbatim, rest as '.'. */
3727 for(j=0;j<len;j++) {
3729 if (c < ' ' || c > '~')
/* Public wrapper: hex dump to a stdio stream (no logging context/level). */
3738 void av_hex_dump(FILE *f, const uint8_t *buf, int size)
3740 hex_dump_internal(NULL, f, 0, buf, size);
/* Public wrapper: hex dump through av_log with the given context/level. */
3743 void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size)
3745 hex_dump_internal(avcl, NULL, level, buf, size);
/* Dump an AVPacket's fields (stream index, keyframe flag, duration, dts,
 * pts, size) and optionally its payload as hex.  Timestamps are converted
 * to seconds via `time_base`.  Output goes to `f` if set, else av_log.
 * NOTE(review): the "dts="/"pts=" label lines and the dump_payload guard
 * are missing from this extraction — verify upstream. */
3748 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3750 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3751 PRINT("stream #%d:\n", pkt->stream_index);
3752 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3753 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3754 /* DTS is _always_ valid after av_read_frame() */
3756 if (pkt->dts == AV_NOPTS_VALUE)
3759 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3760 /* PTS may not be known if B-frames are present. */
3762 if (pkt->pts == AV_NOPTS_VALUE)
3765 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3767 PRINT(" size=%d\n", pkt->size);
3770 av_hex_dump(f, pkt->data, pkt->size);
/* Public wrapper: dump a packet to a stdio stream, using the stream's
 * time base to convert timestamps. */
3773 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3775 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
/* Public wrapper: dump a packet through av_log at `level`.
 * NOTE(review): trailing parameter line (AVStream *st) is missing from
 * this extraction. */
3778 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3781 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/* Split a URL of the form proto://user:pass@host:port/path?query into its
 * components, each written into a caller-supplied buffer (or skipped when
 * its size is 0).  *port_ptr is set to -1 when no port is present.
 * IPv6 literals in brackets ("[::1]:80") are handled.
 * NOTE(review): several lines are missing from this extraction (the url
 * parameter line, brace tails, the p advance after the protocol) — the
 * statement order here is load-bearing; verify against upstream. */
3784 void av_url_split(char *proto, int proto_size,
3785 char *authorization, int authorization_size,
3786 char *hostname, int hostname_size,
3788 char *path, int path_size,
3791 const char *p, *ls, *ls2, *at, *at2, *col, *brk;
/* Default every output to "empty" before parsing. */
3793 if (port_ptr) *port_ptr = -1;
3794 if (proto_size > 0) proto[0] = 0;
3795 if (authorization_size > 0) authorization[0] = 0;
3796 if (hostname_size > 0) hostname[0] = 0;
3797 if (path_size > 0) path[0] = 0;
3799 /* parse protocol */
3800 if ((p = strchr(url, ':'))) {
3801 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3806 /* no protocol means plain filename */
3807 av_strlcpy(path, url, path_size);
3811 /* separate path from hostname */
3812 ls = strchr(p, '/');
3813 ls2 = strchr(p, '?');
/* Path starts at whichever of '/' or '?' comes first. */
3817 ls = FFMIN(ls, ls2);
3819 av_strlcpy(path, ls, path_size);
3821 ls = &p[strlen(p)]; // XXX
3823 /* the rest is hostname, use that to parse auth/port */
3825 /* authorization (user[:pass]@hostname) */
/* Keep scanning so the LAST '@' before the path wins. */
3827 while ((at = strchr(p, '@')) && at < ls) {
3828 av_strlcpy(authorization, at2,
3829 FFMIN(authorization_size, at + 1 - at2));
3830 p = at + 1; /* skip '@' */
/* Bracketed IPv6 literal: host is the bracket contents; port follows ']'. */
3833 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3835 av_strlcpy(hostname, p + 1,
3836 FFMIN(hostname_size, brk - p));
3837 if (brk[1] == ':' && port_ptr)
3838 *port_ptr = atoi(brk + 2);
3839 } else if ((col = strchr(p, ':')) && col < ls) {
3840 av_strlcpy(hostname, p,
3841 FFMIN(col + 1 - p, hostname_size));
3842 if (port_ptr) *port_ptr = atoi(col + 1);
3844 av_strlcpy(hostname, p,
3845 FFMIN(ls + 1 - p, hostname_size));
/**
 * Write the hexadecimal representation of the first s bytes of src into
 * buff, two characters per byte.  Exactly 2*s characters are written; no
 * terminating NUL is appended.
 *
 * @param buff      destination buffer, at least 2*s bytes
 * @param src       source bytes
 * @param s         number of source bytes to encode
 * @param lowercase nonzero to emit a-f instead of A-F
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    static const char digits_uc[16] = "0123456789ABCDEF";
    static const char digits_lc[16] = "0123456789abcdef";
    const char *digits = lowercase ? digits_lc : digits_uc;
    int i;

    for (i = 0; i < s; i++) {
        buff[2 * i]     = digits[src[i] >> 4];
        buff[2 * i + 1] = digits[src[i] & 0xF];
    }
    return buff;
}
/* Parse hexadecimal digits from string p into bytes.  Whitespace between
 * digit pairs is skipped; parsing stops at the first non-hex character.
 * Returns the number of bytes decoded; when `data` is NULL only the
 * length is computed.
 * NOTE(review): the accumulator logic (v, the data[] store, the length
 * count, the return) is missing from this extraction — verify upstream. */
3870 int ff_hex_to_data(uint8_t *data, const char *p)
3877 p += strspn(p, SPACE_CHARS);
/* Case-insensitive digit decode: '0'-'9' and 'A'-'F'. */
3880 c = av_toupper((unsigned char) *p++);
3881 if (c >= '0' && c <= '9')
3883 else if (c >= 'A' && c <= 'F')
3898 #if FF_API_SET_PTS_INFO
/* Deprecated public alias kept for ABI compatibility while
 * FF_API_SET_PTS_INFO is enabled; forwards to avpriv_set_pts_info(). */
3899 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3900 unsigned int pts_num, unsigned int pts_den)
3902 avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
/* Set the stream time base to pts_num/pts_den reduced to lowest terms
 * (clamped so both fit in int), propagate it to the codec packet time
 * base, and record the timestamp wrap width.  Invalid (non-positive)
 * reduced time bases are rejected with an error log and no change.
 * NOTE(review): the new_tb declaration and some brace/return lines are
 * missing from this extraction — verify upstream. */
3906 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
3907 unsigned int pts_num, unsigned int pts_den)
/* av_reduce returns nonzero when the reduction was exact (no clamping). */
3910 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
3911 if(new_tb.num != pts_num)
3912 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
3914 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
3916 if(new_tb.num <= 0 || new_tb.den <= 0) {
3917 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase %d/%d for st:%d\n", new_tb.num, new_tb.den, s->index);
3920 s->time_base = new_tb;
3921 av_codec_set_pkt_timebase(s->codec, new_tb);
3922 s->pts_wrap_bits = pts_wrap_bits;
/* Parse a string of key=value pairs (comma/whitespace separated, values
 * optionally double-quoted with backslash escapes).  For each pair the
 * callback is asked for a destination buffer of dest_len bytes; the value
 * is copied there, bounded by dest_end.
 * NOTE(review): large parts of the loop (the key pointer setup, quote
 * handling, escape handling, NUL termination) are missing from this
 * extraction — verify upstream before editing. */
3925 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3928 const char *ptr = str;
3930 /* Parse key=value pairs. */
3933 char *dest = NULL, *dest_end;
3934 int key_len, dest_len = 0;
3936 /* Skip whitespace and potential commas. */
3937 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
/* No '=' found: no more pairs. */
3944 if (!(ptr = strchr(key, '=')))
3947 key_len = ptr - key;
/* Callback supplies the per-key destination buffer and its size. */
3949 callback_get_buf(context, key, key_len, &dest, &dest_len);
3950 dest_end = dest + dest_len - 1;
/* Quoted value: copy until the closing quote, bounded by dest_end. */
3954 while (*ptr && *ptr != '\"') {
3958 if (dest && dest < dest_end)
3962 if (dest && dest < dest_end)
/* Unquoted value: copy until whitespace or comma. */
3970 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
3971 if (dest && dest < dest_end)
3979 int ff_find_stream_index(AVFormatContext *s, int id)
3982 for (i = 0; i < s->nb_streams; i++) {
3983 if (s->streams[i]->id == id)
/**
 * Convert an ISO-8601-style date string to seconds since the Unix epoch.
 * Both the space-separated ("YYYY-MM-DD hh:mm:ss") and the 'T'-separated
 * ("YYYY-MM-DDThh:mm:ss") forms are tried; the time is interpreted as UTC
 * via av_timegm().  When only the 'T' form parses, that result is used;
 * otherwise the space form's (possibly partial) parse wins.
 */
int64_t ff_iso8601_to_unix_time(const char *datestr)
{
    struct tm time1 = { 0 }, time2 = { 0 };
    char *ret1, *ret2;

    ret1 = av_small_strptime(datestr, "%Y - %m - %d %H:%M:%S", &time1);
    ret2 = av_small_strptime(datestr, "%Y - %m - %dT%H:%M:%S", &time2);
    if (ret2 && !ret1)
        return av_timegm(&time2);
    return av_timegm(&time1);
}
/* Ask whether a muxer can store codec_id: the muxer's own query_codec()
 * hook wins; otherwise a codec-tag table lookup; otherwise a match against
 * the muxer's default video/audio/subtitle codec.  Returns positive for
 * supported, 0 for not, AVERROR_PATCHWELCOME when undeterminable.
 * NOTE(review): the NULL-ofmt guard and return-1 line are missing from
 * this extraction — verify upstream. */
4001 int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance)
4004 if (ofmt->query_codec)
4005 return ofmt->query_codec(codec_id, std_compliance);
4006 else if (ofmt->codec_tag)
4007 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4008 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
4009 codec_id == ofmt->subtitle_codec)
4012 return AVERROR_PATCHWELCOME;
/* Global one-time network initialization (wraps ff_network_init / TLS
 * init under CONFIG_NETWORK).  Sets the "inited globally" flag so
 * per-protocol code skips its own implicit init.
 * NOTE(review): the CONFIG_NETWORK #if and the error/return lines are
 * missing from this extraction. */
4015 int avformat_network_init(void)
4019 ff_network_inited_globally = 1;
4020 if ((ret = ff_network_init()) < 0)
/* Undo avformat_network_init().
 * NOTE(review): the entire body is missing from this extraction —
 * restore from upstream. */
4027 int avformat_network_deinit(void)
/* Attach an AV_PKT_DATA_PARAM_CHANGE side-data block to pkt describing
 * which stream parameters changed: a little-endian flags word followed by
 * the changed values (channel count, channel layout, sample rate, then
 * width+height) in that fixed order.  Returns 0 on success, AVERROR(EINVAL)
 * for a missing packet, AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): the size computation and several per-field guards are
 * missing from this extraction — verify upstream. */
4036 int ff_add_param_change(AVPacket *pkt, int32_t channels,
4037 uint64_t channel_layout, int32_t sample_rate,
4038 int32_t width, int32_t height)
4044 return AVERROR(EINVAL);
4047 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4049 if (channel_layout) {
4051 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4055 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4057 if (width || height) {
4059 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
4061 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4063 return AVERROR(ENOMEM);
/* Serialize: flags first, then each present field, all little-endian. */
4064 bytestream_put_le32(&data, flags);
4066 bytestream_put_le32(&data, channels);
4068 bytestream_put_le64(&data, channel_layout);
4070 bytestream_put_le32(&data, sample_rate);
4071 if (width || height) {
4072 bytestream_put_le32(&data, width);
4073 bytestream_put_le32(&data, height);
4078 AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4080 AVRational undef = {0, 1};
4081 AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4082 AVRational codec_sample_aspect_ratio = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
4083 AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
4085 av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4086 stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4087 if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4088 stream_sample_aspect_ratio = undef;
4090 av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4091 frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4092 if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4093 frame_sample_aspect_ratio = undef;
4095 if (stream_sample_aspect_ratio.num)
4096 return stream_sample_aspect_ratio;
4098 return frame_sample_aspect_ratio;
/* Guess the real frame rate of a stream: start from r_frame_rate and,
 * for codecs with ticks_per_frame > 1 (field-based coding), fall back to
 * the codec-derived rate when it is markedly lower than r_frame_rate and
 * disagrees with the measured average frame rate.
 * NOTE(review): the assignment taken when the condition holds and the
 * final return are missing from this extraction — verify upstream. */
4101 AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
4103 AVRational fr = st->r_frame_rate;
4105 if (st->codec->ticks_per_frame > 1) {
/* Codec rate per *frame*: invert the codec tb and scale by ticks/frame. */
4106 AVRational codec_fr = av_inv_q(st->codec->time_base);
4107 AVRational avg_fr = st->avg_frame_rate;
4108 codec_fr.den *= st->codec->ticks_per_frame;
/* Prefer the codec rate when r_frame_rate looks like a field rate:
 * codec rate well below it (x0.7) and the average rate disagrees too. */
4109 if ( codec_fr.num > 0 && codec_fr.den > 0 && av_q2d(codec_fr) < av_q2d(fr)*0.7
4110 && fabs(1.0 - av_q2d(av_div_q(avg_fr, fr))) > 0.1)
/* Check whether stream st matches the stream specifier `spec`:
 *   ""            matches everything
 *   "<n>"         matches stream index n
 *   "v|a|s|d|t"   matches by media type, optionally ":<n>" for the n-th
 *                 stream of that type
 *   "p:<id>"      matches streams of program <id>, optionally ":<n>"
 *   "#<id>"       matches by stream id
 * Returns >0 on match, 0 on no match, AVERROR(EINVAL) on a bad specifier.
 * NOTE(review): several return statements, the `spec` parameter line and
 * brace tails are missing from this extraction — verify upstream. */
4117 int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,
4120 if (*spec <= '9' && *spec >= '0') /* opt:index */
4121 return strtol(spec, NULL, 0) == st->index;
4122 else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
4123 *spec == 't') { /* opt:[vasdt] */
4124 enum AVMediaType type;
4127 case 'v': type = AVMEDIA_TYPE_VIDEO; break;
4128 case 'a': type = AVMEDIA_TYPE_AUDIO; break;
4129 case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
4130 case 'd': type = AVMEDIA_TYPE_DATA; break;
4131 case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
4132 default: av_assert0(0);
4134 if (type != st->codec->codec_type)
4136 if (*spec++ == ':') { /* possibly followed by :index */
/* ":n" selects the n-th stream of this type; count down to find it. */
4137 int i, index = strtol(spec, NULL, 0);
4138 for (i = 0; i < s->nb_streams; i++)
4139 if (s->streams[i]->codec->codec_type == type && index-- == 0)
4140 return i == st->index;
4144 } else if (*spec == 'p' && *(spec + 1) == ':') {
4148 prog_id = strtol(spec, &endptr, 0);
4149 for (i = 0; i < s->nb_programs; i++) {
4150 if (s->programs[i]->id != prog_id)
/* "p:<id>:<n>" addresses the n-th stream inside the program. */
4153 if (*endptr++ == ':') {
4154 int stream_idx = strtol(endptr, NULL, 0);
4155 return stream_idx >= 0 &&
4156 stream_idx < s->programs[i]->nb_stream_indexes &&
4157 st->index == s->programs[i]->stream_index[stream_idx];
/* Plain "p:<id>": match any stream belonging to the program. */
4160 for (j = 0; j < s->programs[i]->nb_stream_indexes; j++)
4161 if (st->index == s->programs[i]->stream_index[j])
4165 } else if (*spec == '#') {
4168 sid = strtol(spec + 1, &endptr, 0);
4170 return st->id == sid;
4171 } else if (!*spec) /* empty specifier, matches everything */
4174 av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
4175 return AVERROR(EINVAL);
4178 int ff_generate_avci_extradata(AVStream *st)
4180 static const uint8_t avci100_1080p_extradata[] = {
4182 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4183 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4184 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4185 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
4186 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
4187 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
4188 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
4189 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
4190 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4192 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4195 static const uint8_t avci100_1080i_extradata[] = {
4197 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4198 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4199 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4200 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
4201 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
4202 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
4203 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
4204 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
4205 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
4206 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
4207 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
4209 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4212 static const uint8_t avci50_1080i_extradata[] = {
4214 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
4215 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
4216 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
4217 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
4218 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
4219 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
4220 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
4221 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
4222 0x81, 0x13, 0xf7, 0xff, 0x80, 0x02, 0x00, 0x01,
4223 0xf1, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
4224 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
4226 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
4229 static const uint8_t avci100_720p_extradata[] = {
4231 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4232 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
4233 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
4234 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
4235 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
4236 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
4237 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
4238 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
4239 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
4240 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
4242 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
4246 const uint8_t *data = NULL;
4249 if (st->codec->width == 1920) {
4250 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
4251 data = avci100_1080p_extradata;
4252 size = sizeof(avci100_1080p_extradata);
4254 data = avci100_1080i_extradata;
4255 size = sizeof(avci100_1080i_extradata);
4257 } else if (st->codec->width == 1440) {
4258 data = avci50_1080i_extradata;
4259 size = sizeof(avci50_1080i_extradata);
4260 } else if (st->codec->width == 1280) {
4261 data = avci100_720p_extradata;
4262 size = sizeof(avci100_720p_extradata);
4268 av_freep(&st->codec->extradata);
4269 if (ff_alloc_extradata(st->codec, size))
4270 return AVERROR(ENOMEM);
4271 memcpy(st->codec->extradata, data, size);