2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "avio_internal.h"
25 #include "libavcodec/internal.h"
26 #include "libavcodec/raw.h"
27 #include "libavcodec/bytestream.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/dict.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/pixdesc.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/avstring.h"
36 #include "libavutil/mathematics.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/time.h"
39 #include "libavutil/timestamp.h"
41 #include "audiointerleave.h"
53 * various utility functions for use within FFmpeg
56 unsigned avformat_version(void)
58 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
59 return LIBAVFORMAT_VERSION_INT;
62 const char *avformat_configuration(void)
64 return FFMPEG_CONFIGURATION;
67 const char *avformat_license(void)
69 #define LICENSE_PREFIX "libavformat license: "
70 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Base value for timestamps that are generated before the real stream start
 * is known; chosen far below INT64_MAX so later shifts cannot overflow. */
#define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))

/**
 * Return nonzero if ts lies in the "relative timestamp" range, i.e. within
 * 2^48 ticks of RELATIVE_TS_BASE or above it.
 * (Restores the closing brace missing from this truncated source.)
 */
static int is_relative(int64_t ts) {
    return ts > (RELATIVE_TS_BASE - (1LL<<48));
}
80 * Wrap a given time stamp, if there is an indication for an overflow
83 * @param timestamp the time stamp to wrap
84 * @return resulting time stamp
86 static int64_t wrap_timestamp(AVStream *st, int64_t timestamp)
88 if (st->pts_wrap_behavior != AV_PTS_WRAP_IGNORE &&
89 st->pts_wrap_reference != AV_NOPTS_VALUE && timestamp != AV_NOPTS_VALUE) {
90 if (st->pts_wrap_behavior == AV_PTS_WRAP_ADD_OFFSET &&
91 timestamp < st->pts_wrap_reference)
92 return timestamp + (1ULL<<st->pts_wrap_bits);
93 else if (st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET &&
94 timestamp >= st->pts_wrap_reference)
95 return timestamp - (1ULL<<st->pts_wrap_bits);
/* Generated getter/setter pairs for fields that are not accessed directly
 * through the public ABI (av_stream_get_r_frame_rate() etc.). */
MAKE_ACCESSORS(AVStream, stream, AVRational, r_frame_rate)
MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, video_codec)
MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, audio_codec)
MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, subtitle_codec)
105 static AVCodec *find_decoder(AVFormatContext *s, AVStream *st, enum AVCodecID codec_id)
107 if (st->codec->codec)
108 return st->codec->codec;
110 switch(st->codec->codec_type){
111 case AVMEDIA_TYPE_VIDEO:
112 if(s->video_codec) return s->video_codec;
114 case AVMEDIA_TYPE_AUDIO:
115 if(s->audio_codec) return s->audio_codec;
117 case AVMEDIA_TYPE_SUBTITLE:
118 if(s->subtitle_codec) return s->subtitle_codec;
122 return avcodec_find_decoder(codec_id);
125 int av_format_get_probe_score(const AVFormatContext *s)
127 return s->probe_score;
/* an arbitrarily chosen "sane" max packet size -- 50M */
#define SANE_CHUNK_SIZE (50000000)

/* Clamp a requested read size against s->maxsize, refreshing maxsize from
 * avio_size() when the remaining budget looks too small, and log when a
 * packet is truncated.
 * NOTE(review): this region is truncated -- the enclosing maxsize guard,
 * the assignment of the clamped size, and the final return are not visible
 * here; code below kept exactly as found. */
int ffio_limit(AVIOContext *s, int size)
    int64_t remaining= s->maxsize - avio_tell(s);
    if(remaining < size){
        int64_t newsize= avio_size(s);
        if(!s->maxsize || s->maxsize<newsize)
            s->maxsize= newsize - !newsize; /* -!newsize maps 0 (unknown) to -1 */
        remaining= s->maxsize - avio_tell(s);
        remaining= FFMAX(remaining, 0);
    if(s->maxsize>=0 && remaining+1 < size){
        av_log(NULL, remaining ? AV_LOG_ERROR : AV_LOG_DEBUG, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
/*
 * Read the data in sane-sized chunks and append to pkt.
 * Return the number of bytes read or an error.
 */
/* NOTE(review): truncated region -- the read loop header, ret declaration,
 * and several closing braces are not visible; code kept as found. */
static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
    int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
    int orig_size = pkt->size;
        int prev_size = pkt->size;
        /*
         * When the caller requests a lot of data, limit it to the amount left
         * in file or SANE_CHUNK_SIZE when it is not known
         */
        if (read_size > SANE_CHUNK_SIZE/10) {
            read_size = ffio_limit(s, read_size);
            // If filesize/maxsize is unknown, limit to SANE_CHUNK_SIZE
                read_size = FFMIN(read_size, SANE_CHUNK_SIZE);
        /* grow, then fill the newly-added tail of the packet buffer */
        ret = av_grow_packet(pkt, read_size);
        ret = avio_read(s, pkt->data + prev_size, read_size);
        if (ret != read_size) {
            /* short read: trim the packet back to what was actually read */
            av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
        pkt->flags |= AV_PKT_FLAG_CORRUPT;
    /* report bytes appended, or the error when nothing was appended */
    return pkt->size > orig_size ? pkt->size - orig_size : ret;
/* Allocate and read a new packet of up to `size` bytes from s.
 * NOTE(review): truncated region -- packet initialization lines are not
 * visible here; code kept as found. */
int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
    pkt->pos = avio_tell(s);

    return append_packet_chunked(s, pkt, size);
/* Append up to `size` bytes to an existing packet; behaves like
 * av_get_packet() when the packet is still empty.
 * NOTE(review): truncated region -- the guard condition selecting between
 * the two returns (presumably `if (!pkt->size)`) is not visible. */
int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
        return av_get_packet(s, pkt, size);
    return append_packet_chunked(s, pkt, size);
/* Return 1 if the filename template contains a valid frame-number pattern
 * (e.g. "%d") accepted by av_get_frame_filename(), 0 otherwise.
 * NOTE(review): the local `buf` declaration is not visible in this
 * truncated region. */
int av_filename_number_test(const char *filename)
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
/* Probe pd against every registered input format and return the best match,
 * storing its score in *score_ret.  An ID3v2 header, if present, is skipped
 * before probing and lowers the maximum achievable extension-only score.
 * NOTE(review): truncated region -- loop bodies, `continue`s and several
 * closing braces are not visible; code kept as found. */
AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
    AVProbeData lpd = *pd;
    AVInputFormat *fmt1 = NULL, *fmt;
    int score, nodat = 0, score_max=0;
    /* NOTE(review): conventional specifier order is `static const` --
     * `const static` draws warnings from some compilers. */
    const static uint8_t zerobuffer[AVPROBE_PADDING_SIZE];
        lpd.buf = zerobuffer;
    if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
        int id3len = ff_id3v2_tag_len(lpd.buf);
        if (lpd.buf_size > id3len + 16) {
            /* skip the ID3v2 tag so the real payload is probed */
            lpd.buf_size -= id3len;
    while ((fmt1 = av_iformat_next(fmt1))) {
        /* only consider formats whose NOFILE flag matches is_opened */
        if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
        if (fmt1->read_probe) {
            score = fmt1->read_probe(&lpd);
            if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
                score = FFMAX(score, nodat ? AVPROBE_SCORE_EXTENSION / 2 - 1 : 1);
        } else if (fmt1->extensions) {
            /* no probe function: fall back to extension matching only */
            if (av_match_ext(lpd.filename, fmt1->extensions)) {
                score = AVPROBE_SCORE_EXTENSION;
        if (score > score_max) {
        }else if (score == score_max)
        /* with no data probed, cap the confidence of an extension-only hit */
        score_max = FFMIN(AVPROBE_SCORE_EXTENSION / 2 - 1, score_max);
    *score_ret= score_max;
/* Wrapper around av_probe_input_format3() that only reports a format when
 * its score beats the caller-supplied threshold in *score_max.
 * NOTE(review): truncated region -- `score_ret` declaration, the return of
 * fmt and the failing-path return are not visible; code kept as found. */
AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
    AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
    if(score_ret > *score_max){
        *score_max= score_ret;
/* Convenience wrapper: probe with a zero score threshold.
 * NOTE(review): the local `score` declaration/initialization is not visible
 * in this truncated region. */
AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
    return av_probe_input_format2(pd, is_opened, &score);
/* Run the format probe on accumulated stream data and, when the detected
 * format is one of the known raw/elementary-stream demuxers listed below,
 * force the stream's codec_id/codec_type accordingly.
 * NOTE(review): truncated region -- table terminator, `score`/`i`
 * declarations, closing braces and the return are not visible. */
static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
    /* map of demuxer name -> codec id/type for probeable elementary streams */
    static const struct {
        const char *name; enum AVCodecID id; enum AVMediaType type;
        { "aac"      , AV_CODEC_ID_AAC       , AVMEDIA_TYPE_AUDIO },
        { "ac3"      , AV_CODEC_ID_AC3       , AVMEDIA_TYPE_AUDIO },
        { "dts"      , AV_CODEC_ID_DTS       , AVMEDIA_TYPE_AUDIO },
        { "eac3"     , AV_CODEC_ID_EAC3      , AVMEDIA_TYPE_AUDIO },
        { "h264"     , AV_CODEC_ID_H264      , AVMEDIA_TYPE_VIDEO },
        { "loas"     , AV_CODEC_ID_AAC_LATM  , AVMEDIA_TYPE_AUDIO },
        { "m4v"      , AV_CODEC_ID_MPEG4     , AVMEDIA_TYPE_VIDEO },
        { "mp3"      , AV_CODEC_ID_MP3       , AVMEDIA_TYPE_AUDIO },
        { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
    AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
    if (fmt && st->request_probe <= score) {
        av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
               pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
        for (i = 0; fmt_id_type[i].name; i++) {
            if (!strcmp(fmt->name, fmt_id_type[i].name)) {
                st->codec->codec_id   = fmt_id_type[i].id;
                st->codec->codec_type = fmt_id_type[i].type;
320 /************************************************************/
321 /* input media file */
/* Call the demuxer's read_header() and record the data start offset.
 * NOTE(review): truncated region -- `err` declaration, error return and
 * final return are not visible; code kept as found. */
int av_demuxer_open(AVFormatContext *ic){
    if (ic->iformat->read_header) {
        err = ic->iformat->read_header(ic);
    /* remember where the payload starts, for later seeks back to start */
    if (ic->pb && !ic->data_offset)
        ic->data_offset = avio_tell(ic->pb);
/* Probe the input format by reading progressively larger chunks from pb
 * (PROBE_BUF_MIN doubling up to max_probe_size), then rewind pb by pushing
 * the probe buffer back.  Returns the probe score, or a negative error.
 * NOTE(review): truncated region -- `score`/`mime_type` declarations, the
 * `goto fail` paths, several closing braces and the probestat ifdef are not
 * visible; code kept as found. */
int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
                           const char *filename, void *logctx,
                           unsigned int offset, unsigned int max_probe_size)
    AVProbeData pd = { filename ? filename : "", NULL, -offset };
    unsigned char *buf = NULL;
    int ret = 0, probe_size, buf_offset = 0;
    /* clamp/validate the requested probe window */
    if (!max_probe_size) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size > PROBE_BUF_MAX) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size < PROBE_BUF_MIN) {
        av_log(logctx, AV_LOG_ERROR,
               "Specified probe size value %u cannot be < %u\n", max_probe_size, PROBE_BUF_MIN);
        return AVERROR(EINVAL);
    if (offset >= max_probe_size) {
        return AVERROR(EINVAL);
    /* shortcut: trust an HTTP-style mime_type hint when present */
    if (!*fmt && pb->av_class && av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type) >= 0 && mime_type) {
        if (!av_strcasecmp(mime_type, "audio/aacp")) {
            *fmt = av_find_input_format("aac");
        av_freep(&mime_type);
    for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
        probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
        if (probe_size < offset) {
        /* on the final iteration accept any score, otherwise demand RETRY+ */
        score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0;
        /* read probe data */
        if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0)
        if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
            /* fail if error was not end of file, otherwise, lower score */
            if (ret != AVERROR_EOF) {
            ret = 0; /* error was end of file, nothing read */
        pd.buf_size = buf_offset += ret;
        pd.buf = &buf[offset];
        /* probe functions may over-read; guarantee zeroed padding */
        memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
        /* guess file format */
        *fmt = av_probe_input_format2(&pd, 1, &score);
            if(score <= AVPROBE_SCORE_RETRY){ //this can only be true in the last iteration
                av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
                av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
            /* NOTE(review): f is used without a NULL check -- fopen can fail
             * (read-only cwd) and fprintf(NULL,...) would crash; this debug
             * path should check f before writing. */
            FILE *f = fopen("probestat.tmp", "ab");
            fprintf(f, "probe_size:%d format:%s score:%d filename:%s\n", probe_size, (*fmt)->name, score, filename);
        return AVERROR_INVALIDDATA;
    /* rewind. reuse probe buffer to avoid seeking */
    ret = ffio_rewind_with_probe_data(pb, &buf, pd.buf_size);
    return ret < 0 ? ret : score;
421 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
422 const char *filename, void *logctx,
423 unsigned int offset, unsigned int max_probe_size)
425 int ret = av_probe_input_buffer2(pb, fmt, filename, logctx, offset, max_probe_size);
426 return ret < 0 ? ret : 0;
/* open input file and probe the format if necessary */
/* NOTE(review): truncated region -- `ret` declaration, several returns and
 * closing braces are not visible; code kept as found. */
static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
    AVProbeData pd = {filename, NULL, 0};
    int score = AVPROBE_SCORE_RETRY;
        /* caller supplied its own pb: mark it so we do not close it later */
        s->flags |= AVFMT_FLAG_CUSTOM_IO;
            return av_probe_input_buffer2(s->pb, &s->iformat, filename, s, 0, s->probesize);
        else if (s->iformat->flags & AVFMT_NOFILE)
            av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
                                      "will be ignored with AVFMT_NOFILE format.\n");
    /* NOFILE formats or a filename-only probe hit need no avio open */
    if (   (s->iformat && s->iformat->flags & AVFMT_NOFILE)
        || (!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score))))
    if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
                          &s->interrupt_callback, options)) < 0)
    return av_probe_input_buffer2(s->pb, &s->iformat, filename, s, 0, s->probesize);
/* Append pkt to a singly-linked AVPacketList, updating the tail pointer.
 * NOTE(review): truncated region -- the NULL check on the allocation, the
 * empty-list branch and the final return are not visible. */
static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
                               AVPacketList **plast_pktl){
    AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
        (*plast_pktl)->next = pktl;
        *packet_buffer = pktl;

    /* add the packet in the buffered packet list */
476 int avformat_queue_attached_pictures(AVFormatContext *s)
479 for (i = 0; i < s->nb_streams; i++)
480 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
481 s->streams[i]->discard < AVDISCARD_ALL) {
482 AVPacket copy = s->streams[i]->attached_pic;
483 copy.buf = av_buffer_ref(copy.buf);
485 return AVERROR(ENOMEM);
487 add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end);
/* Open an input stream: allocate/validate the context, apply options, run
 * init_input() to probe the format, allocate demuxer private data, read the
 * container header, attach ID3v2 pictures, and hand unused options back to
 * the caller.
 * NOTE(review): truncated region -- `ret` declaration, many goto-fail error
 * paths, closing braces and the fail: label are not visible; code kept as
 * found. */
int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
    AVFormatContext *s = *ps;
    AVDictionary *tmp = NULL;
    ID3v2ExtraMeta *id3v2_extra_meta = NULL;
    if (!s && !(s = avformat_alloc_context()))
        return AVERROR(ENOMEM);
        av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
        return AVERROR(EINVAL);
        av_dict_copy(&tmp, *options, 0);
    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
    if ((ret = init_input(s, filename, &tmp)) < 0)
    s->probe_score = ret;
    avio_skip(s->pb, s->skip_initial_bytes);
    /* check filename in case an image number is expected */
    if (s->iformat->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            ret = AVERROR(EINVAL);
    s->duration = s->start_time = AV_NOPTS_VALUE;
    av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
    /* allocate private data */
    if (s->iformat->priv_data_size > 0) {
        if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
            ret = AVERROR(ENOMEM);
        if (s->iformat->priv_class) {
            *(const AVClass**)s->priv_data = s->iformat->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
    /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
        ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
    if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
        if ((ret = s->iformat->read_header(s)) < 0)
    if (id3v2_extra_meta) {
        /* only these elementary-stream demuxers accept attached pictures */
        if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") ||
            !strcmp(s->iformat->name, "tta")) {
            if((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
            av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n");
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    if ((ret = avformat_queue_attached_pictures(s)) < 0)
    if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
        s->data_offset = avio_tell(s->pb);
    s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
        av_dict_free(options);
    /* failure path: release ID3 metadata, close pb unless caller-owned */
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
    avformat_free_context(s);
587 /*******************************************************/
589 static void force_codec_ids(AVFormatContext *s, AVStream *st)
591 switch(st->codec->codec_type){
592 case AVMEDIA_TYPE_VIDEO:
593 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
595 case AVMEDIA_TYPE_AUDIO:
596 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
598 case AVMEDIA_TYPE_SUBTITLE:
599 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
/* Accumulate packet payloads into st->probe_data and, once enough data is
 * buffered (or the buffer budget / packet budget is exhausted), run
 * set_codec_from_probe_data() to identify the stream's codec.  A NULL pkt
 * signals end-of-probing.
 * NOTE(review): truncated region -- `end` declaration, realloc-failure
 * branch, returns and closing braces are not visible; code kept as found. */
static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
    if(st->request_probe>0){
        AVProbeData *pd = &st->probe_data;
        av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
            /* grow the probe buffer; padding is required by probe functions */
            uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
                av_log(s, AV_LOG_WARNING,
                       "Failed to reallocate probe buffer for stream %d\n",
            memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
            pd->buf_size += pkt->size;
            memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
            st->probe_packets = 0;
                av_log(s, AV_LOG_WARNING, "nothing to probe for stream %d\n",
        end= s->raw_packet_buffer_remaining_size <= 0
                || st->probe_packets<=0;
        /* re-probe whenever the buffer size crosses a power of two, or at end */
        if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
            int score= set_codec_from_probe_data(s, st, pd);
            if(    (st->codec->codec_id != AV_CODEC_ID_NONE && score > AVPROBE_SCORE_RETRY)
                st->request_probe= -1;
                if(st->codec->codec_id != AV_CODEC_ID_NONE){
                    av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
                    av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
            force_codec_ids(s, st);
/* Fetch the next raw (unparsed) packet: drain the raw_packet_buffer first,
 * otherwise call the demuxer's read_packet(), then apply corruption
 * discarding, side-data merging, timestamp wrapping, forced codec ids and
 * codec probing.
 * NOTE(review): truncated region -- `ret`/`i`/`err`/`st` declarations, the
 * retry loop, returns, and many closing braces are not visible; code kept
 * as found. */
int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
        AVPacketList *pktl = s->raw_packet_buffer;
            st = s->streams[pkt->stream_index];
            if (s->raw_packet_buffer_remaining_size <= 0) {
                if ((err = probe_codec(s, st, NULL)) < 0)
            if(st->request_probe <= 0){
                /* stream already identified: pop the packet off the buffer */
                s->raw_packet_buffer = pktl->next;
                s->raw_packet_buffer_remaining_size += pkt->size;
        ret= s->iformat->read_packet(s, pkt);
            if (!pktl || ret == AVERROR(EAGAIN))
            /* demuxer is done: flush codec probing on all pending streams */
            for (i = 0; i < s->nb_streams; i++) {
                if (st->probe_packets) {
                    if ((err = probe_codec(s, st, NULL)) < 0)
                av_assert0(st->request_probe <= 0);
        if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
            (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
            av_log(s, AV_LOG_WARNING,
                   "Dropped corrupted packet (stream = %d)\n",
        if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
            av_packet_merge_side_data(pkt);
        if(pkt->stream_index >= (unsigned)s->nb_streams){
            av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
        st= s->streams[pkt->stream_index];
        /* apply 2^pts_wrap_bits wrap correction as early as possible */
        pkt->dts = wrap_timestamp(st, pkt->dts);
        pkt->pts = wrap_timestamp(st, pkt->pts);
        force_codec_ids(s, st);
        /* TODO: audio: time filter; video: frame reordering (pts != dts) */
        if (s->use_wallclock_as_timestamps)
            pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);
        if(!pktl && st->request_probe <= 0)
        /* stream still being probed: park the packet in the raw buffer */
        add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
        s->raw_packet_buffer_remaining_size -= pkt->size;
        if ((err = probe_codec(s, st, pkt)) < 0)
#if FF_API_READ_PACKET
/* Deprecated public alias for ff_read_packet(), kept under FF_API_READ_PACKET.
 * NOTE(review): the closing brace and matching #endif are outside this
 * truncated view. */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
    return ff_read_packet(s, pkt);
741 /**********************************************************/
743 static int determinable_frame_size(AVCodecContext *avctx)
745 if (/*avctx->codec_id == AV_CODEC_ID_AAC ||*/
746 avctx->codec_id == AV_CODEC_ID_MP1 ||
747 avctx->codec_id == AV_CODEC_ID_MP2 ||
748 avctx->codec_id == AV_CODEC_ID_MP3/* ||
749 avctx->codec_id == AV_CODEC_ID_CELT*/)
/*
 * Get the number of samples of an audio frame. Return -1 on error.
 */
/* NOTE(review): truncated region -- `frame_size` declaration, a return of
 * frame_size, and the final -1 return are not visible; code kept as found. */
int ff_get_audio_frame_size(AVCodecContext *enc, int size, int mux)
    /* give frame_size priority if demuxing */
    if (!mux && enc->frame_size > 1)
        return enc->frame_size;
    if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
    /* Fall back on using frame_size if muxing. */
    if (enc->frame_size > 1)
        return enc->frame_size;
    //For WMA we currently have no other means to calculate duration thus we
    //do it here by assuming CBR, which is true for all known cases.
    if(!mux && enc->bit_rate>0 && size>0 && enc->sample_rate>0 && enc->block_align>1) {
        if (enc->codec_id == AV_CODEC_ID_WMAV1 || enc->codec_id == AV_CODEC_ID_WMAV2)
            return ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
/*
 * Return the frame duration in seconds. Return 0 if not available.
 */
/* Computes the duration of one packet as the rational *pnum / *pden, from
 * r_frame_rate (video without parser), stream/codec time bases, or the
 * audio frame size over the sample rate.
 * NOTE(review): truncated region -- initial `*pnum = *pden = 0`, the
 * `frame_size` declaration, break statements and closing braces are not
 * visible; code kept as found. */
void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
    switch(st->codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        if (st->r_frame_rate.num && !pc) {
            *pnum = st->r_frame_rate.den;
            *pden = st->r_frame_rate.num;
        } else if(st->time_base.num*1000LL > st->time_base.den) {
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
            *pnum = st->codec->time_base.num;
            *pden = st->codec->time_base.den;
            if (pc && pc->repeat_pict) {
                /* guard the multiply against int overflow before scaling */
                if (*pnum > INT_MAX / (1 + pc->repeat_pict))
                    *pden /= 1 + pc->repeat_pict;
                    *pnum *= 1 + pc->repeat_pict;
            //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
            //Thus if we have no parser in such case leave duration undefined.
            if(st->codec->ticks_per_frame>1 && !pc){
    case AVMEDIA_TYPE_AUDIO:
        frame_size = ff_get_audio_frame_size(st->codec, pkt->size, 0);
        if (frame_size <= 0 || st->codec->sample_rate <= 0)
        *pden = st->codec->sample_rate;
/* Return nonzero if the codec produces only intra (key) frames, so every
 * packet can be flagged AV_PKT_FLAG_KEY.  The codec descriptor is looked up
 * once and cached on the codec context.
 * NOTE(review): truncated region -- the non-video early return value, the
 * descriptor-NULL guard and closing braces are not visible. */
static int is_intra_only(AVCodecContext *enc){
    const AVCodecDescriptor *desc;
    if(enc->codec_type != AVMEDIA_TYPE_VIDEO)
    desc = av_codec_get_codec_descriptor(enc);
        desc = avcodec_descriptor_get(enc->codec_id);
        av_codec_set_codec_descriptor(enc, desc);
    return !!(desc->props & AV_CODEC_PROP_INTRA_ONLY);
/* Heuristic: return nonzero once enough frames have been decoded for the
 * H.264 reorder delay (has_b_frames) to be trusted; non-H.264 codecs always
 * return 1.  Thresholds grow with the reported number of B-frames.
 * NOTE(review): truncated region -- the `return 1;` bodies after the info
 * and num_reorder_frames checks are not visible; code kept as found. */
static int has_decode_delay_been_guessed(AVStream *st)
    if(st->codec->codec_id != AV_CODEC_ID_H264) return 1;
    if(!st->info) // if we have left find_stream_info then nb_decoded_frames won't increase anymore for stream copy
#if CONFIG_H264_DECODER
    if(st->codec->has_b_frames &&
       avpriv_h264_has_num_reorder_frames(st->codec) == st->codec->has_b_frames)
    if(st->codec->has_b_frames<3)
        return st->nb_decoded_frames >= 7;
    else if(st->codec->has_b_frames<4)
        return st->nb_decoded_frames >= 18;
        return st->nb_decoded_frames >= 20;
/* Iterate over the parse queue and then the packet buffer as one combined
 * list: when the end of the parse queue is reached, continue with the head
 * of s->packet_buffer.
 * NOTE(review): truncated region -- the `pktl->next` fast path and the
 * final NULL return are not visible; code kept as found. */
static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
    if (pktl == s->parse_queue_end)
        return s->packet_buffer;
/* Establish the pts wrap reference/behavior for a stream once its first_dts
 * is known, propagating the choice to every stream of the same program (or
 * to all streams when the stream belongs to no program).  Returns nonzero
 * when a reference was set (truncated here).
 * NOTE(review): truncated region -- `i` declaration, loop braces around the
 * program walks and the returns are not visible; code kept as found. */
static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_index)
    if (s->correct_ts_overflow && st->pts_wrap_bits < 63 &&
        st->pts_wrap_reference == AV_NOPTS_VALUE && st->first_dts != AV_NOPTS_VALUE) {
        // reference time stamp should be 60 s before first time stamp
        int64_t pts_wrap_reference = st->first_dts - av_rescale(60, st->time_base.den, st->time_base.num);
        // if first time stamp is not more than 1/8 and 60s before the wrap point, subtract rather than add wrap offset
        int pts_wrap_behavior = (st->first_dts < (1LL<<st->pts_wrap_bits) - (1LL<<st->pts_wrap_bits-3)) ||
            (st->first_dts < (1LL<<st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ?
            AV_PTS_WRAP_ADD_OFFSET : AV_PTS_WRAP_SUB_OFFSET;

        AVProgram *first_program = av_find_program_from_stream(s, NULL, stream_index);

        if (!first_program) {
            int default_stream_index = av_find_default_stream_index(s);
            if (s->streams[default_stream_index]->pts_wrap_reference == AV_NOPTS_VALUE) {
                /* no program info: apply the same reference to every stream */
                for (i=0; i<s->nb_streams; i++) {
                    s->streams[i]->pts_wrap_reference = pts_wrap_reference;
                    s->streams[i]->pts_wrap_behavior = pts_wrap_behavior;
                /* inherit from the default stream's earlier decision */
                st->pts_wrap_reference = s->streams[default_stream_index]->pts_wrap_reference;
                st->pts_wrap_behavior = s->streams[default_stream_index]->pts_wrap_behavior;
            AVProgram *program = first_program;
                /* reuse a reference already decided by any containing program */
                if (program->pts_wrap_reference != AV_NOPTS_VALUE) {
                    pts_wrap_reference = program->pts_wrap_reference;
                    pts_wrap_behavior = program->pts_wrap_behavior;
                program = av_find_program_from_stream(s, program, stream_index);

            // update every program with differing pts_wrap_reference
            program = first_program;
                if (program->pts_wrap_reference != pts_wrap_reference) {
                    for (i=0; i<program->nb_stream_indexes; i++) {
                        s->streams[program->stream_index[i]]->pts_wrap_reference = pts_wrap_reference;
                        s->streams[program->stream_index[i]]->pts_wrap_behavior = pts_wrap_behavior;

                    program->pts_wrap_reference = pts_wrap_reference;
                    program->pts_wrap_behavior = pts_wrap_behavior;
                program = av_find_program_from_stream(s, program, stream_index);
/* Once the first real dts of a stream is known, convert the provisional
 * RELATIVE_TS_BASE-based timestamps of all buffered packets to absolute
 * values by shifting them, reorder buffered pts into dts where missing,
 * and apply wrap correction to the initial timestamps if needed.
 * NOTE(review): truncated region -- `delay`/`shift`/`i` declarations, an
 * early return, pts shift and closing braces are not visible; code kept as
 * found. */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts, AVPacket *pkt)
    AVStream *st= s->streams[stream_index];
    AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
    int64_t pts_buffer[MAX_REORDER_DELAY+1];

    /* only run once, and only when a usable absolute dts arrives */
    if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))

    delay = st->codec->has_b_frames;
    st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
    shift = st->first_dts - RELATIVE_TS_BASE;

    for (i=0; i<MAX_REORDER_DELAY+1; i++)
        pts_buffer[i] = AV_NOPTS_VALUE;

    if (is_relative(pts))

    for(; pktl; pktl= get_next_pkt(s, st, pktl)){
        if(pktl->pkt.stream_index != stream_index)
        /* rebase every still-relative timestamp in the queue */
        if(is_relative(pktl->pkt.pts))
            pktl->pkt.pts += shift;

        if(is_relative(pktl->pkt.dts))
            pktl->pkt.dts += shift;

        if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time= pktl->pkt.pts;

        /* insertion-sort pts into the reorder window to derive missing dts */
        if(pktl->pkt.pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
            pts_buffer[0]= pktl->pkt.pts;
            for(i=0; i<delay && pts_buffer[i] > pts_buffer[i+1]; i++)
                FFSWAP(int64_t, pts_buffer[i], pts_buffer[i+1]);
            if(pktl->pkt.dts == AV_NOPTS_VALUE)
                pktl->pkt.dts= pts_buffer[0];

    if (update_wrap_reference(s, st, stream_index) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
        // correct first time stamps to negative values
        st->first_dts = wrap_timestamp(st, st->first_dts);
        st->cur_dts = wrap_timestamp(st, st->cur_dts);
        pkt->dts = wrap_timestamp(st, pkt->dts);
        pkt->pts = wrap_timestamp(st, pkt->pts);
        pts = wrap_timestamp(st, pts);

    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
/* Back-fill dts (and pts, for codecs without B-frames) plus duration for the
 * initial run of buffered packets that arrived without timestamps, assuming
 * a constant per-packet duration starting at first_dts.
 * NOTE(review): truncated region -- `break`s, a return, and closing braces
 * are not visible; code kept as found. */
static void update_initial_durations(AVFormatContext *s, AVStream *st,
                                     int stream_index, int duration)
    AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
    int64_t cur_dts= RELATIVE_TS_BASE;

    if(st->first_dts != AV_NOPTS_VALUE){
        cur_dts= st->first_dts;
        /* walk back from the first timestamped packet to compute the dts of
         * the very first buffered packet */
        for(; pktl; pktl= get_next_pkt(s, st, pktl)){
            if(pktl->pkt.stream_index == stream_index){
                if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
                cur_dts -= duration;
        if(pktl && pktl->pkt.dts != st->first_dts) {
            av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s (pts %s, duration %d) in the queue\n",
                   av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts), av_ts2str(pktl->pkt.pts), pktl->pkt.duration);
            av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(st->first_dts));
        pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
        st->first_dts = cur_dts;
    }else if(st->cur_dts != RELATIVE_TS_BASE)

    /* forward pass: assign the assumed-constant duration and timestamps */
    for(; pktl; pktl= get_next_pkt(s, st, pktl)){
        if(pktl->pkt.stream_index != stream_index)
        if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
           && !pktl->pkt.duration){
            pktl->pkt.dts= cur_dts;
            if(!st->codec->has_b_frames)
                pktl->pkt.pts= cur_dts;
//            if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
                pktl->pkt.duration = duration;
        cur_dts = pktl->pkt.dts + pktl->pkt.duration;
    st->cur_dts= cur_dts;
/* Fill in missing pts/dts/duration on a demuxed packet: applies IGNDTS,
 * detects B-frame presentation delay, corrects single-timestamp wrap
 * overflows, computes duration from the frame rate or sample rate,
 * interpolates timestamps from cur_dts, and flags key frames for
 * intra-only codecs.  The heart of libavformat timestamp fixup.
 * NOTE(review): truncated region -- early returns, an `offset` declaration,
 * parts of compound conditions and many closing braces are not visible;
 * code kept exactly as found. */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
    int num, den, presentation_delayed, delay, i;

    if (s->flags & AVFMT_FLAG_NOFILLIN)

    if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
        pkt->dts= AV_NOPTS_VALUE;

    if (pc && pc->pict_type == AV_PICTURE_TYPE_B
        && !st->codec->has_b_frames)
        //FIXME Set low_delay = 0 when has_b_frames = 1
        st->codec->has_b_frames = 1;

    /* do we have a video B-frame ? */
    delay= st->codec->has_b_frames;
    presentation_delayed = 0;

    /* XXX: need has_b_frame, but cannot get it if the codec is
        pc && pc->pict_type != AV_PICTURE_TYPE_B)
        presentation_delayed = 1;

    /* a lone wrapped dts: undo one wrap period on whichever side overflowed */
    if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
        st->pts_wrap_bits < 63 &&
        pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
        if(is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > st->cur_dts) {
            pkt->dts -= 1LL<<st->pts_wrap_bits;
            pkt->pts += 1LL<<st->pts_wrap_bits;

    // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
    // we take the conservative approach and discard both
    // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
    if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
        av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
        if(strcmp(s->iformat->name, "mov,mp4,m4a,3gp,3g2,mj2")) // otherwise we discard correct timestamps for vc1-wmapro.ism
            pkt->dts= AV_NOPTS_VALUE;

    if (pkt->duration == 0) {
        ff_compute_frame_duration(&num, &den, st, pc, pkt);
            pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
    if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
        update_initial_durations(s, st, pkt->stream_index, pkt->duration);

    /* correct timestamps with byte offset if demuxers only have timestamps
       on packet boundaries */
    if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if(pkt->pts != AV_NOPTS_VALUE)
        if(pkt->dts != AV_NOPTS_VALUE)

    /* This may be redundant, but it should not hurt. */
    if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
        presentation_delayed = 1;

    av_dlog(NULL, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
            presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
    /* interpolate PTS and DTS if they are not present */
    //We skip H264 currently because delay and has_b_frames are not reliably set
    if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != AV_CODEC_ID_H264){
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->cur_dts;

            /* this is tricky: the dts must be incremented by the duration
               of the frame we are displaying, i.e. the last I- or P-frame */
            if (st->last_IP_duration == 0)
                st->last_IP_duration = pkt->duration;
            if(pkt->dts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts + st->last_IP_duration;
            st->last_IP_duration = pkt->duration;
            st->last_IP_pts= pkt->pts;
            /* cannot compute PTS if not present (we can compute it only
               by knowing the future */
        } else if (pkt->pts != AV_NOPTS_VALUE ||
                   pkt->dts != AV_NOPTS_VALUE ||
            int duration = pkt->duration;

            /* presentation is not delayed : PTS and DTS are the same */
            if (pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = pkt->dts;
            update_initial_timestamps(s, pkt->stream_index, pkt->pts,
            if (pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = st->cur_dts;
            pkt->dts = pkt->pts;
            if (pkt->pts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->pts + duration;

    if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
        /* insertion-sort pts into the reorder window to derive missing dts */
        st->pts_buffer[0]= pkt->pts;
        for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
        if(pkt->dts == AV_NOPTS_VALUE)
            pkt->dts= st->pts_buffer[0];
        if(st->codec->codec_id == AV_CODEC_ID_H264){ // we skipped it above so we try here
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt); // this should happen on the first packet
        if(pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;

    av_dlog(NULL, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
            presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));

    /* update flags */
    if (is_intra_only(st->codec))
        pkt->flags |= AV_PKT_FLAG_KEY;
        pkt->convergence_duration = pc->convergence_duration;
/* Free every packet in a packet list and reset both the head and tail
 * pointers so the list is empty afterwards.
 * NOTE(review): the loop header and the free of the list node itself are
 * elided in this excerpt — confirm against the full source. */
1169 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1172 AVPacketList *pktl = *pkt_buf;
/* unlink the node before releasing its payload */
1173 *pkt_buf = pktl->next;
1174 av_free_packet(&pktl->pkt);
/* leave caller's tail pointer in a consistent (empty) state */
1177 *pkt_buf_end = NULL;
1181 * Parse a packet, add all split parts to parse_queue
1183 * @param pkt packet to parse, NULL when flushing the parser at end of stream
/**
 * Run the stream's parser over one input packet (or flush the parser) and
 * append every complete frame it emits to s->parse_queue.
 *
 * @param pkt packet to parse, NULL when flushing the parser at end of stream
 * @return 0 on success, a negative AVERROR code on failure
 *
 * NOTE(review): several lines of this function are elided in this excerpt;
 * comments below describe only the visible code.
 */
1185 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1187 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1188 AVStream *st = s->streams[stream_index];
/* a NULL pkt means "flush": feed the parser empty data via flush_pkt */
1189 uint8_t *data = pkt ? pkt->data : NULL;
1190 int size = pkt ? pkt->size : 0;
1191 int ret = 0, got_output = 0;
1194 av_init_packet(&flush_pkt);
1197 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1198 // preserve 0-size sync packets
1199 compute_pkt_fields(s, st, st->parser, pkt);
/* keep parsing while input remains, or while a flush still produces output */
1202 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1205 av_init_packet(&out_pkt);
1206 len = av_parser_parse2(st->parser, st->codec,
1207 &out_pkt.data, &out_pkt.size, data, size,
1208 pkt->pts, pkt->dts, pkt->pos);
/* timestamps are consumed by the first parser call only */
1210 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1212 /* increment read pointer */
1216 got_output = !!out_pkt.size;
/* transfer side data ownership to the first output packet */
1221 if (pkt->side_data) {
1222 out_pkt.side_data = pkt->side_data;
1223 out_pkt.side_data_elems = pkt->side_data_elems;
1224 pkt->side_data = NULL;
1225 pkt->side_data_elems = 0;
1228 /* set the duration */
1229 out_pkt.duration = 0;
1230 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1231 if (st->codec->sample_rate > 0) {
/* audio: parser duration is in samples, rescale to the stream time base */
1232 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1233 (AVRational){ 1, st->codec->sample_rate },
1237 } else if (st->codec->time_base.num != 0 &&
1238 st->codec->time_base.den != 0) {
/* video/other: parser duration is in codec time base units */
1239 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1240 st->codec->time_base,
1245 out_pkt.stream_index = st->index;
1246 out_pkt.pts = st->parser->pts;
1247 out_pkt.dts = st->parser->dts;
1248 out_pkt.pos = st->parser->pos;
1250 if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
1251 out_pkt.pos = st->parser->frame_offset;
/* keyframe flag: explicit from the parser, or inferred from I pict_type */
1253 if (st->parser->key_frame == 1 ||
1254 (st->parser->key_frame == -1 &&
1255 st->parser->pict_type == AV_PICTURE_TYPE_I))
1256 out_pkt.flags |= AV_PKT_FLAG_KEY;
/* fall back to the container's keyframe flag when the parser has no opinion */
1258 if(st->parser->key_frame == -1 && st->parser->pict_type==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
1259 out_pkt.flags |= AV_PKT_FLAG_KEY;
1261 compute_pkt_fields(s, st, st->parser, &out_pkt);
/* if the parser returned the input buffer unchanged, reuse its ownership
 * instead of copying */
1263 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1264 out_pkt.buf = pkt->buf;
1266 #if FF_API_DESTRUCT_PACKET
1267 FF_DISABLE_DEPRECATION_WARNINGS
1268 out_pkt.destruct = pkt->destruct;
1269 pkt->destruct = NULL;
1270 FF_ENABLE_DEPRECATION_WARNINGS
1273 if ((ret = av_dup_packet(&out_pkt)) < 0)
1276 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1277 av_free_packet(&out_pkt);
1278 ret = AVERROR(ENOMEM);
1284 /* end of the stream => close and free the parser */
1285 if (pkt == &flush_pkt) {
1286 av_parser_close(st->parser);
1291 av_free_packet(pkt);
/* Pop the first packet from a packet list into *pkt (parameter elided in
 * this excerpt) and advance the head; clears the tail pointer when the
 * list becomes empty. The list must be non-empty on entry. */
1295 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1296 AVPacketList **pkt_buffer_end,
/* caller contract: never called on an empty buffer */
1300 av_assert0(*pkt_buffer);
1303 *pkt_buffer = pktl->next;
1305 *pkt_buffer_end = NULL;
/**
 * Read the next frame-sized packet: pull raw packets from the demuxer,
 * run them through a parser when the stream needs one, and return either
 * a raw packet or the next entry of s->parse_queue.
 *
 * NOTE(review): several lines are elided in this excerpt; comments describe
 * only the visible code.
 */
1310 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1312 int ret = 0, i, got_packet = 0;
1314 av_init_packet(pkt);
/* loop until we either produced a packet directly or the parse queue
 * has something to hand out */
1316 while (!got_packet && !s->parse_queue) {
1320 /* read next packet */
1321 ret = ff_read_packet(s, &cur_pkt);
1323 if (ret == AVERROR(EAGAIN))
1325 /* flush the parsers */
1326 for(i = 0; i < s->nb_streams; i++) {
1328 if (st->parser && st->need_parsing)
1329 parse_packet(s, NULL, st->index);
1331 /* all remaining packets are now in parse_queue =>
1332 * really terminate parsing */
1336 st = s->streams[cur_pkt.stream_index];
/* sanity check: pts must never be smaller than dts */
1338 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1339 cur_pkt.dts != AV_NOPTS_VALUE &&
1340 cur_pkt.pts < cur_pkt.dts) {
1341 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1342 cur_pkt.stream_index,
1343 av_ts2str(cur_pkt.pts),
1344 av_ts2str(cur_pkt.dts),
1347 if (s->debug & FF_FDEBUG_TS)
1348 av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1349 cur_pkt.stream_index,
1350 av_ts2str(cur_pkt.pts),
1351 av_ts2str(cur_pkt.dts),
/* lazily create the parser the first time this stream needs parsing */
1356 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1357 st->parser = av_parser_init(st->codec->codec_id);
1359 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1360 "%s, packets or times may be invalid.\n",
1361 avcodec_get_name(st->codec->codec_id));
1362 /* no parser available: just output the raw packets */
1363 st->need_parsing = AVSTREAM_PARSE_NONE;
1364 } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1365 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1366 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1367 st->parser->flags |= PARSER_FLAG_ONCE;
1368 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
1369 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
1373 if (!st->need_parsing || !st->parser) {
1374 /* no parsing needed: we just output the packet as is */
1376 compute_pkt_fields(s, st, NULL, pkt);
/* maintain the generic seek index on keyframes when the demuxer asks for it */
1377 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1378 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1379 ff_reduce_index(s, st->index);
1380 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1383 } else if (st->discard < AVDISCARD_ALL) {
1384 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1388 av_free_packet(&cur_pkt);
/* skip_to_keyframe: drop packets until the next keyframe arrives */
1390 if (pkt->flags & AV_PKT_FLAG_KEY)
1391 st->skip_to_keyframe = 0;
1392 if (st->skip_to_keyframe) {
1393 av_free_packet(&cur_pkt);
1401 if (!got_packet && s->parse_queue)
1402 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1404 if(s->debug & FF_FDEBUG_TS)
1405 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1407 av_ts2str(pkt->pts),
1408 av_ts2str(pkt->dts),
/**
 * Public frame-reading entry point. Without AVFMT_FLAG_GENPTS it simply
 * drains s->packet_buffer or calls read_frame_internal(); with GENPTS it
 * buffers packets and fills in missing pts values from the dts of later
 * packets of the same stream.
 *
 * NOTE(review): several lines are elided in this excerpt; comments describe
 * only the visible code.
 */
1416 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1418 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
/* fast path (no pts generation): buffered packet first, else demux */
1424 ret = s->packet_buffer ?
1425 read_from_packet_buffer(&s->packet_buffer, &s->packet_buffer_end, pkt) :
1426 read_frame_internal(s, pkt);
1433 AVPacketList *pktl = s->packet_buffer;
1436 AVPacket *next_pkt = &pktl->pkt;
1438 if (next_pkt->dts != AV_NOPTS_VALUE) {
/* comparisons below are modulo 2^wrap_bits to survive timestamp wrapping */
1439 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1440 // last dts seen for this stream. if any of packets following
1441 // current one had no dts, we will set this to AV_NOPTS_VALUE.
1442 int64_t last_dts = next_pkt->dts;
1443 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1444 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1445 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1446 if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
/* a later packet's dts becomes this packet's presentation time */
1447 next_pkt->pts = pktl->pkt.dts;
1449 if (last_dts != AV_NOPTS_VALUE) {
1450 // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1451 last_dts = pktl->pkt.dts;
1456 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1457 // Fixing the last reference frame had none pts issue (For MXF etc).
1458 // We only do this when
1460 // 2. we are not able to resolve a pts value for current packet.
1461 // 3. the packets for this stream at the end of the files had valid dts.
1462 next_pkt->pts = last_dts + next_pkt->duration;
1464 pktl = s->packet_buffer;
1467 /* read packet from packet buffer, if there is data */
1468 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1469 next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1470 ret = read_from_packet_buffer(&s->packet_buffer,
1471 &s->packet_buffer_end, pkt);
1476 ret = read_frame_internal(s, pkt);
1478 if (pktl && ret != AVERROR(EAGAIN)) {
/* keep the freshly demuxed packet buffered so pts resolution can continue */
1485 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1486 &s->packet_buffer_end)) < 0)
1487 return AVERROR(ENOMEM);
1492 st = s->streams[pkt->stream_index];
/* inject pending skip_samples as packet side data for the decoder */
1493 if (st->skip_samples) {
1494 uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
1496 AV_WL32(p, st->skip_samples);
1497 av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d\n", st->skip_samples);
1499 st->skip_samples = 0;
1502 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
1503 ff_reduce_index(s, st->index);
1504 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
/* convert internal relative timestamps back to the demuxer's origin
 * before handing the packet to the user */
1507 if (is_relative(pkt->dts))
1508 pkt->dts -= RELATIVE_TS_BASE;
1509 if (is_relative(pkt->pts))
1510 pkt->pts -= RELATIVE_TS_BASE;
1515 /* XXX: suppress the packet queue */
/* Drop all buffered packets (parse queue, reordered packets, raw packets)
 * and reset the raw-packet probing budget. */
1516 static void flush_packet_queue(AVFormatContext *s)
1518 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1519 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1520 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1522 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1525 /*******************************************************/
/* Pick a default stream: prefer a real video stream (attached pictures
 * excluded), otherwise the first audio stream, otherwise stream 0.
 * NOTE(review): the return for the video case is elided in this excerpt. */
1528 int av_find_default_stream_index(AVFormatContext *s)
1530 int first_audio_index = -1;
1534 if (s->nb_streams <= 0)
1536 for(i = 0; i < s->nb_streams; i++) {
/* attached pictures (cover art) do not count as playable video */
1538 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1539 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1542 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1543 first_audio_index = i;
1545 return first_audio_index >= 0 ? first_audio_index : 0;
1549 * Flush the frame reader.
/* Flush all buffered packets and reset the per-stream read state
 * (parser, timestamps, probing, reorder buffer) — used around seeks. */
1551 void ff_read_frame_flush(AVFormatContext *s)
1556 flush_packet_queue(s);
1558 /* for each stream, reset read state */
1559 for(i = 0; i < s->nb_streams; i++) {
1563 av_parser_close(st->parser);
1566 st->last_IP_pts = AV_NOPTS_VALUE;
1567 if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1568 else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1570 st->probe_packets = MAX_PROBE_PACKETS;
/* invalidate the pts reorder buffer as well */
1572 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1573 st->pts_buffer[j]= AV_NOPTS_VALUE;
/* Set cur_dts on every stream to the given timestamp, which is expressed
 * in ref_st's time base and rescaled into each stream's own time base. */
1577 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1581 for(i = 0; i < s->nb_streams; i++) {
1582 AVStream *st = s->streams[i];
/* timestamp * (ref_tb / st_tb), done with 64-bit intermediates */
1584 st->cur_dts = av_rescale(timestamp,
1585 st->time_base.den * (int64_t)ref_st->time_base.num,
1586 st->time_base.num * (int64_t)ref_st->time_base.den);
/* Keep the seek index within s->max_index_size bytes: when the limit is
 * reached, drop every second entry, halving the index. */
1590 void ff_reduce_index(AVFormatContext *s, int stream_index)
1592 AVStream *st= s->streams[stream_index];
1593 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1595 if((unsigned)st->nb_index_entries >= max_entries){
/* compact in place: entry i takes the value of entry 2*i */
1597 for(i=0; 2*i<st->nb_index_entries; i++)
1598 st->index_entries[i]= st->index_entries[2*i];
1599 st->nb_index_entries= i;
/**
 * Insert an entry into a seek index kept sorted by timestamp. If an entry
 * with the same timestamp exists it is updated in place; otherwise the
 * array is grown and the entry is inserted at the right position.
 * NOTE(review): some lines (index return, flags/pos assignment) are elided
 * in this excerpt.
 */
1603 int ff_add_index_entry(AVIndexEntry **index_entries,
1604 int *nb_index_entries,
1605 unsigned int *index_entries_allocated_size,
1606 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1608 AVIndexEntry *entries, *ie;
/* guard the realloc size computation against overflow */
1611 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1614 if(timestamp == AV_NOPTS_VALUE)
1615 return AVERROR(EINVAL);
1617 if (size < 0 || size > 0x3FFFFFFF)
1618 return AVERROR(EINVAL);
1620 if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1621 timestamp -= RELATIVE_TS_BASE;
1623 entries = av_fast_realloc(*index_entries,
1624 index_entries_allocated_size,
1625 (*nb_index_entries + 1) *
1626 sizeof(AVIndexEntry))
1630 *index_entries= entries;
1632 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
/* not found => append at the end (timestamps must stay sorted) */
1635 index= (*nb_index_entries)++;
1636 ie= &entries[index];
1637 av_assert0(index==0 || ie[-1].timestamp < timestamp);
1639 ie= &entries[index];
1640 if(ie->timestamp != timestamp){
1641 if(ie->timestamp <= timestamp)
/* shift the tail right to make room for the new entry */
1643 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1644 (*nb_index_entries)++;
1645 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1646 distance= ie->min_distance;
1650 ie->timestamp = timestamp;
1651 ie->min_distance= distance;
/* Public wrapper: wrap the timestamp into the stream's valid range, then
 * delegate to ff_add_index_entry() on the stream's own index arrays. */
1658 int av_add_index_entry(AVStream *st,
1659 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1661 timestamp = wrap_timestamp(st, timestamp);
1662 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1663 &st->index_entries_allocated_size, pos,
1664 timestamp, size, distance, flags);
/* Binary-search a sorted index for wanted_timestamp. AVSEEK_FLAG_BACKWARD
 * selects the entry at or before the timestamp, otherwise at or after;
 * without AVSEEK_FLAG_ANY the result is additionally moved to a keyframe.
 * NOTE(review): the binary-search loop header and the final return are
 * elided in this excerpt. */
1667 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1668 int64_t wanted_timestamp, int flags)
1676 //optimize appending index entries at the end
1677 if(b && entries[b-1].timestamp < wanted_timestamp)
1682 timestamp = entries[m].timestamp;
1683 if(timestamp >= wanted_timestamp)
1685 if(timestamp <= wanted_timestamp)
/* pick the lower (backward) or upper (forward) bound of the search */
1688 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1690 if(!(flags & AVSEEK_FLAG_ANY)){
/* walk in the seek direction until a keyframe entry is found */
1691 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1692 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Public wrapper around ff_index_search_timestamp() for a stream's index. */
1701 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1704 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1705 wanted_timestamp, flags);
/* Call the demuxer's read_timestamp callback and apply timestamp wrapping
 * for the given stream (skipped for stream_index < 0). */
1708 static int64_t ff_read_timestamp(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,
1709 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1711 int64_t ts = read_timestamp(s, stream_index, ppos, pos_limit);
1712 if (stream_index >= 0)
1713 ts = wrap_timestamp(s->streams[stream_index], ts);
/**
 * Seek to target_ts using a binary search over the demuxer's
 * read_timestamp callback, seeding the search bounds from the stream's
 * seek index when entries are available.
 * NOTE(review): several lines are elided in this excerpt.
 */
1719 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1720 AVInputFormat *avif= s->iformat;
1721 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1722 int64_t ts_min, ts_max, ts;
1726 if (stream_index < 0)
1729 av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1732 ts_min= AV_NOPTS_VALUE;
1733 pos_limit= -1; //gcc falsely says it may be uninitialized
1735 st= s->streams[stream_index];
/* use cached index entries to tighten the initial [min,max] window */
1736 if(st->index_entries){
1739 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1740 index= FFMAX(index, 0);
1741 e= &st->index_entries[index];
1743 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1745 ts_min= e->timestamp;
1746 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1747 pos_min, av_ts2str(ts_min));
1749 av_assert1(index==0);
/* and an entry at/after the target bounds the search from above */
1752 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1753 av_assert0(index < st->nb_index_entries);
1755 e= &st->index_entries[index];
1756 av_assert1(e->timestamp >= target_ts);
1758 ts_max= e->timestamp;
1759 pos_limit= pos_max - e->min_distance;
1760 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
1761 pos_max, pos_limit, av_ts2str(ts_max));
1765 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
/* reposition the I/O layer, then reset the demuxing state */
1770 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1773 ff_read_frame_flush(s);
1774 ff_update_cur_dts(s, st, ts);
/**
 * Find the last timestamp in the file: step backwards from EOF in growing
 * windows until read_timestamp() yields a value, then scan forward to the
 * final timestamp before EOF.
 * NOTE(review): the window-doubling and result-storing lines are elided in
 * this excerpt.
 */
1779 int ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos,
1780 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1783 int64_t limit, ts_max;
1784 int64_t filesize = avio_size(s->pb);
1785 int64_t pos_max = filesize - 1;
/* back off by `step` and retry until a timestamp is found */
1788 pos_max = FFMAX(0, (pos_max) - step);
1789 ts_max = ff_read_timestamp(s, stream_index, &pos_max, limit, read_timestamp);
1791 }while(ts_max == AV_NOPTS_VALUE && 2*limit > step);
1792 if (ts_max == AV_NOPTS_VALUE)
/* advance to the very last readable timestamp */
1796 int64_t tmp_pos = pos_max + 1;
1797 int64_t tmp_ts = ff_read_timestamp(s, stream_index, &tmp_pos, INT64_MAX, read_timestamp);
1798 if(tmp_ts == AV_NOPTS_VALUE)
1800 av_assert0(tmp_pos > pos_max);
1803 if(tmp_pos >= filesize)
/**
 * Generic timestamp search: narrow [pos_min, pos_max] around target_ts by
 * interpolation, falling back to bisection and then linear stepping, using
 * the demuxer's read_timestamp callback. Returns the chosen file position
 * and stores the matched timestamp in *ts_ret.
 * NOTE(review): several lines are elided in this excerpt.
 */
1815 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1816 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1817 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1818 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1825 av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
/* establish a lower bound when the caller did not provide one */
1827 if(ts_min == AV_NOPTS_VALUE){
1828 pos_min = s->data_offset;
1829 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1830 if (ts_min == AV_NOPTS_VALUE)
1834 if(ts_min >= target_ts){
/* establish an upper bound from the end of the file when missing */
1839 if(ts_max == AV_NOPTS_VALUE){
1840 if ((ret = ff_find_last_ts(s, stream_index, &ts_max, &pos_max, read_timestamp)) < 0)
1845 if(ts_max <= target_ts){
1850 if(ts_min > ts_max){
1852 }else if(ts_min == ts_max){
1857 while (pos_min < pos_limit) {
1858 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1859 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1860 assert(pos_limit <= pos_max);
1863 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1864 // interpolate position (better than dichotomy)
1865 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1866 + pos_min - approximate_keyframe_distance;
1867 }else if(no_change==1){
1868 // bisection, if interpolation failed to change min or max pos last time
1869 pos = (pos_min + pos_limit)>>1;
1871 /* linear search if bisection failed, can only happen if there
1872 are very few or no keyframes between min/max */
/* clamp the probe position to the valid window */
1877 else if(pos > pos_limit)
1881 ts = ff_read_timestamp(s, stream_index, &pos, INT64_MAX, read_timestamp); //may pass pos_limit instead of -1
1886 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1887 pos_min, pos, pos_max,
1888 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1889 pos_limit, start_pos, no_change);
1890 if(ts == AV_NOPTS_VALUE){
1891 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1894 assert(ts != AV_NOPTS_VALUE);
/* shrink the window toward the side containing target_ts */
1895 if (target_ts <= ts) {
1896 pos_limit = start_pos - 1;
1900 if (target_ts >= ts) {
/* BACKWARD seeks resolve to the lower bound, forward to the upper */
1906 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1907 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1910 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1912 ts_max = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1913 av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1914 pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
/* Byte-position seek: clamp pos to [data_offset, filesize-1], reposition
 * the I/O context and mark it as repositioned. */
1920 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1921 int64_t pos_min, pos_max;
1923 pos_min = s->data_offset;
1924 pos_max = avio_size(s->pb) - 1;
1926 if (pos < pos_min) pos= pos_min;
1927 else if(pos > pos_max) pos= pos_max;
1929 avio_seek(s->pb, pos, SEEK_SET);
1931 s->io_repositioned = 1;
/**
 * Generic index-based seek: look the timestamp up in the stream's index;
 * when it lies beyond the last entry, read frames forward (building the
 * index as a side effect) until the target is reached, then retry.
 * NOTE(review): several lines are elided in this excerpt.
 */
1936 static int seek_frame_generic(AVFormatContext *s,
1937 int stream_index, int64_t timestamp, int flags)
1944 st = s->streams[stream_index];
1946 index = av_index_search_timestamp(st, timestamp, flags);
1948 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
/* target is at/after the last indexed position: scan forward */
1951 if(index < 0 || index==st->nb_index_entries-1){
1955 if(st->nb_index_entries){
1956 av_assert0(st->index_entries);
1957 ie= &st->index_entries[st->nb_index_entries-1];
1958 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1960 ff_update_cur_dts(s, st, ie->timestamp);
1962 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1968 read_status = av_read_frame(s, &pkt);
1969 } while (read_status == AVERROR(EAGAIN));
1970 if (read_status < 0)
1972 av_free_packet(&pkt);
1973 if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1974 if(pkt.flags & AV_PKT_FLAG_KEY)
/* give up if no keyframe shows up in a reasonable number of packets
 * (CDG intentionally has none, so it is exempt) */
1976 if(nonkey++ > 1000 && st->codec->codec_id != AV_CODEC_ID_CDGRAPHICS){
1977 av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1982 index = av_index_search_timestamp(st, timestamp, flags);
1987 ff_read_frame_flush(s);
1988 if (s->iformat->read_seek){
1989 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
/* finally jump to the resolved index entry */
1992 ie = &st->index_entries[index];
1993 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1995 ff_update_cur_dts(s, st, ie->timestamp);
/**
 * Dispatch a seek request: byte seek, format-specific read_seek, binary
 * search via read_timestamp, or the generic index-based seek — in that
 * order of preference.
 */
2000 static int seek_frame_internal(AVFormatContext *s, int stream_index,
2001 int64_t timestamp, int flags)
2006 if (flags & AVSEEK_FLAG_BYTE) {
2007 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
2009 ff_read_frame_flush(s);
2010 return seek_frame_byte(s, stream_index, timestamp, flags);
/* no stream given: pick a default one and convert from AV_TIME_BASE */
2013 if(stream_index < 0){
2014 stream_index= av_find_default_stream_index(s);
2015 if(stream_index < 0)
2018 st= s->streams[stream_index];
2019 /* timestamp for default must be expressed in AV_TIME_BASE units */
2020 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num)
2023 /* first, we try the format specific seek */
2024 if (s->iformat->read_seek) {
2025 ff_read_frame_flush(s);
2026 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
2033 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
2034 ff_read_frame_flush(s);
2035 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
2036 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
2037 ff_read_frame_flush(s);
2038 return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public seek entry point. Formats that only implement read_seek2 are
 * served through avformat_seek_file() with a one-sided range; otherwise
 * delegate to seek_frame_internal() and re-queue attached pictures. */
2044 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
2048 if (s->iformat->read_seek2 && !s->iformat->read_seek) {
2049 int64_t min_ts = INT64_MIN, max_ts = INT64_MAX;
/* BACKWARD is expressed via the [min,ts] range in the new API */
2050 if ((flags & AVSEEK_FLAG_BACKWARD))
2054 return avformat_seek_file(s, stream_index, min_ts, timestamp, max_ts,
2055 flags & ~AVSEEK_FLAG_BACKWARD);
2058 ret = seek_frame_internal(s, stream_index, timestamp, flags);
2061 ret = avformat_queue_attached_pictures(s);
/**
 * Range-based seek API: seek to ts constrained to [min_ts, max_ts].
 * Prefers the demuxer's read_seek2; otherwise falls back to the old
 * av_seek_frame() API with a direction derived from the range.
 * NOTE(review): several lines are elided in this excerpt.
 */
2066 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
2068 if(min_ts > ts || max_ts < ts)
2070 if (stream_index < -1 || stream_index >= (int)s->nb_streams)
2071 return AVERROR(EINVAL);
2074 flags |= AVSEEK_FLAG_ANY;
2075 flags &= ~AVSEEK_FLAG_BACKWARD;
2077 if (s->iformat->read_seek2) {
2079 ff_read_frame_flush(s);
/* single-stream file with a global request: rescale the whole range
 * from AV_TIME_BASE into the stream's time base, rounding inwards */
2081 if (stream_index == -1 && s->nb_streams == 1) {
2082 AVRational time_base = s->streams[0]->time_base;
2083 ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
2084 min_ts = av_rescale_rnd(min_ts, time_base.den,
2085 time_base.num * (int64_t)AV_TIME_BASE,
2086 AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
2087 max_ts = av_rescale_rnd(max_ts, time_base.den,
2088 time_base.num * (int64_t)AV_TIME_BASE,
2089 AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
2092 ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
2095 ret = avformat_queue_attached_pictures(s);
2099 if(s->iformat->read_timestamp){
2100 //try to seek via read_timestamp()
2103 // Fall back on old API if new is not implemented but old is.
2104 // Note the old API has somewhat different semantics.
2105 if (s->iformat->read_seek || 1) {
/* choose the direction whose bound is closer to ts */
2106 int dir = (ts - (uint64_t)min_ts > (uint64_t)max_ts - ts ? AVSEEK_FLAG_BACKWARD : 0);
2107 int ret = av_seek_frame(s, stream_index, ts, flags | dir);
2108 if (ret<0 && ts != min_ts && max_ts != ts) {
2109 ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
2111 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
2116 // try some generic seek like seek_frame_generic() but with new ts semantics
2117 return -1; //unreachable
2120 /*******************************************************/
2123 * Return TRUE if the file has an accurate duration in any of its streams.
2125 * @return TRUE if the stream has accurate duration for at least one component.
/* Return 1 if any stream (or the container itself) carries a known
 * duration, 0 otherwise. */
2127 static int has_duration(AVFormatContext *ic)
2132 for(i = 0;i < ic->nb_streams; i++) {
2133 st = ic->streams[i];
2134 if (st->duration != AV_NOPTS_VALUE)
/* a container-level duration also counts */
2137 if (ic->duration != AV_NOPTS_VALUE)
2143 * Estimate the stream timings from those of each component stream.
2145 * Also computes the global bitrate if possible.
/**
 * Derive container start_time, duration and (from the file size) bit_rate
 * from the per-stream timings; also propagate start/end times to programs.
 * Subtitle/data stream start times are tracked separately so they do not
 * dominate the primary streams.
 * NOTE(review): some lines are elided in this excerpt.
 */
2147 static void update_stream_timings(AVFormatContext *ic)
2149 int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2150 int64_t duration, duration1, filesize;
2155 start_time = INT64_MAX;
2156 start_time_text = INT64_MAX;
2157 end_time = INT64_MIN;
2158 duration = INT64_MIN;
2159 for(i = 0;i < ic->nb_streams; i++) {
2160 st = ic->streams[i];
2161 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
/* everything is compared in AV_TIME_BASE units */
2162 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
2163 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
2164 if (start_time1 < start_time_text)
2165 start_time_text = start_time1;
2167 start_time = FFMIN(start_time, start_time1);
2168 end_time1 = AV_NOPTS_VALUE;
2169 if (st->duration != AV_NOPTS_VALUE) {
2170 end_time1 = start_time1
2171 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2172 end_time = FFMAX(end_time, end_time1);
/* widen each program containing this stream to cover it */
2174 for(p = NULL; (p = av_find_program_from_stream(ic, p, i)); ){
2175 if(p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1)
2176 p->start_time = start_time1;
2177 if(p->end_time < end_time1)
2178 p->end_time = end_time1;
2181 if (st->duration != AV_NOPTS_VALUE) {
2182 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2183 duration = FFMAX(duration, duration1);
/* accept a text-stream start time only when it is close to (within 1s of)
 * the primary streams, otherwise treat it as an outlier */
2186 if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2187 start_time = start_time_text;
2188 else if(start_time > start_time_text)
2189 av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);
2191 if (start_time != INT64_MAX) {
2192 ic->start_time = start_time;
2193 if (end_time != INT64_MIN) {
2194 if (ic->nb_programs) {
/* with programs, the duration is the longest program span */
2195 for (i=0; i<ic->nb_programs; i++) {
2196 p = ic->programs[i];
2197 if(p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time)
2198 duration = FFMAX(duration, p->end_time - p->start_time);
2201 duration = FFMAX(duration, end_time - start_time);
2204 if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) {
2205 ic->duration = duration;
2207 if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2208 /* compute the bitrate */
2209 double bitrate = (double)filesize * 8.0 * AV_TIME_BASE /
2210 (double)ic->duration;
2211 if (bitrate >= 0 && bitrate <= INT_MAX)
2212 ic->bit_rate = bitrate;
/* Recompute container timings, then copy them (rescaled into each
 * stream's time base) onto streams that still lack a start_time. */
2216 static void fill_all_stream_timings(AVFormatContext *ic)
2221 update_stream_timings(ic);
2222 for(i = 0;i < ic->nb_streams; i++) {
2223 st = ic->streams[i];
/* only fill in streams with no timing of their own */
2224 if (st->start_time == AV_NOPTS_VALUE) {
2225 if(ic->start_time != AV_NOPTS_VALUE)
2226 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2227 if(ic->duration != AV_NOPTS_VALUE)
2228 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/**
 * Last-resort timing estimate: sum the per-stream bitrates if the
 * container bitrate is unknown, then derive stream durations from
 * filesize * 8 / bit_rate. Logs a warning because this is inaccurate.
 * NOTE(review): some lines are elided in this excerpt.
 */
2233 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2235 int64_t filesize, duration;
2236 int i, show_warning = 0;
2239 /* if bit_rate is already set, we believe it */
2240 if (ic->bit_rate <= 0) {
2242 for(i=0;i<ic->nb_streams;i++) {
2243 st = ic->streams[i];
/* guard the running sum against int overflow */
2244 if (st->codec->bit_rate > 0) {
2245 if (INT_MAX - st->codec->bit_rate < bit_rate) {
2249 bit_rate += st->codec->bit_rate;
2252 ic->bit_rate = bit_rate;
2255 /* if duration is already set, we believe it */
2256 if (ic->duration == AV_NOPTS_VALUE &&
2257 ic->bit_rate != 0) {
2258 filesize = ic->pb ? avio_size(ic->pb) : 0;
2260 for(i = 0; i < ic->nb_streams; i++) {
2261 st = ic->streams[i];
/* 8*filesize/bit_rate rescaled into the stream's time base; the
 * time_base check prevents overflow in the multiplication */
2262 if ( st->time_base.num <= INT64_MAX / ic->bit_rate
2263 && st->duration == AV_NOPTS_VALUE) {
2264 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2265 st->duration = duration;
2272 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2275 #define DURATION_MAX_READ_SIZE 250000LL
2276 #define DURATION_MAX_RETRY 4
2278 /* only usable for MPEG-PS streams */
/**
 * Estimate stream durations by reading packets near the end of the file
 * and taking the last pts seen per stream (minus its start time). Retries
 * with a window doubled up to DURATION_MAX_RETRY times, then restores the
 * original file position.
 * NOTE(review): several lines are elided in this excerpt.
 */
2279 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2281 AVPacket pkt1, *pkt = &pkt1;
2283 int read_size, i, ret;
2285 int64_t filesize, offset, duration;
2288 /* flush packet queue */
2289 flush_packet_queue(ic);
2291 for (i=0; i<ic->nb_streams; i++) {
2292 st = ic->streams[i];
2293 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2294 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2297 av_parser_close(st->parser);
2302 /* estimate the end time (duration) */
2303 /* XXX: may need to support wrapping */
2304 filesize = ic->pb ? avio_size(ic->pb) : 0;
2305 end_time = AV_NOPTS_VALUE;
/* seek DURATION_MAX_READ_SIZE<<retry bytes before EOF and scan from there */
2307 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2311 avio_seek(ic->pb, offset, SEEK_SET);
2314 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2318 ret = ff_read_packet(ic, pkt);
2319 } while(ret == AVERROR(EAGAIN));
2322 read_size += pkt->size;
2323 st = ic->streams[pkt->stream_index];
2324 if (pkt->pts != AV_NOPTS_VALUE &&
2325 (st->start_time != AV_NOPTS_VALUE ||
2326 st->first_dts != AV_NOPTS_VALUE)) {
/* duration = last pts - stream start (or first dts as fallback) */
2327 duration = end_time = pkt->pts;
2328 if (st->start_time != AV_NOPTS_VALUE)
2329 duration -= st->start_time;
2331 duration -= st->first_dts;
/* only accept a new duration if it grows plausibly (< 60s jump) */
2333 if (st->duration == AV_NOPTS_VALUE || st->info->last_duration<=0 ||
2334 (st->duration < duration && FFABS(duration - st->info->last_duration) < 60LL*st->time_base.den / st->time_base.num))
2335 st->duration = duration;
2336 st->info->last_duration = duration;
2339 av_free_packet(pkt);
2341 }while( end_time==AV_NOPTS_VALUE
2342 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2343 && ++retry <= DURATION_MAX_RETRY);
2345 fill_all_stream_timings(ic);
/* restore the original position and reset per-stream read state */
2347 avio_seek(ic->pb, old_offset, SEEK_SET);
2348 for (i=0; i<ic->nb_streams; i++) {
2350 st->cur_dts= st->first_dts;
2351 st->last_IP_pts = AV_NOPTS_VALUE;
/**
 * Choose and run the best available timing-estimation strategy:
 * PTS scan (mpeg/mpegts on seekable input), per-stream timings, or the
 * bitrate-based fallback; records which method was used.
 * NOTE(review): some lines are elided in this excerpt.
 */
2355 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2359 /* get the file size, if possible */
2360 if (ic->iformat->flags & AVFMT_NOFILE) {
2363 file_size = avio_size(ic->pb);
2364 file_size = FFMAX(0, file_size);
/* MPEG-PS/TS carry reliable PTS near EOF, so scan there directly */
2367 if ((!strcmp(ic->iformat->name, "mpeg") ||
2368 !strcmp(ic->iformat->name, "mpegts")) &&
2369 file_size && ic->pb->seekable) {
2370 /* get accurate estimate from the PTSes */
2371 estimate_timings_from_pts(ic, old_offset);
2372 ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
2373 } else if (has_duration(ic)) {
2374 /* at least one component has timings - we use them for all
2376 fill_all_stream_timings(ic);
2377 ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
2379 /* less precise: use bitrate info */
2380 estimate_timings_from_bit_rate(ic);
2381 ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE;
2383 update_stream_timings(ic);
/* debug dump of the resulting per-stream and container timings */
2387 AVStream av_unused *st;
2388 for(i = 0;i < ic->nb_streams; i++) {
2389 st = ic->streams[i];
2390 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2391 (double) st->start_time / AV_TIME_BASE,
2392 (double) st->duration / AV_TIME_BASE);
2394 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2395 (double) ic->start_time / AV_TIME_BASE,
2396 (double) ic->duration / AV_TIME_BASE,
2397 ic->bit_rate / 1000);
/**
 * Check whether a stream's codec context has all parameters required to
 * consider it fully probed; on failure, stores a human-readable reason in
 * *errmsg_ptr via the FAIL() macro.
 * NOTE(review): the FAIL macro's return statement and some case endings
 * are elided in this excerpt.
 */
2401 static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
2403 AVCodecContext *avctx = st->codec;
2405 #define FAIL(errmsg) do { \
2407 *errmsg_ptr = errmsg; \
2411 switch (avctx->codec_type) {
2412 case AVMEDIA_TYPE_AUDIO:
2413 if (!avctx->frame_size && determinable_frame_size(avctx))
2414 FAIL("unspecified frame size");
/* sample_fmt/pix_fmt checks only apply once a decoder was tried */
2415 if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2416 FAIL("unspecified sample format");
2417 if (!avctx->sample_rate)
2418 FAIL("unspecified sample rate");
2419 if (!avctx->channels)
2420 FAIL("unspecified number of channels");
2421 if (st->info->found_decoder >= 0 && !st->nb_decoded_frames && avctx->codec_id == AV_CODEC_ID_DTS)
2422 FAIL("no decodable DTS frames");
2424 case AVMEDIA_TYPE_VIDEO:
2426 FAIL("unspecified size");
2427 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
2428 FAIL("unspecified pixel format");
/* RV30/40 need at least one frame or an explicit aspect ratio */
2429 if (st->codec->codec_id == AV_CODEC_ID_RV30 || st->codec->codec_id == AV_CODEC_ID_RV40)
2430 if (!st->sample_aspect_ratio.num && !st->codec->sample_aspect_ratio.num && !st->codec_info_nb_frames)
2431 FAIL("no frame in rv30/40 and no sar");
2433 case AVMEDIA_TYPE_SUBTITLE:
2434 if (avctx->codec_id == AV_CODEC_ID_HDMV_PGS_SUBTITLE && !avctx->width)
2435 FAIL("unspecified size");
2437 case AVMEDIA_TYPE_DATA:
/* data streams with no codec are fine as-is */
2438 if(avctx->codec_id == AV_CODEC_ID_NONE) return 1;
2441 if (avctx->codec_id == AV_CODEC_ID_NONE)
2442 FAIL("unknown codec");
2446 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/*
 * Attempt to decode one packet for a stream in order to discover missing
 * codec parameters (sample format, pixel format, channel layout, ...).
 * Opens the decoder on first use, forcing "threads=1" so SPS/PPS end up
 * in extradata; records the outcome in st->info->found_decoder
 * (1 = opened OK, -1 = unusable).
 * NOTE(review): excerpt is missing original lines (error paths, loop
 * tail); comments describe only what is visible.
 */
2447 static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt, AVDictionary **options)
2449 const AVCodec *codec;
2450 int got_picture = 1, ret = 0;
2451 AVFrame *frame = av_frame_alloc();
2452 AVSubtitle subtitle;
2453 AVPacket pkt = *avpkt;
2456 return AVERROR(ENOMEM);
/* lazily open the decoder the first time this stream is probed */
2458 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2459 AVDictionary *thread_opt = NULL;
2461 codec = find_decoder(s, st, st->codec->codec_id);
2464 st->info->found_decoder = -1;
2469 /* force thread count to 1 since the h264 decoder will not extract SPS
2470 * and PPS to extradata during multi-threaded decoding */
2471 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2472 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2474 av_dict_free(&thread_opt);
2476 st->info->found_decoder = -1;
2479 st->info->found_decoder = 1;
2480 } else if (!st->info->found_decoder)
2481 st->info->found_decoder = 1;
2483 if (st->info->found_decoder < 0) {
/* keep decoding until parameters are known, the reorder delay has been
 * guessed, and (for CODEC_CAP_CHANNEL_CONF codecs) a first frame landed */
2488 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2490 (!has_codec_parameters(st, NULL) ||
2491 !has_decode_delay_been_guessed(st) ||
2492 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2494 avcodec_get_frame_defaults(frame);
2495 switch(st->codec->codec_type) {
2496 case AVMEDIA_TYPE_VIDEO:
2497 ret = avcodec_decode_video2(st->codec, frame,
2498 &got_picture, &pkt);
2500 case AVMEDIA_TYPE_AUDIO:
2501 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
2503 case AVMEDIA_TYPE_SUBTITLE:
2504 ret = avcodec_decode_subtitle2(st->codec, &subtitle,
2505 &got_picture, &pkt);
2513 st->nb_decoded_frames++;
2520 if(!pkt.data && !got_picture)
2524 avcodec_free_frame(&frame);
/*
 * Look up the container tag for a codec id in a tags table terminated by
 * AV_CODEC_ID_NONE.
 * NOTE(review): the loop body and return are outside this excerpt.
 */
2528 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
2530 while (tags->id != AV_CODEC_ID_NONE) {
/*
 * Reverse lookup: map a container tag back to a codec id. First pass is
 * an exact tag match, second pass is case-insensitive via
 * avpriv_toupper4(); AV_CODEC_ID_NONE if neither matches.
 */
2538 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2541 for(i=0; tags[i].id != AV_CODEC_ID_NONE;i++) {
2542 if(tag == tags[i].tag)
2545 for(i=0; tags[i].id != AV_CODEC_ID_NONE; i++) {
2546 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2549 return AV_CODEC_ID_NONE;
/*
 * Select a raw PCM codec id from sample size (bps), float flag (flt),
 * endianness (be) and a per-size signedness bitmask (sflags: bit bps-1
 * set means signed).
 * NOTE(review): the surrounding switch statements and flt/integer branch
 * scaffolding fall in lines missing from this excerpt.
 */
2552 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
/* float formats: only 32- and 64-bit exist */
2556 case 32: return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
2557 case 64: return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
2558 default: return AV_CODEC_ID_NONE;
2563 if (sflags & (1 << (bps - 1))) {
/* signed integer PCM, keyed by byte width */
2565 case 1: return AV_CODEC_ID_PCM_S8;
2566 case 2: return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
2567 case 3: return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
2568 case 4: return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
2569 default: return AV_CODEC_ID_NONE;
/* unsigned integer PCM */
2573 case 1: return AV_CODEC_ID_PCM_U8;
2574 case 2: return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
2575 case 3: return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
2576 case 4: return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
2577 default: return AV_CODEC_ID_NONE;
/*
 * Convenience wrapper over av_codec_get_tag2(): returns the tag, or
 * (per the visible failure branch) a fallback when no table matches.
 */
2583 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum AVCodecID id)
2586 if (!av_codec_get_tag2(tags, id, &tag))
/*
 * Search a NULL-terminated list of tag tables for a codec id; on a match
 * the tag is stored through *tag. Return values fall in lines missing
 * from this excerpt.
 */
2591 int av_codec_get_tag2(const AVCodecTag * const *tags, enum AVCodecID id,
2595 for(i=0; tags && tags[i]; i++){
2596 const AVCodecTag *codec_tags = tags[i];
2597 while (codec_tags->id != AV_CODEC_ID_NONE) {
2598 if (codec_tags->id == id) {
2599 *tag = codec_tags->tag;
/*
 * Try each tag table in turn via ff_codec_get_id(); return the first
 * non-NONE match, else AV_CODEC_ID_NONE.
 */
2608 enum AVCodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2611 for(i=0; tags && tags[i]; i++){
2612 enum AVCodecID id= ff_codec_get_id(tags[i], tag);
2613 if(id!=AV_CODEC_ID_NONE) return id;
2615 return AV_CODEC_ID_NONE;
/*
 * Fill in missing chapter end times: each open-ended chapter is clipped
 * to the earliest start of any later-starting chapter, falling back to
 * the container end time (max_time) when nothing follows.
 */
2618 static void compute_chapters_end(AVFormatContext *s)
2621 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2623 for (i = 0; i < s->nb_chapters; i++)
2624 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2625 AVChapter *ch = s->chapters[i];
/* candidate end in this chapter's own time base */
2626 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2629 for (j = 0; j < s->nb_chapters; j++) {
2630 AVChapter *ch1 = s->chapters[j];
2631 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2632 if (j != i && next_start > ch->start && next_start < end)
/* INT64_MAX means "no bound was found": collapse to zero length */
2635 ch->end = (end == INT64_MAX) ? ch->start : end;
/*
 * Map a timebase index to a "standard" frame-rate numerator, with an
 * implicit denominator of 12*1001. Indices below 60*12 enumerate
 * (index+1)*1001; the remaining few indices pick common integer rates
 * (24, 30, 60, 12, 15, 48 fps) from a small table.
 */
static int get_std_framerate(int index)
{
    if (index < 60 * 12)
        return (index + 1) * 1001;
    return ((const int[]) { 24, 30, 60, 12, 15, 48 })[index - 60 * 12] * 1000 * 12;
}
2645 * Is the time base unreliable.
2646 * This is a heuristic to balance between quick acceptance of the values in
2647 * the headers vs. some extra checks.
2648 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2649 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2650 * And there are "variable" fps files this needs to detect as well.
/* NOTE(review): the tail of this condition and the return statements lie
 * in lines missing from this excerpt; visible triggers are an implausible
 * den/num ratio, the "mp4v" tag, or MPEG-2/H.264 codec ids. */
2652 static int tb_unreliable(AVCodecContext *c){
2653 if( c->time_base.den >= 101L*c->time_base.num
2654 || c->time_base.den < 5L*c->time_base.num
2655 /* || c->codec_tag == AV_RL32("DIVX")
2656 || c->codec_tag == AV_RL32("XVID")*/
2657 || c->codec_tag == AV_RL32("mp4v")
2658 || c->codec_id == AV_CODEC_ID_MPEG2VIDEO
2659 || c->codec_id == AV_CODEC_ID_H264
2665 #if FF_API_FORMAT_PARAMETERS
/* Deprecated compatibility shim: forwards to avformat_find_stream_info()
 * with no per-stream options. */
2666 int av_find_stream_info(AVFormatContext *ic)
2668 return avformat_find_stream_info(ic, NULL);
/*
 * (Re)allocate avctx->extradata to hold `size` bytes plus the required
 * FF_INPUT_BUFFER_PADDING_SIZE of zeroed padding, and set
 * avctx->extradata_size. Rejects negative or near-INT32_MAX sizes; on
 * allocation failure extradata_size is reset to 0 and ENOMEM is flagged.
 */
2672 int ff_alloc_extradata(AVCodecContext *avctx, int size)
/* guard against overflow when adding the padding */
2676 if (size < 0 || size >= INT32_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
2677 avctx->extradata_size = 0;
2678 return AVERROR(EINVAL);
2680 avctx->extradata = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
2681 if (avctx->extradata) {
2682 memset(avctx->extradata + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2683 avctx->extradata_size = size;
2686 avctx->extradata_size = 0;
2687 ret = AVERROR(ENOMEM);
/*
 * Probe the input by reading/decoding packets until every stream has
 * usable codec parameters, then estimate frame rates, timings and
 * chapter ends. Per-stream `options` (length orig_nb_streams) are passed
 * to the decoders opened during probing.
 * NOTE(review): this excerpt is missing many original lines (the leading
 * numbers are original line numbers); comments describe only the visible
 * structure.
 */
2692 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2694 int i, count, ret = 0, j;
2697 AVPacket pkt1, *pkt;
2698 int64_t old_offset = avio_tell(ic->pb);
2699 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2700 int flush_codecs = ic->probesize > 0;
2703 av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
/* pass 1: set up parsers and try opening decoders per stream, which is
 * often enough to fill in the missing parameters without decoding */
2705 for(i=0;i<ic->nb_streams;i++) {
2706 const AVCodec *codec;
2707 AVDictionary *thread_opt = NULL;
2708 st = ic->streams[i];
2710 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2711 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2712 /* if(!st->time_base.num)
2714 if(!st->codec->time_base.num)
2715 st->codec->time_base= st->time_base;
2717 //only for the split stuff
2718 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2719 st->parser = av_parser_init(st->codec->codec_id);
2721 if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
2722 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2723 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
2724 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
2726 } else if (st->need_parsing) {
2727 av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
2728 "%s, packets or times may be invalid.\n",
2729 avcodec_get_name(st->codec->codec_id));
2732 codec = find_decoder(ic, st, st->codec->codec_id);
2734 /* force thread count to 1 since the h264 decoder will not extract SPS
2735 * and PPS to extradata during multi-threaded decoding */
2736 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2738 /* Ensure that subtitle_header is properly set. */
2739 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2740 && codec && !st->codec->codec)
2741 avcodec_open2(st->codec, codec, options ? &options[i]
2744 //try to just open decoders, in case this is enough to get parameters
2745 if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) {
2746 if (codec && !st->codec->codec)
2747 avcodec_open2(st->codec, codec, options ? &options[i]
2751 av_dict_free(&thread_opt);
/* reset the per-stream DTS bookkeeping used for fps estimation */
2754 for (i=0; i<ic->nb_streams; i++) {
2755 #if FF_API_R_FRAME_RATE
2756 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2758 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2759 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
/* main probing loop: read packets until all streams are resolved or a
 * probe limit (probesize / max_analyze_duration / interrupt) is hit */
2765 if (ff_check_interrupt(&ic->interrupt_callback)){
2767 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2771 /* check if one codec still needs to be handled */
2772 for(i=0;i<ic->nb_streams;i++) {
2773 int fps_analyze_framecount = 20;
2775 st = ic->streams[i];
2776 if (!has_codec_parameters(st, NULL))
2778 /* if the timebase is coarse (like the usual millisecond precision
2779 of mkv), we need to analyze more frames to reliably arrive at
2781 if (av_q2d(st->time_base) > 0.0005)
2782 fps_analyze_framecount *= 2;
2783 if (ic->fps_probe_size >= 0)
2784 fps_analyze_framecount = ic->fps_probe_size;
2785 if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2786 fps_analyze_framecount = 0;
2787 /* variable fps and no guess at the real fps */
2788 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2789 && st->info->duration_count < fps_analyze_framecount
2790 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2792 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2794 if (st->first_dts == AV_NOPTS_VALUE &&
2795 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2796 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2799 if (i == ic->nb_streams) {
2800 /* NOTE: if the format has no header, then we need to read
2801 some packets to get most of the streams, so we cannot
2803 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2804 /* if we found the info for all the codecs, we can stop */
2806 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2811 /* we did not get all the codec info, but we read too much data */
2812 if (read_size >= ic->probesize) {
2814 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit of %d bytes reached\n", ic->probesize);
2815 for (i = 0; i < ic->nb_streams; i++)
2816 if (!ic->streams[i]->r_frame_rate.num &&
2817 ic->streams[i]->info->duration_count <= 1 &&
2818 strcmp(ic->iformat->name, "image2"))
2819 av_log(ic, AV_LOG_WARNING,
2820 "Stream #%d: not enough frames to estimate rate; "
2821 "consider increasing probesize\n", i);
2825 /* NOTE: a new stream can be added there if no header in file
2826 (AVFMTCTX_NOHEADER) */
2827 ret = read_frame_internal(ic, &pkt1);
2828 if (ret == AVERROR(EAGAIN))
/* buffer the packet so it can be returned to the caller later */
2836 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2837 free_packet_buffer(&ic->packet_buffer, &ic->packet_buffer_end);
2839 pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
2840 &ic->packet_buffer_end);
2842 ret = AVERROR(ENOMEM);
2843 goto find_stream_info_err;
2845 if ((ret = av_dup_packet(pkt)) < 0)
2846 goto find_stream_info_err;
2849 st = ic->streams[pkt->stream_index];
2850 if (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC))
2851 read_size += pkt->size;
/* DTS sanity checks feeding the fps estimator */
2853 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2854 /* check for non-increasing dts */
2855 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2856 st->info->fps_last_dts >= pkt->dts) {
2857 av_log(ic, AV_LOG_DEBUG, "Non-increasing DTS in stream %d: "
2858 "packet %d with DTS %"PRId64", packet %d with DTS "
2859 "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2860 st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2861 st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
2863 /* check for a discontinuity in dts - if the difference in dts
2864 * is more than 1000 times the average packet duration in the sequence,
2865 * we treat it as a discontinuity */
2866 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2867 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2868 (pkt->dts - st->info->fps_last_dts) / 1000 >
2869 (st->info->fps_last_dts - st->info->fps_first_dts) / (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2870 av_log(ic, AV_LOG_WARNING, "DTS discontinuity in stream %d: "
2871 "packet %d with DTS %"PRId64", packet %d with DTS "
2872 "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2873 st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2874 st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
2877 /* update stored dts values */
2878 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2879 st->info->fps_first_dts = pkt->dts;
2880 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2882 st->info->fps_last_dts = pkt->dts;
2883 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
/* stop analyzing a stream once max_analyze_duration worth of data
 * (computed from duration, avg_frame_rate or the DTS span) was seen */
2885 if (st->codec_info_nb_frames>1) {
2887 if (st->time_base.den > 0)
2888 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
2889 if (st->avg_frame_rate.num > 0)
2890 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), AV_TIME_BASE_Q));
2893 && st->codec_info_nb_frames>30
2894 && st->info->fps_first_dts != AV_NOPTS_VALUE
2895 && st->info->fps_last_dts != AV_NOPTS_VALUE)
2896 t = FFMAX(t, av_rescale_q(st->info->fps_last_dts - st->info->fps_first_dts, st->time_base, AV_TIME_BASE_Q));
2898 if (t >= ic->max_analyze_duration) {
2899 av_log(ic, AV_LOG_VERBOSE, "max_analyze_duration %d reached at %"PRId64" microseconds\n", ic->max_analyze_duration, t);
2902 if (pkt->duration) {
2903 st->info->codec_info_duration += pkt->duration;
2904 st->info->codec_info_duration_fields += st->parser && st->need_parsing && st->codec->ticks_per_frame==2 ? st->parser->repeat_pict + 1 : 2;
/* legacy r_frame_rate estimation: accumulate per-standard-framerate
 * rounding errors and a DTS-delta gcd */
2907 #if FF_API_R_FRAME_RATE
2909 int64_t last = st->info->last_dts;
2911 if( pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last
2912 && pkt->dts - (uint64_t)last < INT64_MAX){
2913 double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2914 int64_t duration= pkt->dts - last;
2916 if (!st->info->duration_error)
2917 st->info->duration_error = av_mallocz(sizeof(st->info->duration_error[0])*2);
2918 if (!st->info->duration_error)
2919 return AVERROR(ENOMEM);
2921 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2922 // av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2923 for (i=0; i<MAX_STD_TIMEBASES; i++) {
2924 int framerate= get_std_framerate(i);
2925 double sdts= dts*framerate/(1001*12);
2927 int64_t ticks= llrint(sdts+j*0.5);
2928 double error= sdts - ticks + j*0.5;
2929 st->info->duration_error[j][0][i] += error;
2930 st->info->duration_error[j][1][i] += error*error;
2933 st->info->duration_count++;
2934 // ignore the first 4 values, they might have some random jitter
2935 if (st->info->duration_count > 3 && is_relative(pkt->dts) == is_relative(last))
2936 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2938 if (pkt->dts != AV_NOPTS_VALUE)
2939 st->info->last_dts = pkt->dts;
/* harvest extradata (e.g. SPS/PPS) from the bitstream via the parser */
2942 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2943 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2944 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2945 if (ff_alloc_extradata(st->codec, i))
2946 return AVERROR(ENOMEM);
2947 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2951 /* if still no information, we try to open the codec and to
2952 decompress the frame. We try to avoid that in most cases as
2953 it takes longer and uses more memory. For MPEG-4, we need to
2954 decompress for QuickTime.
2956 If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2957 least one frame of codec data, this makes sure the codec initializes
2958 the channel configuration and does not only trust the values from the container.
2960 try_decode_frame(ic, st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2962 st->codec_info_nb_frames++;
/* flush decoders with empty packets so delayed frames can still yield
 * the missing parameters */
2967 AVPacket empty_pkt = { 0 };
2969 av_init_packet(&empty_pkt);
2971 for(i=0;i<ic->nb_streams;i++) {
2973 st = ic->streams[i];
2975 /* flush the decoders */
2976 if (st->info->found_decoder == 1) {
2978 err = try_decode_frame(ic, st, &empty_pkt,
2979 (options && i < orig_nb_streams) ?
2980 &options[i] : NULL);
2981 } while (err > 0 && !has_codec_parameters(st, NULL));
2984 av_log(ic, AV_LOG_INFO,
2985 "decoding for stream %d failed\n", st->index);
2991 // close codecs which were opened in try_decode_frame()
2992 for(i=0;i<ic->nb_streams;i++) {
2993 st = ic->streams[i];
2994 avcodec_close(st->codec);
/* post-processing: derive tags, average and real frame rates, and
 * audio dispositions from what was gathered */
2996 for(i=0;i<ic->nb_streams;i++) {
2997 st = ic->streams[i];
2998 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2999 if(st->codec->codec_id == AV_CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
3000 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
3001 if (avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
3002 st->codec->codec_tag= tag;
3005 /* estimate average framerate if not set by demuxer */
3006 if (st->info->codec_info_duration_fields && !st->avg_frame_rate.num && st->info->codec_info_duration) {
3008 double best_error = 0.01;
/* overflow guards before the rescale below */
3010 if (st->info->codec_info_duration >= INT64_MAX / st->time_base.num / 2||
3011 st->info->codec_info_duration_fields >= INT64_MAX / st->time_base.den ||
3012 st->info->codec_info_duration < 0)
3014 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
3015 st->info->codec_info_duration_fields*(int64_t)st->time_base.den,
3016 st->info->codec_info_duration*2*(int64_t)st->time_base.num, 60000);
3018 /* round guessed framerate to a "standard" framerate if it's
3019 * within 1% of the original estimate*/
3020 for (j = 1; j < MAX_STD_TIMEBASES; j++) {
3021 AVRational std_fps = { get_std_framerate(j), 12*1001 };
3022 double error = fabs(av_q2d(st->avg_frame_rate) / av_q2d(std_fps) - 1);
3024 if (error < best_error) {
3026 best_fps = std_fps.num;
3030 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
3031 best_fps, 12*1001, INT_MAX);
3034 // the check for tb_unreliable() is not completely correct, since this is not about handling
3035 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
3036 // ipmovie.c produces.
3037 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
3038 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
3039 if (st->info->duration_count>1 && !st->r_frame_rate.num
3040 && tb_unreliable(st->codec)) {
3042 double best_error= 0.01;
/* pick the standard framerate with the smallest accumulated
 * rounding-error variance from the probing loop above */
3044 for (j=0; j<MAX_STD_TIMEBASES; j++) {
3047 if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
3049 if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
3052 int n= st->info->duration_count;
3053 double a= st->info->duration_error[k][0][j] / n;
3054 double error= st->info->duration_error[k][1][j]/n - a*a;
3056 if(error < best_error && best_error> 0.000000001){
3058 num = get_std_framerate(j);
3061 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
3064 // do not increase frame rate by more than 1 % in order to match a standard rate.
3065 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
3066 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
3069 if (!st->r_frame_rate.num){
3070 if( st->codec->time_base.den * (int64_t)st->time_base.num
3071 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
3072 st->r_frame_rate.num = st->codec->time_base.den;
3073 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
3075 st->r_frame_rate.num = st->time_base.den;
3076 st->r_frame_rate.den = st->time_base.num;
3079 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
3080 if(!st->codec->bits_per_coded_sample)
3081 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
3082 // set stream disposition based on audio service type
3083 switch (st->codec->audio_service_type) {
3084 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
3085 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
3086 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
3087 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
3088 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
3089 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
3090 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
3091 st->disposition = AV_DISPOSITION_COMMENT; break;
3092 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
3093 st->disposition = AV_DISPOSITION_KARAOKE; break;
3099 estimate_timings(ic, old_offset);
3101 if (ret >= 0 && ic->nb_streams)
3102 ret = -1; /* we could not have all the codec parameters before EOF */
3103 for(i=0;i<ic->nb_streams;i++) {
3105 st = ic->streams[i];
3106 if (!has_codec_parameters(st, &errmsg)) {
3108 avcodec_string(buf, sizeof(buf), st->codec, 0);
3109 av_log(ic, AV_LOG_WARNING,
3110 "Could not find codec parameters for stream %d (%s): %s\n"
3111 "Consider increasing the value for the 'analyzeduration' and 'probesize' options\n",
3118 compute_chapters_end(ic);
/* cleanup path shared by success and error exits */
3120 find_stream_info_err:
3121 for (i=0; i < ic->nb_streams; i++) {
3122 st = ic->streams[i];
3123 if (ic->streams[i]->codec && ic->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
3124 ic->streams[i]->codec->thread_count = 0;
3126 av_freep(&st->info->duration_error);
3127 av_freep(&ic->streams[i]->info);
3130 av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
/*
 * Find the next program (after `last`, or from the start when last is
 * NULL) that contains stream index `s`; visible logic returns the first
 * program whose stream_index list includes s.
 */
3134 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
3138 for (i = 0; i < ic->nb_programs; i++) {
3139 if (ic->programs[i] == last) {
3143 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
3144 if (ic->programs[i]->stream_index[j] == s)
3145 return ic->programs[i];
/*
 * Select the "best" stream of the given media type: prefer more probed
 * frames (capped at 5 via `multiframe`), then higher bitrate, then lower
 * frame count as a tiebreak. Streams flagged hearing/visual-impaired are
 * skipped, as are streams with no available decoder when a decoder was
 * requested. When related_stream is given, the search is first limited
 * to that stream's program, then widened if nothing matched.
 * NOTE(review): excerpt is missing original lines; `continue`s and some
 * braces fall in the gaps.
 */
3151 int av_find_best_stream(AVFormatContext *ic,
3152 enum AVMediaType type,
3153 int wanted_stream_nb,
3155 AVCodec **decoder_ret,
3158 int i, nb_streams = ic->nb_streams;
3159 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1, best_bitrate = -1, best_multiframe = -1, count, bitrate, multiframe;
3160 unsigned *program = NULL;
3161 AVCodec *decoder = NULL, *best_decoder = NULL;
3163 if (related_stream >= 0 && wanted_stream_nb < 0) {
3164 AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
3166 program = p->stream_index;
3167 nb_streams = p->nb_stream_indexes;
3170 for (i = 0; i < nb_streams; i++) {
3171 int real_stream_index = program ? program[i] : i;
3172 AVStream *st = ic->streams[real_stream_index];
3173 AVCodecContext *avctx = st->codec;
3174 if (avctx->codec_type != type)
3176 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
3178 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
3181 decoder = find_decoder(ic, st, st->codec->codec_id);
3184 ret = AVERROR_DECODER_NOT_FOUND;
3188 count = st->codec_info_nb_frames;
3189 bitrate = avctx->bit_rate;
3190 multiframe = FFMIN(5, count);
3191 if ((best_multiframe > multiframe) ||
3192 (best_multiframe == multiframe && best_bitrate > bitrate) ||
3193 (best_multiframe == multiframe && best_bitrate == bitrate && best_count >= count))
3196 best_bitrate = bitrate;
3197 best_multiframe = multiframe;
3198 ret = real_stream_index;
3199 best_decoder = decoder;
/* program-restricted search failed: restart over all streams */
3200 if (program && i == nb_streams - 1 && ret < 0) {
3202 nb_streams = ic->nb_streams;
3203 i = 0; /* no related stream found, try again with everything */
3207 *decoder_ret = best_decoder;
3211 /*******************************************************/
/*
 * Resume a paused network stream: prefer the demuxer's read_play hook,
 * fall back to unpausing the I/O layer, else report ENOSYS.
 */
3213 int av_read_play(AVFormatContext *s)
3215 if (s->iformat->read_play)
3216 return s->iformat->read_play(s);
3218 return avio_pause(s->pb, 0);
3219 return AVERROR(ENOSYS);
/*
 * Pause a network stream: prefer the demuxer's read_pause hook, fall
 * back to pausing the I/O layer, else report ENOSYS.
 */
3222 int av_read_pause(AVFormatContext *s)
3224 if (s->iformat->read_pause)
3225 return s->iformat->read_pause(s);
3227 return avio_pause(s->pb, 1);
3228 return AVERROR(ENOSYS);
/*
 * Free the LAST stream of `s` (asserted below) and all per-stream
 * resources: parser, attached picture, metadata, probe/index buffers,
 * codec context and its extradata/subtitle_header, private data and
 * info. Decrements s->nb_streams.
 */
3231 void ff_free_stream(AVFormatContext *s, AVStream *st){
3232 av_assert0(s->nb_streams>0);
3233 av_assert0(s->streams[ s->nb_streams-1 ] == st);
3236 av_parser_close(st->parser);
3238 if (st->attached_pic.data)
3239 av_free_packet(&st->attached_pic);
3240 av_dict_free(&st->metadata);
3241 av_freep(&st->probe_data.buf);
3242 av_freep(&st->index_entries);
3243 av_freep(&st->codec->extradata);
3244 av_freep(&st->codec->subtitle_header);
3245 av_freep(&st->codec);
3246 av_freep(&st->priv_data);
3248 av_freep(&st->info->duration_error);
3249 av_freep(&st->info);
3250 av_freep(&s->streams[ --s->nb_streams ]);
/*
 * Free an AVFormatContext and everything it owns: private options,
 * streams (via ff_free_stream), programs, chapters and metadata.
 * NOTE(review): the final free of `s` itself falls in lines missing from
 * this excerpt.
 */
3253 void avformat_free_context(AVFormatContext *s)
3261 if (s->iformat && s->iformat->priv_class && s->priv_data)
3262 av_opt_free(s->priv_data);
/* free streams last-to-first, as ff_free_stream requires */
3264 for(i=s->nb_streams-1; i>=0; i--) {
3265 ff_free_stream(s, s->streams[i]);
3267 for(i=s->nb_programs-1; i>=0; i--) {
3268 av_dict_free(&s->programs[i]->metadata);
3269 av_freep(&s->programs[i]->stream_index);
3270 av_freep(&s->programs[i]);
3272 av_freep(&s->programs);
3273 av_freep(&s->priv_data);
3274 while(s->nb_chapters--) {
3275 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
3276 av_freep(&s->chapters[s->nb_chapters]);
3278 av_freep(&s->chapters);
3279 av_dict_free(&s->metadata);
3280 av_freep(&s->streams);
3284 #if FF_API_CLOSE_INPUT_FILE
/* Deprecated compatibility shim: forwards to avformat_close_input(). */
3285 void av_close_input_file(AVFormatContext *s)
3287 avformat_close_input(&s);
/*
 * Close an input context: flush buffered packets, invoke the demuxer's
 * read_close hook, and free the context. The AVFMT_NOFILE /
 * AVFMT_FLAG_CUSTOM_IO check visibly guards how the underlying pb is
 * handled (the guarded statements fall in missing lines).
 */
3291 void avformat_close_input(AVFormatContext **ps)
3302 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
3303 (s->flags & AVFMT_FLAG_CUSTOM_IO))
3306 flush_packet_queue(s);
3309 if (s->iformat->read_close)
3310 s->iformat->read_close(s);
3313 avformat_free_context(s);
3320 #if FF_API_NEW_STREAM
/* Deprecated compatibility shim around avformat_new_stream(); the id
 * assignment falls in lines missing from this excerpt. */
3321 AVStream *av_new_stream(AVFormatContext *s, int id)
3323 AVStream *st = avformat_new_stream(s, NULL);
/*
 * Append a new, default-initialized AVStream to `s`. Allocates the
 * stream, its info struct and a codec context for `c`, then seeds all
 * timing fields (NOPTS sentinels, RELATIVE_TS_BASE cur_dts for demuxing,
 * a 90 kHz MPEG-like default timebase) before publishing the stream in
 * s->streams[].
 */
3330 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
3336 if (s->nb_streams >= INT_MAX/sizeof(*streams))
3338 streams = av_realloc_array(s->streams, s->nb_streams + 1, sizeof(*streams));
3341 s->streams = streams;
3343 st = av_mallocz(sizeof(AVStream));
3346 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
3350 st->info->last_dts = AV_NOPTS_VALUE;
3352 st->codec = avcodec_alloc_context3(c);
3354 /* no default bitrate if decoding */
3355 st->codec->bit_rate = 0;
3357 st->index = s->nb_streams;
3358 st->start_time = AV_NOPTS_VALUE;
3359 st->duration = AV_NOPTS_VALUE;
3360 /* we set the current DTS to 0 so that formats without any timestamps
3361 but durations get some timestamps, formats with some unknown
3362 timestamps have their first few packets buffered and the
3363 timestamps corrected before they are returned to the user */
3364 st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
3365 st->first_dts = AV_NOPTS_VALUE;
3366 st->probe_packets = MAX_PROBE_PACKETS;
3367 st->pts_wrap_reference = AV_NOPTS_VALUE;
3368 st->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
3370 /* default pts setting is MPEG-like */
3371 avpriv_set_pts_info(st, 33, 1, 90000);
3372 st->last_IP_pts = AV_NOPTS_VALUE;
3373 for(i=0; i<MAX_REORDER_DELAY+1; i++)
3374 st->pts_buffer[i]= AV_NOPTS_VALUE;
3376 st->sample_aspect_ratio = (AVRational){0,1};
3378 #if FF_API_R_FRAME_RATE
3379 st->info->last_dts = AV_NOPTS_VALUE;
3381 st->info->fps_first_dts = AV_NOPTS_VALUE;
3382 st->info->fps_last_dts = AV_NOPTS_VALUE;
/* publish only after full initialization */
3384 s->streams[s->nb_streams++] = st;
/*
 * Return the program with the given id, creating and registering a new
 * one (with default discard/PTS-wrap/timing fields) if none exists yet.
 */
3388 AVProgram *av_new_program(AVFormatContext *ac, int id)
3390 AVProgram *program=NULL;
3393 av_dlog(ac, "new_program: id=0x%04x\n", id);
/* reuse an existing program with a matching id */
3395 for(i=0; i<ac->nb_programs; i++)
3396 if(ac->programs[i]->id == id)
3397 program = ac->programs[i];
3400 program = av_mallocz(sizeof(AVProgram));
3403 dynarray_add(&ac->programs, &ac->nb_programs, program);
3404 program->discard = AVDISCARD_NONE;
3407 program->pts_wrap_reference = AV_NOPTS_VALUE;
3408 program->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
3410 program->start_time =
3411 program->end_time = AV_NOPTS_VALUE;
/*
 * Return the chapter with the given id, creating and registering a new
 * one if needed; then (re)set its title metadata, time base and start
 * (the end assignment falls in a missing line).
 */
3416 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
3418 AVChapter *chapter = NULL;
3421 for(i=0; i<s->nb_chapters; i++)
3422 if(s->chapters[i]->id == id)
3423 chapter = s->chapters[i];
3426 chapter= av_mallocz(sizeof(AVChapter));
3429 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3431 av_dict_set(&chapter->metadata, "title", title, 0);
3433 chapter->time_base= time_base;
3434 chapter->start = start;
/*
 * Add stream index `idx` to the program identified by `progid`.
 * Validates the index, skips duplicates, and grows the program's
 * stream_index array by one.
 */
3440 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3443 AVProgram *program=NULL;
3446 if (idx >= ac->nb_streams) {
3447 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3451 for(i=0; i<ac->nb_programs; i++){
3452 if(ac->programs[i]->id != progid)
3454 program = ac->programs[i];
/* already present: nothing to do */
3455 for(j=0; j<program->nb_stream_indexes; j++)
3456 if(program->stream_index[j] == idx)
3459 tmp = av_realloc_array(program->stream_index, program->nb_stream_indexes+1, sizeof(unsigned int));
3462 program->stream_index = tmp;
3463 program->stream_index[program->nb_stream_indexes++] = idx;
/*
 * Log a rate with adaptive precision: two decimals when not a whole
 * number, integer when whole, and in units of 1000 ("k") when it is a
 * whole multiple of 1000.
 */
3468 static void print_fps(double d, const char *postfix){
3469 uint64_t v= lrintf(d*100);
3470 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3471 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3472 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/*
 * Pretty-print a metadata dictionary at the given indent, skipping the
 * "language" key (shown elsewhere) and suppressing the header when
 * language is the only entry. Control characters are stripped from
 * values; CR becomes a space and LF continues on an indented blank key.
 */
3475 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3477 if(m && !(av_dict_count(m) == 1 && av_dict_get(m, "language", NULL, 0))){
3478 AVDictionaryEntry *tag=NULL;
3480 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3481 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3482 if(strcmp("language", tag->key)){
3483 const char *p = tag->value;
3484 av_log(ctx, AV_LOG_INFO, "%s %-16s: ", indent, tag->key);
/* print up to the next control character, then handle CR/LF */
3487 size_t len = strcspn(p, "\x8\xa\xb\xc\xd");
3488 av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3489 av_log(ctx, AV_LOG_INFO, "%s", tmp);
3491 if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3492 if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s %-16s: ", indent, "");
3495 av_log(ctx, AV_LOG_INFO, "\n");
3501 /* "user interface" functions */
/*
 * Print a one-line human-readable description of stream `i` (printed as
 * "#index:i"): id, language, codec string, SAR/DAR, frame-rate and
 * timebase figures, disposition flags, followed by its metadata.
 */
3502 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3505 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3506 AVStream *st = ic->streams[i];
3507 int g = av_gcd(st->time_base.num, st->time_base.den);
3508 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3509 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3510 av_log(NULL, AV_LOG_INFO, "    Stream #%d:%d", index, i);
3511 /* the pid is an important information, so we display it */
3512 /* XXX: add a generic system */
3513 if (flags & AVFMT_SHOW_IDS)
3514 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3516 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3517 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3518 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* show SAR/DAR only when the stream-level SAR differs from the codec's */
3519 if (st->sample_aspect_ratio.num && // default
3520 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3521 AVRational display_aspect_ratio;
3522 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3523 st->codec->width*st->sample_aspect_ratio.num,
3524 st->codec->height*st->sample_aspect_ratio.den,
3526 av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3527 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3528 display_aspect_ratio.num, display_aspect_ratio.den);
3530 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3531 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3532 print_fps(av_q2d(st->avg_frame_rate), "fps");
3533 #if FF_API_R_FRAME_RATE
3534 if(st->r_frame_rate.den && st->r_frame_rate.num)
3535 print_fps(av_q2d(st->r_frame_rate), "tbr");
3537 if(st->time_base.den && st->time_base.num)
3538 print_fps(1/av_q2d(st->time_base), "tbn");
3539 if(st->codec->time_base.den && st->codec->time_base.num)
3540 print_fps(1/av_q2d(st->codec->time_base), "tbc");
3542 if (st->disposition & AV_DISPOSITION_DEFAULT)
3543 av_log(NULL, AV_LOG_INFO, " (default)");
3544 if (st->disposition & AV_DISPOSITION_DUB)
3545 av_log(NULL, AV_LOG_INFO, " (dub)");
3546 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3547 av_log(NULL, AV_LOG_INFO, " (original)");
3548 if (st->disposition & AV_DISPOSITION_COMMENT)
3549 av_log(NULL, AV_LOG_INFO, " (comment)");
3550 if (st->disposition & AV_DISPOSITION_LYRICS)
3551 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3552 if (st->disposition & AV_DISPOSITION_KARAOKE)
3553 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3554 if (st->disposition & AV_DISPOSITION_FORCED)
3555 av_log(NULL, AV_LOG_INFO, " (forced)");
3556 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3557 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3558 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3559 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3560 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3561 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3562 av_log(NULL, AV_LOG_INFO, "\n");
3563 dump_metadata(NULL, st->metadata, "    ");
3566 void av_dump_format(AVFormatContext *ic,
3572 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3573 if (ic->nb_streams && !printed)
3576 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3577 is_output ? "Output" : "Input",
3579 is_output ? ic->oformat->name : ic->iformat->name,
3580 is_output ? "to" : "from", url);
3581 dump_metadata(NULL, ic->metadata, " ");
3583 av_log(NULL, AV_LOG_INFO, " Duration: ");
3584 if (ic->duration != AV_NOPTS_VALUE) {
3585 int hours, mins, secs, us;
3586 int64_t duration = ic->duration + 5000;
3587 secs = duration / AV_TIME_BASE;
3588 us = duration % AV_TIME_BASE;
3593 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3594 (100 * us) / AV_TIME_BASE);
3596 av_log(NULL, AV_LOG_INFO, "N/A");
3598 if (ic->start_time != AV_NOPTS_VALUE) {
3600 av_log(NULL, AV_LOG_INFO, ", start: ");
3601 secs = ic->start_time / AV_TIME_BASE;
3602 us = abs(ic->start_time % AV_TIME_BASE);
3603 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3604 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3606 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3608 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3610 av_log(NULL, AV_LOG_INFO, "N/A");
3612 av_log(NULL, AV_LOG_INFO, "\n");
3614 for (i = 0; i < ic->nb_chapters; i++) {
3615 AVChapter *ch = ic->chapters[i];
3616 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3617 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3618 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3620 dump_metadata(NULL, ch->metadata, " ");
3622 if(ic->nb_programs) {
3623 int j, k, total = 0;
3624 for(j=0; j<ic->nb_programs; j++) {
3625 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3627 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3628 name ? name->value : "");
3629 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3630 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3631 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3632 printed[ic->programs[j]->stream_index[k]] = 1;
3634 total += ic->programs[j]->nb_stream_indexes;
3636 if (total < ic->nb_streams)
3637 av_log(NULL, AV_LOG_INFO, " No Program\n");
3639 for(i=0;i<ic->nb_streams;i++)
3641 dump_stream_format(ic, i, index, is_output);
3646 uint64_t ff_ntp_time(void)
3648 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/**
 * Build a frame filename from a printf-like pattern.
 *
 * Exactly one "%d" (optionally "%0Nd") directive must appear in path; it is
 * replaced by 'number'.  "%%" emits a literal '%'.  Any other '%' directive,
 * a missing "%d", or a result longer than buf_size-1 is an error.
 *
 * @return 0 on success, -1 on failure (buf still NUL-terminated).
 */
int av_get_frame_filename(char *buf, int buf_size,
                          const char *path, int number)
{
    const char *p;
    char *q, buf1[20], c;
    int nd, len, percentd_found;

    q = buf;
    p = path;
    percentd_found = 0;
    for (;;) {
        c = *p++;
        if (c == '\0')
            break;
        if (c == '%') {
            /* collect an optional zero-padded width, e.g. %05d */
            do {
                nd = 0;
                while (*p >= '0' && *p <= '9') { /* ASCII digit, locale-independent */
                    nd = nd * 10 + *p++ - '0';
                }
                c = *p++;
            } while (c >= '0' && c <= '9');

            switch (c) {
            case '%':
                goto addchar;
            case 'd':
                if (percentd_found) /* only one %d is allowed */
                    goto fail;
                percentd_found = 1;
                snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
                len = strlen(buf1);
                if ((q - buf + len) > buf_size - 1)
                    goto fail;
                memcpy(q, buf1, len);
                q += len;
                break;
            default:
                goto fail;
            }
        } else {
        addchar:
            if ((q - buf) < buf_size - 1)
                *q++ = c;
        }
    }
    if (!percentd_found)
        goto fail;
    *q = '\0';
    return 0;
fail:
    *q = '\0';
    return -1;
}
/**
 * Shared hex-dump worker: 16 bytes per line as offset, hex bytes and an
 * ASCII column (non-printables shown as '.').  Output goes to f when
 * non-NULL, otherwise to av_log(avcl, level, ...).
 */
static void hex_dump_internal(void *avcl, FILE *f, int level,
                              const uint8_t *buf, int size)
{
    int len, i, j, c;
#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)

    for (i = 0; i < size; i += 16) {
        len = size - i;
        if (len > 16)
            len = 16;
        PRINT("%08x ", i);
        for (j = 0; j < 16; j++) {
            if (j < len)
                PRINT(" %02x", buf[i + j]);
            else
                PRINT("   "); /* pad so the ASCII column stays aligned */
        }
        PRINT(" ");
        for (j = 0; j < len; j++) {
            c = buf[i + j];
            if (c < ' ' || c > '~')
                c = '.';
            PRINT("%c", c);
        }
        PRINT("\n");
    }
#undef PRINT
}
/** Hex-dump a buffer to the given stdio stream. */
void av_hex_dump(FILE *f, const uint8_t *buf, int size)
{
    hex_dump_internal(NULL, f, 0, buf, size);
}
3740 void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size)
3742 hex_dump_internal(avcl, NULL, level, buf, size);
3745 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3747 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3748 PRINT("stream #%d:\n", pkt->stream_index);
3749 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3750 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3751 /* DTS is _always_ valid after av_read_frame() */
3753 if (pkt->dts == AV_NOPTS_VALUE)
3756 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3757 /* PTS may not be known if B-frames are present. */
3759 if (pkt->pts == AV_NOPTS_VALUE)
3762 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3764 PRINT(" size=%d\n", pkt->size);
3767 av_hex_dump(f, pkt->data, pkt->size);
3770 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3772 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
3775 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3778 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/**
 * Split a URL of the form proto://authorization@hostname:port/path?query
 * into its components.  Any output buffer may be sized 0 to skip that
 * component; *port_ptr is set to -1 when no port is present.
 */
void av_url_split(char *proto, int proto_size,
                  char *authorization, int authorization_size,
                  char *hostname, int hostname_size,
                  int *port_ptr,
                  char *path, int path_size,
                  const char *url)
{
    const char *p, *ls, *ls2, *at, *at2, *col, *brk;

    if (port_ptr)               *port_ptr = -1;
    if (proto_size > 0)         proto[0] = 0;
    if (authorization_size > 0) authorization[0] = 0;
    if (hostname_size > 0)      hostname[0] = 0;
    if (path_size > 0)          path[0] = 0;

    /* parse protocol */
    if ((p = strchr(url, ':'))) {
        av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
        p++; /* skip ':' */
        if (*p == '/') p++;
        if (*p == '/') p++;
    } else {
        /* no protocol means plain filename */
        av_strlcpy(path, url, path_size);
        return;
    }

    /* separate path from hostname: path starts at '/' or '?', whichever first */
    ls  = strchr(p, '/');
    ls2 = strchr(p, '?');
    if (!ls)
        ls = ls2;
    else if (ls && ls2)
        ls = FFMIN(ls, ls2);
    if (ls)
        av_strlcpy(path, ls, path_size);
    else
        ls = &p[strlen(p)]; // XXX

    /* the rest is hostname, use that to parse auth/port */
    if (ls != p) {
        /* authorization (user[:pass]@hostname) — keep the LAST '@' before path */
        at2 = p;
        while ((at = strchr(p, '@')) && at < ls) {
            av_strlcpy(authorization, at2,
                       FFMIN(authorization_size, at + 1 - at2));
            p = at + 1; /* skip '@' */
        }

        if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
            /* [host]:port — bracketed IPv6 literal */
            av_strlcpy(hostname, p + 1,
                       FFMIN(hostname_size, brk - p));
            if (brk[1] == ':' && port_ptr)
                *port_ptr = atoi(brk + 2);
        } else if ((col = strchr(p, ':')) && col < ls) {
            av_strlcpy(hostname, p,
                       FFMIN(col + 1 - p, hostname_size));
            if (port_ptr) *port_ptr = atoi(col + 1);
        } else
            av_strlcpy(hostname, p,
                       FFMIN(ls + 1 - p, hostname_size));
    }
}
/**
 * Write the hexadecimal representation of s bytes from src into buff
 * (2*s characters, upper- or lowercase).  buff is NOT NUL-terminated
 * here; callers must terminate or size it themselves.
 *
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    int i;
    static const char hex_table_uc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'A', 'B',
                                           'C', 'D', 'E', 'F' };
    static const char hex_table_lc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'a', 'b',
                                           'c', 'd', 'e', 'f' };
    const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;

    for (i = 0; i < s; i++) {
        buff[i * 2]     = hex_table[src[i] >> 4];
        buff[i * 2 + 1] = hex_table[src[i] & 0xF];
    }

    return buff;
}
/**
 * Parse a whitespace-separated hex string into bytes.
 * Parsing stops at the first non-hex, non-space character; a trailing
 * odd nibble is discarded.  When data is NULL only the length is computed.
 *
 * @return number of bytes decoded
 */
int ff_hex_to_data(uint8_t *data, const char *p)
{
    int c, len, v;

    len = 0;
    v   = 1; /* sentinel bit: v & 0x100 set once two nibbles are in */
    for (;;) {
        p += strspn(p, " \t\r\n"); /* skip whitespace between digits */
        if (*p == '\0')
            break;
        c = (unsigned char)*p++;
        if (c >= 'a' && c <= 'z') /* ASCII uppercase, locale-independent */
            c -= 'a' - 'A';
        if (c >= '0' && c <= '9')
            c = c - '0';
        else if (c >= 'A' && c <= 'F')
            c = c - 'A' + 10;
        else
            break;
        v = (v << 4) | c;
        if (v & 0x100) {
            if (data)
                data[len] = v;
            len++;
            v = 1;
        }
    }
    return len;
}
#if FF_API_SET_PTS_INFO
/** Deprecated public wrapper around avpriv_set_pts_info(). */
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
                     unsigned int pts_num, unsigned int pts_den)
{
    avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
}
#endif
3903 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
3904 unsigned int pts_num, unsigned int pts_den)
3907 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
3908 if(new_tb.num != pts_num)
3909 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
3911 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
3913 if(new_tb.num <= 0 || new_tb.den <= 0) {
3914 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase %d/%d for st:%d\n", new_tb.num, new_tb.den, s->index);
3917 s->time_base = new_tb;
3918 av_codec_set_pkt_timebase(s->codec, new_tb);
3919 s->pts_wrap_bits = pts_wrap_bits;
3922 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3925 const char *ptr = str;
3927 /* Parse key=value pairs. */
3930 char *dest = NULL, *dest_end;
3931 int key_len, dest_len = 0;
3933 /* Skip whitespace and potential commas. */
3934 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
3941 if (!(ptr = strchr(key, '=')))
3944 key_len = ptr - key;
3946 callback_get_buf(context, key, key_len, &dest, &dest_len);
3947 dest_end = dest + dest_len - 1;
3951 while (*ptr && *ptr != '\"') {
3955 if (dest && dest < dest_end)
3959 if (dest && dest < dest_end)
3967 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
3968 if (dest && dest < dest_end)
3976 int ff_find_stream_index(AVFormatContext *s, int id)
3979 for (i = 0; i < s->nb_streams; i++) {
3980 if (s->streams[i]->id == id)
/**
 * Convert an ISO 8601-ish date string ("YYYY-MM-DD HH:MM:SS" or with a 'T'
 * separator; the format strings tolerate surrounding spaces around '-')
 * to Unix time, interpreting the value as UTC.
 */
int64_t ff_iso8601_to_unix_time(const char *datestr)
{
    struct tm time1 = { 0 }, time2 = { 0 };
    char *ret1, *ret2;
    ret1 = av_small_strptime(datestr, "%Y - %m - %d %H:%M:%S", &time1);
    ret2 = av_small_strptime(datestr, "%Y - %m - %dT%H:%M:%S", &time2);
    /* prefer the 'T'-separated form only when the space form did not match */
    if (ret2 && !ret1)
        return av_timegm(&time2);
    else
        return av_timegm(&time1);
}
3998 int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance)
4001 if (ofmt->query_codec)
4002 return ofmt->query_codec(codec_id, std_compliance);
4003 else if (ofmt->codec_tag)
4004 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4005 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
4006 codec_id == ofmt->subtitle_codec)
4009 return AVERROR_PATCHWELCOME;
/**
 * Global network initialization (sockets/TLS).  A no-op when FFmpeg was
 * built without network support.
 */
int avformat_network_init(void)
{
#if CONFIG_NETWORK
    int ret;
    ff_network_inited_globally = 1;
    if ((ret = ff_network_init()) < 0)
        return ret;
    ff_tls_init();
#endif
    return 0;
}
4024 int avformat_network_deinit(void)
4033 int ff_add_param_change(AVPacket *pkt, int32_t channels,
4034 uint64_t channel_layout, int32_t sample_rate,
4035 int32_t width, int32_t height)
4041 return AVERROR(EINVAL);
4044 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4046 if (channel_layout) {
4048 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4052 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4054 if (width || height) {
4056 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
4058 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4060 return AVERROR(ENOMEM);
4061 bytestream_put_le32(&data, flags);
4063 bytestream_put_le32(&data, channels);
4065 bytestream_put_le64(&data, channel_layout);
4067 bytestream_put_le32(&data, sample_rate);
4068 if (width || height) {
4069 bytestream_put_le32(&data, width);
4070 bytestream_put_le32(&data, height);
4075 AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4077 AVRational undef = {0, 1};
4078 AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4079 AVRational codec_sample_aspect_ratio = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
4080 AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
4082 av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4083 stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4084 if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4085 stream_sample_aspect_ratio = undef;
4087 av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4088 frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4089 if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4090 frame_sample_aspect_ratio = undef;
4092 if (stream_sample_aspect_ratio.num)
4093 return stream_sample_aspect_ratio;
4095 return frame_sample_aspect_ratio;
4098 AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
4100 AVRational fr = st->r_frame_rate;
4102 if (st->codec->ticks_per_frame > 1) {
4103 AVRational codec_fr = av_inv_q(st->codec->time_base);
4104 AVRational avg_fr = st->avg_frame_rate;
4105 codec_fr.den *= st->codec->ticks_per_frame;
4106 if ( codec_fr.num > 0 && codec_fr.den > 0 && av_q2d(codec_fr) < av_q2d(fr)*0.7
4107 && fabs(1.0 - av_q2d(av_div_q(avg_fr, fr))) > 0.1)
4114 int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,
4117 if (*spec <= '9' && *spec >= '0') /* opt:index */
4118 return strtol(spec, NULL, 0) == st->index;
4119 else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
4120 *spec == 't') { /* opt:[vasdt] */
4121 enum AVMediaType type;
4124 case 'v': type = AVMEDIA_TYPE_VIDEO; break;
4125 case 'a': type = AVMEDIA_TYPE_AUDIO; break;
4126 case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
4127 case 'd': type = AVMEDIA_TYPE_DATA; break;
4128 case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
4129 default: av_assert0(0);
4131 if (type != st->codec->codec_type)
4133 if (*spec++ == ':') { /* possibly followed by :index */
4134 int i, index = strtol(spec, NULL, 0);
4135 for (i = 0; i < s->nb_streams; i++)
4136 if (s->streams[i]->codec->codec_type == type && index-- == 0)
4137 return i == st->index;
4141 } else if (*spec == 'p' && *(spec + 1) == ':') {
4145 prog_id = strtol(spec, &endptr, 0);
4146 for (i = 0; i < s->nb_programs; i++) {
4147 if (s->programs[i]->id != prog_id)
4150 if (*endptr++ == ':') {
4151 int stream_idx = strtol(endptr, NULL, 0);
4152 return stream_idx >= 0 &&
4153 stream_idx < s->programs[i]->nb_stream_indexes &&
4154 st->index == s->programs[i]->stream_index[stream_idx];
4157 for (j = 0; j < s->programs[i]->nb_stream_indexes; j++)
4158 if (st->index == s->programs[i]->stream_index[j])
4162 } else if (*spec == '#') {
4165 sid = strtol(spec + 1, &endptr, 0);
4167 return st->id == sid;
4168 } else if (!*spec) /* empty specifier, matches everything */
4171 av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
4172 return AVERROR(EINVAL);
4175 int ff_generate_avci_extradata(AVStream *st)
4177 static const uint8_t avci100_1080p_extradata[] = {
4179 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4180 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4181 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4182 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
4183 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
4184 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
4185 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
4186 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
4187 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4189 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4192 static const uint8_t avci100_1080i_extradata[] = {
4194 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4195 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4196 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4197 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
4198 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
4199 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
4200 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
4201 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
4202 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
4203 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
4204 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
4206 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4209 static const uint8_t avci50_1080i_extradata[] = {
4211 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
4212 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
4213 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
4214 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
4215 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
4216 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
4217 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
4218 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
4219 0x81, 0x13, 0xf7, 0xff, 0x80, 0x02, 0x00, 0x01,
4220 0xf1, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
4221 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
4223 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
4226 static const uint8_t avci100_720p_extradata[] = {
4228 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4229 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
4230 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
4231 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
4232 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
4233 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
4234 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
4235 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
4236 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
4237 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
4239 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
4243 const uint8_t *data = NULL;
4246 if (st->codec->width == 1920) {
4247 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
4248 data = avci100_1080p_extradata;
4249 size = sizeof(avci100_1080p_extradata);
4251 data = avci100_1080i_extradata;
4252 size = sizeof(avci100_1080i_extradata);
4254 } else if (st->codec->width == 1440) {
4255 data = avci50_1080i_extradata;
4256 size = sizeof(avci50_1080i_extradata);
4257 } else if (st->codec->width == 1280) {
4258 data = avci100_720p_extradata;
4259 size = sizeof(avci100_720p_extradata);
4265 av_freep(&st->codec->extradata);
4266 if (ff_alloc_extradata(st->codec, size))
4267 return AVERROR(ENOMEM);
4268 memcpy(st->codec->extradata, data, size);