/* memory handling */
-void av_destruct_packet(AVPacket *pkt)
-{
- av_free(pkt->data);
- pkt->data = NULL; pkt->size = 0;
-}
-
-void av_init_packet(AVPacket *pkt)
-{
- pkt->pts = AV_NOPTS_VALUE;
- pkt->dts = AV_NOPTS_VALUE;
- pkt->pos = -1;
- pkt->duration = 0;
- pkt->convergence_duration = 0;
- pkt->flags = 0;
- pkt->stream_index = 0;
- pkt->destruct= av_destruct_packet_nofree;
-}
-
-int av_new_packet(AVPacket *pkt, int size)
-{
- uint8_t *data;
- if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
- return AVERROR(ENOMEM);
- data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
- if (!data)
- return AVERROR(ENOMEM);
- memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
-
- av_init_packet(pkt);
- pkt->data = data;
- pkt->size = size;
- pkt->destruct = av_destruct_packet;
- return 0;
-}
int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
{
if(ret<=0)
av_free_packet(pkt);
else
- pkt->size= ret;
+ av_shrink_packet(pkt, ret);
return ret;
}
-int av_dup_packet(AVPacket *pkt)
-{
- if (((pkt->destruct == av_destruct_packet_nofree) || (pkt->destruct == NULL)) && pkt->data) {
- uint8_t *data;
- /* We duplicate the packet and don't forget to add the padding again. */
- if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
- return AVERROR(ENOMEM);
- data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
- if (!data) {
- return AVERROR(ENOMEM);
- }
- memcpy(data, pkt->data, pkt->size);
- memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
- pkt->data = data;
- pkt->destruct = av_destruct_packet;
- }
- return 0;
-}
int av_filename_number_test(const char *filename)
{
return av_probe_input_format2(pd, is_opened, &score);
}
-static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
+static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
{
AVInputFormat *fmt;
fmt = av_probe_input_format2(pd, 1, &score);
if (fmt) {
+ av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
+ pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
if (!strcmp(fmt->name, "mp3")) {
st->codec->codec_id = CODEC_ID_MP3;
st->codec->codec_type = CODEC_TYPE_AUDIO;
} else if (!strcmp(fmt->name, "ac3")) {
st->codec->codec_id = CODEC_ID_AC3;
st->codec->codec_type = CODEC_TYPE_AUDIO;
+ } else if (!strcmp(fmt->name, "eac3")) {
+ st->codec->codec_id = CODEC_ID_EAC3;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
} else if (!strcmp(fmt->name, "mpegvideo")) {
st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
st->codec->codec_type = CODEC_TYPE_VIDEO;
} else if (!strcmp(fmt->name, "h264")) {
st->codec->codec_id = CODEC_ID_H264;
st->codec->codec_type = CODEC_TYPE_VIDEO;
+ } else if (!strcmp(fmt->name, "dts")) {
+ st->codec->codec_id = CODEC_ID_DTS;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
}
}
return !!fmt;
}
if(!ap->prealloced_context)
- ic = av_alloc_format_context();
+ ic = avformat_alloc_context();
else
ic = *ic_ptr;
if (!ic) {
ff_metadata_demux_compat(ic);
#endif
+ ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
+
*ic_ptr = ic;
return 0;
fail:
int err, probe_size;
AVProbeData probe_data, *pd = &probe_data;
ByteIOContext *pb = NULL;
+ void *logctx= ap && ap->prealloced_context ? *ic_ptr : NULL;
pd->filename = "";
if (filename)
}
/* guess file format */
fmt = av_probe_input_format2(pd, 1, &score);
+ if(fmt){
+ if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
+ av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
+ }else
+ av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
+ }
}
av_freep(&pd->buf);
}
av_freep(&pd->buf);
if (pb)
url_fclose(pb);
+ if (ap && ap->prealloced_context)
+ av_free(*ic_ptr);
*ic_ptr = NULL;
return err;
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
- int ret;
+ int ret, i;
AVStream *st;
for(;;){
if (pktl) {
*pkt = pktl->pkt;
- if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
+ if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
+ !s->streams[pkt->stream_index]->probe_packets ||
+ s->raw_packet_buffer_remaining_size < pkt->size){
+ AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
+ av_freep(&pd->buf);
+ pd->buf_size = 0;
s->raw_packet_buffer = pktl->next;
+ s->raw_packet_buffer_remaining_size += pkt->size;
av_free(pktl);
return 0;
}
av_init_packet(pkt);
ret= s->iformat->read_packet(s, pkt);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ if (!pktl || ret == AVERROR(EAGAIN))
+ return ret;
+ for (i = 0; i < s->nb_streams; i++)
+ s->streams[i]->probe_packets = 0;
+ continue;
+ }
st= s->streams[pkt->stream_index];
switch(st->codec->codec_type){
break;
}
- if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
+ if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
+ !st->probe_packets))
return ret;
add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
+ s->raw_packet_buffer_remaining_size -= pkt->size;
if(st->codec->codec_id == CODEC_ID_PROBE){
AVProbeData *pd = &st->probe_data;
+ --st->probe_packets;
+
pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
pd->buf_size += pkt->size;
memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
- set_codec_from_probe_data(st, pd, 1);
+ set_codec_from_probe_data(s, st, pd, 1);
if(st->codec->codec_id != CODEC_ID_PROBE){
pd->buf_size=0;
av_freep(&pd->buf);
/* used for example by ADPCM codecs */
if (enc->bit_rate == 0)
return -1;
- frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
+ frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
}
} else {
frame_size = enc->frame_size;
*pnum = st->codec->time_base.num;
*pden = st->codec->time_base.den;
if (pc && pc->repeat_pict) {
- *pden *= 2;
- *pnum = (*pnum) * (2 + pc->repeat_pict);
+ *pnum = (*pnum) * (1 + pc->repeat_pict);
}
}
break;
int num, den, presentation_delayed, delay, i;
int64_t offset;
+ if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == FF_B_TYPE)
+ //FIXME Set low_delay = 0 when has_b_frames = 1
+ st->codec->has_b_frames = 1;
+
/* do we have a video B-frame ? */
delay= st->codec->has_b_frames;
presentation_delayed = 0;
// we take the conservative approach and discard both
// Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
- av_log(s, AV_LOG_ERROR, "invalid dts/pts combination\n");
+ av_log(s, AV_LOG_WARNING, "invalid dts/pts combination\n");
pkt->dts= pkt->pts= AV_NOPTS_VALUE;
}
pkt->dts += offset;
}
+ if (pc && pc->dts_sync_point >= 0) {
+ // we have synchronization info from the parser
+ int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
+ if (den > 0) {
+ int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
+ if (pkt->dts != AV_NOPTS_VALUE) {
+ // got DTS from the stream, update reference timestamp
+ st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
+ pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
+ } else if (st->reference_dts != AV_NOPTS_VALUE) {
+ // compute DTS based on reference timestamp
+ pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
+ pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
+ }
+ if (pc->dts_sync_point > 0)
+ st->reference_dts = pkt->dts; // new reference
+ }
+ }
+
/* This may be redundant, but it should not hurt. */
if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
presentation_delayed = 1;
// av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
/* interpolate PTS and DTS if they are not present */
- if(delay==0 || (delay==1 && pc)){
+ //We skip H264 currently because delay and has_b_frames are not reliably set
+ if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
if (presentation_delayed) {
/* DTS = decompression timestamp */
/* PTS = presentation timestamp */
FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
if(pkt->dts == AV_NOPTS_VALUE)
pkt->dts= st->pts_buffer[0];
- if(delay>1){
+ if(st->codec->codec_id == CODEC_ID_H264){ //we skipped it above so we try here
update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
}
if(pkt->dts > st->cur_dts)
else if (pc) {
pkt->flags = 0;
/* keyframe computation */
- if (pc->pict_type == FF_I_TYPE)
- pkt->flags |= PKT_FLAG_KEY;
+ if (pc->key_frame == 1)
+ pkt->flags |= PKT_FLAG_KEY;
+ else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
+ pkt->flags |= PKT_FLAG_KEY;
}
+ if (pc)
+ pkt->convergence_duration = pc->convergence_duration;
}
-void av_destruct_packet_nofree(AVPacket *pkt)
-{
- pkt->data = NULL; pkt->size = 0;
-}
static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
*pkt = st->cur_pkt; st->cur_pkt.data= NULL;
compute_pkt_fields(s, st, NULL, pkt);
s->cur_st = NULL;
+ if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
+ (pkt->flags & PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
+ ff_reduce_index(s, st->index);
+ av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
+ }
break;
} else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
- len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
- st->cur_ptr, st->cur_len,
- st->cur_pkt.pts, st->cur_pkt.dts);
+ len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
+ st->cur_ptr, st->cur_len,
+ st->cur_pkt.pts, st->cur_pkt.dts,
+ st->cur_pkt.pos);
st->cur_pkt.pts = AV_NOPTS_VALUE;
st->cur_pkt.dts = AV_NOPTS_VALUE;
/* increment read pointer */
/* return packet if any */
if (pkt->size) {
- pkt->pos = st->cur_pkt.pos; // Isn't quite accurate but close.
got_packet:
pkt->duration = 0;
pkt->stream_index = st->index;
pkt->pts = st->parser->pts;
pkt->dts = st->parser->dts;
- pkt->destruct = av_destruct_packet_nofree;
+ pkt->pos = st->parser->pos;
+ pkt->destruct = NULL;
compute_pkt_fields(s, st, st->parser, pkt);
if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
for(i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
if (st->parser && st->need_parsing) {
- av_parser_parse(st->parser, st->codec,
+ av_parser_parse2(st->parser, st->codec,
&pkt->data, &pkt->size,
NULL, 0,
- AV_NOPTS_VALUE, AV_NOPTS_VALUE);
+ AV_NOPTS_VALUE, AV_NOPTS_VALUE,
+ AV_NOPTS_VALUE);
if (pkt->size)
goto got_packet;
}
av_free_packet(&pktl->pkt);
av_free(pktl);
}
+ while(s->raw_packet_buffer){
+ pktl = s->raw_packet_buffer;
+ s->raw_packet_buffer = pktl->next;
+ av_free_packet(&pktl->pkt);
+ av_free(pktl);
+ }
+ s->packet_buffer_end=
+ s->raw_packet_buffer_end= NULL;
+ s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
}
/*******************************************************/
/**
* Flush the frame reader.
*/
-static void av_read_frame_flush(AVFormatContext *s)
+void av_read_frame_flush(AVFormatContext *s)
{
AVStream *st;
int i;
}
st->last_IP_pts = AV_NOPTS_VALUE;
st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
+ st->reference_dts = AV_NOPTS_VALUE;
/* fail safe */
st->cur_ptr = NULL;
st->cur_len = 0;
+
+ st->probe_packets = MAX_PROBE_PACKETS;
}
}
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
AVInputFormat *avif= s->iformat;
- int64_t pos_min, pos_max, pos, pos_limit;
+ int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
int64_t ts_min, ts_max, ts;
int index;
AVStream *st;
pos_min= e->pos;
ts_min= e->timestamp;
#ifdef DEBUG_SEEK
- av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
- pos_min,ts_min);
+ av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
+ pos_min,ts_min);
#endif
}else{
assert(index==0);
ts_max= e->timestamp;
pos_limit= pos_max - e->min_distance;
#ifdef DEBUG_SEEK
- av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
- pos_max,pos_limit, ts_max);
+ av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
+ pos_max,pos_limit, ts_max);
#endif
}
}
else
no_change=0;
#ifdef DEBUG_SEEK
-av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
+ av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
+ pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
+ start_pos, no_change);
#endif
if(ts == AV_NOPTS_VALUE){
av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
return ret;
av_update_cur_dts(s, st, ie->timestamp);
}else{
- if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
+ if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
return ret;
}
for(i=0;; i++) {
return av_seek_frame_generic(s, stream_index, timestamp, flags);
}
+/**
+ * Seek to timestamp ts, constrained to the range min_ts..max_ts.
+ * Prefers the demuxer's native read_seek2(); otherwise falls back to the
+ * old av_seek_frame() API, picking AVSEEK_FLAG_BACKWARD when ts is closer
+ * to min_ts than to max_ts. Returns <0 on failure.
+ */
+int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
+{
+    // Reject inconsistent requests: the target must lie within [min_ts, max_ts].
+    if(min_ts > ts || max_ts < ts)
+        return -1;
+
+    // Drop all buffered/parsed packets before repositioning the input.
+    av_read_frame_flush(s);
+
+    if (s->iformat->read_seek2)
+        return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
+
+    if(s->iformat->read_timestamp){
+        //try to seek via read_timestamp()
+    }
+
+    //Fallback to old API if new is not implemented but old is
+    //Note the old API has somewhat different semantics
+    // NOTE(review): the "|| 1" makes this fallback unconditional, so the
+    // generic-seek TODO below is unreachable and the function cannot fall
+    // off the end without returning.
+    if(s->iformat->read_seek || 1)
+        return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
+
+    // try some generic seek like av_seek_frame_generic() but with new ts semantics
+}
+
/*******************************************************/
/**
val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
if(!enc->frame_size &&
(enc->codec_id == CODEC_ID_VORBIS ||
- enc->codec_id == CODEC_ID_AAC))
+ enc->codec_id == CODEC_ID_AAC ||
+ enc->codec_id == CODEC_ID_SPEEX))
return 0;
break;
case CODEC_TYPE_VIDEO:
return enc->codec_id != CODEC_ID_NONE && val != 0;
}
-static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
+static int try_decode_frame(AVStream *st, AVPacket *avpkt)
{
int16_t *samples;
AVCodec *codec;
int got_picture, data_size, ret=0;
AVFrame picture;
- if(!st->codec->codec){
- codec = avcodec_find_decoder(st->codec->codec_id);
- if (!codec)
- return -1;
- ret = avcodec_open(st->codec, codec);
- if (ret < 0)
- return ret;
- }
+ if(!st->codec->codec){
+ codec = avcodec_find_decoder(st->codec->codec_id);
+ if (!codec)
+ return -1;
+ ret = avcodec_open(st->codec, codec);
+ if (ret < 0)
+ return ret;
+ }
- if(!has_codec_parameters(st->codec)){
- switch(st->codec->codec_type) {
- case CODEC_TYPE_VIDEO:
- ret = avcodec_decode_video(st->codec, &picture,
- &got_picture, data, size);
- break;
- case CODEC_TYPE_AUDIO:
- data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
- samples = av_malloc(data_size);
- if (!samples)
- goto fail;
- ret = avcodec_decode_audio2(st->codec, samples,
- &data_size, data, size);
- av_free(samples);
- break;
- default:
- break;
+ if(!has_codec_parameters(st->codec)){
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ avcodec_get_frame_defaults(&picture);
+ ret = avcodec_decode_video2(st->codec, &picture,
+ &got_picture, avpkt);
+ break;
+ case CODEC_TYPE_AUDIO:
+ data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
+ samples = av_malloc(data_size);
+ if (!samples)
+ goto fail;
+ ret = avcodec_decode_audio3(st->codec, samples,
+ &data_size, avpkt);
+ av_free(samples);
+ break;
+ default:
+ break;
+ }
}
- }
fail:
return ret;
}
-unsigned int codec_get_tag(const AVCodecTag *tags, int id)
+unsigned int ff_codec_get_tag(const AVCodecTag *tags, int id)
{
while (tags->id != CODEC_ID_NONE) {
if (tags->id == id)
return 0;
}
-enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
+enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
{
int i;
for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
{
int i;
for(i=0; tags && tags[i]; i++){
- int tag= codec_get_tag(tags[i], id);
+ int tag= ff_codec_get_tag(tags[i], id);
if(tag) return tag;
}
return 0;
{
int i;
for(i=0; tags && tags[i]; i++){
- enum CodecID id= codec_get_id(tags[i], tag);
+ enum CodecID id= ff_codec_get_id(tags[i], tag);
if(id!=CODEC_ID_NONE) return id;
}
return CODEC_ID_NONE;
}
}
-/* absolute maximum size we read until we abort */
-#define MAX_READ_SIZE 5000000
-
#define MAX_STD_TIMEBASES (60*12+5)
static int get_std_framerate(int i){
if(i<60*12) return i*1001;
|| c->time_base.den < 5L*c->time_base.num
/* || c->codec_tag == AV_RL32("DIVX")
|| c->codec_tag == AV_RL32("XVID")*/
- || c->codec_id == CODEC_ID_MPEG2VIDEO)
+ || c->codec_id == CODEC_ID_MPEG2VIDEO
+ || c->codec_id == CODEC_ID_H264
+ )
return 1;
return 0;
}
AVStream *st;
AVPacket pkt1, *pkt;
int64_t last_dts[MAX_STREAMS];
+ int64_t duration_gcd[MAX_STREAMS]={0};
int duration_count[MAX_STREAMS]={0};
double (*duration_error)[MAX_STD_TIMEBASES];
int64_t old_offset = url_ftell(ic->pb);
count = 0;
read_size = 0;
for(;;) {
+ if(url_interrupt_cb()){
+ ret= AVERROR(EINTR);
+ av_log(ic, AV_LOG_DEBUG, "interrupted\n");
+ break;
+ }
+
/* check if one codec still needs to be handled */
for(i=0;i<ic->nb_streams;i++) {
st = ic->streams[i];
if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
/* if we found the info for all the codecs, we can stop */
ret = count;
+ av_log(ic, AV_LOG_DEBUG, "All info found\n");
break;
}
}
/* we did not get all the codec info, but we read too much data */
- if (read_size >= MAX_READ_SIZE) {
+ if (read_size >= ic->probesize) {
ret = count;
+ av_log(ic, AV_LOG_WARNING, "MAX_READ_SIZE:%d reached\n", ic->probesize);
break;
}
if (!has_codec_parameters(st->codec)){
char buf[256];
avcodec_string(buf, sizeof(buf), st->codec, 0);
- av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
+ av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
} else {
ret = 0;
}
read_size += pkt->size;
st = ic->streams[pkt->stream_index];
- if(codec_info_nb_frames[st->index]>1)
+ if(codec_info_nb_frames[st->index]>1) {
+ if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration){
+ av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
+ break;
+ }
codec_info_duration[st->index] += pkt->duration;
+ }
if (pkt->duration != 0)
codec_info_nb_frames[st->index]++;
duration_error[index][i] += error*error;
}
duration_count[index]++;
+ // ignore the first 4 values, they might have some random jitter
+ if (duration_count[index] > 3)
+ duration_gcd[index] = av_gcd(duration_gcd[index], duration);
}
if(last == AV_NOPTS_VALUE || duration_count[index]<=1)
last_dts[pkt->stream_index]= pkt->dts;
st->codec->codec_id == CODEC_ID_PPM ||
st->codec->codec_id == CODEC_ID_SHORTEN ||
(st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
- try_decode_frame(st, pkt->data, pkt->size);
+ try_decode_frame(st, pkt);
- if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
- break;
- }
count++;
}
if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
+ // the check for tb_unreliable() is not completely correct, since this is not about handling
+ // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
+ // ipmovie.c produces.
+ if (tb_unreliable(st->codec) && duration_count[i] > 15 && duration_gcd[i] > 1)
+ av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * duration_gcd[i], INT_MAX);
if(duration_count[i]
&& tb_unreliable(st->codec) /*&&
//FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den*/){
+ int num = 0;
double best_error= 2*av_q2d(st->time_base);
best_error= best_error*best_error*duration_count[i]*1000*12*30;
// av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
if(error < best_error){
best_error= error;
- av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, get_std_framerate(j), 12*1001, INT_MAX);
+ num = get_std_framerate(j);
}
}
+ // do not increase frame rate by more than 1 % in order to match a standard rate.
+ if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
+ av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
}
if (!st->r_frame_rate.num){
if( st->codec->time_base.den * (int64_t)st->time_base.num
- <= st->codec->time_base.num * (int64_t)st->time_base.den){
+ <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
st->r_frame_rate.num = st->codec->time_base.den;
- st->r_frame_rate.den = st->codec->time_base.num;
+ st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
}else{
st->r_frame_rate.num = st->time_base.den;
st->r_frame_rate.den = st->time_base.num;
av_free(st->index_entries);
av_free(st->codec->extradata);
av_free(st->codec);
+#if LIBAVFORMAT_VERSION_INT < (53<<16)
av_free(st->filename);
+#endif
av_free(st->priv_data);
av_free(st);
}
for(i=s->nb_programs-1; i>=0; i--) {
+#if LIBAVFORMAT_VERSION_INT < (53<<16)
av_freep(&s->programs[i]->provider_name);
av_freep(&s->programs[i]->name);
+#endif
av_metadata_free(&s->programs[i]->metadata);
av_freep(&s->programs[i]->stream_index);
av_freep(&s->programs[i]);
flush_packet_queue(s);
av_freep(&s->priv_data);
while(s->nb_chapters--) {
+#if LIBAVFORMAT_VERSION_INT < (53<<16)
av_free(s->chapters[s->nb_chapters]->title);
+#endif
av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
av_free(s->chapters[s->nb_chapters]);
}
timestamps corrected before they are returned to the user */
st->cur_dts = 0;
st->first_dts = AV_NOPTS_VALUE;
+ st->probe_packets = MAX_PROBE_PACKETS;
/* default pts setting is MPEG-like */
av_set_pts_info(st, 33, 1, 90000);
st->last_IP_pts = AV_NOPTS_VALUE;
for(i=0; i<MAX_REORDER_DELAY+1; i++)
st->pts_buffer[i]= AV_NOPTS_VALUE;
+ st->reference_dts = AV_NOPTS_VALUE;
st->sample_aspect_ratio = (AVRational){0,1};
return program;
}
-void av_set_program_name(AVProgram *program, char *provider_name, char *name)
-{
- assert(!provider_name == !name);
- if(name){
- av_free(program->provider_name);
- av_free(program-> name);
- program->provider_name = av_strdup(provider_name);
- program-> name = av_strdup( name);
- }
-}
-
AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
{
AVChapter *chapter = NULL;
return NULL;
dynarray_add(&s->chapters, &s->nb_chapters, chapter);
}
+#if LIBAVFORMAT_VERSION_INT < (53<<16)
av_free(chapter->title);
- chapter->title = av_strdup(title);
+#endif
+ av_metadata_set(&chapter->metadata, "title", title);
chapter->id = id;
chapter->time_base= time_base;
chapter->start = start;
}else
st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
}
+
+ if(s->oformat->flags & AVFMT_GLOBALHEADER &&
+ !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
+ av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
}
if (!s->priv_data && s->oformat->priv_data_size > 0) {
if (pkt->duration == 0) {
compute_frame_duration(&num, &den, st, NULL, pkt);
if (den && num) {
- pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
+ pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
}
}
return ret;
}
-int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
- AVPacketList *pktl, **next_point, *this_pktl;
- int stream_count=0;
- int streams[MAX_STREAMS];
-
- if(pkt){
- AVStream *st= s->streams[ pkt->stream_index];
-
-// assert(pkt->destruct != av_destruct_packet); //FIXME
+void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
+ int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
+{
+ AVPacketList **next_point, *this_pktl;
- this_pktl = av_mallocz(sizeof(AVPacketList));
- this_pktl->pkt= *pkt;
- if(pkt->destruct == av_destruct_packet)
- pkt->destruct= NULL; // not shared -> must keep original from being freed
- else
- av_dup_packet(&this_pktl->pkt); //shared -> must dup
+ this_pktl = av_mallocz(sizeof(AVPacketList));
+ this_pktl->pkt= *pkt;
+ pkt->destruct= NULL; // do not free original but only the copy
+ av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
+ if(s->streams[pkt->stream_index]->last_in_packet_buffer){
+ next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
+ }else
next_point = &s->packet_buffer;
- while(*next_point){
- AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
- int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
- int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
- if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
- break;
- next_point= &(*next_point)->next;
+
+ if(*next_point){
+ if(compare(s, &s->packet_buffer_end->pkt, pkt)){
+ while(!compare(s, &(*next_point)->pkt, pkt)){
+ next_point= &(*next_point)->next;
+ }
+ goto next_non_null;
+ }else{
+ next_point = &(s->packet_buffer_end->next);
}
- this_pktl->next= *next_point;
- *next_point= this_pktl;
}
+ assert(!*next_point);
+
+ s->packet_buffer_end= this_pktl;
+next_non_null:
+
+ this_pktl->next= *next_point;
+
+ s->streams[pkt->stream_index]->last_in_packet_buffer=
+ *next_point= this_pktl;
+}
+
+/**
+ * Interleaving comparator: returns nonzero when next->dts is later than
+ * pkt->dts, comparing across the two streams' timebases by
+ * cross-multiplication (st2.num*st.den vs st.num*st2.den).
+ * A packet with no DTS never sorts after anything (returns 0).
+ */
+int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
+{
+ AVStream *st = s->streams[ pkt ->stream_index];
+ AVStream *st2= s->streams[ next->stream_index];
+ int64_t left = st2->time_base.num * (int64_t)st ->time_base.den;
+ int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
+
+ if (pkt->dts == AV_NOPTS_VALUE)
+ return 0;
+
+ return next->dts * left > pkt->dts * right; //FIXME this can overflow
+}
- memset(streams, 0, sizeof(streams));
- pktl= s->packet_buffer;
- while(pktl){
-//av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
- if(streams[ pktl->pkt.stream_index ] == 0)
- stream_count++;
- streams[ pktl->pkt.stream_index ]++;
- pktl= pktl->next;
+int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
+ AVPacketList *pktl;
+ int stream_count=0;
+ int i;
+
+ if(pkt){
+ ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
}
+ for(i=0; i < s->nb_streams; i++)
+ stream_count+= !!s->streams[i]->last_in_packet_buffer;
+
if(stream_count && (s->nb_streams == stream_count || flush)){
pktl= s->packet_buffer;
*out= pktl->pkt;
s->packet_buffer= pktl->next;
+ if(!s->packet_buffer)
+ s->packet_buffer_end= NULL;
+
+ if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
+ s->streams[out->stream_index]->last_in_packet_buffer= NULL;
av_freep(&pktl);
return 1;
}else{
if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
return -1;
- if(pkt->dts == AV_NOPTS_VALUE)
+ if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
return -1;
for(;;){
}
}
+/**
+ * Log a frame rate with the coarsest representation that is still exact:
+ * two decimals if fractional, whole number if an integer, "Nk" if a
+ * multiple of 1000. Scaling by 100 before rounding keeps two decimal
+ * digits for the fractional test.
+ * NOTE(review): lrintf() takes a float, so d*100 (a double) is narrowed
+ * before rounding — lrint() would avoid the precision loss; confirm intent.
+ */
+static void print_fps(double d, const char *postfix){
+ uint64_t v= lrintf(d*100);
+ if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
+ else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
+ else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
+}
+
/* "user interface" functions */
static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
{
int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
AVStream *st = ic->streams[i];
int g = av_gcd(st->time_base.num, st->time_base.den);
+ AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
avcodec_string(buf, sizeof(buf), st->codec, is_output);
av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
/* the pid is an important information, so we display it */
/* XXX: add a generic system */
if (flags & AVFMT_SHOW_IDS)
av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
- if (strlen(st->language) > 0)
- av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
+ if (lang)
+ av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
av_log(NULL, AV_LOG_INFO, ": %s", buf);
if (st->sample_aspect_ratio.num && // default
}
if(st->codec->codec_type == CODEC_TYPE_VIDEO){
if(st->r_frame_rate.den && st->r_frame_rate.num)
- av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
-/* else if(st->time_base.den && st->time_base.num)
- av_log(NULL, AV_LOG_INFO, ", %5.2f tb(m)", 1/av_q2d(st->time_base));*/
- else
- av_log(NULL, AV_LOG_INFO, ", %5.2f tb(c)", 1/av_q2d(st->codec->time_base));
+ print_fps(av_q2d(st->r_frame_rate), "tbr");
+ if(st->time_base.den && st->time_base.num)
+ print_fps(1/av_q2d(st->time_base), "tbn");
+ if(st->codec->time_base.den && st->codec->time_base.num)
+ print_fps(1/av_q2d(st->codec->time_base), "tbc");
}
av_log(NULL, AV_LOG_INFO, "\n");
}
if(ic->nb_programs) {
int j, k;
for(j=0; j<ic->nb_programs; j++) {
+ AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
+ "name", NULL, 0);
av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
- ic->programs[j]->name ? ic->programs[j]->name : "");
+ name ? name->value : "");
for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
}
} else
for(i=0;i<ic->nb_streams;i++)
dump_stream_format(ic, i, index, is_output);
+ if (ic->metadata) {
+ AVMetadataTag *tag=NULL;
+ av_log(NULL, AV_LOG_INFO, " Metadata\n");
+ while((tag=av_metadata_get(ic->metadata, "", tag, AV_METADATA_IGNORE_SUFFIX))) {
+ av_log(NULL, AV_LOG_INFO, " %-16s: %s\n", tag->key, tag->value);
+ }
+ }
+
}
#if LIBAVFORMAT_VERSION_MAJOR < 53
p = datestr;
q = NULL;
if (!duration) {
+ if (!strncasecmp(datestr, "now", len))
+ return (int64_t) now * 1000000;
+
/* parse the year-month-day part */
for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
q = small_strptime(p, date_fmt[i], &dt);
}
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
- int pts_num, int pts_den)
+ unsigned int pts_num, unsigned int pts_den)
{
- unsigned int gcd= av_gcd(pts_num, pts_den);
s->pts_wrap_bits = pts_wrap_bits;
- s->time_base.num = pts_num/gcd;
- s->time_base.den = pts_den/gcd;
- if(gcd>1)
- av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, gcd);
+ if(av_reduce(&s->time_base.num, &s->time_base.den, pts_num, pts_den, INT_MAX)){
+ if(s->time_base.num != pts_num)
+ av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/s->time_base.num);
+ }else
+ av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
+
+ if(!s->time_base.num || !s->time_base.den)
+ s->time_base.num= s->time_base.den= 0;
}