/* memory handling */
-void av_destruct_packet(AVPacket *pkt)
-{
- av_free(pkt->data);
- pkt->data = NULL; pkt->size = 0;
-}
-
-void av_init_packet(AVPacket *pkt)
-{
- pkt->pts = AV_NOPTS_VALUE;
- pkt->dts = AV_NOPTS_VALUE;
- pkt->pos = -1;
- pkt->duration = 0;
- pkt->convergence_duration = 0;
- pkt->flags = 0;
- pkt->stream_index = 0;
- pkt->destruct= av_destruct_packet_nofree;
-}
-
-int av_new_packet(AVPacket *pkt, int size)
-{
- uint8_t *data;
- if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
- return AVERROR(ENOMEM);
- data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
- if (!data)
- return AVERROR(ENOMEM);
- memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
-
- av_init_packet(pkt);
- pkt->data = data;
- pkt->size = size;
- pkt->destruct = av_destruct_packet;
- return 0;
-}
int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
{
if(ret<=0)
av_free_packet(pkt);
else
- pkt->size= ret;
+ av_shrink_packet(pkt, ret);
return ret;
}
-int av_dup_packet(AVPacket *pkt)
-{
- if (((pkt->destruct == av_destruct_packet_nofree) || (pkt->destruct == NULL)) && pkt->data) {
- uint8_t *data;
- /* We duplicate the packet and don't forget to add the padding again. */
- if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
- return AVERROR(ENOMEM);
- data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
- if (!data) {
- return AVERROR(ENOMEM);
- }
- memcpy(data, pkt->data, pkt->size);
- memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
- pkt->data = data;
- pkt->destruct = av_destruct_packet;
- }
- return 0;
-}
int av_filename_number_test(const char *filename)
{
return av_probe_input_format2(pd, is_opened, &score);
}
-static int set_codec_from_probe_data(AVStream *st, AVProbeData *pd, int score)
+static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
{
AVInputFormat *fmt;
fmt = av_probe_input_format2(pd, 1, &score);
if (fmt) {
+ av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
+ pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
if (!strcmp(fmt->name, "mp3")) {
st->codec->codec_id = CODEC_ID_MP3;
st->codec->codec_type = CODEC_TYPE_AUDIO;
} else if (!strcmp(fmt->name, "ac3")) {
st->codec->codec_id = CODEC_ID_AC3;
st->codec->codec_type = CODEC_TYPE_AUDIO;
+ } else if (!strcmp(fmt->name, "eac3")) {
+ st->codec->codec_id = CODEC_ID_EAC3;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
} else if (!strcmp(fmt->name, "mpegvideo")) {
st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
st->codec->codec_type = CODEC_TYPE_VIDEO;
} else if (!strcmp(fmt->name, "h264")) {
st->codec->codec_id = CODEC_ID_H264;
st->codec->codec_type = CODEC_TYPE_VIDEO;
+ } else if (!strcmp(fmt->name, "dts")) {
+ st->codec->codec_id = CODEC_ID_DTS;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
}
}
return !!fmt;
ff_metadata_demux_compat(ic);
#endif
+ ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
+
*ic_ptr = ic;
return 0;
fail:
int err, probe_size;
AVProbeData probe_data, *pd = &probe_data;
ByteIOContext *pb = NULL;
+ void *logctx= ap && ap->prealloced_context ? *ic_ptr : NULL;
pd->filename = "";
if (filename)
}
/* guess file format */
fmt = av_probe_input_format2(pd, 1, &score);
+ if(fmt){
+ if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
+ av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
+ }else
+ av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);
+ }
}
av_freep(&pd->buf);
}
av_freep(&pd->buf);
if (pb)
url_fclose(pb);
+ if (ap && ap->prealloced_context)
+ av_free(*ic_ptr);
*ic_ptr = NULL;
return err;
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
{
- int ret;
+ int ret, i;
AVStream *st;
for(;;){
if (pktl) {
*pkt = pktl->pkt;
- if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE){
+ if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
+ !s->streams[pkt->stream_index]->probe_packets ||
+ s->raw_packet_buffer_remaining_size < pkt->size){
+ AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
+ av_freep(&pd->buf);
+ pd->buf_size = 0;
s->raw_packet_buffer = pktl->next;
+ s->raw_packet_buffer_remaining_size += pkt->size;
av_free(pktl);
return 0;
}
av_init_packet(pkt);
ret= s->iformat->read_packet(s, pkt);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ if (!pktl || ret == AVERROR(EAGAIN))
+ return ret;
+ for (i = 0; i < s->nb_streams; i++)
+ s->streams[i]->probe_packets = 0;
+ continue;
+ }
st= s->streams[pkt->stream_index];
switch(st->codec->codec_type){
break;
}
- if(!pktl && st->codec->codec_id!=CODEC_ID_PROBE)
+ if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||
+ !st->probe_packets))
return ret;
add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
+ s->raw_packet_buffer_remaining_size -= pkt->size;
if(st->codec->codec_id == CODEC_ID_PROBE){
AVProbeData *pd = &st->probe_data;
+ --st->probe_packets;
+
pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
pd->buf_size += pkt->size;
memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
- set_codec_from_probe_data(st, pd, 1);
+ set_codec_from_probe_data(s, st, pd, 1);
if(st->codec->codec_id != CODEC_ID_PROBE){
pd->buf_size=0;
av_freep(&pd->buf);
/* used for example by ADPCM codecs */
if (enc->bit_rate == 0)
return -1;
- frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
+ frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
}
} else {
frame_size = enc->frame_size;
int num, den, presentation_delayed, delay, i;
int64_t offset;
+ if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == FF_B_TYPE)
+ //FIXME Set low_delay = 0 when has_b_frames = 1
+ st->codec->has_b_frames = 1;
+
/* do we have a video B-frame ? */
delay= st->codec->has_b_frames;
presentation_delayed = 0;
// av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
/* interpolate PTS and DTS if they are not present */
- if(delay==0 || (delay==1 && pc)){
+ //We skip H264 currently because delay and has_b_frames are not reliably set
+ if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
if (presentation_delayed) {
/* DTS = decompression timestamp */
/* PTS = presentation timestamp */
FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
if(pkt->dts == AV_NOPTS_VALUE)
pkt->dts= st->pts_buffer[0];
- if(delay>1){
+ if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here
update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
}
if(pkt->dts > st->cur_dts)
pkt->convergence_duration = pc->convergence_duration;
}
-void av_destruct_packet_nofree(AVPacket *pkt)
-{
- pkt->data = NULL; pkt->size = 0;
-}
static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
*pkt = st->cur_pkt; st->cur_pkt.data= NULL;
compute_pkt_fields(s, st, NULL, pkt);
s->cur_st = NULL;
+ if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
+ (pkt->flags & PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
+ ff_reduce_index(s, st->index);
+ av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
+ }
break;
} else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
- len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
- st->cur_ptr, st->cur_len,
- st->cur_pkt.pts, st->cur_pkt.dts);
+ len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
+ st->cur_ptr, st->cur_len,
+ st->cur_pkt.pts, st->cur_pkt.dts,
+ st->cur_pkt.pos);
st->cur_pkt.pts = AV_NOPTS_VALUE;
st->cur_pkt.dts = AV_NOPTS_VALUE;
/* increment read pointer */
/* return packet if any */
if (pkt->size) {
- pkt->pos = st->cur_pkt.pos; // Isn't quite accurate but close.
got_packet:
pkt->duration = 0;
pkt->stream_index = st->index;
pkt->pts = st->parser->pts;
pkt->dts = st->parser->dts;
- pkt->destruct = av_destruct_packet_nofree;
+ pkt->pos = st->parser->pos;
+ pkt->destruct = NULL;
compute_pkt_fields(s, st, st->parser, pkt);
if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & PKT_FLAG_KEY){
for(i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
if (st->parser && st->need_parsing) {
- av_parser_parse(st->parser, st->codec,
+ av_parser_parse2(st->parser, st->codec,
&pkt->data, &pkt->size,
NULL, 0,
- AV_NOPTS_VALUE, AV_NOPTS_VALUE);
+ AV_NOPTS_VALUE, AV_NOPTS_VALUE,
+ AV_NOPTS_VALUE);
if (pkt->size)
goto got_packet;
}
av_free_packet(&pktl->pkt);
av_free(pktl);
}
+ while(s->raw_packet_buffer){
+ pktl = s->raw_packet_buffer;
+ s->raw_packet_buffer = pktl->next;
+ av_free_packet(&pktl->pkt);
+ av_free(pktl);
+ }
+ s->packet_buffer_end=
+ s->raw_packet_buffer_end= NULL;
+ s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
}
/*******************************************************/
/**
* Flush the frame reader.
*/
-static void av_read_frame_flush(AVFormatContext *s)
+void av_read_frame_flush(AVFormatContext *s)
{
AVStream *st;
int i;
/* fail safe */
st->cur_ptr = NULL;
st->cur_len = 0;
+
+ st->probe_packets = MAX_PROBE_PACKETS;
}
}
int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
AVInputFormat *avif= s->iformat;
- int64_t pos_min, pos_max, pos, pos_limit;
+ int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
int64_t ts_min, ts_max, ts;
int index;
AVStream *st;
pos_min= e->pos;
ts_min= e->timestamp;
#ifdef DEBUG_SEEK
- av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
- pos_min,ts_min);
+ av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
+ pos_min,ts_min);
#endif
}else{
assert(index==0);
ts_max= e->timestamp;
pos_limit= pos_max - e->min_distance;
#ifdef DEBUG_SEEK
- av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
- pos_max,pos_limit, ts_max);
+ av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
+ pos_max,pos_limit, ts_max);
#endif
}
}
else
no_change=0;
#ifdef DEBUG_SEEK
-av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
+ av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
+ pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
+ start_pos, no_change);
#endif
if(ts == AV_NOPTS_VALUE){
av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
return ret;
av_update_cur_dts(s, st, ie->timestamp);
}else{
- if ((ret = url_fseek(s->pb, 0, SEEK_SET)) < 0)
+ if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
return ret;
}
for(i=0;; i++) {
return av_seek_frame_generic(s, stream_index, timestamp, flags);
}
+int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
+{
+ if(min_ts > ts || max_ts < ts)
+ return -1;
+
+ av_read_frame_flush(s);
+
+ if (s->iformat->read_seek2)
+ return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
+
+ if(s->iformat->read_timestamp){
+ //try to seek via read_timestamp()
+ }
+
+ //Fallback to old API if new is not implemented but old is
+ //Note the old has somewat different sematics
+ if(s->iformat->read_seek || 1)
+ return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
+
+ // try some generic seek like av_seek_frame_generic() but with new ts semantics
+}
+
/*******************************************************/
/**
val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
if(!enc->frame_size &&
(enc->codec_id == CODEC_ID_VORBIS ||
- enc->codec_id == CODEC_ID_AAC))
+ enc->codec_id == CODEC_ID_AAC ||
+ enc->codec_id == CODEC_ID_SPEEX))
return 0;
break;
case CODEC_TYPE_VIDEO:
return enc->codec_id != CODEC_ID_NONE && val != 0;
}
-static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
+static int try_decode_frame(AVStream *st, AVPacket *avpkt)
{
int16_t *samples;
AVCodec *codec;
int got_picture, data_size, ret=0;
AVFrame picture;
- if(!st->codec->codec){
- codec = avcodec_find_decoder(st->codec->codec_id);
- if (!codec)
- return -1;
- ret = avcodec_open(st->codec, codec);
- if (ret < 0)
- return ret;
- }
+ if(!st->codec->codec){
+ codec = avcodec_find_decoder(st->codec->codec_id);
+ if (!codec)
+ return -1;
+ ret = avcodec_open(st->codec, codec);
+ if (ret < 0)
+ return ret;
+ }
- if(!has_codec_parameters(st->codec)){
- switch(st->codec->codec_type) {
- case CODEC_TYPE_VIDEO:
- ret = avcodec_decode_video(st->codec, &picture,
- &got_picture, data, size);
- break;
- case CODEC_TYPE_AUDIO:
- data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
- samples = av_malloc(data_size);
- if (!samples)
- goto fail;
- ret = avcodec_decode_audio2(st->codec, samples,
- &data_size, data, size);
- av_free(samples);
- break;
- default:
- break;
+ if(!has_codec_parameters(st->codec)){
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ avcodec_get_frame_defaults(&picture);
+ ret = avcodec_decode_video2(st->codec, &picture,
+ &got_picture, avpkt);
+ break;
+ case CODEC_TYPE_AUDIO:
+ data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
+ samples = av_malloc(data_size);
+ if (!samples)
+ goto fail;
+ ret = avcodec_decode_audio3(st->codec, samples,
+ &data_size, avpkt);
+ av_free(samples);
+ break;
+ default:
+ break;
+ }
}
- }
fail:
return ret;
}
-unsigned int codec_get_tag(const AVCodecTag *tags, int id)
+unsigned int ff_codec_get_tag(const AVCodecTag *tags, int id)
{
while (tags->id != CODEC_ID_NONE) {
if (tags->id == id)
return 0;
}
-enum CodecID codec_get_id(const AVCodecTag *tags, unsigned int tag)
+enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
{
int i;
for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
{
int i;
for(i=0; tags && tags[i]; i++){
- int tag= codec_get_tag(tags[i], id);
+ int tag= ff_codec_get_tag(tags[i], id);
if(tag) return tag;
}
return 0;
{
int i;
for(i=0; tags && tags[i]; i++){
- enum CodecID id= codec_get_id(tags[i], tag);
+ enum CodecID id= ff_codec_get_id(tags[i], tag);
if(id!=CODEC_ID_NONE) return id;
}
return CODEC_ID_NONE;
}
}
-/* absolute maximum size we read until we abort */
-#define MAX_READ_SIZE 5000000
-
#define MAX_STD_TIMEBASES (60*12+5)
static int get_std_framerate(int i){
if(i<60*12) return i*1001;
for(;;) {
if(url_interrupt_cb()){
ret= AVERROR(EINTR);
+ av_log(ic, AV_LOG_DEBUG, "interrupted\n");
break;
}
if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
/* if we found the info for all the codecs, we can stop */
ret = count;
+ av_log(ic, AV_LOG_DEBUG, "All info found\n");
break;
}
}
/* we did not get all the codec info, but we read too much data */
- if (read_size >= MAX_READ_SIZE) {
+ if (read_size >= ic->probesize) {
ret = count;
+ av_log(ic, AV_LOG_WARNING, "MAX_READ_SIZE:%d reached\n", ic->probesize);
break;
}
if (!has_codec_parameters(st->codec)){
char buf[256];
avcodec_string(buf, sizeof(buf), st->codec, 0);
- av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
+ av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
} else {
ret = 0;
}
read_size += pkt->size;
st = ic->streams[pkt->stream_index];
- if(codec_info_nb_frames[st->index]>1)
+ if(codec_info_nb_frames[st->index]>1) {
+ if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration){
+ av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
+ break;
+ }
codec_info_duration[st->index] += pkt->duration;
+ }
if (pkt->duration != 0)
codec_info_nb_frames[st->index]++;
st->codec->codec_id == CODEC_ID_PPM ||
st->codec->codec_id == CODEC_ID_SHORTEN ||
(st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
- try_decode_frame(st, pkt->data, pkt->size);
+ try_decode_frame(st, pkt);
- if (st->time_base.den > 0 && av_rescale_q(codec_info_duration[st->index], st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
- break;
- }
count++;
}
av_free(st->index_entries);
av_free(st->codec->extradata);
av_free(st->codec);
+#if LIBAVFORMAT_VERSION_INT < (53<<16)
av_free(st->filename);
+#endif
av_free(st->priv_data);
av_free(st);
}
for(i=s->nb_programs-1; i>=0; i--) {
+#if LIBAVFORMAT_VERSION_INT < (53<<16)
av_freep(&s->programs[i]->provider_name);
av_freep(&s->programs[i]->name);
+#endif
av_metadata_free(&s->programs[i]->metadata);
av_freep(&s->programs[i]->stream_index);
av_freep(&s->programs[i]);
flush_packet_queue(s);
av_freep(&s->priv_data);
while(s->nb_chapters--) {
+#if LIBAVFORMAT_VERSION_INT < (53<<16)
av_free(s->chapters[s->nb_chapters]->title);
+#endif
av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
av_free(s->chapters[s->nb_chapters]);
}
timestamps corrected before they are returned to the user */
st->cur_dts = 0;
st->first_dts = AV_NOPTS_VALUE;
+ st->probe_packets = MAX_PROBE_PACKETS;
/* default pts setting is MPEG-like */
av_set_pts_info(st, 33, 1, 90000);
return NULL;
dynarray_add(&s->chapters, &s->nb_chapters, chapter);
}
+#if LIBAVFORMAT_VERSION_INT < (53<<16)
av_free(chapter->title);
- chapter->title = av_strdup(title);
+#endif
+ av_metadata_set(&chapter->metadata, "title", title);
chapter->id = id;
chapter->time_base= time_base;
chapter->start = start;
this_pktl = av_mallocz(sizeof(AVPacketList));
this_pktl->pkt= *pkt;
- if(pkt->destruct == av_destruct_packet)
- pkt->destruct= NULL; // not shared -> must keep original from being freed
- else
- av_dup_packet(&this_pktl->pkt); //shared -> must dup
-
- next_point = &s->packet_buffer;
- while(*next_point){
- if(compare(s, &(*next_point)->pkt, pkt))
- break;
- next_point= &(*next_point)->next;
+ pkt->destruct= NULL; // do not free original but only the copy
+ av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory
+
+ if(s->streams[pkt->stream_index]->last_in_packet_buffer){
+ next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
+ }else
+ next_point = &s->packet_buffer;
+
+ if(*next_point){
+ if(compare(s, &s->packet_buffer_end->pkt, pkt)){
+ while(!compare(s, &(*next_point)->pkt, pkt)){
+ next_point= &(*next_point)->next;
+ }
+ goto next_non_null;
+ }else{
+ next_point = &(s->packet_buffer_end->next);
+ }
}
+ assert(!*next_point);
+
+ s->packet_buffer_end= this_pktl;
+next_non_null:
+
this_pktl->next= *next_point;
+
+ s->streams[pkt->stream_index]->last_in_packet_buffer=
*next_point= this_pktl;
}
int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
AVPacketList *pktl;
int stream_count=0;
- int streams[MAX_STREAMS];
+ int i;
if(pkt){
ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
}
- memset(streams, 0, sizeof(streams));
- pktl= s->packet_buffer;
- while(pktl){
-//av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
- if(streams[ pktl->pkt.stream_index ] == 0)
- stream_count++;
- streams[ pktl->pkt.stream_index ]++;
- pktl= pktl->next;
- }
+ for(i=0; i < s->nb_streams; i++)
+ stream_count+= !!s->streams[i]->last_in_packet_buffer;
if(stream_count && (s->nb_streams == stream_count || flush)){
pktl= s->packet_buffer;
*out= pktl->pkt;
s->packet_buffer= pktl->next;
+ if(!s->packet_buffer)
+ s->packet_buffer_end= NULL;
+
+ if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
+ s->streams[out->stream_index]->last_in_packet_buffer= NULL;
av_freep(&pktl);
return 1;
}else{
int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
AVStream *st = ic->streams[i];
int g = av_gcd(st->time_base.num, st->time_base.den);
+ AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
avcodec_string(buf, sizeof(buf), st->codec, is_output);
av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
/* the pid is an important information, so we display it */
/* XXX: add a generic system */
if (flags & AVFMT_SHOW_IDS)
av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
- if (strlen(st->language) > 0)
- av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
+ if (lang)
+ av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
av_log(NULL, AV_LOG_INFO, ": %s", buf);
if (st->sample_aspect_ratio.num && // default
if(ic->nb_programs) {
int j, k;
for(j=0; j<ic->nb_programs; j++) {
+ AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
+ "name", NULL, 0);
av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
- ic->programs[j]->name ? ic->programs[j]->name : "");
+ name ? name->value : "");
for(k=0; k<ic->programs[j]->nb_stream_indexes; k++)
dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
}
} else
for(i=0;i<ic->nb_streams;i++)
dump_stream_format(ic, i, index, is_output);
+ if (ic->metadata) {
+ AVMetadataTag *tag=NULL;
+ av_log(NULL, AV_LOG_INFO, " Metadata\n");
+ while((tag=av_metadata_get(ic->metadata, "", tag, AV_METADATA_IGNORE_SUFFIX))) {
+ av_log(NULL, AV_LOG_INFO, " %-16s: %s\n", tag->key, tag->value);
+ }
+ }
+
}
#if LIBAVFORMAT_VERSION_MAJOR < 53
}
/**
 * Set the time base and wrapping info for a given stream.
 *
 * The fraction pts_num/pts_den is reduced with av_reduce() so that both
 * components fit in an int (capped at INT_MAX); when the reduction is not
 * exact a warning is logged. A degenerate result (zero numerator or
 * denominator) resets the time base to 0/0.
 *
 * @param s             stream to configure
 * @param pts_wrap_bits number of bits effective timestamps wrap at
 * @param pts_num       time base numerator
 * @param pts_den       time base denominator
 */
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
                     unsigned int pts_num, unsigned int pts_den)
{
    s->pts_wrap_bits = pts_wrap_bits;

    if (av_reduce(&s->time_base.num, &s->time_base.den, pts_num, pts_den, INT_MAX)) {
        if (s->time_base.num != pts_num)
            av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/s->time_base.num);
    } else
        av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);

    if (!s->time_base.num || !s->time_base.den)
        s->time_base.num = s->time_base.den = 0;
}