* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avformat.h"
-#include "opt.h"
-#include "avstring.h"
+#include "libavcodec/opt.h"
+#include "libavutil/avstring.h"
#include "riff.h"
#include <sys/time.h>
#include <time.h>
if (score > *score_max) {
*score_max = score;
fmt = fmt1;
- }
+ }else if (score == *score_max)
+ fmt = NULL;
}
return fmt;
}
{"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
{"analyzeduration", "how many microseconds are analyzed to estimate duration", OFFSET(max_analyze_duration), FF_OPT_TYPE_INT, 3*AV_TIME_BASE, 0, INT_MAX, D},
{"cryptokey", "decryption key", OFFSET(key), FF_OPT_TYPE_BINARY, 0, 0, 0, D},
-{"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, INT_MAX, 0, INT_MAX, D},
+{"indexmem", "max memory used for timestamp index (per stream)", OFFSET(max_index_size), FF_OPT_TYPE_INT, 1<<20, 0, INT_MAX, D},
+{"rtbufsize", "max memory used for buffering real-time frames", OFFSET(max_picture_buffer), FF_OPT_TYPE_INT, 3041280, 0, INT_MAX, D}, /* defaults to 1s of 15fps 352x288 YUYV422 video */
+{"fdebug", "print specific debug info", OFFSET(debug), FF_OPT_TYPE_FLAGS, DEFAULT, 0, INT_MAX, E|D, "fdebug"},
+{"ts", NULL, 0, FF_OPT_TYPE_CONST, FF_FDEBUG_TS, INT_MIN, INT_MAX, E|D, "fdebug"},
{NULL},
};
st->start_time = pts;
}
+/* Retroactively assign DTS/PTS/duration to already-buffered packets of the
+ * same stream once a packet with a known, nonzero duration (pkt) arrives.
+ * Buffered packets that carry no timestamps and no duration are assumed to
+ * be evenly spaced pkt->duration time-base units apart.
+ * NOTE(review): called from compute_pkt_fields only when s->packet_buffer is
+ * non-empty and pkt->duration != 0 — see the caller hunk below. */
+static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
+{
+    AVPacketList *pktl= s->packet_buffer;
+    int64_t cur_dts= 0;
+
+    if(st->first_dts != AV_NOPTS_VALUE){
+        cur_dts= st->first_dts;
+        /* Walk over the leading run of timestamp-less packets for this stream,
+         * stepping cur_dts back once per packet, so that after the loop
+         * cur_dts is the DTS the first buffered packet should receive.
+         * A packet with any real timestamp or duration ends the run. */
+        for(; pktl; pktl= pktl->next){
+            if(pktl->pkt.stream_index == pkt->stream_index){
+                if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
+                    break;
+                cur_dts -= pkt->duration;
+            }
+        }
+        pktl= s->packet_buffer;
+        st->first_dts = cur_dts;
+    }else if(st->cur_dts)
+        return; /* stream already has a running DTS; nothing to reconstruct */
+
+    /* Second pass: stamp the leading timestamp-less packets with
+     * interpolated values, pkt->duration apart, starting at cur_dts. */
+    for(; pktl; pktl= pktl->next){
+        if(pktl->pkt.stream_index != pkt->stream_index)
+            continue;
+        if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
+           && !pktl->pkt.duration){
+            pktl->pkt.dts= cur_dts;
+            if(!st->codec->has_b_frames)
+                pktl->pkt.pts= cur_dts; /* no reordering delay, so PTS == DTS */
+            cur_dts += pkt->duration;
+            pktl->pkt.duration= pkt->duration;
+        }else
+            break; /* first packet with real timestamps ends the run */
+    }
+    if(st->first_dts == AV_NOPTS_VALUE)
+        st->cur_dts= cur_dts;
+}
+
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
AVCodecParserContext *pc, AVPacket *pkt)
{
compute_frame_duration(&num, &den, st, pc, pkt);
if (den && num) {
pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
+
+ if(pkt->duration != 0 && s->packet_buffer)
+ update_initial_durations(s, st, pkt);
}
}
// av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
/* interpolate PTS and DTS if they are not present */
- if(delay <=1){
+ if(delay==0 || (delay==1 && pc)){
if (presentation_delayed) {
/* DTS = decompression timestamp */
/* PTS = presentation timestamp */
return ret;
}
+ if(s->cur_pkt.pts != AV_NOPTS_VALUE &&
+ s->cur_pkt.dts != AV_NOPTS_VALUE &&
+ s->cur_pkt.pts < s->cur_pkt.dts){
+ av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
+ s->cur_pkt.stream_index,
+ s->cur_pkt.pts,
+ s->cur_pkt.dts,
+ s->cur_pkt.size);
+// av_free_packet(&s->cur_pkt);
+// return -1;
+ }
+
st = s->streams[s->cur_pkt.stream_index];
- if(st->codec->debug & FF_DEBUG_PTS)
+ if(s->debug & FF_FDEBUG_TS)
av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
s->cur_pkt.stream_index,
s->cur_pkt.pts,
st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
}
if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
- st->parser->last_frame_offset=
+ st->parser->next_frame_offset=
st->parser->cur_offset= s->cur_pkt.pos;
}
}
}
}
- if(st->codec->debug & FF_DEBUG_PTS)
+ if(s->debug & FF_FDEBUG_TS)
av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
pkt->stream_index,
pkt->pts,
int av_find_default_stream_index(AVFormatContext *s)
{
+ int first_audio_index = -1;
int i;
AVStream *st;
if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
return i;
}
+ if (first_audio_index < 0 && st->codec->codec_type == CODEC_TYPE_AUDIO)
+ first_audio_index = i;
}
- return 0;
+ return first_audio_index >= 0 ? first_audio_index : 0;
}
/**
int i;
AVPacket pkt;
- if(st->index_entries && st->nb_index_entries){
+ if(st->nb_index_entries){
+ assert(st->index_entries);
ie= &st->index_entries[st->nb_index_entries-1];
url_fseek(s->pb, ie->pos, SEEK_SET);
av_update_cur_dts(s, st, ie->timestamp);
int val;
switch(enc->codec_type) {
case CODEC_TYPE_AUDIO:
- val = enc->sample_rate;
+ val = enc->sample_rate && enc->channels;
break;
case CODEC_TYPE_VIDEO:
val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
val = 1;
break;
}
- return (enc->codec_id != CODEC_ID_NONE && val != 0);
+ return enc->codec_id != CODEC_ID_NONE && val != 0;
}
static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
switch(st->codec->codec_type) {
case CODEC_TYPE_VIDEO:
ret = avcodec_decode_video(st->codec, &picture,
- &got_picture, (uint8_t *)data, size);
+ &got_picture, data, size);
break;
case CODEC_TYPE_AUDIO:
data_size = FFMAX(size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
if (!samples)
goto fail;
ret = avcodec_decode_audio2(st->codec, samples,
- &data_size, (uint8_t *)data, size);
+ &data_size, data, size);
av_free(samples);
break;
default:
return CODEC_ID_NONE;
}
+/* Fill in missing chapter end times: each chapter with end == AV_NOPTS_VALUE
+ * is closed at the start of the following chapter, and the last chapter is
+ * closed at the end of the file (start_time + duration, rescaled into the
+ * chapter's own time base).
+ * Assumes chapters are sorted by start time and share a common time base
+ * with their successor (asserted below). */
+static void compute_chapters_end(AVFormatContext *s)
+{
+    unsigned int i;
+
+    for (i=0; i+1<s->nb_chapters; i++)
+        if (s->chapters[i]->end == AV_NOPTS_VALUE) {
+            assert(s->chapters[i]->start <= s->chapters[i+1]->start);
+            assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
+            s->chapters[i]->end = s->chapters[i+1]->start;
+        }
+
+    /* after the loop i == nb_chapters-1 (the guard keeps i valid when
+     * nb_chapters == 0); close the final open-ended chapter at EOF */
+    if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
+        assert(s->start_time != AV_NOPTS_VALUE);
+        assert(s->duration > 0);
+        s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
+                                           AV_TIME_BASE_Q,
+                                           s->chapters[i]->time_base);
+    }
+}
+
/* absolute maximum size we read until we abort */
#define MAX_READ_SIZE 5000000
// if(st->codec->codec_type == CODEC_TYPE_VIDEO)
// av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
if(duration_count[index] < 2)
- memset(duration_error, 0, MAX_STREAMS * sizeof(*duration_error));
+ memset(duration_error[index], 0, sizeof(*duration_error));
for(i=1; i<MAX_STD_TIMEBASES; i++){
int framerate= get_std_framerate(i);
int ticks= lrintf(dur*framerate/(1001*12));
count++;
}
- // close codecs which where opened in try_decode_frame()
+ // close codecs which were opened in try_decode_frame()
for(i=0;i<ic->nb_streams;i++) {
st = ic->streams[i];
if(st->codec->codec)
url_fseek(ic->pb, ic->data_offset, SEEK_SET);
}
+ compute_chapters_end(ic);
+
#if 0
/* correct DTS for B-frame streams with no timestamps */
for(i=0;i<ic->nb_streams;i++) {
av_free(st->index_entries);
av_free(st->codec->extradata);
av_free(st->codec);
+ av_free(st->filename);
av_free(st);
}
for(i=s->nb_programs-1; i>=0; i--) {
av_freep(&s->programs[i]->stream_index);
av_freep(&s->programs[i]);
}
+ av_freep(&s->programs);
flush_packet_queue(s);
av_freep(&s->priv_data);
+ while(s->nb_chapters--) {
+ av_free(s->chapters[s->nb_chapters]->title);
+ av_free(s->chapters[s->nb_chapters]);
+ }
+ av_freep(&s->chapters);
av_free(s);
}
}
}
+/* Look up the chapter with the given id, creating and appending a new one if
+ * none exists, then (re)set its title, time base, start and end.
+ * Returns the chapter, or NULL on allocation failure.
+ * Ownership: the chapter and its strdup'ed title belong to s and are freed
+ * in av_close_input_file (see the chapter-freeing hunk above in this patch). */
+AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
+{
+    AVChapter *chapter = NULL;
+    int i;
+
+    /* linear search — chapter counts are small, so O(n) per call is fine */
+    for(i=0; i<s->nb_chapters; i++)
+        if(s->chapters[i]->id == id)
+            chapter = s->chapters[i];
+
+    if(!chapter){
+        chapter= av_mallocz(sizeof(AVChapter));
+        if(!chapter)
+            return NULL;
+        dynarray_add(&s->chapters, &s->nb_chapters, chapter);
+    }
+    /* free any previous title before replacing it (updating existing id) */
+    av_free(chapter->title);
+    chapter->title = av_strdup(title);
+    chapter->id = id;
+    chapter->time_base= time_base;
+    chapter->start = start;
+    chapter->end = end;
+
+    return chapter;
+}
/************************************************************/
/* output media file */
int av_write_frame(AVFormatContext *s, AVPacket *pkt)
{
- int ret;
+ int ret = compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
- ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
return ret;
* @param flush 1 if no further packets are available as input and all
* remaining packets should be output
* @return 1 if a packet was output, 0 if no packet could be output,
- * < 0 if an error occured
+ * < 0 if an error occurred
*/
static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
if(s->oformat->interleave_packet)
secs %= 60;
hours = mins / 60;
mins %= 60;
- av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
- (10 * us) / AV_TIME_BASE);
+ av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
+ (100 * us) / AV_TIME_BASE);
} else {
av_log(NULL, AV_LOG_INFO, "N/A");
}