const char *ext, *p;
char ext1[32], *q;
+ if(!filename)
+ return 0;
+
ext = strrchr(filename, '.');
if (ext) {
ext++;
+/* Return >= 0 if filename contains a frame-number pattern usable by
+   get_frame_filename(), < 0 otherwise (result of a trial expansion with
+   frame number 1). */
int filename_number_test(const char *filename)
{
    char buf[1024];
+    /* guard: a NULL filename must not reach get_frame_filename() */
+    if(!filename)
+        return -1;
    return get_frame_filename(buf, sizeof(buf), filename, 1);
}
/**
* open a media file from an IO stream. 'fmt' must be specified.
*/
+
+/* AVClass item_name callback: returns the name of the input format if one
+   is attached, else the output format's name, else the literal "NULL".
+   (Presumably consumed by av_log() to label messages — confirm against
+   the AVClass definition.) */
+static const char* format_to_name(void* ptr)
+{
+    AVFormatContext* fc = (AVFormatContext*) ptr;
+    if(fc->iformat) return fc->iformat->name;
+    else if(fc->oformat) return fc->oformat->name;
+    else return "NULL";
+}
+
+/* class descriptor installed into every AVFormatContext by av_alloc_format_context() */
+static const AVClass av_format_context_class = { "AVFormatContext", format_to_name };
+
+/* Allocate a zero-initialized AVFormatContext with its av_class pointer set
+   to the AVFormatContext class descriptor. Returns NULL on allocation
+   failure. Callers that previously used av_mallocz() directly should use
+   this so the class pointer is always populated. */
+AVFormatContext *av_alloc_format_context(void)
+{
+    AVFormatContext *ic;
+    ic = av_mallocz(sizeof(AVFormatContext));
+    if (!ic) return ic;
+    ic->av_class = &av_format_context_class;
+    return ic;
+}
+
int av_open_input_stream(AVFormatContext **ic_ptr,
ByteIOContext *pb, const char *filename,
AVInputFormat *fmt, AVFormatParameters *ap)
int err;
AVFormatContext *ic;
- ic = av_mallocz(sizeof(AVFormatContext));
+ ic = av_alloc_format_context();
if (!ic) {
err = AVERROR_NOMEM;
goto fail;
ic->priv_data = NULL;
}
- /* default pts settings is MPEG like */
- av_set_pts_info(ic, 33, 1, 90000);
- ic->last_pkt_pts = AV_NOPTS_VALUE;
- ic->last_pkt_dts = AV_NOPTS_VALUE;
- ic->last_pkt_stream_pts = AV_NOPTS_VALUE;
- ic->last_pkt_stream_dts = AV_NOPTS_VALUE;
-
err = ic->iformat->read_header(ic, ap);
if (err < 0)
goto fail;
wrapping is handled by considering the next PTS/DTS as a delta to
the previous value. We handle the delta as a fraction to avoid any
rounding errors. */
-static inline int64_t convert_timestamp_units(AVFormatContext *s,
+static inline int64_t convert_timestamp_units(AVStream *s,
int64_t *plast_pkt_pts,
int *plast_pkt_pts_frac,
int64_t *plast_pkt_stream_pts,
shift = 64 - s->pts_wrap_bits;
delta_pts = ((stream_pts - *plast_pkt_stream_pts) << shift) >> shift;
/* XXX: overflow possible but very unlikely as it is a delta */
- delta_pts = delta_pts * AV_TIME_BASE * s->pts_num;
- pts = *plast_pkt_pts + (delta_pts / s->pts_den);
- pts_frac = *plast_pkt_pts_frac + (delta_pts % s->pts_den);
- if (pts_frac >= s->pts_den) {
- pts_frac -= s->pts_den;
+ delta_pts = delta_pts * AV_TIME_BASE * s->time_base.num;
+ pts = *plast_pkt_pts + (delta_pts / s->time_base.den);
+ pts_frac = *plast_pkt_pts_frac + (delta_pts % s->time_base.den);
+ if (pts_frac >= s->time_base.den) {
+ pts_frac -= s->time_base.den;
pts++;
}
} else {
/* no previous pts, so no wrapping possible */
- pts = (int64_t)(((double)stream_pts * AV_TIME_BASE * s->pts_num) /
- (double)s->pts_den);
+// pts = av_rescale(stream_pts, (int64_t)AV_TIME_BASE * s->time_base.num, s->time_base.den);
+ pts = (int64_t)(((double)stream_pts * AV_TIME_BASE * s->time_base.num) /
+ (double)s->time_base.den);
pts_frac = 0;
}
*plast_pkt_stream_pts = stream_pts;
} else {
/* presentation is not delayed : PTS and DTS are the same */
if (pkt->pts == AV_NOPTS_VALUE) {
- pkt->pts = st->cur_dts;
- pkt->dts = st->cur_dts;
+ if (pkt->dts == AV_NOPTS_VALUE) {
+ pkt->pts = st->cur_dts;
+ pkt->dts = st->cur_dts;
+ }
+ else {
+ st->cur_dts = pkt->dts;
+ pkt->pts = pkt->dts;
+ }
} else {
st->cur_dts = pkt->pts;
pkt->dts = pkt->pts;
/* no more packets: really terminates parsing */
return ret;
}
+
+ st = s->streams[s->cur_pkt.stream_index];
/* convert the packet time stamp units and handle wrapping */
- s->cur_pkt.pts = convert_timestamp_units(s,
- &s->last_pkt_pts, &s->last_pkt_pts_frac,
- &s->last_pkt_stream_pts,
+ s->cur_pkt.pts = convert_timestamp_units(st,
+ &st->last_pkt_pts, &st->last_pkt_pts_frac,
+ &st->last_pkt_stream_pts,
s->cur_pkt.pts);
- s->cur_pkt.dts = convert_timestamp_units(s,
- &s->last_pkt_dts, &s->last_pkt_dts_frac,
- &s->last_pkt_stream_dts,
+ s->cur_pkt.dts = convert_timestamp_units(st,
+ &st->last_pkt_dts, &st->last_pkt_dts_frac,
+ &st->last_pkt_stream_dts,
s->cur_pkt.dts);
#if 0
if (s->cur_pkt.stream_index == 0) {
(double)s->cur_pkt.dts / AV_TIME_BASE);
}
#endif
-
+
/* duration field */
- if (s->cur_pkt.duration != 0) {
- s->cur_pkt.duration = ((int64_t)s->cur_pkt.duration * AV_TIME_BASE * s->pts_num) /
- s->pts_den;
- }
+ s->cur_pkt.duration = av_rescale(s->cur_pkt.duration, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
- st = s->streams[s->cur_pkt.stream_index];
s->cur_st = st;
s->cur_ptr = s->cur_pkt.data;
s->cur_len = s->cur_pkt.size;
memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
}
st->nb_index_entries++;
+ }else{
+ if(ie->pos == pos && distance < ie->min_distance) //dont reduce the distance
+ distance= ie->min_distance;
}
}else{
index= st->nb_index_entries++;
return a;
}
+#define DEBUG_SEEK
+
+/**
+ * Seek to the last keyframe with timestamp <= target_ts in the given
+ * stream, by running an interpolated binary search over file byte
+ * positions using the demuxer's read_timestamp() callback.
+ *
+ * The search window [pos_min, pos_max] is first narrowed with any cached
+ * index entries; missing bounds are probed from the start of the data and
+ * backwards from EOF respectively. Interpolation degrades to bisection and
+ * then to linear scan when an iteration fails to shrink the window.
+ *
+ * If stream_index < 0 the default stream is used.
+ * Returns 0 on success, -1 on failure.
+ */
+int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts){
+    AVInputFormat *avif= s->iformat;
+    int64_t pos_min, pos_max, pos, pos_limit;
+    int64_t ts_min, ts_max, ts;
+    int64_t start_pos;
+    int index, no_change;
+    AVStream *st;
+
+    if (stream_index < 0) {
+        stream_index = av_find_default_stream_index(s);
+        if (stream_index < 0)
+            return -1;
+    }
+
+#ifdef DEBUG_SEEK
+    av_log(s, AV_LOG_DEBUG, "read_seek: %d %lld\n", stream_index, target_ts);
+#endif
+
+    ts_max=
+    ts_min= AV_NOPTS_VALUE;
+    pos_limit= -1; //gcc falsely says it may be uninitialized
+
+    /* narrow the search window using cached index entries, if any */
+    st= s->streams[stream_index];
+    if(st->index_entries){
+        AVIndexEntry *e;
+
+        /* NOTE(review): e is dereferenced before index is validated; this
+           relies on av_index_search_timestamp() never returning a negative
+           index (the assert(index==0) below suggests 0 is the floor) —
+           confirm against its implementation */
+        index= av_index_search_timestamp(st, target_ts);
+        e= &st->index_entries[index];
+
+        if(e->timestamp <= target_ts || e->pos == e->min_distance){
+            pos_min= e->pos;
+            ts_min= e->timestamp;
+#ifdef DEBUG_SEEK
+            av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%llx dts_min=%lld\n",
+                   pos_min,ts_min);
+#endif
+        }else{
+            assert(index==0);
+        }
+        index++;
+        if(index < st->nb_index_entries){
+            e= &st->index_entries[index];
+            assert(e->timestamp >= target_ts);
+            pos_max= e->pos;
+            ts_max= e->timestamp;
+            pos_limit= pos_max - e->min_distance;
+#ifdef DEBUG_SEEK
+            av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%llx pos_limit=0x%llx dts_max=%lld\n",
+                   pos_max,pos_limit, ts_max);
+#endif
+        }
+    }
+
+    /* no lower bound from the index: read the first timestamp in the file */
+    if(ts_min == AV_NOPTS_VALUE){
+        pos_min = s->data_offset;
+        ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
+        if (ts_min == AV_NOPTS_VALUE)
+            return -1;
+    }
+
+    /* no upper bound: step backwards from EOF in doubling steps until a
+       timestamp is found, then walk forward to the last one in the file */
+    if(ts_max == AV_NOPTS_VALUE){
+        int step= 1024;
+        pos_max = url_filesize(url_fileno(&s->pb)) - 1;
+        do{
+            pos_max -= step;
+            ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
+            step += step;
+        }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
+        if (ts_max == AV_NOPTS_VALUE)
+            return -1;
+
+        for(;;){
+            int64_t tmp_pos= pos_max + 1;
+            int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
+            if(tmp_ts == AV_NOPTS_VALUE)
+                break;
+            ts_max= tmp_ts;
+            pos_max= tmp_pos;
+        }
+        pos_limit= pos_max;
+    }
+
+    no_change=0;
+    while (pos_min < pos_limit) {
+#ifdef DEBUG_SEEK
+        av_log(s, AV_LOG_DEBUG, "pos_min=0x%llx pos_max=0x%llx dts_min=%lld dts_max=%lld\n",
+               pos_min, pos_max,
+               ts_min, ts_max);
+#endif
+        assert(pos_limit <= pos_max);
+
+        if(no_change==0){
+            int64_t approximate_keyframe_distance= pos_max - pos_limit;
+            // interpolate position (better than dichotomy)
+            pos = (int64_t)((double)(pos_max - pos_min) *
+                            (double)(target_ts - ts_min) /
+                            (double)(ts_max - ts_min)) + pos_min - approximate_keyframe_distance;
+        }else if(no_change==1){
+            // bisection, if interpolation failed to change min or max pos last time
+            pos = (pos_min + pos_limit)>>1;
+        }else{
+            // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
+            pos=pos_min;
+        }
+        if(pos <= pos_min)
+            pos= pos_min + 1;
+        else if(pos > pos_limit)
+            pos= pos_limit;
+        start_pos= pos;
+
+        ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
+        if(pos == pos_max)
+            no_change++;
+        else
+            no_change=0;
+#ifdef DEBUG_SEEK
+av_log(s, AV_LOG_DEBUG, "%Ld %Ld %Ld / %Ld %Ld %Ld target:%Ld limit:%Ld start:%Ld noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
+#endif
+        assert(ts != AV_NOPTS_VALUE);
+        if (target_ts < ts) {
+            pos_limit = start_pos - 1;
+            pos_max = pos;
+            ts_max = ts;
+        } else {
+            pos_min = pos;
+            ts_min = ts;
+            /* check if we are lucky */
+            if (target_ts == ts)
+                break;
+        }
+    }
+
+    pos = pos_min;
+#ifdef DEBUG_SEEK
+    pos_min = pos;
+    ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
+    pos_min++;
+    ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
+    av_log(s, AV_LOG_DEBUG, "pos=0x%llx %lld<=%lld<=%lld\n",
+           pos, ts_min, target_ts, ts_max);
+#endif
+    /* do the seek */
+    url_fseek(&s->pb, pos, SEEK_SET);
+    st->cur_dts = ts_min;
+
+    return 0;
+}
+
static int av_seek_frame_generic(AVFormatContext *s,
int stream_index, int64_t timestamp)
{
if (ret >= 0) {
return 0;
}
-
- return av_seek_frame_generic(s, stream_index, timestamp);
+
+ if(s->iformat->read_timestamp)
+ return av_seek_frame_binary(s, stream_index, timestamp);
+ else
+ return av_seek_frame_generic(s, stream_index, timestamp);
}
/*******************************************************/
st = ic->streams[pkt->stream_index];
if (pkt->pts != AV_NOPTS_VALUE) {
if (st->start_time == AV_NOPTS_VALUE)
- st->start_time = (int64_t)((double)pkt->pts * ic->pts_num * (double)AV_TIME_BASE / ic->pts_den);
+ st->start_time = av_rescale(pkt->pts, st->time_base.num * (int64_t)AV_TIME_BASE, st->time_base.den);
}
av_free_packet(pkt);
}
read_size += pkt->size;
st = ic->streams[pkt->stream_index];
if (pkt->pts != AV_NOPTS_VALUE) {
- end_time = (int64_t)((double)pkt->pts * ic->pts_num * (double)AV_TIME_BASE / ic->pts_den);
+ end_time = av_rescale(pkt->pts, st->time_base.num * (int64_t)AV_TIME_BASE, st->time_base.den);
duration = end_time - st->start_time;
if (duration > 0) {
if (st->duration == AV_NOPTS_VALUE ||
(st->codec.codec_id == CODEC_ID_FLV1 ||
st->codec.codec_id == CODEC_ID_H264 ||
st->codec.codec_id == CODEC_ID_H263 ||
+ st->codec.codec_id == CODEC_ID_VORBIS ||
(st->codec.codec_id == CODEC_ID_MPEG4 && !st->need_parsing)))
try_decode_frame(st, pkt->data, pkt->size);
st->id = id;
st->start_time = AV_NOPTS_VALUE;
st->duration = AV_NOPTS_VALUE;
+
+ /* default pts settings is MPEG like */
+ av_set_pts_info(st, 33, 1, 90000);
+ st->last_pkt_pts = AV_NOPTS_VALUE;
+ st->last_pkt_dts = AV_NOPTS_VALUE;
+ st->last_pkt_stream_pts = AV_NOPTS_VALUE;
+ st->last_pkt_stream_dts = AV_NOPTS_VALUE;
+
s->streams[s->nb_streams++] = st;
return st;
}
int ret, i;
AVStream *st;
- /* default pts settings is MPEG like */
- av_set_pts_info(s, 33, 1, 90000);
ret = s->oformat->write_header(s);
if (ret < 0)
return ret;
switch (st->codec.codec_type) {
case CODEC_TYPE_AUDIO:
av_frac_init(&st->pts, 0, 0,
- (int64_t)s->pts_num * st->codec.sample_rate);
+ (int64_t)st->time_base.num * st->codec.sample_rate);
break;
case CODEC_TYPE_VIDEO:
av_frac_init(&st->pts, 0, 0,
- (int64_t)s->pts_num * st->codec.frame_rate);
+ (int64_t)st->time_base.num * st->codec.frame_rate);
break;
default:
break;
int ret, frame_size;
st = s->streams[stream_index];
- pts_mask = (1LL << s->pts_wrap_bits) - 1;
- ret = s->oformat->write_packet(s, stream_index, buf, size,
- st->pts.val & pts_mask);
+ pts_mask = (1LL << st->pts_wrap_bits) - 1;
+
+ /* HACK/FIXME we skip all zero size audio packets so a encoder can pass pts by outputing zero size packets */
+ if(st->codec.codec_type==CODEC_TYPE_AUDIO && size==0)
+ ret = 0;
+ else
+ ret = s->oformat->write_packet(s, stream_index, buf, size,
+ st->pts.val & pts_mask);
+
if (ret < 0)
return ret;
switch (st->codec.codec_type) {
case CODEC_TYPE_AUDIO:
frame_size = get_audio_frame_size(&st->codec, size);
- if (frame_size >= 0) {
- av_frac_add(&st->pts,
- (int64_t)s->pts_den * frame_size);
+
+ /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
+ but it would be better if we had the real timestamps from the encoder */
+ if (frame_size >= 0 && (size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
+ av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
}
break;
case CODEC_TYPE_VIDEO:
- av_frac_add(&st->pts,
- (int64_t)s->pts_den * st->codec.frame_rate_base);
+ av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec.frame_rate_base);
break;
default:
break;
int i, flags;
char buf[256];
- fprintf(stderr, "%s #%d, %s, %s '%s':\n",
+ av_log(NULL, AV_LOG_DEBUG, "%s #%d, %s, %s '%s':\n",
is_output ? "Output" : "Input",
index,
is_output ? ic->oformat->name : ic->iformat->name,
is_output ? "to" : "from", url);
if (!is_output) {
- fprintf(stderr, " Duration: ");
+ av_log(NULL, AV_LOG_DEBUG, " Duration: ");
if (ic->duration != AV_NOPTS_VALUE) {
int hours, mins, secs, us;
secs = ic->duration / AV_TIME_BASE;
secs %= 60;
hours = mins / 60;
mins %= 60;
- fprintf(stderr, "%02d:%02d:%02d.%01d", hours, mins, secs,
+ av_log(NULL, AV_LOG_DEBUG, "%02d:%02d:%02d.%01d", hours, mins, secs,
(10 * us) / AV_TIME_BASE);
} else {
- fprintf(stderr, "N/A");
+ av_log(NULL, AV_LOG_DEBUG, "N/A");
}
- fprintf(stderr, ", bitrate: ");
+ av_log(NULL, AV_LOG_DEBUG, ", bitrate: ");
if (ic->bit_rate) {
- fprintf(stderr,"%d kb/s", ic->bit_rate / 1000);
+ av_log(NULL, AV_LOG_DEBUG,"%d kb/s", ic->bit_rate / 1000);
} else {
- fprintf(stderr, "N/A");
+ av_log(NULL, AV_LOG_DEBUG, "N/A");
}
- fprintf(stderr, "\n");
+ av_log(NULL, AV_LOG_DEBUG, "\n");
}
for(i=0;i<ic->nb_streams;i++) {
AVStream *st = ic->streams[i];
avcodec_string(buf, sizeof(buf), &st->codec, is_output);
- fprintf(stderr, " Stream #%d.%d", index, i);
+ av_log(NULL, AV_LOG_DEBUG, " Stream #%d.%d", index, i);
/* the pid is an important information, so we display it */
/* XXX: add a generic system */
if (is_output)
else
flags = ic->iformat->flags;
if (flags & AVFMT_SHOW_IDS) {
- fprintf(stderr, "[0x%x]", st->id);
+ av_log(NULL, AV_LOG_DEBUG, "[0x%x]", st->id);
}
- fprintf(stderr, ": %s\n", buf);
+ av_log(NULL, AV_LOG_DEBUG, ": %s\n", buf);
}
}
}
else {
/* Finally we give up and parse it as double */
- *frame_rate_base = DEFAULT_FRAME_RATE_BASE;
+ *frame_rate_base = DEFAULT_FRAME_RATE_BASE; //FIXME use av_d2q()
*frame_rate = (int)(strtod(arg, 0) * (*frame_rate_base) + 0.5);
}
if (!*frame_rate || !*frame_rate_base)
const char *q;
int is_utc, len;
char lastch;
+
+#undef time
time_t now = time(0);
len = strlen(datestr);
* @param pts_num numerator to convert to seconds (MPEG: 1)
* @param pts_den denominator to convert to seconds (MPEG: 90000)
*/
-void av_set_pts_info(AVFormatContext *s, int pts_wrap_bits,
+void av_set_pts_info(AVStream *s, int pts_wrap_bits,
                     int pts_num, int pts_den)
{
    s->pts_wrap_bits = pts_wrap_bits;
-    s->pts_num = pts_num;
-    s->pts_den = pts_den;
+    /* timing info now lives per-stream: pts_num/pts_den are stored as the
+       stream's time_base rational instead of format-context fields */
+    s->time_base.num = pts_num;
+    s->time_base.den = pts_den;
}
/* fraction handling */