/*
* AVI demuxer
- * Copyright (c) 2001 Fabrice Bellard.
+ * Copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+
+//#define DEBUG
+//#define DEBUG_SEEK
+
+#include <strings.h>
+#include "libavutil/intreadwrite.h"
+#include "libavutil/bswap.h"
+#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "avi.h"
#include "dv.h"
#undef NDEBUG
#include <assert.h>
-//#define DEBUG
-//#define DEBUG_SEEK
-
typedef struct AVIStream {
int64_t frame_offset; /* current frame (video) or byte (audio) counter
(used to compute the pts) */
int prefix_count;
uint32_t pal[256];
int has_pal;
+ int dshow_block_align; ///< block align variable used to emulate bugs in the MS dshow demuxer
+
+ AVFormatContext *sub_ctx;
+ AVPacket sub_pkt;
+ uint8_t *sub_buffer;
} AVIStream;
typedef struct {
int64_t riff_end;
int64_t movi_end;
int64_t fsize;
- offset_t movi_list;
+ int64_t movi_list;
+ int64_t last_pkt_pos;
int index_loaded;
int is_odml;
int non_interleaved;
int stream_index;
DVDemuxContext* dv_demux;
+ int odml_depth;
+#define MAX_ODML_DEPTH 1000
} AVIContext;
static const char avi_headers[][8] = {
#ifdef DEBUG
static void print_tag(const char *str, unsigned int tag, int size)
{
- printf("%s: tag=%c%c%c%c size=0x%x\n",
+ dprintf(NULL, "%s: tag=%c%c%c%c size=0x%x\n",
str, tag & 0xff,
(tag >> 8) & 0xff,
(tag >> 16) & 0xff,
}
#endif
-static int get_riff(AVIContext *avi, ByteIOContext *pb)
+static inline int get_duration(AVIStream *ast, int len){
+ if(ast->sample_size){
+ return len;
+ }else if (ast->dshow_block_align){
+ return (len + ast->dshow_block_align - 1)/ast->dshow_block_align;
+ }else
+ return 1;
+}
+
+static int get_riff(AVFormatContext *s, ByteIOContext *pb)
{
+ AVIContext *avi = s->priv_data;
char header[8];
int i;
return -1;
if(header[7] == 0x19)
- av_log(NULL, AV_LOG_INFO, "file has been generated with a totally broken muxer\n");
+ av_log(s, AV_LOG_INFO, "This file has been generated by a totally broken muxer.\n");
return 0;
}
longs_pre_entry,index_type, entries_in_use, chunk_id, base);
#endif
- if(stream_id > s->nb_streams || stream_id < 0)
+ if(stream_id >= s->nb_streams || stream_id < 0)
return -1;
st= s->streams[stream_id];
ast = st->priv_data;
#ifdef DEBUG_SEEK
av_log(s, AV_LOG_ERROR, "pos:%"PRId64", len:%X\n", pos, len);
#endif
+ if(url_feof(pb))
+ return -1;
+
if(last_pos == pos || pos == base - 8)
avi->non_interleaved= 1;
- else
- av_add_index_entry(st, pos, ast->cum_len / FFMAX(1, ast->sample_size), len, 0, key ? AVINDEX_KEYFRAME : 0);
+ if(last_pos != pos && (len || !ast->sample_size))
+ av_add_index_entry(st, pos, ast->cum_len, len, 0, key ? AVINDEX_KEYFRAME : 0);
- if(ast->sample_size)
- ast->cum_len += len;
- else
- ast->cum_len ++;
+ ast->cum_len += get_duration(ast, len);
last_pos= pos;
}else{
int64_t offset, pos;
offset = get_le64(pb);
get_le32(pb); /* size */
duration = get_le32(pb);
+
+ if(url_feof(pb))
+ return -1;
+
pos = url_ftell(pb);
+ if(avi->odml_depth > MAX_ODML_DEPTH){
+ av_log(s, AV_LOG_ERROR, "Too deeply nested ODML indexes\n");
+ return -1;
+ }
+
url_fseek(pb, offset+8, SEEK_SET);
+ avi->odml_depth++;
read_braindead_odml_indx(s, frame_num);
+ avi->odml_depth--;
frame_num += duration;
url_fseek(pb, pos, SEEK_SET);
ts= st->index_entries[0].timestamp;
for(j=0; j<size; j+=max){
- av_add_index_entry(st, pos+j, ts + j/ast->sample_size, FFMIN(max, size-j), 0, AVINDEX_KEYFRAME);
+ av_add_index_entry(st, pos+j, ts+j, FFMIN(max, size-j), 0, AVINDEX_KEYFRAME);
}
}
}
-static int avi_read_tag(ByteIOContext *pb, char *buf, int maxlen, unsigned int size)
+static int avi_read_tag(AVFormatContext *s, AVStream *st, uint32_t tag, uint32_t size)
{
- offset_t i = url_ftell(pb);
+ ByteIOContext *pb = s->pb;
+ char key[5] = {0}, *value;
+
size += (size & 1);
- get_strz(pb, buf, maxlen);
- url_fseek(pb, i+size, SEEK_SET);
- return 0;
+
+ if (size == UINT_MAX)
+ return -1;
+ value = av_malloc(size+1);
+ if (!value)
+ return -1;
+ get_buffer(pb, value, size);
+ value[size]=0;
+
+ AV_WL32(key, tag);
+
+ return av_metadata_set2(st ? &st->metadata : &s->metadata, key, value,
+ AV_METADATA_DONT_STRDUP_VAL);
+}
+
+static void avi_read_info(AVFormatContext *s, uint64_t end)
+{
+ while (url_ftell(s->pb) < end) {
+ uint32_t tag = get_le32(s->pb);
+ uint32_t size = get_le32(s->pb);
+ avi_read_tag(s, NULL, tag, size);
+ }
+}
+
+static const char months[12][4] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun",
+ "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" };
+
+static void avi_metadata_creation_time(AVMetadata **metadata, char *date)
+{
+ char month[4], time[9], buffer[64];
+ int i, day, year;
+ /* parse standard AVI date format (ie. "Mon Mar 10 15:04:43 2003") */
+ if (sscanf(date, "%*3s%*[ ]%3s%*[ ]%2d%*[ ]%8s%*[ ]%4d",
+ month, &day, time, &year) == 4) {
+ for (i=0; i<12; i++)
+ if (!strcasecmp(month, months[i])) {
+ snprintf(buffer, sizeof(buffer), "%.4d-%.2d-%.2d %s",
+ year, i+1, day, time);
+ av_metadata_set2(metadata, "creation_time", buffer, 0);
+ }
+ } else if (date[4] == '/' && date[7] == '/') {
+ date[4] = date[7] = '-';
+ av_metadata_set2(metadata, "creation_time", date, 0);
+ }
+}
+
+static void avi_read_nikon(AVFormatContext *s, uint64_t end)
+{
+ while (url_ftell(s->pb) < end) {
+ uint32_t tag = get_le32(s->pb);
+ uint32_t size = get_le32(s->pb);
+ switch (tag) {
+ case MKTAG('n', 'c', 't', 'g'): { /* Nikon Tags */
+ uint64_t tag_end = url_ftell(s->pb) + size;
+ while (url_ftell(s->pb) < tag_end) {
+ uint16_t tag = get_le16(s->pb);
+ uint16_t size = get_le16(s->pb);
+ const char *name = NULL;
+ char buffer[64] = {0};
+ size -= get_buffer(s->pb, buffer,
+ FFMIN(size, sizeof(buffer)-1));
+ switch (tag) {
+ case 0x03: name = "maker"; break;
+ case 0x04: name = "model"; break;
+ case 0x13: name = "creation_time";
+ if (buffer[4] == ':' && buffer[7] == ':')
+ buffer[4] = buffer[7] = '-';
+ break;
+ }
+ if (name)
+ av_metadata_set2(&s->metadata, name, buffer, 0);
+ url_fskip(s->pb, size);
+ }
+ break;
+ }
+ default:
+ url_fskip(s->pb, size);
+ break;
+ }
+ }
}
static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
AVIContext *avi = s->priv_data;
ByteIOContext *pb = s->pb;
- uint32_t tag, tag1, handler;
+ unsigned int tag, tag1, handler;
int codec_type, stream_index, frame_period, bit_rate;
- unsigned int size, nb_frames;
+ unsigned int size;
int i;
AVStream *st;
AVIStream *ast = NULL;
- char str_track[4];
int avih_width=0, avih_height=0;
int amv_file_format=0;
+ uint64_t list_end = 0;
avi->stream_index= -1;
- if (get_riff(avi, pb) < 0)
+ if (get_riff(s, pb) < 0)
return -1;
avi->fsize = url_fsize(pb);
if(avi->fsize<=0)
- avi->fsize= avi->riff_end;
+ avi->fsize= avi->riff_end == 8 ? INT64_MAX : avi->riff_end;
/* first list tag */
stream_index = -1;
switch(tag) {
case MKTAG('L', 'I', 'S', 'T'):
- /* ignored, except when start of video packets */
+ list_end = url_ftell(pb) + size;
+ /* Ignored, except at start of video packets. */
tag1 = get_le32(pb);
#ifdef DEBUG
print_tag("list", tag1, 0);
avi->movi_list = url_ftell(pb) - 4;
if(size) avi->movi_end = avi->movi_list + size + (size & 1);
else avi->movi_end = url_fsize(pb);
-#ifdef DEBUG
- printf("movi end=%"PRIx64"\n", avi->movi_end);
-#endif
+ dprintf(NULL, "movi end=%"PRIx64"\n", avi->movi_end);
goto end_of_header;
}
+ else if (tag1 == MKTAG('I', 'N', 'F', 'O'))
+ avi_read_info(s, list_end);
+ else if (tag1 == MKTAG('n', 'c', 'd', 't'))
+ avi_read_nikon(s, list_end);
+
+ break;
+ case MKTAG('I', 'D', 'I', 'T'): {
+ unsigned char date[64] = {0};
+ size += (size & 1);
+ size -= get_buffer(pb, date, FFMIN(size, sizeof(date)-1));
+ url_fskip(pb, size);
+ avi_metadata_creation_time(&s->metadata, date);
break;
+ }
case MKTAG('d', 'm', 'l', 'h'):
avi->is_odml = 1;
url_fskip(pb, size + (size & 1));
case MKTAG('a', 'm', 'v', 'h'):
amv_file_format=1;
case MKTAG('a', 'v', 'i', 'h'):
- /* avi header */
+ /* AVI header */
/* using frame_period is bad idea */
frame_period = get_le32(pb);
bit_rate = get_le32(pb) * 8;
/*
* After some consideration -- I don't think we
- * have to support anything but DV in a type1 AVIs.
+ * have to support anything but DV in type1 AVIs.
*/
if (s->nb_streams != 1)
goto fail;
ast = s->streams[0]->priv_data;
av_freep(&s->streams[0]->codec->extradata);
+ av_freep(&s->streams[0]->codec);
av_freep(&s->streams[0]);
s->nb_streams = 0;
- if (ENABLE_DV_DEMUXER) {
+ if (CONFIG_DV_DEMUXER) {
avi->dv_demux = dv_init_demux(s);
if (!avi->dv_demux)
goto fail;
}
/*
* else, leave duration alone; timing estimation in utils.c
- * will make a guess based on bit rate.
+ * will make a guess based on bitrate.
*/
stream_index = s->nb_streams - 1;
get_le32(pb); /* initial frame */
ast->scale = get_le32(pb);
ast->rate = get_le32(pb);
- if(ast->scale && ast->rate){
- }else if(frame_period){
- ast->rate = 1000000;
- ast->scale = frame_period;
- }else{
- ast->rate = 25;
- ast->scale = 1;
+ if(!(ast->scale && ast->rate)){
+ av_log(s, AV_LOG_WARNING, "scale/rate is %u/%u which is invalid. (This file has been generated by broken software.)\n", ast->scale, ast->rate);
+ if(frame_period){
+ ast->rate = 1000000;
+ ast->scale = frame_period;
+ }else{
+ ast->rate = 25;
+ ast->scale = 1;
+ }
}
av_set_pts_info(st, 64, ast->scale, ast->rate);
ast->cum_len=get_le32(pb); /* start */
- nb_frames = get_le32(pb);
+ st->nb_frames = get_le32(pb);
st->start_time = 0;
- st->duration = nb_frames;
get_le32(pb); /* buffer size */
get_le32(pb); /* quality */
ast->sample_size = get_le32(pb); /* sample ssize */
ast->cum_len *= FFMAX(1, ast->sample_size);
-// av_log(NULL, AV_LOG_DEBUG, "%d %d %d %d\n", ast->rate, ast->scale, ast->start, ast->sample_size);
+// av_log(s, AV_LOG_DEBUG, "%d %d %d %d\n", ast->rate, ast->scale, ast->start, ast->sample_size);
switch(tag1) {
case MKTAG('v', 'i', 'd', 's'):
- codec_type = CODEC_TYPE_VIDEO;
+ codec_type = AVMEDIA_TYPE_VIDEO;
ast->sample_size = 0;
break;
case MKTAG('a', 'u', 'd', 's'):
- codec_type = CODEC_TYPE_AUDIO;
+ codec_type = AVMEDIA_TYPE_AUDIO;
break;
case MKTAG('t', 'x', 't', 's'):
- //FIXME
- codec_type = CODEC_TYPE_DATA; //CODEC_TYPE_SUB ? FIXME
+ codec_type = AVMEDIA_TYPE_SUBTITLE;
+ break;
+ case MKTAG('d', 'a', 't', 's'):
+ codec_type = AVMEDIA_TYPE_DATA;
break;
default:
av_log(s, AV_LOG_ERROR, "unknown stream type %X\n", tag1);
goto fail;
}
+ if(ast->sample_size == 0)
+ st->duration = st->nb_frames;
ast->frame_offset= ast->cum_len;
url_fskip(pb, size - 12 * 4);
break;
if (stream_index >= (unsigned)s->nb_streams || avi->dv_demux) {
url_fskip(pb, size);
} else {
+ uint64_t cur_pos = url_ftell(pb);
+ if (cur_pos < list_end)
+ size = FFMIN(size, list_end - cur_pos);
st = s->streams[stream_index];
switch(codec_type) {
- case CODEC_TYPE_VIDEO:
+ case AVMEDIA_TYPE_VIDEO:
if(amv_file_format){
st->codec->width=avih_width;
st->codec->height=avih_height;
- st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = CODEC_ID_AMV;
url_fskip(pb, size);
break;
}
get_le32(pb); /* size */
st->codec->width = get_le32(pb);
- st->codec->height = get_le32(pb);
+ st->codec->height = (int32_t)get_le32(pb);
get_le16(pb); /* panes */
- st->codec->bits_per_sample= get_le16(pb); /* depth */
+ st->codec->bits_per_coded_sample= get_le16(pb); /* depth */
tag1 = get_le32(pb);
get_le32(pb); /* ImageSize */
get_le32(pb); /* XPelsPerMeter */
get_le32(pb); /* ClrUsed */
get_le32(pb); /* ClrImportant */
- if (tag1 == MKTAG('D', 'X', 'S', 'B')) {
- st->codec->codec_type = CODEC_TYPE_SUBTITLE;
+ if (tag1 == MKTAG('D', 'X', 'S', 'B') || tag1 == MKTAG('D','X','S','A')) {
+ st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
st->codec->codec_tag = tag1;
st->codec->codec_id = CODEC_ID_XSUB;
break;
if(size > 10*4 && size<(1<<30)){
st->codec->extradata_size= size - 10*4;
st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!st->codec->extradata) {
+ st->codec->extradata_size= 0;
+ return AVERROR(ENOMEM);
+ }
get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
}
if(st->codec->extradata_size & 1) //FIXME check if the encoder really did this correctly
get_byte(pb);
- /* Extract palette from extradata if bpp <= 8 */
- /* This code assumes that extradata contains only palette */
- /* This is true for all paletted codecs implemented in ffmpeg */
- if (st->codec->extradata_size && (st->codec->bits_per_sample <= 8)) {
+ /* Extract palette from extradata if bpp <= 8. */
+ /* This code assumes that extradata contains only palette. */
+ /* This is true for all paletted codecs implemented in FFmpeg. */
+ if (st->codec->extradata_size && (st->codec->bits_per_coded_sample <= 8)) {
st->codec->palctrl = av_mallocz(sizeof(AVPaletteControl));
-#ifdef WORDS_BIGENDIAN
+#if HAVE_BIGENDIAN
for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)/4; i++)
- st->codec->palctrl->palette[i] = bswap_32(((uint32_t*)st->codec->extradata)[i]);
+ st->codec->palctrl->palette[i] = av_bswap32(((uint32_t*)st->codec->extradata)[i]);
#else
memcpy(st->codec->palctrl->palette, st->codec->extradata,
FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
#ifdef DEBUG
print_tag("video", tag1, 0);
#endif
- st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_tag = tag1;
- st->codec->codec_id = codec_get_id(codec_bmp_tags, tag1);
- st->need_parsing = AVSTREAM_PARSE_HEADERS; // this is needed to get the pict type which is needed for generating correct pts
+ st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, tag1);
+ st->need_parsing = AVSTREAM_PARSE_HEADERS; // This is needed to get the pict type which is necessary for generating correct pts.
+ // Support "Resolution 1:1" for Avid AVI Codec
+ if(tag1 == MKTAG('A', 'V', 'R', 'n') &&
+ st->codec->extradata_size >= 31 &&
+ !memcmp(&st->codec->extradata[28], "1:1", 3))
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+
+ if(st->codec->codec_tag==0 && st->codec->height > 0 && st->codec->extradata_size < 1U<<30){
+ st->codec->extradata_size+= 9;
+ st->codec->extradata= av_realloc(st->codec->extradata, st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ if(st->codec->extradata)
+ memcpy(st->codec->extradata + st->codec->extradata_size - 9, "BottomUp", 9);
+ }
+ st->codec->height= FFABS(st->codec->height);
+
// url_fskip(pb, size - 5 * 4);
break;
- case CODEC_TYPE_AUDIO:
- get_wav_header(pb, st->codec, size);
+ case AVMEDIA_TYPE_AUDIO:
+ ff_get_wav_header(pb, st->codec, size);
+ ast->dshow_block_align= st->codec->block_align;
if(ast->sample_size && st->codec->block_align && ast->sample_size != st->codec->block_align){
av_log(s, AV_LOG_WARNING, "sample size (%d) != block align (%d)\n", ast->sample_size, st->codec->block_align);
ast->sample_size= st->codec->block_align;
}
- if (size%2) /* 2-aligned (fix for Stargate SG-1 - 3x18 - Shades of Grey.avi) */
+ if (size&1) /* 2-aligned (fix for Stargate SG-1 - 3x18 - Shades of Grey.avi) */
url_fskip(pb, 1);
/* Force parsing as several audio frames can be in
- * one packet and timestamps refer to packet start*/
+ * one packet and timestamps refer to packet start. */
st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
- /* ADTS header is in extradata, AAC without header must be stored as exact frames, parser not needed and it will fail */
+ /* ADTS header is in extradata, AAC without header must be
+ * stored as exact frames. Parser not needed and it will
+ * fail. */
if (st->codec->codec_id == CODEC_ID_AAC && st->codec->extradata_size)
st->need_parsing = AVSTREAM_PARSE_NONE;
/* AVI files with Xan DPCM audio (wrongly) declare PCM
* audio in the header but have Axan as stream_code_tag. */
- if (st->codec->stream_codec_tag == ff_get_fourcc("Axan")){
+ if (st->codec->stream_codec_tag == AV_RL32("Axan")){
st->codec->codec_id = CODEC_ID_XAN_DPCM;
st->codec->codec_tag = 0;
}
- if (amv_file_format)
+ if (amv_file_format){
st->codec->codec_id = CODEC_ID_ADPCM_IMA_AMV;
+ ast->dshow_block_align = 0;
+ }
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
+ st->codec->codec_id = CODEC_ID_PROBE;
break;
default:
- st->codec->codec_type = CODEC_TYPE_DATA;
+ st->codec->codec_type = AVMEDIA_TYPE_DATA;
st->codec->codec_id= CODEC_ID_NONE;
st->codec->codec_tag= 0;
url_fskip(pb, size);
get_le32(pb);
get_le32(pb);
- active_aspect.num= get_le16(pb);
active_aspect.den= get_le16(pb);
+ active_aspect.num= get_le16(pb);
active.num = get_le32(pb);
active.den = get_le32(pb);
get_le32(pb); //nbFieldsPerFrame
if(active_aspect.num && active_aspect.den && active.num && active.den){
- st->codec->sample_aspect_ratio= av_div_q(active_aspect, active);
+ st->sample_aspect_ratio= av_div_q(active_aspect, active);
//av_log(s, AV_LOG_ERROR, "vprp %d/%d %d/%d\n", active_aspect.num, active_aspect.den, active.num, active.den);
}
size -= 9*4;
}
url_fseek(pb, size, SEEK_CUR);
break;
- case MKTAG('I', 'N', 'A', 'M'):
- avi_read_tag(pb, s->title, sizeof(s->title), size);
- break;
- case MKTAG('I', 'A', 'R', 'T'):
- avi_read_tag(pb, s->author, sizeof(s->author), size);
- break;
- case MKTAG('I', 'C', 'O', 'P'):
- avi_read_tag(pb, s->copyright, sizeof(s->copyright), size);
- break;
- case MKTAG('I', 'C', 'M', 'T'):
- avi_read_tag(pb, s->comment, sizeof(s->comment), size);
- break;
- case MKTAG('I', 'G', 'N', 'R'):
- avi_read_tag(pb, s->genre, sizeof(s->genre), size);
- break;
- case MKTAG('I', 'P', 'R', 'D'):
- avi_read_tag(pb, s->album, sizeof(s->album), size);
- break;
- case MKTAG('I', 'P', 'R', 'T'):
- avi_read_tag(pb, str_track, sizeof(str_track), size);
- sscanf(str_track, "%d", &s->track);
- break;
+ case MKTAG('s', 't', 'r', 'n'):
+ if(s->nb_streams){
+ avi_read_tag(s, s->streams[s->nb_streams-1], tag, size);
+ break;
+ }
default:
if(size > 1000000){
- av_log(s, AV_LOG_ERROR, "well something went wrong during header parsing, "
- "ill ignore it and try to continue anyway\n");
+ av_log(s, AV_LOG_ERROR, "Something went wrong during header parsing, "
+ "I will ignore it and try to continue anyway.\n");
avi->movi_list = url_ftell(pb) - 4;
avi->movi_end = url_fsize(pb);
goto end_of_header;
/* check stream number */
if (stream_index != s->nb_streams - 1) {
fail:
- for(i=0;i<s->nb_streams;i++) {
- av_freep(&s->streams[i]->codec->extradata);
- av_freep(&s->streams[i]);
- }
return -1;
}
avi_load_index(s);
avi->index_loaded = 1;
avi->non_interleaved |= guess_ni_flag(s);
- if(avi->non_interleaved)
+ for(i=0; i<s->nb_streams; i++){
+ AVStream *st = s->streams[i];
+ if(st->nb_index_entries)
+ break;
+ }
+ if(i==s->nb_streams && avi->non_interleaved) {
+ av_log(s, AV_LOG_WARNING, "non-interleaved AVI without index, switching to interleaved\n");
+ avi->non_interleaved=0;
+ }
+
+ if(avi->non_interleaved) {
+ av_log(s, AV_LOG_INFO, "non-interleaved AVI\n");
clean_index(s);
+ }
+
+ ff_metadata_conv_ctx(s, NULL, ff_avi_metadata_conv);
return 0;
}
+static int read_gab2_sub(AVStream *st, AVPacket *pkt) {
+ if (!strcmp(pkt->data, "GAB2") && AV_RL16(pkt->data+5) == 2) {
+ uint8_t desc[256], *d = desc;
+ uint8_t *end, *ptr = pkt->data+7;
+ unsigned int size, desc_len = bytestream_get_le32(&ptr);
+ int score = AVPROBE_SCORE_MAX / 2;
+ AVIStream *ast = st->priv_data;
+ AVInputFormat *sub_demuxer;
+ AVRational time_base;
+ ByteIOContext *pb;
+ AVProbeData pd;
+
+ if (desc_len > FFMAX(pkt->size-17, 0))
+ return 0;
+
+ end = ptr + desc_len;
+ while (ptr < end-1) {
+ uint8_t tmp;
+ uint32_t ch;
+ GET_UTF16(ch, ptr < end-1 ? bytestream_get_le16(&ptr) : 0, break;);
+ PUT_UTF8(ch, tmp, if(d-desc < sizeof(desc)-1) *d++ = tmp;);
+ }
+ *d = 0;
+ if (*desc)
+ av_metadata_set2(&st->metadata, "title", desc, 0);
+
+ ptr = end + 2;
+ size = bytestream_get_le32(&ptr);
+ size = FFMIN(size, pkt->size+pkt->data-ptr);
+
+ pd = (AVProbeData) { .buf = ptr, .buf_size = size };
+ if (!(sub_demuxer = av_probe_input_format2(&pd, 1, &score)))
+ return 0;
+
+ pb = av_alloc_put_byte(ptr, size, 0, NULL, NULL, NULL, NULL);
+ if (!av_open_input_stream(&ast->sub_ctx, pb, "", sub_demuxer, NULL)) {
+ av_read_packet(ast->sub_ctx, &ast->sub_pkt);
+ *st->codec = *ast->sub_ctx->streams[0]->codec;
+ ast->sub_ctx->streams[0]->codec->extradata = NULL;
+ time_base = ast->sub_ctx->streams[0]->time_base;
+ av_set_pts_info(st, 64, time_base.num, time_base.den);
+ }
+ ast->sub_buffer = pkt->data;
+ memset(pkt, 0, sizeof(*pkt));
+ return 1;
+ }
+ return 0;
+}
+
+static AVStream *get_subtitle_pkt(AVFormatContext *s, AVStream *next_st,
+ AVPacket *pkt)
+{
+ AVIStream *ast, *next_ast = next_st->priv_data;
+ int64_t ts, next_ts, ts_min = INT64_MAX;
+ AVStream *st, *sub_st = NULL;
+ int i;
+
+ next_ts = av_rescale_q(next_ast->frame_offset, next_st->time_base,
+ AV_TIME_BASE_Q);
+
+ for (i=0; i<s->nb_streams; i++) {
+ st = s->streams[i];
+ ast = st->priv_data;
+ if (st->discard < AVDISCARD_ALL && ast && ast->sub_pkt.data) {
+ ts = av_rescale_q(ast->sub_pkt.dts, st->time_base, AV_TIME_BASE_Q);
+ if (ts <= next_ts && ts < ts_min) {
+ ts_min = ts;
+ sub_st = st;
+ }
+ }
+ }
+
+ if (sub_st) {
+ ast = sub_st->priv_data;
+ *pkt = ast->sub_pkt;
+ pkt->stream_index = sub_st->index;
+ if (av_read_packet(ast->sub_ctx, &ast->sub_pkt) < 0)
+ ast->sub_pkt.data = NULL;
+ }
+ return sub_st;
+}
+
+static int get_stream_idx(int *d){
+ if( d[0] >= '0' && d[0] <= '9'
+ && d[1] >= '0' && d[1] <= '9'){
+ return (d[0] - '0') * 10 + (d[1] - '0');
+ }else{
+ return 100; //invalid stream ID
+ }
+}
+
static int avi_read_packet(AVFormatContext *s, AVPacket *pkt)
{
AVIContext *avi = s->priv_data;
ByteIOContext *pb = s->pb;
- int n, d[8], size;
- offset_t i, sync;
+ int n, d[8];
+ unsigned int size;
+ int64_t i, sync;
void* dstr;
- if (ENABLE_DV_DEMUXER && avi->dv_demux) {
- size = dv_get_packet(avi->dv_demux, pkt);
+ if (CONFIG_DV_DEMUXER && avi->dv_demux) {
+ int size = dv_get_packet(avi->dv_demux, pkt);
if (size >= 0)
return size;
}
AVStream *st = s->streams[i];
AVIStream *ast = st->priv_data;
int64_t ts= ast->frame_offset;
+ int64_t last_ts;
- if(ast->sample_size)
- ts /= ast->sample_size;
- ts= av_rescale(ts, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
+ if(!st->nb_index_entries)
+ continue;
+
+ last_ts = st->index_entries[st->nb_index_entries - 1].timestamp;
+ if(!ast->remaining && ts > last_ts)
+ continue;
-// av_log(NULL, AV_LOG_DEBUG, "%"PRId64" %d/%d %"PRId64"\n", ts, st->time_base.num, st->time_base.den, ast->frame_offset);
+ ts = av_rescale_q(ts, st->time_base, (AVRational){FFMAX(1, ast->sample_size), AV_TIME_BASE});
+
+// av_log(s, AV_LOG_DEBUG, "%"PRId64" %d/%d %"PRId64"\n", ts, st->time_base.num, st->time_base.den, ast->frame_offset);
if(ts < best_ts){
best_ts= ts;
best_st= st;
best_stream_index= i;
}
}
+ if(!best_st)
+ return -1;
+
best_ast = best_st->priv_data;
- best_ts= av_rescale(best_ts, best_st->time_base.den, AV_TIME_BASE * (int64_t)best_st->time_base.num); //FIXME a little ugly
+ best_ts = av_rescale_q(best_ts, (AVRational){FFMAX(1, best_ast->sample_size), AV_TIME_BASE}, best_st->time_base);
if(best_ast->remaining)
i= av_index_search_timestamp(best_st, best_ts, AVSEEK_FLAG_ANY | AVSEEK_FLAG_BACKWARD);
- else
+ else{
i= av_index_search_timestamp(best_st, best_ts, AVSEEK_FLAG_ANY);
+ if(i>=0)
+ best_ast->frame_offset= best_st->index_entries[i].timestamp;
+ }
-// av_log(NULL, AV_LOG_DEBUG, "%d\n", i);
+// av_log(s, AV_LOG_DEBUG, "%d\n", i);
if(i>=0){
int64_t pos= best_st->index_entries[i].pos;
pos += best_ast->packet_size - best_ast->remaining;
url_fseek(s->pb, pos + 8, SEEK_SET);
-// av_log(NULL, AV_LOG_DEBUG, "pos=%"PRId64"\n", pos);
+// av_log(s, AV_LOG_DEBUG, "pos=%"PRId64"\n", pos);
assert(best_ast->remaining <= best_ast->packet_size);
if(avi->stream_index >= 0){
AVStream *st= s->streams[ avi->stream_index ];
AVIStream *ast= st->priv_data;
- int size;
+ int size, err;
+
+ if(get_subtitle_pkt(s, st, pkt))
+ return 0;
if(ast->sample_size <= 1) // minorityreport.AVI block_align=1024 sample_size=1 IMA-ADPCM
size= INT_MAX;
else if(ast->sample_size < 32)
- size= 64*ast->sample_size;
+ // arbitrary multiplier to avoid tiny packets for raw PCM data
+ size= 1024*ast->sample_size;
else
size= ast->sample_size;
if(size > ast->remaining)
size= ast->remaining;
- av_get_packet(pb, pkt, size);
+ avi->last_pkt_pos= url_ftell(pb);
+ err= av_get_packet(pb, pkt, size);
+ if(err<0)
+ return err;
if(ast->has_pal && pkt->data && pkt->size<(unsigned)INT_MAX/2){
+ void *ptr= av_realloc(pkt->data, pkt->size + 4*256 + FF_INPUT_BUFFER_PADDING_SIZE);
+ if(ptr){
ast->has_pal=0;
pkt->size += 4*256;
- pkt->data = av_realloc(pkt->data, pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
- if(pkt->data)
+ pkt->data= ptr;
memcpy(pkt->data + pkt->size - 4*256, ast->pal, 4*256);
+ }else
+ av_log(s, AV_LOG_ERROR, "Failed to append palette\n");
}
- if (ENABLE_DV_DEMUXER && avi->dv_demux) {
+ if (CONFIG_DV_DEMUXER && avi->dv_demux) {
dstr = pkt->destruct;
size = dv_produce_packet(avi->dv_demux, pkt,
pkt->data, pkt->size);
pkt->destruct = dstr;
- pkt->flags |= PKT_FLAG_KEY;
+ pkt->flags |= AV_PKT_FLAG_KEY;
+ if (size < 0)
+ av_free_packet(pkt);
+ } else if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
+ && !st->codec->codec_tag && read_gab2_sub(st, pkt)) {
+ ast->frame_offset++;
+ avi->stream_index = -1;
+ ast->remaining = 0;
+ goto resync;
} else {
- /* XXX: how to handle B frames in avi ? */
+ /* XXX: How to handle B-frames in AVI? */
pkt->dts = ast->frame_offset;
// pkt->dts += ast->start;
if(ast->sample_size)
pkt->dts /= ast->sample_size;
-//av_log(NULL, AV_LOG_DEBUG, "dts:%"PRId64" offset:%"PRId64" %d/%d smpl_siz:%d base:%d st:%d size:%d\n", pkt->dts, ast->frame_offset, ast->scale, ast->rate, ast->sample_size, AV_TIME_BASE, avi->stream_index, size);
+//av_log(s, AV_LOG_DEBUG, "dts:%"PRId64" offset:%"PRId64" %d/%d smpl_siz:%d base:%d st:%d size:%d\n", pkt->dts, ast->frame_offset, ast->scale, ast->rate, ast->sample_size, AV_TIME_BASE, avi->stream_index, size);
pkt->stream_index = avi->stream_index;
- if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
AVIndexEntry *e;
int index;
assert(st->index_entries);
- index= av_index_search_timestamp(st, pkt->dts, 0);
+ index= av_index_search_timestamp(st, ast->frame_offset, 0);
e= &st->index_entries[index];
if(index >= 0 && e->timestamp == ast->frame_offset){
if (e->flags & AVINDEX_KEYFRAME)
- pkt->flags |= PKT_FLAG_KEY;
+ pkt->flags |= AV_PKT_FLAG_KEY;
}
} else {
- pkt->flags |= PKT_FLAG_KEY;
+ pkt->flags |= AV_PKT_FLAG_KEY;
}
- if(ast->sample_size)
- ast->frame_offset += pkt->size;
- else
- ast->frame_offset++;
+ ast->frame_offset += get_duration(ast, pkt->size);
}
ast->remaining -= size;
if(!ast->remaining){
size= d[4] + (d[5]<<8) + (d[6]<<16) + (d[7]<<24);
- if( d[2] >= '0' && d[2] <= '9'
- && d[3] >= '0' && d[3] <= '9'){
- n= (d[2] - '0') * 10 + (d[3] - '0');
- }else{
- n= 100; //invalid stream id
- }
-//av_log(NULL, AV_LOG_DEBUG, "%X %X %X %X %X %X %X %X %"PRId64" %d %d\n", d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], i, size, n);
- if(i + size > avi->fsize || d[0]<0)
+ n= get_stream_idx(d+2);
+//av_log(s, AV_LOG_DEBUG, "%X %X %X %X %X %X %X %X %"PRId64" %d %d\n", d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], i, size, n);
+ if(i + (uint64_t)size > avi->fsize || d[0]<0)
continue;
//parse ix##
||(d[0] == 'J' && d[1] == 'U' && d[2] == 'N' && d[3] == 'K')
||(d[0] == 'i' && d[1] == 'd' && d[2] == 'x' && d[3] == '1')){
url_fskip(pb, size);
-//av_log(NULL, AV_LOG_DEBUG, "SKIP\n");
+//av_log(s, AV_LOG_DEBUG, "SKIP\n");
goto resync;
}
- if( d[0] >= '0' && d[0] <= '9'
- && d[1] >= '0' && d[1] <= '9'){
- n= (d[0] - '0') * 10 + (d[1] - '0');
- }else{
- n= 100; //invalid stream id
+ //parse stray LIST
+ if(d[0] == 'L' && d[1] == 'I' && d[2] == 'S' && d[3] == 'T'){
+ url_fskip(pb, 4);
+ goto resync;
+ }
+
+ n= get_stream_idx(d);
+
+ if(!((i-avi->last_pkt_pos)&1) && get_stream_idx(d+1) < s->nb_streams)
+ continue;
+
+ //detect ##ix chunk and skip
+ if(d[2] == 'i' && d[3] == 'x' && n < s->nb_streams){
+ url_fskip(pb, size);
+ goto resync;
}
//parse ##dc/##wb
if(n < s->nb_streams){
- AVStream *st;
- AVIStream *ast;
- st = s->streams[n];
- ast = st->priv_data;
-
- if( (st->discard >= AVDISCARD_DEFAULT && size==0)
- /*|| (st->discard >= AVDISCARD_NONKEY && !(pkt->flags & PKT_FLAG_KEY))*/ //FIXME needs a little reordering
- || st->discard >= AVDISCARD_ALL){
- if(ast->sample_size) ast->frame_offset += pkt->size;
- else ast->frame_offset++;
+ AVStream *st;
+ AVIStream *ast;
+ st = s->streams[n];
+ ast = st->priv_data;
+
+ if(s->nb_streams>=2){
+ AVStream *st1 = s->streams[1];
+ AVIStream *ast1= st1->priv_data;
+ //workaround for broken small-file-bug402.avi
+ if( d[2] == 'w' && d[3] == 'b'
+ && n==0
+ && st ->codec->codec_type == AVMEDIA_TYPE_VIDEO
+ && st1->codec->codec_type == AVMEDIA_TYPE_AUDIO
+ && ast->prefix == 'd'*256+'c'
+ && (d[2]*256+d[3] == ast1->prefix || !ast1->prefix_count)
+ ){
+ n=1;
+ st = st1;
+ ast = ast1;
+ av_log(s, AV_LOG_WARNING, "Invalid stream + prefix combination, assuming audio.\n");
+ }
+ }
+
+
+ if( (st->discard >= AVDISCARD_DEFAULT && size==0)
+ /*|| (st->discard >= AVDISCARD_NONKEY && !(pkt->flags & AV_PKT_FLAG_KEY))*/ //FIXME needs a little reordering
+ || st->discard >= AVDISCARD_ALL){
+ ast->frame_offset += get_duration(ast, size);
url_fskip(pb, size);
goto resync;
- }
+ }
- if (d[2] == 'p' && d[3] == 'c' && size<=4*256+4) {
+ if (d[2] == 'p' && d[3] == 'c' && size<=4*256+4) {
int k = get_byte(pb);
int last = (k + get_byte(pb) - 1) & 0xFF;
ast->pal[k] = get_be32(pb)>>8;// b + (g << 8) + (r << 16);
ast->has_pal= 1;
goto resync;
- } else
- if( ((ast->prefix_count<5 || sync+9 > i) && d[2]<128 && d[3]<128) ||
- d[2]*256+d[3] == ast->prefix /*||
- (d[2] == 'd' && d[3] == 'c') ||
- (d[2] == 'w' && d[3] == 'b')*/) {
-
-//av_log(NULL, AV_LOG_DEBUG, "OK\n");
- if(d[2]*256+d[3] == ast->prefix)
- ast->prefix_count++;
- else{
- ast->prefix= d[2]*256+d[3];
- ast->prefix_count= 0;
- }
+ } else if( ((ast->prefix_count<5 || sync+9 > i) && d[2]<128 && d[3]<128) ||
+ d[2]*256+d[3] == ast->prefix /*||
+ (d[2] == 'd' && d[3] == 'c') ||
+ (d[2] == 'w' && d[3] == 'b')*/) {
+
+//av_log(s, AV_LOG_DEBUG, "OK\n");
+ if(d[2]*256+d[3] == ast->prefix)
+ ast->prefix_count++;
+ else{
+ ast->prefix= d[2]*256+d[3];
+ ast->prefix_count= 0;
+ }
- avi->stream_index= n;
- ast->packet_size= size + 8;
- ast->remaining= size;
+ avi->stream_index= n;
+ ast->packet_size= size + 8;
+ ast->remaining= size;
- {
- uint64_t pos= url_ftell(pb) - 8;
- if(!st->index_entries || !st->nb_index_entries || st->index_entries[st->nb_index_entries - 1].pos < pos){
- av_add_index_entry(st, pos, ast->frame_offset / FFMAX(1, ast->sample_size), size, 0, AVINDEX_KEYFRAME);
+ if(size || !ast->sample_size){
+ uint64_t pos= url_ftell(pb) - 8;
+ if(!st->index_entries || !st->nb_index_entries || st->index_entries[st->nb_index_entries - 1].pos < pos){
+ av_add_index_entry(st, pos, ast->frame_offset, size, 0, AVINDEX_KEYFRAME);
+ }
}
+ goto resync;
}
- goto resync;
- }
}
}
- return -1;
+ return AVERROR_EOF;
}
-/* XXX: we make the implicit supposition that the position are sorted
- for each stream */
+/* XXX: We make the implicit supposition that the positions are sorted
+ for each stream. */
static int avi_read_idx1(AVFormatContext *s, int size)
{
AVIContext *avi = s->priv_data;
if (nb_index_entries <= 0)
return -1;
- /* read the entries and sort them in each stream component */
+ /* Read the entries and sort them in each stream component. */
for(i = 0; i < nb_index_entries; i++) {
tag = get_le32(pb);
flags = get_le32(pb);
pos = get_le32(pb);
len = get_le32(pb);
#if defined(DEBUG_SEEK)
- av_log(NULL, AV_LOG_DEBUG, "%d: tag=0x%x flags=0x%x pos=0x%x len=%d/",
+ av_log(s, AV_LOG_DEBUG, "%d: tag=0x%x flags=0x%x pos=0x%x len=%d/",
i, tag, flags, pos, len);
#endif
if(i==0 && pos > avi->movi_list)
ast = st->priv_data;
#if defined(DEBUG_SEEK)
- av_log(NULL, AV_LOG_DEBUG, "%d cum_len=%"PRId64"\n", len, ast->cum_len);
+ av_log(s, AV_LOG_DEBUG, "%d cum_len=%"PRId64"\n", len, ast->cum_len);
#endif
+ if(url_feof(pb))
+ return -1;
+
if(last_pos == pos)
avi->non_interleaved= 1;
- else
- av_add_index_entry(st, pos, ast->cum_len / FFMAX(1, ast->sample_size), len, 0, (flags&AVIIF_INDEX) ? AVINDEX_KEYFRAME : 0);
- if(ast->sample_size)
- ast->cum_len += len;
- else
- ast->cum_len ++;
+ else if(len || !ast->sample_size)
+ av_add_index_entry(st, pos, ast->cum_len, len, 0, (flags&AVIIF_INDEX) ? AVINDEX_KEYFRAME : 0);
+ ast->cum_len += get_duration(ast, len);
last_pos= pos;
}
return 0;
int i;
int64_t last_start=0;
int64_t first_end= INT64_MAX;
+ int64_t oldpos= url_ftell(s->pb);
for(i=0; i<s->nb_streams; i++){
AVStream *st = s->streams[i];
int n= st->nb_index_entries;
+ unsigned int size;
if(n <= 0)
continue;
+ if(n >= 2){
+ int64_t pos= st->index_entries[0].pos;
+ url_fseek(s->pb, pos + 4, SEEK_SET);
+ size= get_le32(s->pb);
+ if(pos + size > st->index_entries[1].pos)
+ last_start= INT64_MAX;
+ }
+
if(st->index_entries[0].pos > last_start)
last_start= st->index_entries[0].pos;
if(st->index_entries[n-1].pos < first_end)
first_end= st->index_entries[n-1].pos;
}
+ url_fseek(s->pb, oldpos, SEEK_SET);
return last_start > first_end;
}
AVIContext *avi = s->priv_data;
ByteIOContext *pb = s->pb;
uint32_t tag, size;
- offset_t pos= url_ftell(pb);
+ int64_t pos= url_ftell(pb);
+ int ret = -1;
- url_fseek(pb, avi->movi_end, SEEK_SET);
+ if (url_fseek(pb, avi->movi_end, SEEK_SET) < 0)
+ goto the_end; // maybe truncated file
#ifdef DEBUG_SEEK
printf("movi_end=0x%"PRIx64"\n", avi->movi_end);
#endif
case MKTAG('i', 'd', 'x', '1'):
if (avi_read_idx1(s, size) < 0)
goto skip;
- else
+ ret = 0;
goto the_end;
break;
default:
skip:
size += (size & 1);
- url_fskip(pb, size);
+ if (url_fseek(pb, size, SEEK_CUR) < 0)
+ goto the_end; // something is wrong here
break;
}
}
the_end:
url_fseek(pb, pos, SEEK_SET);
- return 0;
+ return ret;
+}
+
+static void seek_subtitle(AVStream *st, AVStream *st2, int64_t timestamp) /* re-sync the embedded subtitle demuxer backing st2 after a seek on main stream st */
+{
+    AVIStream *ast2 = st2->priv_data;
+    int64_t ts2 = av_rescale_q(timestamp, st->time_base, st2->time_base); /* convert the target timestamp from st's time base into st2's */
+    av_free_packet(&ast2->sub_pkt); /* drop any pending, now-stale subtitle packet */
+    if (avformat_seek_file(ast2->sub_ctx, 0, INT64_MIN, ts2, ts2, 0) >= 0 || /* prefer a sync point at or before ts2 ... */
+        avformat_seek_file(ast2->sub_ctx, 0, ts2, ts2, INT64_MAX, 0) >= 0) /* ... otherwise take the first one at or after it */
+        av_read_packet(ast2->sub_ctx, &ast2->sub_pkt); /* prefetch the next subtitle packet so it is ready for interleaving */
+}
static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
AVStream *st;
int i, index;
int64_t pos;
+ AVIStream *ast;
if (!avi->index_loaded) {
/* we only load the index on demand */
assert(stream_index>= 0);
st = s->streams[stream_index];
- index= av_index_search_timestamp(st, timestamp, flags);
+ ast= st->priv_data;
+ index= av_index_search_timestamp(st, timestamp * FFMAX(ast->sample_size, 1), flags);
if(index<0)
return -1;
/* find the position */
pos = st->index_entries[index].pos;
- timestamp = st->index_entries[index].timestamp;
+ timestamp = st->index_entries[index].timestamp / FFMAX(ast->sample_size, 1);
-// av_log(NULL, AV_LOG_DEBUG, "XX %"PRId64" %d %"PRId64"\n", timestamp, index, st->index_entries[index].timestamp);
+// av_log(s, AV_LOG_DEBUG, "XX %"PRId64" %d %"PRId64"\n", timestamp, index, st->index_entries[index].timestamp);
- if (ENABLE_DV_DEMUXER && avi->dv_demux) {
+ if (CONFIG_DV_DEMUXER && avi->dv_demux) {
/* One and only one real stream for DV in AVI, and it has video */
- /* offsets. Calling with other stream indices should have failed */
+ /* offsets. Calling with other stream indexes should have failed */
/* the av_index_search_timestamp call above. */
assert(stream_index == 0);
/* Feed the DV video stream version of the timestamp to the */
- /* DV demux so it can synth correct timestamps */
+ /* DV demux so it can synthesize correct timestamps. */
dv_offset_reset(avi->dv_demux, timestamp);
url_fseek(s->pb, pos, SEEK_SET);
ast2->packet_size=
ast2->remaining= 0;
+ if (ast2->sub_ctx) {
+ seek_subtitle(st, st2, timestamp);
+ continue;
+ }
+
if (st2->nb_index_entries <= 0)
continue;
// assert(st2->codec->block_align);
- assert(st2->time_base.den == ast2->rate);
- assert(st2->time_base.num == ast2->scale);
+ assert((int64_t)st2->time_base.num*ast2->rate == (int64_t)st2->time_base.den*ast2->scale);
index = av_index_search_timestamp(
st2,
- av_rescale(timestamp, st2->time_base.den*(int64_t)st->time_base.num, st->time_base.den * (int64_t)st2->time_base.num),
+ av_rescale_q(timestamp, st->time_base, st2->time_base) * FFMAX(ast2->sample_size, 1),
flags | AVSEEK_FLAG_BACKWARD);
if(index<0)
index=0;
index++;
}
-// av_log(NULL, AV_LOG_DEBUG, "%"PRId64" %d %"PRId64"\n", timestamp, index, st2->index_entries[index].timestamp);
+// av_log(s, AV_LOG_DEBUG, "%"PRId64" %d %"PRId64"\n", timestamp, index, st2->index_entries[index].timestamp);
/* extract the current frame number */
ast2->frame_offset = st2->index_entries[index].timestamp;
- if(ast2->sample_size)
- ast2->frame_offset *=ast2->sample_size;
}
/* do the seek */
for(i=0;i<s->nb_streams;i++) {
AVStream *st = s->streams[i];
AVIStream *ast = st->priv_data;
- av_free(ast);
av_free(st->codec->palctrl);
+ if (ast) {
+ if (ast->sub_ctx) {
+ av_freep(&ast->sub_ctx->pb);
+ av_close_input_stream(ast->sub_ctx);
+ }
+ av_free(ast->sub_buffer);
+ av_free_packet(&ast->sub_pkt);
+ }
}
if (avi->dv_demux)
AVInputFormat avi_demuxer = {
"avi",
- "avi format",
+ NULL_IF_CONFIG_SMALL("AVI format"),
sizeof(AVIContext),
avi_probe,
avi_read_header,