/*
* MOV demuxer
- * Copyright (c) 2001 Fabrice Bellard.
+ * Copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
//#define DEBUG
+#include "libavutil/intreadwrite.h"
+#include "libavutil/avstring.h"
#include "avformat.h"
#include "riff.h"
#include "isom.h"
#include "dv.h"
-#include "mpeg4audio.h"
-#include "mpegaudiodata.h"
+#include "libavcodec/mpeg4audio.h"
+#include "libavcodec/mpegaudiodata.h"
-#ifdef CONFIG_ZLIB
+#if CONFIG_ZLIB
#include <zlib.h>
#endif
int first;
int count;
int id;
-} MOV_stsc_t;
+} MOVStsc;
typedef struct {
uint32_t type;
char *path;
-} MOV_dref_t;
+} MOVDref;
typedef struct {
uint32_t type;
int64_t offset;
int64_t size; /* total size (excluding the size and type fields) */
-} MOV_atom_t;
+} MOVAtom;
struct MOVParseTableEntry;
unsigned int chunk_count;
int64_t *chunk_offsets;
unsigned int stts_count;
- MOV_stts_t *stts_data;
+ MOVStts *stts_data;
unsigned int ctts_count;
- MOV_stts_t *ctts_data;
- unsigned int edit_count; /* number of 'edit' (elst atom) */
- unsigned int sample_to_chunk_sz;
- MOV_stsc_t *sample_to_chunk;
- int sample_to_ctime_index;
- int sample_to_ctime_sample;
+ MOVStts *ctts_data;
+ unsigned int stsc_count;
+ MOVStsc *stsc_data;
+ int ctts_index;
+ int ctts_sample;
unsigned int sample_size;
unsigned int sample_count;
int *sample_sizes;
int *keyframes;
int time_scale;
int time_rate;
+ int time_offset; ///< time offset of the first edit list entry
int current_sample;
unsigned int bytes_per_frame;
unsigned int samples_per_frame;
int dv_audio_container;
- int pseudo_stream_id;
+ int pseudo_stream_id; ///< -1 means demux all ids
int16_t audio_cid; ///< stsd audio compression id
unsigned drefs_count;
- MOV_dref_t *drefs;
+ MOVDref *drefs;
int dref_id;
+ int wrong_dts; ///< dts are wrong due to negative ctts
+ int width; ///< tkhd width
+ int height; ///< tkhd height
} MOVStreamContext;
typedef struct MOVContext {
MOVFragment fragment; ///< current fragment in moof atom
MOVTrackExt *trex_data;
unsigned trex_count;
+ int itunes_metadata; ///< metadata are itunes style
} MOVContext;
/* links atom IDs to parse functions */
typedef struct MOVParseTableEntry {
uint32_t type;
- int (*parse)(MOVContext *ctx, ByteIOContext *pb, MOV_atom_t atom);
+ int (*parse)(MOVContext *ctx, ByteIOContext *pb, MOVAtom atom);
} MOVParseTableEntry;
static const MOVParseTableEntry mov_default_parse_table[];
-static int mov_read_default(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_default(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
int64_t total_size = 0;
- MOV_atom_t a;
+ MOVAtom a;
int i;
int err = 0;
if (mov_default_parse_table[i].type == 0) { /* skip leaf atoms data */
url_fskip(pb, a.size);
} else {
- offset_t start_pos = url_ftell(pb);
+ int64_t start_pos = url_ftell(pb);
int64_t left;
err = mov_default_parse_table[i].parse(c, pb, a);
if (url_is_streamed(pb) && c->found_moov && c->found_mdat)
return err;
}
-static int mov_read_dref(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_dref(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
MOVStreamContext *sc = st->priv_data;
entries = get_be32(pb);
if (entries >= UINT_MAX / sizeof(*sc->drefs))
return -1;
- sc->drefs_count = entries;
sc->drefs = av_mallocz(entries * sizeof(*sc->drefs));
+ if (!sc->drefs)
+ return AVERROR(ENOMEM);
+ sc->drefs_count = entries;
for (i = 0; i < sc->drefs_count; i++) {
- MOV_dref_t *dref = &sc->drefs[i];
+ MOVDref *dref = &sc->drefs[i];
uint32_t size = get_be32(pb);
- offset_t next = url_ftell(pb) + size - 4;
+ int64_t next = url_ftell(pb) + size - 4;
dref->type = get_le32(pb);
get_be32(pb); // version + flags
return 0;
}
-static int mov_read_hdlr(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_hdlr(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
uint32_t type;
{ CODEC_ID_NONE, 0 },
};
-static int mov_read_esds(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_esds(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
int tag, len;
MPEG4AudioConfig cfg;
ff_mpeg4audio_get_config(&cfg, st->codec->extradata,
st->codec->extradata_size);
- if (!cfg.chan_config || cfg.chan_config > 7)
+ if (cfg.chan_config > 7)
return -1;
st->codec->channels = ff_mpeg4audio_channels[cfg.chan_config];
if (cfg.object_type == 29 && cfg.sampling_index < 3) // old mp3on4
return 0;
}
+/* 'pasp' atom: pixel aspect ratio, stored as a 32-bit hSpacing (num)
+ * followed by a 32-bit vSpacing (den); copied into the stream's
+ * sample_aspect_ratio, warning if a different non-default value was
+ * already set (e.g. computed from the tkhd display matrix). */
+static int mov_read_pasp(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
+{
+ const int num = get_be32(pb);
+ const int den = get_be32(pb);
+ AVStream * const st = c->fc->streams[c->fc->nb_streams-1];
+ if (den != 0) {
+ // warn only when overriding an explicitly-set, differing value
+ if ((st->sample_aspect_ratio.den != 1 || st->sample_aspect_ratio.num) && // default
+ (den != st->sample_aspect_ratio.den || num != st->sample_aspect_ratio.num))
+ av_log(c->fc, AV_LOG_WARNING,
+ "sample aspect ratio already set to %d:%d, overriding by 'pasp' atom\n",
+ st->sample_aspect_ratio.num, st->sample_aspect_ratio.den);
+ st->sample_aspect_ratio.num = num;
+ st->sample_aspect_ratio.den = den;
+ }
+ return 0;
+}
+
/* this atom contains actual media data */
-static int mov_read_mdat(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_mdat(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
if(atom.size == 0) /* wrong one (MP4) */
return 0;
return 0; /* now go for moov */
}
-static int mov_read_ftyp(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_ftyp(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
uint32_t type = get_le32(pb);
}
/* this atom should contain all header atoms */
-static int mov_read_moov(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_moov(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
if (mov_read_default(c, pb, atom) < 0)
return -1;
return 0; /* now go for mdat */
}
-static int mov_read_moof(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_moof(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
c->fragment.moof_offset = url_ftell(pb) - 8;
dprintf(c->fc, "moof offset %llx\n", c->fragment.moof_offset);
return mov_read_default(c, pb, atom);
}
-static int mov_read_mdhd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_mdhd(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
MOVStreamContext *sc = st->priv_data;
int version = get_byte(pb);
- int lang;
+ char language[4] = {0};
+ unsigned lang;
if (version > 1)
return -1; /* unsupported */
st->duration = (version == 1) ? get_be64(pb) : get_be32(pb); /* duration */
lang = get_be16(pb); /* language */
- ff_mov_lang_to_iso639(lang, st->language);
+ if (ff_mov_lang_to_iso639(lang, language))
+ av_metadata_set(&st->metadata, "language", language);
get_be16(pb); /* quality */
return 0;
}
-static int mov_read_mvhd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_mvhd(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
int version = get_byte(pb); /* version */
get_be24(pb); /* flags */
return 0;
}
-static int mov_read_smi(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_smi(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
return 0;
}
-static int mov_read_enda(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_enda(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
int little_endian = get_be16(pb);
- if (little_endian) {
+ dprintf(c->fc, "enda %d\n", little_endian);
+ if (little_endian == 1) {
switch (st->codec->codec_id) {
case CODEC_ID_PCM_S24BE:
st->codec->codec_id = CODEC_ID_PCM_S24LE;
case CODEC_ID_PCM_S32BE:
st->codec->codec_id = CODEC_ID_PCM_S32LE;
break;
+ case CODEC_ID_PCM_F32BE:
+ st->codec->codec_id = CODEC_ID_PCM_F32LE;
+ break;
+ case CODEC_ID_PCM_F64BE:
+ st->codec->codec_id = CODEC_ID_PCM_F64LE;
+ break;
default:
break;
}
}
/* FIXME modify qdm2/svq3/h264 decoders to take full atom as extradata */
-static int mov_read_extradata(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_extradata(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
- AVStream *st = c->fc->streams[c->fc->nb_streams-1];
- uint64_t size= (uint64_t)st->codec->extradata_size + atom.size + 8 + FF_INPUT_BUFFER_PADDING_SIZE;
+ AVStream *st;
+ uint64_t size;
uint8_t *buf;
+
+ if (c->fc->nb_streams < 1) // will happen with jp2 files
+ return 0;
+ st= c->fc->streams[c->fc->nb_streams-1];
+ size= (uint64_t)st->codec->extradata_size + atom.size + 8 + FF_INPUT_BUFFER_PADDING_SIZE;
if(size > INT_MAX || (uint64_t)atom.size > INT_MAX)
return -1;
buf= av_realloc(st->codec->extradata, size);
return 0;
}
-static int mov_read_wave(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_wave(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
* This function reads atom content and puts data in extradata without tag
* nor size unlike mov_read_extradata.
*/
-static int mov_read_glbl(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_glbl(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
return 0;
}
-static int mov_read_stco(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_stco(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
MOVStreamContext *sc = st->priv_data;
if(entries >= UINT_MAX/sizeof(int64_t))
return -1;
- sc->chunk_count = entries;
sc->chunk_offsets = av_malloc(entries * sizeof(int64_t));
if (!sc->chunk_offsets)
- return -1;
+ return AVERROR(ENOMEM);
+ sc->chunk_count = entries;
+
if (atom.type == MKTAG('s','t','c','o'))
for(i=0; i<entries; i++)
sc->chunk_offsets[i] = get_be32(pb);
return 0;
}
-static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+/**
+ * Compute codec id for 'lpcm' tag.
+ * See CoreAudioTypes and AudioStreamBasicDescription at Apple.
+ * @param bps bits per sample, from the version 2 stsd sound description
+ * @param flags lpcm format-specific flags: bit 0 = floating point,
+ *              bit 1 = big endian, bit 2 = signed integer (only consulted
+ *              for 8 bps, where there is no endianness to distinguish)
+ * @return matching CODEC_ID_PCM_* value, or CODEC_ID_NONE if unsupported
+ */
+static enum CodecID mov_get_lpcm_codec_id(int bps, int flags)
+{
+ if (flags & 1) { // floating point
+ if (flags & 2) { // big endian
+ if (bps == 32) return CODEC_ID_PCM_F32BE;
+ else if (bps == 64) return CODEC_ID_PCM_F64BE;
+ } else {
+ if (bps == 32) return CODEC_ID_PCM_F32LE;
+ else if (bps == 64) return CODEC_ID_PCM_F64LE;
+ }
+ } else {
+ if (flags & 2) {
+ // NOTE: brace-less nesting below relies on C binding each
+ // 'else' to the nearest 'if'; the 8-bit branch picks
+ // signed/unsigned, the outer chain picks by sample size
+ if (bps == 8)
+ // signed integer
+ if (flags & 4) return CODEC_ID_PCM_S8;
+ else return CODEC_ID_PCM_U8;
+ else if (bps == 16) return CODEC_ID_PCM_S16BE;
+ else if (bps == 24) return CODEC_ID_PCM_S24BE;
+ else if (bps == 32) return CODEC_ID_PCM_S32BE;
+ } else {
+ if (bps == 8)
+ if (flags & 4) return CODEC_ID_PCM_S8;
+ else return CODEC_ID_PCM_U8;
+ else if (bps == 16) return CODEC_ID_PCM_S16LE;
+ else if (bps == 24) return CODEC_ID_PCM_S24LE;
+ else if (bps == 32) return CODEC_ID_PCM_S32LE;
+ }
+ }
+ return CODEC_ID_NONE;
+}
+
+static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
MOVStreamContext *sc = st->priv_data;
//Parsing Sample description table
enum CodecID id;
int dref_id;
- MOV_atom_t a = { 0, 0, 0 };
- offset_t start_pos = url_ftell(pb);
+ MOVAtom a = { 0, 0, 0 };
+ int64_t start_pos = url_ftell(pb);
int size = get_be32(pb); /* size */
uint32_t format = get_le32(pb); /* data format */
dref_id = get_be16(pb);
if (st->codec->codec_tag &&
+ st->codec->codec_tag != format &&
(c->fc->video_codec_id ? codec_get_id(codec_movvideo_tags, format) != c->fc->video_codec_id
: st->codec->codec_tag != MKTAG('j','p','e','g'))
){
/* Multiple fourcc, we skip JPEG. This is not correct, we should
* export it as a separate AVStream but this needs a few changes
* in the MOV demuxer, patch welcome. */
+ av_log(c->fc, AV_LOG_WARNING, "multiple fourcc not supported\n");
url_fskip(pb, size - (url_ftell(pb) - start_pos));
continue;
}
- sc->pseudo_stream_id= pseudo_stream_id;
+ sc->pseudo_stream_id = st->codec->codec_tag ? -1 : pseudo_stream_id;
sc->dref_id= dref_id;
st->codec->codec_tag = format;
uint8_t codec_name[32];
unsigned int color_depth;
int color_greyscale;
- int frames_per_sample;
st->codec->codec_id = id;
get_be16(pb); /* version */
get_be32(pb); /* horiz resolution */
get_be32(pb); /* vert resolution */
get_be32(pb); /* data size, always 0 */
- frames_per_sample = get_be16(pb); /* frames per samples */
-
- dprintf(c->fc, "frames/samples = %d\n", frames_per_sample);
+ get_be16(pb); /* frames per samples */
- get_buffer(pb, codec_name, 32); /* codec name, pascal string (FIXME: true for mp4?) */
+ get_buffer(pb, codec_name, 32); /* codec name, pascal string */
if (codec_name[0] <= 31) {
memcpy(st->codec->codec_name, &codec_name[1],codec_name[0]);
st->codec->codec_name[codec_name[0]] = 0;
}
- st->codec->bits_per_sample = get_be16(pb); /* depth */
+ st->codec->bits_per_coded_sample = get_be16(pb); /* depth */
st->codec->color_table_id = get_be16(pb); /* colortable id */
dprintf(c->fc, "depth %d, ctab id %d\n",
- st->codec->bits_per_sample, st->codec->color_table_id);
+ st->codec->bits_per_coded_sample, st->codec->color_table_id);
/* figure out the palette situation */
- color_depth = st->codec->bits_per_sample & 0x1F;
- color_greyscale = st->codec->bits_per_sample & 0x20;
+ color_depth = st->codec->bits_per_coded_sample & 0x1F;
+ color_greyscale = st->codec->bits_per_coded_sample & 0x20;
/* if the depth is 2, 4, or 8 bpp, file is palettized */
if ((color_depth == 2) || (color_depth == 4) ||
if (color_greyscale) {
int color_index, color_dec;
/* compute the greyscale palette */
- st->codec->bits_per_sample = color_depth;
+ st->codec->bits_per_coded_sample = color_depth;
color_count = 1 << color_depth;
color_index = 255;
color_dec = 256 / (color_count - 1);
} else
st->codec->palctrl = NULL;
} else if(st->codec->codec_type==CODEC_TYPE_AUDIO) {
- int bits_per_sample;
+ int bits_per_sample, flags;
uint16_t version = get_be16(pb);
st->codec->codec_id = id;
st->codec->channels = get_be16(pb); /* channel count */
dprintf(c->fc, "audio channels %d\n", st->codec->channels);
- st->codec->bits_per_sample = get_be16(pb); /* sample size */
+ st->codec->bits_per_coded_sample = get_be16(pb); /* sample size */
sc->audio_cid = get_be16(pb);
get_be16(pb); /* packet size = 0 */
st->codec->sample_rate = ((get_be32(pb) >> 16));
+ //Read QT version 1 fields. In version 0 these do not exist.
+ dprintf(c->fc, "version =%d, isom =%d\n",version,c->isom);
+ if(!c->isom) {
+ if(version==1) {
+ sc->samples_per_frame = get_be32(pb);
+ get_be32(pb); /* bytes per packet */
+ sc->bytes_per_frame = get_be32(pb);
+ get_be32(pb); /* bytes per sample */
+ } else if(version==2) {
+ get_be32(pb); /* sizeof struct only */
+ st->codec->sample_rate = av_int2dbl(get_be64(pb)); /* float 64 */
+ st->codec->channels = get_be32(pb);
+ get_be32(pb); /* always 0x7F000000 */
+ st->codec->bits_per_coded_sample = get_be32(pb); /* bits per channel if sound is uncompressed */
+ flags = get_be32(pb); /* lcpm format specific flag */
+ sc->bytes_per_frame = get_be32(pb); /* bytes per audio packet if constant */
+ sc->samples_per_frame = get_be32(pb); /* lpcm frames per audio packet if constant */
+ if (format == MKTAG('l','p','c','m'))
+ st->codec->codec_id = mov_get_lpcm_codec_id(st->codec->bits_per_coded_sample, flags);
+ }
+ }
+
switch (st->codec->codec_id) {
case CODEC_ID_PCM_S8:
case CODEC_ID_PCM_U8:
- if (st->codec->bits_per_sample == 16)
+ if (st->codec->bits_per_coded_sample == 16)
st->codec->codec_id = CODEC_ID_PCM_S16BE;
break;
case CODEC_ID_PCM_S16LE:
case CODEC_ID_PCM_S16BE:
- if (st->codec->bits_per_sample == 8)
+ if (st->codec->bits_per_coded_sample == 8)
st->codec->codec_id = CODEC_ID_PCM_S8;
- else if (st->codec->bits_per_sample == 24)
- st->codec->codec_id = CODEC_ID_PCM_S24BE;
+ else if (st->codec->bits_per_coded_sample == 24)
+ st->codec->codec_id =
+ st->codec->codec_id == CODEC_ID_PCM_S16BE ?
+ CODEC_ID_PCM_S24BE : CODEC_ID_PCM_S24LE;
break;
/* set values for old format before stsd version 1 appeared */
case CODEC_ID_MACE3:
break;
}
- //Read QT version 1 fields. In version 0 these do not exist.
- dprintf(c->fc, "version =%d, isom =%d\n",version,c->isom);
- if(!c->isom) {
- if(version==1) {
- sc->samples_per_frame = get_be32(pb);
- get_be32(pb); /* bytes per packet */
- sc->bytes_per_frame = get_be32(pb);
- get_be32(pb); /* bytes per sample */
- } else if(version==2) {
- get_be32(pb); /* sizeof struct only */
- st->codec->sample_rate = av_int2dbl(get_be64(pb)); /* float 64 */
- st->codec->channels = get_be32(pb);
- get_be32(pb); /* always 0x7F000000 */
- get_be32(pb); /* bits per channel if sound is uncompressed */
- get_be32(pb); /* lcpm format specific flag */
- get_be32(pb); /* bytes per audio packet if constant */
- get_be32(pb); /* lpcm frames per audio packet if constant */
- }
- }
-
bits_per_sample = av_get_bits_per_sample(st->codec->codec_id);
if (bits_per_sample) {
- st->codec->bits_per_sample = bits_per_sample;
+ st->codec->bits_per_coded_sample = bits_per_sample;
sc->sample_size = (bits_per_sample >> 3) * st->codec->channels;
}
} else if(st->codec->codec_type==CODEC_TYPE_SUBTITLE){
+ // ttxt stsd contains display flags, justification, background
+ // color, fonts, and default styles, so fake an atom to read it
+ MOVAtom fake_atom = { .size = size - (url_ftell(pb) - start_pos) };
+ mov_read_glbl(c, pb, fake_atom);
st->codec->codec_id= id;
+ st->codec->width = sc->width;
+ st->codec->height = sc->height;
} else {
/* other codec type, just skip (rtp, mp4s, tmcd ...) */
url_fskip(pb, size - (url_ftell(pb) - start_pos));
/* special codec parameters handling */
switch (st->codec->codec_id) {
-#ifdef CONFIG_DV_DEMUXER
+#if CONFIG_DV_DEMUXER
case CODEC_ID_DVAUDIO:
- c->dv_fctx = av_alloc_format_context();
+ c->dv_fctx = avformat_alloc_context();
c->dv_demux = dv_init_demux(c->dv_fctx);
if (!c->dv_demux) {
av_log(c->fc, AV_LOG_ERROR, "dv demux context init error\n");
break;
#endif
/* no ifdef since parameters are always those */
- case CODEC_ID_AMR_WB:
- st->codec->sample_rate= 16000;
+ case CODEC_ID_QCELP:
+ st->codec->frame_size= 160;
st->codec->channels= 1; /* really needed */
break;
- case CODEC_ID_QCELP:
case CODEC_ID_AMR_NB:
+ case CODEC_ID_AMR_WB:
st->codec->frame_size= sc->samples_per_frame;
- st->codec->sample_rate= 8000;
st->codec->channels= 1; /* really needed */
+ /* force sample rate for amr, stsd in 3gp does not store sample rate */
+ if (st->codec->codec_id == CODEC_ID_AMR_NB)
+ st->codec->sample_rate = 8000;
+ else if (st->codec->codec_id == CODEC_ID_AMR_WB)
+ st->codec->sample_rate = 16000;
break;
case CODEC_ID_MP2:
case CODEC_ID_MP3:
st->codec->block_align = sc->bytes_per_frame;
break;
case CODEC_ID_ALAC:
- if (st->codec->extradata_size == 36)
- st->codec->frame_size = AV_RB32((st->codec->extradata+12));
+ if (st->codec->extradata_size == 36) {
+ st->codec->frame_size = AV_RB32(st->codec->extradata+12);
+ st->codec->channels = AV_RB8 (st->codec->extradata+21);
+ }
break;
default:
break;
return 0;
}
-static int mov_read_stsc(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_stsc(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
MOVStreamContext *sc = st->priv_data;
entries = get_be32(pb);
- if(entries >= UINT_MAX / sizeof(MOV_stsc_t))
- return -1;
-
dprintf(c->fc, "track[%i].stsc.entries = %i\n", c->fc->nb_streams-1, entries);
- sc->sample_to_chunk_sz = entries;
- sc->sample_to_chunk = av_malloc(entries * sizeof(MOV_stsc_t));
- if (!sc->sample_to_chunk)
+ if(entries >= UINT_MAX / sizeof(*sc->stsc_data))
return -1;
+ sc->stsc_data = av_malloc(entries * sizeof(*sc->stsc_data));
+ if (!sc->stsc_data)
+ return AVERROR(ENOMEM);
+ sc->stsc_count = entries;
+
for(i=0; i<entries; i++) {
- sc->sample_to_chunk[i].first = get_be32(pb);
- sc->sample_to_chunk[i].count = get_be32(pb);
- sc->sample_to_chunk[i].id = get_be32(pb);
+ sc->stsc_data[i].first = get_be32(pb);
+ sc->stsc_data[i].count = get_be32(pb);
+ sc->stsc_data[i].id = get_be32(pb);
}
return 0;
}
-static int mov_read_stss(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_stss(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
MOVStreamContext *sc = st->priv_data;
entries = get_be32(pb);
+ dprintf(c->fc, "keyframe_count = %d\n", entries);
+
if(entries >= UINT_MAX / sizeof(int))
return -1;
-
- sc->keyframe_count = entries;
-
- dprintf(c->fc, "keyframe_count = %d\n", sc->keyframe_count);
-
sc->keyframes = av_malloc(entries * sizeof(int));
if (!sc->keyframes)
- return -1;
+ return AVERROR(ENOMEM);
+ sc->keyframe_count = entries;
+
for(i=0; i<entries; i++) {
sc->keyframes[i] = get_be32(pb);
//dprintf(c->fc, "keyframes[]=%d\n", sc->keyframes[i]);
return 0;
}
-static int mov_read_stsz(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_stsz(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
MOVStreamContext *sc = st->priv_data;
if (!sc->sample_size) /* do not overwrite value computed in stsd */
sc->sample_size = sample_size;
entries = get_be32(pb);
- if(entries >= UINT_MAX / sizeof(int))
- return -1;
+
+ dprintf(c->fc, "sample_size = %d sample_count = %d\n", sc->sample_size, entries);
sc->sample_count = entries;
if (sample_size)
return 0;
- dprintf(c->fc, "sample_size = %d sample_count = %d\n", sc->sample_size, sc->sample_count);
-
+ if(entries >= UINT_MAX / sizeof(int))
+ return -1;
sc->sample_sizes = av_malloc(entries * sizeof(int));
if (!sc->sample_sizes)
- return -1;
+ return AVERROR(ENOMEM);
+
for(i=0; i<entries; i++)
sc->sample_sizes[i] = get_be32(pb);
return 0;
}
-static int mov_read_stts(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_stts(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
MOVStreamContext *sc = st->priv_data;
get_byte(pb); /* version */
get_be24(pb); /* flags */
entries = get_be32(pb);
- if(entries >= UINT_MAX / sizeof(MOV_stts_t))
- return -1;
- sc->stts_count = entries;
- sc->stts_data = av_malloc(entries * sizeof(MOV_stts_t));
- if (!sc->stts_data)
- return -1;
dprintf(c->fc, "track[%i].stts.entries = %i\n", c->fc->nb_streams-1, entries);
- sc->time_rate=0;
+ if(entries >= UINT_MAX / sizeof(*sc->stts_data))
+ return -1;
+ sc->stts_data = av_malloc(entries * sizeof(*sc->stts_data));
+ if (!sc->stts_data)
+ return AVERROR(ENOMEM);
+ sc->stts_count = entries;
for(i=0; i<entries; i++) {
int sample_duration;
sc->stts_data[i].count= sample_count;
sc->stts_data[i].duration= sample_duration;
- sc->time_rate= ff_gcd(sc->time_rate, sample_duration);
+ sc->time_rate= av_gcd(sc->time_rate, sample_duration);
dprintf(c->fc, "sample_count=%d, sample_duration=%d\n",sample_count,sample_duration);
return 0;
}
-static int mov_read_ctts(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_ctts(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
MOVStreamContext *sc = st->priv_data;
get_byte(pb); /* version */
get_be24(pb); /* flags */
entries = get_be32(pb);
- if(entries >= UINT_MAX / sizeof(MOV_stts_t))
- return -1;
- sc->ctts_count = entries;
- sc->ctts_data = av_malloc(entries * sizeof(MOV_stts_t));
- if (!sc->ctts_data)
- return -1;
dprintf(c->fc, "track[%i].ctts.entries = %i\n", c->fc->nb_streams-1, entries);
+ if(entries >= UINT_MAX / sizeof(*sc->ctts_data))
+ return -1;
+ sc->ctts_data = av_malloc(entries * sizeof(*sc->ctts_data));
+ if (!sc->ctts_data)
+ return AVERROR(ENOMEM);
+ sc->ctts_count = entries;
+
for(i=0; i<entries; i++) {
int count =get_be32(pb);
int duration =get_be32(pb);
if (duration < 0) {
- av_log(c->fc, AV_LOG_ERROR, "negative ctts, ignoring\n");
- sc->ctts_count = 0;
- url_fskip(pb, 8 * (entries - i - 1));
- break;
+ sc->wrong_dts = 1;
+ st->codec->has_b_frames = 1;
}
sc->ctts_data[i].count = count;
sc->ctts_data[i].duration= duration;
- sc->time_rate= ff_gcd(sc->time_rate, duration);
+ sc->time_rate= av_gcd(sc->time_rate, FFABS(duration));
}
return 0;
}
static void mov_build_index(MOVContext *mov, AVStream *st)
{
MOVStreamContext *sc = st->priv_data;
- offset_t current_offset;
+ int64_t current_offset;
int64_t current_dts = 0;
unsigned int stts_index = 0;
unsigned int stsc_index = 0;
unsigned int stss_index = 0;
unsigned int i, j;
+ /* adjust first dts according to edit list */
+ if (sc->time_offset) {
+ assert(sc->time_offset % sc->time_rate == 0);
+ current_dts = - (sc->time_offset / sc->time_rate);
+ }
+
/* only use old uncompressed audio chunk demuxing when stts specifies it */
if (!(st->codec->codec_type == CODEC_TYPE_AUDIO &&
sc->stts_count == 1 && sc->stts_data[0].duration == 1)) {
st->nb_frames = sc->sample_count;
for (i = 0; i < sc->chunk_count; i++) {
current_offset = sc->chunk_offsets[i];
- if (stsc_index + 1 < sc->sample_to_chunk_sz &&
- i + 1 == sc->sample_to_chunk[stsc_index + 1].first)
+ if (stsc_index + 1 < sc->stsc_count &&
+ i + 1 == sc->stsc_data[stsc_index + 1].first)
stsc_index++;
- for (j = 0; j < sc->sample_to_chunk[stsc_index].count; j++) {
+ for (j = 0; j < sc->stsc_data[stsc_index].count; j++) {
if (current_sample >= sc->sample_count) {
av_log(mov->fc, AV_LOG_ERROR, "wrong sample count\n");
goto out;
stss_index++;
}
sample_size = sc->sample_size > 0 ? sc->sample_size : sc->sample_sizes[current_sample];
- dprintf(mov->fc, "AVIndex stream %d, sample %d, offset %"PRIx64", dts %"PRId64", "
- "size %d, distance %d, keyframe %d\n", st->index, current_sample,
- current_offset, current_dts, sample_size, distance, keyframe);
- if(sc->sample_to_chunk[stsc_index].id - 1 == sc->pseudo_stream_id)
+ if(sc->pseudo_stream_id == -1 ||
+ sc->stsc_data[stsc_index].id - 1 == sc->pseudo_stream_id) {
av_add_index_entry(st, current_offset, current_dts, sample_size, distance,
keyframe ? AVINDEX_KEYFRAME : 0);
+ dprintf(mov->fc, "AVIndex stream %d, sample %d, offset %"PRIx64", dts %"PRId64", "
+ "size %d, distance %d, keyframe %d\n", st->index, current_sample,
+ current_offset, current_dts, sample_size, distance, keyframe);
+ }
current_offset += sample_size;
assert(sc->stts_data[stts_index].duration % sc->time_rate == 0);
current_dts += sc->stts_data[stts_index].duration / sc->time_rate;
unsigned int frames = 1;
for (i = 0; i < sc->chunk_count; i++) {
current_offset = sc->chunk_offsets[i];
- if (stsc_index + 1 < sc->sample_to_chunk_sz &&
- i + 1 == sc->sample_to_chunk[stsc_index + 1].first)
+ if (stsc_index + 1 < sc->stsc_count &&
+ i + 1 == sc->stsc_data[stsc_index + 1].first)
stsc_index++;
- chunk_samples = sc->sample_to_chunk[stsc_index].count;
+ chunk_samples = sc->stsc_data[stsc_index].count;
/* get chunk size, beware of alaw/ulaw/mace */
if (sc->samples_per_frame > 0 &&
(chunk_samples * sc->bytes_per_frame % sc->samples_per_frame == 0)) {
sc->sample_count = st->nb_index_entries;
}
-static int mov_read_trak(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_trak(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
AVStream *st;
MOVStreamContext *sc;
st->priv_data = sc;
st->codec->codec_type = CODEC_TYPE_DATA;
- st->start_time = 0; /* XXX: check */
+ sc->ffindex = st->index;
if ((ret = mov_read_default(c, pb, atom)) < 0)
return ret;
/* sanity checks */
- if(sc->chunk_count && (!sc->stts_count || !sc->sample_to_chunk_sz ||
+ if(sc->chunk_count && (!sc->stts_count || !sc->stsc_count ||
(!sc->sample_size && !sc->sample_count))){
av_log(c->fc, AV_LOG_ERROR, "stream %d, missing mandatory atoms, broken header\n",
st->index);
av_set_pts_info(st, 64, sc->time_rate, sc->time_scale);
if (st->codec->codec_type == CODEC_TYPE_AUDIO &&
- !st->codec->frame_size && sc->stts_count == 1)
- st->codec->frame_size = av_rescale(sc->time_rate, st->codec->sample_rate, sc->time_scale);
+ !st->codec->frame_size && sc->stts_count == 1) {
+ st->codec->frame_size = av_rescale(sc->stts_data[0].duration,
+ st->codec->sample_rate, sc->time_scale);
+ dprintf(c->fc, "frame size %d\n", st->codec->frame_size);
+ }
if(st->duration != AV_NOPTS_VALUE){
assert(st->duration % sc->time_rate == 0);
st->duration /= sc->time_rate;
}
- sc->ffindex = st->index;
+
mov_build_index(c, st);
if (sc->dref_id-1 < sc->drefs_count && sc->drefs[sc->dref_id-1].path) {
sc->pb = c->fc->pb;
switch (st->codec->codec_id) {
-#ifdef CONFIG_H261_DECODER
+#if CONFIG_H261_DECODER
case CODEC_ID_H261:
#endif
-#ifdef CONFIG_H263_DECODER
+#if CONFIG_H263_DECODER
case CODEC_ID_H263:
#endif
-#ifdef CONFIG_MPEG4_DECODER
+#if CONFIG_MPEG4_DECODER
case CODEC_ID_MPEG4:
#endif
st->codec->width= 0; /* let decoder init width/height */
st->codec->height= 0;
break;
-#ifdef CONFIG_VORBIS_DECODER
- case CODEC_ID_VORBIS:
-#endif
- st->codec->sample_rate= 0; /* let decoder init parameters properly */
- break;
}
/* Do not need those anymore. */
av_freep(&sc->chunk_offsets);
- av_freep(&sc->sample_to_chunk);
+ av_freep(&sc->stsc_data);
av_freep(&sc->sample_sizes);
av_freep(&sc->keyframes);
av_freep(&sc->stts_data);
return 0;
}
-static void mov_parse_udta_string(ByteIOContext *pb, char *str, int size)
+/* iTunes 'ilst' atom: parse its children with itunes_metadata set so that
+ * mov_read_udta_string decodes the iTunes 'data' sub-atom layout instead
+ * of the classic QuickTime length+language header. */
+static int mov_read_ilst(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
- uint16_t str_size = get_be16(pb); /* string length */;
-
- get_be16(pb); /* skip language */
- get_buffer(pb, str, FFMIN(size, str_size));
+ int ret;
+ c->itunes_metadata = 1;
+ ret = mov_read_default(c, pb, atom);
+ c->itunes_metadata = 0; // reset so later udta atoms parse as QT style
+ return ret;
}
-static int mov_read_udta(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+/* 'meta' atom: skip the 4-byte version/flags header, then parse the
+ * remaining payload as a list of child atoms (hdlr, ilst, ...). */
+static int mov_read_meta(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
- uint64_t end = url_ftell(pb) + atom.size;
-
- while (url_ftell(pb) + 8 < end) {
- uint32_t tag_size = get_be32(pb);
- uint32_t tag = get_le32(pb);
- uint64_t next = url_ftell(pb) + tag_size - 8;
-
- if (next > end) // stop if tag_size is wrong
- break;
+ url_fskip(pb, 4); // version + flags
+ atom.size -= 4; // keep size consistent with what is left to parse
+ return mov_read_default(c, pb, atom);
+}
- switch (tag) {
- case MKTAG(0xa9,'n','a','m'):
- mov_parse_udta_string(pb, c->fc->title, sizeof(c->fc->title));
- break;
- case MKTAG(0xa9,'w','r','t'):
- mov_parse_udta_string(pb, c->fc->author, sizeof(c->fc->author));
- break;
- case MKTAG(0xa9,'c','p','y'):
- mov_parse_udta_string(pb, c->fc->copyright, sizeof(c->fc->copyright));
- break;
- case MKTAG(0xa9,'i','n','f'):
- mov_parse_udta_string(pb, c->fc->comment, sizeof(c->fc->comment));
- break;
- default:
- break;
- }
+/* iTunes 'trkn' atom: its 'data' payload holds the track number.
+ * The first two 32-bit words (type + locale/unknown) are skipped; the
+ * next 32 bits carry the track number, stored into the format context. */
+static int mov_read_trkn(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
+{
+ get_be32(pb); // type
+ get_be32(pb); // unknown
+ c->fc->track = get_be32(pb);
+ dprintf(c->fc, "%.4s %d\n", (char*)&atom.type, c->fc->track);
+ return 0;
+}
- url_fseek(pb, next, SEEK_SET);
+/**
+ * Parse a metadata string atom and store it in the AVFormatContext
+ * metadata dictionary. Handles both the iTunes layout (value wrapped in
+ * a 'data' sub-atom, enabled via c->itunes_metadata) and the classic
+ * QuickTime layout (16-bit length + 16-bit language code). When a
+ * language is present and not "und", the value is additionally stored
+ * under a "key-lang" suffixed key.
+ */
+static int mov_read_udta_string(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
+{
+ char str[1024], key2[16], language[4] = {0};
+ const char *key = NULL;
+ uint16_t str_size;
+
+ if (c->itunes_metadata) {
+ // iTunes style: 'data' sub-atom with a 16-byte header
+ // (size, tag, type, locale) preceding the raw string
+ int data_size = get_be32(pb);
+ int tag = get_le32(pb);
+ if (tag == MKTAG('d','a','t','a')) {
+ get_be32(pb); // type
+ get_be32(pb); // unknown
+ str_size = data_size - 16;
+ atom.size -= 16;
+ } else return 0;
+ } else {
+ // classic QuickTime style header
+ str_size = get_be16(pb); // string length
+ ff_mov_lang_to_iso639(get_be16(pb), language);
+ atom.size -= 4;
+ }
+ switch (atom.type) {
+ case MKTAG(0xa9,'n','a','m'): key = "title"; break;
+ case MKTAG(0xa9,'a','u','t'):
+ case MKTAG(0xa9,'A','R','T'):
+ case MKTAG(0xa9,'w','r','t'): key = "author"; break;
+ case MKTAG(0xa9,'c','p','y'): key = "copyright"; break;
+ case MKTAG(0xa9,'c','m','t'):
+ case MKTAG(0xa9,'i','n','f'): key = "comment"; break;
+ case MKTAG(0xa9,'a','l','b'): key = "album"; break;
+ case MKTAG(0xa9,'d','a','y'): key = "year"; break;
+ case MKTAG(0xa9,'g','e','n'): key = "genre"; break;
+ case MKTAG(0xa9,'t','o','o'):
+ case MKTAG(0xa9,'e','n','c'): key = "muxer"; break;
}
+ if (!key)
+ return 0;
+ if (atom.size < 0)
+ return -1;
+ // clamp to the buffer and to the remaining atom payload
+ str_size = FFMIN3(sizeof(str)-1, str_size, atom.size);
+ get_buffer(pb, str, str_size);
+ str[str_size] = 0;
+ av_metadata_set(&c->fc->metadata, key, str);
+ if (*language && strcmp(language, "und")) {
+ snprintf(key2, sizeof(key2), "%s-%s", key, language);
+ av_metadata_set(&c->fc->metadata, key2, str);
+ }
+ // use PRId64 for int64_t atom.size: "%lld" mismatches int64_t (long)
+ // on LP64 platforms, which is undefined behavior in *printf
+ dprintf(c->fc, "%.4s %s %d %"PRId64"\n", (char*)&atom.type, str, str_size, atom.size);
return 0;
}
-static int mov_read_tkhd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_tkhd(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
+ int i;
+ int width;
+ int height;
+ int64_t disp_transform[2];
+ int display_matrix[3][2];
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = st->priv_data;
int version = get_byte(pb);
get_be24(pb); /* flags */
}
st->id = (int)get_be32(pb); /* track id (NOT 0 !)*/
get_be32(pb); /* reserved */
- st->start_time = 0; /* check */
+
/* highlevel (considering edits) duration in movie timebase */
(version == 1) ? get_be64(pb) : get_be32(pb);
get_be32(pb); /* reserved */
get_be16(pb); /* volume */
get_be16(pb); /* reserved */
- url_fskip(pb, 36); /* display matrix */
-
- /* those are fixed-point */
- get_be32(pb); /* track width */
- get_be32(pb); /* track height */
+ //read in the display matrix (outlined in ISO 14496-12, Section 6.2.2)
+ // they're kept in fixed point format through all calculations
+ // ignore u,v,z b/c we don't need the scale factor to calc aspect ratio
+ for (i = 0; i < 3; i++) {
+ display_matrix[i][0] = get_be32(pb); // 16.16 fixed point
+ display_matrix[i][1] = get_be32(pb); // 16.16 fixed point
+ get_be32(pb); // 2.30 fixed point (not used)
+ }
+ width = get_be32(pb); // 16.16 fixed point track width
+ height = get_be32(pb); // 16.16 fixed point track height
+ sc->width = width >> 16;
+ sc->height = height >> 16;
+
+ //transform the display width/height according to the matrix
+ // skip this if the display matrix is the default identity matrix
+ // to keep the same scale, use [width height 1<<16]
+ if (width && height &&
+ (display_matrix[0][0] != 65536 || display_matrix[0][1] ||
+ display_matrix[1][0] || display_matrix[1][1] != 65536 ||
+ display_matrix[2][0] || display_matrix[2][1])) {
+ for (i = 0; i < 2; i++)
+ disp_transform[i] =
+ (int64_t) width * display_matrix[0][i] +
+ (int64_t) height * display_matrix[1][i] +
+ ((int64_t) display_matrix[2][i] << 16);
+
+ //sample aspect ratio is new width/height divided by old width/height
+ st->sample_aspect_ratio = av_d2q(
+ ((double) disp_transform[0] * height) /
+ ((double) disp_transform[1] * width), INT_MAX);
+ }
return 0;
}
-static int mov_read_tfhd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_tfhd(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
MOVFragment *frag = &c->fragment;
MOVTrackExt *trex = NULL;
return 0;
}
-static int mov_read_trex(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_trex(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
MOVTrackExt *trex;
if ((uint64_t)c->trex_count+1 >= UINT_MAX / sizeof(*c->trex_data))
return -1;
- c->trex_data = av_realloc(c->trex_data, (c->trex_count+1)*sizeof(*c->trex_data));
- if (!c->trex_data)
+ trex = av_realloc(c->trex_data, (c->trex_count+1)*sizeof(*c->trex_data));
+ if (!trex)
return AVERROR(ENOMEM);
+ c->trex_data = trex;
trex = &c->trex_data[c->trex_count++];
get_byte(pb); /* version */
get_be24(pb); /* flags */
return 0;
}
-static int mov_read_trun(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_trun(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
MOVFragment *frag = &c->fragment;
AVStream *st;
if (flags & 0x001) data_offset = get_be32(pb);
if (flags & 0x004) first_sample_flags = get_be32(pb);
if (flags & 0x800) {
+ MOVStts *ctts_data;
if ((uint64_t)entries+sc->ctts_count >= UINT_MAX/sizeof(*sc->ctts_data))
return -1;
- sc->ctts_data = av_realloc(sc->ctts_data,
- (entries+sc->ctts_count)*sizeof(*sc->ctts_data));
- if (!sc->ctts_data)
+ ctts_data = av_realloc(sc->ctts_data,
+ (entries+sc->ctts_count)*sizeof(*sc->ctts_data));
+ if (!ctts_data)
return AVERROR(ENOMEM);
+ sc->ctts_data = ctts_data;
}
dts = st->duration;
offset = frag->base_data_offset + data_offset;
/* this atom should be null (from specs), but some buggy files put the 'moov' atom inside it... */
/* like the files created with Adobe Premiere 5.0, for samples see */
/* http://graphics.tudelft.nl/~wouter/publications/soundtests/ */
-static int mov_read_wide(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_wide(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
int err;
return err;
}
-static int mov_read_cmov(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_cmov(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
-#ifdef CONFIG_ZLIB
+#if CONFIG_ZLIB
ByteIOContext ctx;
uint8_t *cmov_data;
uint8_t *moov_data; /* uncompressed data */
long cmov_len, moov_len;
- int ret;
+ int ret = -1;
get_be32(pb); /* dcom atom */
if (get_le32(pb) != MKTAG('d','c','o','m'))
cmov_data = av_malloc(cmov_len);
if (!cmov_data)
- return -1;
+ return AVERROR(ENOMEM);
moov_data = av_malloc(moov_len);
if (!moov_data) {
av_free(cmov_data);
- return -1;
+ return AVERROR(ENOMEM);
}
get_buffer(pb, cmov_data, cmov_len);
if(uncompress (moov_data, (uLongf *) &moov_len, (const Bytef *)cmov_data, cmov_len) != Z_OK)
- return -1;
+ goto free_and_return;
if(init_put_byte(&ctx, moov_data, moov_len, 0, NULL, NULL, NULL, NULL) != 0)
- return -1;
+ goto free_and_return;
atom.type = MKTAG('m','o','o','v');
atom.offset = 0;
atom.size = moov_len;
// { int fd = open("/tmp/uncompheader.mov", O_WRONLY | O_CREAT); write(fd, moov_data, moov_len); close(fd); }
#endif
ret = mov_read_default(c, &ctx, atom);
+free_and_return:
av_free(moov_data);
av_free(cmov_data);
return ret;
}
/* edit list atom */
-static int mov_read_elst(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+static int mov_read_elst(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
{
MOVStreamContext *sc = c->fc->streams[c->fc->nb_streams-1]->priv_data;
int i, edit_count;
get_byte(pb); /* version */
get_be24(pb); /* flags */
- edit_count= sc->edit_count = get_be32(pb); /* entries */
+ edit_count = get_be32(pb); /* entries */
for(i=0; i<edit_count; i++){
int time;
get_be32(pb); /* Track duration */
time = get_be32(pb); /* Media time */
get_be32(pb); /* Media rate */
- if (time != 0)
- av_log(c->fc, AV_LOG_WARNING, "edit list not starting at 0, "
- "a/v desync might occur, patch welcome\n");
+ if (i == 0 && time != -1) {
+ sc->time_offset = time;
+ sc->time_rate = av_gcd(sc->time_rate, time);
+ }
}
- dprintf(c->fc, "track[%i].edit_count = %i\n", c->fc->nb_streams-1, sc->edit_count);
+
+ if(edit_count > 1)
+ av_log(c->fc, AV_LOG_WARNING, "multiple edit list entries, "
+ "a/v desync might occur, patch welcome\n");
+
+ dprintf(c->fc, "track[%i].edit_count = %i\n", c->fc->nb_streams-1, edit_count);
return 0;
}
static const MOVParseTableEntry mov_default_parse_table[] = {
+{ MKTAG('a','v','s','s'), mov_read_extradata },
{ MKTAG('c','o','6','4'), mov_read_stco },
{ MKTAG('c','t','t','s'), mov_read_ctts }, /* composition time to sample */
{ MKTAG('d','i','n','f'), mov_read_default },
{ MKTAG('f','t','y','p'), mov_read_ftyp },
{ MKTAG('g','l','b','l'), mov_read_glbl },
{ MKTAG('h','d','l','r'), mov_read_hdlr },
+{ MKTAG('i','l','s','t'), mov_read_ilst },
{ MKTAG('j','p','2','h'), mov_read_extradata },
{ MKTAG('m','d','a','t'), mov_read_mdat },
{ MKTAG('m','d','h','d'), mov_read_mdhd },
{ MKTAG('m','d','i','a'), mov_read_default },
+{ MKTAG('m','e','t','a'), mov_read_meta },
{ MKTAG('m','i','n','f'), mov_read_default },
{ MKTAG('m','o','o','f'), mov_read_moof },
{ MKTAG('m','o','o','v'), mov_read_moov },
{ MKTAG('S','M','I',' '), mov_read_smi }, /* Sorenson extension ??? */
{ MKTAG('a','l','a','c'), mov_read_extradata }, /* alac specific atom */
{ MKTAG('a','v','c','C'), mov_read_glbl },
+{ MKTAG('p','a','s','p'), mov_read_pasp },
{ MKTAG('s','t','b','l'), mov_read_default },
{ MKTAG('s','t','c','o'), mov_read_stco },
{ MKTAG('s','t','s','c'), mov_read_stsc },
{ MKTAG('t','r','a','k'), mov_read_trak },
{ MKTAG('t','r','a','f'), mov_read_default },
{ MKTAG('t','r','e','x'), mov_read_trex },
+{ MKTAG('t','r','k','n'), mov_read_trkn },
{ MKTAG('t','r','u','n'), mov_read_trun },
-{ MKTAG('u','d','t','a'), mov_read_udta },
+{ MKTAG('u','d','t','a'), mov_read_default },
{ MKTAG('w','a','v','e'), mov_read_wave },
{ MKTAG('e','s','d','s'), mov_read_esds },
{ MKTAG('w','i','d','e'), mov_read_wide }, /* place holder */
{ MKTAG('c','m','o','v'), mov_read_cmov },
+{ MKTAG(0xa9,'n','a','m'), mov_read_udta_string },
+{ MKTAG(0xa9,'w','r','t'), mov_read_udta_string },
+{ MKTAG(0xa9,'c','p','y'), mov_read_udta_string },
+{ MKTAG(0xa9,'i','n','f'), mov_read_udta_string },
+{ MKTAG(0xa9,'A','R','T'), mov_read_udta_string },
+{ MKTAG(0xa9,'a','l','b'), mov_read_udta_string },
+{ MKTAG(0xa9,'c','m','t'), mov_read_udta_string },
+{ MKTAG(0xa9,'a','u','t'), mov_read_udta_string },
+{ MKTAG(0xa9,'d','a','y'), mov_read_udta_string },
+{ MKTAG(0xa9,'g','e','n'), mov_read_udta_string },
+{ MKTAG(0xa9,'e','n','c'), mov_read_udta_string },
+{ MKTAG(0xa9,'t','o','o'), mov_read_udta_string },
{ 0, NULL }
};
case MKTAG('m','d','a','t'):
case MKTAG('p','n','o','t'): /* detect movs with preview pics like ew.mov and april.mov */
case MKTAG('u','d','t','a'): /* Packet Video PVAuthor adds this and a lot of more junk */
+ case MKTAG('f','t','y','p'):
return AVPROBE_SCORE_MAX;
/* those are more common words, so rate then a bit less */
case MKTAG('e','d','i','w'): /* xdcam files have reverted first tags */
case MKTAG('j','u','n','k'):
case MKTAG('p','i','c','t'):
return AVPROBE_SCORE_MAX - 5;
- case MKTAG(0x82,0x82,0x7f,0x7d ):
- case MKTAG('f','t','y','p'):
+ case MKTAG(0x82,0x82,0x7f,0x7d):
case MKTAG('s','k','i','p'):
case MKTAG('u','u','i','d'):
case MKTAG('p','r','f','l'):
MOVContext *mov = s->priv_data;
ByteIOContext *pb = s->pb;
int err;
- MOV_atom_t atom = { 0, 0, 0 };
+ MOVAtom atom = { 0, 0, 0 };
mov->fc = s;
/* .mov and .mp4 aren't streamable anyway (only progressive download if moov is before mdat) */
MOVStreamContext *sc = 0;
AVIndexEntry *sample = 0;
int64_t best_dts = INT64_MAX;
- int i;
+ int i, ret;
retry:
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
if (!sample) {
mov->found_mdat = 0;
if (!url_is_streamed(s->pb) ||
- mov_read_default(mov, s->pb, (MOV_atom_t){ 0, 0, INT64_MAX }) < 0 ||
+ mov_read_default(mov, s->pb, (MOVAtom){ 0, 0, INT64_MAX }) < 0 ||
url_feof(s->pb))
return -1;
dprintf(s, "read fragments, offset 0x%llx\n", url_ftell(s->pb));
sc->ffindex, sample->pos);
return -1;
}
- av_get_packet(sc->pb, pkt, sample->size);
-#ifdef CONFIG_DV_DEMUXER
+ ret = av_get_packet(sc->pb, pkt, sample->size);
+ if (ret < 0)
+ return ret;
+#if CONFIG_DV_DEMUXER
if (mov->dv_demux && sc->dv_audio_container) {
dv_produce_packet(mov->dv_demux, pkt, pkt->data, pkt->size);
av_free(pkt->data);
pkt->stream_index = sc->ffindex;
pkt->dts = sample->timestamp;
if (sc->ctts_data) {
- assert(sc->ctts_data[sc->sample_to_ctime_index].duration % sc->time_rate == 0);
- pkt->pts = pkt->dts + sc->ctts_data[sc->sample_to_ctime_index].duration / sc->time_rate;
+ assert(sc->ctts_data[sc->ctts_index].duration % sc->time_rate == 0);
+ pkt->pts = pkt->dts + sc->ctts_data[sc->ctts_index].duration / sc->time_rate;
/* update ctts context */
- sc->sample_to_ctime_sample++;
- if (sc->sample_to_ctime_index < sc->ctts_count &&
- sc->ctts_data[sc->sample_to_ctime_index].count == sc->sample_to_ctime_sample) {
- sc->sample_to_ctime_index++;
- sc->sample_to_ctime_sample = 0;
+ sc->ctts_sample++;
+ if (sc->ctts_index < sc->ctts_count &&
+ sc->ctts_data[sc->ctts_index].count == sc->ctts_sample) {
+ sc->ctts_index++;
+ sc->ctts_sample = 0;
}
+ if (sc->wrong_dts)
+ pkt->dts = AV_NOPTS_VALUE;
} else {
+ AVStream *st = s->streams[sc->ffindex];
+ int64_t next_dts = (sc->current_sample < sc->sample_count) ?
+ st->index_entries[sc->current_sample].timestamp : st->duration;
+ pkt->duration = next_dts - pkt->dts;
pkt->pts = pkt->dts;
}
pkt->flags |= sample->flags & AVINDEX_KEYFRAME ? PKT_FLAG_KEY : 0;
for (i = 0; i < sc->ctts_count; i++) {
int next = time_sample + sc->ctts_data[i].count;
if (next > sc->current_sample) {
- sc->sample_to_ctime_index = i;
- sc->sample_to_ctime_sample = sc->current_sample - time_sample;
+ sc->ctts_index = i;
+ sc->ctts_sample = sc->current_sample - time_sample;
break;
}
time_sample = next;
if (stream_index >= s->nb_streams)
return -1;
+ if (sample_time < 0)
+ sample_time = 0;
st = s->streams[stream_index];
sample = mov_seek_stream(st, sample_time, flags);
AVInputFormat mov_demuxer = {
"mov,mp4,m4a,3gp,3g2,mj2",
- "QuickTime/MPEG4/Motion JPEG 2000 format",
+ NULL_IF_CONFIG_SMALL("QuickTime/MPEG-4/Motion JPEG 2000 format"),
sizeof(MOVContext),
mov_probe,
mov_read_header,