* H.264 encoding using the x264 library
* Copyright (C) 2005 Mans Rullgard <mans@mansr.com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
char *preset;
char *tune;
char *profile;
+ char *level;
int fastfirstpass;
+ char *wpredp;
+ char *x264opts;
float crf;
float crf_max;
int cqp;
int direct_pred;
int slice_max_size;
char *stats;
+ int nal_hrd;
} X264Context;
static void X264_log(void *p, int level, const char *fmt, va_list args)
for (i = 0; i < nnal; i++)
size += nals[i].i_payload;
- if ((ret = ff_alloc_packet(pkt, size)) < 0)
+ if ((ret = ff_alloc_packet2(ctx, pkt, size)) < 0)
return ret;
p = pkt->data;
/* Write the SEI as part of the first frame. */
if (x4->sei_size > 0 && nnal > 0) {
+ if (x4->sei_size > size) {
+ av_log(ctx, AV_LOG_ERROR, "Error: nal buffer is too small\n");
+ return -1;
+ }
memcpy(p, x4->sei, x4->sei_size);
p += x4->sei_size;
x4->sei_size = 0;
+ av_freep(&x4->sei);
}
for (i = 0; i < nnal; i++){
return 1;
}
+/* Return the number of picture planes x264 expects for the given
+ * libavutil pixel format: 3 for the planar YUV formats, 1 for the
+ * packed RGB/BGR formats.  Unknown formats fall back to 3, matching
+ * the previous hard-coded i_plane = 3 behaviour. */
+static int avfmt2_num_planes(int avfmt)
+{
+ switch (avfmt) {
+ case PIX_FMT_YUV420P:
+ case PIX_FMT_YUVJ420P:
+ case PIX_FMT_YUV420P9:
+ case PIX_FMT_YUV420P10:
+ case PIX_FMT_YUV444P:
+ return 3;
+
+ case PIX_FMT_BGR24:
+ case PIX_FMT_RGB24:
+ return 1;
+
+ default:
+ return 3;
+ }
+}
+
static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
int *got_packet)
{
x4->pic.img.i_csp = x4->params.i_csp;
if (x264_bit_depth > 8)
x4->pic.img.i_csp |= X264_CSP_HIGH_DEPTH;
- x4->pic.img.i_plane = 3;
+ x4->pic.img.i_plane = avfmt2_num_planes(ctx->pix_fmt);
if (frame) {
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < x4->pic.img.i_plane; i++) {
x4->pic.img.plane[i] = frame->data[i];
x4->pic.img.i_stride[i] = frame->linesize[i];
}
return 0;
}
+/* Apply a string-valued x264 option through x264_param_parse().
+ * No-op when 'param' is NULL.  On failure, logs whether the option
+ * name or its value was bad and returns -1 from the ENCLOSING
+ * function (note the bare 'return' inside the macro). */
+#define OPT_STR(opt, param) \
+ do { \
+ int ret; \
+ if (param && (ret = x264_param_parse(&x4->params, opt, param)) < 0) { \
+ if(ret == X264_PARAM_BAD_NAME) \
+ av_log(avctx, AV_LOG_ERROR, \
+ "bad option '%s': '%s'\n", opt, param); \
+ else \
+ av_log(avctx, AV_LOG_ERROR, \
+ "bad value for '%s': '%s'\n", opt, param); \
+ return -1; \
+ } \
+ } while (0)
+
static int convert_pix_fmt(enum PixelFormat pix_fmt)
{
switch (pix_fmt) {
case PIX_FMT_YUV444P:
case PIX_FMT_YUV444P9:
case PIX_FMT_YUV444P10: return X264_CSP_I444;
+#ifdef X264_CSP_BGR
+ case PIX_FMT_BGR24:
+ return X264_CSP_BGR;
+
+ case PIX_FMT_RGB24:
+ return X264_CSP_RGB;
+#endif
};
return 0;
}
static av_cold int X264_init(AVCodecContext *avctx)
{
X264Context *x4 = avctx->priv_data;
+ int sw,sh;
x264_param_default(&x4->params);
x4->params.b_deblocking_filter = avctx->flags & CODEC_FLAG_LOOP_FILTER;
+ x4->params.rc.f_ip_factor = 1 / fabs(avctx->i_quant_factor);
+ x4->params.rc.f_pb_factor = avctx->b_quant_factor;
+ x4->params.analyse.i_chroma_qp_offset = avctx->chromaoffset;
if (x4->preset || x4->tune)
if (x264_param_default_preset(&x4->params, x4->preset, x4->tune) < 0) {
+ int i;
av_log(avctx, AV_LOG_ERROR, "Error setting preset/tune %s/%s.\n", x4->preset, x4->tune);
+ av_log(avctx, AV_LOG_INFO, "Possible presets:");
+ for (i = 0; x264_preset_names[i]; i++)
+ av_log(avctx, AV_LOG_INFO, " %s", x264_preset_names[i]);
+ av_log(avctx, AV_LOG_INFO, "\n");
+ av_log(avctx, AV_LOG_INFO, "Possible tunes:");
+ for (i = 0; x264_tune_names[i]; i++)
+ av_log(avctx, AV_LOG_INFO, " %s", x264_tune_names[i]);
+ av_log(avctx, AV_LOG_INFO, "\n");
return AVERROR(EINVAL);
}
x4->params.i_log_level = X264_LOG_DEBUG;
x4->params.i_csp = convert_pix_fmt(avctx->pix_fmt);
+ OPT_STR("weightp", x4->wpredp);
+
if (avctx->bit_rate) {
x4->params.rc.i_bitrate = avctx->bit_rate / 1000;
x4->params.rc.i_rc_method = X264_RC_ABR;
(float)avctx->rc_initial_buffer_occupancy / avctx->rc_buffer_size;
}
- x4->params.rc.f_ip_factor = 1 / fabs(avctx->i_quant_factor);
- x4->params.rc.f_pb_factor = avctx->b_quant_factor;
- x4->params.analyse.i_chroma_qp_offset = avctx->chromaoffset;
+ OPT_STR("level", x4->level);
+
+ if(x4->x264opts){
+ const char *p= x4->x264opts;
+ while(p){
+ char param[256]={0}, val[256]={0};
+ if(sscanf(p, "%255[^:=]=%255[^:]", param, val) == 1){
+ OPT_STR(param, "1");
+ }else
+ OPT_STR(param, val);
+ p= strchr(p, ':');
+ p+=!!p;
+ }
+ }
if (avctx->me_method == ME_EPZS)
x4->params.analyse.i_me_method = X264_ME_DIA;
if (x4->slice_max_size >= 0)
x4->params.i_slice_max_size = x4->slice_max_size;
+ else {
+ /*
+ * Allow x264 to be instructed through AVCodecContext about the maximum
+ * size of the RTP payload. For example, this enables the production of
+ * payload suitable for the H.264 RTP packetization-mode 0 i.e. single
+ * NAL unit per RTP packet.
+ */
+ if (avctx->rtp_payload_size)
+ x4->params.i_slice_max_size = avctx->rtp_payload_size;
+ }
if (x4->fastfirstpass)
x264_param_apply_fastfirstpass(&x4->params);
+ /* Allow specifying the x264 profile through AVCodecContext. */
+ if (!x4->profile)
+ switch (avctx->profile) {
+ case FF_PROFILE_H264_BASELINE:
+ x4->profile = av_strdup("baseline");
+ break;
+ case FF_PROFILE_H264_HIGH:
+ x4->profile = av_strdup("high");
+ break;
+ case FF_PROFILE_H264_HIGH_10:
+ x4->profile = av_strdup("high10");
+ break;
+ case FF_PROFILE_H264_HIGH_422:
+ x4->profile = av_strdup("high422");
+ break;
+ case FF_PROFILE_H264_HIGH_444:
+ x4->profile = av_strdup("high444");
+ break;
+ case FF_PROFILE_H264_MAIN:
+ x4->profile = av_strdup("main");
+ break;
+ default:
+ break;
+ }
+
+ if (x4->nal_hrd >= 0)
+ x4->params.i_nal_hrd = x4->nal_hrd;
+
if (x4->profile)
if (x264_param_apply_profile(&x4->params, x4->profile) < 0) {
+ int i;
av_log(avctx, AV_LOG_ERROR, "Error setting profile %s.\n", x4->profile);
+ av_log(avctx, AV_LOG_INFO, "Possible profiles:");
+ for (i = 0; x264_profile_names[i]; i++)
+ av_log(avctx, AV_LOG_INFO, " %s", x264_profile_names[i]);
+ av_log(avctx, AV_LOG_INFO, "\n");
return AVERROR(EINVAL);
}
x4->params.i_width = avctx->width;
x4->params.i_height = avctx->height;
- x4->params.vui.i_sar_width = avctx->sample_aspect_ratio.num;
- x4->params.vui.i_sar_height = avctx->sample_aspect_ratio.den;
+ av_reduce(&sw, &sh, avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 4096);
+ x4->params.vui.i_sar_width = sw;
+ x4->params.vui.i_sar_height = sh;
x4->params.i_fps_num = x4->params.i_timebase_den = avctx->time_base.den;
x4->params.i_fps_den = x4->params.i_timebase_num = avctx->time_base.num;
x4->params.b_interlaced = avctx->flags & CODEC_FLAG_INTERLACED_DCT;
- x4->params.b_open_gop = !(avctx->flags & CODEC_FLAG_CLOSED_GOP);
+// x4->params.b_open_gop = !(avctx->flags & CODEC_FLAG_CLOSED_GOP);
x4->params.i_slice_count = avctx->slices;
PIX_FMT_YUV444P10,
PIX_FMT_NONE
};
+static const enum PixelFormat pix_fmts_8bit_rgb[] = {
+#ifdef X264_CSP_BGR
+ PIX_FMT_BGR24,
+ PIX_FMT_RGB24,
+#endif
+ PIX_FMT_NONE
+};
static av_cold void X264_init_static(AVCodec *codec)
{
{ "tune", "Tune the encoding params (cf. x264 --fullhelp)", OFFSET(tune), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE},
{ "profile", "Set profile restrictions (cf. x264 --fullhelp) ", OFFSET(profile), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE},
{ "fastfirstpass", "Use fast settings when encoding first pass", OFFSET(fastfirstpass), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, VE},
+ {"level", "Specify level (as defined by Annex A)", OFFSET(level), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, VE},
+ {"passlogfile", "Filename for 2 pass stats", OFFSET(stats), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, VE},
+ {"wpredp", "Weighted prediction for P-frames", OFFSET(wpredp), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, VE},
+ {"x264opts", "x264 options", OFFSET(x264opts), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, VE},
{ "crf", "Select the quality for constant quality mode", OFFSET(crf), AV_OPT_TYPE_FLOAT, {.dbl = -1 }, -1, FLT_MAX, VE },
{ "crf_max", "In CRF mode, prevents VBV from lowering quality beyond this point.",OFFSET(crf_max), AV_OPT_TYPE_FLOAT, {.dbl = -1 }, -1, FLT_MAX, VE },
{ "qp", "Constant quantization parameter rate control method",OFFSET(cqp), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, VE },
{ "auto", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = X264_DIRECT_PRED_AUTO }, 0, 0, VE, "direct-pred" },
{ "slice-max-size","Limit the size of each slice in bytes", OFFSET(slice_max_size),AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, VE },
{ "stats", "Filename for 2 pass stats", OFFSET(stats), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
+ { "nal-hrd", "Signal HRD information (requires vbv-bufsize; "
+ "cbr not allowed in .mp4)", OFFSET(nal_hrd), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, VE, "nal-hrd" },
+ { "none", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = X264_NAL_HRD_NONE}, INT_MIN, INT_MAX, VE, "nal-hrd" },
+ { "vbr", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = X264_NAL_HRD_VBR}, INT_MIN, INT_MAX, VE, "nal-hrd" },
+ { "cbr", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = X264_NAL_HRD_CBR}, INT_MIN, INT_MAX, VE, "nal-hrd" },
{ NULL },
};
.version = LIBAVUTIL_VERSION_INT,
};
+static const AVClass rgbclass = {
+ .class_name = "libx264rgb",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
static const AVCodecDefault x264_defaults[] = {
{ "b", "0" },
{ "bf", "-1" },
+ { "flags2", "0" },
{ "g", "-1" },
{ "qmin", "-1" },
{ "qmax", "-1" },
{ "qdiff", "-1" },
{ "qblur", "-1" },
{ "qcomp", "-1" },
+// { "rc_lookahead", "-1" },
{ "refs", "-1" },
{ "sc_threshold", "-1" },
{ "trellis", "-1" },
.defaults = x264_defaults,
.init_static_data = X264_init_static,
};
+
+/* Separate encoder entry for RGB input: identical callbacks to the
+ * YUV libx264 encoder, but restricted to the 8-bit RGB/BGR pixel
+ * format list and registered under its own name/class. */
+AVCodec ff_libx264rgb_encoder = {
+ .name = "libx264rgb",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_H264,
+ .priv_data_size = sizeof(X264Context),
+ .init = X264_init,
+ .encode2 = X264_frame,
+ .close = X264_close,
+ .capabilities = CODEC_CAP_DELAY,
+ .long_name = NULL_IF_CONFIG_SMALL("libx264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 RGB"),
+ .priv_class = &rgbclass,
+ .defaults = x264_defaults,
+ .pix_fmts = pix_fmts_8bit_rgb,
+};
* copyright (c) 2002 Francois Revol <revol@free.fr>
* copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
unsigned flags;
} MOVTrackExt;
+ /* One sample-to-group ('sbgp') run: 'count' consecutive samples
+ * sharing group description 'index' (used for rap_group below). */
+ typedef struct {
+ unsigned int count;
+ unsigned int index;
+ } MOVSbgp;
+
typedef struct MOVStreamContext {
AVIOContext *pb;
int ffindex; ///< AVStream index
unsigned *stps_data; ///< partial sync sample for mpeg-2 open gop
int ctts_index;
int ctts_sample;
- unsigned int sample_size;
+ unsigned int sample_size; ///< may contain value calculated from stsd or value from stsz atom
+ unsigned int alt_sample_size; ///< always contains sample size from stsz atom
unsigned int sample_count;
int *sample_sizes;
int keyframe_absent;
unsigned int keyframe_count;
int *keyframes;
int time_scale;
- int64_t time_offset; ///< time offset of the first edit list entry
+ int64_t empty_duration; ///< empty duration of the first edit list entry
+ int64_t start_time; ///< start time of the media
+ int64_t time_offset; ///< time offset of the edit list entries
int current_sample;
unsigned int bytes_per_frame;
unsigned int samples_per_frame;
unsigned drefs_count;
MOVDref *drefs;
int dref_id;
+ unsigned tref_type;
+ unsigned trefs_count;
+ uint32_t *trefs;
int wrong_dts; ///< dts are wrong due to huge ctts offset (iMovie files)
int width; ///< tkhd width
int height; ///< tkhd height
uint32_t palette[256];
int has_palette;
int64_t data_size;
+ uint32_t tmcd_flags; ///< tmcd track flags
int64_t track_end; ///< used for dts generation in fragmented movie files
+ int start_pad; ///< amount of samples to skip due to enc-dec delay
+ unsigned int rap_group_count;
+ MOVSbgp *rap_group;
} MOVStreamContext;
typedef struct MOVContext {
+ AVClass *avclass;
AVFormatContext *fc;
int time_scale;
int64_t duration; ///< duration of the longest track
unsigned trex_count;
int itunes_metadata; ///< metadata are itunes style
int chapter_track;
+ int use_absolute_path;
int64_t next_root_atom; ///< offset of the next root atom
} MOVContext;
enum AVCodecID ff_mov_get_lpcm_codec_id(int bps, int flags);
int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries);
+int ff_mov_read_chan(AVFormatContext *s, AVIOContext *pb, AVStream *st, int64_t size);
+void ff_mov_write_chan(AVIOContext *pb, int64_t channel_layout);
#endif /* AVFORMAT_ISOM_H */
/*
* Matroska file demuxer
- * Copyright (c) 2003-2008 The Libav Project
+ * Copyright (c) 2003-2008 The FFmpeg Project
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
uint64_t display_height;
uint64_t pixel_width;
uint64_t pixel_height;
- uint64_t fourcc;
+ EbmlBin color_space;
+ uint64_t stereo_mode;
} MatroskaTrackVideo;
typedef struct {
uint8_t *buf;
} MatroskaTrackAudio;
+typedef struct {
+ uint64_t uid;
+ uint64_t type;
+} MatroskaTrackPlane;
+
+typedef struct {
+ EbmlList combine_planes;
+} MatroskaTrackOperation;
+
typedef struct {
uint64_t num;
uint64_t uid;
uint64_t flag_forced;
MatroskaTrackVideo video;
MatroskaTrackAudio audio;
+ MatroskaTrackOperation operation;
EbmlList encodings;
AVStream *stream;
uint64_t time_scale;
double duration;
char *title;
+ EbmlBin date_utc;
EbmlList tracks;
EbmlList attachments;
EbmlList chapters;
{ MATROSKA_ID_TITLE, EBML_UTF8, 0, offsetof(MatroskaDemuxContext,title) },
{ MATROSKA_ID_WRITINGAPP, EBML_NONE },
{ MATROSKA_ID_MUXINGAPP, EBML_NONE },
- { MATROSKA_ID_DATEUTC, EBML_NONE },
+ { MATROSKA_ID_DATEUTC, EBML_BIN, 0, offsetof(MatroskaDemuxContext,date_utc) },
{ MATROSKA_ID_SEGMENTUID, EBML_NONE },
{ 0 }
};
{ MATROSKA_ID_VIDEODISPLAYHEIGHT, EBML_UINT, 0, offsetof(MatroskaTrackVideo,display_height) },
{ MATROSKA_ID_VIDEOPIXELWIDTH, EBML_UINT, 0, offsetof(MatroskaTrackVideo,pixel_width) },
{ MATROSKA_ID_VIDEOPIXELHEIGHT, EBML_UINT, 0, offsetof(MatroskaTrackVideo,pixel_height) },
- { MATROSKA_ID_VIDEOCOLORSPACE, EBML_UINT, 0, offsetof(MatroskaTrackVideo,fourcc) },
+ { MATROSKA_ID_VIDEOCOLORSPACE, EBML_BIN, 0, offsetof(MatroskaTrackVideo,color_space) },
+ { MATROSKA_ID_VIDEOSTEREOMODE, EBML_UINT, 0, offsetof(MatroskaTrackVideo,stereo_mode) },
{ MATROSKA_ID_VIDEOPIXELCROPB, EBML_NONE },
{ MATROSKA_ID_VIDEOPIXELCROPT, EBML_NONE },
{ MATROSKA_ID_VIDEOPIXELCROPL, EBML_NONE },
{ MATROSKA_ID_VIDEOPIXELCROPR, EBML_NONE },
{ MATROSKA_ID_VIDEODISPLAYUNIT, EBML_NONE },
{ MATROSKA_ID_VIDEOFLAGINTERLACED,EBML_NONE },
- { MATROSKA_ID_VIDEOSTEREOMODE, EBML_NONE },
{ MATROSKA_ID_VIDEOASPECTRATIO, EBML_NONE },
{ 0 }
};
{ 0 }
};
+static EbmlSyntax matroska_track_plane[] = {
+ { MATROSKA_ID_TRACKPLANEUID, EBML_UINT, 0, offsetof(MatroskaTrackPlane,uid) },
+ { MATROSKA_ID_TRACKPLANETYPE, EBML_UINT, 0, offsetof(MatroskaTrackPlane,type) },
+ { 0 }
+};
+
+static EbmlSyntax matroska_track_combine_planes[] = {
+ { MATROSKA_ID_TRACKPLANE, EBML_NEST, sizeof(MatroskaTrackPlane), offsetof(MatroskaTrackOperation,combine_planes), {.n=matroska_track_plane} },
+ { 0 }
+};
+
+static EbmlSyntax matroska_track_operation[] = {
+ { MATROSKA_ID_TRACKCOMBINEPLANES, EBML_NEST, 0, 0, {.n=matroska_track_combine_planes} },
+ { 0 }
+};
+
static EbmlSyntax matroska_track[] = {
{ MATROSKA_ID_TRACKNUMBER, EBML_UINT, 0, offsetof(MatroskaTrack,num) },
{ MATROSKA_ID_TRACKNAME, EBML_UTF8, 0, offsetof(MatroskaTrack,name) },
{ MATROSKA_ID_TRACKFLAGFORCED, EBML_UINT, 0, offsetof(MatroskaTrack,flag_forced), {.u=0} },
{ MATROSKA_ID_TRACKVIDEO, EBML_NEST, 0, offsetof(MatroskaTrack,video), {.n=matroska_track_video} },
{ MATROSKA_ID_TRACKAUDIO, EBML_NEST, 0, offsetof(MatroskaTrack,audio), {.n=matroska_track_audio} },
+ { MATROSKA_ID_TRACKOPERATION, EBML_NEST, 0, offsetof(MatroskaTrack,operation), {.n=matroska_track_operation} },
{ MATROSKA_ID_TRACKCONTENTENCODINGS,EBML_NEST, 0, 0, {.n=matroska_track_encodings} },
{ MATROSKA_ID_TRACKFLAGENABLED, EBML_NONE },
{ MATROSKA_ID_TRACKFLAGLACING, EBML_NONE },
static EbmlSyntax matroska_blockgroup[] = {
{ MATROSKA_ID_BLOCK, EBML_BIN, 0, offsetof(MatroskaBlock,bin) },
{ MATROSKA_ID_SIMPLEBLOCK, EBML_BIN, 0, offsetof(MatroskaBlock,bin) },
- { MATROSKA_ID_BLOCKDURATION, EBML_UINT, 0, offsetof(MatroskaBlock,duration), {.u=AV_NOPTS_VALUE} },
+ { MATROSKA_ID_BLOCKDURATION, EBML_UINT, 0, offsetof(MatroskaBlock,duration) },
{ MATROSKA_ID_BLOCKREFERENCE, EBML_UINT, 0, offsetof(MatroskaBlock,reference) },
{ 1, EBML_UINT, 0, offsetof(MatroskaBlock,non_simple), {.u=1} },
{ 0 }
static const char *const matroska_doctypes[] = { "matroska", "webm" };
+/* Resynchronise the demuxer after a parse error: seek to just past
+ * last_pos and scan byte-by-byte for the 32-bit ID of a known
+ * Matroska top-level element.  On success stores the ID in
+ * matroska->current_id (so the next ebml_parse picks it up) and
+ * returns 0; on EOF sets matroska->done and returns AVERROR_EOF. */
+static int matroska_resync(MatroskaDemuxContext *matroska, int64_t last_pos)
+{
+ AVIOContext *pb = matroska->ctx->pb;
+ uint32_t id;
+ matroska->current_id = 0;
+ matroska->num_levels = 0;
+
+ // seek to next position to resync from
+ if (avio_seek(pb, last_pos + 1, SEEK_SET) < 0 || avio_tell(pb) <= last_pos)
+ goto eof;
+
+ id = avio_rb32(pb);
+
+ // try to find a toplevel element
+ while (!url_feof(pb)) {
+ if (id == MATROSKA_ID_INFO || id == MATROSKA_ID_TRACKS ||
+ id == MATROSKA_ID_CUES || id == MATROSKA_ID_TAGS ||
+ id == MATROSKA_ID_SEEKHEAD || id == MATROSKA_ID_ATTACHMENTS ||
+ id == MATROSKA_ID_CLUSTER || id == MATROSKA_ID_CHAPTERS)
+ {
+ matroska->current_id = id;
+ return 0;
+ }
+ id = (id << 8) | avio_r8(pb);
+ }
+eof:
+ matroska->done = 1;
+ return AVERROR_EOF;
+}
+
/*
* Return: Whether we reached the end of a level in the hierarchy or not.
*/
* use it safely here to catch EOS. */
if (!(total = avio_r8(pb))) {
/* we might encounter EOS here */
- if (!pb->eof_reached) {
+ if (!url_feof(pb)) {
int64_t pos = avio_tell(pb);
av_log(matroska->ctx, AV_LOG_ERROR,
"Read error at pos. %"PRIu64" (0x%"PRIx64")\n",
return ebml_parse_nest(matroska, syntax->def.n, data);
case EBML_PASS: return ebml_parse_id(matroska, syntax->def.n, id, data);
case EBML_STOP: return 1;
- default: return avio_skip(pb,length)<0 ? AVERROR(EIO) : 0;
+ default:
+ if(ffio_limit(pb, length) != length)
+ return AVERROR(EIO);
+ return avio_skip(pb,length)<0 ? AVERROR(EIO) : 0;
}
if (res == AVERROR_INVALIDDATA)
av_log(matroska->ctx, AV_LOG_ERROR, "Invalid element\n");
int result = 0;
int olen;
- if (pkt_size >= 10000000)
+ if (pkt_size >= 10000000U)
return AVERROR_INVALIDDATA;
switch (encodings[0].compression.algo) {
int header_size = encodings[0].compression.settings.size;
uint8_t *header = encodings[0].compression.settings.data;
+ if (header_size && !header) {
+ av_log(0, AV_LOG_ERROR, "Compression size but no data in headerstrip\n");
+ return -1;
+ }
+
if (!header_size)
return 0;
pkt_data = newpktdata;
zstream.avail_out = pkt_size - zstream.total_out;
zstream.next_out = pkt_data + zstream.total_out;
- result = inflate(&zstream, Z_NO_FLUSH);
+ if (pkt_data) {
+ result = inflate(&zstream, Z_NO_FLUSH);
+ } else
+ result = Z_MEM_ERROR;
} while (result==Z_OK && pkt_size<10000000);
pkt_size = zstream.total_out;
inflateEnd(&zstream);
pkt_data = newpktdata;
bzstream.avail_out = pkt_size - bzstream.total_out_lo32;
bzstream.next_out = pkt_data + bzstream.total_out_lo32;
- result = BZ2_bzDecompress(&bzstream);
+ if (pkt_data) {
+ result = BZ2_bzDecompress(&bzstream);
+ } else
+ result = BZ_MEM_ERROR;
} while (result==BZ_OK && pkt_size<10000000);
pkt_size = bzstream.total_out_lo32;
BZ2_bzDecompressEnd(&bzstream);
char *line, *layer, *ptr = pkt->data, *end = ptr+pkt->size;
for (; *ptr!=',' && ptr<end-1; ptr++);
if (*ptr == ',')
- layer = ++ptr;
+ ptr++;
+ layer = ptr;
for (; *ptr!=',' && ptr<end-1; ptr++);
if (*ptr == ',') {
int64_t end_pts = pkt->pts + display_duration;
static int matroska_merge_packets(AVPacket *out, AVPacket *in)
{
- void *newdata = av_realloc(out->data, out->size+in->size);
- if (!newdata)
- return AVERROR(ENOMEM);
- out->data = newdata;
- memcpy(out->data+out->size, in->data, in->size);
- out->size += in->size;
+ int ret = av_grow_packet(out, in->size);
+ if (ret < 0)
+ return ret;
+ memcpy(out->data + out->size - in->size, in->data, in->size);
av_destruct_packet(in);
av_free(in);
return 0;
int i;
for (i=0; i < list->nb_elem; i++) {
- const char *lang = strcmp(tags[i].lang, "und") ? tags[i].lang : NULL;
+ const char *lang= (tags[i].lang && strcmp(tags[i].lang, "und")) ? tags[i].lang : NULL;
if (!tags[i].name) {
av_log(s, AV_LOG_WARNING, "Skipping invalid tag with no TagName.\n");
continue;
}
- if (matroska_parse_seekhead_entry(matroska, i) < 0)
+ if (matroska_parse_seekhead_entry(matroska, i) < 0) {
+ // mark index as broken
+ matroska->cues_parsing_deferred = -1;
break;
+ }
}
}
-static void matroska_parse_cues(MatroskaDemuxContext *matroska) {
- EbmlList *seekhead_list = &matroska->seekhead;
- MatroskaSeekhead *seekhead = seekhead_list->elem;
+static void matroska_add_index_entries(MatroskaDemuxContext *matroska) {
EbmlList *index_list;
MatroskaIndex *index;
int index_scale = 1;
int i, j;
- for (i = 0; i < seekhead_list->nb_elem; i++)
- if (seekhead[i].id == MATROSKA_ID_CUES)
- break;
- assert(i <= seekhead_list->nb_elem);
-
- matroska_parse_seekhead_entry(matroska, i);
-
index_list = &matroska->index;
index = index_list->elem;
if (index_list->nb_elem
}
}
+/* Locate the Cues entry in the SeekHead, parse it, and convert the
+ * resulting index into AVIndexEntries.  If parsing the entry fails,
+ * mark the index as broken via cues_parsing_deferred = -1 so later
+ * seeks know they cannot rely on it. */
+static void matroska_parse_cues(MatroskaDemuxContext *matroska) {
+ EbmlList *seekhead_list = &matroska->seekhead;
+ MatroskaSeekhead *seekhead = seekhead_list->elem;
+ int i;
+
+ for (i = 0; i < seekhead_list->nb_elem; i++)
+ if (seekhead[i].id == MATROSKA_ID_CUES)
+ break;
+ assert(i <= seekhead_list->nb_elem);
+
+ if (matroska_parse_seekhead_entry(matroska, i) < 0)
+ matroska->cues_parsing_deferred = -1;
+ matroska_add_index_entries(matroska);
+}
+
static int matroska_aac_profile(char *codec_id)
{
static const char * const aac_profiles[] = { "MAIN", "LC", "SSR" };
return sri;
}
+/* Translate a Matroska DateUTC value (nanoseconds since 2001-01-01)
+ * into a "creation_time" metadata string ("YYYY-MM-DD HH:MM:SS").
+ * Silently does nothing if gmtime() fails. */
+static void matroska_metadata_creation_time(AVDictionary **metadata, int64_t date_utc)
+{
+ char buffer[32];
+ /* Convert to seconds and adjust by number of seconds between 2001-01-01 and Epoch */
+ time_t creation_time = date_utc / 1000000000 + 978307200;
+ struct tm *ptm = gmtime(&creation_time);
+ if (!ptm) return;
+ strftime(buffer, sizeof(buffer), "%Y-%m-%d %H:%M:%S", ptm);
+ av_dict_set(metadata, "creation_time", buffer, 0);
+}
+
static int matroska_read_header(AVFormatContext *s)
{
MatroskaDemuxContext *matroska = s->priv_data;
MatroskaChapter *chapters;
MatroskaTrack *tracks;
uint64_t max_start = 0;
+ int64_t pos;
Ebml ebml = { 0 };
AVStream *st;
- int i, j, res;
+ int i, j, k, res;
matroska->ctx = s;
/* First read the EBML header. */
if (ebml_parse(matroska, ebml_syntax, &ebml)
|| ebml.version > EBML_VERSION || ebml.max_size > sizeof(uint64_t)
- || ebml.id_length > sizeof(uint32_t) || ebml.doctype_version > 2) {
+ || ebml.id_length > sizeof(uint32_t) || ebml.doctype_version > 3 || !ebml.doctype) {
av_log(matroska->ctx, AV_LOG_ERROR,
"EBML header using unsupported features\n"
"(EBML version %"PRIu64", doctype %s, doc version %"PRIu64")\n",
ebml.version, ebml.doctype, ebml.doctype_version);
ebml_free(ebml_syntax, &ebml);
return AVERROR_PATCHWELCOME;
+ } else if (ebml.doctype_version == 3) {
+ av_log(matroska->ctx, AV_LOG_WARNING,
+ "EBML header using unsupported features\n"
+ "(EBML version %"PRIu64", doctype %s, doc version %"PRIu64")\n",
+ ebml.version, ebml.doctype, ebml.doctype_version);
}
for (i = 0; i < FF_ARRAY_ELEMS(matroska_doctypes); i++)
if (!strcmp(ebml.doctype, matroska_doctypes[i]))
ebml_free(ebml_syntax, &ebml);
/* The next thing is a segment. */
- if ((res = ebml_parse(matroska, matroska_segments, matroska)) < 0)
- return res;
+ pos = avio_tell(matroska->ctx->pb);
+ res = ebml_parse(matroska, matroska_segments, matroska);
+ // try resyncing until we find a EBML_STOP type element.
+ while (res != 1) {
+ res = matroska_resync(matroska, pos);
+ if (res < 0)
+ return res;
+ pos = avio_tell(matroska->ctx->pb);
+ res = ebml_parse(matroska, matroska_segment, matroska);
+ }
matroska_execute_seekhead(matroska);
if (!matroska->time_scale)
* 1000 / AV_TIME_BASE;
av_dict_set(&s->metadata, "title", matroska->title, 0);
+ if (matroska->date_utc.size == 8)
+ matroska_metadata_creation_time(&s->metadata, AV_RB64(matroska->date_utc.data));
+
tracks = matroska->tracks.elem;
for (i=0; i < matroska->tracks.nb_elem; i++) {
MatroskaTrack *track = &tracks[i];
enum AVCodecID codec_id = AV_CODEC_ID_NONE;
- EbmlList *encodings_list = &tracks->encodings;
+ EbmlList *encodings_list = &track->encodings;
MatroskaTrackEncoding *encodings = encodings_list->elem;
uint8_t *extradata = NULL;
int extradata_size = 0;
int extradata_offset = 0;
+ uint32_t fourcc = 0;
AVIOContext b;
/* Apply some sanity checks. */
track->video.display_width = track->video.pixel_width;
if (!track->video.display_height)
track->video.display_height = track->video.pixel_height;
+ if (track->video.color_space.size == 4)
+ fourcc = AV_RL32(track->video.color_space.data);
} else if (track->type == MATROSKA_TRACK_TYPE_AUDIO) {
if (!track->audio.out_samplerate)
track->audio.out_samplerate = track->audio.samplerate;
&& track->codec_priv.size >= 40
&& track->codec_priv.data != NULL) {
track->ms_compat = 1;
- track->video.fourcc = AV_RL32(track->codec_priv.data + 16);
- codec_id = ff_codec_get_id(ff_codec_bmp_tags, track->video.fourcc);
+ fourcc = AV_RL32(track->codec_priv.data + 16);
+ codec_id = ff_codec_get_id(ff_codec_bmp_tags, fourcc);
extradata_offset = 40;
} else if (!strcmp(track->codec_id, "A_MS/ACM")
&& track->codec_priv.size >= 14
} else if (!strcmp(track->codec_id, "V_QUICKTIME")
&& (track->codec_priv.size >= 86)
&& (track->codec_priv.data != NULL)) {
- track->video.fourcc = AV_RL32(track->codec_priv.data);
- codec_id=ff_codec_get_id(ff_codec_movvideo_tags, track->video.fourcc);
+ fourcc = AV_RL32(track->codec_priv.data);
+ codec_id = ff_codec_get_id(ff_codec_movvideo_tags, fourcc);
+ } else if (codec_id == AV_CODEC_ID_ALAC && track->codec_priv.size && track->codec_priv.size < INT_MAX-12) {
+ /* Only ALAC's magic cookie is stored in Matroska's track headers.
+ Create the "atom size", "tag", and "tag version" fields the
+ decoder expects manually. */
+ extradata_size = 12 + track->codec_priv.size;
+ extradata = av_mallocz(extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (extradata == NULL)
+ return AVERROR(ENOMEM);
+ AV_WB32(extradata, extradata_size);
+ memcpy(&extradata[4], "alac", 4);
+ AV_WB32(&extradata[8], 0);
+ memcpy(&extradata[12], track->codec_priv.data, track->codec_priv.size);
} else if (codec_id == AV_CODEC_ID_PCM_S16BE) {
switch (track->audio.bitdepth) {
case 8: codec_id = AV_CODEC_ID_PCM_U8; break;
extradata_size = 5;
} else
extradata_size = 2;
- } else if (codec_id == AV_CODEC_ID_ALAC && track->codec_priv.size) {
- /* Only ALAC's magic cookie is stored in Matroska's track headers.
- Create the "atom size", "tag", and "tag version" fields the
- decoder expects manually. */
- extradata_size = 12 + track->codec_priv.size;
- extradata = av_mallocz(extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
- if (extradata == NULL)
- return AVERROR(ENOMEM);
- AV_WB32(extradata, extradata_size);
- memcpy(&extradata[4], "alac", 4);
- AV_WB32(&extradata[8], 0);
- memcpy(&extradata[12], track->codec_priv.data,
- track->codec_priv.size);
} else if (codec_id == AV_CODEC_ID_TTA) {
extradata_size = 30;
- extradata = av_mallocz(extradata_size);
+ extradata = av_mallocz(extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
if (extradata == NULL)
return AVERROR(ENOMEM);
ffio_init_context(&b, extradata, extradata_size, 1,
} else if (codec_id == AV_CODEC_ID_RA_144) {
track->audio.out_samplerate = 8000;
track->audio.channels = 1;
- } else if (codec_id == AV_CODEC_ID_RA_288 || codec_id == AV_CODEC_ID_COOK ||
- codec_id == AV_CODEC_ID_ATRAC3 || codec_id == AV_CODEC_ID_SIPR) {
+ } else if ((codec_id == AV_CODEC_ID_RA_288 || codec_id == AV_CODEC_ID_COOK ||
+ codec_id == AV_CODEC_ID_ATRAC3 || codec_id == AV_CODEC_ID_SIPR)
+ && track->codec_priv.data) {
int flavor;
+
ffio_init_context(&b, track->codec_priv.data,track->codec_priv.size,
0, NULL, NULL, NULL, NULL);
avio_skip(&b, 22);
}
if (track->type == MATROSKA_TRACK_TYPE_VIDEO) {
+ MatroskaTrackPlane *planes = track->operation.combine_planes.elem;
+
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
- st->codec->codec_tag = track->video.fourcc;
+ st->codec->codec_tag = fourcc;
st->codec->width = track->video.pixel_width;
st->codec->height = track->video.pixel_height;
av_reduce(&st->sample_aspect_ratio.num,
st->codec->height * track->video.display_width,
st->codec-> width * track->video.display_height,
255);
- if (st->codec->codec_id != AV_CODEC_ID_H264)
st->need_parsing = AVSTREAM_PARSE_HEADERS;
if (track->default_duration) {
av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
st->r_frame_rate = st->avg_frame_rate;
#endif
}
+
+ /* export stereo mode flag as metadata tag */
+ if (track->video.stereo_mode && track->video.stereo_mode < MATROSKA_VIDEO_STEREO_MODE_COUNT)
+ av_dict_set(&st->metadata, "stereo_mode", ff_matroska_video_stereo_mode[track->video.stereo_mode], 0);
+
+ /* if we have virtual track, mark the real tracks */
+ for (j=0; j < track->operation.combine_planes.nb_elem; j++) {
+ char buf[32];
+ if (planes[j].type >= MATROSKA_VIDEO_STEREO_PLANE_COUNT)
+ continue;
+ snprintf(buf, sizeof(buf), "%s_%d",
+ ff_matroska_video_stereo_plane[planes[j].type], i);
+ for (k=0; k < matroska->tracks.nb_elem; k++)
+ if (planes[j].uid == tracks[k].uid) {
+ av_dict_set(&s->streams[k]->metadata,
+ "stereo_mode", buf, 0);
+ break;
+ }
+ }
} else if (track->type == MATROSKA_TRACK_TYPE_AUDIO) {
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->sample_rate = track->audio.out_samplerate;
av_dict_set(&st->metadata, "mimetype", attachements[j].mime, 0);
st->codec->codec_id = AV_CODEC_ID_NONE;
st->codec->codec_type = AVMEDIA_TYPE_ATTACHMENT;
- st->codec->extradata = av_malloc(attachements[j].bin.size);
+ st->codec->extradata = av_malloc(attachements[j].bin.size + FF_INPUT_BUFFER_PADDING_SIZE);
if(st->codec->extradata == NULL)
break;
st->codec->extradata_size = attachements[j].bin.size;
max_start = chapters[i].start;
}
+ matroska_add_index_entries(matroska);
+
matroska_convert_tags(s);
return 0;
return 0;
}
- assert(size > 0);
+ av_assert0(size > 0);
*laces = *data + 1;
data += 1;
size -= 1;
}
case 0x2: /* fixed-size lacing */
- if (size != (size / *laces) * size) {
+ if (size != (size / *laces) * *laces) {
res = AVERROR_INVALIDDATA;
break;
}
MatroskaTrack *track,
AVStream *st,
uint8_t *data, int pkt_size,
- uint64_t timecode, uint64_t duration,
+ uint64_t timecode, uint64_t lace_duration,
int64_t pos, int is_keyframe)
{
MatroskaTrackEncoding *encodings = track->encodings.elem;
else
pkt->pts = timecode;
pkt->pos = pos;
- if (st->codec->codec_id == AV_CODEC_ID_TEXT)
- pkt->convergence_duration = duration;
- else if (track->type != MATROSKA_TRACK_TYPE_SUBTITLE)
- pkt->duration = duration;
+ if (st->codec->codec_id == AV_CODEC_ID_SUBRIP) {
+ /*
+ * For backward compatibility.
+ * Historically, we have put subtitle duration
+ * in convergence_duration, on the off chance
+ * that the time_scale is less than 1us, which
+ * could result in a 32bit overflow on the
+ * normal duration field.
+ */
+ pkt->convergence_duration = lace_duration;
+ }
+
+ if (track->type != MATROSKA_TRACK_TYPE_SUBTITLE ||
+ lace_duration <= INT_MAX) {
+ /*
+ * For non subtitle tracks, just store the duration
+ * as normal.
+ *
+ * If it's a subtitle track and duration value does
+ * not overflow a uint32, then also store it normally.
+ */
+ pkt->duration = lace_duration;
+ }
if (st->codec->codec_id == AV_CODEC_ID_SSA)
- matroska_fix_ass_packet(matroska, pkt, duration);
+ matroska_fix_ass_packet(matroska, pkt, lace_duration);
if (matroska->prev_pkt &&
timecode != AV_NOPTS_VALUE &&
static int matroska_parse_block(MatroskaDemuxContext *matroska, uint8_t *data,
int size, int64_t pos, uint64_t cluster_time,
- uint64_t duration, int is_keyframe,
+ uint64_t block_duration, int is_keyframe,
int64_t cluster_pos)
{
uint64_t timecode = AV_NOPTS_VALUE;
int16_t block_time;
uint32_t *lace_size = NULL;
int n, flags, laces = 0;
- uint64_t num, duration;
+ uint64_t num;
if ((n = matroska_ebmlnum_uint(matroska, data, size, &num)) < 0) {
av_log(matroska->ctx, AV_LOG_ERROR, "EBML block data error\n");
st = track->stream;
if (st->discard >= AVDISCARD_ALL)
return res;
- av_assert1(duration != AV_NOPTS_VALUE);
++ av_assert1(block_duration != AV_NOPTS_VALUE);
block_time = AV_RB16(data);
data += 2;
}
if (matroska->skip_to_keyframe && track->type != MATROSKA_TRACK_TYPE_SUBTITLE) {
- if (!is_keyframe || timecode < matroska->skip_to_timecode)
+ if (timecode < matroska->skip_to_timecode)
return res;
- matroska->skip_to_keyframe = 0;
+ if (!st->skip_to_keyframe) {
+ av_log(matroska->ctx, AV_LOG_ERROR, "File is broken, keyframes not correctly marked!\n");
+ matroska->skip_to_keyframe = 0;
+ }
+ if (is_keyframe)
+ matroska->skip_to_keyframe = 0;
}
res = matroska_parse_laces(matroska, &data, size, (flags & 0x06) >> 1,
if (res)
goto end;
- if (!duration)
- duration = track->default_duration * laces / matroska->time_scale;
- if (block_duration != AV_NOPTS_VALUE) {
- duration = block_duration / laces;
- if (block_duration != duration * laces) {
- av_log(matroska->ctx, AV_LOG_WARNING,
- "Incorrect block_duration, possibly corrupted container");
- }
- } else {
- duration = track->default_duration / matroska->time_scale;
- block_duration = duration * laces;
- }
++ if (!block_duration)
++ block_duration = track->default_duration * laces / matroska->time_scale;
- if (timecode != AV_NOPTS_VALUE)
+ if (cluster_time != (uint64_t)-1 && (block_time >= 0 || cluster_time >= -block_time))
- track->end_timecode = FFMAX(track->end_timecode, timecode+duration);
+ track->end_timecode =
+ FFMAX(track->end_timecode, timecode + block_duration);
for (n = 0; n < laces; n++) {
- int64_t lace_duration = duration*(n+1) / laces - duration*n / laces;
++ int64_t lace_duration = block_duration*(n+1) / laces - block_duration*n / laces;
+
+ if (lace_size[n] > size) {
+ av_log(matroska->ctx, AV_LOG_ERROR, "Invalid packet size\n");
+ break;
+ }
+
if ((st->codec->codec_id == AV_CODEC_ID_RA_288 ||
st->codec->codec_id == AV_CODEC_ID_COOK ||
st->codec->codec_id == AV_CODEC_ID_SIPR ||
st->codec->block_align && track->audio.sub_packet_size) {
res = matroska_parse_rm_audio(matroska, track, st, data, size,
-- timecode, duration, pos);
++ timecode, lace_duration, pos);
if (res)
goto end;
} else {
res = matroska_parse_frame(matroska, track, st, data, lace_size[n],
- timecode, duration,
+ timecode, lace_duration,
pos, !n? is_keyframe : 0);
if (res)
goto end;
}
if (timecode != AV_NOPTS_VALUE)
- timecode = duration ? timecode + duration : AV_NOPTS_VALUE;
+ timecode = lace_duration ? timecode + lace_duration : AV_NOPTS_VALUE;
data += lace_size[n];
size -= lace_size[n];
}
if (blocks[i].bin.size > 0 && blocks[i].bin.data) {
int is_keyframe = blocks[i].non_simple ? !blocks[i].reference : -1;
if (!blocks[i].non_simple)
- blocks[i].duration = AV_NOPTS_VALUE;
+ blocks[i].duration = 0;
res = matroska_parse_block(matroska,
blocks[i].bin.data, blocks[i].bin.size,
blocks[i].bin.pos,
res = ebml_parse(matroska, matroska_clusters, &cluster);
blocks_list = &cluster.blocks;
blocks = blocks_list->elem;
- for (i=0; i<blocks_list->nb_elem && !res; i++)
+ for (i=0; i<blocks_list->nb_elem; i++)
if (blocks[i].bin.size > 0 && blocks[i].bin.data) {
int is_keyframe = blocks[i].non_simple ? !blocks[i].reference : -1;
- if (!blocks[i].non_simple)
- blocks[i].duration = AV_NOPTS_VALUE;
res=matroska_parse_block(matroska,
blocks[i].bin.data, blocks[i].bin.size,
blocks[i].bin.pos, cluster.timecode,
pos);
}
ebml_free(matroska_cluster, &cluster);
- if (res < 0) matroska->done = 1;
return res;
}
static int matroska_read_packet(AVFormatContext *s, AVPacket *pkt)
{
MatroskaDemuxContext *matroska = s->priv_data;
- int ret = 0;
- while (!ret && matroska_deliver_packet(matroska, pkt)) {
+ while (matroska_deliver_packet(matroska, pkt)) {
+ int64_t pos = avio_tell(matroska->ctx->pb);
if (matroska->done)
return AVERROR_EOF;
- ret = matroska_parse_cluster(matroska);
- }
-
- if (ret == AVERROR_INVALIDDATA) {
- pkt->flags |= AV_PKT_FLAG_CORRUPT;
- return 0;
+ if (matroska_parse_cluster(matroska) < 0)
+ matroska_resync(matroska, pos);
}
- return ret;
+ return 0;
}
static int matroska_read_seek(AVFormatContext *s, int stream_index,
int i, index, index_sub, index_min;
/* Parse the CUES now since we need the index data to seek. */
- if (matroska->cues_parsing_deferred) {
- matroska_parse_cues(matroska);
+ if (matroska->cues_parsing_deferred > 0) {
matroska->cues_parsing_deferred = 0;
+ matroska_parse_cues(matroska);
}
if (!st->nb_index_entries)
- return 0;
+ goto err;
timestamp = FFMAX(timestamp, st->index_entries[0].timestamp);
if ((index = av_index_search_timestamp(st, timestamp, flags)) < 0) {
}
matroska_clear_queue(matroska);
- if (index < 0)
- return 0;
+ if (index < 0 || (matroska->cues_parsing_deferred < 0 && index == st->nb_index_entries - 1))
+ goto err;
index_min = index;
for (i=0; i < matroska->tracks.nb_elem; i++) {
avio_seek(s->pb, st->index_entries[index_min].pos, SEEK_SET);
matroska->current_id = 0;
+ st->skip_to_keyframe =
matroska->skip_to_keyframe = !(flags & AVSEEK_FLAG_ANY);
matroska->skip_to_timecode = st->index_entries[index].timestamp;
matroska->done = 0;
+ matroska->num_levels = 0;
ff_update_cur_dts(s, st, st->index_entries[index].timestamp);
return 0;
+err:
+ // slightly hackish but allows proper fallback to
+ // the generic seeking code.
+ matroska_clear_queue(matroska);
+ matroska->current_id = 0;
+ st->skip_to_keyframe =
+ matroska->skip_to_keyframe = 0;
+ matroska->done = 0;
+ matroska->num_levels = 0;
+ return -1;
}
static int matroska_read_close(AVFormatContext *s)
* Copyright (c) 2001 Fabrice Bellard
* Copyright (c) 2009 Baptiste Coudurier <baptiste dot coudurier at gmail dot com>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/mathematics.h"
#include "libavutil/avstring.h"
#include "libavutil/dict.h"
+#include "libavutil/opt.h"
+#include "libavutil/timecode.h"
#include "libavcodec/ac3tab.h"
#include "avformat.h"
#include "internal.h"
return 0;
}
+/*
+ * Parse one iTunes-style custom metadata atom ('----'): walk up to three
+ * sub-atoms, collecting a 'name' payload into key[] and a 'data' payload
+ * into data[].  Recognized keys: "iTunSMPB" (gapless playback info; the
+ * priming sample count is stored in sc->start_pad) and "cdec".  Returns 1
+ * when the key/data pair was consumed, 0 otherwise.
+ */
+static int mov_read_custom_metadata(MOVContext *c, AVIOContext *pb, MOVAtom atom)
+{
+    char key[1024]={0}, data[1024]={0};
+    int i;
+    AVStream *st;
+    MOVStreamContext *sc;
+
+    if (c->fc->nb_streams < 1)
+        return 0;
+    /* the tag applies to the most recently created stream */
+    st = c->fc->streams[c->fc->nb_streams-1];
+    sc = st->priv_data;
+
+    if (atom.size <= 8) return 0;
+
+    for (i = 0; i < 3; i++) { // Parse up to three sub-atoms looking for name and data.
+        int data_size = avio_rb32(pb);
+        int tag = avio_rl32(pb);
+        int str_size = 0, skip_size = 0;
+        char *target = NULL;
+
+        switch (tag) {
+        case MKTAG('n','a','m','e'):
+            avio_rb32(pb); // version/flags
+            str_size = skip_size = data_size - 12;
+            atom.size -= 12;
+            target = key;
+            break;
+        case MKTAG('d','a','t','a'):
+            avio_rb32(pb); // version/flags
+            avio_rb32(pb); // reserved (zero)
+            str_size = skip_size = data_size - 16;
+            atom.size -= 16;
+            target = data;
+            break;
+        default:
+            skip_size = data_size - 8;
+            str_size = 0;
+            break;
+        }
+
+        if (target) {
+            /* clamp to the buffer (key[] and data[] are both 1024 bytes) and
+             * to what remains of the atom.  NOTE(review): a data_size smaller
+             * than the fixed sub-atom header would make str_size negative
+             * before this clamp — confirm such input cannot reach here */
+            str_size = FFMIN3(sizeof(data)-1, str_size, atom.size);
+            avio_read(pb, target, str_size);
+            target[str_size] = 0;
+        }
+        atom.size -= skip_size;
+
+        // If we didn't read the full data chunk for the sub-atom, skip to the end of it.
+        if (skip_size > str_size) avio_skip(pb, skip_size - str_size);
+    }
+
+    if (*key && *data) {
+        if (strcmp(key, "iTunSMPB") == 0) {
+            /* value is "xxx priming remainder samples ..." in hex; only a
+             * plausible priming count (1..16383) is kept as start padding */
+            int priming, remainder, samples;
+            if(sscanf(data, "%*X %X %X %X", &priming, &remainder, &samples) == 3){
+                if(priming>0 && priming<16384)
+                    sc->start_pad = priming;
+                return 1;
+            }
+        }
+        if (strcmp(key, "cdec") == 0) {
+            /* codec hint tag: recognized but deliberately not exported */
+//            av_dict_set(&st->metadata, key, data, 0);
+            return 1;
+        }
+    }
+    return 0;
+}
+
static const uint32_t mac_to_unicode[128] = {
0x00C4,0x00C5,0x00C7,0x00C9,0x00D1,0x00D6,0x00DC,0x00E1,
0x00E0,0x00E2,0x00E4,0x00E3,0x00E5,0x00E7,0x00E9,0x00E8,
uint8_t t, c = avio_r8(pb);
if (c < 0x80 && p < end)
*p++ = c;
- else
+ else if (p < end)
PUT_UTF8(mac_to_unicode[c-0x80], t, if (p < end) *p++ = t;);
}
*p = 0;
uint32_t data_type = 0, str_size;
int (*parse)(MOVContext*, AVIOContext*, unsigned, const char*) = NULL;
+ if (c->itunes_metadata && atom.type == MKTAG('-','-','-','-'))
+ return mov_read_custom_metadata(c, pb, atom);
+
switch (atom.type) {
case MKTAG(0xa9,'n','a','m'): key = "title"; break;
case MKTAG(0xa9,'a','u','t'):
case MKTAG(0xa9,'w','r','t'): key = "composer"; break;
case MKTAG( 'c','p','r','t'):
case MKTAG(0xa9,'c','p','y'): key = "copyright"; break;
+ case MKTAG(0xa9,'g','r','p'): key = "grouping"; break;
+ case MKTAG(0xa9,'l','y','r'): key = "lyrics"; break;
case MKTAG(0xa9,'c','m','t'):
case MKTAG(0xa9,'i','n','f'): key = "comment"; break;
case MKTAG(0xa9,'a','l','b'): key = "album"; break;
if (parse)
parse(c, pb, str_size, key);
else {
- if (data_type == 3 || (data_type == 0 && langcode < 0x800)) { // MAC Encoded
+ if (data_type == 3 || (data_type == 0 && (langcode < 0x400 || langcode == 0x7fff))) { // MAC Encoded
mov_read_mac_string(c, pb, str_size, str, sizeof(str));
} else {
avio_read(pb, str, str_size);
if (entries >= UINT_MAX / sizeof(*sc->drefs))
return AVERROR_INVALIDDATA;
av_free(sc->drefs);
+ sc->drefs_count = 0;
sc->drefs = av_mallocz(entries * sizeof(*sc->drefs));
if (!sc->drefs)
return AVERROR(ENOMEM);
avio_skip(pb, 16);
for (type = 0; type != -1 && avio_tell(pb) < next; ) {
- if (pb->eof_reached)
+ if(url_feof(pb))
return AVERROR_EOF;
type = avio_rb16(pb);
len = avio_rb16(pb);
AVStream *st;
uint32_t type;
uint32_t av_unused ctype;
+ int title_size;
+ char *title_str;
if (c->fc->nb_streams < 1) // meta before first trak
return 0;
avio_rb32(pb); /* component flags */
avio_rb32(pb); /* component flags mask */
+ title_size = atom.size - 24;
+ if (title_size > 0) {
+ title_str = av_malloc(title_size + 1); /* Add null terminator */
+ if (!title_str)
+ return AVERROR(ENOMEM);
+ avio_read(pb, title_str, title_size);
+ title_str[title_size] = 0;
+ if (title_str[0])
+ av_dict_set(&st->metadata, "handler_name", title_str +
+ (!c->isom && title_str[0] == title_size - 1), 0);
+ av_freep(&title_str);
+ }
+
return 0;
}
char buffer[32];
if (time) {
struct tm *ptm;
- time -= 2082844800; /* seconds between 1904-01-01 and Epoch */
+ if(time >= 2082844800)
+ time -= 2082844800; /* seconds between 1904-01-01 and Epoch */
ptm = gmtime(&time);
if (!ptm) return;
strftime(buffer, sizeof(buffer), "%Y-%m-%d %H:%M:%S", ptm);
av_dlog(c->fc, "time scale = %i\n", c->time_scale);
c->duration = (version == 1) ? avio_rb64(pb) : avio_rb32(pb); /* duration */
+ // set the AVCodecContext duration because the duration of individual tracks
+ // may be inaccurate
+ if (c->time_scale > 0)
+ c->fc->duration = av_rescale(c->duration, AV_TIME_BASE, c->time_scale);
avio_rb32(pb); /* preferred scale */
avio_rb16(pb); /* preferred volume */
avio_rb32(pb); /* selection duration */
avio_rb32(pb); /* current time */
avio_rb32(pb); /* next track ID */
-
- return 0;
-}
-
-static int mov_read_smi(MOVContext *c, AVIOContext *pb, MOVAtom atom)
-{
- AVStream *st;
-
- if (c->fc->nb_streams < 1)
- return 0;
- st = c->fc->streams[c->fc->nb_streams-1];
-
- if ((uint64_t)atom.size > (1<<30))
- return AVERROR_INVALIDDATA;
-
- // currently SVQ3 decoder expect full STSD header - so let's fake it
- // this should be fixed and just SMI header should be passed
- av_free(st->codec->extradata);
- st->codec->extradata = av_mallocz(atom.size + 0x5a + FF_INPUT_BUFFER_PADDING_SIZE);
- if (!st->codec->extradata)
- return AVERROR(ENOMEM);
- st->codec->extradata_size = 0x5a + atom.size;
- memcpy(st->codec->extradata, "SVQ3", 4); // fake
- avio_read(pb, st->codec->extradata + 0x5a, atom.size);
- av_dlog(c->fc, "Reading SMI %"PRId64" %s\n", atom.size, st->codec->extradata + 0x5a);
return 0;
}
return 0;
st = c->fc->streams[c->fc->nb_streams-1];
- little_endian = avio_rb16(pb);
+ little_endian = avio_rb16(pb) & 0xFF;
av_dlog(c->fc, "enda %d\n", little_endian);
if (little_endian == 1) {
switch (st->codec->codec_id) {
}
/* FIXME modify qdm2/svq3/h264 decoders to take full atom as extradata */
-static int mov_read_extradata(MOVContext *c, AVIOContext *pb, MOVAtom atom)
+static int mov_read_extradata(MOVContext *c, AVIOContext *pb, MOVAtom atom,
+ enum AVCodecID codec_id)
{
AVStream *st;
uint64_t size;
if (c->fc->nb_streams < 1) // will happen with jp2 files
return 0;
st= c->fc->streams[c->fc->nb_streams-1];
+
+ if (st->codec->codec_id != codec_id)
+ return 0; /* unexpected codec_id - don't mess with extradata */
+
size= (uint64_t)st->codec->extradata_size + atom.size + 8 + FF_INPUT_BUFFER_PADDING_SIZE;
if (size > INT_MAX || (uint64_t)atom.size > INT_MAX)
return AVERROR_INVALIDDATA;
return 0;
}
+/* wrapper functions for reading ALAC/AVS/MJPEG/MJPEG2000 extradata atoms only for those codecs */
+/* 'alac' atom: append as extradata only when the stream really is ALAC */
+static int mov_read_alac(MOVContext *c, AVIOContext *pb, MOVAtom atom)
+{
+    return mov_read_extradata(c, pb, atom, AV_CODEC_ID_ALAC);
+}
+
+/* 'avss' atom: AVS-specific extradata */
+static int mov_read_avss(MOVContext *c, AVIOContext *pb, MOVAtom atom)
+{
+    return mov_read_extradata(c, pb, atom, AV_CODEC_ID_AVS);
+}
+
+/* 'jp2h' atom: JPEG 2000 header extradata */
+static int mov_read_jp2h(MOVContext *c, AVIOContext *pb, MOVAtom atom)
+{
+    return mov_read_extradata(c, pb, atom, AV_CODEC_ID_JPEG2000);
+}
+
+/* Avid ACLR/APRG/AALP/ARES atoms: AVUI extradata */
+static int mov_read_avid(MOVContext *c, AVIOContext *pb, MOVAtom atom)
+{
+    return mov_read_extradata(c, pb, atom, AV_CODEC_ID_AVUI);
+}
+
+/* 'SMI ' atom: SVQ3 (Sorenson) extradata */
+static int mov_read_svq3(MOVContext *c, AVIOContext *pb, MOVAtom atom)
+{
+    return mov_read_extradata(c, pb, atom, AV_CODEC_ID_SVQ3);
+}
+
static int mov_read_wave(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
AVStream *st;
if (st->codec->codec_id == AV_CODEC_ID_QDM2 || st->codec->codec_id == AV_CODEC_ID_QDMC) {
// pass all frma atom to codec, needed at least for QDMC and QDM2
av_free(st->codec->extradata);
+ st->codec->extradata_size = 0;
st->codec->extradata = av_mallocz(atom.size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!st->codec->extradata)
return AVERROR(ENOMEM);
return mov_read_default(c, pb, atom);
}
av_free(st->codec->extradata);
+ st->codec->extradata_size = 0;
st->codec->extradata = av_mallocz(atom.size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!st->codec->extradata)
return AVERROR(ENOMEM);
return 0;
av_free(st->codec->extradata);
+ st->codec->extradata_size = 0;
st->codec->extradata = av_mallocz(atom.size - 7 + FF_INPUT_BUFFER_PADDING_SIZE);
if (!st->codec->extradata)
return AVERROR(ENOMEM);
return AVERROR_INVALIDDATA;
av_free(st->codec->extradata);
+ st->codec->extradata_size = 0;
st->codec->extradata = av_mallocz(atom.size - 40 + FF_INPUT_BUFFER_PADDING_SIZE);
if (!st->codec->extradata)
return AVERROR(ENOMEM);
int dref_id = 1;
MOVAtom a = { AV_RL32("stsd") };
int64_t start_pos = avio_tell(pb);
- uint32_t size = avio_rb32(pb); /* size */
+ int64_t size = avio_rb32(pb); /* size */
uint32_t format = avio_rl32(pb); /* data format */
if (size >= 16) {
avio_rb32(pb); /* reserved */
avio_rb16(pb); /* reserved */
dref_id = avio_rb16(pb);
- } else {
- av_log(c->fc, AV_LOG_ERROR, "invalid size %d in stsd\n", size);
+ }else if (size <= 7){
+ av_log(c->fc, AV_LOG_ERROR, "invalid size %"PRId64" in stsd\n", size);
return AVERROR_INVALIDDATA;
}
/* Multiple fourcc, we skip JPEG. This is not correct, we should
* export it as a separate AVStream but this needs a few changes
* in the MOV demuxer, patch welcome. */
- multiple_stsd:
av_log(c->fc, AV_LOG_WARNING, "multiple fourcc not supported\n");
avio_skip(pb, size - (avio_tell(pb) - start_pos));
continue;
}
/* we cannot demux concatenated h264 streams because of different extradata */
if (st->codec->codec_tag && st->codec->codec_tag == AV_RL32("avc1"))
- goto multiple_stsd;
+            av_log(c->fc, AV_LOG_WARNING, "Concatenated H.264 might not play correctly.\n");
sc->pseudo_stream_id = st->codec->codec_tag ? -1 : pseudo_stream_id;
sc->dref_id= dref_id;
id = ff_codec_get_id(ff_codec_bmp_tags, format);
if (id > 0)
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
- else if (st->codec->codec_type == AVMEDIA_TYPE_DATA){
+ else if (st->codec->codec_type == AVMEDIA_TYPE_DATA ||
+ (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE &&
+ st->codec->codec_id == AV_CODEC_ID_NONE)){
id = ff_codec_get_id(ff_codec_movsubtitle_tags, format);
if (id > 0)
st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
}
}
- av_dlog(c->fc, "size=%d 4CC= %c%c%c%c codec_type=%d\n", size,
+ av_dlog(c->fc, "size=%"PRId64" 4CC= %c%c%c%c codec_type=%d\n", size,
(format >> 0) & 0xff, (format >> 8) & 0xff, (format >> 16) & 0xff,
(format >> 24) & 0xff, st->codec->codec_type);
(color_depth == 8)) {
/* for palette traversal */
unsigned int color_start, color_count, color_end;
- unsigned char r, g, b;
+ unsigned char a, r, g, b;
if (color_greyscale) {
int color_index, color_dec;
color_index = 255;
color_dec = 256 / (color_count - 1);
for (j = 0; j < color_count; j++) {
+ if (id == AV_CODEC_ID_CINEPAK){
+ r = g = b = color_count - 1 - color_index;
+ }else
r = g = b = color_index;
sc->palette[j] =
- (r << 16) | (g << 8) | (b);
+ (0xFFU << 24) | (r << 16) | (g << 8) | (b);
color_index -= color_dec;
if (color_index < 0)
color_index = 0;
g = color_table[j * 3 + 1];
b = color_table[j * 3 + 2];
sc->palette[j] =
- (r << 16) | (g << 8) | (b);
+ (0xFFU << 24) | (r << 16) | (g << 8) | (b);
}
} else {
/* load the palette from the file */
if ((color_start <= 255) &&
(color_end <= 255)) {
for (j = color_start; j <= color_end; j++) {
- /* each R, G, or B component is 16 bits;
- * only use the top 8 bits; skip alpha bytes
- * up front */
- avio_r8(pb);
+ /* each A, R, G, or B component is 16 bits;
+ * only use the top 8 bits */
+ a = avio_r8(pb);
avio_r8(pb);
r = avio_r8(pb);
avio_r8(pb);
b = avio_r8(pb);
avio_r8(pb);
sc->palette[j] =
- (r << 16) | (g << 8) | (b);
+ (a << 24 ) | (r << 16) | (g << 8) | (b);
}
}
}
st->codec->width = sc->width;
st->codec->height = sc->height;
} else {
- /* other codec type, just skip (rtp, mp4s, tmcd ...) */
+ if (st->codec->codec_tag == MKTAG('t','m','c','d')) {
+ MOVStreamContext *tmcd_ctx = st->priv_data;
+ int val;
+ avio_rb32(pb); /* reserved */
+ val = avio_rb32(pb); /* flags */
+ tmcd_ctx->tmcd_flags = val;
+ if (val & 1)
+ st->codec->flags2 |= CODEC_FLAG2_DROP_FRAME_TIMECODE;
+ avio_rb32(pb); /* time scale */
+ avio_rb32(pb); /* frame duration */
+ st->codec->time_base.den = avio_r8(pb); /* number of frame */
+ st->codec->time_base.num = 1;
+ }
+ /* other codec type, just skip (rtp, mp4s, ...) */
avio_skip(pb, size - (avio_tell(pb) - start_pos));
}
/* this will read extra atoms at the end (wave, alac, damr, avcC, SMI ...) */
st->codec->sample_rate = AV_RB32(st->codec->extradata+32);
}
break;
+ case AV_CODEC_ID_AC3:
+ st->need_parsing = AVSTREAM_PARSE_FULL;
+ break;
+ case AV_CODEC_ID_MPEG1VIDEO:
+ st->need_parsing = AVSTREAM_PARSE_FULL;
+ break;
case AV_CODEC_ID_VC1:
st->need_parsing = AVSTREAM_PARSE_FULL;
break;
sample_size = avio_rb32(pb);
if (!sc->sample_size) /* do not overwrite value computed in stsd */
sc->sample_size = sample_size;
+ sc->alt_sample_size = sample_size;
field_size = 32;
} else {
sample_size = 0;
av_dlog(c->fc, "track[%i].stts.entries = %i\n",
c->fc->nb_streams-1, entries);
- if (!entries)
- return 0;
if (entries >= UINT_MAX / sizeof(*sc->stts_data))
- return AVERROR(EINVAL);
+ return -1;
sc->stts_data = av_malloc(entries * sizeof(*sc->stts_data));
if (!sc->stts_data)
sample_count=avio_rb32(pb);
sample_duration = avio_rb32(pb);
+ /* sample_duration < 0 is invalid based on the spec */
+ if (sample_duration < 0) {
+ av_log(c->fc, AV_LOG_ERROR, "Invalid SampleDelta in STTS %d\n", sample_duration);
+ sample_duration = 1;
+ }
sc->stts_data[i].count= sample_count;
sc->stts_data[i].duration= sample_duration;
sc->ctts_data[i].count = count;
sc->ctts_data[i].duration= duration;
- if (duration < 0)
+
+ av_dlog(c->fc, "count=%d, duration=%d\n",
+ count, duration);
+
+ if (FFABS(duration) > (1<<28) && i+2<entries) {
+ av_log(c->fc, AV_LOG_WARNING, "CTTS invalid\n");
+ av_freep(&sc->ctts_data);
+ sc->ctts_count = 0;
+ return 0;
+ }
+
+ if (duration < 0 && i+2<entries)
sc->dts_shift = FFMAX(sc->dts_shift, -duration);
}
return 0;
}
+ /*
+  * Parse an 'sbgp' (sample-to-group) box.  Only the 'rap ' grouping type
+  * (random access point marking) is handled; its (count, index) runs are
+  * stored in sc->rap_group for use while building the sample index.
+  */
+ static int mov_read_sbgp(MOVContext *c, AVIOContext *pb, MOVAtom atom)
+ {
+     AVStream *st;
+     MOVStreamContext *sc;
+     unsigned int i, entries;
+     uint8_t version;
+     uint32_t grouping_type;
+
+     if (c->fc->nb_streams < 1)
+         return 0;
+     st = c->fc->streams[c->fc->nb_streams-1];
+     sc = st->priv_data;
+
+     version = avio_r8(pb); /* version */
+     avio_rb24(pb); /* flags */
+     grouping_type = avio_rl32(pb);
+     if (grouping_type != MKTAG( 'r','a','p',' '))
+         return 0; /* only support 'rap ' grouping */
+     if (version == 1)
+         avio_rb32(pb); /* grouping_type_parameter */
+
+     entries = avio_rb32(pb);
+     if (!entries)
+         return 0;
+     /* overflow guard for the allocation below */
+     if (entries >= UINT_MAX / sizeof(*sc->rap_group))
+         return AVERROR_INVALIDDATA;
+     sc->rap_group = av_malloc(entries * sizeof(*sc->rap_group));
+     if (!sc->rap_group)
+         return AVERROR(ENOMEM);
+
+     for (i = 0; i < entries && !pb->eof_reached; i++) {
+         sc->rap_group[i].count = avio_rb32(pb); /* sample_count */
+         sc->rap_group[i].index = avio_rb32(pb); /* group_description_index */
+     }
+
+     /* record how many entries were actually read (may stop short on EOF) */
+     sc->rap_group_count = i;
+
+     return pb->eof_reached ? AVERROR_EOF : 0;
+ }
+
static void mov_build_index(MOVContext *mov, AVStream *st)
{
MOVStreamContext *sc = st->priv_data;
AVIndexEntry *mem;
/* adjust first dts according to edit list */
- if (sc->time_offset && mov->time_scale > 0) {
- if (sc->time_offset < 0)
- sc->time_offset = av_rescale(sc->time_offset, sc->time_scale, mov->time_scale);
+ if ((sc->empty_duration || sc->start_time) && mov->time_scale > 0) {
+ if (sc->empty_duration)
+ sc->empty_duration = av_rescale(sc->empty_duration, sc->time_scale, mov->time_scale);
+ sc->time_offset = sc->start_time - sc->empty_duration;
current_dts = -sc->time_offset;
- if (sc->ctts_data && sc->stts_data && sc->stts_data[0].duration &&
- sc->ctts_data[0].duration / sc->stts_data[0].duration > 16) {
+ if (sc->ctts_count>0 && sc->stts_count>0 &&
+ sc->ctts_data[0].duration / FFMAX(sc->stts_data[0].duration, 1) > 16) {
/* more than 16 frames delay, dts are likely wrong
this happens with files created by iMovie */
sc->wrong_dts = 1;
unsigned int stts_sample = 0;
unsigned int sample_size;
unsigned int distance = 0;
- int key_off = (sc->keyframes && sc->keyframes[0] > 0) || (sc->stps_data && sc->stps_data[0] > 0);
+ unsigned int rap_group_index = 0;
+ unsigned int rap_group_sample = 0;
+ int rap_group_present = sc->rap_group_count && sc->rap_group;
+ int key_off = (sc->keyframe_count && sc->keyframes[0] > 0) || (sc->stps_data && sc->stps_data[0] > 0);
current_dts -= sc->dts_shift;
- if (!sc->sample_count)
+ if (!sc->sample_count || st->nb_index_entries)
return;
if (sc->sample_count >= UINT_MAX / sizeof(*st->index_entries) - st->nb_index_entries)
return;
if (stps_index + 1 < sc->stps_count)
stps_index++;
}
+ if (rap_group_present && rap_group_index < sc->rap_group_count) {
+ if (sc->rap_group[rap_group_index].index > 0)
+ keyframe = 1;
+ if (++rap_group_sample == sc->rap_group[rap_group_index].count) {
+ rap_group_sample = 0;
+ rap_group_index++;
+ }
+ }
if (keyframe)
distance = 0;
- sample_size = sc->sample_size > 0 ? sc->sample_size : sc->sample_sizes[current_sample];
+ sample_size = sc->alt_sample_size > 0 ? sc->alt_sample_size : sc->sample_sizes[current_sample];
if (sc->pseudo_stream_id == -1 ||
sc->stsc_data[stsc_index].id - 1 == sc->pseudo_stream_id) {
AVIndexEntry *e = &st->index_entries[st->nb_index_entries++];
}
}
-static int mov_open_dref(AVIOContext **pb, char *src, MOVDref *ref,
- AVIOInterruptCB *int_cb)
+static int mov_open_dref(AVIOContext **pb, const char *src, MOVDref *ref,
+ AVIOInterruptCB *int_cb, int use_absolute_path, AVFormatContext *fc)
{
/* try relative path, we do not try the absolute because it can leak information about our
system to an attacker */
if (ref->nlvl_to > 0 && ref->nlvl_from > 0) {
char filename[1024];
- char *src_path;
+ const char *src_path;
int i, l;
/* find a source dir */
if (!avio_open2(pb, filename, AVIO_FLAG_READ, int_cb, NULL))
return 0;
}
+ } else if (use_absolute_path) {
+ av_log(fc, AV_LOG_WARNING, "Using absolute path on user request, "
+ "this is a possible security issue\n");
+ if (!avio_open2(pb, ref->path, AVIO_FLAG_READ, int_cb, NULL))
+ return 0;
}
return AVERROR(ENOENT);
if (sc->dref_id-1 < sc->drefs_count && sc->drefs[sc->dref_id-1].path) {
MOVDref *dref = &sc->drefs[sc->dref_id - 1];
- if (mov_open_dref(&sc->pb, c->fc->filename, dref, &c->fc->interrupt_callback) < 0)
+ if (mov_open_dref(&sc->pb, c->fc->filename, dref, &c->fc->interrupt_callback,
+ c->use_absolute_path, c->fc) < 0)
av_log(c->fc, AV_LOG_ERROR,
"stream %d, error opening alias: path='%s', dir='%s', "
"filename='%s', volume='%s', nlvl_from=%d, nlvl_to=%d\n",
av_freep(&sc->keyframes);
av_freep(&sc->stts_data);
av_freep(&sc->stps_data);
+ av_freep(&sc->rap_group);
return 0;
}
sc->width = width >> 16;
sc->height = height >> 16;
+ //Assign clockwise rotate values based on transform matrix so that
+ //we can compensate for iPhone orientation during capture.
+
+ if (display_matrix[1][0] == -65536 && display_matrix[0][1] == 65536) {
+ av_dict_set(&st->metadata, "rotate", "90", 0);
+ }
+
+ if (display_matrix[0][0] == -65536 && display_matrix[1][1] == -65536) {
+ av_dict_set(&st->metadata, "rotate", "180", 0);
+ }
+
+ if (display_matrix[1][0] == 65536 && display_matrix[0][1] == -65536) {
+ av_dict_set(&st->metadata, "rotate", "270", 0);
+ }
+
// transform the display width/height according to the matrix
// skip this if the display matrix is the default identity matrix
// or if it is rotating the picture, ex iPhone 3GS
trex = av_realloc(c->trex_data, (c->trex_count+1)*sizeof(*c->trex_data));
if (!trex)
return AVERROR(ENOMEM);
+
+ c->fc->duration = AV_NOPTS_VALUE; // the duration from mvhd is not representing the whole file when fragments are used.
+
c->trex_data = trex;
trex = &c->trex_data[c->trex_count++];
avio_r8(pb); /* version */
if (avio_rl32(pb) != MKTAG('d','c','o','m'))
return AVERROR_INVALIDDATA;
if (avio_rl32(pb) != MKTAG('z','l','i','b')) {
- av_log(c->fc, AV_LOG_ERROR, "unknown compression for cmov atom !");
+ av_log(c->fc, AV_LOG_ERROR, "unknown compression for cmov atom !\n");
return AVERROR_INVALIDDATA;
}
avio_rb32(pb); /* cmvd atom */
static int mov_read_elst(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
MOVStreamContext *sc;
- int i, edit_count, version;
+ int i, edit_count, version, edit_start_index = 0;
if (c->fc->nb_streams < 1)
return 0;
time = (int32_t)avio_rb32(pb); /* media time */
}
avio_rb32(pb); /* Media rate */
- if (i == 0 && time >= -1) {
- sc->time_offset = time != -1 ? time : -duration;
- }
+ if (i == 0 && time == -1) {
+ sc->empty_duration = duration;
+ edit_start_index = 1;
+ } else if (i == edit_start_index && time >= 0)
+ sc->start_time = time;
}
if (edit_count > 1)
return 0;
}
+/*
+ * Parse a v2 'chan' (channel layout) atom: skip the 4-byte version/flags
+ * field and hand the remainder to the common channel-layout parser.
+ * NOTE(review): 4 bytes are skipped here yet atom.size - 4 is passed as
+ * the remaining size, and streams[0] is used without an nb_streams check —
+ * confirm both against the callers/registration of this handler.
+ */
+static int mov_read_chan2(MOVContext *c, AVIOContext *pb, MOVAtom atom)
+{
+    if (atom.size < 16)
+        return 0;
+    avio_skip(pb, 4);
+    ff_mov_read_chan(c->fc, pb, c->fc->streams[0], atom.size - 4);
+    return 0;
+}
+
+/*
+ * Parse a 'tref' (track reference) box: record the reference type tag and
+ * the list of referenced track IDs in the current stream's context.
+ * NOTE(review): the sub-box size read here is not validated against
+ * atom.size, and a previously parsed tref would leak sc->trefs — confirm.
+ */
+static int mov_read_tref(MOVContext *c, AVIOContext *pb, MOVAtom atom)
+{
+    uint32_t i, size;
+    MOVStreamContext *sc;
+
+    if (c->fc->nb_streams < 1)
+        return AVERROR_INVALIDDATA;
+    /* references belong to the most recently created stream */
+    sc = c->fc->streams[c->fc->nb_streams - 1]->priv_data;
+
+    size = avio_rb32(pb);
+    if (size < 12)
+        return 0;
+
+    /* NOTE(review): each entry below is read as 4 bytes, yet the count is
+     * derived as (size - 4) / 8 — verify against the tref box layout */
+    sc->trefs_count = (size - 4) / 8;
+    sc->trefs = av_malloc(sc->trefs_count * sizeof(*sc->trefs));
+    if (!sc->trefs)
+        return AVERROR(ENOMEM);
+
+    sc->tref_type = avio_rl32(pb);
+    for (i = 0; i < sc->trefs_count; i++)
+        sc->trefs[i] = avio_rb32(pb);
+    return 0;
+}
+
+/* Dispatch table mapping atom fourcc tags to their parser functions;
+ * terminated by a { 0, NULL } sentinel. */
static const MOVParseTableEntry mov_default_parse_table[] = {
-{ MKTAG('a','v','s','s'), mov_read_extradata },
+{ MKTAG('A','C','L','R'), mov_read_avid },
+{ MKTAG('A','P','R','G'), mov_read_avid },
+{ MKTAG('A','A','L','P'), mov_read_avid },
+{ MKTAG('A','R','E','S'), mov_read_avid },
+{ MKTAG('a','v','s','s'), mov_read_avss },
{ MKTAG('c','h','p','l'), mov_read_chpl },
{ MKTAG('c','o','6','4'), mov_read_stco },
{ MKTAG('c','t','t','s'), mov_read_ctts }, /* composition time to sample */
{ MKTAG('g','l','b','l'), mov_read_glbl },
{ MKTAG('h','d','l','r'), mov_read_hdlr },
{ MKTAG('i','l','s','t'), mov_read_ilst },
-{ MKTAG('j','p','2','h'), mov_read_extradata },
+{ MKTAG('j','p','2','h'), mov_read_jp2h },
{ MKTAG('m','d','a','t'), mov_read_mdat },
{ MKTAG('m','d','h','d'), mov_read_mdhd },
{ MKTAG('m','d','i','a'), mov_read_default },
{ MKTAG('m','o','o','v'), mov_read_moov },
{ MKTAG('m','v','e','x'), mov_read_default },
{ MKTAG('m','v','h','d'), mov_read_mvhd },
-{ MKTAG('S','M','I',' '), mov_read_smi }, /* Sorenson extension ??? */
-{ MKTAG('a','l','a','c'), mov_read_extradata }, /* alac specific atom */
+{ MKTAG('S','M','I',' '), mov_read_svq3 },
+{ MKTAG('a','l','a','c'), mov_read_alac }, /* alac specific atom */
{ MKTAG('a','v','c','C'), mov_read_glbl },
{ MKTAG('p','a','s','p'), mov_read_pasp },
{ MKTAG('s','t','b','l'), mov_read_default },
{ MKTAG('t','f','h','d'), mov_read_tfhd }, /* track fragment header */
{ MKTAG('t','r','a','k'), mov_read_trak },
{ MKTAG('t','r','a','f'), mov_read_default },
-{ MKTAG('t','r','e','f'), mov_read_default },
+{ MKTAG('t','r','e','f'), mov_read_tref },
{ MKTAG('c','h','a','p'), mov_read_chap },
{ MKTAG('t','r','e','x'), mov_read_trex },
{ MKTAG('t','r','u','n'), mov_read_trun },
{ MKTAG('c','m','o','v'), mov_read_cmov },
{ MKTAG('c','h','a','n'), mov_read_chan }, /* channel layout */
{ MKTAG('d','v','c','1'), mov_read_dvc1 },
+ { MKTAG('s','b','g','p'), mov_read_sbgp },
{ 0, NULL }
};
if (atom.size < 0)
atom.size = INT64_MAX;
- while (total_size + 8 < atom.size && !pb->eof_reached) {
+ while (total_size + 8 <= atom.size && !url_feof(pb)) {
int (*parse)(MOVContext*, AVIOContext*, MOVAtom) = NULL;
a.size = atom.size;
a.type=0;
if (atom.size >= 8) {
a.size = avio_rb32(pb);
a.type = avio_rl32(pb);
+ if (atom.type != MKTAG('r','o','o','t') &&
+ atom.type != MKTAG('m','o','o','v'))
+ {
+ if (a.type == MKTAG('t','r','a','k') || a.type == MKTAG('m','d','a','t'))
+ {
+ av_log(c->fc, AV_LOG_ERROR, "Broken file, trak/mdat not at top-level\n");
+ avio_skip(pb, -8);
+ return 0;
+ }
+ }
+ total_size += 8;
+ if (a.size == 1) { /* 64 bit extended size */
+ a.size = avio_rb64(pb) - 8;
+ total_size += 8;
+ }
}
av_dlog(c->fc, "type: %08x '%.4s' parent:'%.4s' sz: %"PRId64" %"PRId64" %"PRId64"\n",
a.type, (char*)&a.type, (char*)&atom.type, a.size, total_size, atom.size);
- total_size += 8;
- if (a.size == 1) { /* 64 bit extended size */
- a.size = avio_rb64(pb) - 8;
- total_size += 8;
- }
if (a.size == 0) {
- a.size = atom.size - total_size;
- if (a.size <= 8)
- break;
+ a.size = atom.size - total_size + 8;
}
a.size -= 8;
if (a.size < 0)
left = a.size - avio_tell(pb) + start_pos;
if (left > 0) /* skip garbage at atom end */
avio_skip(pb, left);
+ else if(left < 0) {
+ av_log(c->fc, AV_LOG_DEBUG, "undoing overread of %"PRId64" in '%.4s'\n", -left, (char*)&a.type);
+ avio_seek(pb, left, SEEK_CUR);
+ }
}
total_size += a.size;
if (len == 1 || len == 2)
title[len] = 0;
else
- avio_get_str(sc->pb, len - 2, title + 2, title_len - 2);
+ avio_get_str(sc->pb, INT_MAX, title + 2, len - 1);
}
}
avio_seek(sc->pb, cur_pos, SEEK_SET);
}
+/* Convert a 32-bit frame-number timecode value into an SMPTE timecode
+ * string and store it in the stream's "timecode" metadata entry.  The
+ * frame rate is taken from the stream's codec time base (den/num).
+ * Returns 0 on success, or the negative error from av_timecode_init(). */
+static int parse_timecode_in_framenum_format(AVFormatContext *s, AVStream *st,
+ uint32_t value, int flags)
+{
+ AVTimecode tc;
+ char buf[AV_TIMECODE_STR_SIZE];
+ AVRational rate = {st->codec->time_base.den,
+ st->codec->time_base.num};
+ int ret = av_timecode_init(&tc, rate, flags, 0, s);
+ if (ret < 0)
+ return ret;
+ av_dict_set(&st->metadata, "timecode",
+ av_timecode_make_string(&tc, buf, value), 0);
+ return 0;
+}
+
+/* Read the first sample of a tmcd (timecode) track and export it as the
+ * stream's "timecode" metadata.  Restores the read position afterwards.
+ * Returns -1 when the track has no indexed samples, 0 otherwise.
+ * NOTE(review): seeks on sc->pb but reads the value from s->pb — these are
+ * usually the same stream; confirm for external data references. */
+static int mov_read_timecode_track(AVFormatContext *s, AVStream *st)
+{
+ MOVStreamContext *sc = st->priv_data;
+ int flags = 0;
+ int64_t cur_pos = avio_tell(sc->pb);
+ uint32_t value;
+
+ if (!st->nb_index_entries)
+ return -1;
+
+ avio_seek(sc->pb, st->index_entries->pos, SEEK_SET);
+ value = avio_rb32(s->pb);
+
+ /* map tmcd atom flag bits onto the AVTimecode flags */
+ if (sc->tmcd_flags & 0x0001) flags |= AV_TIMECODE_FLAG_DROPFRAME;
+ if (sc->tmcd_flags & 0x0002) flags |= AV_TIMECODE_FLAG_24HOURSMAX;
+ if (sc->tmcd_flags & 0x0004) flags |= AV_TIMECODE_FLAG_ALLOWNEGATIVE;
+
+ /* Assume Counter flag is set to 1 in tmcd track (even though it is likely
+ * not the case) and thus assume "frame number format" instead of QT one.
+ * No sample with tmcd track can be found with a QT timecode at the moment,
+ * despite what the tmcd track "suggests" (Counter flag set to 0 means QT
+ * format). */
+ parse_timecode_in_framenum_format(s, st, value, flags);
+
+ avio_seek(sc->pb, cur_pos, SEEK_SET);
+ return 0;
+}
+
static int mov_read_close(AVFormatContext *s)
{
MOVContext *mov = s->priv_data;
av_freep(&sc->drefs[j].dir);
}
av_freep(&sc->drefs);
+ av_freep(&sc->trefs);
if (sc->pb && sc->pb != s->pb)
avio_close(sc->pb);
+ sc->pb = NULL;
+ av_freep(&sc->chunk_offsets);
+ av_freep(&sc->keyframes);
+ av_freep(&sc->sample_sizes);
+ av_freep(&sc->stps_data);
+ av_freep(&sc->stsc_data);
+ av_freep(&sc->stts_data);
}
if (mov->dv_demux) {
return 0;
}
+/* Return 1 if tmcd_id appears in any video stream's track references
+ * (trefs), 0 otherwise.  Non-video streams are skipped. */
+static int tmcd_is_referenced(AVFormatContext *s, int tmcd_id)
+{
+ int i, j;
+
+ for (i = 0; i < s->nb_streams; i++) {
+ AVStream *st = s->streams[i];
+ MOVStreamContext *sc = st->priv_data;
+
+ if (s->streams[i]->codec->codec_type != AVMEDIA_TYPE_VIDEO)
+ continue;
+ for (j = 0; j < sc->trefs_count; j++)
+ if (tmcd_id == sc->trefs[j])
+ return 1;
+ }
+ return 0;
+}
+
+/* look for a tmcd track not referenced by any video track, and export it globally */
+/* look for a tmcd track not referenced by any video track, and export it globally */
+static void export_orphan_timecode(AVFormatContext *s)
+{
+ int i;
+
+ for (i = 0; i < s->nb_streams; i++) {
+ AVStream *st = s->streams[i];
+
+ /* tref entries are matched against 1-based stream indices, hence i + 1 */
+ if (st->codec->codec_tag == MKTAG('t','m','c','d') &&
+ !tmcd_is_referenced(s, i + 1)) {
+ AVDictionaryEntry *tcr = av_dict_get(st->metadata, "timecode", NULL, 0);
+ if (tcr) {
+ av_dict_set(&s->metadata, "timecode", tcr->value, 0);
+ break; /* only the first orphan timecode is exported */
+ }
+ }
+ }
+}
+
static int mov_read_header(AVFormatContext *s)
{
MOVContext *mov = s->priv_data;
AVIOContext *pb = s->pb;
- int err;
+ int i, err;
MOVAtom atom = { AV_RL32("root") };
mov->fc = s;
}
av_dlog(mov->fc, "on_parse_exit_offset=%"PRId64"\n", avio_tell(pb));
- if (pb->seekable && mov->chapter_track > 0)
- mov_read_chapters(s);
+ if (pb->seekable) {
+ if (mov->chapter_track > 0)
+ mov_read_chapters(s);
+ for (i = 0; i < s->nb_streams; i++)
+ if (s->streams[i]->codec->codec_tag == AV_RL32("tmcd"))
+ mov_read_timecode_track(s, s->streams[i]);
+ }
+
+ /* copy timecode metadata from tmcd tracks to the related video streams */
+ for (i = 0; i < s->nb_streams; i++) {
+ AVStream *st = s->streams[i];
+ MOVStreamContext *sc = st->priv_data;
+ if (sc->tref_type == AV_RL32("tmcd") && sc->trefs_count) {
+ AVDictionaryEntry *tcr;
+ int tmcd_st_id = sc->trefs[0] - 1;
+
+ if (tmcd_st_id < 0 || tmcd_st_id >= s->nb_streams)
+ continue;
+ tcr = av_dict_get(s->streams[tmcd_st_id]->metadata, "timecode", NULL, 0);
+ if (tcr)
+ av_dict_set(&st->metadata, "timecode", tcr->value, 0);
+ }
+ }
+ export_orphan_timecode(s);
+
+ for (i = 0; i < s->nb_streams; i++) {
+ AVStream *st = s->streams[i];
+ MOVStreamContext *sc = st->priv_data;
+ if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && st->codec->codec_id == AV_CODEC_ID_AAC) {
+ if(!sc->start_pad)
+ sc->start_pad = 1024;
+ st->skip_samples = sc->start_pad;
+ }
+ }
if (mov->trex_data) {
- int i;
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
MOVStreamContext *sc = st->priv_data;
AVIndexEntry *sample;
AVStream *st = NULL;
int ret;
+ mov->fc = s;
retry:
sample = mov_find_next_sample(s, &st);
if (!sample) {
avio_seek(s->pb, mov->next_root_atom, SEEK_SET);
mov->next_root_atom = 0;
if (mov_read_default(mov, s->pb, (MOVAtom){ AV_RL32("root"), INT64_MAX }) < 0 ||
- s->pb->eof_reached)
+ url_feof(s->pb))
return AVERROR_EOF;
av_dlog(s, "read fragments, offset 0x%"PRIx64"\n", avio_tell(s->pb));
goto retry;
}
#if CONFIG_DV_DEMUXER
if (mov->dv_demux && sc->dv_audio_container) {
- avpriv_dv_produce_packet(mov->dv_demux, pkt, pkt->data, pkt->size);
+ avpriv_dv_produce_packet(mov->dv_demux, pkt, pkt->data, pkt->size, pkt->pos);
av_free(pkt->data);
pkt->size = 0;
ret = avpriv_dv_get_packet(mov->dv_demux, pkt);
if (stream_index >= s->nb_streams)
return AVERROR_INVALIDDATA;
- if (sample_time < 0)
- sample_time = 0;
st = s->streams[stream_index];
sample = mov_seek_stream(s, st, sample_time, flags);
seek_timestamp = st->index_entries[sample].timestamp;
for (i = 0; i < s->nb_streams; i++) {
+ MOVStreamContext *sc = s->streams[i]->priv_data;
st = s->streams[i];
+ st->skip_samples = (sample_time <= 0) ? sc->start_pad : 0;
+
if (stream_index == i)
continue;
return 0;
}
+/* Demuxer private options exposed through the AVOptions API.
+ * NOTE(review): FF_OPT_TYPE_INT with a .dbl default is the legacy AVOption
+ * style; newer trees use AV_OPT_TYPE_INT with .i64 — confirm API version. */
+static const AVOption options[] = {
+ {"use_absolute_path",
+ "allow using absolute path when opening alias, this is a possible security issue",
+ offsetof(MOVContext, use_absolute_path), FF_OPT_TYPE_INT, {.dbl = 0},
+ 0, 1, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_DECODING_PARAM},
+ {NULL}
+};
+
+/* AVClass wiring so the private options above are visible to av_opt_*. */
+static const AVClass class = {
+ .class_name = "mov,mp4,m4a,3gp,3g2,mj2",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+/* QuickTime / MOV demuxer registration. */
AVInputFormat ff_mov_demuxer = {
.name = "mov,mp4,m4a,3gp,3g2,mj2",
.long_name = NULL_IF_CONFIG_SMALL("QuickTime / MOV"),
.read_packet = mov_read_packet,
.read_close = mov_read_close,
.read_seek = mov_read_seek,
+ .priv_class = &class,
};
* MP3 demuxer
* Copyright (c) 2003 Fabrice Bellard
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "id3v1.h"
#include "libavcodec/mpegaudiodecheader.h"
+ #define XING_FLAG_FRAMES 0x01
+ #define XING_FLAG_SIZE 0x02
+ #define XING_FLAG_TOC 0x04
+
+ #define XING_TOC_COUNT 100
+
+/* Per-demuxer state for the raw MP3 reader. */
+typedef struct {
+ int64_t filesize; /* total input size; used to stop reads before the ID3v1 tag */
+ int start_pad; /* encoder delay in samples, from the LAME/Lavf info tag */
+ int end_pad; /* encoder padding in samples, from the LAME/Lavf info tag */
+} MP3Context;
+
/* mp3 read */
static int mp3_read_probe(AVProbeData *p)
}
// keep this in sync with ac3 probe, both need to avoid
// issues with MPEG-files!
- if (first_frames >= 4) return AVPROBE_SCORE_MAX / 2 + 1;
-
- if (max_frames) {
- int pes = 0, i;
- unsigned int code = -1;
-
-#define VIDEO_ID 0x000001e0
-#define AUDIO_ID 0x000001c0
- /* do a search for mpegps headers to be able to properly bias
- * towards mpegps if we detect this stream as both. */
- for (i = 0; i<p->buf_size; i++) {
- code = (code << 8) + p->buf[i];
- if ((code & 0xffffff00) == 0x100) {
- if ((code & 0x1f0) == VIDEO_ID) pes++;
- else if((code & 0x1e0) == AUDIO_ID) pes++;
- }
- }
-
- if (pes)
- max_frames = (max_frames + pes - 1) / pes;
- }
- if (max_frames > 500) return AVPROBE_SCORE_MAX / 2;
- else if (max_frames >= 4) return AVPROBE_SCORE_MAX / 4;
- else if (max_frames >= 1) return 1;
- else return 0;
+ if (first_frames>=4) return AVPROBE_SCORE_MAX/2+1;
+ else if(max_frames>200)return AVPROBE_SCORE_MAX/2;
+ else if(max_frames>=4) return AVPROBE_SCORE_MAX/4;
+ else if(ff_id3v2_match(buf0, ID3v2_DEFAULT_MAGIC) && 2*ff_id3v2_tag_len(buf0) >= p->buf_size)
+ return AVPROBE_SCORE_MAX/8;
+ else if(max_frames>=1) return 1;
+ else return 0;
//mpegps_mp3_unrecognized_format.mpg has max_frames=3
}
+ /* Read the 100-byte Xing TOC and turn each entry into a seek-index
+ * entry: byte i maps file position b/256 of filesize to timestamp
+ * i/100 of duration. Skipped with a warning if the file size cannot
+ * be determined. */
+ static void read_xing_toc(AVFormatContext *s, int64_t filesize, int64_t duration)
+ {
+ int i;
+
+ if (!filesize &&
+ !(filesize = avio_size(s->pb))) {
+ av_log(s, AV_LOG_WARNING, "Cannot determine file size, skipping TOC table.\n");
+ return;
+ }
+
+ for (i = 0; i < XING_TOC_COUNT; i++) {
+ uint8_t b = avio_r8(s->pb);
+
+ av_add_index_entry(s->streams[0],
+ av_rescale(b, filesize, 256),
+ av_rescale(i, duration, XING_TOC_COUNT),
+ 0, 0, AVINDEX_KEYFRAME);
+ }
+ }
+
/**
* Try to find Xing/Info/VBRI tags and compute duration from info therein
*/
static int mp3_parse_vbr_tags(AVFormatContext *s, AVStream *st, int64_t base)
{
+ MP3Context *mp3 = s->priv_data;
uint32_t v, spf;
unsigned frames = 0; /* Total number of frames in file */
unsigned size = 0; /* Total number of bytes in the stream */
if(c.layer != 3)
return -1;
+ spf = c.lsf ? 576 : 1152; /* Samples per frame, layer 3 */
+
/* Check for Xing / Info tag */
avio_skip(s->pb, xing_offtbl[c.lsf == 1][c.nb_channels == 1]);
v = avio_rb32(s->pb);
if(v == MKBETAG('X', 'i', 'n', 'g') || v == MKBETAG('I', 'n', 'f', 'o')) {
v = avio_rb32(s->pb);
- if(v & 0x1)
+ if(v & XING_FLAG_FRAMES)
frames = avio_rb32(s->pb);
- if(v & 0x2)
+ if(v & XING_FLAG_SIZE)
size = avio_rb32(s->pb);
- if(v & 4)
- avio_skip(s->pb, 100);
+ if (v & XING_FLAG_TOC && frames)
+ read_xing_toc(s, size, av_rescale_q(frames, (AVRational){spf, c.sample_rate},
+ st->time_base));
+ if(v & 8)
+ avio_skip(s->pb, 4);
+
+ v = avio_rb32(s->pb);
+ if(v == MKBETAG('L', 'A', 'M', 'E') || v == MKBETAG('L', 'a', 'v', 'f')) {
+ avio_skip(s->pb, 21-4);
+ v= avio_rb24(s->pb);
+ mp3->start_pad = v>>12;
+ mp3-> end_pad = v&4095;
+ st->skip_samples = mp3->start_pad + 528 + 1;
+ av_log(s, AV_LOG_DEBUG, "pad %d %d\n", mp3->start_pad, mp3-> end_pad);
+ }
}
/* Check for VBRI tag (always 32 bytes after end of mpegaudio header) */
/* Skip the vbr tag frame */
avio_seek(s->pb, base + vbrtag_size, SEEK_SET);
- spf = c.lsf ? 576 : 1152; /* Samples per frame, layer 3 */
if(frames)
st->duration = av_rescale_q(frames, (AVRational){spf, c.sample_rate},
st->time_base);
static int mp3_read_header(AVFormatContext *s)
{
+ MP3Context *mp3 = s->priv_data;
AVStream *st;
int64_t off;
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = AV_CODEC_ID_MP3;
- st->need_parsing = AVSTREAM_PARSE_FULL;
+ st->need_parsing = AVSTREAM_PARSE_FULL_RAW;
st->start_time = 0;
// lcm of all mp3 sample rates
avpriv_set_pts_info(st, 64, 1, 14112000);
+ s->pb->maxsize = -1;
off = avio_tell(s->pb);
if (!av_dict_get(s->metadata, "", NULL, AV_DICT_IGNORE_SUFFIX))
ff_id3v1_read(s);
+ if(s->pb->seekable)
+ mp3->filesize = avio_size(s->pb);
+
if (mp3_parse_vbr_tags(s, st, off) < 0)
avio_seek(s->pb, off, SEEK_SET);
+/* Read one raw chunk of the MP3 stream into a packet.  The read size is
+ * clamped to the known end of file, and a trailing ID3v1 "TAG" footer is
+ * stripped from the final packet. */
static int mp3_read_packet(AVFormatContext *s, AVPacket *pkt)
{
- int ret;
-
- ret = av_get_packet(s->pb, pkt, MP3_PACKET_SIZE);
- if (ret < 0)
- return ret;
+ MP3Context *mp3 = s->priv_data;
+ int ret, size;
+ int64_t pos;
+
+ size= MP3_PACKET_SIZE;
+ pos = avio_tell(s->pb);
+ /* clamp the read so it does not run past the recorded file size */
+ if(mp3->filesize > ID3v1_TAG_SIZE && pos < mp3->filesize)
+ size= FFMIN(size, mp3->filesize - pos);
+
+ ret= av_get_packet(s->pb, pkt, size);
+ if (ret <= 0) {
+ if(ret<0)
+ return ret;
+ return AVERROR_EOF; /* zero bytes read means end of stream */
+ }
+ /* raw chunks are not frame-aligned, so the corrupt flag is meaningless */
+ pkt->flags &= ~AV_PKT_FLAG_CORRUPT;
pkt->stream_index = 0;
- if (ret > ID3v1_TAG_SIZE &&
+ if (ret >= ID3v1_TAG_SIZE &&
memcmp(&pkt->data[ret - ID3v1_TAG_SIZE], "TAG", 3) == 0)
ret -= ID3v1_TAG_SIZE;
return ret;
}
+/* Seek using the Xing-TOC seek index: locate the index entry for the
+ * requested timestamp, seek to its position, then resynchronize on the
+ * next valid MPEG audio frame header before returning.
+ * NOTE(review): stream_index is ignored; stream 0 is always used. */
- static int read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
+ static int mp3_seek(AVFormatContext *s, int stream_index, int64_t timestamp,
+ int flags)
{
- AVStream *st = s->streams[stream_index];
+ MP3Context *mp3 = s->priv_data;
+ AVIndexEntry *ie;
+ AVStream *st = s->streams[0];
+ int64_t ret = av_index_search_timestamp(st, timestamp, flags);
+ uint32_t header = 0;
+
+ if (ret < 0)
+ return ret;
+
+ ie = &st->index_entries[ret];
+ ret = avio_seek(s->pb, ie->pos, SEEK_SET);
+ if (ret < 0)
+ return ret;
- st->skip_samples = timestamp <= 0 ? mp3->start_pad + 528 + 1 : 0;
+ /* scan forward one byte at a time until a plausible frame header */
+ while (!s->pb->eof_reached) {
+ header = (header << 8) + avio_r8(s->pb);
+ if (ff_mpa_check_header(header) >= 0) {
+ ff_update_cur_dts(s, st, ie->timestamp);
+ ret = avio_seek(s->pb, -4, SEEK_CUR);
++
++ st->skip_samples = ie->timestamp <= 0 ? mp3->start_pad + 528 + 1 : 0;
++
+ return (ret >= 0) ? 0 : ret;
+ }
+ }
- return -1;
+ return AVERROR_EOF;
}
+/* MP2/MP3 raw-stream demuxer registration. */
AVInputFormat ff_mp3_demuxer = {
.name = "mp3",
.long_name = NULL_IF_CONFIG_SMALL("MP2/3 (MPEG audio layer 2/3)"),
+ .priv_data_size = sizeof(MP3Context),
.read_probe = mp3_read_probe,
.read_header = mp3_read_header,
.read_packet = mp3_read_packet,
- .read_seek = read_seek,
+ .read_seek = mp3_seek,
.flags = AVFMT_GENERIC_INDEX,
.extensions = "mp2,mp3,m2a", /* XXX: use probe */
};