typedef struct {
ProresDSPContext dsp;
- AVFrame picture;
+ AVFrame *frame;
ScanTable scantable;
int scantable_type; ///< -1 = uninitialized, 0 = progressive, 1/2 = interlaced
avctx->bits_per_raw_sample = PRORES_BITS_PER_SAMPLE;
ff_proresdsp_init(&ctx->dsp);
- avctx->coded_frame = &ctx->picture;
- avcodec_get_frame_defaults(&ctx->picture);
- ctx->picture.type = AV_PICTURE_TYPE_I;
- ctx->picture.key_frame = 1;
-
ctx->scantable_type = -1; // set scantable type to uninitialized
memset(ctx->qmat_luma, 4, 64);
memset(ctx->qmat_chroma, 4, 64);
ctx->chroma_factor = (buf[12] >> 6) & 3;
ctx->mb_chroma_factor = ctx->chroma_factor + 2;
ctx->num_chroma_blocks = (1 << ctx->chroma_factor) >> 1;
+ ctx->alpha_info = buf[17] & 0xf;
+
+ if (ctx->alpha_info > 2) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid alpha mode %d\n", ctx->alpha_info);
+ return AVERROR_INVALIDDATA;
+ }
+
switch (ctx->chroma_factor) {
case 2:
- avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
+ avctx->pix_fmt = ctx->alpha_info ? AV_PIX_FMT_YUVA422P10
+ : AV_PIX_FMT_YUV422P10;
break;
case 3:
- avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
+ avctx->pix_fmt = ctx->alpha_info ? AV_PIX_FMT_YUVA444P10
+ : AV_PIX_FMT_YUV444P10;
break;
default:
av_log(avctx, AV_LOG_ERROR,
}
if (ctx->frame_type) { /* if interlaced */
- ctx->picture.interlaced_frame = 1;
- ctx->picture.top_field_first = ctx->frame_type & 1;
+ ctx->frame->interlaced_frame = 1;
+ ctx->frame->top_field_first = ctx->frame_type & 1;
} else {
- ctx->picture.interlaced_frame = 0;
+ ctx->frame->interlaced_frame = 0;
}
avctx->color_primaries = buf[14];
avctx->color_trc = buf[15];
avctx->colorspace = buf[16];
- ctx->alpha_info = buf[17] & 0xf;
- if (ctx->alpha_info)
- av_log_missing_feature(avctx, "Alpha channel", 0);
-
ctx->qmat_changed = 0;
ptr = buf + 20;
flags = buf[19];
ctx->num_x_mbs = (avctx->width + 15) >> 4;
ctx->num_y_mbs = (avctx->height +
- (1 << (4 + ctx->picture.interlaced_frame)) - 1) >>
- (4 + ctx->picture.interlaced_frame);
+ (1 << (4 + ctx->frame->interlaced_frame)) - 1) >>
+ (4 + ctx->frame->interlaced_frame);
remainder = ctx->num_x_mbs & ((1 << slice_width_factor) - 1);
num_x_slices = (ctx->num_x_mbs >> slice_width_factor) + (remainder & 1) +
/**
* Decode AC coefficients for all blocks in a slice.
*/
-static inline void decode_ac_coeffs(GetBitContext *gb, int16_t *out,
- int blocks_per_slice,
- int plane_size_factor,
- const uint8_t *scan)
+static inline int decode_ac_coeffs(GetBitContext *gb, int16_t *out,
+ int blocks_per_slice,
+ int plane_size_factor,
+ const uint8_t *scan)
{
int pos, block_mask, run, level, sign, run_cb_index, lev_cb_index;
int max_coeffs, bits_left;
bits_left = get_bits_left(gb);
if (bits_left <= 0 || (bits_left <= 8 && !show_bits(gb, bits_left)))
- return;
+ return 0;
run = decode_vlc_codeword(gb, ff_prores_ac_codebook[run_cb_index]);
+ if (run < 0)
+ return AVERROR_INVALIDDATA;
bits_left = get_bits_left(gb);
if (bits_left <= 0 || (bits_left <= 8 && !show_bits(gb, bits_left)))
- return;
+ return AVERROR_INVALIDDATA;
level = decode_vlc_codeword(gb, ff_prores_ac_codebook[lev_cb_index]) + 1;
+ if (level < 0)
+ return AVERROR_INVALIDDATA;
pos += run + 1;
if (pos >= max_coeffs)
out[((pos & block_mask) << 6) + scan[pos >> plane_size_factor]] =
(level ^ sign) - sign;
}
+
+ return 0;
}
/**
* Decode a slice plane (luma or chroma).
*/
-static void decode_slice_plane(ProresContext *ctx, ProresThreadData *td,
- const uint8_t *buf,
- int data_size, uint16_t *out_ptr,
- int linesize, int mbs_per_slice,
- int blocks_per_mb, int plane_size_factor,
- const int16_t *qmat, int is_chroma)
+static int decode_slice_plane(ProresContext *ctx, ProresThreadData *td,
+ const uint8_t *buf,
+ int data_size, uint16_t *out_ptr,
+ int linesize, int mbs_per_slice,
+ int blocks_per_mb, int plane_size_factor,
+ const int16_t *qmat, int is_chroma)
{
GetBitContext gb;
int16_t *block_ptr;
- int mb_num, blocks_per_slice;
+ int mb_num, blocks_per_slice, ret;
blocks_per_slice = mbs_per_slice * blocks_per_mb;
decode_dc_coeffs(&gb, td->blocks, blocks_per_slice);
- decode_ac_coeffs(&gb, td->blocks, blocks_per_slice,
- plane_size_factor, ctx->scantable.permutated);
+ ret = decode_ac_coeffs(&gb, td->blocks, blocks_per_slice,
+ plane_size_factor, ctx->scantable.permutated);
+ if (ret < 0)
+ return ret;
/* inverse quantization, inverse transform and output */
block_ptr = td->blocks;
}
}
}
+ return 0;
}
+/**
+ * Unpack alpha values for one slice.
+ *
+ * Alpha samples are stored as deltas from the previous decoded value,
+ * starting from an implicit initial value of all-ones (fully opaque).
+ * num_bits is 8 or 16 (see decode_alpha_plane); in both cases the output
+ * written to dst is scaled to 10 bits per sample.
+ */
+static void unpack_alpha(GetBitContext *gb, uint16_t *dst, int num_coeffs,
+                         const int num_bits)
+{
+    const int mask = (1 << num_bits) - 1;
+    int i, idx, val, alpha_val;
+
+    idx = 0;
+    alpha_val = mask;            // initial alpha: fully opaque
+    do {
+        /* run of individually-coded deltas */
+        do {
+            if (get_bits1(gb))
+                val = get_bits(gb, num_bits);  // escape: raw full-width delta
+            else {
+                /* short signed delta: 7 bits for 16-bit alpha, 4 bits for 8-bit */
+                int sign;
+                val = get_bits(gb, num_bits == 16 ? 7 : 4);
+                sign = val & 1;
+                val = (val + 2) >> 1;
+                if (sign)
+                    val = -val;
+            }
+            alpha_val = (alpha_val + val) & mask;  // accumulate, wrap to sample width
+            if (num_bits == 16)
+                dst[idx++] = alpha_val >> 6;       // 16-bit -> 10-bit
+            else
+                dst[idx++] = (alpha_val << 2) | (alpha_val >> 6);  // 8-bit -> 10-bit
+            if (idx >= num_coeffs - 1)
+                break;
+        } while (get_bits1(gb));
+        /* run-length: repeat the last alpha value; 4-bit count,
+           with 0 escaping to an 11-bit count */
+        val = get_bits(gb, 4);
+        if (!val)
+            val = get_bits(gb, 11);
+        if (idx + val > num_coeffs)                // clamp run to the slice size
+            val = num_coeffs - idx;
+        if (num_bits == 16)
+            for (i = 0; i < val; i++)
+                dst[idx++] = alpha_val >> 6;
+        else
+            for (i = 0; i < val; i++)
+                dst[idx++] = (alpha_val << 2) | (alpha_val >> 6);
+    } while (idx < num_coeffs);
+}
+
+/**
+ * Decode the alpha plane of one slice.
+ *
+ * alpha_info selects the coded sample depth: 2 means 16-bit alpha,
+ * otherwise 8-bit (alpha_info is validated to be <= 2 at header parse
+ * time). Decoded samples are written as 10-bit values into out_ptr,
+ * 16 rows of 16 * mbs_per_slice samples each.
+ */
+static void decode_alpha_plane(ProresContext *ctx, ProresThreadData *td,
+                               const uint8_t *buf, int data_size,
+                               uint16_t *out_ptr, int linesize,
+                               int mbs_per_slice)
+{
+    GetBitContext gb;
+    int i;
+    uint16_t *block_ptr;
+
+    /* clear the per-thread coefficient buffer (max 8 MBs x 4 blocks x 64) */
+    memset(td->blocks, 0, 8 * 4 * 64 * sizeof(*td->blocks));
+
+    init_get_bits(&gb, buf, data_size << 3);
+
+    if (ctx->alpha_info == 2)
+        unpack_alpha(&gb, td->blocks, mbs_per_slice * 4 * 64, 16);
+    else
+        unpack_alpha(&gb, td->blocks, mbs_per_slice * 4 * 64, 8);
+
+    block_ptr = td->blocks;
+
+    /* copy the unpacked samples to the picture: 16 rows per slice;
+       linesize is in bytes, so halve it for a uint16_t row stride */
+    for (i = 0; i < 16; i++) {
+        memcpy(out_ptr, block_ptr, 16 * mbs_per_slice * sizeof(*out_ptr));
+        out_ptr += linesize >> 1;
+        block_ptr += 16 * mbs_per_slice;
+    }
+}
+
static int decode_slice(AVCodecContext *avctx, void *tdata)
{
ProresThreadData *td = tdata;
int slice_num = td->slice_num;
int mbs_per_slice = td->slice_width;
const uint8_t *buf;
- uint8_t *y_data, *u_data, *v_data;
- AVFrame *pic = avctx->coded_frame;
+ uint8_t *y_data, *u_data, *v_data, *a_data;
+ AVFrame *pic = ctx->frame;
int i, sf, slice_width_factor;
- int slice_data_size, hdr_size, y_data_size, u_data_size, v_data_size;
- int y_linesize, u_linesize, v_linesize;
+ int slice_data_size, hdr_size;
+ int y_data_size, u_data_size, v_data_size, a_data_size;
+ int y_linesize, u_linesize, v_linesize, a_linesize;
+ int coff[4];
+ int ret;
buf = ctx->slice_data[slice_num].index;
slice_data_size = ctx->slice_data[slice_num + 1].index - buf;
y_data = pic->data[0];
u_data = pic->data[1];
v_data = pic->data[2];
+ a_data = pic->data[3];
y_linesize = pic->linesize[0];
u_linesize = pic->linesize[1];
v_linesize = pic->linesize[2];
+ a_linesize = pic->linesize[3];
if (pic->interlaced_frame) {
if (!(pic_num ^ pic->top_field_first)) {
y_data += y_linesize;
u_data += u_linesize;
v_data += v_linesize;
+ if (a_data)
+ a_data += a_linesize;
}
y_linesize <<= 1;
u_linesize <<= 1;
v_linesize <<= 1;
+ a_linesize <<= 1;
}
+ y_data += (mb_y_pos << 4) * y_linesize + (mb_x_pos << 5);
+ u_data += (mb_y_pos << 4) * u_linesize + (mb_x_pos << ctx->mb_chroma_factor);
+ v_data += (mb_y_pos << 4) * v_linesize + (mb_x_pos << ctx->mb_chroma_factor);
+ if (a_data)
+ a_data += (mb_y_pos << 4) * a_linesize + (mb_x_pos << 5);
if (slice_data_size < 6) {
av_log(avctx, AV_LOG_ERROR, "slice data too small\n");
/* parse slice header */
hdr_size = buf[0] >> 3;
+ coff[0] = hdr_size;
y_data_size = AV_RB16(buf + 2);
+ coff[1] = coff[0] + y_data_size;
u_data_size = AV_RB16(buf + 4);
- v_data_size = hdr_size > 7 ? AV_RB16(buf + 6) :
- slice_data_size - y_data_size - u_data_size - hdr_size;
-
- if (hdr_size + y_data_size + u_data_size + v_data_size > slice_data_size ||
- v_data_size < 0 || hdr_size < 6) {
+ coff[2] = coff[1] + u_data_size;
+ v_data_size = hdr_size > 7 ? AV_RB16(buf + 6) : slice_data_size - coff[2];
+ coff[3] = coff[2] + v_data_size;
+ a_data_size = slice_data_size - coff[3];
+
+ /* if V or alpha component size is negative that means that previous
+ component sizes are too large */
+ if (v_data_size < 0 || a_data_size < 0 || hdr_size < 6) {
av_log(avctx, AV_LOG_ERROR, "invalid data size\n");
return AVERROR_INVALIDDATA;
}
}
/* decode luma plane */
- decode_slice_plane(ctx, td, buf + hdr_size, y_data_size,
- (uint16_t*) (y_data + (mb_y_pos << 4) * y_linesize +
- (mb_x_pos << 5)), y_linesize,
- mbs_per_slice, 4, slice_width_factor + 2,
- td->qmat_luma_scaled, 0);
+ ret = decode_slice_plane(ctx, td, buf + coff[0], y_data_size,
+ (uint16_t*) y_data, y_linesize,
+ mbs_per_slice, 4, slice_width_factor + 2,
+ td->qmat_luma_scaled, 0);
+
+ if (ret < 0)
+ return ret;
/* decode U chroma plane */
- decode_slice_plane(ctx, td, buf + hdr_size + y_data_size, u_data_size,
- (uint16_t*) (u_data + (mb_y_pos << 4) * u_linesize +
- (mb_x_pos << ctx->mb_chroma_factor)),
- u_linesize, mbs_per_slice, ctx->num_chroma_blocks,
- slice_width_factor + ctx->chroma_factor - 1,
- td->qmat_chroma_scaled, 1);
+ ret = decode_slice_plane(ctx, td, buf + coff[1], u_data_size,
+ (uint16_t*) u_data, u_linesize,
+ mbs_per_slice, ctx->num_chroma_blocks,
+ slice_width_factor + ctx->chroma_factor - 1,
+ td->qmat_chroma_scaled, 1);
+ if (ret < 0)
+ return ret;
/* decode V chroma plane */
- decode_slice_plane(ctx, td, buf + hdr_size + y_data_size + u_data_size,
- v_data_size,
- (uint16_t*) (v_data + (mb_y_pos << 4) * v_linesize +
- (mb_x_pos << ctx->mb_chroma_factor)),
- v_linesize, mbs_per_slice, ctx->num_chroma_blocks,
- slice_width_factor + ctx->chroma_factor - 1,
- td->qmat_chroma_scaled, 1);
+ ret = decode_slice_plane(ctx, td, buf + coff[2], v_data_size,
+ (uint16_t*) v_data, v_linesize,
+ mbs_per_slice, ctx->num_chroma_blocks,
+ slice_width_factor + ctx->chroma_factor - 1,
+ td->qmat_chroma_scaled, 1);
+ if (ret < 0)
+ return ret;
+
+ /* decode alpha plane if available */
+ if (a_data && a_data_size)
+ decode_alpha_plane(ctx, td, buf + coff[3], a_data_size,
+ (uint16_t*) a_data, a_linesize,
+ mbs_per_slice);
return 0;
}
AVPacket *avpkt)
{
ProresContext *ctx = avctx->priv_data;
- AVFrame *picture = avctx->coded_frame;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
int frame_hdr_size, pic_num, pic_data_size;
+ ctx->frame = data;
+ ctx->frame->pict_type = AV_PICTURE_TYPE_I;
+ ctx->frame->key_frame = 1;
+
/* check frame atom container */
if (buf_size < 28 || buf_size < AV_RB32(buf) ||
AV_RB32(buf + 4) != FRAME_ID) {
MOVE_DATA_PTR(frame_hdr_size);
- if (picture->data[0])
- avctx->release_buffer(avctx, picture);
-
- picture->reference = 0;
- if (ff_get_buffer(avctx, picture) < 0)
+ if (ff_get_buffer(avctx, ctx->frame, 0) < 0)
return -1;
- for (pic_num = 0; ctx->picture.interlaced_frame - pic_num + 1; pic_num++) {
+ for (pic_num = 0; ctx->frame->interlaced_frame - pic_num + 1; pic_num++) {
pic_data_size = decode_picture_header(ctx, buf, buf_size, avctx);
if (pic_data_size < 0)
return AVERROR_INVALIDDATA;
MOVE_DATA_PTR(pic_data_size);
}
- *got_frame = 1;
- *(AVFrame*) data = *avctx->coded_frame;
+ ctx->frame = NULL;
+ *got_frame = 1;
return avpkt->size;
}
{
ProresContext *ctx = avctx->priv_data;
- if (ctx->picture.data[0])
- avctx->release_buffer(avctx, &ctx->picture);
-
av_freep(&ctx->slice_data);
return 0;
AVCodec ff_prores_decoder = {
.name = "prores",
+ .long_name = NULL_IF_CONFIG_SMALL("Apple ProRes (iCodec Pro)"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_PRORES,
.priv_data_size = sizeof(ProresContext),
.close = decode_close,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
- .long_name = NULL_IF_CONFIG_SMALL("Apple ProRes (iCodec Pro)")
};