* Known FOURCCs: 'ap4h' (444), 'apch' (HQ), 'apcn' (422), 'apcs' (LT), 'apco' (Proxy)
*/
+#include "libavutil/mem_internal.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "dct.h"
{ FF_PROFILE_UNKNOWN }
};
-static const int qp_start_table[6] = { 8, 3, 2, 1, 1, 1};
-static const int qp_end_table[6] = { 13, 9, 6, 6, 5, 4};
-static const int bitrate_table[6] = { 1000, 2100, 3500, 5400, 7000, 10000};
+static const int qp_start_table[] = { 8, 3, 2, 1, 1, 1};
+static const int qp_end_table[] = { 13, 9, 6, 6, 5, 4};
+static const int bitrate_table[] = { 1000, 2100, 3500, 5400, 7000, 10000};
-static const int valid_primaries[9] = { AVCOL_PRI_RESERVED0, AVCOL_PRI_BT709, AVCOL_PRI_UNSPECIFIED, AVCOL_PRI_BT470BG,
- AVCOL_PRI_SMPTE170M, AVCOL_PRI_BT2020, AVCOL_PRI_SMPTE431, AVCOL_PRI_SMPTE432,INT_MAX };
-static const int valid_trc[4] = { AVCOL_TRC_RESERVED0, AVCOL_TRC_BT709, AVCOL_TRC_UNSPECIFIED, INT_MAX };
-static const int valid_colorspace[5] = { AVCOL_SPC_BT709, AVCOL_SPC_UNSPECIFIED, AVCOL_SPC_SMPTE170M,
- AVCOL_SPC_BT2020_NCL, INT_MAX };
+static const int valid_primaries[] = { AVCOL_PRI_RESERVED0, AVCOL_PRI_BT709, AVCOL_PRI_UNSPECIFIED, AVCOL_PRI_BT470BG,
+ AVCOL_PRI_SMPTE170M, AVCOL_PRI_BT2020, AVCOL_PRI_SMPTE431, AVCOL_PRI_SMPTE432, INT_MAX };
+static const int valid_trc[] = { AVCOL_TRC_RESERVED0, AVCOL_TRC_BT709, AVCOL_TRC_UNSPECIFIED, AVCOL_TRC_SMPTE2084,
+ AVCOL_TRC_ARIB_STD_B67, INT_MAX };
+static const int valid_colorspace[] = { AVCOL_SPC_BT709, AVCOL_SPC_UNSPECIFIED, AVCOL_SPC_SMPTE170M,
+ AVCOL_SPC_BT2020_NCL, INT_MAX };
static const uint8_t QMAT_LUMA[6][64] = {
{
int qmat_luma[16][64];
int qmat_chroma[16][64];
+ const uint8_t *scantable;
int is_422;
int need_alpha;
+ int is_interlaced;
char *vendor;
} ProresContext;
}
#define QSCALE(qmat,ind,val) ((val) / ((qmat)[ind]))
-#define TO_GOLOMB(val) (((val) << 1) ^ ((val) >> 31))
+#define TO_GOLOMB(val) (((val) * 2) ^ ((val) >> 31))
#define DIFF_SIGN(val, sign) (((val) >> 31) ^ (sign))
#define IS_NEGATIVE(val) ((((val) >> 31) ^ -1) + 1)
#define TO_GOLOMB2(val,sign) ((val)==0 ? 0 : ((val) << 1) + (sign))
0x28, 0x28, 0x28, 0x4C };
static void encode_ac_coeffs(PutBitContext *pb,
- int16_t *in, int blocks_per_slice, int *qmat)
+ int16_t *in, int blocks_per_slice, int *qmat, const uint8_t ff_prores_scan[64])
{
int prev_run = 4;
int prev_level = 2;
int run = 0, level, code, i, j;
for (i = 1; i < 64; i++) {
- int indp = ff_prores_progressive_scan[i];
+ int indp = ff_prores_scan[i];
for (j = 0; j < blocks_per_slice; j++) {
int val = QSCALE(qmat, indp, in[(j << 6) + indp]);
if (val) {
}
}
-static int encode_slice_plane(int16_t *blocks, int mb_count, uint8_t *buf, unsigned buf_size, int *qmat, int sub_sample_chroma)
+static int encode_slice_plane(int16_t *blocks, int mb_count, uint8_t *buf, unsigned buf_size, int *qmat, int sub_sample_chroma,
+ const uint8_t ff_prores_scan[64])
{
int blocks_per_slice;
PutBitContext pb;
init_put_bits(&pb, buf, buf_size);
encode_dc_coeffs(&pb, blocks, blocks_per_slice, qmat);
- encode_ac_coeffs(&pb, blocks, blocks_per_slice, qmat);
+ encode_ac_coeffs(&pb, blocks, blocks_per_slice, qmat, ff_prores_scan);
flush_put_bits(&pb);
return put_bits_ptr(&pb) - pb.buf;
ProresContext* ctx = avctx->priv_data;
*y_data_size = encode_slice_plane(blocks_y, mb_count,
- buf, data_size, ctx->qmat_luma[qp - 1], 0);
+ buf, data_size, ctx->qmat_luma[qp - 1], 0, ctx->scantable);
if (!(avctx->flags & AV_CODEC_FLAG_GRAY)) {
*u_data_size = encode_slice_plane(blocks_u, mb_count, buf + *y_data_size, data_size - *y_data_size,
- ctx->qmat_chroma[qp - 1], ctx->is_422);
+ ctx->qmat_chroma[qp - 1], ctx->is_422, ctx->scantable);
*v_data_size = encode_slice_plane(blocks_v, mb_count, buf + *y_data_size + *u_data_size,
data_size - *y_data_size - *u_data_size,
- ctx->qmat_chroma[qp - 1], ctx->is_422);
+ ctx->qmat_chroma[qp - 1], ctx->is_422, ctx->scantable);
}
return *y_data_size + *u_data_size + *v_data_size;
if (run)
put_alpha_run(&pb, run);
flush_put_bits(&pb);
- *a_data_size = put_bits_count(&pb) >> 3;
+ *a_data_size = put_bytes_output(&pb);
if (put_bits_left(&pb) < 0) {
av_log(avctx, AV_LOG_ERROR,
}
}
-static void subimage_with_fill(uint16_t *src, unsigned x, unsigned y,
- unsigned stride, unsigned width, unsigned height, uint16_t *dst,
- unsigned dst_width, unsigned dst_height)
+static inline void subimage_with_fill_template(uint16_t *src, unsigned x, unsigned y,
+ unsigned stride, unsigned width, unsigned height, uint16_t *dst,
+ unsigned dst_width, unsigned dst_height, int is_alpha_plane,
+ int is_interlaced, int is_top_field)
{
-
int box_width = FFMIN(width - x, dst_width);
- int box_height = FFMIN(height - y, dst_height);
- int i, j, src_stride = stride >> 1;
+ int i, j, src_stride, box_height;
uint16_t last_pix, *last_line;
- src += y * src_stride + x;
+ if (!is_interlaced) {
+ src_stride = stride >> 1;
+ src += y * src_stride + x;
+ box_height = FFMIN(height - y, dst_height);
+ } else {
+ src_stride = stride; /* 2 lines stride */
+ src += y * src_stride + x;
+ box_height = FFMIN(height/2 - y, dst_height);
+ if (!is_top_field)
+ src += stride >> 1;
+ }
+
for (i = 0; i < box_height; ++i) {
for (j = 0; j < box_width; ++j) {
- dst[j] = src[j];
+ if (!is_alpha_plane) {
+ dst[j] = src[j];
+ } else {
+ dst[j] = src[j] << 6; /* alpha 10b to 16b */
+ }
+ }
+ if (!is_alpha_plane) {
+ last_pix = dst[j - 1];
+ } else {
+        last_pix = dst[j - 1] << 6; /* FIXME: dst[j-1] was already widened to 16b when stored above, so this shifts twice; edge padding likely wants plain dst[j - 1] */
}
- last_pix = dst[j - 1];
for (; j < dst_width; j++)
dst[j] = last_pix;
src += src_stride;
}
}
+/* Copy a (dst_width x dst_height) tile starting at (x, y) out of a luma or
+ * chroma plane into the packed dst buffer, padding with edge pixels when the
+ * tile runs past the plane. Thin wrapper over subimage_with_fill_template
+ * with is_alpha_plane = 0 (no 10b->16b widening); is_top_field is only
+ * meaningful when is_interlaced is set. */
+static void subimage_with_fill(uint16_t *src, unsigned x, unsigned y,
+                               unsigned stride, unsigned width, unsigned height, uint16_t *dst,
+                               unsigned dst_width, unsigned dst_height, int is_interlaced, int is_top_field)
+{
+    subimage_with_fill_template(src, x, y, stride, width, height, dst, dst_width, dst_height, 0, is_interlaced, is_top_field);
+}
+
/* reorganize alpha data and convert 10b -> 16b */
static void subimage_alpha_with_fill(uint16_t *src, unsigned x, unsigned y,
                                     unsigned stride, unsigned width, unsigned height, uint16_t *dst,
-                                     unsigned dst_width, unsigned dst_height)
+                                     unsigned dst_width, unsigned dst_height, int is_interlaced, int is_top_field)
{
-    int box_width = FFMIN(width - x, dst_width);
-    int box_height = FFMIN(height - y, dst_height);
-    int i, j, src_stride = stride >> 1;
-    uint16_t last_pix, *last_line;
-
-    src += y * src_stride + x;
-    for (i = 0; i < box_height; ++i) {
-        for (j = 0; j < box_width; ++j) {
-            dst[j] = src[j] << 6; /* 10b to 16b */
-        }
-        last_pix = dst[j - 1] << 6; /* 10b to 16b */
-        for (; j < dst_width; j++)
-            dst[j] = last_pix;
-        src += src_stride;
-        dst += dst_width;
-    }
-    last_line = dst - dst_width;
-    for (; i < dst_height; i++) {
-        for (j = 0; j < dst_width; ++j) {
-            dst[j] = last_line[j];
-        }
-        dst += dst_width;
-    }
+    /* the shared template does the 10b -> 16b widening (is_alpha_plane = 1) */
+    subimage_with_fill_template(src, x, y, stride, width, height, dst, dst_width, dst_height, 1, is_interlaced, is_top_field);
}
static int encode_slice(AVCodecContext *avctx, const AVFrame *pic, int mb_x,
int mb_y, unsigned mb_count, uint8_t *buf, unsigned data_size,
- int unsafe, int *qp)
+ int unsafe, int *qp, int is_interlaced, int is_top_field)
{
int luma_stride, chroma_stride, alpha_stride = 0;
ProresContext* ctx = avctx->priv_data;
if (ctx->need_alpha)
alpha_stride = pic->linesize[3];
- dest_y = pic->data[0] + (mb_y << 4) * luma_stride + (mb_x << 5);
- dest_u = pic->data[1] + (mb_y << 4) * chroma_stride + (mb_x << (5 - ctx->is_422));
- dest_v = pic->data[2] + (mb_y << 4) * chroma_stride + (mb_x << (5 - ctx->is_422));
+ if (!is_interlaced) {
+ dest_y = pic->data[0] + (mb_y << 4) * luma_stride + (mb_x << 5);
+ dest_u = pic->data[1] + (mb_y << 4) * chroma_stride + (mb_x << (5 - ctx->is_422));
+ dest_v = pic->data[2] + (mb_y << 4) * chroma_stride + (mb_x << (5 - ctx->is_422));
+ } else {
+ dest_y = pic->data[0] + (mb_y << 4) * luma_stride * 2 + (mb_x << 5);
+ dest_u = pic->data[1] + (mb_y << 4) * chroma_stride * 2 + (mb_x << (5 - ctx->is_422));
+ dest_v = pic->data[2] + (mb_y << 4) * chroma_stride * 2 + (mb_x << (5 - ctx->is_422));
+ if (!is_top_field){ /* bottom field, offset dest */
+ dest_y += luma_stride;
+ dest_u += chroma_stride;
+ dest_v += chroma_stride;
+ }
+ }
if (unsafe) {
subimage_with_fill((uint16_t *) pic->data[0], mb_x << 4, mb_y << 4,
luma_stride, avctx->width, avctx->height,
- (uint16_t *) ctx->fill_y, mb_count << 4, 16);
+ (uint16_t *) ctx->fill_y, mb_count << 4, 16, is_interlaced, is_top_field);
subimage_with_fill((uint16_t *) pic->data[1], mb_x << (4 - ctx->is_422), mb_y << 4,
chroma_stride, avctx->width >> ctx->is_422, avctx->height,
- (uint16_t *) ctx->fill_u, mb_count << (4 - ctx->is_422), 16);
+ (uint16_t *) ctx->fill_u, mb_count << (4 - ctx->is_422), 16, is_interlaced, is_top_field);
subimage_with_fill((uint16_t *) pic->data[2], mb_x << (4 - ctx->is_422), mb_y << 4,
chroma_stride, avctx->width >> ctx->is_422, avctx->height,
- (uint16_t *) ctx->fill_v, mb_count << (4 - ctx->is_422), 16);
+ (uint16_t *) ctx->fill_v, mb_count << (4 - ctx->is_422), 16, is_interlaced, is_top_field);
+ /* no need for interlaced special case, data already reorganized in subimage_with_fill */
calc_plane_dct(fdsp, ctx->fill_y, blocks_y, mb_count << 5, mb_count, 0, 0);
calc_plane_dct(fdsp, ctx->fill_u, blocks_u, mb_count << (5 - ctx->is_422), mb_count, 1, ctx->is_422);
calc_plane_dct(fdsp, ctx->fill_v, blocks_v, mb_count << (5 - ctx->is_422), mb_count, 1, ctx->is_422);
&y_data_size, &u_data_size, &v_data_size,
*qp);
} else {
- calc_plane_dct(fdsp, dest_y, blocks_y, luma_stride, mb_count, 0, 0);
- calc_plane_dct(fdsp, dest_u, blocks_u, chroma_stride, mb_count, 1, ctx->is_422);
- calc_plane_dct(fdsp, dest_v, blocks_v, chroma_stride, mb_count, 1, ctx->is_422);
+ if (!is_interlaced) {
+ calc_plane_dct(fdsp, dest_y, blocks_y, luma_stride, mb_count, 0, 0);
+ calc_plane_dct(fdsp, dest_u, blocks_u, chroma_stride, mb_count, 1, ctx->is_422);
+ calc_plane_dct(fdsp, dest_v, blocks_v, chroma_stride, mb_count, 1, ctx->is_422);
+ } else {
+ calc_plane_dct(fdsp, dest_y, blocks_y, luma_stride * 2, mb_count, 0, 0);
+ calc_plane_dct(fdsp, dest_u, blocks_u, chroma_stride * 2, mb_count, 1, ctx->is_422);
+ calc_plane_dct(fdsp, dest_v, blocks_v, chroma_stride * 2, mb_count, 1, ctx->is_422);
+ }
slice_size = encode_slice_data(avctx, blocks_y, blocks_u, blocks_v,
mb_count, buf + hdr_size, data_size - hdr_size,
subimage_alpha_with_fill((uint16_t *) pic->data[3], mb_x << 4, mb_y << 4,
alpha_stride, avctx->width, avctx->height,
- (uint16_t *) ctx->fill_a, mb_count << 4, 16);
+ (uint16_t *) ctx->fill_a, mb_count << 4, 16, is_interlaced, is_top_field);
ret = encode_alpha_slice_data(avctx, ctx->fill_a, mb_count,
buf + hdr_size + slice_size,
data_size - hdr_size - slice_size, &a_data_size);
}
static int prores_encode_picture(AVCodecContext *avctx, const AVFrame *pic,
- uint8_t *buf, const int buf_size)
+ uint8_t *buf, const int buf_size, const int picture_index, const int is_top_field)
{
+ ProresContext *ctx = avctx->priv_data;
int mb_width = (avctx->width + 15) >> 4;
- int mb_height = (avctx->height + 15) >> 4;
int hdr_size, sl_size, i;
- int mb_y, sl_data_size, qp;
+ int mb_y, sl_data_size, qp, mb_height, picture_height, unsafe_mb_height_limit;
int unsafe_bot, unsafe_right;
uint8_t *sl_data, *sl_data_sizes;
int slice_per_line = 0, rem = mb_width;
+ if (!ctx->is_interlaced) { /* progressive encoding */
+ mb_height = (avctx->height + 15) >> 4;
+ unsafe_mb_height_limit = mb_height;
+ } else {
+ if (is_top_field) {
+ picture_height = (avctx->height + 1) / 2;
+ } else {
+ picture_height = avctx->height / 2;
+ }
+ mb_height = (picture_height + 15) >> 4;
+ unsafe_mb_height_limit = mb_height;
+ }
+
for (i = av_log2(DEFAULT_SLICE_MB_WIDTH); i >= 0; --i) {
slice_per_line += rem >> i;
rem &= (1 << i) - 1;
while (mb_width - mb_x < slice_mb_count)
slice_mb_count >>= 1;
- unsafe_bot = (avctx->height & 0xf) && (mb_y == mb_height - 1);
+ unsafe_bot = (avctx->height & 0xf) && (mb_y == unsafe_mb_height_limit - 1);
unsafe_right = (avctx->width & 0xf) && (mb_x + slice_mb_count == mb_width);
sl_size = encode_slice(avctx, pic, mb_x, mb_y, slice_mb_count,
- sl_data, sl_data_size, unsafe_bot || unsafe_right, &qp);
+ sl_data, sl_data_size, unsafe_bot || unsafe_right, &qp, ctx->is_interlaced, is_top_field);
if (sl_size < 0){
return sl_size;
}
buf[0] = hdr_size << 3;
AV_WB32(buf + 1, sl_data - buf);
- AV_WB16(buf + 5, slice_per_line * mb_height);
- buf[7] = av_log2(DEFAULT_SLICE_MB_WIDTH) << 4;
+    AV_WB16(buf + 5, slice_per_line * mb_height); /* total number of slices */
+    buf[7] = av_log2(DEFAULT_SLICE_MB_WIDTH) << 4; /* log2 slice width in MBs, high nibble */
return sl_data - buf;
}
ProresContext *ctx = avctx->priv_data;
int header_size = 148;
uint8_t *buf;
- int pic_size, ret;
+ int compress_frame_size, pic_size, ret, is_top_field_first = 0;
+ uint8_t frame_flags;
int frame_size = FFALIGN(avctx->width, 16) * FFALIGN(avctx->height, 16)*16 + 500 + AV_INPUT_BUFFER_MIN_SIZE; //FIXME choose tighter limit
return ret;
buf = pkt->data;
- pic_size = prores_encode_picture(avctx, pict, buf + header_size + 8,
- pkt->size - header_size - 8);
- if (pic_size < 0) {
- return pic_size;
- }
+ compress_frame_size = 8 + header_size;
- bytestream_put_be32(&buf, pic_size + 8 + header_size);
+    bytestream_put_be32(&buf, compress_frame_size); /* frame size will be updated after picture(s) encoding */
bytestream_put_buffer(&buf, "icpf", 4);
bytestream_put_be16(&buf, header_size);
bytestream_put_buffer(&buf, ctx->vendor, 4);
bytestream_put_be16(&buf, avctx->width);
bytestream_put_be16(&buf, avctx->height);
- if (avctx->profile >= FF_PROFILE_PRORES_4444) { /* 4444 or 4444 Xq */
- *buf++ = 0xC2; // 444, not interlaced
+ frame_flags = 0x82; /* 422 not interlaced */
+ if (avctx->profile >= FF_PROFILE_PRORES_4444) /* 4444 or 4444 Xq */
+ frame_flags |= 0x40; /* 444 chroma */
+ if (ctx->is_interlaced) {
+        if (pict->top_field_first || !pict->interlaced_frame) { /* tff frame, or progressive frame treated as tff */
+ av_log(avctx, AV_LOG_DEBUG, "use interlaced encoding, top field first\n");
+ frame_flags |= 0x04; /* interlaced tff */
+ is_top_field_first = 1;
+ } else {
+ av_log(avctx, AV_LOG_DEBUG, "use interlaced encoding, bottom field first\n");
+ frame_flags |= 0x08; /* interlaced bff */
+ }
} else {
- *buf++ = 0x82; // 422, not interlaced
+ av_log(avctx, AV_LOG_DEBUG, "use progressive encoding\n");
}
+ *buf++ = frame_flags;
*buf++ = 0; /* reserved */
/* only write color properties, if valid value. set to unspecified otherwise */
*buf++ = ff_int_from_list_or_default(avctx, "frame color primaries", pict->color_primaries, valid_primaries, 0);
bytestream_put_buffer(&buf, QMAT_LUMA[avctx->profile], 64);
bytestream_put_buffer(&buf, QMAT_CHROMA[avctx->profile], 64);
+ pic_size = prores_encode_picture(avctx, pict, buf,
+ pkt->size - compress_frame_size, 0, is_top_field_first);/* encode progressive or first field */
+ if (pic_size < 0) {
+ return pic_size;
+ }
+ compress_frame_size += pic_size;
+
+ if (ctx->is_interlaced) { /* encode second field */
+ pic_size = prores_encode_picture(avctx, pict, pkt->data + compress_frame_size,
+ pkt->size - compress_frame_size, 1, !is_top_field_first);
+ if (pic_size < 0) {
+ return pic_size;
+ }
+ compress_frame_size += pic_size;
+ }
+
+ AV_WB32(pkt->data, compress_frame_size);/* update frame size */
pkt->flags |= AV_PKT_FLAG_KEY;
- pkt->size = pic_size + 8 + header_size;
+ pkt->size = compress_frame_size;
*got_packet = 1;
return 0;
avctx->bits_per_raw_sample = 10;
ctx->need_alpha = 0;
+ ctx->is_interlaced = !!(avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT);
+ if (ctx->is_interlaced) {
+ ctx->scantable = ff_prores_interlaced_scan;
+ } else {
+ ctx->scantable = ff_prores_progressive_scan;
+ }
if (avctx->width & 0x1) {
av_log(avctx, AV_LOG_ERROR,
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
- { "vendor", "vendor ID", OFFSET(vendor), AV_OPT_TYPE_STRING, { .str = "fmpg" }, CHAR_MIN, CHAR_MAX, VE },
+ { "vendor", "vendor ID", OFFSET(vendor), AV_OPT_TYPE_STRING, { .str = "fmpg" }, 0, 0, VE },
{ NULL }
};
.version = LIBAVUTIL_VERSION_INT,
};
-AVCodec ff_prores_aw_encoder = {
+const AVCodec ff_prores_aw_encoder = {
.name = "prores_aw",
.long_name = NULL_IF_CONFIG_SMALL("Apple ProRes"),
.type = AVMEDIA_TYPE_VIDEO,
.close = prores_encode_close,
.encode2 = prores_encode_frame,
.pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_NONE},
- .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
+ .capabilities = AV_CODEC_CAP_FRAME_THREADS,
.priv_class = &proresaw_enc_class,
.profiles = NULL_IF_CONFIG_SMALL(ff_prores_profiles),
+ .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
};
-AVCodec ff_prores_encoder = {
+const AVCodec ff_prores_encoder = {
.name = "prores",
.long_name = NULL_IF_CONFIG_SMALL("Apple ProRes"),
.type = AVMEDIA_TYPE_VIDEO,
.close = prores_encode_close,
.encode2 = prores_encode_frame,
.pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_NONE},
- .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
+ .capabilities = AV_CODEC_CAP_FRAME_THREADS,
.priv_class = &prores_enc_class,
.profiles = NULL_IF_CONFIG_SMALL(ff_prores_profiles),
+ .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
};