2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
9 * This file is part of Libav.
11 * Libav is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * Libav is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with Libav; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
28 * huffyuv codec for libavcodec.
/* Prediction modes used by the codec (members elided in this view). */
52 typedef enum Predictor {
/* Per-instance codec state shared by encoder and decoder. */
58 typedef struct HYuvContext {
59 AVCodecContext *avctx;
67 int yuy2; //use yuy2 instead of 422P
68 int bgr32; //use bgr32 instead of bgr24
/* Symbol occurrence counters, one table per plane (Y/U/V or G/B/R). */
75 uint64_t stats[3][256];
/* Huffman code bits per symbol, per plane; lengths live in a sibling field. */
77 uint32_t bits[3][256];
/* Precomputed packed-pixel lookup for the joint RGB VLC (see generate_joint_tables). */
78 uint32_t pix_bgr_map[1<<VLC_BITS];
79 VLC vlc[6]; //Y,U,V,YY,YU,YV
/* Byte-swapped copy of the input packet, grown on demand with av_fast_malloc. */
81 uint8_t *bitstream_buffer;
82 unsigned int bitstream_buffer_size;
/* Run-length-coded code-length tables for "classic" huffyuv streams
 * (version 1 files with no tables in extradata); padded so the bit reader
 * can safely over-read. Decoded by read_old_huffman_tables(). */
86 #define classic_shift_luma_table_size 42
87 static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
88 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
89 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
93 #define classic_shift_chroma_table_size 59
94 static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
95 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
96 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
97 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
/* Per-symbol code values for the classic luma table; copied into s->bits[0]
 * by read_old_huffman_tables(). */
100 static const unsigned char classic_add_luma[256] = {
101 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
102 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
103 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
104 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
105 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
106 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
107 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
108 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
109 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
110 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
111 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
112 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
113 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
114 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
115 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
116 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
/* Per-symbol code values for the classic chroma table; copied into s->bits[1]
 * by read_old_huffman_tables(). */
119 static const unsigned char classic_add_chroma[256] = {
120 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
121 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
122 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
123 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
124 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
125 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
126 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
127 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
128 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
129 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
130 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
131 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
132 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
133 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
134 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
135 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
/* Left-predict one row for encoding: dst[i] = src[i] - previous pixel,
 * seeded with 'left'. One visible branch does the whole row scalar; the
 * other does the first 16 pixels scalar, then hands the remainder to the
 * DSP diff_bytes routine (likely SIMD — the branch condition is elided here).
 * Presumably returns the last source pixel as the new 'left' — the return
 * statement is not visible in this view; confirm against the full file. */
138 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
139 uint8_t *src, int w, int left)
143 for (i = 0; i < w; i++) {
144 const int temp = src[i];
145 dst[i] = temp - left;
150 for (i = 0; i < 16; i++) {
151 const int temp = src[i];
152 dst[i] = temp - left;
155 s->dsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
/* Left-predict a packed 32-bit RGB row: per channel, dst = src - previous
 * pixel's channel. First up-to-4 pixels are done scalar (seeded from
 * *red/*green/*blue), the rest via dsp.diff_bytes on the raw bytes with a
 * 4-byte (one pixel) offset. On return *red/*green/*blue hold the last
 * pixel's channels for the next row. */
160 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
162 int *red, int *green, int *blue)
170 for (i = 0; i < FFMIN(w, 4); i++) {
171 const int rt = src[i * 4 + R];
172 const int gt = src[i * 4 + G];
173 const int bt = src[i * 4 + B];
174 dst[i * 4 + R] = rt - r;
175 dst[i * 4 + G] = gt - g;
176 dst[i * 4 + B] = bt - b;
/* Bulk of the row: byte-wise diff against the pixel 4 bytes earlier. */
182 s->dsp.diff_bytes(dst + 16, src + 16, src + 12, w * 4 - 16);
184 *red = src[(w - 1) * 4 + R];
185 *green = src[(w - 1) * 4 + G];
186 *blue = src[(w - 1) * 4 + B];
/* Parse a run-length-coded table of 256 Huffman code lengths from the
 * bitstream: each record is a 3-bit repeat count and a 5-bit length value;
 * a zero 3-bit count appears to be escaped by an 8-bit repeat (the branch
 * condition is elided in this view). Validates that the runs do not
 * overflow the 256-entry table and that the reader has bits left;
 * logs and errors out otherwise. */
189 static int read_len_table(uint8_t *dst, GetBitContext *gb)
193 for (i = 0; i < 256;) {
194 repeat = get_bits(gb, 3);
195 val = get_bits(gb, 5);
197 repeat = get_bits(gb, 8);
198 if (i + repeat > 256 || get_bits_left(gb) < 0) {
199 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
/* Build canonical Huffman code values from a table of code lengths:
 * iterate lengths from longest (32) to shortest, assigning consecutive
 * codes to every symbol of the current length. Errors out (with a log)
 * when the lengths do not form a valid prefix code — the check itself
 * is elided in this view. */
208 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table)
213 for (len = 32; len > 0; len--) {
214 for (index = 0; index < 256; index++) {
215 if (len_table[index] == len)
219 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
/* Build joint VLC tables that decode two symbols per lookup.
 * YUV path (<24 bpp): for each plane pair, combine every (Y,second-symbol)
 * code pair whose total length fits in VLC_BITS into vlc[3..5].
 * RGB path: combine (g,b,r) residual codes near zero into vlc[3], and
 * record the decoded pixel bytes in pix_bgr_map. */
227 static void generate_joint_tables(HYuvContext *s)
229 uint16_t symbols[1 << VLC_BITS];
230 uint16_t bits[1 << VLC_BITS];
231 uint8_t len[1 << VLC_BITS];
232 if (s->bitstream_bpp < 24) {
234 for (p = 0; p < 3; p++) {
235 for (i = y = 0; y < 256; y++) {
236 int len0 = s->len[0][y];
/* Second code must fit in the remaining VLC_BITS after the Y code. */
237 int limit = VLC_BITS - len0;
240 for (u = 0; u < 256; u++) {
241 int len1 = s->len[p][u];
244 len[i] = len0 + len1;
245 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
246 symbols[i] = (y << 8) + u;
247 if(symbols[i] != 0xffff) // reserved to mean "invalid"
/* Rebuild the pair table from scratch each call. */
251 ff_free_vlc(&s->vlc[3 + p]);
252 ff_init_vlc_sparse(&s->vlc[3 + p], VLC_BITS, i, len, 1, 1,
253 bits, 2, 2, symbols, 2, 2, 0);
256 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
257 int i, b, g, r, code;
/* With decorrelation on, plane 0 carries G and plane 1 the B/R residuals
 * (and vice versa when off) — hence the p0/p1 swap. */
258 int p0 = s->decorrelate;
259 int p1 = !s->decorrelate;
260 // restrict the range to +/-16 because that's pretty much guaranteed to
261 // cover all the combinations that fit in 11 bits total, and it doesn't
262 // matter if we miss a few rare codes.
263 for (i = 0, g = -16; g < 16; g++) {
264 int len0 = s->len[p0][g & 255];
265 int limit0 = VLC_BITS - len0;
268 for (b = -16; b < 16; b++) {
269 int len1 = s->len[p1][b & 255];
270 int limit1 = limit0 - len1;
273 code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
274 for (r = -16; r < 16; r++) {
275 int len2 = s->len[2][r & 255];
278 len[i] = len0 + len1 + len2;
279 bits[i] = (code << len2) + s->bits[2][r & 255];
280 if (s->decorrelate) {
293 ff_free_vlc(&s->vlc[3]);
294 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
/* Parse the three per-plane Huffman tables from 'src' (extradata or the
 * start of a context-model frame): read code lengths, derive canonical
 * code bits, (re)build the single-symbol VLCs, then the joint tables.
 * Returns the number of bytes consumed, rounded up to whole bytes. */
298 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
303 init_get_bits(&gb, src, length * 8);
305 for (i = 0; i < 3; i++) {
306 if (read_len_table(s->len[i], &gb) < 0)
308 if (generate_bits_table(s->bits[i], s->len[i]) < 0) {
/* Free any previous table before re-initializing (tables can change
 * per frame when the context model is enabled). */
311 ff_free_vlc(&s->vlc[i]);
312 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
313 s->bits[i], 4, 4, 0);
316 generate_joint_tables(s);
318 return (get_bits_count(&gb) + 7) / 8;
/* Set up the fixed "classic" huffyuv tables used by old (version 1)
 * streams that carry no tables of their own: decode the built-in
 * run-length-coded length tables, copy the built-in code values, then
 * build the VLCs exactly as read_huffman_tables() does. */
321 static int read_old_huffman_tables(HYuvContext *s)
326 init_get_bits(&gb, classic_shift_luma,
327 classic_shift_luma_table_size * 8);
328 if (read_len_table(s->len[0], &gb) < 0)
331 init_get_bits(&gb, classic_shift_chroma,
332 classic_shift_chroma_table_size * 8);
333 if (read_len_table(s->len[1], &gb) < 0)
336 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
337 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
/* RGB streams use the luma table for all planes; the third plane always
 * mirrors the second. */
339 if (s->bitstream_bpp >= 24) {
340 memcpy(s->bits[1], s->bits[0], 256 * sizeof(uint32_t));
341 memcpy(s->len[1] , s->len [0], 256 * sizeof(uint8_t));
343 memcpy(s->bits[2], s->bits[1], 256 * sizeof(uint32_t));
344 memcpy(s->len[2] , s->len [1], 256 * sizeof(uint8_t));
346 for (i = 0; i < 3; i++) {
347 ff_free_vlc(&s->vlc[i]);
348 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
349 s->bits[i], 4, 4, 0);
352 generate_joint_tables(s);
/* Allocate per-row scratch buffers: one per plane for planar (<24 bpp)
 * modes, a single 4*width buffer for packed RGB. The extra 16 bytes are
 * presumably padding for the DSP routines — confirm against callers. */
357 static av_cold void alloc_temp(HYuvContext *s)
361 if (s->bitstream_bpp<24) {
362 for (i=0; i<3; i++) {
363 s->temp[i]= av_malloc(s->width + 16);
366 s->temp[0]= av_mallocz(4*s->width + 16);
/* Initialization shared by encoder and decoder: cache flags/dimensions
 * from the codec context and set up the DSP function pointers. */
370 static av_cold int common_init(AVCodecContext *avctx)
372 HYuvContext *s = avctx->priv_data;
375 s->flags = avctx->flags;
377 ff_dsputil_init(&s->dsp, avctx);
379 s->width = avctx->width;
380 s->height = avctx->height;
381 assert(s->width>0 && s->height>0);
386 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder init: derive version/predictor/bpp/interlacing either from
 * version-2 extradata (method byte, bpp byte, flags byte, then the
 * Huffman tables) or, for version-1 streams, from bits_per_coded_sample
 * plus the built-in classic tables; finally pick the output pixel format. */
387 static av_cold int decode_init(AVCodecContext *avctx)
389 HYuvContext *s = avctx->priv_data;
392 memset(s->vlc, 0, 3 * sizeof(VLC));
394 avctx->coded_frame = &s->picture;
/* Heuristic default: material taller than 288 lines is assumed interlaced
 * (may be overridden by extradata below). */
395 s->interlaced = s->height > 288;
399 if (avctx->extradata_size) {
400 if ((avctx->bits_per_coded_sample & 7) &&
401 avctx->bits_per_coded_sample != 12)
402 s->version = 1; // do such files exist at all?
408 if (s->version == 2) {
409 int method, interlace;
411 if (avctx->extradata_size < 4)
/* extradata[0]: low 6 bits = predictor, bit 6 = decorrelate. */
414 method = ((uint8_t*)avctx->extradata)[0];
415 s->decorrelate = method & 64 ? 1 : 0;
416 s->predictor = method & 63;
417 s->bitstream_bpp = ((uint8_t*)avctx->extradata)[1];
418 if (s->bitstream_bpp == 0)
419 s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
/* extradata[2]: bits 4-5 = interlace override (1=yes, 2=no, 0=keep
 * heuristic), bit 6 = adaptive context model. */
420 interlace = (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
421 s->interlaced = (interlace == 1) ? 1 : (interlace == 2) ? 0 : s->interlaced;
422 s->context = ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
424 if ( read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
425 avctx->extradata_size - 4) < 0)
/* Version-1 path: infer predictor from the low bits of bpp. */
428 switch (avctx->bits_per_coded_sample & 7) {
438 s->predictor = PLANE;
439 s->decorrelate = avctx->bits_per_coded_sample >= 24;
442 s->predictor = MEDIAN;
446 s->predictor = LEFT; //OLD
450 s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
453 if (read_old_huffman_tables(s) < 0)
457 switch (s->bitstream_bpp) {
459 avctx->pix_fmt = AV_PIX_FMT_YUV420P;
463 avctx->pix_fmt = AV_PIX_FMT_YUYV422;
465 avctx->pix_fmt = AV_PIX_FMT_YUV422P;
471 avctx->pix_fmt = AV_PIX_FMT_RGB32;
473 avctx->pix_fmt = AV_PIX_FMT_BGR24;
477 return AVERROR_INVALIDDATA;
/* Per-thread copy init for frame-threaded decoding: the copied context
 * must not share VLC tables with the original, so clear the pointers and
 * rebuild the tables from extradata (v2) or the classic tables (v1). */
485 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
487 HYuvContext *s = avctx->priv_data;
490 avctx->coded_frame= &s->picture;
493 for (i = 0; i < 6; i++)
494 s->vlc[i].table = NULL;
496 if (s->version == 2) {
497 if (read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
498 avctx->extradata_size) < 0)
501 if (read_old_huffman_tables(s) < 0)
507 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
509 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Serialize a 256-entry code-length table into 'buf' using the same
 * run-length format read_len_table() parses: runs >= 8 emit an escape
 * record followed by an 8-bit repeat count, shorter runs pack the count
 * into the top 3 bits of one byte. Returns the number of bytes written
 * (return statement elided in this view). */
510 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
515 for (i = 0; i < 256;) {
/* Count how many consecutive symbols share this code length (max 255). */
519 for (; i < 256 && len[i] == val && repeat < 255; i++)
522 assert(val < 32 && val >0 && repeat<256 && repeat>0);
525 buf[index++] = repeat;
527 buf[index++] = val | (repeat << 5);
/* Encoder init: map pix_fmt to bitstream bpp, validate option
 * combinations (context model vs 2-pass, huffyuv vs ffvhuff feature
 * differences, RGB vs median predictor), write the 4-byte extradata
 * header plus the serialized Huffman tables, and seed the symbol
 * statistics either from stats_in (2-pass) or from a built-in model. */
534 static av_cold int encode_init(AVCodecContext *avctx)
536 HYuvContext *s = avctx->priv_data;
541 avctx->extradata = av_mallocz(1024*30); // 256*3+4 == 772
542 avctx->stats_out = av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
545 avctx->coded_frame = &s->picture;
547 switch (avctx->pix_fmt) {
548 case AV_PIX_FMT_YUV420P:
549 s->bitstream_bpp = 12;
551 case AV_PIX_FMT_YUV422P:
552 s->bitstream_bpp = 16;
554 case AV_PIX_FMT_RGB32:
555 s->bitstream_bpp = 24;
558 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
561 avctx->bits_per_coded_sample = s->bitstream_bpp;
562 s->decorrelate = s->bitstream_bpp >= 24;
563 s->predictor = avctx->prediction_method;
564 s->interlaced = avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
565 if (avctx->context_model == 1) {
566 s->context = avctx->context_model;
/* The adaptive context model rewrites tables per frame, which conflicts
 * with the fixed global stats of 2-pass encoding. */
567 if (s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) {
568 av_log(avctx, AV_LOG_ERROR,
569 "context=1 is not compatible with "
570 "2 pass huffyuv encoding\n");
/* Plain huffyuv (as opposed to ffvhuff) supports neither 4:2:0 nor
 * per-frame tables — reject those for compatibility. */
575 if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
576 if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
577 av_log(avctx, AV_LOG_ERROR,
578 "Error: YV12 is not supported by huffyuv; use "
579 "vcodec=ffvhuff or format=422p\n");
582 if (avctx->context_model) {
583 av_log(avctx, AV_LOG_ERROR,
584 "Error: per-frame huffman tables are not supported "
585 "by huffyuv; use vcodec=ffvhuff\n");
588 if (s->interlaced != ( s->height > 288 ))
589 av_log(avctx, AV_LOG_INFO,
590 "using huffyuv 2.2.0 or newer interlacing flag\n");
593 if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN) {
594 av_log(avctx, AV_LOG_ERROR,
595 "Error: RGB is incompatible with median predictor\n");
/* 4-byte extradata header, matching what decode_init() parses. */
599 ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
600 ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
601 ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
603 ((uint8_t*)avctx->extradata)[2] |= 0x40;
604 ((uint8_t*)avctx->extradata)[3] = 0;
605 s->avctx->extradata_size = 4;
607 if (avctx->stats_in) {
608 char *p = avctx->stats_in;
610 for (i = 0; i < 3; i++)
611 for (j = 0; j < 256; j++)
615 for (i = 0; i < 3; i++) {
618 for (j = 0; j < 256; j++) {
/* Accumulate pass-1 statistics; bail on malformed input. */
619 s->stats[i][j] += strtol(p, &next, 0);
620 if (next == p) return -1;
624 if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
/* No stats available: synthesize a distribution that favors small
 * residuals (d = distance from zero, wrapping at 256). */
627 for (i = 0; i < 3; i++)
628 for (j = 0; j < 256; j++) {
629 int d = FFMIN(j, 256 - j);
631 s->stats[i][j] = 100000000 / (d + 1);
635 for (i = 0; i < 3; i++) {
636 ff_huff_gen_len_table(s->len[i], s->stats[i]);
638 if (generate_bits_table(s->bits[i], s->len[i]) < 0) {
642 s->avctx->extradata_size +=
643 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
/* Re-seed running stats for the context model: luma weighted 4x chroma. */
647 for (i = 0; i < 3; i++) {
648 int pels = s->width * s->height / (i ? 40 : 10);
649 for (j = 0; j < 256; j++) {
650 int d = FFMIN(j, 256 - j);
651 s->stats[i][j] = pels/(d + 1);
655 for (i = 0; i < 3; i++)
656 for (j = 0; j < 256; j++)
666 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
668 /* TODO instead of restarting the read when the code isn't in the first level
669  * of the joint table, jump into the 2nd level of the individual table. */
/* Decode two symbols at once via the joint VLC; if the pair is not in the
 * joint table (the success test is elided here), fall back to two
 * single-symbol lookups on the individual tables. */
670 #define READ_2PIX(dst0, dst1, plane1){\
671 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
676 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
677 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
/* Decode 'count' 4:2:2 pixel pairs (Y0 U Y1 V) into s->temp[0..2].
 * Near the end of the buffer (worst case ~31 bits per symbol, 4 symbols
 * per pair) run the slow loop that re-checks remaining bits each pair;
 * otherwise use the unchecked fast loop. */
681 static void decode_422_bitstream(HYuvContext *s, int count)
687 if (count >= (get_bits_left(&s->gb)) / (31 * 4)) {
688 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
689 READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
690 READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
693 for (i = 0; i < count; i++) {
694 READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
695 READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
/* Decode 'count' luma-only pixel pairs into s->temp[0], with the same
 * checked/unchecked split as decode_422_bitstream (2 symbols per pair). */
700 static void decode_gray_bitstream(HYuvContext *s, int count)
706 if (count >= (get_bits_left(&s->gb)) / (31 * 2)) {
707 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
708 READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
711 for(i=0; i<count; i++){
712 READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
717 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Encode 'count' 4:2:2 pixel pairs from s->temp starting at 'offset'
 * (chroma advances at half rate). Three variants: stats-gathering for
 * pass 1, stats+output for the context model (branch structure partly
 * elided here), and plain output. Fails up front if the packet buffer
 * cannot hold the worst case. */
718 static int encode_422_bitstream(HYuvContext *s, int offset, int count)
721 const uint8_t *y = s->temp[0] + offset;
722 const uint8_t *u = s->temp[1] + offset / 2;
723 const uint8_t *v = s->temp[2] + offset / 2;
/* Worst case: 4 symbols per pair, up to 2 bytes each. */
725 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 2 * 4 * count) {
726 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
732 int y1 = y[2 * i + 1];\
738 if (s->flags & CODEC_FLAG_PASS1) {
739 for(i = 0; i < count; i++) {
/* With NO_OUTPUT only the statistics matter; skip bit writing. */
747 if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
750 for (i = 0; i < count; i++) {
753 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
755 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
757 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
759 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
762 for(i = 0; i < count; i++) {
764 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
765 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
766 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
767 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
/* Encode 'count' luma-only pixel pairs from s->temp[0]; same
 * pass1/context/plain structure as encode_422_bitstream but with only
 * two symbols per pair. */
773 static int encode_gray_bitstream(HYuvContext *s, int count)
777 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * count) {
778 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
/* Helper macros (bodies partly elided): LOAD2 fetches the pair,
 * WRITE2 emits both codes. */
783 int y0 = s->temp[0][2 * i];\
784 int y1 = s->temp[0][2 * i + 1];
789 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
790 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
794 if (s->flags & CODEC_FLAG_PASS1) {
795 for (i = 0; i < count; i++) {
800 if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
804 for (i = 0; i < count; i++) {
810 for (i = 0; i < count; i++) {
817 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Decode 'count' packed RGB(A) pixels into s->temp[0]. Compile-time
 * specialized (always_inline) on 'decorrelate' (B/R stored as residuals
 * relative to G) and 'alpha' (read an extra alpha symbol). A joint-table
 * hit yields the whole pixel from pix_bgr_map; otherwise each channel is
 * decoded from its own VLC. */
819 static av_always_inline void decode_bgr_1(HYuvContext *s, int count,
820 int decorrelate, int alpha)
823 for (i = 0; i < count; i++) {
824 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
826 *(uint32_t*)&s->temp[0][4 * i] = s->pix_bgr_map[code];
827 } else if(decorrelate) {
828 s->temp[0][4 * i + G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
829 s->temp[0][4 * i + B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) +
830 s->temp[0][4 * i + G];
831 s->temp[0][4 * i + R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) +
832 s->temp[0][4 * i + G];
834 s->temp[0][4 * i + B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
835 s->temp[0][4 * i + G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
836 s->temp[0][4 * i + R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
839 s->temp[0][4 * i + A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
/* Dispatch to the right decode_bgr_1 specialization based on the runtime
 * decorrelate flag and whether the stream carries alpha (bpp != 24). */
843 static void decode_bgr_bitstream(HYuvContext *s, int count)
845 if (s->decorrelate) {
846 if (s->bitstream_bpp==24)
847 decode_bgr_1(s, count, 1, 0);
849 decode_bgr_1(s, count, 1, 1);
851 if (s->bitstream_bpp==24)
852 decode_bgr_1(s, count, 0, 0);
854 decode_bgr_1(s, count, 0, 1);
/* Encode 'count' packed RGB pixels from s->temp[0]: G is coded directly,
 * B and R as residuals relative to G (masked to a byte). Variants:
 * stats-only (pass1 + NO_OUTPUT), stats+output (context or pass1),
 * plain output. */
858 static int encode_bgr_bitstream(HYuvContext *s, int count)
/* Worst case: 3 symbols per pixel, up to 4 bytes each. */
862 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 3 * 4 * count) {
863 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
868 int g = s->temp[0][4 * i + G];\
869 int b = (s->temp[0][4 * i + B] - g) & 0xff;\
870 int r = (s->temp[0][4 * i + R] - g) & 0xff;
876 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
877 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
878 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
880 if ((s->flags & CODEC_FLAG_PASS1) &&
881 (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
882 for (i = 0; i < count; i++) {
886 } else if (s->context || (s->flags & CODEC_FLAG_PASS1)) {
887 for (i = 0; i < count; i++) {
893 for (i = 0; i < count; i++) {
901 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Report decoded rows [last_slice_end, y) to the application via
 * draw_horiz_band, if registered. The chroma row 'cy' (computed in the
 * elided part — halved for 4:2:0, i.e. bpp 12) drives the chroma offsets. */
902 static void draw_slice(HYuvContext *s, int y)
905 int offset[AV_NUM_DATA_POINTERS];
907 if (s->avctx->draw_horiz_band==NULL)
910 h = y - s->last_slice_end;
913 if (s->bitstream_bpp == 12) {
919 offset[0] = s->picture.linesize[0]*y;
920 offset[1] = s->picture.linesize[1]*cy;
921 offset[2] = s->picture.linesize[2]*cy;
922 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
926 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
928 s->last_slice_end = y + h;
/* Decode one frame. Outline:
 *  - byte-swap the packet into bitstream_buffer (the bitstream is stored
 *    in 32-bit words), get an output frame;
 *  - with the context model, re-read per-frame Huffman tables first;
 *  - planar YUV path: seed the left/top predictors from the first raw
 *    pixels, then per predictor (LEFT / PLANE / MEDIAN) reconstruct rows
 *    with left prediction, optional plane (previous-row) addition, or
 *    median prediction, emitting slices as rows complete;
 *  - RGB path: the image is stored bottom-up; seed from the last line,
 *    then decode upward with left (and optionally plane) prediction.
 * Returns bytes consumed, rounded to a whole 32-bit word plus tables. */
931 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
934 const uint8_t *buf = avpkt->data;
935 int buf_size = avpkt->size;
936 HYuvContext *s = avctx->priv_data;
937 const int width = s->width;
938 const int width2 = s->width>>1;
939 const int height = s->height;
940 int fake_ystride, fake_ustride, fake_vstride;
941 AVFrame * const p = &s->picture;
944 AVFrame *picture = data;
946 av_fast_malloc(&s->bitstream_buffer,
947 &s->bitstream_buffer_size,
948 buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
949 if (!s->bitstream_buffer)
950 return AVERROR(ENOMEM);
/* Zero the padding so the bit reader never sees stale bytes. */
952 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
953 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer,
954 (const uint32_t*)buf, buf_size / 4);
957 ff_thread_release_buffer(avctx, p);
960 if (ff_thread_get_buffer(avctx, p) < 0) {
961 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
966 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
/* Guard against overflow in the bit-count multiplication below. */
971 if ((unsigned)(buf_size-table_size) >= INT_MAX / 8)
974 init_get_bits(&s->gb, s->bitstream_buffer+table_size,
975 (buf_size-table_size) * 8);
/* Interlaced material predicts from the same field, two rows up. */
977 fake_ystride = s->interlaced ? p->linesize[0] * 2 : p->linesize[0];
978 fake_ustride = s->interlaced ? p->linesize[1] * 2 : p->linesize[1];
979 fake_vstride = s->interlaced ? p->linesize[2] * 2 : p->linesize[2];
981 s->last_slice_end = 0;
983 if (s->bitstream_bpp < 24) {
985 int lefty, leftu, leftv;
986 int lefttopy, lefttopu, lefttopv;
/* First pixels are stored raw and seed the predictors. */
989 p->data[0][3] = get_bits(&s->gb, 8);
990 p->data[0][2] = get_bits(&s->gb, 8);
991 p->data[0][1] = get_bits(&s->gb, 8);
992 p->data[0][0] = get_bits(&s->gb, 8);
994 av_log(avctx, AV_LOG_ERROR,
995 "YUY2 output is not implemented yet\n");
999 leftv = p->data[2][0] = get_bits(&s->gb, 8);
1000 lefty = p->data[0][1] = get_bits(&s->gb, 8);
1001 leftu = p->data[1][0] = get_bits(&s->gb, 8);
1002 p->data[0][0] = get_bits(&s->gb, 8);
1004 switch (s->predictor) {
/* First line, minus the 2 raw pixels, is purely left-predicted. */
1007 decode_422_bitstream(s, width-2);
1008 lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1009 if (!(s->flags&CODEC_FLAG_GRAY)) {
1010 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
1011 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
1014 for (cy = y = 1; y < s->height; y++, cy++) {
1015 uint8_t *ydst, *udst, *vdst;
/* 4:2:0: every other line is luma-only (y advances twice per cy). */
1017 if (s->bitstream_bpp == 12) {
1018 decode_gray_bitstream(s, width);
1020 ydst = p->data[0] + p->linesize[0] * y;
1022 lefty = s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1023 if (s->predictor == PLANE) {
1024 if (y > s->interlaced)
1025 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1028 if (y >= s->height) break;
1033 ydst = p->data[0] + p->linesize[0]*y;
1034 udst = p->data[1] + p->linesize[1]*cy;
1035 vdst = p->data[2] + p->linesize[2]*cy;
1037 decode_422_bitstream(s, width);
1038 lefty = s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1039 if (!(s->flags & CODEC_FLAG_GRAY)) {
1040 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1041 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
1043 if (s->predictor == PLANE) {
1044 if (cy > s->interlaced) {
1045 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1046 if (!(s->flags & CODEC_FLAG_GRAY)) {
1047 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1048 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1053 draw_slice(s, height);
/* MEDIAN predictor path. */
1057 /* first line except first 2 pixels is left predicted */
1058 decode_422_bitstream(s, width - 2);
1059 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width - 2, lefty);
1060 if (!(s->flags & CODEC_FLAG_GRAY)) {
1061 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
1062 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
1067 /* second line is left predicted for interlaced case */
1068 if (s->interlaced) {
1069 decode_422_bitstream(s, width);
1070 lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
/* NOTE(review): chroma planes index linesize[2]/linesize[1] crosswise
 * here — looks suspicious but is elided context; confirm against the
 * full file before changing. */
1071 if (!(s->flags & CODEC_FLAG_GRAY)) {
1072 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1073 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1078 /* next 4 pixels are left predicted too */
1079 decode_422_bitstream(s, 4);
1080 lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1081 if (!(s->flags&CODEC_FLAG_GRAY)) {
1082 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1083 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1086 /* next line except the first 4 pixels is median predicted */
1087 lefttopy = p->data[0][3];
1088 decode_422_bitstream(s, width - 4);
1089 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1090 if (!(s->flags&CODEC_FLAG_GRAY)) {
1091 lefttopu = p->data[1][1];
1092 lefttopv = p->data[2][1];
1093 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
1094 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2] + 2, s->temp[2], width2 - 2, &leftv, &lefttopv);
/* Remaining lines are fully median predicted. */
1098 for (; y<height; y++, cy++) {
1099 uint8_t *ydst, *udst, *vdst;
1101 if (s->bitstream_bpp == 12) {
1102 while (2 * cy > y) {
1103 decode_gray_bitstream(s, width);
1104 ydst = p->data[0] + p->linesize[0] * y;
1105 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1108 if (y >= height) break;
1112 decode_422_bitstream(s, width);
1114 ydst = p->data[0] + p->linesize[0] * y;
1115 udst = p->data[1] + p->linesize[1] * cy;
1116 vdst = p->data[2] + p->linesize[2] * cy;
1118 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1119 if (!(s->flags & CODEC_FLAG_GRAY)) {
1120 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1121 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1125 draw_slice(s, height);
/* Packed RGB path: stored bottom-up, so start at the last line. */
1131 int leftr, leftg, leftb, lefta;
1132 const int last_line = (height - 1) * p->linesize[0];
1134 if (s->bitstream_bpp == 32) {
1135 lefta = p->data[0][last_line+A] = get_bits(&s->gb, 8);
1136 leftr = p->data[0][last_line+R] = get_bits(&s->gb, 8);
1137 leftg = p->data[0][last_line+G] = get_bits(&s->gb, 8);
1138 leftb = p->data[0][last_line+B] = get_bits(&s->gb, 8);
1140 leftr = p->data[0][last_line+R] = get_bits(&s->gb, 8);
1141 leftg = p->data[0][last_line+G] = get_bits(&s->gb, 8);
1142 leftb = p->data[0][last_line+B] = get_bits(&s->gb, 8);
/* 24 bpp stream: no alpha coded; force opaque and skip the pad byte. */
1143 lefta = p->data[0][last_line+A] = 255;
1144 skip_bits(&s->gb, 8);
1148 switch (s->predictor) {
1151 decode_bgr_bitstream(s, width - 1);
1152 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width - 1, &leftr, &leftg, &leftb, &lefta);
1154 for (y = s->height - 2; y >= 0; y--) { //Yes it is stored upside down.
1155 decode_bgr_bitstream(s, width);
1157 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1158 if (s->predictor == PLANE) {
1159 if (s->bitstream_bpp != 32) lefta = 0;
1160 if ((y & s->interlaced) == 0 &&
1161 y < s->height - 1 - s->interlaced) {
1162 s->dsp.add_bytes(p->data[0] + p->linesize[0] * y,
1163 p->data[0] + p->linesize[0] * y +
1164 fake_ystride, fake_ystride);
1168 // just 1 large slice as this is not possible in reverse order
1169 draw_slice(s, height);
1172 av_log(avctx, AV_LOG_ERROR,
1173 "prediction type not supported!\n");
1176 av_log(avctx, AV_LOG_ERROR,
1177 "BGR24 output is not implemented yet\n");
1184 *data_size = sizeof(AVFrame);
1186 return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
1188 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
/* Teardown shared by encoder and decoder: free the per-plane scratch
 * buffers allocated by alloc_temp(). */
1190 static int common_end(HYuvContext *s)
1194 for(i = 0; i < 3; i++) {
1195 av_freep(&s->temp[i]);
1200 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder teardown: release a still-held reference frame, the byte-swap
 * buffer, and all six VLC tables. */
1201 static av_cold int decode_end(AVCodecContext *avctx)
1203 HYuvContext *s = avctx->priv_data;
1206 if (s->picture.data[0])
1207 avctx->release_buffer(avctx, &s->picture);
1210 av_freep(&s->bitstream_buffer);
1212 for (i = 0; i < 6; i++) {
1213 ff_free_vlc(&s->vlc[i]);
1218 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1220 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Encode one frame (continues past the end of this view). Outline:
 *  - allocate a worst-case packet; with the context model, regenerate
 *    Huffman tables from running stats and serialize them first;
 *  - planar YUV: emit the first raw pixels, left-predict rows into
 *    s->temp, and for MEDIAN/PLANE predictors apply the corresponding
 *    row differencing before entropy coding;
 *  - packed RGB: walk the image bottom-up with left prediction. */
1221 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1222 const AVFrame *pict, int *got_packet)
1224 HYuvContext *s = avctx->priv_data;
1225 const int width = s->width;
1226 const int width2 = s->width>>1;
1227 const int height = s->height;
1228 const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1229 const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1230 const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1231 AVFrame * const p = &s->picture;
1232 int i, j, size = 0, ret;
1235 (ret = av_new_packet(pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0) {
1236 av_log(avctx, AV_LOG_ERROR, "Error allocating output packet.\n");
1241 p->pict_type = AV_PICTURE_TYPE_I;
/* Context model: per-frame tables, written at the packet head. */
1245 for (i = 0; i < 3; i++) {
1246 ff_huff_gen_len_table(s->len[i], s->stats[i]);
1247 if (generate_bits_table(s->bits[i], s->len[i]) < 0)
1249 size += store_table(s, s->len[i], &pkt->data[size]);
/* Decay the running statistics so recent frames dominate. */
1252 for (i = 0; i < 3; i++)
1253 for (j = 0; j < 256; j++)
1254 s->stats[i][j] >>= 1;
1257 init_put_bits(&s->pb, pkt->data + size, pkt->size - size);
1259 if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
1260 avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
1261 int lefty, leftu, leftv, y, cy;
/* First pixels go out raw; they seed the decoder's predictors
 * (mirrors decode_frame's seeding order). */
1263 put_bits(&s->pb, 8, leftv = p->data[2][0]);
1264 put_bits(&s->pb, 8, lefty = p->data[0][1]);
1265 put_bits(&s->pb, 8, leftu = p->data[1][0]);
1266 put_bits(&s->pb, 8, p->data[0][0]);
1268 lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1269 leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1270 leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1272 encode_422_bitstream(s, 2, width-2);
1274 if (s->predictor==MEDIAN) {
1275 int lefttopy, lefttopu, lefttopv;
/* Interlaced: the second line is still left-predicted. */
1277 if (s->interlaced) {
1278 lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
1279 leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
1280 leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);
1282 encode_422_bitstream(s, 0, width);
/* First 4 pixels of the next line: left-predicted. */
1286 lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
1287 leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
1288 leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);
1290 encode_422_bitstream(s, 0, 4);
/* Rest of that line and all following: median prediction. */
1292 lefttopy = p->data[0][3];
1293 lefttopu = p->data[1][1];
1294 lefttopv = p->data[2][1];
1295 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride + 4, width - 4 , &lefty, &lefttopy);
1296 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
1297 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
1298 encode_422_bitstream(s, 0, width - 4);
1301 for (; y < height; y++,cy++) {
1302 uint8_t *ydst, *udst, *vdst;
1304 if (s->bitstream_bpp == 12) {
1305 while (2 * cy > y) {
1306 ydst = p->data[0] + p->linesize[0] * y;
1307 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1308 encode_gray_bitstream(s, width);
1311 if (y >= height) break;
1313 ydst = p->data[0] + p->linesize[0] * y;
1314 udst = p->data[1] + p->linesize[1] * cy;
1315 vdst = p->data[2] + p->linesize[2] * cy;
1317 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1318 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1319 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1321 encode_422_bitstream(s, 0, width);
/* LEFT / PLANE predictors. */
1324 for (cy = y = 1; y < height; y++, cy++) {
1325 uint8_t *ydst, *udst, *vdst;
1327 /* encode a luma only line & y++ */
1328 if (s->bitstream_bpp == 12) {
1329 ydst = p->data[0] + p->linesize[0] * y;
1331 if (s->predictor == PLANE && s->interlaced < y) {
1332 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1334 lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1336 lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1338 encode_gray_bitstream(s, width);
1340 if (y >= height) break;
1343 ydst = p->data[0] + p->linesize[0] * y;
1344 udst = p->data[1] + p->linesize[1] * cy;
1345 vdst = p->data[2] + p->linesize[2] * cy;
/* PLANE: difference against the previous (same-field) row first,
 * then left-predict the differences. V reuses temp[2]'s tail. */
1347 if (s->predictor == PLANE && s->interlaced < cy) {
1348 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1349 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1350 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1352 lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1353 leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1354 leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1356 lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1357 leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1358 leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1361 encode_422_bitstream(s, 0, width);
/* Packed RGB path: iterate bottom-up via negative strides. */
1364 } else if(avctx->pix_fmt == AV_PIX_FMT_RGB32) {
1365 uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
1366 const int stride = -p->linesize[0];
1367 const int fake_stride = -fake_ystride;
1369 int leftr, leftg, leftb;
1371 put_bits(&s->pb, 8, leftr = data[R]);
1372 put_bits(&s->pb, 8, leftg = data[G]);
1373 put_bits(&s->pb, 8, leftb = data[B]);
1374 put_bits(&s->pb, 8, 0);
1376 sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1, &leftr, &leftg, &leftb);
1377 encode_bgr_bitstream(s, width - 1);
1379 for (y = 1; y < s->height; y++) {
1380 uint8_t *dst = data + y*stride;
1381 if (s->predictor == PLANE && s->interlaced < y) {
1382 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
1383 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1385 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1387 encode_bgr_bitstream(s, width);
1390 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1394 size += (put_bits_count(&s->pb) + 31) / 8;
1395 put_bits(&s->pb, 16, 0);
1396 put_bits(&s->pb, 15, 0);
1399 if ((s->flags&CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
1401 char *p = avctx->stats_out;
1402 char *end = p + 1024*30;
1403 for (i = 0; i < 3; i++) {
1404 for (j = 0; j < 256; j++) {
1405 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1409 snprintf(p, end-p, "\n");
1413 avctx->stats_out[0] = '\0';
1414 if (!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
1415 flush_put_bits(&s->pb);
1416 s->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
1419 s->picture_number++;
1421 pkt->size = size * 4;
1422 pkt->flags |= AV_PKT_FLAG_KEY;
1428 static av_cold int encode_end(AVCodecContext *avctx)
1430 HYuvContext *s = avctx->priv_data;
1434 av_freep(&avctx->extradata);
1435 av_freep(&avctx->stats_out);
1439 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
#if CONFIG_HUFFYUV_DECODER
/* Registration entry for the native Huffyuv decoder.  The visible
 * initializer lacked the mandatory .name designator and its closing
 * brace; both are restored here. */
AVCodec ff_huffyuv_decoder = {
    .name             = "huffyuv",
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_HUFFYUV,
    .priv_data_size   = sizeof(HYuvContext),
    .init             = decode_init,
    .close            = decode_end,
    .decode           = decode_frame,
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
                        CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name        = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif
#if CONFIG_FFVHUFF_DECODER
/* Registration entry for the FFmpeg-variant Huffyuv decoder; identical
 * to the huffyuv decoder except for codec name/id.  The missing .name
 * designator and closing brace are restored. */
AVCodec ff_ffvhuff_decoder = {
    .name             = "ffvhuff",
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_FFVHUFF,
    .priv_data_size   = sizeof(HYuvContext),
    .init             = decode_init,
    .close            = decode_end,
    .decode           = decode_frame,
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
                        CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name        = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif
#if CONFIG_HUFFYUV_ENCODER
/* Registration entry for the native Huffyuv encoder.  Restores the
 * missing .name designator and the unterminated .pix_fmts compound
 * literal / struct initializer. */
AVCodec ff_huffyuv_encoder = {
    .name           = "huffyuv",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif
#if CONFIG_FFVHUFF_ENCODER
/* Registration entry for the FFmpeg-variant Huffyuv encoder; unlike the
 * stock encoder it additionally accepts YUV420P input.  Restores the
 * missing .name designator and the closing braces. */
AVCodec ff_ffvhuff_encoder = {
    .name           = "ffvhuff",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FFVHUFF,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif