2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
9 * This file is part of Libav.
11 * Libav is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * Libav is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with Libav; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
28 * huffyuv codec for libavcodec.
/* Prediction modes signalled in the stream header (enum body not fully
 * visible in this excerpt; LEFT/PLANE/MEDIAN are used below). */
51 typedef enum Predictor {
/* Shared per-stream state for the huffyuv/ffvhuff encoder and decoder. */
57 typedef struct HYuvContext {
58 AVCodecContext *avctx;
66 int yuy2; //use yuy2 instead of 422P
67 int bgr32; //use bgr32 instead of bgr24
/* symbol occurrence counts per plane, used to (re)build Huffman tables */
74 uint64_t stats[3][256];
/* per-plane canonical Huffman codes; code lengths live in a matching
 * len[3][256] table (declared in a line not visible here) */
76 uint32_t bits[3][256];
/* joint RGB decode map: one packed pixel per first-level VLC code */
77 uint32_t pix_bgr_map[1<<VLC_BITS];
78 VLC vlc[6]; //Y,U,V,YY,YU,YV
/* byte-swapped copy of the input packet (filled via dsp.bswap_buf in
 * decode_frame); sized/reused with av_fast_malloc */
80 uint8_t *bitstream_buffer;
81 unsigned int bitstream_buffer_size;
85 #define classic_shift_luma_table_size 42
/* Run-length-coded Huffman code-length table (parsed by read_len_table)
 * for the luma plane of "classic" v1 huffyuv files that carry no tables. */
86 static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
87 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
88 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
92 #define classic_shift_chroma_table_size 59
/* Run-length-coded Huffman code-length table (parsed by read_len_table)
 * for the chroma planes of "classic" v1 huffyuv files. */
93 static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
94 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
95 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
96 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
/* Canned Huffman code values for the luma plane of classic v1 files;
 * copied verbatim into s->bits[0] by read_old_huffman_tables(). */
99 static const unsigned char classic_add_luma[256] = {
100 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
101 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
102 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
103 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
104 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
105 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
106 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
107 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
108 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
109 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
110 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
111 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
112 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
113 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
114 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
115 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
/* Canned Huffman code values for the chroma planes of classic v1 files;
 * copied verbatim into s->bits[1] by read_old_huffman_tables(). */
118 static const unsigned char classic_add_chroma[256] = {
119 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
120 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
121 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
122 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
123 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
124 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
125 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
126 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
127 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
128 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
129 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
130 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
131 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
132 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
133 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
134 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
/* Left-predict one row for encoding: dst[i] = src[i] - previous value,
 * seeded with 'left'. The visible code shows a plain C loop and a second
 * path that handles the first 16 pixels in C before delegating the rest
 * to dsp.diff_bytes (presumably a short-row vs. long-row split — the
 * branch itself is not visible in this excerpt). Presumably returns the
 * last source pixel to seed the next row — TODO confirm against full file. */
137 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
138 uint8_t *src, int w, int left)
142 for (i = 0; i < w; i++) {
143 const int temp = src[i];
144 dst[i] = temp - left;
149 for (i = 0; i < 16; i++) {
150 const int temp = src[i];
151 dst[i] = temp - left;
154 s->dsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
/* Left-predict a BGR32 row channel-wise: first up to 4 pixels in C, the
 * remainder via dsp.diff_bytes with a 4-byte (one-pixel) source offset.
 * The last pixel's channels are written back through red/green/blue so
 * the caller can seed the next row. */
159 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
161 int *red, int *green, int *blue)
169 for (i = 0; i < FFMIN(w, 4); i++) {
170 const int rt = src[i * 4 + R];
171 const int gt = src[i * 4 + G];
172 const int bt = src[i * 4 + B];
173 dst[i * 4 + R] = rt - r;
174 dst[i * 4 + G] = gt - g;
175 dst[i * 4 + B] = bt - b;
/* src + 12 = src + 16 - 4: difference against the previous pixel */
181 s->dsp.diff_bytes(dst + 16, src + 16, src + 12, w * 4 - 16);
183 *red = src[(w - 1) * 4 + R];
184 *green = src[(w - 1) * 4 + G];
185 *blue = src[(w - 1) * 4 + B];
/* Parse a run-length-coded table of 256 Huffman code lengths:
 * 3-bit repeat count + 5-bit length value per group; an 8-bit extended
 * repeat is read as well (the condition guarding it is not visible here —
 * presumably repeat == 0 means "read 8 more bits"; TODO confirm).
 * Fails if the runs overshoot 256 entries or the bitstream is exhausted. */
188 static int read_len_table(uint8_t *dst, GetBitContext *gb)
192 for (i = 0; i < 256;) {
193 repeat = get_bits(gb, 3);
194 val = get_bits(gb, 5);
196 repeat = get_bits(gb, 8);
197 if (i + repeat > 256 || get_bits_left(gb) < 0) {
198 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
/* Assign canonical Huffman code values from a table of code lengths,
 * scanning from the longest length (32) down to 1. Returns an error
 * (with the log below) when the length table is inconsistent. */
207 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table)
212 for (len = 32; len > 0; len--) {
213 for (index = 0; index < 256; index++) {
214 if (len_table[index] == len)
218 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
226 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Sift the element at 'root' down a min-heap (ordered by .val) to restore
 * the heap property; helper for generate_len_table below. */
232 static void heap_sift(HeapElem *h, int root, int size)
234 while (root * 2 + 1 < size) {
235 int child = root * 2 + 1;
/* pick the smaller of the two children (child++ line not visible here) */
236 if (child < size - 1 && h[child].val > h[child + 1].val)
238 if (h[root].val > h[child].val) {
239 FFSWAP(HeapElem, h[root], h[child]);
/* Build Huffman code lengths from symbol statistics using a heap-based
 * merge. If any resulting length reaches 32 (the break below), the outer
 * loop restarts with a doubled 'offset' bias added to every weight, which
 * flattens the distribution until all lengths fit. */
246 static void generate_len_table(uint8_t *dst, const uint64_t *stats)
254 for (offset = 1; ; offset <<= 1) {
255 for (i = 0; i < size; i++) {
257 h[i].val = (stats[i] << 8) + offset;
/* heapify */
259 for (i = size / 2 - 1; i >= 0; i--)
260 heap_sift(h, i, size);
262 for (next = size; next < size * 2 - 1; next++) {
263 // merge the two smallest entries, and put it back in the heap
264 uint64_t min1v = h[0].val;
265 up[h[0].name] = next;
266 h[0].val = INT64_MAX;
267 heap_sift(h, 0, size);
268 up[h[0].name] = next;
271 heap_sift(h, 0, size);
/* walk the merge tree from the root down, depth = code length */
274 len[2 * size - 2] = 0;
275 for (i = 2 * size - 3; i >= size; i--)
276 len[i] = len[up[i]] + 1;
277 for (i = 0; i < size; i++) {
278 dst[i] = len[up[i]] + 1;
279 if (dst[i] >= 32) break;
284 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Build the joint two-symbol VLC tables (s->vlc[3..5]) that let the
 * decoder read a pair of symbols in a single table lookup.
 * YUV path: pairs (Y,Y), (Y,U), (Y,V) whose combined length fits in
 * VLC_BITS. RGB path: a single table s->vlc[3] over (g,b,r) triples,
 * plus the pix_bgr_map lookup filled elsewhere in this function. */
286 static void generate_joint_tables(HYuvContext *s)
288 uint16_t symbols[1 << VLC_BITS];
289 uint16_t bits[1 << VLC_BITS];
290 uint8_t len[1 << VLC_BITS];
291 if (s->bitstream_bpp < 24) {
293 for (p = 0; p < 3; p++) {
294 for (i = y = 0; y < 256; y++) {
295 int len0 = s->len[0][y];
296 int limit = VLC_BITS - len0;
299 for (u = 0; u < 256; u++) {
300 int len1 = s->len[p][u];
303 len[i] = len0 + len1;
/* concatenated code: first symbol's bits followed by the second's */
304 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
305 symbols[i] = (y << 8) + u;
306 if(symbols[i] != 0xffff) // reserved to mean "invalid"
310 ff_free_vlc(&s->vlc[3 + p]);
311 ff_init_vlc_sparse(&s->vlc[3 + p], VLC_BITS, i, len, 1, 1,
312 bits, 2, 2, symbols, 2, 2, 0);
315 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
316 int i, b, g, r, code;
/* plane order depends on whether the stream is decorrelated */
317 int p0 = s->decorrelate;
318 int p1 = !s->decorrelate;
319 // restrict the range to +/-16 because that's pretty much guaranteed to
320 // cover all the combinations that fit in 11 bits total, and it doesn't
321 // matter if we miss a few rare codes.
322 for (i = 0, g = -16; g < 16; g++) {
323 int len0 = s->len[p0][g & 255];
324 int limit0 = VLC_BITS - len0;
327 for (b = -16; b < 16; b++) {
328 int len1 = s->len[p1][b & 255];
329 int limit1 = limit0 - len1;
332 code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
333 for (r = -16; r < 16; r++) {
334 int len2 = s->len[2][r & 255];
337 len[i] = len0 + len1 + len2;
338 bits[i] = (code << len2) + s->bits[2][r & 255];
339 if (s->decorrelate) {
352 ff_free_vlc(&s->vlc[3]);
353 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
/* Read the three per-plane Huffman length tables from 'src' (v2 streams),
 * derive the code values, (re)build the per-plane VLCs and the joint
 * tables. Returns the number of bytes consumed, or <0 on error (the
 * early-return lines are not visible in this excerpt). */
357 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
362 init_get_bits(&gb, src, length * 8);
364 for (i = 0; i < 3; i++) {
365 if (read_len_table(s->len[i], &gb) < 0)
367 if (generate_bits_table(s->bits[i], s->len[i]) < 0) {
370 ff_free_vlc(&s->vlc[i]);
371 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
372 s->bits[i], 4, 4, 0);
375 generate_joint_tables(s);
/* bytes consumed, rounded up to whole bytes */
377 return (get_bits_count(&gb) + 7) / 8;
/* Set up the fixed "classic" v1 huffyuv tables: lengths come from the
 * canned run-length tables above, code values from classic_add_*; the
 * chroma table is duplicated for plane 2 (and luma duplicated for RGB). */
380 static int read_old_huffman_tables(HYuvContext *s)
386 init_get_bits(&gb, classic_shift_luma,
387 classic_shift_luma_table_size * 8);
388 if (read_len_table(s->len[0], &gb) < 0)
391 init_get_bits(&gb, classic_shift_chroma,
392 classic_shift_chroma_table_size * 8);
393 if (read_len_table(s->len[1], &gb) < 0)
396 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
397 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
/* RGB streams use the luma table for every plane */
399 if (s->bitstream_bpp >= 24) {
400 memcpy(s->bits[1], s->bits[0], 256 * sizeof(uint32_t));
401 memcpy(s->len[1] , s->len [0], 256 * sizeof(uint8_t));
403 memcpy(s->bits[2], s->bits[1], 256 * sizeof(uint32_t));
404 memcpy(s->len[2] , s->len [1], 256 * sizeof(uint8_t));
406 for (i = 0; i < 3; i++) {
407 ff_free_vlc(&s->vlc[i]);
408 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
409 s->bits[i], 4, 4, 0);
412 generate_joint_tables(s);
/* reached when the classic tables cannot be parsed */
416 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
/* Allocate per-plane scratch rows: three width-sized buffers for planar
 * YUV, one 4*width buffer for packed RGB. +16 gives DSP routines padding.
 * NOTE(review): av_malloc return values are not visibly checked here —
 * confirm the full file handles allocation failure. */
421 static av_cold void alloc_temp(HYuvContext *s)
425 if (s->bitstream_bpp<24) {
426 for (i=0; i<3; i++) {
427 s->temp[i]= av_malloc(s->width + 16);
430 s->temp[0]= av_mallocz(4*s->width + 16);
/* Initialization shared by encoder and decoder: cache flags/dimensions
 * and set up the DSP function pointers. */
434 static av_cold int common_init(AVCodecContext *avctx)
436 HYuvContext *s = avctx->priv_data;
439 s->flags = avctx->flags;
441 ff_dsputil_init(&s->dsp, avctx);
443 s->width = avctx->width;
444 s->height = avctx->height;
445 assert(s->width>0 && s->height>0);
450 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder init: determine stream version from extradata, parse the v2
 * header (method byte, bpp, interlacing, context flag, Huffman tables)
 * or fall back to the v1 heuristics and classic tables, then pick the
 * output pixel format from bitstream_bpp. */
451 static av_cold int decode_init(AVCodecContext *avctx)
453 HYuvContext *s = avctx->priv_data;
456 memset(s->vlc, 0, 3 * sizeof(VLC));
458 avctx->coded_frame = &s->picture;
/* heuristic: streams taller than 288 lines default to interlaced */
459 s->interlaced = s->height > 288;
463 if (avctx->extradata_size) {
464 if ((avctx->bits_per_coded_sample & 7) &&
465 avctx->bits_per_coded_sample != 12)
466 s->version = 1; // do such files exist at all?
472 if (s->version == 2) {
473 int method, interlace;
475 if (avctx->extradata_size < 4)
/* extradata[0]: bit 6 = decorrelate, low 6 bits = predictor */
478 method = ((uint8_t*)avctx->extradata)[0];
479 s->decorrelate = method & 64 ? 1 : 0;
480 s->predictor = method & 63;
481 s->bitstream_bpp = ((uint8_t*)avctx->extradata)[1];
482 if (s->bitstream_bpp == 0)
483 s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
/* extradata[2]: 0x30 = interlace override, 0x40 = context model */
484 interlace = (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
485 s->interlaced = (interlace == 1) ? 1 : (interlace == 2) ? 0 : s->interlaced;
486 s->context = ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
488 if ( read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
489 avctx->extradata_size - 4) < 0)
/* v1: predictor encoded in the low 3 bits of bits_per_coded_sample */
492 switch (avctx->bits_per_coded_sample & 7) {
502 s->predictor = PLANE;
503 s->decorrelate = avctx->bits_per_coded_sample >= 24;
506 s->predictor = MEDIAN;
510 s->predictor = LEFT; //OLD
514 s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
517 if (read_old_huffman_tables(s) < 0)
521 switch (s->bitstream_bpp) {
523 avctx->pix_fmt = PIX_FMT_YUV420P;
527 avctx->pix_fmt = PIX_FMT_YUYV422;
529 avctx->pix_fmt = PIX_FMT_YUV422P;
535 avctx->pix_fmt = PIX_FMT_RGB32;
537 avctx->pix_fmt = PIX_FMT_BGR24;
541 return AVERROR_INVALIDDATA;
/* Per-thread copy init for frame threading: reset the VLC pointers so the
 * copy builds its own tables instead of sharing freed ones, then re-read
 * the Huffman tables for this thread's context. */
549 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
551 HYuvContext *s = avctx->priv_data;
554 avctx->coded_frame= &s->picture;
557 for (i = 0; i < 6; i++)
558 s->vlc[i].table = NULL;
560 if (s->version == 2) {
/* NOTE(review): decode_init passes extradata_size - 4 here; this call
 * passes the full extradata_size, so 4 extra bytes are presented to the
 * table parser — looks like a bug, confirm against upstream. */
561 if (read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
562 avctx->extradata_size) < 0)
565 if (read_old_huffman_tables(s) < 0)
571 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
573 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Run-length-encode one 256-entry code-length table into 'buf' in the
 * format read_len_table() expects (repeat<<5 | value, long runs escaped
 * with an extra byte). Returns the number of bytes written (return line
 * not visible in this excerpt). */
574 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
579 for (i = 0; i < 256;) {
/* count how many consecutive entries share the same length */
583 for (; i < 256 && len[i] == val && repeat < 255; i++)
586 assert(val < 32 && val >0 && repeat<256 && repeat>0);
589 buf[index++] = repeat;
591 buf[index++] = val | (repeat << 5);
/* Encoder init: allocate extradata/stats buffers, map the input pixel
 * format to a bitstream bpp, validate option combinations (context model
 * vs. 2-pass, real huffyuv restrictions), write the 4-byte global header
 * into extradata, seed the symbol statistics (from stats_in when doing a
 * second pass, otherwise from a synthetic distribution), and append the
 * serialized Huffman tables to extradata. */
598 static av_cold int encode_init(AVCodecContext *avctx)
600 HYuvContext *s = avctx->priv_data;
605 avctx->extradata = av_mallocz(1024*30); // 256*3+4 == 772
606 avctx->stats_out = av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
609 avctx->coded_frame = &s->picture;
611 switch (avctx->pix_fmt) {
612 case PIX_FMT_YUV420P:
613 s->bitstream_bpp = 12;
615 case PIX_FMT_YUV422P:
616 s->bitstream_bpp = 16;
619 s->bitstream_bpp = 24;
622 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
625 avctx->bits_per_coded_sample = s->bitstream_bpp;
626 s->decorrelate = s->bitstream_bpp >= 24;
627 s->predictor = avctx->prediction_method;
628 s->interlaced = avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
629 if (avctx->context_model == 1) {
630 s->context = avctx->context_model;
631 if (s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) {
632 av_log(avctx, AV_LOG_ERROR,
633 "context=1 is not compatible with "
634 "2 pass huffyuv encoding\n");
/* restrictions that apply only to the original huffyuv (not ffvhuff) */
639 if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
640 if (avctx->pix_fmt == PIX_FMT_YUV420P) {
641 av_log(avctx, AV_LOG_ERROR,
642 "Error: YV12 is not supported by huffyuv; use "
643 "vcodec=ffvhuff or format=422p\n");
646 if (avctx->context_model) {
647 av_log(avctx, AV_LOG_ERROR,
648 "Error: per-frame huffman tables are not supported "
649 "by huffyuv; use vcodec=ffvhuff\n");
652 if (s->interlaced != ( s->height > 288 ))
653 av_log(avctx, AV_LOG_INFO,
654 "using huffyuv 2.2.0 or newer interlacing flag\n");
657 if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN) {
658 av_log(avctx, AV_LOG_ERROR,
659 "Error: RGB is incompatible with median predictor\n");
/* 4-byte global header: method, bpp, interlace/context flags, reserved */
663 ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
664 ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
665 ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
667 ((uint8_t*)avctx->extradata)[2] |= 0x40;
668 ((uint8_t*)avctx->extradata)[3] = 0;
669 s->avctx->extradata_size = 4;
/* second pass: accumulate statistics parsed from the first pass output */
671 if (avctx->stats_in) {
672 char *p = avctx->stats_in;
674 for (i = 0; i < 3; i++)
675 for (j = 0; j < 256; j++)
679 for (i = 0; i < 3; i++) {
682 for (j = 0; j < 256; j++) {
683 s->stats[i][j] += strtol(p, &next, 0);
684 if (next == p) return -1;
688 if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
/* no stats: seed with a distribution favouring small residuals */
691 for (i = 0; i < 3; i++)
692 for (j = 0; j < 256; j++) {
693 int d = FFMIN(j, 256 - j);
695 s->stats[i][j] = 100000000 / (d + 1);
699 for (i = 0; i < 3; i++) {
700 generate_len_table(s->len[i], s->stats[i]);
702 if (generate_bits_table(s->bits[i], s->len[i]) < 0) {
706 s->avctx->extradata_size +=
707 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
/* context mode: reset stats to a per-plane synthetic distribution */
711 for (i = 0; i < 3; i++) {
712 int pels = s->width * s->height / (i ? 40 : 10);
713 for (j = 0; j < 256; j++) {
714 int d = FFMIN(j, 256 - j);
715 s->stats[i][j] = pels/(d + 1);
719 for (i = 0; i < 3; i++)
720 for (j = 0; j < 256; j++)
730 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
732 /* TODO instead of restarting the read when the code isn't in the first level
733 * of the joint table, jump into the 2nd level of the individual table. */
/* Decode two symbols at once via the joint VLC; on a first-level miss
 * (the branch line is not visible here) fall back to two single-symbol
 * reads from the individual tables. */
734 #define READ_2PIX(dst0, dst1, plane1){\
735 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
740 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
741 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
/* Decode 'count' 4:2:2 pixel pairs (Y0 U Y1 V) into s->temp[0..2].
 * The guarded path re-checks get_bits_left each iteration when the worst
 * case (31 bits * 4 symbols per pair) could overrun the buffer; the fast
 * path skips those checks. */
745 static void decode_422_bitstream(HYuvContext *s, int count)
751 if (count >= (get_bits_left(&s->gb)) / (31 * 4)) {
752 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
753 READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
754 READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
757 for (i = 0; i < count; i++) {
758 READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
759 READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
/* Decode 'count' luma-only pixel pairs into s->temp[0]; same
 * bounds-checked vs. fast path split as decode_422_bitstream. */
764 static void decode_gray_bitstream(HYuvContext *s, int count)
770 if (count >= (get_bits_left(&s->gb)) / (31 * 2)) {
771 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
772 READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
775 for(i=0; i<count; i++){
776 READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
781 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Emit 'count' 4:2:2 pixel pairs from s->temp[] into the bitstream,
 * starting at pixel 'offset'. Three paths: stats+write (PASS1),
 * stats-only (NO_OUTPUT), and plain write. Fails if fewer than
 * 2*4*count bytes remain in the output buffer. */
782 static int encode_422_bitstream(HYuvContext *s, int offset, int count)
785 const uint8_t *y = s->temp[0] + offset;
786 const uint8_t *u = s->temp[1] + offset / 2;
787 const uint8_t *v = s->temp[2] + offset / 2;
789 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 2 * 4 * count) {
790 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
796 int y1 = y[2 * i + 1];\
/* pass 1: gather symbol statistics (and also write, unless NO_OUTPUT) */
802 if (s->flags & CODEC_FLAG_PASS1) {
803 for(i = 0; i < count; i++) {
811 if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
814 for (i = 0; i < count; i++) {
817 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
819 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
821 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
823 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
/* default path: write only, no statistics */
826 for(i = 0; i < count; i++) {
828 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
829 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
830 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
831 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
/* Emit 'count' luma-only pixel pairs from s->temp[0]; same three-path
 * structure (stats+write / stats-only / write-only) as
 * encode_422_bitstream. */
837 static int encode_gray_bitstream(HYuvContext *s, int count)
841 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * count) {
842 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
847 int y0 = s->temp[0][2 * i];\
848 int y1 = s->temp[0][2 * i + 1];
853 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
854 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
858 if (s->flags & CODEC_FLAG_PASS1) {
859 for (i = 0; i < count; i++) {
864 if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
868 for (i = 0; i < count; i++) {
874 for (i = 0; i < count; i++) {
881 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Decode 'count' BGR(A) pixels into s->temp[0]. Fast path: one joint-VLC
 * hit maps straight to a packed pixel via pix_bgr_map; otherwise decode
 * the three channels individually, adding G back to B and R when the
 * stream is decorrelated. Compile-time 'decorrelate'/'alpha' flags let
 * the callers below instantiate four specialized variants. */
883 static av_always_inline void decode_bgr_1(HYuvContext *s, int count,
884 int decorrelate, int alpha)
887 for (i = 0; i < count; i++) {
888 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
890 *(uint32_t*)&s->temp[0][4 * i] = s->pix_bgr_map[code];
891 } else if(decorrelate) {
892 s->temp[0][4 * i + G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
893 s->temp[0][4 * i + B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) +
894 s->temp[0][4 * i + G];
895 s->temp[0][4 * i + R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) +
896 s->temp[0][4 * i + G];
898 s->temp[0][4 * i + B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
899 s->temp[0][4 * i + G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
900 s->temp[0][4 * i + R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
903 s->temp[0][4 * i + A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
/* Dispatch to the specialized decode_bgr_1 variant for the stream's
 * decorrelate flag and presence of alpha (bpp 24 = no alpha). */
907 static void decode_bgr_bitstream(HYuvContext *s, int count)
909 if (s->decorrelate) {
910 if (s->bitstream_bpp==24)
911 decode_bgr_1(s, count, 1, 0);
913 decode_bgr_1(s, count, 1, 1);
915 if (s->bitstream_bpp==24)
916 decode_bgr_1(s, count, 0, 0);
918 decode_bgr_1(s, count, 0, 1);
/* Emit 'count' BGR pixels from s->temp[0], decorrelating B and R against
 * G first; same stats/no-output/plain split as the other encode_* helpers.
 * Fails if fewer than 3*4*count bytes remain in the output buffer. */
922 static int encode_bgr_bitstream(HYuvContext *s, int count)
926 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 3 * 4 * count) {
927 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
932 int g = s->temp[0][4 * i + G];\
933 int b = (s->temp[0][4 * i + B] - g) & 0xff;\
934 int r = (s->temp[0][4 * i + R] - g) & 0xff;
940 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
941 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
942 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
944 if ((s->flags & CODEC_FLAG_PASS1) &&
945 (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
946 for (i = 0; i < count; i++) {
950 } else if (s->context || (s->flags & CODEC_FLAG_PASS1)) {
951 for (i = 0; i < count; i++) {
957 for (i = 0; i < count; i++) {
965 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Report the slice of rows [last_slice_end, y) to the application via
 * draw_horiz_band, if a callback is installed; no-op otherwise.
 * For 12bpp (4:2:0) streams 'cy' is the chroma row (derivation line not
 * visible here — presumably y/2). */
966 static void draw_slice(HYuvContext *s, int y)
969 int offset[AV_NUM_DATA_POINTERS];
971 if (s->avctx->draw_horiz_band==NULL)
974 h = y - s->last_slice_end;
977 if (s->bitstream_bpp == 12) {
983 offset[0] = s->picture.linesize[0]*y;
984 offset[1] = s->picture.linesize[1]*cy;
985 offset[2] = s->picture.linesize[2]*cy;
986 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
990 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
992 s->last_slice_end = y + h;
/* Decode one huffyuv frame. Pipeline: byte-swap the packet into
 * bitstream_buffer (input is stored as 32-bit words), acquire a frame
 * buffer, optionally re-read per-frame Huffman tables (context mode),
 * then undo the predictor per row: LEFT/PLANE/MEDIAN for planar YUV, or
 * the bottom-up BGR32 path for RGB. Returns bytes consumed. */
995 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
998 const uint8_t *buf = avpkt->data;
999 int buf_size = avpkt->size;
1000 HYuvContext *s = avctx->priv_data;
1001 const int width = s->width;
1002 const int width2 = s->width>>1;
1003 const int height = s->height;
1004 int fake_ystride, fake_ustride, fake_vstride;
1005 AVFrame * const p = &s->picture;
1008 AVFrame *picture = data;
1010 av_fast_malloc(&s->bitstream_buffer,
1011 &s->bitstream_buffer_size,
1012 buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
1013 if (!s->bitstream_buffer)
1014 return AVERROR(ENOMEM);
1016 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
/* the stream stores data in byte-swapped 32-bit words */
1017 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer,
1018 (const uint32_t*)buf, buf_size / 4);
1021 ff_thread_release_buffer(avctx, p);
1024 if (ff_thread_get_buffer(avctx, p) < 0) {
1025 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
/* context mode: tables precede the coded data inside each packet */
1030 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
/* guard the *8 below against int overflow */
1035 if ((unsigned)(buf_size-table_size) >= INT_MAX / 8)
1038 init_get_bits(&s->gb, s->bitstream_buffer+table_size,
1039 (buf_size-table_size) * 8);
/* "fake" strides skip a row when interlaced, so prediction references
 * the previous row of the same field */
1041 fake_ystride = s->interlaced ? p->linesize[0] * 2 : p->linesize[0];
1042 fake_ustride = s->interlaced ? p->linesize[1] * 2 : p->linesize[1];
1043 fake_vstride = s->interlaced ? p->linesize[2] * 2 : p->linesize[2];
1045 s->last_slice_end = 0;
1047 if (s->bitstream_bpp < 24) {
1049 int lefty, leftu, leftv;
1050 int lefttopy, lefttopu, lefttopv;
/* yuy2 path: first 4 samples are stored raw */
1053 p->data[0][3] = get_bits(&s->gb, 8);
1054 p->data[0][2] = get_bits(&s->gb, 8);
1055 p->data[0][1] = get_bits(&s->gb, 8);
1056 p->data[0][0] = get_bits(&s->gb, 8);
1058 av_log(avctx, AV_LOG_ERROR,
1059 "YUY2 output is not implemented yet\n");
/* planar path: raw first pixel seeds the left predictors */
1063 leftv = p->data[2][0] = get_bits(&s->gb, 8);
1064 lefty = p->data[0][1] = get_bits(&s->gb, 8);
1065 leftu = p->data[1][0] = get_bits(&s->gb, 8);
1066 p->data[0][0] = get_bits(&s->gb, 8);
1068 switch (s->predictor) {
/* LEFT / PLANE: first row except the 2 raw pixels is left-predicted */
1071 decode_422_bitstream(s, width-2);
1072 lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1073 if (!(s->flags&CODEC_FLAG_GRAY)) {
1074 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
1075 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
1078 for (cy = y = 1; y < s->height; y++, cy++) {
1079 uint8_t *ydst, *udst, *vdst;
/* 4:2:0: decode a luma-only row first, then a full 4:2:2 row */
1081 if (s->bitstream_bpp == 12) {
1082 decode_gray_bitstream(s, width);
1084 ydst = p->data[0] + p->linesize[0] * y;
1086 lefty = s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1087 if (s->predictor == PLANE) {
1088 if (y > s->interlaced)
1089 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1092 if (y >= s->height) break;
1097 ydst = p->data[0] + p->linesize[0]*y;
1098 udst = p->data[1] + p->linesize[1]*cy;
1099 vdst = p->data[2] + p->linesize[2]*cy;
1101 decode_422_bitstream(s, width);
1102 lefty = s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1103 if (!(s->flags & CODEC_FLAG_GRAY)) {
1104 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1105 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
/* PLANE predictor adds the previous (same-field) row on top */
1107 if (s->predictor == PLANE) {
1108 if (cy > s->interlaced) {
1109 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1110 if (!(s->flags & CODEC_FLAG_GRAY)) {
1111 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1112 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1117 draw_slice(s, height);
/* MEDIAN predictor */
1121 /* first line except first 2 pixels is left predicted */
1122 decode_422_bitstream(s, width - 2);
1123 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width - 2, lefty);
1124 if (!(s->flags & CODEC_FLAG_GRAY)) {
1125 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
1126 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
1131 /* second line is left predicted for interlaced case */
1132 if (s->interlaced) {
1133 decode_422_bitstream(s, width);
1134 lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1135 if (!(s->flags & CODEC_FLAG_GRAY)) {
/* NOTE(review): linesize[2] is used with data[1] and vice versa;
 * harmless only if the chroma linesizes are equal — confirm upstream */
1136 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1137 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1142 /* next 4 pixels are left predicted too */
1143 decode_422_bitstream(s, 4);
1144 lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1145 if (!(s->flags&CODEC_FLAG_GRAY)) {
1146 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1147 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1150 /* next line except the first 4 pixels is median predicted */
1151 lefttopy = p->data[0][3];
1152 decode_422_bitstream(s, width - 4);
1153 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1154 if (!(s->flags&CODEC_FLAG_GRAY)) {
1155 lefttopu = p->data[1][1];
1156 lefttopv = p->data[2][1];
1157 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
1158 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2] + 2, s->temp[2], width2 - 2, &leftv, &lefttopv);
/* remaining rows: full median prediction against the previous field row */
1162 for (; y<height; y++, cy++) {
1163 uint8_t *ydst, *udst, *vdst;
1165 if (s->bitstream_bpp == 12) {
1166 while (2 * cy > y) {
1167 decode_gray_bitstream(s, width);
1168 ydst = p->data[0] + p->linesize[0] * y;
1169 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1172 if (y >= height) break;
1176 decode_422_bitstream(s, width);
1178 ydst = p->data[0] + p->linesize[0] * y;
1179 udst = p->data[1] + p->linesize[1] * cy;
1180 vdst = p->data[2] + p->linesize[2] * cy;
1182 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1183 if (!(s->flags & CODEC_FLAG_GRAY)) {
1184 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1185 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1189 draw_slice(s, height);
/* RGB path */
1195 int leftr, leftg, leftb, lefta;
1196 const int last_line = (height - 1) * p->linesize[0];
/* raw first pixel; 24bpp streams still store (and skip) 4 bytes */
1198 if (s->bitstream_bpp == 32) {
1199 lefta = p->data[0][last_line+A] = get_bits(&s->gb, 8);
1200 leftr = p->data[0][last_line+R] = get_bits(&s->gb, 8);
1201 leftg = p->data[0][last_line+G] = get_bits(&s->gb, 8);
1202 leftb = p->data[0][last_line+B] = get_bits(&s->gb, 8);
1204 leftr = p->data[0][last_line+R] = get_bits(&s->gb, 8);
1205 leftg = p->data[0][last_line+G] = get_bits(&s->gb, 8);
1206 leftb = p->data[0][last_line+B] = get_bits(&s->gb, 8);
1207 lefta = p->data[0][last_line+A] = 255;
1208 skip_bits(&s->gb, 8);
1212 switch (s->predictor) {
1215 decode_bgr_bitstream(s, width - 1);
1216 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width - 1, &leftr, &leftg, &leftb, &lefta);
1218 for (y = s->height - 2; y >= 0; y--) { //Yes it is stored upside down.
1219 decode_bgr_bitstream(s, width);
1221 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1222 if (s->predictor == PLANE) {
1223 if (s->bitstream_bpp != 32) lefta = 0;
1224 if ((y & s->interlaced) == 0 &&
1225 y < s->height - 1 - s->interlaced) {
1226 s->dsp.add_bytes(p->data[0] + p->linesize[0] * y,
1227 p->data[0] + p->linesize[0] * y +
1228 fake_ystride, fake_ystride);
1232 // just 1 large slice as this is not possible in reverse order
1233 draw_slice(s, height);
1236 av_log(avctx, AV_LOG_ERROR,
1237 "prediction type not supported!\n");
1240 av_log(avctx, AV_LOG_ERROR,
1241 "BGR24 output is not implemented yet\n");
1248 *data_size = sizeof(AVFrame);
/* consumed size, rounded to whole 32-bit words plus the table bytes */
1250 return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
1252 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
/* Release the per-plane scratch rows allocated by alloc_temp(). */
1254 static int common_end(HYuvContext *s)
1258 for(i = 0; i < 3; i++) {
1259 av_freep(&s->temp[i]);
1264 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder teardown: release the reference frame if held, free the
 * byte-swap buffer and all six VLC tables. */
1265 static av_cold int decode_end(AVCodecContext *avctx)
1267 HYuvContext *s = avctx->priv_data;
1270 if (s->picture.data[0])
1271 avctx->release_buffer(avctx, &s->picture);
1274 av_freep(&s->bitstream_buffer);
1276 for (i = 0; i < 6; i++) {
1277 ff_free_vlc(&s->vlc[i]);
1282 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1284 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
1285 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1286 const AVFrame *pict, int *got_packet)
1288 HYuvContext *s = avctx->priv_data;
1289 const int width = s->width;
1290 const int width2 = s->width>>1;
1291 const int height = s->height;
1292 const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1293 const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1294 const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1295 AVFrame * const p = &s->picture;
1296 int i, j, size = 0, ret;
/* NOTE(review): this excerpt is elided -- the embedded original line numbers
 * jump (e.g. 1300 -> 1305), so matching braces, else branches and some
 * statements are hidden.  Comments describe only the visible statements;
 * anything depending on hidden lines is marked as an assumption. */
1299 (ret = av_new_packet(pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0) {
1300 av_log(avctx, AV_LOG_ERROR, "Error allocating output packet.\n");
/* Every huffyuv frame is coded independently, hence always an intra picture. */
1305 p->pict_type = AV_PICTURE_TYPE_I;
/* Adaptive-table mode: rebuild the per-plane (Y, U, V) Huffman length and
 * bit tables from the accumulated symbol statistics, and write the length
 * tables into the packet header. */
1309 for (i = 0; i < 3; i++) {
1310 generate_len_table(s->len[i], s->stats[i]);
1311 if (generate_bits_table(s->bits[i], s->len[i]) < 0)
1313 size += store_table(s, s->len[i], &pkt->data[size]);
/* Halve the statistics so the adaptive tables favour recent frames. */
1316 for (i = 0; i < 3; i++)
1317 for (j = 0; j < 256; j++)
1318 s->stats[i][j] >>= 1;
/* Bitstream output starts right after the stored tables. */
1321 init_put_bits(&s->pb, pkt->data + size, pkt->size - size);
1323 if (avctx->pix_fmt == PIX_FMT_YUV422P ||
1324 avctx->pix_fmt == PIX_FMT_YUV420P) {
1325 int lefty, leftu, leftv, y, cy;
/* The first samples of the frame are stored raw (8 bits each) and also seed
 * the left predictors.  Note the V, Y(1), U, Y(0) write order. */
1327 put_bits(&s->pb, 8, leftv = p->data[2][0]);
1328 put_bits(&s->pb, 8, lefty = p->data[0][1]);
1329 put_bits(&s->pb, 8, leftu = p->data[1][0]);
1330 put_bits(&s->pb, 8, p->data[0][0]);
/* Left-predict the first line of each plane into s->temp[]... */
1332 lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1333 leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1334 leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
/* ...and code it, skipping the samples that were stored raw above. */
1336 encode_422_bitstream(s, 2, width-2);
1338 if (s->predictor==MEDIAN) {
1339 int lefttopy, lefttopu, lefttopv;
/* Interlaced input: the second stored line belongs to the other field, so
 * it is still coded with plain left prediction. */
1341 if (s->interlaced) {
1342 lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
1343 leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
1344 leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);
1346 encode_422_bitstream(s, 0, width);
/* The first 4 luma / 2 chroma samples of the next line use left prediction
 * only; the median predictor needs a complete top/top-left neighbourhood. */
1350 lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
1351 leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
1352 leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);
1354 encode_422_bitstream(s, 0, 4);
/* Seed the top-left predictors from the first stored line... */
1356 lefttopy = p->data[0][3];
1357 lefttopu = p->data[1][1];
1358 lefttopv = p->data[2][1];
/* ...then median-predict the remainder of this line. */
1359 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride + 4, width - 4 , &lefty, &lefttopy);
1360 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
1361 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
1362 encode_422_bitstream(s, 0, width - 4);
/* Median-predict all remaining lines.  y walks luma lines, cy chroma lines;
 * for 4:2:0 (bitstream_bpp == 12) the extra luma lines between chroma lines
 * are coded as luma-only (gray) lines by the inner while loop. */
1365 for (; y < height; y++,cy++) {
1366 uint8_t *ydst, *udst, *vdst;
1368 if (s->bitstream_bpp == 12) {
1369 while (2 * cy > y) {
1370 ydst = p->data[0] + p->linesize[0] * y;
1371 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1372 encode_gray_bitstream(s, width);
1375 if (y >= height) break;
1377 ydst = p->data[0] + p->linesize[0] * y;
1378 udst = p->data[1] + p->linesize[1] * cy;
1379 vdst = p->data[2] + p->linesize[2] * cy;
1381 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1382 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1383 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1385 encode_422_bitstream(s, 0, width);
/* Non-median (LEFT/PLANE) predictors: same line walk as the median branch. */
1388 for (cy = y = 1; y < height; y++, cy++) {
1389 uint8_t *ydst, *udst, *vdst;
1391 /* encode a luma only line & y++ */
1392 if (s->bitstream_bpp == 12) {
1393 ydst = p->data[0] + p->linesize[0] * y;
/* PLANE predictor: difference against the line above first; the
 * "s->interlaced < y" test skips lines with no same-field neighbour. */
1395 if (s->predictor == PLANE && s->interlaced < y) {
1396 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1398 lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1400 lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1402 encode_gray_bitstream(s, width);
1404 if (y >= height) break;
1407 ydst = p->data[0] + p->linesize[0] * y;
1408 udst = p->data[1] + p->linesize[1] * cy;
1409 vdst = p->data[2] + p->linesize[2] * cy;
1411 if (s->predictor == PLANE && s->interlaced < cy) {
1412 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1413 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
/* U and V vertical deltas share s->temp[2]; V goes in the upper half. */
1414 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1416 lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1417 leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1418 leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1420 lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1421 leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1422 leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1425 encode_422_bitstream(s, 0, width);
1428 } else if(avctx->pix_fmt == PIX_FMT_RGB32) {
/* RGB32: huffyuv stores RGB frames bottom-up, so start from the last line
 * and walk with a negated stride. */
1429 uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
1430 const int stride = -p->linesize[0];
1431 const int fake_stride = -fake_ystride;
1433 int leftr, leftg, leftb;
/* First pixel stored raw (R, G, B plus one zero pad byte); it also seeds
 * the per-channel left predictors. */
1435 put_bits(&s->pb, 8, leftr = data[R]);
1436 put_bits(&s->pb, 8, leftg = data[G]);
1437 put_bits(&s->pb, 8, leftb = data[B]);
1438 put_bits(&s->pb, 8, 0);
/* Rest of the first line: left prediction on width-1 remaining pixels. */
1440 sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1, &leftr, &leftg, &leftb);
1441 encode_bgr_bitstream(s, width - 1);
1443 for (y = 1; y < s->height; y++) {
1444 uint8_t *dst = data + y*stride;
1445 if (s->predictor == PLANE && s->interlaced < y) {
/* width * 4 bytes per line: 4 bytes per RGB32 pixel. */
1446 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
1447 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1449 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1451 encode_bgr_bitstream(s, width);
1454 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
/* NOTE(review): (bits + 31) / 8 counts BYTES here, yet "pkt->size = size * 4"
 * below treats size as 32-bit words; the upstream source reconciles the two
 * with a "size /= 4" on one of the elided lines (around original line 1461)
 * -- confirm that line is still present before touching this arithmetic. */
1458 size += (put_bits_count(&s->pb) + 31) / 8;
/* Trailing 16 + 15 zero bits pad the stream so the final word can be
 * flushed safely. */
1459 put_bits(&s->pb, 16, 0);
1460 put_bits(&s->pb, 15, 0);
/* Two-pass rate control: dump symbol statistics into stats_out every 32nd
 * frame.  The 1024*30 bound presumably matches the stats_out allocation in
 * encode_init -- verify there. */
1463 if ((s->flags&CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
1465 char *p = avctx->stats_out;
1466 char *end = p + 1024*30;
1467 for (i = 0; i < 3; i++) {
1468 for (j = 0; j < 256; j++) {
1469 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1473 snprintf(p, end-p, "\n");
/* Frames that don't dump stats leave stats_out empty. */
1477 avctx->stats_out[0] = '\0';
/* Unless output is suppressed (stats-only pass), flush the bit writer and
 * byte-swap the buffer in place: the bitstream is stored as 32-bit words. */
1478 if (!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
1479 flush_put_bits(&s->pb);
1480 s->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
1483 s->picture_number++;
/* size is in 32-bit words at this point (see NOTE above). */
1485 pkt->size = size * 4;
1486 pkt->flags |= AV_PKT_FLAG_KEY;
/* Encoder teardown: release the allocations the encoder attached to the
 * codec context.  NOTE(review): lines are elided here (1494 -> 1498); "s" is
 * unused in the visible lines, presumably consumed by shared-context cleanup
 * on a hidden line -- confirm in the full source. */
1492 static av_cold int encode_end(AVCodecContext *avctx)
1494 HYuvContext *s = avctx->priv_data;
/* av_freep() also NULLs the pointers, so a repeated close is harmless. */
1498 av_freep(&avctx->extradata);
1499 av_freep(&avctx->stats_out);
1503 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1505 #if CONFIG_HUFFYUV_DECODER
/* Registration entry for the Huffyuv decoder.
 * NOTE(review): the .name field and closing "};" fall on elided lines. */
1506 AVCodec ff_huffyuv_decoder = {
1508 .type = AVMEDIA_TYPE_VIDEO,
1509 .id = AV_CODEC_ID_HUFFYUV,
1510 .priv_data_size = sizeof(HYuvContext),
1511 .init = decode_init,
1512 .close = decode_end,
1513 .decode = decode_frame,
/* Direct rendering, per-slice draw callbacks, and frame-level threading. */
1514 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
1515 CODEC_CAP_FRAME_THREADS,
/* Per-thread context setup, compiled in only when threading is enabled. */
1516 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1517 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1521 #if CONFIG_FFVHUFF_DECODER
/* Registration entry for the FFVHUFF (FFmpeg Huffyuv variant) decoder;
 * mirrors ff_huffyuv_decoder apart from the codec id.
 * NOTE(review): the .name field and closing "};" fall on elided lines. */
1522 AVCodec ff_ffvhuff_decoder = {
1524 .type = AVMEDIA_TYPE_VIDEO,
1525 .id = AV_CODEC_ID_FFVHUFF,
1526 .priv_data_size = sizeof(HYuvContext),
1527 .init = decode_init,
1528 .close = decode_end,
1529 .decode = decode_frame,
/* Same capability set as the stock Huffyuv decoder. */
1530 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
1531 CODEC_CAP_FRAME_THREADS,
1532 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1533 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1537 #if CONFIG_HUFFYUV_ENCODER
/* Registration entry for the Huffyuv encoder.
 * NOTE(review): the .name field and closing "};" fall on elided lines. */
1538 AVCodec ff_huffyuv_encoder = {
1540 .type = AVMEDIA_TYPE_VIDEO,
1541 .id = AV_CODEC_ID_HUFFYUV,
1542 .priv_data_size = sizeof(HYuvContext),
1543 .init = encode_init,
1544 .encode2 = encode_frame,
1545 .close = encode_end,
/* Only 4:2:2 planar and RGB32 input; narrower than the FFVHUFF encoder's
 * list below, presumably for compatibility with the original Huffyuv
 * bitstream -- confirm before extending. */
1546 .pix_fmts = (const enum PixelFormat[]){
1547 PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE
1549 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1553 #if CONFIG_FFVHUFF_ENCODER
/* Registration entry for the FFVHUFF encoder; unlike the stock Huffyuv
 * encoder it additionally accepts 4:2:0 planar input.
 * NOTE(review): the .name field and the closing "};" fall on elided lines
 * beyond this excerpt. */
1554 AVCodec ff_ffvhuff_encoder = {
1556 .type = AVMEDIA_TYPE_VIDEO,
1557 .id = AV_CODEC_ID_FFVHUFF,
1558 .priv_data_size = sizeof(HYuvContext),
1559 .init = encode_init,
1560 .encode2 = encode_frame,
1561 .close = encode_end,
1562 .pix_fmts = (const enum PixelFormat[]){
1563 PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE
1565 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),