2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
28 * huffyuv codec for libavcodec.
53 typedef enum Predictor {
/* Shared encoder/decoder state for the huffyuv codec.
 * NOTE(review): several fields of this struct are not visible in this view;
 * comments below describe only the fields shown. */
59 typedef struct HYuvContext {
60 AVCodecContext *avctx;
68 int yuy2; //use yuy2 instead of 422P
69 int bgr32; //use bgr32 instead of bgr24
/* per-symbol occurrence counts, used to (re)build Huffman tables */
76 uint64_t stats[3][256];
/* Huffman codes per plane/symbol (lengths live in a len[] field not shown here) */
78 uint32_t bits[3][256];
/* precomputed BGR pixel values for codes resolved by the joint RGB VLC */
79 uint32_t pix_bgr_map[1<<VLC_BITS];
80 VLC vlc[6]; //Y,U,V,YY,YU,YV
/* byte-swapped copy of the input packet (grown on demand by av_fast_malloc) */
82 uint8_t *bitstream_buffer;
83 unsigned int bitstream_buffer_size;
/* Run-length-coded Huffman length table for luma, used by
 * read_old_huffman_tables() for version-1 ("classic" huffyuv) streams.
 * Padded so init_get_bits() can safely over-read. */
87 #define classic_shift_luma_table_size 42
88 static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
89 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
90 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
/* Run-length-coded Huffman length table for chroma, companion to
 * classic_shift_luma; consumed by read_len_table() via a GetBitContext. */
95 #define classic_shift_chroma_table_size 59
96 static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
97 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
98 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
99 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0,
/* Fixed luma Huffman code values for classic (v1) streams; copied verbatim
 * into s->bits[0] by read_old_huffman_tables(). */
103 static const unsigned char classic_add_luma[256] = {
104 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
105 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
106 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
107 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
108 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
109 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
110 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
111 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
112 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
113 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
114 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
115 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
116 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
117 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
118 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
119 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
/* Fixed chroma Huffman code values for classic (v1) streams; copied verbatim
 * into s->bits[1] by read_old_huffman_tables(). */
122 static const unsigned char classic_add_chroma[256] = {
123 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
124 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
125 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
126 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
127 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
128 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
129 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
130 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
131 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
132 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
133 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
134 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
135 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
136 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
137 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
138 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
/* Left-predict one line: dst[i] = src[i] - src[i-1] (first pixel uses 'left').
 * The first 16 pixels are done by hand so the remainder can use the aligned
 * SIMD diff_bytes(); returns the new left value (not visible in this view —
 * TODO confirm against full source). */
141 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
142 const uint8_t *src, int w, int left)
/* NOTE(review): branch structure between these loops is elided in this view. */
146 for (i = 0; i < w; i++) {
147 const int temp = src[i];
148 dst[i] = temp - left;
153 for (i = 0; i < 16; i++) {
154 const int temp = src[i];
155 dst[i] = temp - left;
/* bulk of the line: vectorized byte-wise difference against the previous pixel */
158 s->dsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
/* Left-predict a BGR32 line component-wise. The first 4 pixels are handled
 * scalar (seeded from *red/*green/*blue/*alpha), the rest via diff_bytes()
 * with a 4-byte (one pixel) lag; the last pixel's components are written back
 * so the caller can seed the next line. */
163 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
164 const uint8_t *src, int w,
165 int *red, int *green, int *blue, int *alpha)
173 for (i = 0; i < FFMIN(w, 4); i++) {
174 const int rt = src[i * 4 + R];
175 const int gt = src[i * 4 + G];
176 const int bt = src[i * 4 + B];
177 const int at = src[i * 4 + A];
178 dst[i * 4 + R] = rt - r;
179 dst[i * 4 + G] = gt - g;
180 dst[i * 4 + B] = bt - b;
181 dst[i * 4 + A] = at - a;
/* remaining pixels: byte diff against the pixel 4 bytes earlier */
188 s->dsp.diff_bytes(dst + 16, src + 16, src + 12, w * 4 - 16);
/* export the last pixel as the seed for the next call */
190 *red = src[(w - 1) * 4 + R];
191 *green = src[(w - 1) * 4 + G];
192 *blue = src[(w - 1) * 4 + B];
193 *alpha = src[(w - 1) * 4 + A];
/* Left-predict an RGB24 line component-wise: scalar for the first 16 pixels,
 * diff_bytes() with a 3-byte lag for the rest; writes the last pixel back to
 * *red/*green/*blue as the seed for the next line. */
196 static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue){
202 for (i = 0; i < FFMIN(w,16); i++) {
203 const int rt = src[i*3 + 0];
204 const int gt = src[i*3 + 1];
205 const int bt = src[i*3 + 2];
206 dst[i*3 + 0] = rt - r;
207 dst[i*3 + 1] = gt - g;
208 dst[i*3 + 2] = bt - b;
/* 16 pixels * 3 bytes = 48 bytes already handled above */
214 s->dsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w*3 - 48);
216 *red = src[(w - 1)*3 + 0];
217 *green = src[(w - 1)*3 + 1];
218 *blue = src[(w - 1)*3 + 2];
/* Decode a run-length-coded table of 256 Huffman code lengths:
 * each record is a 3-bit repeat count (0 => explicit 8-bit count follows)
 * and a 5-bit length value. Returns an error on overflow/underrun. */
221 static int read_len_table(uint8_t *dst, GetBitContext *gb)
225 for (i = 0; i < 256;) {
226 repeat = get_bits(gb, 3);
227 val = get_bits(gb, 5);
/* repeat == 0 escape: read the real repeat count as 8 bits */
229 repeat = get_bits(gb, 8);
230 if (i + repeat > 256 || get_bits_left(gb) < 0) {
231 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
/* Assign canonical Huffman codes to dst[] given code lengths in len_table[],
 * processing longest codes first; fails if the lengths are inconsistent. */
240 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table)
245 for (len = 32; len > 0; len--) {
246 for (index = 0; index < 256; index++) {
247 if (len_table[index] == len)
251 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
/* Build the joint VLC tables that decode two symbols per lookup:
 * YUV path: vlc[3..5] decode a (Y,chroma) pair in one get_vlc2() when the
 * combined code fits in VLC_BITS; RGB path: vlc[3] decodes a whole pixel
 * whose value is looked up in pix_bgr_map. */
259 static void generate_joint_tables(HYuvContext *s)
261 uint16_t symbols[1 << VLC_BITS];
262 uint16_t bits[1 << VLC_BITS];
263 uint8_t len[1 << VLC_BITS];
264 if (s->bitstream_bpp < 24) {
/* one joint table per plane pairing (YY, YU, YV) */
266 for (p = 0; p < 3; p++) {
267 for (i = y = 0; y < 256; y++) {
268 int len0 = s->len[0][y];
269 int limit = VLC_BITS - len0;
272 for (u = 0; u < 256; u++) {
273 int len1 = s->len[p][u];
/* joint code = Y code followed by the chroma code */
276 len[i] = len0 + len1;
277 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
278 symbols[i] = (y << 8) + u;
279 if(symbols[i] != 0xffff) // reserved to mean "invalid"
283 ff_free_vlc(&s->vlc[3 + p]);
284 ff_init_vlc_sparse(&s->vlc[3 + p], VLC_BITS, i, len, 1, 1,
285 bits, 2, 2, symbols, 2, 2, 0);
288 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
289 int i, b, g, r, code;
290 int p0 = s->decorrelate;
291 int p1 = !s->decorrelate;
292 // restrict the range to +/-16 because that's pretty much guaranteed to
293 // cover all the combinations that fit in 11 bits total, and it doesn't
294 // matter if we miss a few rare codes.
295 for (i = 0, g = -16; g < 16; g++) {
296 int len0 = s->len[p0][g & 255];
297 int limit0 = VLC_BITS - len0;
300 for (b = -16; b < 16; b++) {
301 int len1 = s->len[p1][b & 255];
302 int limit1 = limit0 - len1;
305 code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
306 for (r = -16; r < 16; r++) {
307 int len2 = s->len[2][r & 255];
310 len[i] = len0 + len1 + len2;
311 bits[i] = (code << len2) + s->bits[2][r & 255];
312 if (s->decorrelate) {
/* NOTE(review): the map[] fill for both decorrelate branches is elided here */
325 ff_free_vlc(&s->vlc[3]);
326 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
/* Parse the three per-plane Huffman tables from 'src' (length bytes),
 * build codes and single-symbol VLCs, then the joint tables.
 * Returns the number of bytes consumed (rounded up). */
330 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
335 init_get_bits(&gb, src, length * 8);
337 for (i = 0; i < 3; i++) {
338 if (read_len_table(s->len[i], &gb) < 0)
340 if (generate_bits_table(s->bits[i], s->len[i]) < 0) {
/* rebuild the single-symbol VLC for this plane */
343 ff_free_vlc(&s->vlc[i]);
344 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
345 s->bits[i], 4, 4, 0);
348 generate_joint_tables(s);
350 return (get_bits_count(&gb) + 7) / 8;
/* Install the fixed "classic" (version-1) huffyuv tables: code lengths come
 * from the built-in run-length tables, code values from classic_add_*;
 * then build the per-plane and joint VLCs. */
353 static int read_old_huffman_tables(HYuvContext *s)
358 init_get_bits(&gb, classic_shift_luma,
359 classic_shift_luma_table_size * 8);
360 if (read_len_table(s->len[0], &gb) < 0)
363 init_get_bits(&gb, classic_shift_chroma,
364 classic_shift_chroma_table_size * 8);
365 if (read_len_table(s->len[1], &gb) < 0)
368 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
369 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
/* RGB uses the luma table for plane 1 as well */
371 if (s->bitstream_bpp >= 24) {
372 memcpy(s->bits[1], s->bits[0], 256 * sizeof(uint32_t));
373 memcpy(s->len[1] , s->len [0], 256 * sizeof(uint8_t));
375 memcpy(s->bits[2], s->bits[1], 256 * sizeof(uint32_t));
376 memcpy(s->len[2] , s->len [1], 256 * sizeof(uint8_t));
378 for (i = 0; i < 3; i++) {
379 ff_free_vlc(&s->vlc[i]);
380 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
381 s->bits[i], 4, 4, 0);
384 generate_joint_tables(s);
/* Allocate per-line scratch buffers: three planes for YUV (<24 bpp),
 * one 4-bytes-per-pixel buffer for RGB.
 * NOTE(review): allocation failures are not visibly checked in this view. */
389 static av_cold void alloc_temp(HYuvContext *s)
393 if (s->bitstream_bpp<24) {
394 for (i=0; i<3; i++) {
395 s->temp[i]= av_malloc(s->width + 16);
398 s->temp[0]= av_mallocz(4*s->width + 16);
/* Initialization shared by encoder and decoder: cache flags/dimensions
 * and set up the DSP function pointers. */
402 static av_cold int common_init(AVCodecContext *avctx)
404 HYuvContext *s = avctx->priv_data;
407 s->flags = avctx->flags;
409 ff_dsputil_init(&s->dsp, avctx);
411 s->width = avctx->width;
412 s->height = avctx->height;
413 av_assert1(s->width > 0 && s->height > 0);
418 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder init: parse the extradata header (method/bpp/interlace/context
 * bytes followed by the Huffman tables for v2 streams, heuristics plus the
 * classic tables for v1), then pick the output pixel format. */
419 static av_cold int decode_init(AVCodecContext *avctx)
421 HYuvContext *s = avctx->priv_data;
424 memset(s->vlc, 0, 3 * sizeof(VLC));
426 avctx->coded_frame = &s->picture;
427 avcodec_get_frame_defaults(&s->picture);
/* heuristic: >288 lines assumed interlaced unless extradata says otherwise */
428 s->interlaced = s->height > 288;
432 if (avctx->extradata_size) {
433 if ((avctx->bits_per_coded_sample & 7) &&
434 avctx->bits_per_coded_sample != 12)
435 s->version = 1; // do such files exist at all?
441 if (s->version == 2) {
442 int method, interlace;
444 if (avctx->extradata_size < 4)
/* extradata byte 0: predictor (low 6 bits) + decorrelate flag (bit 6) */
447 method = ((uint8_t*)avctx->extradata)[0];
448 s->decorrelate = method & 64 ? 1 : 0;
449 s->predictor = method & 63;
450 s->bitstream_bpp = ((uint8_t*)avctx->extradata)[1];
451 if (s->bitstream_bpp == 0)
452 s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
/* extradata byte 2: interlace override (bits 4-5) and context flag (bit 6) */
453 interlace = (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
454 s->interlaced = (interlace == 1) ? 1 : (interlace == 2) ? 0 : s->interlaced;
455 s->context = ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
/* Huffman tables follow the 4-byte header */
457 if ( read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
458 avctx->extradata_size - 4) < 0)
/* version 1: infer predictor/decorrelate from bits_per_coded_sample */
461 switch (avctx->bits_per_coded_sample & 7) {
471 s->predictor = PLANE;
472 s->decorrelate = avctx->bits_per_coded_sample >= 24;
475 s->predictor = MEDIAN;
479 s->predictor = LEFT; //OLD
483 s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
486 if (read_old_huffman_tables(s) < 0)
490 switch (s->bitstream_bpp) {
492 avctx->pix_fmt = AV_PIX_FMT_YUV420P;
496 avctx->pix_fmt = AV_PIX_FMT_YUYV422;
498 avctx->pix_fmt = AV_PIX_FMT_YUV422P;
504 avctx->pix_fmt = AV_PIX_FMT_RGB32;
506 avctx->pix_fmt = AV_PIX_FMT_BGR24;
510 return AVERROR_INVALIDDATA;
/* subsampled chroma requires an even luma width */
513 if ((avctx->pix_fmt == AV_PIX_FMT_YUV422P || avctx->pix_fmt == AV_PIX_FMT_YUV420P) && avctx->width & 1) {
514 av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
515 return AVERROR_INVALIDDATA;
/* Per-thread decoder copy for frame threading: rebuild the VLC tables from
 * extradata (they cannot be shared across threads) without redoing the full
 * decode_init() parsing. */
523 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
525 HYuvContext *s = avctx->priv_data;
528 avctx->coded_frame= &s->picture;
/* tables are rebuilt below; make sure stale pointers are never freed */
531 for (i = 0; i < 6; i++)
532 s->vlc[i].table = NULL;
534 if (s->version == 2) {
/* Fix: the tables start 4 bytes into extradata, so only
 * extradata_size - 4 bytes are available — decode_init() already
 * passes the reduced length; passing the full size over-read by 4. */
535 if (read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
536 avctx->extradata_size - 4) < 0)
539 if (read_old_huffman_tables(s) < 0)
545 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
547 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Run-length-encode a 256-entry code-length table into buf using the same
 * format read_len_table() parses (5-bit value + 3-bit repeat, with an 8-bit
 * escape for long runs). Returns the number of bytes written
 * (return elided in this view — TODO confirm). */
548 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
553 for (i = 0; i < 256;) {
/* count how many consecutive entries share this length (max 255) */
557 for (; i < 256 && len[i] == val && repeat < 255; i++)
560 av_assert0(val < 32 && val >0 && repeat<256 && repeat>0);
/* long run: repeat field 0 is the escape, real count in the next byte */
563 buf[index++] = repeat;
565 buf[index++] = val | (repeat << 5);
/* Encoder init: validate pixel format and options, write the 4-byte
 * extradata header plus stored Huffman tables, and seed the symbol
 * statistics (from stats_in if two-pass, otherwise from a fixed model). */
572 static av_cold int encode_init(AVCodecContext *avctx)
574 HYuvContext *s = avctx->priv_data;
579 avctx->extradata = av_mallocz(1024*30); // 256*3+4 == 772
580 avctx->stats_out = av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
583 avctx->coded_frame = &s->picture;
585 switch (avctx->pix_fmt) {
586 case AV_PIX_FMT_YUV420P:
587 case AV_PIX_FMT_YUV422P:
589 av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
590 return AVERROR(EINVAL);
592 s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
594 case AV_PIX_FMT_RGB32:
595 s->bitstream_bpp = 32;
597 case AV_PIX_FMT_RGB24:
598 s->bitstream_bpp = 24;
601 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
604 avctx->bits_per_coded_sample = s->bitstream_bpp;
605 s->decorrelate = s->bitstream_bpp >= 24;
606 s->predictor = avctx->prediction_method;
607 s->interlaced = avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
608 if (avctx->context_model == 1) {
609 s->context = avctx->context_model;
610 if (s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) {
611 av_log(avctx, AV_LOG_ERROR,
612 "context=1 is not compatible with "
613 "2 pass huffyuv encoding\n");
/* restrictions specific to the original (non-FFV) huffyuv bitstream */
618 if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
619 if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
620 av_log(avctx, AV_LOG_ERROR,
621 "Error: YV12 is not supported by huffyuv; use "
622 "vcodec=ffvhuff or format=422p\n");
625 if (avctx->context_model) {
626 av_log(avctx, AV_LOG_ERROR,
627 "Error: per-frame huffman tables are not supported "
628 "by huffyuv; use vcodec=ffvhuff\n");
631 if (s->interlaced != ( s->height > 288 ))
632 av_log(avctx, AV_LOG_INFO,
633 "using huffyuv 2.2.0 or newer interlacing flag\n");
636 if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN) {
637 av_log(avctx, AV_LOG_ERROR,
638 "Error: RGB is incompatible with median predictor\n");
/* extradata header: [0] predictor|decorrelate, [1] bpp, [2] flags, [3] 0 */
642 ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
643 ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
644 ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
646 ((uint8_t*)avctx->extradata)[2] |= 0x40;
647 ((uint8_t*)avctx->extradata)[3] = 0;
648 s->avctx->extradata_size = 4;
/* two-pass: accumulate stats parsed from the first-pass log */
650 if (avctx->stats_in) {
651 char *p = avctx->stats_in;
653 for (i = 0; i < 3; i++)
654 for (j = 0; j < 256; j++)
658 for (i = 0; i < 3; i++) {
661 for (j = 0; j < 256; j++) {
662 s->stats[i][j] += strtol(p, &next, 0);
663 if (next == p) return -1;
667 if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
/* no stats: assume a symmetric distribution peaking at 0/255 */
670 for (i = 0; i < 3; i++)
671 for (j = 0; j < 256; j++) {
672 int d = FFMIN(j, 256 - j);
674 s->stats[i][j] = 100000000 / (d + 1);
/* build and store the three Huffman tables into extradata */
678 for (i = 0; i < 3; i++) {
679 ff_huff_gen_len_table(s->len[i], s->stats[i]);
681 if (generate_bits_table(s->bits[i], s->len[i]) < 0) {
685 s->avctx->extradata_size +=
686 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
/* context mode: reseed stats scaled by expected plane sizes */
690 for (i = 0; i < 3; i++) {
691 int pels = s->width * s->height / (i ? 40 : 10);
692 for (j = 0; j < 256; j++) {
693 int d = FFMIN(j, 256 - j);
694 s->stats[i][j] = pels/(d + 1);
698 for (i = 0; i < 3; i++)
699 for (j = 0; j < 256; j++)
709 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
711 /* TODO instead of restarting the read when the code isn't in the first level
712 * of the joint table, jump into the 2nd level of the individual table. */
/* Decode two symbols: try the joint VLC (one lookup yields both), and fall
 * back to two single-symbol lookups when the joint code was too long. */
713 #define READ_2PIX(dst0, dst1, plane1){\
714 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
719 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
720 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
/* Decode 'count' 4:2:2 pixel pairs (Y,U,Y,V) into s->temp[0..2].
 * Near the end of the buffer the slow loop re-checks bits left each
 * iteration; otherwise the unchecked fast loop is used. */
724 static void decode_422_bitstream(HYuvContext *s, int count)
/* 31*4 bits is the worst case per iteration — if fewer remain, go safe */
730 if (count >= (get_bits_left(&s->gb)) / (31 * 4)) {
731 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
732 READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
733 READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
736 for (i = 0; i < count; i++) {
737 READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
738 READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
/* Decode 'count' luma pixel pairs into s->temp[0], with the same
 * checked/unchecked split as decode_422_bitstream(). */
743 static void decode_gray_bitstream(HYuvContext *s, int count)
749 if (count >= (get_bits_left(&s->gb)) / (31 * 2)) {
750 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
751 READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
754 for(i=0; i<count; i++){
755 READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
760 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Entropy-code 'count' 4:2:2 pixel pairs from s->temp into s->pb, after a
 * worst-case space check. Three variants: stats-gathering (pass 1),
 * stats+output, and plain output (branch bodies partly elided here). */
761 static int encode_422_bitstream(HYuvContext *s, int offset, int count)
764 const uint8_t *y = s->temp[0] + offset;
765 const uint8_t *u = s->temp[1] + offset / 2;
766 const uint8_t *v = s->temp[2] + offset / 2;
/* worst case: 4 symbols * up to 2 bytes of code each per iteration */
768 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 2 * 4 * count) {
769 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
775 int y1 = y[2 * i + 1];\
781 if (s->flags & CODEC_FLAG_PASS1) {
782 for(i = 0; i < count; i++) {
790 if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
793 for (i = 0; i < count; i++) {
796 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
798 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
800 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
802 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
805 for(i = 0; i < count; i++) {
807 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
808 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
809 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
810 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
/* Entropy-code 'count' luma pixel pairs, mirroring encode_422_bitstream():
 * space check, then pass-1 stats / stats+write / write-only loops
 * (selected by the same flags; loop bodies use the LOAD2/WRITE2 macros). */
816 static int encode_gray_bitstream(HYuvContext *s, int count)
820 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * count) {
821 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
826 int y0 = s->temp[0][2 * i];\
827 int y1 = s->temp[0][2 * i + 1];
832 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
833 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
837 if (s->flags & CODEC_FLAG_PASS1) {
838 for (i = 0; i < count; i++) {
843 if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
847 for (i = 0; i < count; i++) {
853 for (i = 0; i < count; i++) {
860 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
862 static av_always_inline void decode_bgr_1(HYuvContext *s, int count,
863 int decorrelate, int alpha)
866 for (i = 0; i < count; i++) {
867 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
869 *(uint32_t*)&s->temp[0][4 * i] = s->pix_bgr_map[code];
870 } else if(decorrelate) {
871 s->temp[0][4 * i + G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
872 s->temp[0][4 * i + B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) +
873 s->temp[0][4 * i + G];
874 s->temp[0][4 * i + R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) +
875 s->temp[0][4 * i + G];
877 s->temp[0][4 * i + B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
878 s->temp[0][4 * i + G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
879 s->temp[0][4 * i + R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
882 s->temp[0][4 * i + A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
/* Dispatch to decode_bgr_1() with compile-time decorrelate/alpha flags so
 * each of the four combinations gets its own specialized inlined copy. */
886 static void decode_bgr_bitstream(HYuvContext *s, int count)
888 if (s->decorrelate) {
889 if (s->bitstream_bpp==24)
890 decode_bgr_1(s, count, 1, 0);
892 decode_bgr_1(s, count, 1, 1);
894 if (s->bitstream_bpp==24)
895 decode_bgr_1(s, count, 0, 0);
897 decode_bgr_1(s, count, 0, 1);
/* Entropy-code 'count' RGB(A) pixels (planes = 3 or 4) from s->temp[0],
 * decorrelating R and B against G before coding. Same three-loop structure
 * as the YUV encoders: stats-only, stats+write, write-only. */
901 static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
905 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*planes*count) {
906 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
/* LOAD macro body: decorrelate R/B against G (mod 256) */
911 int g = s->temp[0][planes==3 ? 3*i + 1 : 4*i + G];\
912 int b = (s->temp[0][planes==3 ? 3*i + 2 : 4*i + B] - g) & 0xff;\
913 int r = (s->temp[0][planes==3 ? 3*i + 0 : 4*i + R] - g) & 0xff;\
914 int a = s->temp[0][planes*i + A];
919 if(planes==4) s->stats[2][a]++;
921 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
922 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
923 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);\
924 if(planes==4) put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
926 if ((s->flags & CODEC_FLAG_PASS1) &&
927 (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
928 for (i = 0; i < count; i++) {
932 } else if (s->context || (s->flags & CODEC_FLAG_PASS1)) {
933 for (i = 0; i < count; i++) {
939 for (i = 0; i < count; i++) {
947 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Report the rows decoded since the last call to the application via
 * draw_horiz_band (no-op when the callback is unset). */
948 static void draw_slice(HYuvContext *s, int y)
951 int offset[AV_NUM_DATA_POINTERS];
953 if (s->avctx->draw_horiz_band==NULL)
956 h = y - s->last_slice_end;
/* 420: chroma rows advance at half the luma rate */
959 if (s->bitstream_bpp == 12) {
965 offset[0] = s->picture.linesize[0]*y;
966 offset[1] = s->picture.linesize[1]*cy;
967 offset[2] = s->picture.linesize[2]*cy;
968 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
972 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
974 s->last_slice_end = y + h;
/* Decode one huffyuv frame: byte-swap the packet into a padded scratch
 * buffer, (re)read per-frame tables in context mode, then reconstruct the
 * picture per predictor (LEFT/PLANE/MEDIAN) for YUV or BGR paths.
 * Returns the number of bytes consumed. (Several branch/loop boundaries of
 * this large function are elided in this view.) */
977 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
980 const uint8_t *buf = avpkt->data;
981 int buf_size = avpkt->size;
982 HYuvContext *s = avctx->priv_data;
983 const int width = s->width;
984 const int width2 = s->width>>1;
985 const int height = s->height;
986 int fake_ystride, fake_ustride, fake_vstride;
987 AVFrame * const p = &s->picture;
990 AVFrame *picture = data;
/* copy+swap input into a padded buffer the bit reader can over-read */
992 av_fast_malloc(&s->bitstream_buffer,
993 &s->bitstream_buffer_size,
994 buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
995 if (!s->bitstream_buffer)
996 return AVERROR(ENOMEM);
998 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
999 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer,
1000 (const uint32_t*)buf, buf_size / 4);
1003 ff_thread_release_buffer(avctx, p);
1006 if (ff_thread_get_buffer(avctx, p) < 0) {
1007 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
/* context mode: tables are transmitted at the start of every frame */
1012 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
1017 if ((unsigned)(buf_size-table_size) >= INT_MAX / 8)
1020 init_get_bits(&s->gb, s->bitstream_buffer+table_size,
1021 (buf_size-table_size) * 8);
/* interlaced frames predict against the line two rows up */
1023 fake_ystride = s->interlaced ? p->linesize[0] * 2 : p->linesize[0];
1024 fake_ustride = s->interlaced ? p->linesize[1] * 2 : p->linesize[1];
1025 fake_vstride = s->interlaced ? p->linesize[2] * 2 : p->linesize[2];
1027 s->last_slice_end = 0;
1029 if (s->bitstream_bpp < 24) {
1031 int lefty, leftu, leftv;
1032 int lefttopy, lefttopu, lefttopv;
/* first 2 pixels (4 bytes) are stored raw to seed the predictors */
1035 p->data[0][3] = get_bits(&s->gb, 8);
1036 p->data[0][2] = get_bits(&s->gb, 8);
1037 p->data[0][1] = get_bits(&s->gb, 8);
1038 p->data[0][0] = get_bits(&s->gb, 8);
1040 av_log(avctx, AV_LOG_ERROR,
1041 "YUY2 output is not implemented yet\n");
1045 leftv = p->data[2][0] = get_bits(&s->gb, 8);
1046 lefty = p->data[0][1] = get_bits(&s->gb, 8);
1047 leftu = p->data[1][0] = get_bits(&s->gb, 8);
1048 p->data[0][0] = get_bits(&s->gb, 8);
1050 switch (s->predictor) {
/* LEFT / PLANE: first line left-predicted after the raw seed pixels */
1053 decode_422_bitstream(s, width-2);
1054 lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1055 if (!(s->flags&CODEC_FLAG_GRAY)) {
1056 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
1057 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
1060 for (cy = y = 1; y < s->height; y++, cy++) {
1061 uint8_t *ydst, *udst, *vdst;
/* 420: every other output row is luma-only */
1063 if (s->bitstream_bpp == 12) {
1064 decode_gray_bitstream(s, width);
1066 ydst = p->data[0] + p->linesize[0] * y;
1068 lefty = s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1069 if (s->predictor == PLANE) {
1070 if (y > s->interlaced)
1071 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1074 if (y >= s->height) break;
1079 ydst = p->data[0] + p->linesize[0]*y;
1080 udst = p->data[1] + p->linesize[1]*cy;
1081 vdst = p->data[2] + p->linesize[2]*cy;
1083 decode_422_bitstream(s, width);
1084 lefty = s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1085 if (!(s->flags & CODEC_FLAG_GRAY)) {
1086 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1087 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
/* PLANE adds the previous (field) line on top of the left prediction */
1089 if (s->predictor == PLANE) {
1090 if (cy > s->interlaced) {
1091 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1092 if (!(s->flags & CODEC_FLAG_GRAY)) {
1093 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1094 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1099 draw_slice(s, height);
/* MEDIAN predictor path */
1103 /* first line except first 2 pixels is left predicted */
1104 decode_422_bitstream(s, width - 2);
1105 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width - 2, lefty);
1106 if (!(s->flags & CODEC_FLAG_GRAY)) {
1107 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
1108 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
1113 /* second line is left predicted for interlaced case */
1114 if (s->interlaced) {
1115 decode_422_bitstream(s, width);
1116 lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1117 if (!(s->flags & CODEC_FLAG_GRAY)) {
/* NOTE(review): linesize[2] used for plane 1 and vice versa — looks
 * swapped relative to the surrounding code; verify against upstream. */
1118 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1119 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1124 /* next 4 pixels are left predicted too */
1125 decode_422_bitstream(s, 4);
1126 lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1127 if (!(s->flags&CODEC_FLAG_GRAY)) {
1128 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1129 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1132 /* next line except the first 4 pixels is median predicted */
1133 lefttopy = p->data[0][3];
1134 decode_422_bitstream(s, width - 4);
1135 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1136 if (!(s->flags&CODEC_FLAG_GRAY)) {
1137 lefttopu = p->data[1][1];
1138 lefttopv = p->data[2][1];
1139 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
1140 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2] + 2, s->temp[2], width2 - 2, &leftv, &lefttopv);
/* remaining lines: full median prediction */
1144 for (; y<height; y++, cy++) {
1145 uint8_t *ydst, *udst, *vdst;
1147 if (s->bitstream_bpp == 12) {
1148 while (2 * cy > y) {
1149 decode_gray_bitstream(s, width);
1150 ydst = p->data[0] + p->linesize[0] * y;
1151 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1154 if (y >= height) break;
1158 decode_422_bitstream(s, width);
1160 ydst = p->data[0] + p->linesize[0] * y;
1161 udst = p->data[1] + p->linesize[1] * cy;
1162 vdst = p->data[2] + p->linesize[2] * cy;
1164 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1165 if (!(s->flags & CODEC_FLAG_GRAY)) {
1166 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1167 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1171 draw_slice(s, height);
/* RGB path: image is stored bottom-up; seed from the raw last-line pixel */
1177 int leftr, leftg, leftb, lefta;
1178 const int last_line = (height - 1) * p->linesize[0];
1180 if (s->bitstream_bpp == 32) {
1181 lefta = p->data[0][last_line+A] = get_bits(&s->gb, 8);
1182 leftr = p->data[0][last_line+R] = get_bits(&s->gb, 8);
1183 leftg = p->data[0][last_line+G] = get_bits(&s->gb, 8);
1184 leftb = p->data[0][last_line+B] = get_bits(&s->gb, 8);
1186 leftr = p->data[0][last_line+R] = get_bits(&s->gb, 8);
1187 leftg = p->data[0][last_line+G] = get_bits(&s->gb, 8);
1188 leftb = p->data[0][last_line+B] = get_bits(&s->gb, 8);
1189 lefta = p->data[0][last_line+A] = 255;
1190 skip_bits(&s->gb, 8);
1194 switch (s->predictor) {
1197 decode_bgr_bitstream(s, width - 1);
1198 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width - 1, &leftr, &leftg, &leftb, &lefta);
1200 for (y = s->height - 2; y >= 0; y--) { //Yes it is stored upside down.
1201 decode_bgr_bitstream(s, width);
1203 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1204 if (s->predictor == PLANE) {
1205 if (s->bitstream_bpp != 32) lefta = 0;
1206 if ((y & s->interlaced) == 0 &&
1207 y < s->height - 1 - s->interlaced) {
1208 s->dsp.add_bytes(p->data[0] + p->linesize[0] * y,
1209 p->data[0] + p->linesize[0] * y +
1210 fake_ystride, fake_ystride);
1214 // just 1 large slice as this is not possible in reverse order
1215 draw_slice(s, height);
1218 av_log(avctx, AV_LOG_ERROR,
1219 "prediction type not supported!\n");
1222 av_log(avctx, AV_LOG_ERROR,
1223 "BGR24 output is not implemented yet\n");
1230 *data_size = sizeof(AVFrame);
/* consumed bytes, rounded up to a 32-bit word boundary, plus the tables */
1232 return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
1234 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
/* Cleanup shared by encoder and decoder: free the per-line scratch buffers. */
1236 static int common_end(HYuvContext *s)
1240 for(i = 0; i < 3; i++) {
1241 av_freep(&s->temp[i]);
1246 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder teardown: release the reference frame, the swap buffer and all
 * six VLC tables. */
1247 static av_cold int decode_end(AVCodecContext *avctx)
1249 HYuvContext *s = avctx->priv_data;
1252 if (s->picture.data[0])
1253 avctx->release_buffer(avctx, &s->picture);
1256 av_freep(&s->bitstream_buffer);
1258 for (i = 0; i < 6; i++) {
1259 ff_free_vlc(&s->vlc[i]);
1264 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1266 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
1267 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1268 const AVFrame *pict, int *got_packet)
1270 HYuvContext *s = avctx->priv_data;
1271 const int width = s->width;
1272 const int width2 = s->width>>1;
1273 const int height = s->height;
1274 const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
/* encode_frame (fragment): compress one input frame into pkt.
 * Visible flow: optional Huffman code-length tables are stored at the head of
 * the packet, then each plane is predicted (LEFT / PLANE / MEDIAN) and the
 * residuals are entropy-coded through the PutBitContext s->pb.
 * NOTE(review): the function signature and a number of interior lines fall
 * outside this excerpt; comments below only describe the visible lines. */
// interlaced material is predicted from the line two rows up, so the
// "previous line" strides are doubled when s->interlaced is set
1275 const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1276 const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1277 AVFrame * const p = &s->picture;
1278 int i, j, size = 0, ret;
// worst-case allocation: 3 bytes/pixel expanded by a factor of 4, plus padding
1280 if ((ret = ff_alloc_packet2(avctx, pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0)
// every huffyuv frame is coded as an intra frame
1284 p->pict_type = AV_PICTURE_TYPE_I;
// derive per-plane code lengths from the gathered symbol statistics and write
// the three tables into the packet header; 'size' advances in bytes here
1288 for (i = 0; i < 3; i++) {
1289 ff_huff_gen_len_table(s->len[i], s->stats[i]);
1290 if (generate_bits_table(s->bits[i], s->len[i]) < 0)
1292 size += store_table(s, s->len[i], &pkt->data[size]);
// halve every occurrence count — presumably to age old statistics so the
// tables keep adapting to newer frames; TODO confirm against context mode
1295 for (i = 0; i < 3; i++)
1296 for (j = 0; j < 256; j++)
1297 s->stats[i][j] >>= 1;
// the residual bitstream starts right after the stored tables
1300 init_put_bits(&s->pb, pkt->data + size, pkt->size - size);
1302 if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
1303 avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
1304 int lefty, leftu, leftv, y, cy;
// the first samples are emitted raw, in v0, y1, u0, y0 order, and double as
// the initial "left" values for the predictors
1306 put_bits(&s->pb, 8, leftv = p->data[2][0]);
1307 put_bits(&s->pb, 8, lefty = p->data[0][1]);
1308 put_bits(&s->pb, 8, leftu = p->data[1][0]);
1309 put_bits(&s->pb, 8, p->data[0][0]);
// left-predict the whole first line into s->temp[]; encoding then starts at
// offset 2 because the first two luma / one of each chroma went out raw
1311 lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1312 leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1313 leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1315 encode_422_bitstream(s, 2, width-2);
1317 if (s->predictor==MEDIAN) {
1318 int lefttopy, lefttopu, lefttopv;
// interlaced: the second line belongs to the other field and has no "above"
// line of its own yet, so it is still purely left-predicted
1320 if (s->interlaced) {
1321 lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
1322 leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
1323 leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);
1325 encode_422_bitstream(s, 0, width);
// seed the median predictor: the first 4 luma / 2 chroma samples of the next
// line are left-predicted, the remainder of the line switches to median
1329 lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
1330 leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
1331 leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);
1333 encode_422_bitstream(s, 0, 4);
1335 lefttopy = p->data[0][3];
1336 lefttopu = p->data[1][1];
1337 lefttopv = p->data[2][1];
1338 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride + 4, width - 4 , &lefty, &lefttopy);
1339 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
1340 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
1341 encode_422_bitstream(s, 0, width - 4);
// remaining lines under MEDIAN: predict each line from the previous (field)
// line; at 12 bpp (4:2:0) every second line is coded luma-only, tracked by
// letting y run ahead of the chroma row counter cy
1344 for (; y < height; y++,cy++) {
1345 uint8_t *ydst, *udst, *vdst;
1347 if (s->bitstream_bpp == 12) {
1348 while (2 * cy > y) {
1349 ydst = p->data[0] + p->linesize[0] * y;
1350 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1351 encode_gray_bitstream(s, width);
1354 if (y >= height) break;
1356 ydst = p->data[0] + p->linesize[0] * y;
1357 udst = p->data[1] + p->linesize[1] * cy;
1358 vdst = p->data[2] + p->linesize[2] * cy;
1360 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1361 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1362 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1364 encode_422_bitstream(s, 0, width);
// LEFT / PLANE predictors for the remaining lines
1367 for (cy = y = 1; y < height; y++, cy++) {
1368 uint8_t *ydst, *udst, *vdst;
1370 /* encode a luma only line & y++ */
1371 if (s->bitstream_bpp == 12) {
1372 ydst = p->data[0] + p->linesize[0] * y;
// PLANE: first take the vertical difference against the line above (skipped
// while y <= s->interlaced, where no same-field line exists yet), then
// left-predict that difference
1374 if (s->predictor == PLANE && s->interlaced < y) {
1375 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1377 lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1379 lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1381 encode_gray_bitstream(s, width);
1383 if (y >= height) break;
1386 ydst = p->data[0] + p->linesize[0] * y;
1387 udst = p->data[1] + p->linesize[1] * cy;
1388 vdst = p->data[2] + p->linesize[2] * cy;
// PLANE on chroma too; s->temp[2] holds the u and v vertical diffs back to back
1390 if (s->predictor == PLANE && s->interlaced < cy) {
1391 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1392 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1393 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1395 lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1396 leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1397 leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1399 lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1400 leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1401 leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1404 encode_422_bitstream(s, 0, width);
// RGB32 path: the picture is coded bottom-up, hence the negative strides
1407 } else if(avctx->pix_fmt == AV_PIX_FMT_RGB32) {
1408 uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
1409 const int stride = -p->linesize[0];
1410 const int fake_stride = -fake_ystride;
1412 int leftr, leftg, leftb, lefta;
// first pixel raw (a, r, g, b), remainder of the line left-predicted
1414 put_bits(&s->pb, 8, lefta = data[A]);
1415 put_bits(&s->pb, 8, leftr = data[R]);
1416 put_bits(&s->pb, 8, leftg = data[G]);
1417 put_bits(&s->pb, 8, leftb = data[B]);
1419 sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1, &leftr, &leftg, &leftb, &lefta);
1420 encode_bgra_bitstream(s, width - 1, 4);
1422 for (y = 1; y < s->height; y++) {
1423 uint8_t *dst = data + y*stride;
1424 if (s->predictor == PLANE && s->interlaced < y) {
1425 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
1426 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb, &lefta);
1428 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb, &lefta);
1430 encode_bgra_bitstream(s, width, 4);
// RGB24 path: same bottom-up scheme at 3 bytes/pixel; the byte where RGB32
// carries alpha is emitted as a constant 0
1432 }else if(avctx->pix_fmt == AV_PIX_FMT_RGB24){
1433 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1434 const int stride = -p->linesize[0];
1435 const int fake_stride = -fake_ystride;
1437 int leftr, leftg, leftb;
1439 put_bits(&s->pb, 8, leftr= data[0]);
1440 put_bits(&s->pb, 8, leftg= data[1]);
1441 put_bits(&s->pb, 8, leftb= data[2]);
1442 put_bits(&s->pb, 8, 0);
1444 sub_left_prediction_rgb24(s, s->temp[0], data+3, width-1, &leftr, &leftg, &leftb);
1445 encode_bgra_bitstream(s, width-1, 3);
1447 for(y=1; y<s->height; y++){
1448 uint8_t *dst = data + y*stride;
1449 if(s->predictor == PLANE && s->interlaced < y){
1450 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*3);
1451 sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1453 sub_left_prediction_rgb24(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1455 encode_bgra_bitstream(s, width, 3);
1458 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
// account for the emitted bits (rounded up to bytes) and pad the stream.
// NOTE(review): 'size' is used as a 32-bit-word count further down
// (size * 4, bswap_buf) — the byte->word conversion presumably happens on a
// line elided from this excerpt (upstream has "size /= 4;" here); verify.
1462 size += (put_bits_count(&s->pb) + 31) / 8;
1463 put_bits(&s->pb, 16, 0);
1464 put_bits(&s->pb, 15, 0);
// two-pass rate control: dump the (aged) symbol statistics every 32nd frame
1467 if ((s->flags&CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
1469 char *p = avctx->stats_out;
1470 char *end = p + 1024*30;
1471 for (i = 0; i < 3; i++) {
1472 for (j = 0; j < 256; j++) {
1473 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1477 snprintf(p, end-p, "\n");
1481 avctx->stats_out[0] = '\0';
// unless output is suppressed, flush and byteswap the packet as 32-bit words
// — NOTE(review): presumably to match the original Huffyuv word layout
1482 if (!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
1483 flush_put_bits(&s->pb);
1484 s->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
1487 s->picture_number++;
// 'size' counts 32-bit words at this point; every packet is a keyframe
1489 pkt->size = size * 4;
1490 pkt->flags |= AV_PKT_FLAG_KEY;
/* Encoder teardown: release the allocations owned by the encoder side
 * (extradata written at init, the two-pass stats buffer).
 * NOTE(review): interior lines (e.g. common-context cleanup and the return
 * statement) are elided from this excerpt. */
1496 static av_cold int encode_end(AVCodecContext *avctx)
1498 HYuvContext *s = avctx->priv_data;
// av_freep() frees and NULLs the pointers, making double-close harmless
1502 av_freep(&avctx->extradata);
1503 av_freep(&avctx->stats_out);
1507 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1509 #if CONFIG_HUFFYUV_DECODER
/* Registration of the native Huffyuv decoder.  Supports direct rendering,
 * slice callbacks and frame-level threading (per the capability flags below).
 * NOTE(review): the .name field and the closing "};" fall on lines elided
 * from this excerpt. */
1510 AVCodec ff_huffyuv_decoder = {
1512 .type = AVMEDIA_TYPE_VIDEO,
1513 .id = AV_CODEC_ID_HUFFYUV,
1514 .priv_data_size = sizeof(HYuvContext),
1515 .init = decode_init,
1516 .close = decode_end,
1517 .decode = decode_frame,
1518 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
1519 CODEC_CAP_FRAME_THREADS,
// per-thread context setup used only when frame threading is compiled in
1520 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1521 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1525 #if CONFIG_FFVHUFF_DECODER
/* Registration of the FFVHUFF decoder (FFmpeg's Huffyuv variant).  Shares
 * the decode_init/decode_frame/decode_end entry points with the Huffyuv
 * decoder above; only the codec id and names differ.
 * NOTE(review): the .name field and the closing "};" are elided from this
 * excerpt. */
1526 AVCodec ff_ffvhuff_decoder = {
1528 .type = AVMEDIA_TYPE_VIDEO,
1529 .id = AV_CODEC_ID_FFVHUFF,
1530 .priv_data_size = sizeof(HYuvContext),
1531 .init = decode_init,
1532 .close = decode_end,
1533 .decode = decode_frame,
1534 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
1535 CODEC_CAP_FRAME_THREADS,
// per-thread context setup used only when frame threading is compiled in
1536 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1537 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1541 #if CONFIG_HUFFYUV_ENCODER
/* Registration of the Huffyuv encoder.  Accepts 4:2:2 planar YUV plus packed
 * RGB24/RGB32 input (the pix_fmts list below); YUV420P is reserved for the
 * FFVHUFF variant.
 * NOTE(review): the .name field and the closing "};" are elided from this
 * excerpt. */
1542 AVCodec ff_huffyuv_encoder = {
1544 .type = AVMEDIA_TYPE_VIDEO,
1545 .id = AV_CODEC_ID_HUFFYUV,
1546 .priv_data_size = sizeof(HYuvContext),
1547 .init = encode_init,
1548 .encode2 = encode_frame,
1549 .close = encode_end,
// AV_PIX_FMT_NONE terminates the list
1550 .pix_fmts = (const enum AVPixelFormat[]){
1551 AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
1553 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1557 #if CONFIG_FFVHUFF_ENCODER
/* Registration of the FFVHUFF encoder.  Same entry points as the Huffyuv
 * encoder above, but additionally accepts 4:2:0 planar YUV input.
 * NOTE(review): the .name field and the closing "};" are elided from this
 * excerpt. */
1558 AVCodec ff_ffvhuff_encoder = {
1560 .type = AVMEDIA_TYPE_VIDEO,
1561 .id = AV_CODEC_ID_FFVHUFF,
1562 .priv_data_size = sizeof(HYuvContext),
1563 .init = encode_init,
1564 .encode2 = encode_frame,
1565 .close = encode_end,
// AV_PIX_FMT_NONE terminates the list
1566 .pix_fmts = (const enum AVPixelFormat[]){
1567 AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
1569 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),