2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
28 * huffyuv codec for libavcodec.
/* Prediction modes for huffyuv (members not visible in this view —
 * presumably LEFT / PLANE / MEDIAN, used throughout this file; confirm). */
53 typedef enum Predictor {
/* Per-stream state shared by the huffyuv encoder and decoder. */
59 typedef struct HYuvContext {
60 AVCodecContext *avctx;
68 int yuy2; //use yuy2 instead of 422P
69 int bgr32; //use bgr32 instead of bgr24
/* Symbol occurrence counts, gathered during encoding (2-pass / context mode). */
76 uint64_t stats[3][256];
/* Per-plane Huffman code words; code lengths live in a matching len[] table. */
78 uint32_t bits[3][256];
/* Joint RGB decode table: packed pixel value for each first-level VLC code. */
79 uint32_t pix_bgr_map[1<<VLC_BITS];
80 VLC vlc[6]; //Y,U,V,YY,YU,YV
/* Word-swapped copy of the input packet, grown on demand (see decode_frame). */
82 uint8_t *bitstream_buffer;
83 unsigned int bitstream_buffer_size;
/* Run-length coded code-length tables for the original ("classic") huffyuv
 * bitstream, decoded via read_len_table(); padded so the bit reader may
 * overread safely. */
87 #define classic_shift_luma_table_size 42
88 static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
89 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
90 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
95 #define classic_shift_chroma_table_size 59
96 static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
97 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
98 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
99 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0,
/* Huffman code values for the built-in classic luma table; copied into
 * s->bits[0] by read_old_huffman_tables(). */
103 static const unsigned char classic_add_luma[256] = {
104 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
105 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
106 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
107 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
108 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
109 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
110 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
111 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
112 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
113 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
114 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
115 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
116 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
117 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
118 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
119 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
/* Huffman code values for the built-in classic chroma table; copied into
 * s->bits[1] by read_old_huffman_tables(). */
122 static const unsigned char classic_add_chroma[256] = {
123 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
124 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
125 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
126 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
127 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
128 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
129 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
130 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
131 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
132 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
133 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
134 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
135 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
136 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
137 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
138 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
/* Left-prediction residual for one plane row: dst[i] = src[i] - previous
 * pixel, seeded with 'left'. Returns the new running "left" value for the
 * next row (callers in encode_frame assign the result back to lefty/leftu/
 * leftv). The first 16 pixels are done scalar so the DSP routine can work
 * on aligned data — NOTE(review): the w<16 fallback path is presumed; not
 * fully visible here. */
141 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
142 const uint8_t *src, int w, int left)
146 for (i = 0; i < w; i++) {
147 const int temp = src[i];
148 dst[i] = temp - left;
/* Wide rows: handle the first 16 pixels by hand... */
153 for (i = 0; i < 16; i++) {
154 const int temp = src[i];
155 dst[i] = temp - left;
/* ...then let the (possibly SIMD) dsp routine difference the rest. */
158 s->dsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
/* Left-prediction residual for a packed 32-bit RGBA row. Each channel is
 * differenced against the same channel of the previous pixel; the running
 * r/g/b/a values are read and written through the pointer arguments so the
 * prediction carries across rows. */
163 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
164 const uint8_t *src, int w,
165 int *red, int *green, int *blue, int *alpha)
/* First up-to-4 pixels scalar (16 bytes), priming the dsp call below. */
173 for (i = 0; i < FFMIN(w, 4); i++) {
174 const int rt = src[i * 4 + R];
175 const int gt = src[i * 4 + G];
176 const int bt = src[i * 4 + B];
177 const int at = src[i * 4 + A];
178 dst[i * 4 + R] = rt - r;
179 dst[i * 4 + G] = gt - g;
180 dst[i * 4 + B] = bt - b;
181 dst[i * 4 + A] = at - a;
/* Remaining bytes: each byte minus the byte one pixel (4 bytes) earlier. */
188 s->dsp.diff_bytes(dst + 16, src + 16, src + 12, w * 4 - 16);
/* Export the last pixel's channels as the next row's seed. */
190 *red = src[(w - 1) * 4 + R];
191 *green = src[(w - 1) * 4 + G];
192 *blue = src[(w - 1) * 4 + B];
193 *alpha = src[(w - 1) * 4 + A];
/* Left-prediction residual for a packed 24-bit RGB row; same scheme as the
 * bgr32 variant but 3 bytes per pixel and no alpha. */
196 static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue){
/* First up-to-16 pixels scalar (48 bytes), priming the dsp call below. */
202 for (i = 0; i < FFMIN(w,16); i++) {
203 const int rt = src[i*3 + 0];
204 const int gt = src[i*3 + 1];
205 const int bt = src[i*3 + 2];
206 dst[i*3 + 0] = rt - r;
207 dst[i*3 + 1] = gt - g;
208 dst[i*3 + 2] = bt - b;
/* Remaining bytes: each byte minus the byte one pixel (3 bytes) earlier. */
214 s->dsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w*3 - 48);
/* Export the last pixel's channels as the next row's seed. */
216 *red = src[(w - 1)*3 + 0];
217 *green = src[(w - 1)*3 + 1];
218 *blue = src[(w - 1)*3 + 2];
/* Decode a run-length coded table of 256 Huffman code lengths from the
 * bitstream: 3-bit repeat count + 5-bit length per entry; an 8-bit extended
 * repeat follows (presumably when the 3-bit count is 0 — the branch is not
 * visible here). Fails if the runs overflow 256 entries or the reader
 * ran out of bits. */
221 static int read_len_table(uint8_t *dst, GetBitContext *gb)
225 for (i = 0; i < 256;) {
226 repeat = get_bits(gb, 3);
227 val = get_bits(gb, 5);
229 repeat = get_bits(gb, 8);
230 if (i + repeat > 256 || get_bits_left(gb) < 0) {
231 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
/* Build canonical Huffman code words from a table of code lengths,
 * assigning codes from the longest length (32) down; reports an error if
 * the lengths do not form a valid prefix code. */
240 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table)
245 for (len = 32; len > 0; len--) {
246 for (index = 0; index < 256; index++) {
247 if (len_table[index] == len)
251 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
/* Build "joint" VLC tables that decode two symbols (YUV) or a whole pixel
 * (RGB) in a single first-level lookup, speeding up decoding. Only symbol
 * pairs/triples whose combined code length fits in VLC_BITS are entered. */
259 static void generate_joint_tables(HYuvContext *s)
261 uint16_t symbols[1 << VLC_BITS];
262 uint16_t bits[1 << VLC_BITS];
263 uint8_t len[1 << VLC_BITS];
264 if (s->bitstream_bpp < 24) {
/* YUV path: one joint table per chroma-ish plane pairing (YY, YU, YV). */
266 for (p = 0; p < 3; p++) {
267 for (i = y = 0; y < 256; y++) {
268 int len0 = s->len[0][y];
269 int limit = VLC_BITS - len0;
272 for (u = 0; u < 256; u++) {
273 int len1 = s->len[p][u];
/* Concatenate the two codes and record the packed symbol pair. */
276 len[i] = len0 + len1;
277 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
278 symbols[i] = (y << 8) + u;
279 if(symbols[i] != 0xffff) // reserved to mean "invalid"
283 ff_free_vlc(&s->vlc[3 + p]);
284 ff_init_vlc_sparse(&s->vlc[3 + p], VLC_BITS, i, len, 1, 1,
285 bits, 2, 2, symbols, 2, 2, 0);
/* RGB path: build one joint table over (g,b,r) deltas; p0/p1 select the
 * plane order depending on whether channels are decorrelated. */
288 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
289 int i, b, g, r, code;
290 int p0 = s->decorrelate;
291 int p1 = !s->decorrelate;
292 // restrict the range to +/-16 because that's pretty much guaranteed to
293 // cover all the combinations that fit in 11 bits total, and it doesn't
294 // matter if we miss a few rare codes.
295 for (i = 0, g = -16; g < 16; g++) {
296 int len0 = s->len[p0][g & 255];
297 int limit0 = VLC_BITS - len0;
300 for (b = -16; b < 16; b++) {
301 int len1 = s->len[p1][b & 255];
302 int limit1 = limit0 - len1;
305 code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
306 for (r = -16; r < 16; r++) {
307 int len2 = s->len[2][r & 255];
310 len[i] = len0 + len1 + len2;
311 bits[i] = (code << len2) + s->bits[2][r & 255];
312 if (s->decorrelate) {
325 ff_free_vlc(&s->vlc[3]);
326 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
/* Parse the three per-plane Huffman tables from 'src' (extradata or a
 * context-mode frame header), rebuild the code words and per-plane VLCs,
 * then the joint tables. Returns the number of bytes consumed (rounded up
 * to whole bytes). */
330 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
335 init_get_bits(&gb, src, length * 8);
337 for (i = 0; i < 3; i++) {
338 if (read_len_table(s->len[i], &gb) < 0)
340 if (generate_bits_table(s->bits[i], s->len[i]) < 0) {
/* Free any VLC from a previous frame before rebuilding it. */
343 ff_free_vlc(&s->vlc[i]);
344 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
345 s->bits[i], 4, 4, 0);
348 generate_joint_tables(s);
350 return (get_bits_count(&gb) + 7) / 8;
/* Install the hard-coded "classic" huffyuv v1 tables: code lengths from the
 * RLE-packed classic_shift_* blobs, code words from classic_add_*; then
 * build VLCs as read_huffman_tables() does. */
353 static int read_old_huffman_tables(HYuvContext *s)
358 init_get_bits(&gb, classic_shift_luma,
359 classic_shift_luma_table_size * 8);
360 if (read_len_table(s->len[0], &gb) < 0)
363 init_get_bits(&gb, classic_shift_chroma,
364 classic_shift_chroma_table_size * 8);
365 if (read_len_table(s->len[1], &gb) < 0)
368 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
369 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
/* RGB streams use the luma table for all planes. */
371 if (s->bitstream_bpp >= 24) {
372 memcpy(s->bits[1], s->bits[0], 256 * sizeof(uint32_t));
373 memcpy(s->len[1] , s->len [0], 256 * sizeof(uint8_t));
375 memcpy(s->bits[2], s->bits[1], 256 * sizeof(uint32_t));
376 memcpy(s->len[2] , s->len [1], 256 * sizeof(uint8_t));
378 for (i = 0; i < 3; i++) {
379 ff_free_vlc(&s->vlc[i]);
380 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
381 s->bits[i], 4, 4, 0);
384 generate_joint_tables(s);
/* Allocate per-plane scratch rows: three width+16 buffers for planar YUV,
 * or one 4*width+16 buffer for packed RGB. Returns AVERROR(ENOMEM) on
 * allocation failure. */
389 static av_cold int alloc_temp(HYuvContext *s)
393 if (s->bitstream_bpp<24) {
394 for (i=0; i<3; i++) {
395 s->temp[i]= av_malloc(s->width + 16);
397 return AVERROR(ENOMEM);
400 s->temp[0]= av_mallocz(4*s->width + 16);
402 return AVERROR(ENOMEM);
/* Initialization shared by encoder and decoder: cache flags/dimensions and
 * set up the DSP function pointers. */
408 static av_cold int common_init(AVCodecContext *avctx)
410 HYuvContext *s = avctx->priv_data;
413 s->flags = avctx->flags;
415 ff_dsputil_init(&s->dsp, avctx);
417 s->width = avctx->width;
418 s->height = avctx->height;
419 av_assert1(s->width > 0 && s->height > 0);
/* Teardown shared by encoder and decoder: release the scratch rows. */
424 static av_cold int common_end(HYuvContext *s)
428 for(i = 0; i < 3; i++) {
429 av_freep(&s->temp[i]);
434 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder init: determine stream version, predictor, bpp, interlacing and
 * context mode from extradata (v2) or bits_per_coded_sample (v1 "classic"),
 * load the matching Huffman tables, and pick the output pixel format. */
435 static av_cold int decode_init(AVCodecContext *avctx)
437 HYuvContext *s = avctx->priv_data;
440 memset(s->vlc, 0, 3 * sizeof(VLC));
442 avctx->coded_frame = &s->picture;
443 avcodec_get_frame_defaults(&s->picture);
/* Historic heuristic: streams taller than 288 lines default to interlaced. */
444 s->interlaced = s->height > 288;
448 if (avctx->extradata_size) {
449 if ((avctx->bits_per_coded_sample & 7) &&
450 avctx->bits_per_coded_sample != 12)
451 s->version = 1; // do such files exist at all?
457 if (s->version == 2) {
458 int method, interlace;
460 if (avctx->extradata_size < 4)
/* Extradata byte 0: predictor in low 6 bits, decorrelate flag in bit 6. */
463 method = ((uint8_t*)avctx->extradata)[0];
464 s->decorrelate = method & 64 ? 1 : 0;
465 s->predictor = method & 63;
466 s->bitstream_bpp = ((uint8_t*)avctx->extradata)[1];
467 if (s->bitstream_bpp == 0)
468 s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
/* Byte 2: interlace override (1=on, 2=off, else keep heuristic) and
 * context-model flag (bit 6). */
469 interlace = (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
470 s->interlaced = (interlace == 1) ? 1 : (interlace == 2) ? 0 : s->interlaced;
471 s->context = ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
473 if ( read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
474 avctx->extradata_size - 4) < 0)
475 return AVERROR_INVALIDDATA;
/* v1: predictor is encoded in the low 3 bits of bits_per_coded_sample. */
477 switch (avctx->bits_per_coded_sample & 7) {
487 s->predictor = PLANE;
488 s->decorrelate = avctx->bits_per_coded_sample >= 24;
491 s->predictor = MEDIAN;
495 s->predictor = LEFT; //OLD
499 s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
502 if (read_old_huffman_tables(s) < 0)
503 return AVERROR_INVALIDDATA;
506 switch (s->bitstream_bpp) {
508 avctx->pix_fmt = AV_PIX_FMT_YUV420P;
512 avctx->pix_fmt = AV_PIX_FMT_YUYV422;
514 avctx->pix_fmt = AV_PIX_FMT_YUV422P;
520 avctx->pix_fmt = AV_PIX_FMT_RGB32;
522 avctx->pix_fmt = AV_PIX_FMT_BGR24;
526 return AVERROR_INVALIDDATA;
/* Subsampled chroma requires an even luma width. */
529 if ((avctx->pix_fmt == AV_PIX_FMT_YUV422P || avctx->pix_fmt == AV_PIX_FMT_YUV420P) && avctx->width & 1) {
530 av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
531 return AVERROR_INVALIDDATA;
533 if (s->predictor == MEDIAN && avctx->pix_fmt == AV_PIX_FMT_YUV422P && avctx->width%4) {
534 av_log(avctx, AV_LOG_ERROR, "width must be a multiple of 4 this colorspace and predictor\n");
535 return AVERROR_INVALIDDATA;
539 return AVERROR(ENOMEM);
/* Frame-threading copy constructor: each thread's context gets its own VLC
 * tables (rebuilt from extradata or the classic tables) so threads never
 * share mutable decode state. */
545 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
547 HYuvContext *s = avctx->priv_data;
550 avctx->coded_frame= &s->picture;
553 return AVERROR(ENOMEM);
/* Clear inherited table pointers so this thread builds fresh ones. */
556 for (i = 0; i < 6; i++)
557 s->vlc[i].table = NULL;
559 if (s->version == 2) {
/* NOTE(review): passes extradata_size here where decode_init passes
 * extradata_size - 4 — confirm whether the asymmetry is intentional. */
560 if (read_huffman_tables(s, ((uint8_t*)avctx->extradata) + 4,
561 avctx->extradata_size) < 0)
562 return AVERROR_INVALIDDATA;
564 if (read_old_huffman_tables(s) < 0)
565 return AVERROR_INVALIDDATA;
570 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
572 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Serialize one 256-entry code-length table into 'buf' using the run-length
 * format read_len_table() parses (val in low 5 bits, repeat in high 3, with
 * a separate byte for long repeats). Returns bytes written (return not
 * visible in this view). */
573 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
578 for (i = 0; i < 256;) {
/* Count how many consecutive entries share this length (max 255). */
582 for (; i < 256 && len[i] == val && repeat < 255; i++)
585 av_assert0(val < 32 && val >0 && repeat<256 && repeat>0);
588 buf[index++] = repeat;
590 buf[index++] = val | (repeat << 5);
/* Encoder init: choose bitstream parameters from the pixel format and codec
 * options, validate unsupported combinations, write the 4-byte extradata
 * header, seed the symbol statistics (from stats_in if 2-pass), and build
 * the initial Huffman tables. */
597 static av_cold int encode_init(AVCodecContext *avctx)
599 HYuvContext *s = avctx->priv_data;
604 avctx->extradata = av_mallocz(1024*30); // 256*3+4 == 772
605 avctx->stats_out = av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
606 if (!avctx->extradata || !avctx->stats_out) {
607 av_freep(&avctx->stats_out);
608 return AVERROR(ENOMEM);
612 avctx->coded_frame = &s->picture;
614 switch (avctx->pix_fmt) {
615 case AV_PIX_FMT_YUV420P:
616 case AV_PIX_FMT_YUV422P:
618 av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
619 return AVERROR(EINVAL);
621 s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
623 case AV_PIX_FMT_RGB32:
624 s->bitstream_bpp = 32;
626 case AV_PIX_FMT_RGB24:
627 s->bitstream_bpp = 24;
630 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
631 return AVERROR(EINVAL);
633 avctx->bits_per_coded_sample = s->bitstream_bpp;
634 s->decorrelate = s->bitstream_bpp >= 24;
635 s->predictor = avctx->prediction_method;
636 s->interlaced = avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
637 if (avctx->context_model == 1) {
638 s->context = avctx->context_model;
/* Adaptive context mode rescales stats per frame, which would corrupt the
 * global statistics a 2-pass run depends on. */
639 if (s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) {
640 av_log(avctx, AV_LOG_ERROR,
641 "context=1 is not compatible with "
642 "2 pass huffyuv encoding\n");
643 return AVERROR(EINVAL);
/* Restrictions when producing strict HUFFYUV (not FFVHUFF) output. */
647 if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
648 if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
649 av_log(avctx, AV_LOG_ERROR,
650 "Error: YV12 is not supported by huffyuv; use "
651 "vcodec=ffvhuff or format=422p\n");
652 return AVERROR(EINVAL);
654 if (avctx->context_model) {
655 av_log(avctx, AV_LOG_ERROR,
656 "Error: per-frame huffman tables are not supported "
657 "by huffyuv; use vcodec=ffvhuff\n");
658 return AVERROR(EINVAL);
660 if (s->interlaced != ( s->height > 288 ))
661 av_log(avctx, AV_LOG_INFO,
662 "using huffyuv 2.2.0 or newer interlacing flag\n");
665 if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN) {
666 av_log(avctx, AV_LOG_ERROR,
667 "Error: RGB is incompatible with median predictor\n");
668 return AVERROR(EINVAL);
/* Extradata header: byte 0 = predictor|decorrelate, 1 = bpp,
 * 2 = interlace + context flags, 3 = reserved. */
671 ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
672 ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
673 ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
675 ((uint8_t*)avctx->extradata)[2] |= 0x40;
676 ((uint8_t*)avctx->extradata)[3] = 0;
677 s->avctx->extradata_size = 4;
/* 2-pass: accumulate symbol counts parsed back from the pass-1 stats file. */
679 if (avctx->stats_in) {
680 char *p = avctx->stats_in;
682 for (i = 0; i < 3; i++)
683 for (j = 0; j < 256; j++)
687 for (i = 0; i < 3; i++) {
690 for (j = 0; j < 256; j++) {
691 s->stats[i][j] += strtol(p, &next, 0);
692 if (next == p) return -1;
696 if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
/* No stats: seed with a generic distribution favouring small residuals. */
699 for (i = 0; i < 3; i++)
700 for (j = 0; j < 256; j++) {
701 int d = FFMIN(j, 256 - j);
703 s->stats[i][j] = 100000000 / (d + 1);
/* Build per-plane length and code tables and append them to extradata. */
707 for (i = 0; i < 3; i++) {
708 ff_huff_gen_len_table(s->len[i], s->stats[i]);
710 if (generate_bits_table(s->bits[i], s->len[i]) < 0) {
714 s->avctx->extradata_size +=
715 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
/* Context mode: reseed stats per plane (chroma weighted lower than luma). */
719 for (i = 0; i < 3; i++) {
720 int pels = s->width * s->height / (i ? 40 : 10);
721 for (j = 0; j < 256; j++) {
722 int d = FFMIN(j, 256 - j);
723 s->stats[i][j] = pels/(d + 1);
727 for (i = 0; i < 3; i++)
728 for (j = 0; j < 256; j++)
734 return AVERROR(ENOMEM);
741 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
743 /* TODO instead of restarting the read when the code isn't in the first level
744  * of the joint table, jump into the 2nd level of the individual table. */
/* Decode two symbols at once via the joint VLC; on a miss (the pair's code
 * was too long for one lookup) fall back to two single-symbol reads. */
745 #define READ_2PIX(dst0, dst1, plane1){\
746 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
751 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
752 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
/* Decode one row of 4:2:2 samples into s->temp: Y pairs interleaved with U
 * and V. The slow path (with a per-iteration bits-left check) is taken only
 * when the remaining bitstream might be too short for 'count' pixels. */
756 static void decode_422_bitstream(HYuvContext *s, int count)
762 if (count >= (get_bits_left(&s->gb)) / (31 * 4)) {
763 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
764 READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
765 READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
/* Fast path: plenty of bits left, no per-pixel bounds check needed. */
768 for (i = 0; i < count; i++) {
769 READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
770 READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
/* Decode luma-only samples (used for the Y rows of 4:2:0 streams) into
 * s->temp[0], two pixels per joint-VLC read; slow path mirrors
 * decode_422_bitstream(). */
775 static void decode_gray_bitstream(HYuvContext *s, int count)
781 if (count >= (get_bits_left(&s->gb)) / (31 * 2)) {
782 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
783 READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
786 for(i=0; i<count; i++){
787 READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
792 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Encode 'count' 4:2:2 pixel pairs from s->temp into the bitstream in
 * Y0 U Y1 V order. Three variants: stats-gathering (pass 1), stats +
 * output, and plain output. */
793 static int encode_422_bitstream(HYuvContext *s, int offset, int count)
796 const uint8_t *y = s->temp[0] + offset;
797 const uint8_t *u = s->temp[1] + offset / 2;
798 const uint8_t *v = s->temp[2] + offset / 2;
/* Worst case ~8 bytes per pair; bail out before overflowing the buffer. */
800 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 2 * 4 * count) {
801 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
807 int y1 = y[2 * i + 1];\
813 if (s->flags & CODEC_FLAG_PASS1) {
814 for(i = 0; i < count; i++) {
822 if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
825 for (i = 0; i < count; i++) {
828 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
830 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
832 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
834 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
837 for(i = 0; i < count; i++) {
839 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
840 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
841 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
842 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
/* Encode 'count' luma-only pixel pairs; same three-variant structure as
 * encode_422_bitstream() (stats-only / stats+output / output). */
848 static int encode_gray_bitstream(HYuvContext *s, int count)
/* Bound check before writing; worst case ~4 bytes per pair. */
852 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * count) {
853 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
858 int y0 = s->temp[0][2 * i];\
859 int y1 = s->temp[0][2 * i + 1];
864 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
865 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
869 if (s->flags & CODEC_FLAG_PASS1) {
870 for (i = 0; i < count; i++) {
875 if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
879 for (i = 0; i < count; i++) {
885 for (i = 0; i < count; i++) {
892 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Decode 'count' RGB(A) pixels into s->temp[0] (4 bytes per pixel).
 * Fast path: one joint-VLC hit yields a prebuilt packed pixel from
 * pix_bgr_map. Fallback: decode B, G, R (and A) individually, undoing the
 * green-channel decorrelation when enabled. Compile-time specialized on
 * (decorrelate, alpha) via av_always_inline. */
894 static av_always_inline void decode_bgr_1(HYuvContext *s, int count,
895 int decorrelate, int alpha)
898 for (i = 0; i < count; i++) {
899 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
901 *(uint32_t*)&s->temp[0][4 * i] = s->pix_bgr_map[code];
902 } else if(decorrelate) {
/* Decorrelated: B and R are stored as offsets from G. */
903 s->temp[0][4 * i + G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
904 s->temp[0][4 * i + B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) +
905 s->temp[0][4 * i + G];
906 s->temp[0][4 * i + R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) +
907 s->temp[0][4 * i + G];
909 s->temp[0][4 * i + B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
910 s->temp[0][4 * i + G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
911 s->temp[0][4 * i + R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
/* Alpha shares the R/V table (s->vlc[2]). */
914 s->temp[0][4 * i + A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
/* Dispatch to the decode_bgr_1 specialization matching the stream's
 * decorrelate flag and presence of alpha (bpp 24 vs 32); the constant
 * arguments let the compiler generate four branch-free variants. */
918 static void decode_bgr_bitstream(HYuvContext *s, int count)
920 if (s->decorrelate) {
921 if (s->bitstream_bpp==24)
922 decode_bgr_1(s, count, 1, 0);
924 decode_bgr_1(s, count, 1, 1);
926 if (s->bitstream_bpp==24)
927 decode_bgr_1(s, count, 0, 0);
929 decode_bgr_1(s, count, 0, 1);
/* Encode 'count' packed RGB(A) pixels (planes = 3 or 4) with green-channel
 * decorrelation: B and R are written as deltas from G (mod 256). Variants:
 * stats-only (pass 1 + no-output), stats+output (context or pass 1), and
 * plain output. */
933 static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
/* Bound check before writing; worst case ~4 bytes per channel. */
937 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*planes*count) {
938 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
943 int g = s->temp[0][planes==3 ? 3*i + 1 : 4*i + G];\
944 int b = (s->temp[0][planes==3 ? 3*i + 2 : 4*i + B] - g) & 0xff;\
945 int r = (s->temp[0][planes==3 ? 3*i + 0 : 4*i + R] - g) & 0xff;\
946 int a = s->temp[0][planes*i + A];
951 if(planes==4) s->stats[2][a]++;
953 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
954 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
955 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);\
956 if(planes==4) put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
958 if ((s->flags & CODEC_FLAG_PASS1) &&
959 (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
960 for (i = 0; i < count; i++) {
964 } else if (s->context || (s->flags & CODEC_FLAG_PASS1)) {
965 for (i = 0; i < count; i++) {
971 for (i = 0; i < count; i++) {
979 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Report the rows decoded since the last call to the user's
 * draw_horiz_band callback (if any); tracks progress in last_slice_end. */
980 static void draw_slice(HYuvContext *s, int y)
983 int offset[AV_NUM_DATA_POINTERS];
985 if (s->avctx->draw_horiz_band==NULL)
988 h = y - s->last_slice_end;
/* 12 bpp = 4:2:0: chroma rows advance at half the luma rate. */
991 if (s->bitstream_bpp == 12) {
997 offset[0] = s->picture.linesize[0]*y;
998 offset[1] = s->picture.linesize[1]*cy;
999 offset[2] = s->picture.linesize[2]*cy;
1000 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
1004 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
1006 s->last_slice_end = y + h;
/* Decode one frame. Steps: byteswap the packet into bitstream_buffer, get
 * an output frame, (context mode) read per-frame Huffman tables, then
 * reconstruct rows per predictor (LEFT / PLANE / MEDIAN) for either planar
 * YUV (<24 bpp) or packed RGB (stored bottom-up). Returns the number of
 * bytes consumed. */
1009 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
1012 const uint8_t *buf = avpkt->data;
1013 int buf_size = avpkt->size;
1014 HYuvContext *s = avctx->priv_data;
1015 const int width = s->width;
1016 const int width2 = s->width>>1;
1017 const int height = s->height;
1018 int fake_ystride, fake_ustride, fake_vstride;
1019 AVFrame * const p = &s->picture;
1020 int table_size = 0, ret;
1022 AVFrame *picture = data;
/* Huffyuv stores the stream as 32-bit little-endian words; swap into a
 * padded scratch buffer before bit-reading. */
1024 av_fast_padded_malloc(&s->bitstream_buffer,
1025 &s->bitstream_buffer_size,
1027 if (!s->bitstream_buffer)
1028 return AVERROR(ENOMEM);
1030 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer,
1031 (const uint32_t*)buf, buf_size / 4);
1034 ff_thread_release_buffer(avctx, p);
1037 if ((ret = ff_thread_get_buffer(avctx, p)) < 0) {
1038 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
/* Context mode: the frame starts with its own Huffman tables. */
1043 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
1045 return AVERROR_INVALIDDATA;
1048 if ((unsigned)(buf_size-table_size) >= INT_MAX / 8)
1049 return AVERROR_INVALIDDATA;
1051 init_get_bits(&s->gb, s->bitstream_buffer+table_size,
1052 (buf_size-table_size) * 8);
/* Interlaced frames predict from two rows up (same field). */
1054 fake_ystride = s->interlaced ? p->linesize[0] * 2 : p->linesize[0];
1055 fake_ustride = s->interlaced ? p->linesize[1] * 2 : p->linesize[1];
1056 fake_vstride = s->interlaced ? p->linesize[2] * 2 : p->linesize[2];
1058 s->last_slice_end = 0;
1060 if (s->bitstream_bpp < 24) {
1062 int lefty, leftu, leftv;
1063 int lefttopy, lefttopu, lefttopv;
/* First 4 raw bytes seed the predictors (yuy2 packed layout). */
1066 p->data[0][3] = get_bits(&s->gb, 8);
1067 p->data[0][2] = get_bits(&s->gb, 8);
1068 p->data[0][1] = get_bits(&s->gb, 8);
1069 p->data[0][0] = get_bits(&s->gb, 8);
1071 av_log(avctx, AV_LOG_ERROR,
1072 "YUY2 output is not implemented yet\n");
1073 return AVERROR_PATCHWELCOME;
/* Planar layout: seed left-predictors from the first raw samples. */
1076 leftv = p->data[2][0] = get_bits(&s->gb, 8);
1077 lefty = p->data[0][1] = get_bits(&s->gb, 8);
1078 leftu = p->data[1][0] = get_bits(&s->gb, 8);
1079 p->data[0][0] = get_bits(&s->gb, 8);
1081 switch (s->predictor) {
/* LEFT / PLANE: first row is left-predicted from pixel 2 onward. */
1084 decode_422_bitstream(s, width-2);
1085 lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1086 if (!(s->flags&CODEC_FLAG_GRAY)) {
1087 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
1088 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
1091 for (cy = y = 1; y < s->height; y++, cy++) {
1092 uint8_t *ydst, *udst, *vdst;
/* 4:2:0: odd luma rows have no chroma; decode gray and continue. */
1094 if (s->bitstream_bpp == 12) {
1095 decode_gray_bitstream(s, width);
1097 ydst = p->data[0] + p->linesize[0] * y;
1099 lefty = s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1100 if (s->predictor == PLANE) {
1101 if (y > s->interlaced)
1102 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1105 if (y >= s->height) break;
1110 ydst = p->data[0] + p->linesize[0]*y;
1111 udst = p->data[1] + p->linesize[1]*cy;
1112 vdst = p->data[2] + p->linesize[2]*cy;
1114 decode_422_bitstream(s, width);
1115 lefty = s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1116 if (!(s->flags & CODEC_FLAG_GRAY)) {
1117 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1118 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
/* PLANE: additionally add the row above (or two above if interlaced). */
1120 if (s->predictor == PLANE) {
1121 if (cy > s->interlaced) {
1122 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1123 if (!(s->flags & CODEC_FLAG_GRAY)) {
1124 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1125 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1130 draw_slice(s, height);
/* MEDIAN predictor path. */
1134 /* first line except first 2 pixels is left predicted */
1135 decode_422_bitstream(s, width - 2);
1136 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width - 2, lefty);
1137 if (!(s->flags & CODEC_FLAG_GRAY)) {
1138 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
1139 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
1144 /* second line is left predicted for interlaced case */
1145 if (s->interlaced) {
1146 decode_422_bitstream(s, width);
1147 lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1148 if (!(s->flags & CODEC_FLAG_GRAY)) {
/* NOTE(review): chroma strides are cross-indexed here (data[1] with
 * linesize[2] and vice versa) — harmless only if both chroma planes
 * share a linesize; confirm this is intentional. */
1149 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1150 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1155 /* next 4 pixels are left predicted too */
1156 decode_422_bitstream(s, 4);
1157 lefty = s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1158 if (!(s->flags&CODEC_FLAG_GRAY)) {
1159 leftu = s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1160 leftv = s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1163 /* next line except the first 4 pixels is median predicted */
1164 lefttopy = p->data[0][3];
1165 decode_422_bitstream(s, width - 4);
1166 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1167 if (!(s->flags&CODEC_FLAG_GRAY)) {
1168 lefttopu = p->data[1][1];
1169 lefttopv = p->data[2][1];
1170 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
1171 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2] + 2, s->temp[2], width2 - 2, &leftv, &lefttopv);
/* Remaining rows: full median prediction against the row above. */
1175 for (; y<height; y++, cy++) {
1176 uint8_t *ydst, *udst, *vdst;
1178 if (s->bitstream_bpp == 12) {
1179 while (2 * cy > y) {
1180 decode_gray_bitstream(s, width);
1181 ydst = p->data[0] + p->linesize[0] * y;
1182 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1185 if (y >= height) break;
1189 decode_422_bitstream(s, width);
1191 ydst = p->data[0] + p->linesize[0] * y;
1192 udst = p->data[1] + p->linesize[1] * cy;
1193 vdst = p->data[2] + p->linesize[2] * cy;
1195 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1196 if (!(s->flags & CODEC_FLAG_GRAY)) {
1197 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1198 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1202 draw_slice(s, height);
/* Packed RGB path: the image is stored bottom-up, so decoding starts at the
 * last line and walks upward. */
1208 int leftr, leftg, leftb, lefta;
1209 const int last_line = (height - 1) * p->linesize[0];
1211 if (s->bitstream_bpp == 32) {
1212 lefta = p->data[0][last_line+A] = get_bits(&s->gb, 8);
1213 leftr = p->data[0][last_line+R] = get_bits(&s->gb, 8);
1214 leftg = p->data[0][last_line+G] = get_bits(&s->gb, 8);
1215 leftb = p->data[0][last_line+B] = get_bits(&s->gb, 8);
1217 leftr = p->data[0][last_line+R] = get_bits(&s->gb, 8);
1218 leftg = p->data[0][last_line+G] = get_bits(&s->gb, 8);
1219 leftb = p->data[0][last_line+B] = get_bits(&s->gb, 8);
1220 lefta = p->data[0][last_line+A] = 255;
1221 skip_bits(&s->gb, 8);
1225 switch (s->predictor) {
1228 decode_bgr_bitstream(s, width - 1);
1229 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width - 1, &leftr, &leftg, &leftb, &lefta);
1231 for (y = s->height - 2; y >= 0; y--) { //Yes it is stored upside down.
1232 decode_bgr_bitstream(s, width);
1234 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1235 if (s->predictor == PLANE) {
1236 if (s->bitstream_bpp != 32) lefta = 0;
1237 if ((y & s->interlaced) == 0 &&
1238 y < s->height - 1 - s->interlaced) {
1239 s->dsp.add_bytes(p->data[0] + p->linesize[0] * y,
1240 p->data[0] + p->linesize[0] * y +
1241 fake_ystride, fake_ystride);
1245 // just 1 large slice as this is not possible in reverse order
1246 draw_slice(s, height);
1249 av_log(avctx, AV_LOG_ERROR,
1250 "prediction type not supported!\n");
1253 av_log(avctx, AV_LOG_ERROR,
1254 "BGR24 output is not implemented yet\n");
1255 return AVERROR_PATCHWELCOME;
/* Bytes consumed, rounded up to whole 32-bit words plus the table header. */
1263 return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
1265 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1267 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder teardown: release the reference frame, the byteswap scratch
 * buffer, and all six VLC tables. */
1268 static av_cold int decode_end(AVCodecContext *avctx)
1270 HYuvContext *s = avctx->priv_data;
1273 if (s->picture.data[0])
1274 avctx->release_buffer(avctx, &s->picture);
1277 av_freep(&s->bitstream_buffer);
1279 for (i = 0; i < 6; i++) {
1280 ff_free_vlc(&s->vlc[i]);
1285 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1287 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
1288 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1289 const AVFrame *pict, int *got_packet)
/*
 * Encode one input frame into pkt.
 * Dispatches on avctx->pix_fmt: planar YUV 422P/420P, RGB32 or RGB24.
 * For each row the configured predictor (LEFT / PLANE / MEDIAN, see the
 * Predictor enum) turns pixels into residuals in s->temp[], which are
 * then Huffman-coded into the s->pb bit writer.
 * NOTE(review): this excerpt is lossily extracted -- braces, else
 * branches and several statements (e.g. `return ret;`, the presumed
 * `if (s->context)` guard around the table rebuild, `size /= 4;`,
 * `*got_packet = 1;` and the final return) are missing; verify against
 * the complete file before editing.
 */
1291 HYuvContext *s = avctx->priv_data;
1292 const int width = s->width;
1293 const int width2 = s->width>>1;
1294 const int height = s->height;
/* Interlaced content predicts from the field two rows up, not the
 * previous row, hence the doubled strides. */
1295 const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1296 const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1297 const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1298 AVFrame * const p = &s->picture;
1299 int i, j, size = 0, ret;
/* Worst-case allocation: 3 planes * 4 bytes per sample plus headroom. */
1301 if ((ret = ff_alloc_packet2(avctx, pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0)
/* Every huffyuv frame is an intra frame. */
1305 p->pict_type = AV_PICTURE_TYPE_I;
/* Rebuild per-plane Huffman code lengths and bits from the gathered
 * symbol statistics and store the tables in the packet header
 * (presumably only when s->context is set -- TODO confirm against the
 * full file). */
1309 for (i = 0; i < 3; i++) {
1310 ff_huff_gen_len_table(s->len[i], s->stats[i]);
1311 if (generate_bits_table(s->bits[i], s->len[i]) < 0)
1313 size += store_table(s, s->len[i], &pkt->data[size]);
/* Halve the statistics: the model keeps adapting without old frames
 * dominating forever. */
1316 for (i = 0; i < 3; i++)
1317 for (j = 0; j < 256; j++)
1318 s->stats[i][j] >>= 1;
/* Bit writer starts right after the stored tables. */
1321 init_put_bits(&s->pb, pkt->data + size, pkt->size - size);
1323 if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
1324 avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
1325 int lefty, leftu, leftv, y, cy;
/* Transmit the raw first samples verbatim; they seed the left
 * predictors for row 0. */
1327 put_bits(&s->pb, 8, leftv = p->data[2][0]);
1328 put_bits(&s->pb, 8, lefty = p->data[0][1]);
1329 put_bits(&s->pb, 8, leftu = p->data[1][0]);
1330 put_bits(&s->pb, 8, p->data[0][0]);
/* Left-predict the remainder of the first row of each plane. */
1332 lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1333 leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1334 leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
/* Skip the two luma samples already sent raw above. */
1336 encode_422_bitstream(s, 2, width-2);
1338 if (s->predictor==MEDIAN) {
1339 int lefttopy, lefttopu, lefttopv;
/* Interlaced: the second row still has no field above it, so it is
 * left-predicted as well. */
1341 if (s->interlaced) {
1342 lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
1343 leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
1344 leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);
1346 encode_422_bitstream(s, 0, width);
/* The first few samples of the next row lack a complete median
 * neighborhood; left-predict 4 luma / 2 chroma samples. */
1350 lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
1351 leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
1352 leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);
1354 encode_422_bitstream(s, 0, 4);
/* Seed the top-left predictors, then median-predict the rest of the row. */
1356 lefttopy = p->data[0][3];
1357 lefttopu = p->data[1][1];
1358 lefttopv = p->data[2][1];
1359 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride + 4, width - 4 , &lefty, &lefttopy);
1360 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
1361 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
1362 encode_422_bitstream(s, 0, width - 4);
/* Main median-predicted loop: y is the luma row, cy the chroma row. */
1365 for (; y < height; y++,cy++) {
1366 uint8_t *ydst, *udst, *vdst;
/* 420 (12 bpp): luma has twice as many rows as chroma, so emit
 * luma-only rows until y catches up with 2*cy. */
1368 if (s->bitstream_bpp == 12) {
1369 while (2 * cy > y) {
1370 ydst = p->data[0] + p->linesize[0] * y;
1371 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1372 encode_gray_bitstream(s, width);
1375 if (y >= height) break;
1377 ydst = p->data[0] + p->linesize[0] * y;
1378 udst = p->data[1] + p->linesize[1] * cy;
1379 vdst = p->data[2] + p->linesize[2] * cy;
1381 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1382 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1383 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1385 encode_422_bitstream(s, 0, width);
/* LEFT / PLANE predictors (presumably the else branch of the MEDIAN
 * test above -- the `} else {` line was dropped in extraction). */
1388 for (cy = y = 1; y < height; y++, cy++) {
1389 uint8_t *ydst, *udst, *vdst;
1391 /* encode a luma only line & y++ */
1392 if (s->bitstream_bpp == 12) {
1393 ydst = p->data[0] + p->linesize[0] * y;
/* PLANE: subtract the row above first (skipped for the first
 * row(s), where s->interlaced < y is false). */
1395 if (s->predictor == PLANE && s->interlaced < y) {
1396 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1398 lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1400 lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1402 encode_gray_bitstream(s, width);
1404 if (y >= height) break;
1407 ydst = p->data[0] + p->linesize[0] * y;
1408 udst = p->data[1] + p->linesize[1] * cy;
1409 vdst = p->data[2] + p->linesize[2] * cy;
/* PLANE: row-difference all three planes; U and V residuals share
 * s->temp[2] (V stored at offset width2). */
1411 if (s->predictor == PLANE && s->interlaced < cy) {
1412 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1413 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1414 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1416 lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1417 leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1418 leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
/* LEFT (or first row): left-predict straight from the source rows. */
1420 lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1421 leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1422 leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1425 encode_422_bitstream(s, 0, width);
/* RGB32 path: the image is stored bottom-up (negative stride). */
1428 } else if(avctx->pix_fmt == AV_PIX_FMT_RGB32) {
1429 uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
1430 const int stride = -p->linesize[0];
1431 const int fake_stride = -fake_ystride;
1433 int leftr, leftg, leftb, lefta;
/* Send the first pixel raw; A/R/G/B index the channels within it. */
1435 put_bits(&s->pb, 8, lefta = data[A]);
1436 put_bits(&s->pb, 8, leftr = data[R]);
1437 put_bits(&s->pb, 8, leftg = data[G]);
1438 put_bits(&s->pb, 8, leftb = data[B]);
1440 sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1, &leftr, &leftg, &leftb, &lefta);
1441 encode_bgra_bitstream(s, width - 1, 4);
1443 for (y = 1; y < s->height; y++) {
1444 uint8_t *dst = data + y*stride;
/* PLANE predictor: row difference before left prediction. */
1445 if (s->predictor == PLANE && s->interlaced < y) {
1446 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
1447 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb, &lefta);
1449 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb, &lefta);
1451 encode_bgra_bitstream(s, width, 4);
/* RGB24 path: same bottom-up scheme, 3 bytes per pixel, no alpha. */
1453 }else if(avctx->pix_fmt == AV_PIX_FMT_RGB24){
1454 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1455 const int stride = -p->linesize[0];
1456 const int fake_stride = -fake_ystride;
1458 int leftr, leftg, leftb;
1460 put_bits(&s->pb, 8, leftr= data[0]);
1461 put_bits(&s->pb, 8, leftg= data[1]);
1462 put_bits(&s->pb, 8, leftb= data[2]);
/* Placeholder byte where RGB32 would carry alpha. */
1463 put_bits(&s->pb, 8, 0);
1465 sub_left_prediction_rgb24(s, s->temp[0], data+3, width-1, &leftr, &leftg, &leftb);
1466 encode_bgra_bitstream(s, width-1, 3);
1468 for(y=1; y<s->height; y++){
1469 uint8_t *dst = data + y*stride;
1470 if(s->predictor == PLANE && s->interlaced < y){
1471 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*3);
1472 sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1474 sub_left_prediction_rgb24(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1476 encode_bgra_bitstream(s, width, 3);
/* Unsupported pixel format (presumably the trailing else branch). */
1479 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
/* Round the written bit count up to bytes and pad the stream; the
 * final size is handled in 32-bit words (see bswap_buf / size * 4
 * below -- a dropped `size /= 4;` is likely; TODO confirm). */
1483 size += (put_bits_count(&s->pb) + 31) / 8;
1484 put_bits(&s->pb, 16, 0);
1485 put_bits(&s->pb, 15, 0);
/* Two-pass rate control: dump the symbol statistics to stats_out every
 * 32 frames, zeroing them as they are written. */
1488 if ((s->flags&CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
1490 char *p = avctx->stats_out;
1491 char *end = p + 1024*30;
1492 for (i = 0; i < 3; i++) {
1493 for (j = 0; j < 256; j++) {
1494 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1498 snprintf(p, end-p, "\n");
1502 avctx->stats_out[0] = '\0';
/* Unless output is suppressed, flush the bit writer and byte-swap the
 * packet in 32-bit words. */
1503 if (!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
1504 flush_put_bits(&s->pb);
1505 s->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
1508 s->picture_number++;
/* `size` is in 32-bit words at this point; pkt->size is in bytes. */
1510 pkt->size = size * 4;
1511 pkt->flags |= AV_PKT_FLAG_KEY;
1517 static av_cold int encode_end(AVCodecContext *avctx)
/*
 * Encoder teardown: free the codec-owned extradata and the two-pass
 * stats buffer.
 * NOTE(review): `s` is declared but unused in the visible lines --
 * presumably a shared cleanup call (e.g. common_end(s)) plus the
 * braces and `return 0;` were dropped in extraction; confirm against
 * the complete file.
 */
1519 HYuvContext *s = avctx->priv_data;
/* av_freep() frees and NULLs, so double teardown is harmless. */
1523 av_freep(&avctx->extradata);
1524 av_freep(&avctx->stats_out);
1528 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1530 #if CONFIG_HUFFYUV_DECODER
1531 AVCodec ff_huffyuv_decoder = {
1533 .type = AVMEDIA_TYPE_VIDEO,
1534 .id = AV_CODEC_ID_HUFFYUV,
1535 .priv_data_size = sizeof(HYuvContext),
1536 .init = decode_init,
1537 .close = decode_end,
1538 .decode = decode_frame,
1539 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
1540 CODEC_CAP_FRAME_THREADS,
1541 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1542 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1546 #if CONFIG_FFVHUFF_DECODER
1547 AVCodec ff_ffvhuff_decoder = {
1549 .type = AVMEDIA_TYPE_VIDEO,
1550 .id = AV_CODEC_ID_FFVHUFF,
1551 .priv_data_size = sizeof(HYuvContext),
1552 .init = decode_init,
1553 .close = decode_end,
1554 .decode = decode_frame,
1555 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
1556 CODEC_CAP_FRAME_THREADS,
1557 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1558 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1562 #if CONFIG_HUFFYUV_ENCODER
1563 AVCodec ff_huffyuv_encoder = {
1565 .type = AVMEDIA_TYPE_VIDEO,
1566 .id = AV_CODEC_ID_HUFFYUV,
1567 .priv_data_size = sizeof(HYuvContext),
1568 .init = encode_init,
1569 .encode2 = encode_frame,
1570 .close = encode_end,
1571 .pix_fmts = (const enum AVPixelFormat[]){
1572 AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
1574 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1578 #if CONFIG_FFVHUFF_ENCODER
1579 AVCodec ff_ffvhuff_encoder = {
1581 .type = AVMEDIA_TYPE_VIDEO,
1582 .id = AV_CODEC_ID_FFVHUFF,
1583 .priv_data_size = sizeof(HYuvContext),
1584 .init = encode_init,
1585 .encode2 = encode_frame,
1586 .close = encode_end,
1587 .pix_fmts = (const enum AVPixelFormat[]){
1588 AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
1590 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),