2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
28 * huffyuv codec for libavcodec.
// NOTE(review): this file is a partial extraction — many lines are missing and the
// original file's line numbers are fused into each line. Comments below describe
// only what the visible fragments show.
// Predictor enum: values LEFT/PLANE/MEDIAN are referenced later in this file.
51 typedef enum Predictor{
// Per-instance codec state shared by the huffyuv/ffvhuff encoder and decoder.
57 typedef struct HYuvContext{
58 AVCodecContext *avctx;
66 int yuy2; //use yuy2 instead of 422P
67 int bgr32; //use bgr32 instead of bgr24
// Symbol statistics gathered for 2-pass / context-model encoding (see encode_init).
74 uint64_t stats[3][256];
// Per-plane huffman codes; filled by generate_bits_table().
76 uint32_t bits[3][256];
// Decoded BGR pixel lookup for the joint RGB VLC table (see generate_joint_tables).
77 uint32_t pix_bgr_map[1<<VLC_BITS];
78 VLC vlc[6]; //Y,U,V,YY,YU,YV
// Byte-swapped copy of the input packet, grown on demand in decode_frame().
80 uint8_t *bitstream_buffer;
81 unsigned int bitstream_buffer_size;
// Run-length coded huffman code-length tables for "classic" (v1) huffyuv streams;
// parsed with read_len_table() in read_old_huffman_tables().
85 static const unsigned char classic_shift_luma[] = {
86 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
87 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
// NOTE(review): closing brace / remaining rows of classic_shift_luma are missing
// from this extraction.
91 static const unsigned char classic_shift_chroma[] = {
92 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
93 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
94 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
// Fixed huffman codes for the classic (v1) luma table; copied into s->bits[0]
// in read_old_huffman_tables().
97 static const unsigned char classic_add_luma[256] = {
98 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
99 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
100 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
101 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
102 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
103 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
104 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
105 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
106 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
107 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
108 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
109 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
110 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
111 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
112 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
113 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
// Fixed huffman codes for the classic (v1) chroma table; copied into s->bits[1]
// in read_old_huffman_tables().
116 static const unsigned char classic_add_chroma[256] = {
117 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
118 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
119 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
120 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
121 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
122 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
123 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
124 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
125 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
126 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
127 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
128 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
129 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
130 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
131 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
132 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
// Left-predict (delta-encode) one row: dst[i] = src[i] - src[i-1], seeded with
// `left`; the DSP diff_bytes handles everything past the first 16 pixels.
// NOTE(review): loop headers and the return statement are missing from this
// extraction — presumably the first 16 pixels are done scalar and the previous
// `left` value is carried/returned; confirm against the full source.
135 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
139 const int temp= src[i];
146 const int temp= src[i];
150 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
// Left-predict a BGR32 row per channel; the first (up to) 4 pixels are handled
// scalar, the rest via diff_bytes with a 4-byte (one pixel) lag. On return,
// *red/*green/*blue hold the last pixel's channel values for the next row.
155 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
161 for(i=0; i<FFMIN(w,4); i++){
162 const int rt= src[i*4+R];
163 const int gt= src[i*4+G];
164 const int bt= src[i*4+B];
// NOTE(review): the subtraction/store statements for the scalar loop are missing
// from this extraction.
172 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
173 *red= src[(w-1)*4+R];
174 *green= src[(w-1)*4+G];
175 *blue= src[(w-1)*4+B];
// Parse a run-length coded code-length table from the bitstream into dst[256]:
// 3-bit repeat count (0 => escape to an 8-bit repeat) followed by a 5-bit length.
// Logs and (presumably) returns an error when the runs overflow 256 entries.
178 static int read_len_table(uint8_t *dst, GetBitContext *gb){
182 repeat= get_bits(gb, 3);
183 val = get_bits(gb, 5);
// repeat==0 escapes to an 8-bit repeat count for long runs.
185 repeat= get_bits(gb, 8);
186 //printf("%d %d\n", val, repeat);
188 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
// Assign canonical huffman codes: walk lengths from longest (32) to shortest,
// giving consecutive codes to all symbols of each length. Errors out (with log)
// if the length table is inconsistent.
197 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
201 for(len=32; len>0; len--){
202 for(index=0; index<256; index++){
203 if(len_table[index]==len)
207 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
215 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
// Sift-down for a binary min-heap of HeapElem keyed on .val; used by
// generate_len_table() below to repeatedly extract the two smallest nodes.
221 static void heap_sift(HeapElem *h, int root, int size)
223 while(root*2+1 < size) {
224 int child = root*2+1;
// Pick the smaller of the two children.
225 if(child < size-1 && h[child].val > h[child+1].val)
226 if(h[root].val > h[child].val) {
// NOTE(review): line content at 227 shows the comparison again in the original;
// the swap/descend statements are partially missing from this extraction.
227 if(h[root].val > h[child].val) {
// Build huffman code lengths from symbol frequencies (package-merge-style
// heap construction). The `offset` doubling loop retries with a larger bias
// until all resulting lengths fit (dst[i] < 32).
235 static void generate_len_table(uint8_t *dst, const uint64_t *stats){
242 for(offset=1; ; offset<<=1){
243 for(i=0; i<size; i++){
// Bias each count so zero-frequency symbols still get a code.
245 h[i].val = (stats[i] << 8) + offset;
// Heapify (Floyd's bottom-up build).
247 for(i=size/2-1; i>=0; i--)
248 heap_sift(h, i, size);
250 for(next=size; next<size*2-1; next++){
251 // merge the two smallest entries, and put it back in the heap
252 uint64_t min1v = h[0].val;
253 up[h[0].name] = next;
254 h[0].val = INT64_MAX;
255 heap_sift(h, 0, size);
256 up[h[0].name] = next;
259 heap_sift(h, 0, size);
// Depth of each internal node = parent depth + 1; leaves give code lengths.
263 for(i=2*size-3; i>=size; i--)
264 len[i] = len[up[i]] + 1;
265 for(i=0; i<size; i++) {
266 dst[i] = len[up[i]] + 1;
// Length >= 32 can't be represented -> retry with doubled offset.
267 if(dst[i] >= 32) break;
272 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
// Build joint (pair/triple) VLC tables so the decoder can read two YUV symbols
// or a whole decorrelated RGB pixel with a single get_vlc2() when the combined
// code fits in VLC_BITS.
274 static void generate_joint_tables(HYuvContext *s){
275 uint16_t symbols[1<<VLC_BITS];
276 uint16_t bits[1<<VLC_BITS];
277 uint8_t len[1<<VLC_BITS];
// YUV path: build YY/YU/YV pair tables (vlc[3..5]).
278 if(s->bitstream_bpp < 24){
281 for(i=y=0; y<256; y++){
282 int len0 = s->len[0][y];
// Second symbol must fit in the remaining bits of the first-level table.
283 int limit = VLC_BITS - len0;
286 for(u=0; u<256; u++){
287 int len1 = s->len[p][u];
290 len[i] = len0 + len1;
291 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
292 symbols[i] = (y<<8) + u;
293 if(symbols[i] != 0xffff) // reserved to mean "invalid"
297 free_vlc(&s->vlc[3+p]);
298 init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
// RGB path: one joint table for (g,b,r) deltas, plus a pixel lookup map.
301 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
302 int i, b, g, r, code;
// Plane order of the G channel depends on decorrelate mode.
303 int p0 = s->decorrelate;
304 int p1 = !s->decorrelate;
305 // restrict the range to +/-16 becaues that's pretty much guaranteed to
306 // cover all the combinations that fit in 11 bits total, and it doesn't
307 // matter if we miss a few rare codes.
308 for(i=0, g=-16; g<16; g++){
309 int len0 = s->len[p0][g&255];
310 int limit0 = VLC_BITS - len0;
313 for(b=-16; b<16; b++){
314 int len1 = s->len[p1][b&255];
315 int limit1 = limit0 - len1;
318 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
319 for(r=-16; r<16; r++){
320 int len2 = s->len[2][r&255];
323 len[i] = len0 + len1 + len2;
324 bits[i] = (code << len2) + s->bits[2][r&255];
// NOTE(review): the map[] population statements are missing from this extraction.
338 free_vlc(&s->vlc[3]);
339 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
// Read the three per-plane length tables from `src`, derive canonical codes,
// (re)build the individual VLCs and the joint tables, and return the number of
// bytes consumed (rounded up from the bit position).
343 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
347 init_get_bits(&gb, src, length*8);
350 if(read_len_table(s->len[i], &gb)<0)
352 if(generate_bits_table(s->bits[i], s->len[i])<0){
// Debug dump of the generated code table (normally compiled out).
356 for(j=0; j<256; j++){
357 printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
360 free_vlc(&s->vlc[i]);
361 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
364 generate_joint_tables(s);
366 return (get_bits_count(&gb)+7)/8;
// Install the fixed "classic" (pre-extradata huffyuv) tables: code lengths come
// from the RLE-coded classic_shift_* blobs, codes from classic_add_*.
369 static int read_old_huffman_tables(HYuvContext *s){
374 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
375 if(read_len_table(s->len[0], &gb)<0)
377 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
378 if(read_len_table(s->len[1], &gb)<0)
381 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
382 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
// RGB streams reuse the luma table for all planes; otherwise plane 2 copies
// the chroma table.
384 if(s->bitstream_bpp >= 24){
385 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
386 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
388 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
389 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
392 free_vlc(&s->vlc[i]);
393 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
396 generate_joint_tables(s);
// NOTE(review): this v1 log line appears to belong to a caller's error path;
// surrounding lines are missing from this extraction.
400 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
// Allocate per-plane scratch rows: one width+16 buffer per plane for planar
// (<24 bpp) formats, a single zeroed 4*width+16 buffer for packed RGB.
405 static av_cold void alloc_temp(HYuvContext *s){
408 if(s->bitstream_bpp<24){
410 s->temp[i]= av_malloc(s->width + 16);
413 s->temp[0]= av_mallocz(4*s->width + 16);
// Shared encoder/decoder init: cache flags and dimensions, set up the DSP
// context. Dimensions must already be valid (asserted).
417 static av_cold int common_init(AVCodecContext *avctx){
418 HYuvContext *s = avctx->priv_data;
421 s->flags= avctx->flags;
423 dsputil_init(&s->dsp, avctx);
425 s->width= avctx->width;
426 s->height= avctx->height;
427 assert(s->width>0 && s->height>0);
432 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
// Decoder init: parse extradata (v2+: method byte, bpp, interlace/context
// flags, huffman tables) or fall back to the classic tables, then pick the
// output pixel format from bitstream_bpp.
433 static av_cold int decode_init(AVCodecContext *avctx)
435 HYuvContext *s = avctx->priv_data;
438 memset(s->vlc, 0, 3*sizeof(VLC));
440 avctx->coded_frame= &s->picture;
// Heuristic default: content taller than 288 lines is assumed interlaced
// (may be overridden by the extradata interlace field below).
441 s->interlaced= s->height > 288;
444 //if(avctx->extradata)
445 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
446 if(avctx->extradata_size){
447 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
448 s->version=1; // do such files exist at all?
455 int method, interlace;
457 if (avctx->extradata_size < 4)
// Extradata byte 0: bit 6 = decorrelate, low 6 bits = predictor.
460 method= ((uint8_t*)avctx->extradata)[0];
461 s->decorrelate= method&64 ? 1 : 0;
462 s->predictor= method&63;
463 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
464 if(s->bitstream_bpp==0)
465 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
// Extradata byte 2: bits 4-5 interlace (1=yes, 2=no, else keep heuristic),
// bit 6 = per-frame context model.
466 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
467 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
468 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
470 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
// Legacy path: infer parameters from bits_per_coded_sample alone.
473 switch(avctx->bits_per_coded_sample&7){
484 s->decorrelate= avctx->bits_per_coded_sample >= 24;
487 s->predictor= MEDIAN;
491 s->predictor= LEFT; //OLD
495 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
498 if(read_old_huffman_tables(s) < 0)
// Map stream bpp to an output pixel format.
502 switch(s->bitstream_bpp){
504 avctx->pix_fmt = PIX_FMT_YUV420P;
508 avctx->pix_fmt = PIX_FMT_YUYV422;
510 avctx->pix_fmt = PIX_FMT_YUV422P;
516 avctx->pix_fmt = PIX_FMT_RGB32;
518 avctx->pix_fmt = PIX_FMT_BGR24;
527 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
// Frame-thread copy init: clear the (non-copyable) VLC table pointers and
// rebuild them from extradata or the classic tables for this thread's context.
532 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
534 HYuvContext *s = avctx->priv_data;
537 avctx->coded_frame= &s->picture;
540 for (i = 0; i < 6; i++)
541 s->vlc[i].table = NULL;
544 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
547 if(read_old_huffman_tables(s) < 0)
553 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
555 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
// Serialize a 256-entry code-length table into `buf` using the same RLE format
// read_len_table() parses: runs >=8 (or whatever doesn't fit in 3 bits) use an
// explicit 8-bit repeat, short runs pack repeat into the top 3 bits of the byte.
556 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
564 for(; i<256 && len[i]==val && repeat<255; i++)
567 assert(val < 32 && val >0 && repeat<256 && repeat>0);
570 buf[index++]= repeat;
572 buf[index++]= val | (repeat<<5);
// Encoder init: choose bitstream parameters from the pixel format and codec
// flags, validate huffyuv-vs-ffvhuff restrictions, write the 4-byte extradata
// header plus the serialized huffman tables, and seed the statistics.
579 static av_cold int encode_init(AVCodecContext *avctx)
581 HYuvContext *s = avctx->priv_data;
586 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
587 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
590 avctx->coded_frame= &s->picture;
592 switch(avctx->pix_fmt){
593 case PIX_FMT_YUV420P:
594 s->bitstream_bpp= 12;
596 case PIX_FMT_YUV422P:
597 s->bitstream_bpp= 16;
600 s->bitstream_bpp= 24;
603 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
606 avctx->bits_per_coded_sample= s->bitstream_bpp;
607 s->decorrelate= s->bitstream_bpp >= 24;
608 s->predictor= avctx->prediction_method;
609 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
// context=1 (adaptive per-frame tables) conflicts with 2-pass stats.
610 if(avctx->context_model==1){
611 s->context= avctx->context_model;
612 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
613 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
// The original Windows huffyuv can't do YV12 or per-frame tables; only the
// ffvhuff variant supports them.
618 if(avctx->codec->id==CODEC_ID_HUFFYUV){
619 if(avctx->pix_fmt==PIX_FMT_YUV420P){
620 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
623 if(avctx->context_model){
624 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
627 if(s->interlaced != ( s->height > 288 ))
628 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
631 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
632 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
// Extradata header: [0]=predictor|decorrelate<<6, [1]=bpp,
// [2]=interlace flag (0x10/0x20) | context bit (0x40), [3]=reserved.
636 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
637 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
638 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
640 ((uint8_t*)avctx->extradata)[2]|= 0x40;
641 ((uint8_t*)avctx->extradata)[3]= 0;
642 s->avctx->extradata_size= 4;
// 2-pass: parse accumulated symbol stats back in from stats_in.
645 char *p= avctx->stats_in;
655 for(j=0; j<256; j++){
656 s->stats[i][j]+= strtol(p, &next, 0);
657 if(next==p) return -1;
661 if(p[0]==0 || p[1]==0 || p[2]==0) break;
// No stats: seed with a plausible Laplacian-ish prior favoring small deltas.
665 for(j=0; j<256; j++){
666 int d= FFMIN(j, 256-j);
668 s->stats[i][j]= 100000000/(d+1);
673 generate_len_table(s->len[i], s->stats[i]);
675 if(generate_bits_table(s->bits[i], s->len[i])<0){
// Append each plane's serialized length table to the extradata.
679 s->avctx->extradata_size+=
680 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
// Context mode: re-seed stats scaled to the frame size for adaptation.
685 int pels = s->width*s->height / (i?40:10);
686 for(j=0; j<256; j++){
687 int d= FFMIN(j, 256-j);
688 s->stats[i][j]= pels/(d+1);
697 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
705 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
707 /* TODO instead of restarting the read when the code isn't in the first level
708 * of the joint table, jump into the 2nd level of the individual table. */
// Decode two symbols at once via the joint VLC; on a miss (the visible fallback
// path) re-read them one at a time from the individual per-plane tables.
709 #define READ_2PIX(dst0, dst1, plane1){\
710 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
715 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
716 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
// Decode `count` 4:2:2 pixel pairs (Y0 U, Y1 V) into s->temp. Near the end of
// the buffer a bounds-checked loop is used; otherwise the unchecked fast loop.
720 static void decode_422_bitstream(HYuvContext *s, int count){
// 31*4 = worst-case bits per iteration; only add the per-pixel bounds check
// when the remaining bitstream could run out.
725 if(count >= (get_bits_left(&s->gb))/(31*4)){
726 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
727 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
728 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
731 for(i=0; i<count; i++){
732 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
733 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
// Decode `count` luma-only pixel pairs into s->temp[0]; same checked/unchecked
// loop split as decode_422_bitstream (31*2 worst-case bits per pair).
738 static void decode_gray_bitstream(HYuvContext *s, int count){
743 if(count >= (get_bits_left(&s->gb))/(31*2)){
744 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
745 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
748 for(i=0; i<count; i++){
749 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
754 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
// Emit `count` 4:2:2 pixel pairs from s->temp starting at `offset` (chroma at
// offset/2). Three variants: stats-only (pass1/no-output), stats+write, write.
755 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
757 const uint8_t *y = s->temp[0] + offset;
758 const uint8_t *u = s->temp[1] + offset/2;
759 const uint8_t *v = s->temp[2] + offset/2;
// Worst-case output check before writing anything.
761 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
762 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
// Pass 1: accumulate symbol statistics (writing skipped when NO_OUTPUT).
773 if(s->flags&CODEC_FLAG_PASS1){
774 for(i=0; i<count; i++){
782 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
785 for(i=0; i<count; i++){
788 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
790 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
792 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
794 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
// Fast path: no stats gathering, just emit Y0 U Y1 V codes.
797 for(i=0; i<count; i++){
799 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
800 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
801 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
802 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
// Emit `count` luma-only pixel pairs from s->temp[0]; uses local LOAD2/STAT2/
// WRITE2-style macros (partially visible) and the same three-variant split as
// encode_422_bitstream.
808 static int encode_gray_bitstream(HYuvContext *s, int count){
811 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
812 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
817 int y0 = s->temp[0][2*i];\
818 int y1 = s->temp[0][2*i+1];
823 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
824 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
827 if(s->flags&CODEC_FLAG_PASS1){
828 for(i=0; i<count; i++){
833 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
837 for(i=0; i<count; i++){
843 for(i=0; i<count; i++){
850 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
// Decode `count` BGR(A) pixels into s->temp[0]. Tries the joint table first
// (whole pixel via pix_bgr_map); otherwise reads per-channel codes, adding G
// back to B and R when decorrelate is set. Alpha read only when `alpha`.
852 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
854 for(i=0; i<count; i++){
855 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
857 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
858 }else if(decorrelate){
859 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
860 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
861 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
863 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
864 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
865 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
868 s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
// Dispatch to decode_bgr_1 with compile-time-constant decorrelate/alpha flags
// (always_inline) so each of the four combinations gets a specialized body.
872 static void decode_bgr_bitstream(HYuvContext *s, int count){
874 if(s->bitstream_bpp==24)
875 decode_bgr_1(s, count, 1, 0);
877 decode_bgr_1(s, count, 1, 1);
879 if(s->bitstream_bpp==24)
880 decode_bgr_1(s, count, 0, 0);
882 decode_bgr_1(s, count, 0, 1);
// Emit `count` BGR pixels: G is coded directly, B and R as deltas from G
// (decorrelated, mod 256). Same stats/write variant split as the other encoders.
886 static int encode_bgr_bitstream(HYuvContext *s, int count){
889 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
890 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
// Per-pixel load + decorrelation (macro body, partially visible).
895 int g= s->temp[0][4*i+G];\
896 int b= (s->temp[0][4*i+B] - g) & 0xff;\
897 int r= (s->temp[0][4*i+R] - g) & 0xff;
903 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
904 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
905 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
907 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
908 for(i=0; i<count; i++){
912 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
913 for(i=0; i<count; i++){
919 for(i=0; i<count; i++){
927 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
// Notify the user's draw_horiz_band callback of the rows decoded since the
// last slice, computing per-plane offsets (chroma row cy halved for 4:2:0).
928 static void draw_slice(HYuvContext *s, int y){
932 if(s->avctx->draw_horiz_band==NULL)
935 h= y - s->last_slice_end;
938 if(s->bitstream_bpp==12){
944 offset[0] = s->picture.linesize[0]*y;
945 offset[1] = s->picture.linesize[1]*cy;
946 offset[2] = s->picture.linesize[2]*cy;
950 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
952 s->last_slice_end= y + h;
// Decode one huffyuv frame: byte-swap the packet into bitstream_buffer, read
// per-frame tables when context mode is on, then undo the prediction (LEFT /
// PLANE / MEDIAN) row by row for YUV or bottom-up for RGB.
// NOTE(review): many control-flow lines are missing from this extraction;
// comments below describe only the visible statements.
955 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
956 const uint8_t *buf = avpkt->data;
957 int buf_size = avpkt->size;
958 HYuvContext *s = avctx->priv_data;
959 const int width= s->width;
960 const int width2= s->width>>1;
961 const int height= s->height;
962 int fake_ystride, fake_ustride, fake_vstride;
963 AVFrame * const p= &s->picture;
966 AVFrame *picture = data;
// Grow (if needed) and fill the word-swapped shadow copy of the input.
968 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
969 if (!s->bitstream_buffer)
970 return AVERROR(ENOMEM);
972 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
973 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
976 ff_thread_release_buffer(avctx, p);
979 if(ff_thread_get_buffer(avctx, p) < 0){
980 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
// Context mode: tables are re-sent at the start of every frame.
985 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
990 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
993 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
// Interlaced streams predict from two rows up.
995 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
996 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
997 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
999 s->last_slice_end= 0;
1001 if(s->bitstream_bpp<24){
1003 int lefty, leftu, leftv;
1004 int lefttopy, lefttopu, lefttopv;
// First 4 raw bytes seed the predictors (yuy2 packed layout here).
1007 p->data[0][3]= get_bits(&s->gb, 8);
1008 p->data[0][2]= get_bits(&s->gb, 8);
1009 p->data[0][1]= get_bits(&s->gb, 8);
1010 p->data[0][0]= get_bits(&s->gb, 8);
1012 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
// Planar layout: seed left predictors from the first raw pixel values.
1016 leftv= p->data[2][0]= get_bits(&s->gb, 8);
1017 lefty= p->data[0][1]= get_bits(&s->gb, 8);
1018 leftu= p->data[1][0]= get_bits(&s->gb, 8);
1019 p->data[0][0]= get_bits(&s->gb, 8);
1021 switch(s->predictor){
// LEFT / PLANE predictors: first line is left-predicted after the raw pixels.
1024 decode_422_bitstream(s, width-2);
1025 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1026 if(!(s->flags&CODEC_FLAG_GRAY)){
1027 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1028 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1031 for(cy=y=1; y<s->height; y++,cy++){
1032 uint8_t *ydst, *udst, *vdst;
// 4:2:0: every other output line is luma-only.
1034 if(s->bitstream_bpp==12){
1035 decode_gray_bitstream(s, width);
1037 ydst= p->data[0] + p->linesize[0]*y;
1039 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1040 if(s->predictor == PLANE){
1042 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1045 if(y>=s->height) break;
1050 ydst= p->data[0] + p->linesize[0]*y;
1051 udst= p->data[1] + p->linesize[1]*cy;
1052 vdst= p->data[2] + p->linesize[2]*cy;
1054 decode_422_bitstream(s, width);
1055 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1056 if(!(s->flags&CODEC_FLAG_GRAY)){
1057 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1058 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
// PLANE: additionally add the row above (fake stride skips a field when
// interlaced).
1060 if(s->predictor == PLANE){
1061 if(cy>s->interlaced){
1062 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1063 if(!(s->flags&CODEC_FLAG_GRAY)){
1064 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1065 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1070 draw_slice(s, height);
// MEDIAN predictor path below.
1074 /* first line except first 2 pixels is left predicted */
1075 decode_422_bitstream(s, width-2);
1076 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1077 if(!(s->flags&CODEC_FLAG_GRAY)){
1078 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1079 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1084 /* second line is left predicted for interlaced case */
1086 decode_422_bitstream(s, width);
1087 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1088 if(!(s->flags&CODEC_FLAG_GRAY)){
// NOTE(review): linesize[2] for data[1] / linesize[1] for data[2] looks swapped;
// can't confirm against the full source from this fragment.
1089 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1090 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1095 /* next 4 pixels are left predicted too */
1096 decode_422_bitstream(s, 4);
1097 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1098 if(!(s->flags&CODEC_FLAG_GRAY)){
1099 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1100 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1103 /* next line except the first 4 pixels is median predicted */
1104 lefttopy= p->data[0][3];
1105 decode_422_bitstream(s, width-4);
1106 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1107 if(!(s->flags&CODEC_FLAG_GRAY)){
1108 lefttopu= p->data[1][1];
1109 lefttopv= p->data[2][1];
1110 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1111 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
// Remaining lines are fully median predicted.
1115 for(; y<height; y++,cy++){
1116 uint8_t *ydst, *udst, *vdst;
1118 if(s->bitstream_bpp==12){
1120 decode_gray_bitstream(s, width);
1121 ydst= p->data[0] + p->linesize[0]*y;
1122 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1125 if(y>=height) break;
1129 decode_422_bitstream(s, width);
1131 ydst= p->data[0] + p->linesize[0]*y;
1132 udst= p->data[1] + p->linesize[1]*cy;
1133 vdst= p->data[2] + p->linesize[2]*cy;
1135 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1136 if(!(s->flags&CODEC_FLAG_GRAY)){
1137 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1138 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1142 draw_slice(s, height);
// RGB path: stored bottom-up, seeded from the last line's raw pixel.
1148 int leftr, leftg, leftb, lefta;
1149 const int last_line= (height-1)*p->linesize[0];
1151 if(s->bitstream_bpp==32){
1152 lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
1153 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1154 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1155 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1157 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1158 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1159 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1160 lefta= p->data[0][last_line+A]= 255;
1161 skip_bits(&s->gb, 8);
1165 switch(s->predictor){
1168 decode_bgr_bitstream(s, width-1);
1169 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
1171 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1172 decode_bgr_bitstream(s, width);
1174 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1175 if(s->predictor == PLANE){
1176 if(s->bitstream_bpp!=32) lefta=0;
1177 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1178 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1179 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1183 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1186 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1190 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1197 *data_size = sizeof(AVFrame);
// Consumed bytes, rounded up to the 32-bit words that were byte-swapped.
1199 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1201 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
// Shared teardown: release the per-plane scratch buffers allocated by alloc_temp().
1203 static int common_end(HYuvContext *s){
1207 av_freep(&s->temp[i]);
1212 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
// Decoder teardown: release any held frame, the byte-swap buffer, and the VLCs.
1213 static av_cold int decode_end(AVCodecContext *avctx)
1215 HYuvContext *s = avctx->priv_data;
1218 if (s->picture.data[0])
1219 avctx->release_buffer(avctx, &s->picture);
1222 av_freep(&s->bitstream_buffer);
1225 free_vlc(&s->vlc[i]);
1230 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1232 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
// Encode one frame: (context mode) regenerate and store per-frame tables,
// apply the chosen predictor per row, entropy-code the residuals, then
// byte-swap the output words. NOTE(review): several control-flow lines are
// missing from this extraction; comments describe only visible statements.
1233 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1234 HYuvContext *s = avctx->priv_data;
1235 AVFrame *pict = data;
1236 const int width= s->width;
1237 const int width2= s->width>>1;
1238 const int height= s->height;
1239 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1240 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1241 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1242 AVFrame * const p= &s->picture;
1246 p->pict_type= FF_I_TYPE;
// Context mode: rebuild tables from current stats and emit them in-band,
// then halve the stats so they adapt over time.
1251 generate_len_table(s->len[i], s->stats[i]);
1252 if(generate_bits_table(s->bits[i], s->len[i])<0)
1254 size+= store_table(s, s->len[i], &buf[size]);
1258 for(j=0; j<256; j++)
1259 s->stats[i][j] >>= 1;
1262 init_put_bits(&s->pb, buf+size, buf_size-size);
1264 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1265 int lefty, leftu, leftv, y, cy;
// Raw seed pixels, in the same order decode_frame reads them back.
1267 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1268 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1269 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1270 put_bits(&s->pb, 8, p->data[0][0]);
// First line: pure left prediction.
1272 lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1273 leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1274 leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1276 encode_422_bitstream(s, 2, width-2);
1278 if(s->predictor==MEDIAN){
1279 int lefttopy, lefttopu, lefttopv;
// Second line (interlaced case) is still left predicted.
1282 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1283 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1284 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1286 encode_422_bitstream(s, 0, width);
// First 4 pixels of the next line are left predicted too.
1290 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1291 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1292 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1294 encode_422_bitstream(s, 0, 4);
// Rest of that line is median predicted.
1296 lefttopy= p->data[0][3];
1297 lefttopu= p->data[1][1];
1298 lefttopv= p->data[2][1];
1299 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1300 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1301 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1302 encode_422_bitstream(s, 0, width-4);
1305 for(; y<height; y++,cy++){
1306 uint8_t *ydst, *udst, *vdst;
// 4:2:0 luma-only line.
1308 if(s->bitstream_bpp==12){
1310 ydst= p->data[0] + p->linesize[0]*y;
1311 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1312 encode_gray_bitstream(s, width);
1315 if(y>=height) break;
1317 ydst= p->data[0] + p->linesize[0]*y;
1318 udst= p->data[1] + p->linesize[1]*cy;
1319 vdst= p->data[2] + p->linesize[2]*cy;
1321 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1322 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1323 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1325 encode_422_bitstream(s, 0, width);
// LEFT / PLANE predictor path.
1328 for(cy=y=1; y<height; y++,cy++){
1329 uint8_t *ydst, *udst, *vdst;
1331 /* encode a luma only line & y++ */
1332 if(s->bitstream_bpp==12){
1333 ydst= p->data[0] + p->linesize[0]*y;
// PLANE: subtract the row above first, then left-predict the difference.
1335 if(s->predictor == PLANE && s->interlaced < y){
1336 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1338 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1340 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1342 encode_gray_bitstream(s, width);
1344 if(y>=height) break;
1347 ydst= p->data[0] + p->linesize[0]*y;
1348 udst= p->data[1] + p->linesize[1]*cy;
1349 vdst= p->data[2] + p->linesize[2]*cy;
1351 if(s->predictor == PLANE && s->interlaced < cy){
1352 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1353 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
// V residual shares temp[2] at offset width2.
1354 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1356 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1357 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1358 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1360 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1361 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1362 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1365 encode_422_bitstream(s, 0, width);
// RGB path: encode bottom-up (negative stride).
1368 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1369 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1370 const int stride = -p->linesize[0];
1371 const int fake_stride = -fake_ystride;
1373 int leftr, leftg, leftb;
1375 put_bits(&s->pb, 8, leftr= data[R]);
1376 put_bits(&s->pb, 8, leftg= data[G]);
1377 put_bits(&s->pb, 8, leftb= data[B]);
1378 put_bits(&s->pb, 8, 0);
1380 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1381 encode_bgr_bitstream(s, width-1);
1383 for(y=1; y<s->height; y++){
1384 uint8_t *dst = data + y*stride;
1385 if(s->predictor == PLANE && s->interlaced < y){
1386 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1387 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1389 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1391 encode_bgr_bitstream(s, width);
1394 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
// Pad/flush accounting for the trailing alignment bits.
1398 size+= (put_bits_count(&s->pb)+31)/8;
1399 put_bits(&s->pb, 16, 0);
1400 put_bits(&s->pb, 15, 0);
// Pass 1: dump accumulated stats to stats_out every 32 frames.
1403 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1405 char *p= avctx->stats_out;
1406 char *end= p + 1024*30;
1408 for(j=0; j<256; j++){
1409 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1413 snprintf(p, end-p, "\n");
1417 avctx->stats_out[0] = '\0';
// Output words are byte-swapped to match the decoder's bswap.
1418 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1419 flush_put_bits(&s->pb);
1420 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1423 s->picture_number++;
// Encoder teardown: free extradata and the 2-pass stats buffer (common_end
// presumably frees the scratch rows; its call site is not visible here).
1428 static av_cold int encode_end(AVCodecContext *avctx)
1430 HYuvContext *s = avctx->priv_data;
1434 av_freep(&avctx->extradata);
1435 av_freep(&avctx->stats_out);
1439 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
// Codec registration entries (old pre-AVClass AVCodec initializer style).
// NOTE(review): most positional fields (name, type, id, callbacks) are missing
// from this extraction.
1441 #if CONFIG_HUFFYUV_DECODER
1442 AVCodec ff_huffyuv_decoder = {
1446 sizeof(HYuvContext),
1451 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1453 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1454 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1458 #if CONFIG_FFVHUFF_DECODER
1459 AVCodec ff_ffvhuff_decoder = {
1463 sizeof(HYuvContext),
1468 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1470 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1471 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1475 #if CONFIG_HUFFYUV_ENCODER
1476 AVCodec ff_huffyuv_encoder = {
1480 sizeof(HYuvContext),
1484 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1485 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1489 #if CONFIG_FFVHUFF_ENCODER
1490 AVCodec ff_ffvhuff_encoder = {
1494 sizeof(HYuvContext),
// ffvhuff additionally supports YUV420P, unlike the stock huffyuv encoder.
1498 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1499 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),