2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
28 * huffyuv codec for libavcodec.
/* Prediction modes used by the codec; enum members are not visible in this fragment. */
51 typedef enum Predictor{
/* Per-instance state shared by the huffyuv encoder and decoder.
 * Only a subset of the fields is visible in this fragment. */
57 typedef struct HYuvContext{
58 AVCodecContext *avctx;
66 int yuy2; //use yuy2 instead of 422P
67 int bgr32; //use bgr32 instead of bgr24
/* Per-plane symbol occurrence counts (3 tables x 256 symbols), used to build Huffman tables. */
74 uint64_t stats[3][256];
/* Per-plane Huffman code values; lengths live in a matching s->len table (declared in missing lines). */
76 uint32_t bits[3][256];
/* Maps a joint RGB VLC index back to packed pixel bytes; see generate_joint_tables(). */
77 uint32_t pix_bgr_map[1<<VLC_BITS];
78 VLC vlc[6]; //Y,U,V,YY,YU,YV
/* Byte-swapped copy of the input packet (frames are stored as big-endian 32-bit words). */
80 uint8_t *bitstream_buffer;
81 unsigned int bitstream_buffer_size;
/* Canned bitstream from which the classic (pre-extradata, "v2 classic") luma
 * Huffman code lengths are decoded via read_len_table(); see
 * read_old_huffman_tables(). Table is truncated in this fragment. */
85 static const unsigned char classic_shift_luma[] = {
86 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
87 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
/* Canned bitstream for the classic chroma Huffman code lengths, decoded with
 * read_len_table() in read_old_huffman_tables(). Closing brace not visible here. */
91 static const unsigned char classic_shift_chroma[] = {
92 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
93 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
94 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
/* Classic luma Huffman code values (one per symbol, 256 entries); copied into
 * s->bits[0] by read_old_huffman_tables(). */
97 static const unsigned char classic_add_luma[256] = {
98 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
99 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
100 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
101 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
102 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
103 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
104 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
105 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
106 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
107 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
108 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
109 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
110 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
111 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
112 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
113 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
/* Classic chroma Huffman code values (256 entries); copied into s->bits[1] by
 * read_old_huffman_tables(). */
116 static const unsigned char classic_add_chroma[256] = {
117 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
118 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
119 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
120 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
121 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
122 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
123 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
124 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
125 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
126 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
127 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
128 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
129 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
130 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
131 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
132 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
/* Left-predicts one line of a single plane: dst[i] becomes the difference of
 * src[i] and its left neighbour; 'left' seeds the first pixel. Callers
 * (see encode_frame) feed the return value back as next line's 'left', so it
 * presumably returns the line's last source pixel — body is partly missing here.
 * The bulk of the line (from pixel 16 on) is delegated to dsp.diff_bytes(). */
135 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
139 const int temp= src[i];
146 const int temp= src[i];
150 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
/* Left-predicts one line of interleaved 32-bit BGR pixels. *red/*green/*blue
 * carry the previous pixel's components in and are updated to the last pixel
 * of this line on exit (so prediction chains across lines). The first few
 * pixels are handled scalar; the rest via dsp.diff_bytes() with a 4-byte
 * (one pixel) backward offset. Body is partly missing in this fragment. */
155 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
161 for(i=0; i<FFMIN(w,4); i++){
162 const int rt= src[i*4+R];
163 const int gt= src[i*4+G];
164 const int bt= src[i*4+B];
172 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
173 *red= src[(w-1)*4+R];
174 *green= src[(w-1)*4+G];
175 *blue= src[(w-1)*4+B];
/* Decodes a 256-entry Huffman code-length table from the bitstream.
 * Entries are stored run-length encoded as (3-bit repeat, 5-bit length)
 * pairs; an 8-bit extended repeat form also exists (the selecting condition
 * is in lines missing from this fragment). Returns < 0 on malformed input. */
178 static int read_len_table(uint8_t *dst, GetBitContext *gb){
182 repeat= get_bits(gb, 3);
183 val = get_bits(gb, 5);
/* extended repeat count when the short 3-bit form is insufficient */
185 repeat= get_bits(gb, 8);
186 //printf("%d %d\n", val, repeat);
188 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
/* Assigns canonical Huffman code values in dst[] from the given code lengths,
 * iterating from the longest length (32) down to 1 so codes are allocated in
 * canonical order. Returns < 0 when the lengths do not form a valid code. */
197 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
201 for(len=32; len>0; len--){
202 for(index=0; index<256; index++){
203 if(len_table[index]==len)
207 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
215 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Min-heap sift-down on h[] keyed by .val: pushes h[root] down until the heap
 * property holds. Used by generate_len_table() to repeatedly extract the two
 * smallest weights. (Some interior lines are missing from this fragment.) */
221 static void heap_sift(HeapElem *h, int root, int size)
223 while(root*2+1 < size) {
224 int child = root*2+1;
/* pick the smaller of the two children */
225 if(child < size-1 && h[child].val > h[child+1].val)
227 if(h[root].val > h[child].val) {
228 FFSWAP(HeapElem, h[root], h[child]);
/* Builds Huffman code lengths in dst[] from 64-bit symbol statistics using a
 * heap-based Huffman construction. The outer loop retries with a growing
 * 'offset' bias added to each weight: if any resulting length reaches 32
 * (see the break below), the biased retry flattens the tree until all
 * lengths fit. Several interior lines are missing from this fragment. */
235 static void generate_len_table(uint8_t *dst, const uint64_t *stats){
242 for(offset=1; ; offset<<=1){
243 for(i=0; i<size; i++){
/* weight = stat scaled up, plus the current flattening bias */
245 h[i].val = (stats[i] << 8) + offset;
/* heapify */
247 for(i=size/2-1; i>=0; i--)
248 heap_sift(h, i, size);
250 for(next=size; next<size*2-1; next++){
251 // merge the two smallest entries, and put it back in the heap
252 uint64_t min1v = h[0].val;
253 up[h[0].name] = next;
254 h[0].val = INT64_MAX;
255 heap_sift(h, 0, size);
256 up[h[0].name] = next;
259 heap_sift(h, 0, size);
/* walk the merge tree from the root down, accumulating depths = code lengths */
263 for(i=2*size-3; i>=size; i--)
264 len[i] = len[up[i]] + 1;
265 for(i=0; i<size; i++) {
266 dst[i] = len[up[i]] + 1;
/* a length >= 32 cannot be coded: abort this pass and retry with larger offset */
267 if(dst[i] >= 32) break;
272 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Builds the "joint" VLC tables that decode two (YUV) or three (RGB) symbols
 * in a single table lookup, for all symbol combinations whose total code
 * length fits in VLC_BITS. For YUV this fills s->vlc[4]/s->vlc[5] (Y+U, Y+V
 * pairs); for RGB it fills s->vlc[3] plus the pix_bgr_map[] lookup used by
 * decode_bgr_1(). Some interior lines are missing from this fragment. */
274 static void generate_joint_tables(HYuvContext *s){
275 uint16_t symbols[1<<VLC_BITS];
276 uint16_t bits[1<<VLC_BITS];
277 uint8_t len[1<<VLC_BITS];
278 if(s->bitstream_bpp < 24){
/* YUV path: enumerate all (luma, chroma) pairs that fit in VLC_BITS */
281 for(i=y=0; y<256; y++){
282 int len0 = s->len[0][y];
283 int limit = VLC_BITS - len0;
286 for(u=0; u<256; u++){
287 int len1 = s->len[p][u];
/* concatenated code: luma code followed by chroma code */
290 len[i] = len0 + len1;
291 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
292 symbols[i] = (y<<8) + u;
293 if(symbols[i] != 0xffff) // reserved to mean "invalid"
297 free_vlc(&s->vlc[3+p]);
298 init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
/* RGB path: pix_bgr_map is reinterpreted as an array of 4-byte pixel entries */
301 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
302 int i, b, g, r, code;
/* plane order of G/B tables swaps when decorrelation is on */
303 int p0 = s->decorrelate;
304 int p1 = !s->decorrelate;
305 // restrict the range to +/-16 because that's pretty much guaranteed to
306 // cover all the combinations that fit in 11 bits total, and it doesn't
307 // matter if we miss a few rare codes.
308 for(i=0, g=-16; g<16; g++){
309 int len0 = s->len[p0][g&255];
310 int limit0 = VLC_BITS - len0;
313 for(b=-16; b<16; b++){
314 int len1 = s->len[p1][b&255];
315 int limit1 = limit0 - len1;
318 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
319 for(r=-16; r<16; r++){
320 int len2 = s->len[2][r&255];
323 len[i] = len0 + len1 + len2;
324 bits[i] = (code << len2) + s->bits[2][r&255];
338 free_vlc(&s->vlc[3]);
339 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
/* Parses the three per-plane Huffman length tables from src (extradata or a
 * per-frame header), derives code values and per-plane VLCs from them, then
 * builds the joint tables. Returns the number of bytes consumed, or < 0 on
 * malformed tables. Some interior lines are missing from this fragment. */
343 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
347 init_get_bits(&gb, src, length*8);
350 if(read_len_table(s->len[i], &gb)<0)
352 if(generate_bits_table(s->bits[i], s->len[i])<0){
/* rebuild the per-plane VLC from the fresh len/bits tables */
355 free_vlc(&s->vlc[i]);
356 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
359 generate_joint_tables(s);
/* bytes consumed, rounded up to whole bytes */
361 return (get_bits_count(&gb)+7)/8;
/* Installs the hardcoded "classic" Huffman tables used by old huffyuv files
 * that carry no tables in extradata: lengths come from the canned
 * classic_shift_* bitstreams, code values from classic_add_*. For RGB
 * (>= 24 bpp) all three planes share the luma table; otherwise plane 2
 * reuses the chroma table. Some interior lines are missing from this fragment. */
364 static int read_old_huffman_tables(HYuvContext *s){
369 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
370 if(read_len_table(s->len[0], &gb)<0)
372 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
373 if(read_len_table(s->len[1], &gb)<0)
376 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
377 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
379 if(s->bitstream_bpp >= 24){
380 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
381 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
383 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
384 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
387 free_vlc(&s->vlc[i]);
388 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
391 generate_joint_tables(s);
395 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
/* Allocates the per-line scratch buffers in s->temp[]: one (width+16)-byte
 * buffer per plane for planar (< 24 bpp) formats, or a single zeroed
 * 4*width+16 buffer for packed RGB. The +16 provides slack for the dsputil
 * routines that process 16 bytes at a time (see diff_bytes usage above). */
400 static av_cold void alloc_temp(HYuvContext *s){
403 if(s->bitstream_bpp<24){
405 s->temp[i]= av_malloc(s->width + 16);
408 s->temp[0]= av_mallocz(4*s->width + 16);
/* Shared encoder/decoder initialization: caches flags and frame dimensions in
 * the context and initializes the DSP function pointers. */
412 static av_cold int common_init(AVCodecContext *avctx){
413 HYuvContext *s = avctx->priv_data;
416 s->flags= avctx->flags;
418 dsputil_init(&s->dsp, avctx);
420 s->width= avctx->width;
421 s->height= avctx->height;
422 assert(s->width>0 && s->height>0);
427 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder init: parses the 4-byte extradata header (method/predictor byte,
 * bitstream bpp, interlace + context flags) followed by the Huffman tables,
 * or falls back to bits_per_coded_sample heuristics and the classic tables
 * for old files. Finally selects the output pixel format from bitstream_bpp.
 * Many interior lines are missing from this fragment. */
428 static av_cold int decode_init(AVCodecContext *avctx)
430 HYuvContext *s = avctx->priv_data;
/* NOTE(review): only the first 3 of the 6 s->vlc entries are zeroed here,
 * while generate_joint_tables() calls free_vlc() on vlc[3..5] — verify the
 * joint entries are zero-initialized before first use (priv_data may be
 * zeroed by the caller; cannot confirm from this fragment). */
433 memset(s->vlc, 0, 3*sizeof(VLC));
435 avctx->coded_frame= &s->picture;
436 avcodec_get_frame_defaults(&s->picture);
/* heuristic: material taller than 288 lines is assumed interlaced unless
 * the extradata says otherwise below */
437 s->interlaced= s->height > 288;
440 //if(avctx->extradata)
441 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
442 if(avctx->extradata_size){
443 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
444 s->version=1; // do such files exist at all?
451 int method, interlace;
453 if (avctx->extradata_size < 4)
/* extradata byte 0: bit 6 = RGB decorrelate flag, bits 0-5 = predictor */
456 method= ((uint8_t*)avctx->extradata)[0];
457 s->decorrelate= method&64 ? 1 : 0;
458 s->predictor= method&63;
459 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
460 if(s->bitstream_bpp==0)
461 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
/* extradata byte 2: bits 4-5 = interlace override, bit 6 = per-frame context */
462 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
463 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
464 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
466 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
/* no extradata header: derive mode from bits_per_coded_sample */
469 switch(avctx->bits_per_coded_sample&7){
480 s->decorrelate= avctx->bits_per_coded_sample >= 24;
483 s->predictor= MEDIAN;
487 s->predictor= LEFT; //OLD
491 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
494 if(read_old_huffman_tables(s) < 0)
498 switch(s->bitstream_bpp){
500 avctx->pix_fmt = PIX_FMT_YUV420P;
504 avctx->pix_fmt = PIX_FMT_YUYV422;
506 avctx->pix_fmt = PIX_FMT_YUV422P;
512 avctx->pix_fmt = PIX_FMT_RGB32;
514 avctx->pix_fmt = PIX_FMT_BGR24;
523 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
/* Frame-threading init for a per-thread context copy: resets the VLC pointers
 * (so this thread builds its own tables) and re-reads the Huffman tables.
 * Several interior lines are missing from this fragment. */
528 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
530 HYuvContext *s = avctx->priv_data;
533 avctx->coded_frame= &s->picture;
536 for (i = 0; i < 6; i++)
537 s->vlc[i].table = NULL;
/* NOTE(review): decode_init() passes avctx->extradata_size-4 as the length
 * here (it skips the 4-byte header), but this call passes the full
 * extradata_size — looks like an off-by-4; confirm against decode_init(). */
540 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
543 if(read_old_huffman_tables(s) < 0)
549 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
551 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Run-length encodes one 256-entry code-length table into buf for extradata /
 * per-frame headers: runs of equal lengths become either a separate repeat
 * byte or a packed (val | repeat<<5) byte for short runs. Returns the number
 * of bytes written (return line not visible in this fragment). */
552 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
560 for(; i<256 && len[i]==val && repeat<255; i++)
/* lengths must fit in 5 bits and be nonzero for the packed encoding */
563 assert(val < 32 && val >0 && repeat<256 && repeat>0);
566 buf[index++]= repeat;
568 buf[index++]= val | (repeat<<5);
/* Encoder init: chooses bitstream bpp from the input pixel format, validates
 * option combinations (huffyuv vs ffvhuff capabilities, context model vs
 * 2-pass), writes the 4-byte extradata header, seeds the symbol statistics
 * (from stats_in when doing pass-2, otherwise with a synthetic distribution),
 * and builds + stores the Huffman tables. Many interior lines are missing. */
575 static av_cold int encode_init(AVCodecContext *avctx)
577 HYuvContext *s = avctx->priv_data;
582 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
583 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
586 avctx->coded_frame= &s->picture;
588 switch(avctx->pix_fmt){
589 case PIX_FMT_YUV420P:
590 s->bitstream_bpp= 12;
592 case PIX_FMT_YUV422P:
593 s->bitstream_bpp= 16;
596 s->bitstream_bpp= 24;
599 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
602 avctx->bits_per_coded_sample= s->bitstream_bpp;
/* RGB streams always use the G/B-G/R-G decorrelated representation */
603 s->decorrelate= s->bitstream_bpp >= 24;
604 s->predictor= avctx->prediction_method;
605 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
606 if(avctx->context_model==1){
607 s->context= avctx->context_model;
608 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
609 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
/* restrictions of the original (non-FFmpeg-variant) huffyuv format */
614 if(avctx->codec->id==CODEC_ID_HUFFYUV){
615 if(avctx->pix_fmt==PIX_FMT_YUV420P){
616 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
619 if(avctx->context_model){
620 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
623 if(s->interlaced != ( s->height > 288 ))
624 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
627 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
628 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
/* extradata header layout mirrors what decode_init() parses */
632 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
633 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
634 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
636 ((uint8_t*)avctx->extradata)[2]|= 0x40;
637 ((uint8_t*)avctx->extradata)[3]= 0;
638 s->avctx->extradata_size= 4;
/* pass-2: accumulate statistics collected during pass 1 */
641 char *p= avctx->stats_in;
651 for(j=0; j<256; j++){
652 s->stats[i][j]+= strtol(p, &next, 0);
653 if(next==p) return -1;
657 if(p[0]==0 || p[1]==0 || p[2]==0) break;
/* no stats available: seed with a distribution that favors small residuals */
661 for(j=0; j<256; j++){
662 int d= FFMIN(j, 256-j);
664 s->stats[i][j]= 100000000/(d+1);
669 generate_len_table(s->len[i], s->stats[i]);
671 if(generate_bits_table(s->bits[i], s->len[i])<0){
675 s->avctx->extradata_size+=
676 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
/* context mode: reset stats to a size-scaled synthetic distribution */
681 int pels = s->width*s->height / (i?40:10);
682 for(j=0; j<256; j++){
683 int d= FFMIN(j, 256-j);
684 s->stats[i][j]= pels/(d+1);
701 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
703 /* TODO instead of restarting the read when the code isn't in the first level
704 * of the joint table, jump into the 2nd level of the individual table. */
/* Decodes two symbols at once: first tries the joint VLC table (vlc[3+plane1],
 * symbol = (first<<8)|second); on a miss (condition in missing lines) it falls
 * back to two separate per-plane VLC reads. */
705 #define READ_2PIX(dst0, dst1, plane1){\
706 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
711 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
712 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
/* Decodes 'count' 4:2:2 pixel pairs into s->temp[0..2] (Y into temp[0],
 * U/V into temp[1]/temp[2]). Near the end of the buffer (the 31*4 worst-case
 * bits-per-iteration bound) the slow loop additionally checks the bit
 * position each iteration to avoid overreading. */
716 static void decode_422_bitstream(HYuvContext *s, int count){
721 if(count >= (get_bits_left(&s->gb))/(31*4)){
722 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
723 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
724 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
/* fast path: enough bits guaranteed, no per-iteration bounds check */
727 for(i=0; i<count; i++){
728 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
729 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
/* Decodes 'count' luma-only pixels (pairs via the Y+Y joint table) into
 * s->temp[0]; same bounded/fast dual-loop structure as decode_422_bitstream. */
734 static void decode_gray_bitstream(HYuvContext *s, int count){
739 if(count >= (get_bits_left(&s->gb))/(31*2)){
740 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
741 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
744 for(i=0; i<count; i++){
745 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
750 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Entropy-codes 'count' 4:2:2 pixel pairs from s->temp[] into the PutBit
 * context, with an output-space guard up front. Separate loops handle the
 * pass-1 statistics-gathering / no-output case and the two emit variants
 * (the distinction between them is in lines missing from this fragment). */
751 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
753 const uint8_t *y = s->temp[0] + offset;
754 const uint8_t *u = s->temp[1] + offset/2;
755 const uint8_t *v = s->temp[2] + offset/2;
/* worst case: 4 symbols x up to 2 bytes of code each per iteration */
757 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
758 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
769 if(s->flags&CODEC_FLAG_PASS1){
770 for(i=0; i<count; i++){
778 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
781 for(i=0; i<count; i++){
782 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
786 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
788 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
790 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
793 for(i=0; i<count; i++){
795 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
796 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
797 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
798 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
/* Entropy-codes 'count' luma-only pixels from s->temp[0]. The LOAD2/STAT2/
 * WRITE2 helper macros (partially visible below, with line continuations)
 * factor the per-pair load, statistics update, and bit emission shared by
 * the pass-1 / context / plain loops. */
804 static int encode_gray_bitstream(HYuvContext *s, int count){
807 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
808 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
813 int y0 = s->temp[0][2*i];\
814 int y1 = s->temp[0][2*i+1];
819 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
820 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
823 if(s->flags&CODEC_FLAG_PASS1){
824 for(i=0; i<count; i++){
829 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
833 for(i=0; i<count; i++){
839 for(i=0; i<count; i++){
846 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Decodes 'count' BGR32 pixels into s->temp[0]. Fast path: a joint-table hit
 * yields a whole precomputed pixel from pix_bgr_map. Otherwise the three
 * components are decoded individually; with 'decorrelate', B and R are stored
 * as differences from G and reconstructed here. 'alpha' selects whether an
 * alpha symbol is also decoded. Both template parameters are compile-time
 * constants via av_always_inline (see decode_bgr_bitstream). */
848 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
850 for(i=0; i<count; i++){
851 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
853 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
854 }else if(decorrelate){
855 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
856 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
857 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
859 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
860 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
861 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
864 s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
/* Dispatches to decode_bgr_1 with compile-time decorrelate/alpha flags so each
 * of the four combinations gets its own specialized inlined loop. */
868 static void decode_bgr_bitstream(HYuvContext *s, int count){
870 if(s->bitstream_bpp==24)
871 decode_bgr_1(s, count, 1, 0);
873 decode_bgr_1(s, count, 1, 1);
875 if(s->bitstream_bpp==24)
876 decode_bgr_1(s, count, 0, 0);
878 decode_bgr_1(s, count, 0, 1);
/* Entropy-codes 'count' BGR32 pixels from s->temp[0]. The LOAD3/STAT3/WRITE3
 * style helper macros below (note the line continuations) decorrelate B and R
 * against G before coding, matching decode_bgr_1's reconstruction. */
882 static int encode_bgr_bitstream(HYuvContext *s, int count){
885 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
886 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
891 int g= s->temp[0][4*i+G];\
892 int b= (s->temp[0][4*i+B] - g) & 0xff;\
893 int r= (s->temp[0][4*i+R] - g) & 0xff;
899 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
900 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
901 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
/* pass-1 + no-output: only gather statistics */
903 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
904 for(i=0; i<count; i++){
908 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
909 for(i=0; i<count; i++){
915 for(i=0; i<count; i++){
923 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Reports the rows decoded since the last call to the user's draw_horiz_band
 * callback (no-op when the callback is unset), then records the new slice end.
 * For 4:2:0 (bpp==12) the chroma row 'cy' is derived in missing lines. */
924 static void draw_slice(HYuvContext *s, int y){
928 if(s->avctx->draw_horiz_band==NULL)
931 h= y - s->last_slice_end;
934 if(s->bitstream_bpp==12){
940 offset[0] = s->picture.linesize[0]*y;
941 offset[1] = s->picture.linesize[1]*cy;
942 offset[2] = s->picture.linesize[2]*cy;
946 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
948 s->last_slice_end= y + h;
/* Decodes one huffyuv frame: byte-swaps the packet into bitstream_buffer
 * (stream is stored as big-endian 32-bit words), optionally re-reads per-frame
 * Huffman tables (context mode), then reconstructs the image plane by plane
 * using the configured predictor (LEFT / PLANE / MEDIAN for YUV, LEFT / PLANE
 * for RGB, which is stored bottom-up). Returns the number of consumed bytes.
 * Many interior lines (case labels, closing braces, error paths) are missing
 * from this fragment. */
951 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
952 const uint8_t *buf = avpkt->data;
953 int buf_size = avpkt->size;
954 HYuvContext *s = avctx->priv_data;
955 const int width= s->width;
956 const int width2= s->width>>1;
957 const int height= s->height;
958 int fake_ystride, fake_ustride, fake_vstride;
959 AVFrame * const p= &s->picture;
962 AVFrame *picture = data;
964 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
965 if (!s->bitstream_buffer)
966 return AVERROR(ENOMEM);
968 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
/* undo the encoder's 32-bit word byte swap (see encode_frame) */
969 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
972 ff_thread_release_buffer(avctx, p);
975 if(ff_thread_get_buffer(avctx, p) < 0){
976 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
/* context mode: tables are re-sent at the start of each frame */
981 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
986 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
989 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
/* interlaced material predicts from 2 lines up (same field) */
991 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
992 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
993 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
995 s->last_slice_end= 0;
997 if(s->bitstream_bpp<24){
999 int lefty, leftu, leftv;
1000 int lefttopy, lefttopu, lefttopv;
/* first 4 raw bytes seed the predictors (yuy2 packed layout) */
1003 p->data[0][3]= get_bits(&s->gb, 8);
1004 p->data[0][2]= get_bits(&s->gb, 8);
1005 p->data[0][1]= get_bits(&s->gb, 8);
1006 p->data[0][0]= get_bits(&s->gb, 8);
1008 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
/* planar layout: V, Y1, U, Y0 seed values */
1012 leftv= p->data[2][0]= get_bits(&s->gb, 8);
1013 lefty= p->data[0][1]= get_bits(&s->gb, 8);
1014 leftu= p->data[1][0]= get_bits(&s->gb, 8);
1015 p->data[0][0]= get_bits(&s->gb, 8);
1017 switch(s->predictor){
/* LEFT / PLANE: first line is left-predicted past the 2 seed pixels */
1020 decode_422_bitstream(s, width-2);
1021 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1022 if(!(s->flags&CODEC_FLAG_GRAY)){
1023 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1024 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1027 for(cy=y=1; y<s->height; y++,cy++){
1028 uint8_t *ydst, *udst, *vdst;
/* 4:2:0: every other line carries luma only */
1030 if(s->bitstream_bpp==12){
1031 decode_gray_bitstream(s, width);
1033 ydst= p->data[0] + p->linesize[0]*y;
1035 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1036 if(s->predictor == PLANE){
1038 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1041 if(y>=s->height) break;
1046 ydst= p->data[0] + p->linesize[0]*y;
1047 udst= p->data[1] + p->linesize[1]*cy;
1048 vdst= p->data[2] + p->linesize[2]*cy;
1050 decode_422_bitstream(s, width);
1051 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1052 if(!(s->flags&CODEC_FLAG_GRAY)){
1053 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1054 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
/* PLANE predictor additionally adds the line above (same field) */
1056 if(s->predictor == PLANE){
1057 if(cy>s->interlaced){
1058 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1059 if(!(s->flags&CODEC_FLAG_GRAY)){
1060 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1061 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1066 draw_slice(s, height);
/* MEDIAN predictor path */
1070 /* first line except first 2 pixels is left predicted */
1071 decode_422_bitstream(s, width-2);
1072 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1073 if(!(s->flags&CODEC_FLAG_GRAY)){
1074 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1075 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1080 /* second line is left predicted for interlaced case */
1082 decode_422_bitstream(s, width);
1083 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1084 if(!(s->flags&CODEC_FLAG_GRAY)){
/* NOTE(review): linesize[2] is used with data[1] and linesize[1] with
 * data[2] below — indices look swapped (harmless only if the chroma
 * linesizes are equal); confirm intent. */
1085 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1086 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1091 /* next 4 pixels are left predicted too */
1092 decode_422_bitstream(s, 4);
1093 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1094 if(!(s->flags&CODEC_FLAG_GRAY)){
1095 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1096 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1099 /* next line except the first 4 pixels is median predicted */
1100 lefttopy= p->data[0][3];
1101 decode_422_bitstream(s, width-4);
1102 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1103 if(!(s->flags&CODEC_FLAG_GRAY)){
1104 lefttopu= p->data[1][1];
1105 lefttopv= p->data[2][1];
1106 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1107 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
/* remaining lines are fully median predicted */
1111 for(; y<height; y++,cy++){
1112 uint8_t *ydst, *udst, *vdst;
1114 if(s->bitstream_bpp==12){
1116 decode_gray_bitstream(s, width);
1117 ydst= p->data[0] + p->linesize[0]*y;
1118 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1121 if(y>=height) break;
1125 decode_422_bitstream(s, width);
1127 ydst= p->data[0] + p->linesize[0]*y;
1128 udst= p->data[1] + p->linesize[1]*cy;
1129 vdst= p->data[2] + p->linesize[2]*cy;
1131 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1132 if(!(s->flags&CODEC_FLAG_GRAY)){
1133 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1134 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1138 draw_slice(s, height);
/* RGB path */
1144 int leftr, leftg, leftb, lefta;
1145 const int last_line= (height-1)*p->linesize[0];
1147 if(s->bitstream_bpp==32){
1148 lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
1149 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1150 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1151 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1153 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1154 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1155 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1156 lefta= p->data[0][last_line+A]= 255;
1157 skip_bits(&s->gb, 8);
1161 switch(s->predictor){
1164 decode_bgr_bitstream(s, width-1);
1165 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
1167 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1168 decode_bgr_bitstream(s, width);
1170 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1171 if(s->predictor == PLANE){
1172 if(s->bitstream_bpp!=32) lefta=0;
1173 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1174 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1175 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1179 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1182 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1186 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1193 *data_size = sizeof(AVFrame);
/* bytes consumed, rounded up to a whole 32-bit word (stream is word-swapped) */
1195 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1197 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
/* Shared encoder/decoder teardown: releases the per-line temp buffers.
 * (Loop header and return are in lines missing from this fragment.) */
1199 static int common_end(HYuvContext *s){
1203 av_freep(&s->temp[i]);
1208 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder teardown: releases any held reference frame, the swap buffer, and
 * all VLC tables (loop header is in lines missing from this fragment). */
1209 static av_cold int decode_end(AVCodecContext *avctx)
1211 HYuvContext *s = avctx->priv_data;
1214 if (s->picture.data[0])
1215 avctx->release_buffer(avctx, &s->picture);
1218 av_freep(&s->bitstream_buffer);
1221 free_vlc(&s->vlc[i]);
1226 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1228 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Encodes one frame into buf: in context mode the per-frame Huffman tables
 * are rebuilt from the running stats and stored first; then each plane/line
 * is predicted (LEFT / PLANE / MEDIAN, mirroring decode_frame) and entropy
 * coded; finally the stream is padded and byte-swapped into 32-bit words.
 * Returns the encoded size (return line is in missing lines). Many interior
 * lines (declarations, braces, error paths) are missing from this fragment. */
1229 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1230 HYuvContext *s = avctx->priv_data;
1231 AVFrame *pict = data;
1232 const int width= s->width;
1233 const int width2= s->width>>1;
1234 const int height= s->height;
1235 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1236 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1237 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1238 AVFrame * const p= &s->picture;
1242 p->pict_type= AV_PICTURE_TYPE_I;
/* context mode: rebuild tables from accumulated stats, store them, then decay stats */
1247 generate_len_table(s->len[i], s->stats[i]);
1248 if(generate_bits_table(s->bits[i], s->len[i])<0)
1250 size+= store_table(s, s->len[i], &buf[size]);
1254 for(j=0; j<256; j++)
1255 s->stats[i][j] >>= 1;
1258 init_put_bits(&s->pb, buf+size, buf_size-size);
1260 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1261 int lefty, leftu, leftv, y, cy;
/* raw seed bytes, matching the order decode_frame reads them */
1263 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1264 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1265 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1266 put_bits(&s->pb, 8, p->data[0][0]);
1268 lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1269 leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1270 leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1272 encode_422_bitstream(s, 2, width-2);
1274 if(s->predictor==MEDIAN){
1275 int lefttopy, lefttopu, lefttopv;
/* second line (interlaced) is left-predicted */
1278 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1279 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1280 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1282 encode_422_bitstream(s, 0, width);
/* first 4 pixels of the next line are left-predicted too */
1286 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1287 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1288 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1290 encode_422_bitstream(s, 0, 4);
1292 lefttopy= p->data[0][3];
1293 lefttopu= p->data[1][1];
1294 lefttopv= p->data[2][1];
1295 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1296 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1297 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1298 encode_422_bitstream(s, 0, width-4);
/* remaining lines: full median prediction */
1301 for(; y<height; y++,cy++){
1302 uint8_t *ydst, *udst, *vdst;
1304 if(s->bitstream_bpp==12){
1306 ydst= p->data[0] + p->linesize[0]*y;
1307 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1308 encode_gray_bitstream(s, width);
1311 if(y>=height) break;
1313 ydst= p->data[0] + p->linesize[0]*y;
1314 udst= p->data[1] + p->linesize[1]*cy;
1315 vdst= p->data[2] + p->linesize[2]*cy;
1317 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1318 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1319 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1321 encode_422_bitstream(s, 0, width);
/* LEFT / PLANE predictors */
1324 for(cy=y=1; y<height; y++,cy++){
1325 uint8_t *ydst, *udst, *vdst;
1327 /* encode a luma only line & y++ */
1328 if(s->bitstream_bpp==12){
1329 ydst= p->data[0] + p->linesize[0]*y;
/* PLANE: difference against the line above (same field) before left prediction */
1331 if(s->predictor == PLANE && s->interlaced < y){
1332 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1334 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1336 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1338 encode_gray_bitstream(s, width);
1340 if(y>=height) break;
1343 ydst= p->data[0] + p->linesize[0]*y;
1344 udst= p->data[1] + p->linesize[1]*cy;
1345 vdst= p->data[2] + p->linesize[2]*cy;
1347 if(s->predictor == PLANE && s->interlaced < cy){
1348 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1349 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1350 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1352 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1353 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1354 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1356 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1357 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1358 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1361 encode_422_bitstream(s, 0, width);
1364 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
/* RGB frames are stored bottom-up: start at the last line, negative stride */
1365 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1366 const int stride = -p->linesize[0];
1367 const int fake_stride = -fake_ystride;
1369 int leftr, leftg, leftb;
1371 put_bits(&s->pb, 8, leftr= data[R]);
1372 put_bits(&s->pb, 8, leftg= data[G]);
1373 put_bits(&s->pb, 8, leftb= data[B]);
1374 put_bits(&s->pb, 8, 0);
1376 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1377 encode_bgr_bitstream(s, width-1);
1379 for(y=1; y<s->height; y++){
1380 uint8_t *dst = data + y*stride;
1381 if(s->predictor == PLANE && s->interlaced < y){
1382 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1383 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1385 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1387 encode_bgr_bitstream(s, width);
1390 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
/* pad so the decoder's word-based length calculation works */
1394 size+= (put_bits_count(&s->pb)+31)/8;
1395 put_bits(&s->pb, 16, 0);
1396 put_bits(&s->pb, 15, 0);
/* pass-1: dump the stats tables to stats_out every 32nd frame */
1399 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1401 char *p= avctx->stats_out;
1402 char *end= p + 1024*30;
1404 for(j=0; j<256; j++){
1405 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1409 snprintf(p, end-p, "\n");
1413 avctx->stats_out[0] = '\0';
1414 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1415 flush_put_bits(&s->pb);
/* swap to the big-endian 32-bit word order decode_frame undoes */
1416 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1419 s->picture_number++;
/* Encoder teardown: frees the extradata and two-pass stats buffers allocated
 * in encode_init() (common_end() call is in lines missing from this fragment). */
1424 static av_cold int encode_end(AVCodecContext *avctx)
1426 HYuvContext *s = avctx->priv_data;
1430 av_freep(&avctx->extradata);
1431 av_freep(&avctx->stats_out);
1435 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1437 #if CONFIG_HUFFYUV_DECODER
/* Decoder registration for the original huffyuv format; supports direct
 * rendering, slice callbacks and frame threading. */
1438 AVCodec ff_huffyuv_decoder = {
1442 sizeof(HYuvContext),
1447 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1449 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1450 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1454 #if CONFIG_FFVHUFF_DECODER
/* Decoder registration for the FFmpeg variant (ffvhuff), which additionally
 * allows YUV420P and per-frame (context) Huffman tables. */
1455 AVCodec ff_ffvhuff_decoder = {
1459 sizeof(HYuvContext),
1464 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1466 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1467 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1471 #if CONFIG_HUFFYUV_ENCODER
/* Encoder registration for the original huffyuv format (no YUV420P — see the
 * check in encode_init()). */
1472 AVCodec ff_huffyuv_encoder = {
1476 sizeof(HYuvContext),
1480 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1481 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1485 #if CONFIG_FFVHUFF_ENCODER
/* Encoder registration for the FFmpeg variant; also accepts YUV420P input. */
1486 AVCodec ff_ffvhuff_encoder = {
1490 sizeof(HYuvContext),
1494 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1495 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),