git.sesse.net Git - ffmpeg/blobdiff - libavcodec/huffyuv.c
index ad2f25ffe3618d5dd468480845500bcea40fb90d..57b5f32fc89f14ded537462d280358753fbc5e97 100644 (file)
@@ -6,25 +6,25 @@
  * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
  * the algorithm used
  *
- * This file is part of FFmpeg.
+ * This file is part of Libav.
  *
- * FFmpeg is free software; you can redistribute it and/or
+ * Libav is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
- * FFmpeg is distributed in the hope that it will be useful,
+ * Libav is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
+ * License along with Libav; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
 /**
- * @file libavcodec/huffyuv.c
+ * @file
  * huffyuv codec for libavcodec.
  */
 
 #include "get_bits.h"
 #include "put_bits.h"
 #include "dsputil.h"
+#include "thread.h"
 
 #define VLC_BITS 11
 
-#ifdef WORDS_BIGENDIAN
+#if HAVE_BIGENDIAN
 #define B 3
 #define G 2
 #define R 1
+#define A 0
 #else
 #define B 0
 #define G 1
 #define R 2
+#define A 3
 #endif
 
 typedef enum Predictor{
@@ -129,47 +132,6 @@ static const unsigned char classic_add_chroma[256] = {
     6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
 };
 
-static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
-    int i;
-
-    for(i=0; i<w-1; i++){
-        acc+= src[i];
-        dst[i]= acc;
-        i++;
-        acc+= src[i];
-        dst[i]= acc;
-    }
-
-    for(; i<w; i++){
-        acc+= src[i];
-        dst[i]= acc;
-    }
-
-    return acc;
-}
-
-static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
-    int i;
-    int r,g,b;
-    r= *red;
-    g= *green;
-    b= *blue;
-
-    for(i=0; i<w; i++){
-        b+= src[4*i+B];
-        g+= src[4*i+G];
-        r+= src[4*i+R];
-
-        dst[4*i+B]= b;
-        dst[4*i+G]= g;
-        dst[4*i+R]= r;
-    }
-
-    *red= r;
-    *green= g;
-    *blue= b;
-}
-
 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
     int i;
     if(w<32){
@@ -213,7 +175,7 @@ static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8
     *blue=  src[(w-1)*4+B];
 }
 
-static void read_len_table(uint8_t *dst, GetBitContext *gb){
+static int read_len_table(uint8_t *dst, GetBitContext *gb){
     int i, val, repeat;
 
     for(i=0; i<256;){
@@ -222,12 +184,17 @@ static void read_len_table(uint8_t *dst, GetBitContext *gb){
         if(repeat==0)
             repeat= get_bits(gb, 8);
 //printf("%d %d\n", val, repeat);
+        if(i+repeat > 256) {
+            av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
+            return -1;
+        }
         while (repeat--)
             dst[i++] = val;
     }
+    return 0;
 }
 
-static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
+static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
     int len, index;
     uint32_t bits=0;
 
@@ -265,11 +232,12 @@ static void heap_sift(HeapElem *h, int root, int size)
     }
 }
 
-static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
-    HeapElem h[size];
-    int up[2*size];
-    int len[2*size];
+static void generate_len_table(uint8_t *dst, const uint64_t *stats){
+    HeapElem h[256];
+    int up[2*256];
+    int len[2*256];
     int offset, i, next;
+    int size = 256;
 
     for(offset=1; ; offset<<=1){
         for(i=0; i<size; i++){
@@ -372,23 +340,18 @@ static void generate_joint_tables(HYuvContext *s){
     }
 }
 
-static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
+static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
     GetBitContext gb;
     int i;
 
     init_get_bits(&gb, src, length*8);
 
     for(i=0; i<3; i++){
-        read_len_table(s->len[i], &gb);
-
+        if(read_len_table(s->len[i], &gb)<0)
+            return -1;
         if(generate_bits_table(s->bits[i], s->len[i])<0){
             return -1;
         }
-#if 0
-for(j=0; j<256; j++){
-printf("%6X, %2d,  %3d\n", s->bits[i][j], s->len[i][j], j);
-}
-#endif
         free_vlc(&s->vlc[i]);
         init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
     }
@@ -404,9 +367,11 @@ static int read_old_huffman_tables(HYuvContext *s){
     int i;
 
     init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
-    read_len_table(s->len[0], &gb);
+    if(read_len_table(s->len[0], &gb)<0)
+        return -1;
     init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
-    read_len_table(s->len[1], &gb);
+    if(read_len_table(s->len[1], &gb)<0)
+        return -1;
 
     for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma  [i];
     for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
@@ -440,9 +405,7 @@ static av_cold void alloc_temp(HYuvContext *s){
             s->temp[i]= av_malloc(s->width + 16);
         }
     }else{
-        for(i=0; i<2; i++){
-            s->temp[i]= av_malloc(4*s->width + 16);
-        }
+        s->temp[0]= av_mallocz(4*s->width + 16);
     }
 }
 
@@ -486,6 +449,9 @@ s->bgr32=1;
     if(s->version==2){
         int method, interlace;
 
+        if (avctx->extradata_size < 4)
+            return -1;
+
         method= ((uint8_t*)avctx->extradata)[0];
         s->decorrelate= method&64 ? 1 : 0;
         s->predictor= method&63;
@@ -496,7 +462,7 @@ s->bgr32=1;
         s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
         s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
 
-        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
+        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
             return -1;
     }else{
         switch(avctx->bits_per_coded_sample&7){
@@ -557,10 +523,32 @@ s->bgr32=1;
 
     return 0;
 }
+
+static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
+{
+    HYuvContext *s = avctx->priv_data;
+    int i;
+
+    avctx->coded_frame= &s->picture;
+    alloc_temp(s);
+
+    for (i = 0; i < 6; i++)
+        s->vlc[i].table = NULL;
+
+    if(s->version==2){
+        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
+            return -1;
+    }else{
+        if(read_old_huffman_tables(s) < 0)
+            return -1;
+    }
+
+    return 0;
+}
 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
 
 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
-static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
+static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
     int i;
     int index= 0;
 
@@ -677,7 +665,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
     }
 
     for(i=0; i<3; i++){
-        generate_len_table(s->len[i], s->stats[i], 256);
+        generate_len_table(s->len[i], s->stats[i]);
 
         if(generate_bits_table(s->bits[i], s->len[i])<0){
             return -1;
@@ -729,7 +717,7 @@ static void decode_422_bitstream(HYuvContext *s, int count){
 
     count/=2;
 
-    if(count >= (s->gb.size_in_bits - get_bits_count(&s->gb))/(31*4)){
+    if(count >= (get_bits_left(&s->gb))/(31*4)){
         for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
             READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
             READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
@@ -747,7 +735,7 @@ static void decode_gray_bitstream(HYuvContext *s, int count){
 
     count/=2;
 
-    if(count >= (s->gb.size_in_bits - get_bits_count(&s->gb))/(31*2)){
+    if(count >= (get_bits_left(&s->gb))/(31*2)){
         for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
             READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
         }
@@ -872,7 +860,7 @@ static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorre
             s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
         }
         if(alpha)
-            get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
+            s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
     }
 }
 
@@ -933,8 +921,8 @@ static int encode_bgr_bitstream(HYuvContext *s, int count){
 
 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
 static void draw_slice(HYuvContext *s, int y){
-    int h, cy;
-    int offset[4];
+    int h, cy, i;
+    int offset[AV_NUM_DATA_POINTERS];
 
     if(s->avctx->draw_horiz_band==NULL)
         return;
@@ -951,7 +939,8 @@ static void draw_slice(HYuvContext *s, int y){
     offset[0] = s->picture.linesize[0]*y;
     offset[1] = s->picture.linesize[1]*cy;
     offset[2] = s->picture.linesize[2]*cy;
-    offset[3] = 0;
+    for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
+        offset[i] = 0;
     emms_c();
 
     s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
@@ -976,13 +965,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
     if (!s->bitstream_buffer)
         return AVERROR(ENOMEM);
 
+    memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
     s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
 
     if(p->data[0])
-        avctx->release_buffer(avctx, p);
+        ff_thread_release_buffer(avctx, p);
 
     p->reference= 0;
-    if(avctx->get_buffer(avctx, p) < 0){
+    if(ff_thread_get_buffer(avctx, p) < 0){
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return -1;
     }
@@ -1028,10 +1018,10 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
             case LEFT:
             case PLANE:
                 decode_422_bitstream(s, width-2);
-                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
+                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                 if(!(s->flags&CODEC_FLAG_GRAY)){
-                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
-                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
+                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
+                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                 }
 
                 for(cy=y=1; y<s->height; y++,cy++){
@@ -1042,7 +1032,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
 
                         ydst= p->data[0] + p->linesize[0]*y;
 
-                        lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
+                        lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                         if(s->predictor == PLANE){
                             if(y>s->interlaced)
                                 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
@@ -1058,10 +1048,10 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
                     vdst= p->data[2] + p->linesize[2]*cy;
 
                     decode_422_bitstream(s, width);
-                    lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
+                    lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                     if(!(s->flags&CODEC_FLAG_GRAY)){
-                        leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
-                        leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
+                        leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
+                        leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
                     }
                     if(s->predictor == PLANE){
                         if(cy>s->interlaced){
@@ -1079,10 +1069,10 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
             case MEDIAN:
                 /* first line except first 2 pixels is left predicted */
                 decode_422_bitstream(s, width-2);
-                lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
+                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                 if(!(s->flags&CODEC_FLAG_GRAY)){
-                    leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
-                    leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
+                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
+                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                 }
 
                 cy=y=1;
@@ -1090,20 +1080,20 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
                 /* second line is left predicted for interlaced case */
                 if(s->interlaced){
                     decode_422_bitstream(s, width);
-                    lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
+                    lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                     if(!(s->flags&CODEC_FLAG_GRAY)){
-                        leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
-                        leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
+                        leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
+                        leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                     }
                     y++; cy++;
                 }
 
                 /* next 4 pixels are left predicted too */
                 decode_422_bitstream(s, 4);
-                lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
+                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                 if(!(s->flags&CODEC_FLAG_GRAY)){
-                    leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
-                    leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
+                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
+                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                 }
 
                 /* next line except the first 4 pixels is median predicted */
@@ -1151,11 +1141,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
         }
     }else{
         int y;
-        int leftr, leftg, leftb;
+        int leftr, leftg, leftb, lefta;
         const int last_line= (height-1)*p->linesize[0];
 
         if(s->bitstream_bpp==32){
-            skip_bits(&s->gb, 8);
+            lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
             leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
             leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
             leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
@@ -1163,6 +1153,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
             leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
             leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
             leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
+            lefta= p->data[0][last_line+A]= 255;
             skip_bits(&s->gb, 8);
         }
 
@@ -1171,13 +1162,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
             case LEFT:
             case PLANE:
                 decode_bgr_bitstream(s, width-1);
-                add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);
+                s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
 
                 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
                     decode_bgr_bitstream(s, width);
 
-                    add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
+                    s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
                     if(s->predictor == PLANE){
+                        if(s->bitstream_bpp!=32) lefta=0;
                         if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                             s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                              p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
@@ -1219,6 +1211,9 @@ static av_cold int decode_end(AVCodecContext *avctx)
     HYuvContext *s = avctx->priv_data;
     int i;
 
+    if (s->picture.data[0])
+        avctx->release_buffer(avctx, &s->picture);
+
     common_end(s);
     av_freep(&s->bitstream_buffer);
 
@@ -1244,12 +1239,12 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
     int i, j, size=0;
 
     *p = *pict;
-    p->pict_type= FF_I_TYPE;
+    p->pict_type= AV_PICTURE_TYPE_I;
     p->key_frame= 1;
 
     if(s->context){
         for(i=0; i<3; i++){
-            generate_len_table(s->len[i], s->stats[i], 256);
+            generate_len_table(s->len[i], s->stats[i]);
             if(generate_bits_table(s->bits[i], s->len[i])<0)
                 return -1;
             size+= store_table(s, s->len[i], &buf[size]);
@@ -1440,61 +1435,59 @@ static av_cold int encode_end(AVCodecContext *avctx)
 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
 
 #if CONFIG_HUFFYUV_DECODER
-AVCodec huffyuv_decoder = {
-    "huffyuv",
-    CODEC_TYPE_VIDEO,
-    CODEC_ID_HUFFYUV,
-    sizeof(HYuvContext),
-    decode_init,
-    NULL,
-    decode_end,
-    decode_frame,
-    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
-    NULL,
+AVCodec ff_huffyuv_decoder = {
+    .name           = "huffyuv",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = CODEC_ID_HUFFYUV,
+    .priv_data_size = sizeof(HYuvContext),
+    .init           = decode_init,
+    .close          = decode_end,
+    .decode         = decode_frame,
+    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
+    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
 };
 #endif
 
 #if CONFIG_FFVHUFF_DECODER
-AVCodec ffvhuff_decoder = {
-    "ffvhuff",
-    CODEC_TYPE_VIDEO,
-    CODEC_ID_FFVHUFF,
-    sizeof(HYuvContext),
-    decode_init,
-    NULL,
-    decode_end,
-    decode_frame,
-    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
-    NULL,
+AVCodec ff_ffvhuff_decoder = {
+    .name           = "ffvhuff",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = CODEC_ID_FFVHUFF,
+    .priv_data_size = sizeof(HYuvContext),
+    .init           = decode_init,
+    .close          = decode_end,
+    .decode         = decode_frame,
+    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
+    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
 };
 #endif
 
 #if CONFIG_HUFFYUV_ENCODER
-AVCodec huffyuv_encoder = {
-    "huffyuv",
-    CODEC_TYPE_VIDEO,
-    CODEC_ID_HUFFYUV,
-    sizeof(HYuvContext),
-    encode_init,
-    encode_frame,
-    encode_end,
-    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
+AVCodec ff_huffyuv_encoder = {
+    .name           = "huffyuv",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = CODEC_ID_HUFFYUV,
+    .priv_data_size = sizeof(HYuvContext),
+    .init           = encode_init,
+    .encode         = encode_frame,
+    .close          = encode_end,
+    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
 };
 #endif
 
 #if CONFIG_FFVHUFF_ENCODER
-AVCodec ffvhuff_encoder = {
-    "ffvhuff",
-    CODEC_TYPE_VIDEO,
-    CODEC_ID_FFVHUFF,
-    sizeof(HYuvContext),
-    encode_init,
-    encode_frame,
-    encode_end,
-    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
+AVCodec ff_ffvhuff_encoder = {
+    .name           = "ffvhuff",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = CODEC_ID_FFVHUFF,
+    .priv_data_size = sizeof(HYuvContext),
+    .init           = encode_init,
+    .encode         = encode_frame,
+    .close          = encode_end,
+    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
 };
 #endif
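
For reference (not part of the diff): the i + repeat > 256 check added to read_len_table() above guards the run-length decode of the 256-entry Huffman code-length table. A minimal standalone sketch of that decode follows, using a hypothetical BitReader in place of GetBitContext; the input-exhaustion guard is an extra safety net for the sketch, not something the patch itself adds.

/*
 * Standalone sketch (assumptions: BitReader and read_bits are stand-ins
 * for GetBitContext/get_bits; read_len_table_sketch mirrors the patched
 * read_len_table, including its bounds check).
 */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

typedef struct {
    const uint8_t *buf;
    size_t size;   /* buffer size in bytes */
    size_t pos;    /* read position in bits */
} BitReader;

/* Read n bits, MSB first; bits past the end of the buffer read as 0. */
static unsigned read_bits(BitReader *br, int n)
{
    unsigned v = 0;
    while (n--) {
        unsigned bit = 0;
        if (br->pos < 8 * br->size)
            bit = (br->buf[br->pos >> 3] >> (7 - (br->pos & 7))) & 1;
        br->pos++;
        v = (v << 1) | bit;
    }
    return v;
}

/* Returns 0 on success, -1 if a run would overflow the 256-entry table. */
static int read_len_table_sketch(uint8_t dst[256], BitReader *br)
{
    int i = 0;

    while (i < 256) {
        int val, repeat;

        if (br->pos >= 8 * br->size)
            return -1;                 /* out of input (extra guard, not in the patch) */

        val    = read_bits(br, 5);     /* code length for the next run */
        repeat = read_bits(br, 3);     /* short run length */
        if (repeat == 0)
            repeat = read_bits(br, 8); /* long run length */

        if (i + repeat > 256) {        /* the bounds check added by this patch */
            fprintf(stderr, "Error reading huffman table\n");
            return -1;
        }
        while (repeat--)
            dst[i++] = val;
    }
    return 0;
}

The point of the new check is that repeat comes straight from the bitstream, so without it a crafted table can run dst[i++] well past the end of the 256-byte array before the outer loop condition is re-evaluated.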