1 /*
2  * huffyuv codec for libavcodec
3  *
4  * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7  * the algorithm used
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25
26 /**
27  * @file libavcodec/huffyuv.c
28  * huffyuv codec for libavcodec.
29  */
30
31 #include "avcodec.h"
32 #include "get_bits.h"
33 #include "put_bits.h"
34 #include "dsputil.h"
35
36 #define VLC_BITS 11
37
38 #if HAVE_BIGENDIAN
39 #define B 3
40 #define G 2
41 #define R 1
42 #define A 0
43 #else
44 #define B 0
45 #define G 1
46 #define R 2
47 #define A 3
48 #endif
49
50 typedef enum Predictor{
51     LEFT= 0,
52     PLANE,
53     MEDIAN,
54 } Predictor;
55
56 typedef struct HYuvContext{
57     AVCodecContext *avctx;
58     Predictor predictor;
59     GetBitContext gb;
60     PutBitContext pb;
61     int interlaced;
62     int decorrelate;
63     int bitstream_bpp;
64     int version;
65     int yuy2;                               //use yuy2 instead of 422P
66     int bgr32;                              //use bgr32 instead of bgr24
67     int width, height;
68     int flags;
69     int context;
70     int picture_number;
71     int last_slice_end;
72     uint8_t *temp[3];
73     uint64_t stats[3][256];
74     uint8_t len[3][256];
75     uint32_t bits[3][256];
76     uint32_t pix_bgr_map[1<<VLC_BITS];
77     VLC vlc[6];                             //Y,U,V,YY,YU,YV
78     AVFrame picture;
79     uint8_t *bitstream_buffer;
80     unsigned int bitstream_buffer_size;
81     DSPContext dsp;
82 }HYuvContext;
83
84 static const unsigned char classic_shift_luma[] = {
85   34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
86   16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
87   69,68, 0
88 };
89
90 static const unsigned char classic_shift_chroma[] = {
91   66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
92   56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
93   214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
94 };
95
96 static const unsigned char classic_add_luma[256] = {
97     3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
98    73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
99    68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
100    35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
101    37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
102    35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
103    27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
104    15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
105    12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
106    12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
107    18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
108    28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
109    28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
110    62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
111    54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
112    46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
113 };
114
115 static const unsigned char classic_add_chroma[256] = {
116     3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
117     7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
118    11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
119    43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
120   143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
121    80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
122    17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
123   112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
124     0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
125   135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
126    52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
127    19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
128     7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
129    83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
130    14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
131     6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
132 };
133
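/* Left-predict one row: dst[i] = src[i] - previous sample, with the first
 * sample predicted from 'left'. Returns the last source sample so the caller
 * can carry it into the next call. Rows of 32+ pixels hand the bulk of the
 * work to dsp.diff_bytes(). */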
134 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
135     int i;
136     if(w<32){
137         for(i=0; i<w; i++){
138             const int temp= src[i];
139             dst[i]= temp - left;
140             left= temp;
141         }
142         return left;
143     }else{
144         for(i=0; i<16; i++){
145             const int temp= src[i];
146             dst[i]= temp - left;
147             left= temp;
148         }
149         s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
150         return src[w-1];
151     }
152 }
153
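/* Per-channel left prediction for packed 32-bit BGR: every component is
 * predicted from the same component of the previous pixel. The first few
 * pixels are done in C, the remainder with dsp.diff_bytes() on the raw
 * bytes; the last pixel's R, G and B are returned through the pointers. */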
154 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
155     int i;
156     int r,g,b;
157     r= *red;
158     g= *green;
159     b= *blue;
160     for(i=0; i<FFMIN(w,4); i++){
161         const int rt= src[i*4+R];
162         const int gt= src[i*4+G];
163         const int bt= src[i*4+B];
164         dst[i*4+R]= rt - r;
165         dst[i*4+G]= gt - g;
166         dst[i*4+B]= bt - b;
167         r = rt;
168         g = gt;
169         b = bt;
170     }
171     s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
172     *red=   src[(w-1)*4+R];
173     *green= src[(w-1)*4+G];
174     *blue=  src[(w-1)*4+B];
175 }
176
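/* Parse a run-length coded table of Huffman code lengths: each chunk is a
 * 3-bit repeat count followed by a 5-bit length, and a repeat count of 0
 * means the real count follows as 8 bits. Fails if the runs overflow the
 * 256-entry table. */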
177 static int read_len_table(uint8_t *dst, GetBitContext *gb){
178     int i, val, repeat;
179
180     for(i=0; i<256;){
181         repeat= get_bits(gb, 3);
182         val   = get_bits(gb, 5);
183         if(repeat==0)
184             repeat= get_bits(gb, 8);
185 //printf("%d %d\n", val, repeat);
186         if(i+repeat > 256) {
187             av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
188             return -1;
189         }
190         while (repeat--)
191             dst[i++] = val;
192     }
193     return 0;
194 }
195
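/* Assign canonical Huffman code words from the code lengths, longest codes
 * first. After all codes of a given length have been handed out the running
 * counter must be even, otherwise the lengths do not describe a valid
 * prefix code. */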
196 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
197     int len, index;
198     uint32_t bits=0;
199
200     for(len=32; len>0; len--){
201         for(index=0; index<256; index++){
202             if(len_table[index]==len)
203                 dst[index]= bits++;
204         }
205         if(bits & 1){
206             av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
207             return -1;
208         }
209         bits >>= 1;
210     }
211     return 0;
212 }
213
214 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
215 typedef struct {
216     uint64_t val;
217     int name;
218 } HeapElem;
219
220 static void heap_sift(HeapElem *h, int root, int size)
221 {
222     while(root*2+1 < size) {
223         int child = root*2+1;
224         if(child < size-1 && h[child].val > h[child+1].val)
225             child++;
226         if(h[root].val > h[child].val) {
227             FFSWAP(HeapElem, h[root], h[child]);
228             root = child;
229         } else
230             break;
231     }
232 }
233
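/* Compute Huffman code lengths for 'size' symbols from their frequencies
 * using a min-heap; up[] records each node's parent so the leaf depths
 * (= code lengths) can be read off afterwards. If any length reaches 32
 * bits, the counts are biased by a doubled 'offset' and the construction is
 * retried, flattening the tree until every length fits. */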
234 static void generate_len_table(uint8_t *dst, const uint64_t *stats, int size){
235     HeapElem h[size];
236     int up[2*size];
237     int len[2*size];
238     int offset, i, next;
239
240     for(offset=1; ; offset<<=1){
241         for(i=0; i<size; i++){
242             h[i].name = i;
243             h[i].val = (stats[i] << 8) + offset;
244         }
245         for(i=size/2-1; i>=0; i--)
246             heap_sift(h, i, size);
247
248         for(next=size; next<size*2-1; next++){
249             // merge the two smallest entries, and put the merged node back in the heap
250             uint64_t min1v = h[0].val;
251             up[h[0].name] = next;
252             h[0].val = INT64_MAX;
253             heap_sift(h, 0, size);
254             up[h[0].name] = next;
255             h[0].name = next;
256             h[0].val += min1v;
257             heap_sift(h, 0, size);
258         }
259
260         len[2*size-2] = 0;
261         for(i=2*size-3; i>=size; i--)
262             len[i] = len[up[i]] + 1;
263         for(i=0; i<size; i++) {
264             dst[i] = len[up[i]] + 1;
265             if(dst[i] >= 32) break;
266         }
267         if(i==size) break;
268     }
269 }
270 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
271
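/* Build the joint VLC tables: for YUV, vlc[3..5] decode a (Y,Y), (Y,U) or
 * (Y,V) pair with a single lookup whenever the combined code fits in
 * VLC_BITS; for RGB, vlc[3] decodes a whole pixel, with pix_bgr_map holding
 * the corresponding component values. Combinations that do not fit fall back
 * to the per-plane tables at decode time. */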
272 static void generate_joint_tables(HYuvContext *s){
273     uint16_t symbols[1<<VLC_BITS];
274     uint16_t bits[1<<VLC_BITS];
275     uint8_t len[1<<VLC_BITS];
276     if(s->bitstream_bpp < 24){
277         int p, i, y, u;
278         for(p=0; p<3; p++){
279             for(i=y=0; y<256; y++){
280                 int len0 = s->len[0][y];
281                 int limit = VLC_BITS - len0;
282                 if(limit <= 0)
283                     continue;
284                 for(u=0; u<256; u++){
285                     int len1 = s->len[p][u];
286                     if(len1 > limit)
287                         continue;
288                     len[i] = len0 + len1;
289                     bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
290                     symbols[i] = (y<<8) + u;
291                     if(symbols[i] != 0xffff) // reserved to mean "invalid"
292                         i++;
293                 }
294             }
295             free_vlc(&s->vlc[3+p]);
296             init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
297         }
298     }else{
299         uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
300         int i, b, g, r, code;
301         int p0 = s->decorrelate;
302         int p1 = !s->decorrelate;
303         // restrict the range to +/-16 because that's pretty much guaranteed to
304         // cover all the combinations that fit in 11 bits total, and it doesn't
305         // matter if we miss a few rare codes.
306         for(i=0, g=-16; g<16; g++){
307             int len0 = s->len[p0][g&255];
308             int limit0 = VLC_BITS - len0;
309             if(limit0 < 2)
310                 continue;
311             for(b=-16; b<16; b++){
312                 int len1 = s->len[p1][b&255];
313                 int limit1 = limit0 - len1;
314                 if(limit1 < 1)
315                     continue;
316                 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
317                 for(r=-16; r<16; r++){
318                     int len2 = s->len[2][r&255];
319                     if(len2 > limit1)
320                         continue;
321                     len[i] = len0 + len1 + len2;
322                     bits[i] = (code << len2) + s->bits[2][r&255];
323                     if(s->decorrelate){
324                         map[i][G] = g;
325                         map[i][B] = g+b;
326                         map[i][R] = g+r;
327                     }else{
328                         map[i][B] = g;
329                         map[i][G] = b;
330                         map[i][R] = r;
331                     }
332                     i++;
333                 }
334             }
335         }
336         free_vlc(&s->vlc[3]);
337         init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
338     }
339 }
340
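/* Read the three run-length coded length tables (from extradata or, with
 * context modelling, from the frame header), regenerate the code words and
 * rebuild the per-plane and joint VLCs. Returns the number of bytes
 * consumed, or -1 on error. */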
341 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
342     GetBitContext gb;
343     int i;
344
345     init_get_bits(&gb, src, length*8);
346
347     for(i=0; i<3; i++){
348         if(read_len_table(s->len[i], &gb)<0)
349             return -1;
350         if(generate_bits_table(s->bits[i], s->len[i])<0){
351             return -1;
352         }
353 #if 0
354         for(j=0; j<256; j++){
355             printf("%6X, %2d,  %3d\n", s->bits[i][j], s->len[i][j], j);
356         }
357 #endif
358         free_vlc(&s->vlc[i]);
359         init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
360     }
361
362     generate_joint_tables(s);
363
364     return (get_bits_count(&gb)+7)/8;
365 }
366
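/* Set up the fixed tables used by version-1 ("classic") huffyuv streams,
 * which carry no tables of their own. */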
367 static int read_old_huffman_tables(HYuvContext *s){
368 #if 1
369     GetBitContext gb;
370     int i;
371
372     init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
373     if(read_len_table(s->len[0], &gb)<0)
374         return -1;
375     init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
376     if(read_len_table(s->len[1], &gb)<0)
377         return -1;
378
379     for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma  [i];
380     for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
381
382     if(s->bitstream_bpp >= 24){
383         memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
384         memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
385     }
386     memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
387     memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
388
389     for(i=0; i<3; i++){
390         free_vlc(&s->vlc[i]);
391         init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
392     }
393
394     generate_joint_tables(s);
395
396     return 0;
397 #else
398     av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
399     return -1;
400 #endif
401 }
402
403 static av_cold void alloc_temp(HYuvContext *s){
404     int i;
405
406     if(s->bitstream_bpp<24){
407         for(i=0; i<3; i++){
408             s->temp[i]= av_malloc(s->width + 16);
409         }
410     }else{
411         s->temp[0]= av_mallocz(4*s->width + 16);
412     }
413 }
414
415 static av_cold int common_init(AVCodecContext *avctx){
416     HYuvContext *s = avctx->priv_data;
417
418     s->avctx= avctx;
419     s->flags= avctx->flags;
420
421     dsputil_init(&s->dsp, avctx);
422
423     s->width= avctx->width;
424     s->height= avctx->height;
425     assert(s->width>0 && s->height>0);
426
427     return 0;
428 }
429
430 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
431 static av_cold int decode_init(AVCodecContext *avctx)
432 {
433     HYuvContext *s = avctx->priv_data;
434
435     common_init(avctx);
436     memset(s->vlc, 0, 3*sizeof(VLC));
437
438     avctx->coded_frame= &s->picture;
439     s->interlaced= s->height > 288;
440
441     s->bgr32=1;
442     //if(avctx->extradata)
443     //  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
444     if(avctx->extradata_size){
445         if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
446             s->version=1; // do such files exist at all?
447         else
448             s->version=2;
449     }else
450         s->version=0;
451
452     if(s->version==2){
453         int method, interlace;
454
455         if (avctx->extradata_size < 4)
456             return -1;
457
458         method= ((uint8_t*)avctx->extradata)[0];
459         s->decorrelate= method&64 ? 1 : 0;
460         s->predictor= method&63;
461         s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
462         if(s->bitstream_bpp==0)
463             s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
464         interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
465         s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
466         s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
467
468         if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
469             return -1;
470     }else{
471         switch(avctx->bits_per_coded_sample&7){
472         case 1:
473             s->predictor= LEFT;
474             s->decorrelate= 0;
475             break;
476         case 2:
477             s->predictor= LEFT;
478             s->decorrelate= 1;
479             break;
480         case 3:
481             s->predictor= PLANE;
482             s->decorrelate= avctx->bits_per_coded_sample >= 24;
483             break;
484         case 4:
485             s->predictor= MEDIAN;
486             s->decorrelate= 0;
487             break;
488         default:
489             s->predictor= LEFT; //OLD
490             s->decorrelate= 0;
491             break;
492         }
493         s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
494         s->context= 0;
495
496         if(read_old_huffman_tables(s) < 0)
497             return -1;
498     }
499
500     switch(s->bitstream_bpp){
501     case 12:
502         avctx->pix_fmt = PIX_FMT_YUV420P;
503         break;
504     case 16:
505         if(s->yuy2){
506             avctx->pix_fmt = PIX_FMT_YUYV422;
507         }else{
508             avctx->pix_fmt = PIX_FMT_YUV422P;
509         }
510         break;
511     case 24:
512     case 32:
513         if(s->bgr32){
514             avctx->pix_fmt = PIX_FMT_RGB32;
515         }else{
516             avctx->pix_fmt = PIX_FMT_BGR24;
517         }
518         break;
519     default:
520         assert(0);
521     }
522
523     alloc_temp(s);
524
525 //    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
526
527     return 0;
528 }
529 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
530
531 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
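/* Write one length table in the run-length format understood by
 * read_len_table(); returns the number of bytes written to buf. */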
532 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
533     int i;
534     int index= 0;
535
536     for(i=0; i<256;){
537         int val= len[i];
538         int repeat=0;
539
540         for(; i<256 && len[i]==val && repeat<255; i++)
541             repeat++;
542
543         assert(val < 32 && val >0 && repeat<256 && repeat>0);
544         if(repeat>7){
545             buf[index++]= val;
546             buf[index++]= repeat;
547         }else{
548             buf[index++]= val | (repeat<<5);
549         }
550     }
551
552     return index;
553 }
554
555 static av_cold int encode_init(AVCodecContext *avctx)
556 {
557     HYuvContext *s = avctx->priv_data;
558     int i, j;
559
560     common_init(avctx);
561
562     avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
563     avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
564     s->version=2;
565
566     avctx->coded_frame= &s->picture;
567
568     switch(avctx->pix_fmt){
569     case PIX_FMT_YUV420P:
570         s->bitstream_bpp= 12;
571         break;
572     case PIX_FMT_YUV422P:
573         s->bitstream_bpp= 16;
574         break;
575     case PIX_FMT_RGB32:
576         s->bitstream_bpp= 24;
577         break;
578     default:
579         av_log(avctx, AV_LOG_ERROR, "format not supported\n");
580         return -1;
581     }
582     avctx->bits_per_coded_sample= s->bitstream_bpp;
583     s->decorrelate= s->bitstream_bpp >= 24;
584     s->predictor= avctx->prediction_method;
585     s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
586     if(avctx->context_model==1){
587         s->context= avctx->context_model;
588         if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
589             av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
590             return -1;
591         }
592     }else s->context= 0;
593
594     if(avctx->codec->id==CODEC_ID_HUFFYUV){
595         if(avctx->pix_fmt==PIX_FMT_YUV420P){
596             av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
597             return -1;
598         }
599         if(avctx->context_model){
600             av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
601             return -1;
602         }
603         if(s->interlaced != ( s->height > 288 ))
604             av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
605     }
606
607     if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
608         av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
609         return -1;
610     }
611
612     ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
613     ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
614     ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
615     if(s->context)
616         ((uint8_t*)avctx->extradata)[2]|= 0x40;
617     ((uint8_t*)avctx->extradata)[3]= 0;
618     s->avctx->extradata_size= 4;
619
620     if(avctx->stats_in){
621         char *p= avctx->stats_in;
622
623         for(i=0; i<3; i++)
624             for(j=0; j<256; j++)
625                 s->stats[i][j]= 1;
626
627         for(;;){
628             for(i=0; i<3; i++){
629                 char *next;
630
631                 for(j=0; j<256; j++){
632                     s->stats[i][j]+= strtol(p, &next, 0);
633                     if(next==p) return -1;
634                     p=next;
635                 }
636             }
637             if(p[0]==0 || p[1]==0 || p[2]==0) break;
638         }
639     }else{
640         for(i=0; i<3; i++)
641             for(j=0; j<256; j++){
642                 int d= FFMIN(j, 256-j);
643
644                 s->stats[i][j]= 100000000/(d+1);
645             }
646     }
647
648     for(i=0; i<3; i++){
649         generate_len_table(s->len[i], s->stats[i], 256);
650
651         if(generate_bits_table(s->bits[i], s->len[i])<0){
652             return -1;
653         }
654
655         s->avctx->extradata_size+=
656         store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
657     }
658
659     if(s->context){
660         for(i=0; i<3; i++){
661             int pels = s->width*s->height / (i?40:10);
662             for(j=0; j<256; j++){
663                 int d= FFMIN(j, 256-j);
664                 s->stats[i][j]= pels/(d+1);
665             }
666         }
667     }else{
668         for(i=0; i<3; i++)
669             for(j=0; j<256; j++)
670                 s->stats[i][j]= 0;
671     }
672
673 //    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
674
675     alloc_temp(s);
676
677     s->picture_number=0;
678
679     return 0;
680 }
681 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
682
683 /* TODO instead of restarting the read when the code isn't in the first level
684  * of the joint table, jump into the 2nd level of the individual table. */
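/* Decode two symbols at once through the joint table; the reserved symbol
 * 0xffff marks a pair that is not in the joint table, in which case the two
 * symbols are decoded separately from their individual tables. */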
685 #define READ_2PIX(dst0, dst1, plane1){\
686     uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
687     if(code != 0xffff){\
688         dst0 = code>>8;\
689         dst1 = code;\
690     }else{\
691         dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
692         dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
693     }\
694 }
695
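/* Decode 'count' 4:2:2 samples of one row into temp[]. The bounds-checked
 * loop is used when the remaining bitstream might be too short, budgeting a
 * worst case of 31 bits for each of the 4 symbols read per iteration. */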
696 static void decode_422_bitstream(HYuvContext *s, int count){
697     int i;
698
699     count/=2;
700
701     if(count >= (get_bits_left(&s->gb))/(31*4)){
702         for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
703             READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
704             READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
705         }
706     }else{
707         for(i=0; i<count; i++){
708             READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
709             READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
710         }
711     }
712 }
713
714 static void decode_gray_bitstream(HYuvContext *s, int count){
715     int i;
716
717     count/=2;
718
719     if(count >= (get_bits_left(&s->gb))/(31*2)){
720         for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
721             READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
722         }
723     }else{
724         for(i=0; i<count; i++){
725             READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
726         }
727     }
728 }
729
730 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
731 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
732     int i;
733     const uint8_t *y = s->temp[0] + offset;
734     const uint8_t *u = s->temp[1] + offset/2;
735     const uint8_t *v = s->temp[2] + offset/2;
736
737     if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
738         av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
739         return -1;
740     }
741
742 #define LOAD4\
743             int y0 = y[2*i];\
744             int y1 = y[2*i+1];\
745             int u0 = u[i];\
746             int v0 = v[i];
747
748     count/=2;
749     if(s->flags&CODEC_FLAG_PASS1){
750         for(i=0; i<count; i++){
751             LOAD4;
752             s->stats[0][y0]++;
753             s->stats[1][u0]++;
754             s->stats[0][y1]++;
755             s->stats[2][v0]++;
756         }
757     }
758     if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
759         return 0;
760     if(s->context){
761         for(i=0; i<count; i++){
762             LOAD4;
763             s->stats[0][y0]++;
764             put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
765             s->stats[1][u0]++;
766             put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
767             s->stats[0][y1]++;
768             put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
769             s->stats[2][v0]++;
770             put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
771         }
772     }else{
773         for(i=0; i<count; i++){
774             LOAD4;
775             put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
776             put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
777             put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
778             put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
779         }
780     }
781     return 0;
782 }
783
784 static int encode_gray_bitstream(HYuvContext *s, int count){
785     int i;
786
787     if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
788         av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
789         return -1;
790     }
791
792 #define LOAD2\
793             int y0 = s->temp[0][2*i];\
794             int y1 = s->temp[0][2*i+1];
795 #define STAT2\
796             s->stats[0][y0]++;\
797             s->stats[0][y1]++;
798 #define WRITE2\
799             put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
800             put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
801
802     count/=2;
803     if(s->flags&CODEC_FLAG_PASS1){
804         for(i=0; i<count; i++){
805             LOAD2;
806             STAT2;
807         }
808     }
809     if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
810         return 0;
811
812     if(s->context){
813         for(i=0; i<count; i++){
814             LOAD2;
815             STAT2;
816             WRITE2;
817         }
818     }else{
819         for(i=0; i<count; i++){
820             LOAD2;
821             WRITE2;
822         }
823     }
824     return 0;
825 }
826 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
827
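/* Decode 'count' BGR(A) pixels: the joint table yields a complete pixel from
 * pix_bgr_map with one lookup; on a miss the components are decoded from the
 * individual tables, adding the green value back when the stream is
 * decorrelated. Alpha, when present, is always decoded separately. */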
828 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
829     int i;
830     for(i=0; i<count; i++){
831         int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
832         if(code != -1){
833             *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
834         }else if(decorrelate){
835             s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
836             s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
837             s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
838         }else{
839             s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
840             s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
841             s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
842         }
843         if(alpha)
844             s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
845     }
846 }
847
848 static void decode_bgr_bitstream(HYuvContext *s, int count){
849     if(s->decorrelate){
850         if(s->bitstream_bpp==24)
851             decode_bgr_1(s, count, 1, 0);
852         else
853             decode_bgr_1(s, count, 1, 1);
854     }else{
855         if(s->bitstream_bpp==24)
856             decode_bgr_1(s, count, 0, 0);
857         else
858             decode_bgr_1(s, count, 0, 1);
859     }
860 }
861
862 static int encode_bgr_bitstream(HYuvContext *s, int count){
863     int i;
864
865     if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
866         av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
867         return -1;
868     }
869
870 #define LOAD3\
871             int g= s->temp[0][4*i+G];\
872             int b= (s->temp[0][4*i+B] - g) & 0xff;\
873             int r= (s->temp[0][4*i+R] - g) & 0xff;
874 #define STAT3\
875             s->stats[0][b]++;\
876             s->stats[1][g]++;\
877             s->stats[2][r]++;
878 #define WRITE3\
879             put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
880             put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
881             put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
882
883     if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
884         for(i=0; i<count; i++){
885             LOAD3;
886             STAT3;
887         }
888     }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
889         for(i=0; i<count; i++){
890             LOAD3;
891             STAT3;
892             WRITE3;
893         }
894     }else{
895         for(i=0; i<count; i++){
896             LOAD3;
897             WRITE3;
898         }
899     }
900     return 0;
901 }
902
903 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
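/* Report the rows finished since the last call to the user via
 * draw_horiz_band(); for 4:2:0 (12 bpp) the chroma offsets advance at half
 * the luma rate. */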
904 static void draw_slice(HYuvContext *s, int y){
905     int h, cy;
906     int offset[4];
907
908     if(s->avctx->draw_horiz_band==NULL)
909         return;
910
911     h= y - s->last_slice_end;
912     y -= h;
913
914     if(s->bitstream_bpp==12){
915         cy= y>>1;
916     }else{
917         cy= y;
918     }
919
920     offset[0] = s->picture.linesize[0]*y;
921     offset[1] = s->picture.linesize[1]*cy;
922     offset[2] = s->picture.linesize[2]*cy;
923     offset[3] = 0;
924     emms_c();
925
926     s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
927
928     s->last_slice_end= y + h;
929 }
930
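/* Decode one frame. The packet is byte-swapped one 32-bit word at a time
 * into bitstream_buffer before bit reading starts; with context modelling
 * the per-frame Huffman tables are then parsed from it. Each row is
 * VLC-decoded into temp[] and the spatial prediction (left, plane or median;
 * RGB is stored bottom-up) is undone into the output picture, with finished
 * rows reported through draw_slice(). */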
931 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
932     const uint8_t *buf = avpkt->data;
933     int buf_size = avpkt->size;
934     HYuvContext *s = avctx->priv_data;
935     const int width= s->width;
936     const int width2= s->width>>1;
937     const int height= s->height;
938     int fake_ystride, fake_ustride, fake_vstride;
939     AVFrame * const p= &s->picture;
940     int table_size= 0;
941
942     AVFrame *picture = data;
943
944     av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
945     if (!s->bitstream_buffer)
946         return AVERROR(ENOMEM);
947
948     memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
949     s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
950
951     if(p->data[0])
952         avctx->release_buffer(avctx, p);
953
954     p->reference= 0;
955     if(avctx->get_buffer(avctx, p) < 0){
956         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
957         return -1;
958     }
959
960     if(s->context){
961         table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
962         if(table_size < 0)
963             return -1;
964     }
965
966     if((unsigned)(buf_size-table_size) >= INT_MAX/8)
967         return -1;
968
969     init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
970
971     fake_ystride= s->interlaced ? p->linesize[0]*2  : p->linesize[0];
972     fake_ustride= s->interlaced ? p->linesize[1]*2  : p->linesize[1];
973     fake_vstride= s->interlaced ? p->linesize[2]*2  : p->linesize[2];
974
975     s->last_slice_end= 0;
976
977     if(s->bitstream_bpp<24){
978         int y, cy;
979         int lefty, leftu, leftv;
980         int lefttopy, lefttopu, lefttopv;
981
982         if(s->yuy2){
983             p->data[0][3]= get_bits(&s->gb, 8);
984             p->data[0][2]= get_bits(&s->gb, 8);
985             p->data[0][1]= get_bits(&s->gb, 8);
986             p->data[0][0]= get_bits(&s->gb, 8);
987
988             av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
989             return -1;
990         }else{
991
992             leftv= p->data[2][0]= get_bits(&s->gb, 8);
993             lefty= p->data[0][1]= get_bits(&s->gb, 8);
994             leftu= p->data[1][0]= get_bits(&s->gb, 8);
995                    p->data[0][0]= get_bits(&s->gb, 8);
996
997             switch(s->predictor){
998             case LEFT:
999             case PLANE:
1000                 decode_422_bitstream(s, width-2);
1001                 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1002                 if(!(s->flags&CODEC_FLAG_GRAY)){
1003                     leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1004                     leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1005                 }
1006
1007                 for(cy=y=1; y<s->height; y++,cy++){
1008                     uint8_t *ydst, *udst, *vdst;
1009
1010                     if(s->bitstream_bpp==12){
1011                         decode_gray_bitstream(s, width);
1012
1013                         ydst= p->data[0] + p->linesize[0]*y;
1014
1015                         lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1016                         if(s->predictor == PLANE){
1017                             if(y>s->interlaced)
1018                                 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1019                         }
1020                         y++;
1021                         if(y>=s->height) break;
1022                     }
1023
1024                     draw_slice(s, y);
1025
1026                     ydst= p->data[0] + p->linesize[0]*y;
1027                     udst= p->data[1] + p->linesize[1]*cy;
1028                     vdst= p->data[2] + p->linesize[2]*cy;
1029
1030                     decode_422_bitstream(s, width);
1031                     lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1032                     if(!(s->flags&CODEC_FLAG_GRAY)){
1033                         leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1034                         leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
1035                     }
1036                     if(s->predictor == PLANE){
1037                         if(cy>s->interlaced){
1038                             s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1039                             if(!(s->flags&CODEC_FLAG_GRAY)){
1040                                 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1041                                 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1042                             }
1043                         }
1044                     }
1045                 }
1046                 draw_slice(s, height);
1047
1048                 break;
1049             case MEDIAN:
1050                 /* first line except first 2 pixels is left predicted */
1051                 decode_422_bitstream(s, width-2);
1052                 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1053                 if(!(s->flags&CODEC_FLAG_GRAY)){
1054                     leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1055                     leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1056                 }
1057
1058                 cy=y=1;
1059
1060                 /* second line is left predicted for interlaced case */
1061                 if(s->interlaced){
1062                     decode_422_bitstream(s, width);
1063                     lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1064                     if(!(s->flags&CODEC_FLAG_GRAY)){
1065                         leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[1], s->temp[1], width2, leftu);
1066                         leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[2], s->temp[2], width2, leftv);
1067                     }
1068                     y++; cy++;
1069                 }
1070
1071                 /* next 4 pixels are left predicted too */
1072                 decode_422_bitstream(s, 4);
1073                 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1074                 if(!(s->flags&CODEC_FLAG_GRAY)){
1075                     leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1076                     leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1077                 }
1078
1079                 /* next line except the first 4 pixels is median predicted */
1080                 lefttopy= p->data[0][3];
1081                 decode_422_bitstream(s, width-4);
1082                 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1083                 if(!(s->flags&CODEC_FLAG_GRAY)){
1084                     lefttopu= p->data[1][1];
1085                     lefttopv= p->data[2][1];
1086                     s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1087                     s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
1088                 }
1089                 y++; cy++;
1090
1091                 for(; y<height; y++,cy++){
1092                     uint8_t *ydst, *udst, *vdst;
1093
1094                     if(s->bitstream_bpp==12){
1095                         while(2*cy > y){
1096                             decode_gray_bitstream(s, width);
1097                             ydst= p->data[0] + p->linesize[0]*y;
1098                             s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1099                             y++;
1100                         }
1101                         if(y>=height) break;
1102                     }
1103                     draw_slice(s, y);
1104
1105                     decode_422_bitstream(s, width);
1106
1107                     ydst= p->data[0] + p->linesize[0]*y;
1108                     udst= p->data[1] + p->linesize[1]*cy;
1109                     vdst= p->data[2] + p->linesize[2]*cy;
1110
1111                     s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1112                     if(!(s->flags&CODEC_FLAG_GRAY)){
1113                         s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1114                         s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1115                     }
1116                 }
1117
1118                 draw_slice(s, height);
1119                 break;
1120             }
1121         }
1122     }else{
1123         int y;
1124         int leftr, leftg, leftb, lefta;
1125         const int last_line= (height-1)*p->linesize[0];
1126
1127         if(s->bitstream_bpp==32){
1128             lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
1129             leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1130             leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1131             leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1132         }else{
1133             leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1134             leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1135             leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1136             lefta= p->data[0][last_line+A]= 255;
1137             skip_bits(&s->gb, 8);
1138         }
1139
1140         if(s->bgr32){
1141             switch(s->predictor){
1142             case LEFT:
1143             case PLANE:
1144                 decode_bgr_bitstream(s, width-1);
1145                 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
1146
1147                 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1148                     decode_bgr_bitstream(s, width);
1149
1150                     s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1151                     if(s->predictor == PLANE){
1152                         if(s->bitstream_bpp!=32) lefta=0;
1153                         if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1154                             s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1155                                              p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1156                         }
1157                     }
1158                 }
1159                 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1160                 break;
1161             default:
1162                 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1163             }
1164         }else{
1165
1166             av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1167             return -1;
1168         }
1169     }
1170     emms_c();
1171
1172     *picture= *p;
1173     *data_size = sizeof(AVFrame);
1174
1175     return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1176 }
1177 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1178
1179 static int common_end(HYuvContext *s){
1180     int i;
1181
1182     for(i=0; i<3; i++){
1183         av_freep(&s->temp[i]);
1184     }
1185     return 0;
1186 }
1187
1188 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1189 static av_cold int decode_end(AVCodecContext *avctx)
1190 {
1191     HYuvContext *s = avctx->priv_data;
1192     int i;
1193
1194     if (s->picture.data[0])
1195         avctx->release_buffer(avctx, &s->picture);
1196
1197     common_end(s);
1198     av_freep(&s->bitstream_buffer);
1199
1200     for(i=0; i<6; i++){
1201         free_vlc(&s->vlc[i]);
1202     }
1203
1204     return 0;
1205 }
1206 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1207
1208 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
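/* Encode one frame: with context modelling the adaptive tables are stored at
 * the start of the packet and the gathered statistics are halved; rows are
 * then predicted (left, plane or median), VLC-coded, and the finished packet
 * is byte-swapped 32 bits at a time. In pass 1 the statistics are written to
 * stats_out every 32 frames. */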
1209 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1210     HYuvContext *s = avctx->priv_data;
1211     AVFrame *pict = data;
1212     const int width= s->width;
1213     const int width2= s->width>>1;
1214     const int height= s->height;
1215     const int fake_ystride= s->interlaced ? pict->linesize[0]*2  : pict->linesize[0];
1216     const int fake_ustride= s->interlaced ? pict->linesize[1]*2  : pict->linesize[1];
1217     const int fake_vstride= s->interlaced ? pict->linesize[2]*2  : pict->linesize[2];
1218     AVFrame * const p= &s->picture;
1219     int i, j, size=0;
1220
1221     *p = *pict;
1222     p->pict_type= FF_I_TYPE;
1223     p->key_frame= 1;
1224
1225     if(s->context){
1226         for(i=0; i<3; i++){
1227             generate_len_table(s->len[i], s->stats[i], 256);
1228             if(generate_bits_table(s->bits[i], s->len[i])<0)
1229                 return -1;
1230             size+= store_table(s, s->len[i], &buf[size]);
1231         }
1232
1233         for(i=0; i<3; i++)
1234             for(j=0; j<256; j++)
1235                 s->stats[i][j] >>= 1;
1236     }
1237
1238     init_put_bits(&s->pb, buf+size, buf_size-size);
1239
1240     if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1241         int lefty, leftu, leftv, y, cy;
1242
1243         put_bits(&s->pb, 8, leftv= p->data[2][0]);
1244         put_bits(&s->pb, 8, lefty= p->data[0][1]);
1245         put_bits(&s->pb, 8, leftu= p->data[1][0]);
1246         put_bits(&s->pb, 8,        p->data[0][0]);
1247
1248         lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1249         leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1250         leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1251
1252         encode_422_bitstream(s, 2, width-2);
1253
1254         if(s->predictor==MEDIAN){
1255             int lefttopy, lefttopu, lefttopv;
1256             cy=y=1;
1257             if(s->interlaced){
1258                 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1259                 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1260                 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1261
1262                 encode_422_bitstream(s, 0, width);
1263                 y++; cy++;
1264             }
1265
1266             lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1267             leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1268             leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1269
1270             encode_422_bitstream(s, 0, 4);
1271
1272             lefttopy= p->data[0][3];
1273             lefttopu= p->data[1][1];
1274             lefttopv= p->data[2][1];
1275             s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1276             s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1277             s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1278             encode_422_bitstream(s, 0, width-4);
1279             y++; cy++;
1280
1281             for(; y<height; y++,cy++){
1282                 uint8_t *ydst, *udst, *vdst;
1283
1284                 if(s->bitstream_bpp==12){
1285                     while(2*cy > y){
1286                         ydst= p->data[0] + p->linesize[0]*y;
1287                         s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1288                         encode_gray_bitstream(s, width);
1289                         y++;
1290                     }
1291                     if(y>=height) break;
1292                 }
1293                 ydst= p->data[0] + p->linesize[0]*y;
1294                 udst= p->data[1] + p->linesize[1]*cy;
1295                 vdst= p->data[2] + p->linesize[2]*cy;
1296
1297                 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1298                 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1299                 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1300
1301                 encode_422_bitstream(s, 0, width);
1302             }
1303         }else{
1304             for(cy=y=1; y<height; y++,cy++){
1305                 uint8_t *ydst, *udst, *vdst;
1306
1307                 /* encode a luma only line & y++ */
1308                 if(s->bitstream_bpp==12){
1309                     ydst= p->data[0] + p->linesize[0]*y;
1310
1311                     if(s->predictor == PLANE && s->interlaced < y){
1312                         s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1313
1314                         lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1315                     }else{
1316                         lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1317                     }
1318                     encode_gray_bitstream(s, width);
1319                     y++;
1320                     if(y>=height) break;
1321                 }
1322
1323                 ydst= p->data[0] + p->linesize[0]*y;
1324                 udst= p->data[1] + p->linesize[1]*cy;
1325                 vdst= p->data[2] + p->linesize[2]*cy;
1326
1327                 if(s->predictor == PLANE && s->interlaced < cy){
1328                     s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1329                     s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1330                     s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1331
1332                     lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1333                     leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1334                     leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1335                 }else{
1336                     lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1337                     leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1338                     leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1339                 }
1340
1341                 encode_422_bitstream(s, 0, width);
1342             }
1343         }
1344     }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1345         uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1346         const int stride = -p->linesize[0];
1347         const int fake_stride = -fake_ystride;
1348         int y;
1349         int leftr, leftg, leftb;
1350
1351         put_bits(&s->pb, 8, leftr= data[R]);
1352         put_bits(&s->pb, 8, leftg= data[G]);
1353         put_bits(&s->pb, 8, leftb= data[B]);
1354         put_bits(&s->pb, 8, 0);
1355
1356         sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1357         encode_bgr_bitstream(s, width-1);
1358
1359         for(y=1; y<s->height; y++){
1360             uint8_t *dst = data + y*stride;
1361             if(s->predictor == PLANE && s->interlaced < y){
1362                 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1363                 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1364             }else{
1365                 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1366             }
1367             encode_bgr_bitstream(s, width);
1368         }
1369     }else{
1370         av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1371     }
1372     emms_c();
1373
1374     size+= (put_bits_count(&s->pb)+31)/8;
1375     put_bits(&s->pb, 16, 0);
1376     put_bits(&s->pb, 15, 0);
1377     size/= 4;
1378
1379     if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1380         int j;
1381         char *p= avctx->stats_out;
1382         char *end= p + 1024*30;
1383         for(i=0; i<3; i++){
1384             for(j=0; j<256; j++){
1385                 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1386                 p+= strlen(p);
1387                 s->stats[i][j]= 0;
1388             }
1389             snprintf(p, end-p, "\n");
1390             p++;
1391         }
1392     } else
1393         avctx->stats_out[0] = '\0';
1394     if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1395         flush_put_bits(&s->pb);
1396         s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1397     }
1398
1399     s->picture_number++;
1400
1401     return size*4;
1402 }
1403
1404 static av_cold int encode_end(AVCodecContext *avctx)
1405 {
1406     HYuvContext *s = avctx->priv_data;
1407
1408     common_end(s);
1409
1410     av_freep(&avctx->extradata);
1411     av_freep(&avctx->stats_out);
1412
1413     return 0;
1414 }
1415 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1416
1417 #if CONFIG_HUFFYUV_DECODER
1418 AVCodec huffyuv_decoder = {
1419     "huffyuv",
1420     AVMEDIA_TYPE_VIDEO,
1421     CODEC_ID_HUFFYUV,
1422     sizeof(HYuvContext),
1423     decode_init,
1424     NULL,
1425     decode_end,
1426     decode_frame,
1427     CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1428     NULL,
1429     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1430 };
1431 #endif
1432
1433 #if CONFIG_FFVHUFF_DECODER
1434 AVCodec ffvhuff_decoder = {
1435     "ffvhuff",
1436     AVMEDIA_TYPE_VIDEO,
1437     CODEC_ID_FFVHUFF,
1438     sizeof(HYuvContext),
1439     decode_init,
1440     NULL,
1441     decode_end,
1442     decode_frame,
1443     CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1444     NULL,
1445     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1446 };
1447 #endif
1448
1449 #if CONFIG_HUFFYUV_ENCODER
1450 AVCodec huffyuv_encoder = {
1451     "huffyuv",
1452     AVMEDIA_TYPE_VIDEO,
1453     CODEC_ID_HUFFYUV,
1454     sizeof(HYuvContext),
1455     encode_init,
1456     encode_frame,
1457     encode_end,
1458     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1459     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1460 };
1461 #endif
1462
1463 #if CONFIG_FFVHUFF_ENCODER
1464 AVCodec ffvhuff_encoder = {
1465     "ffvhuff",
1466     AVMEDIA_TYPE_VIDEO,
1467     CODEC_ID_FFVHUFF,
1468     sizeof(HYuvContext),
1469     encode_init,
1470     encode_frame,
1471     encode_end,
1472     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1473     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1474 };
1475 #endif