1 /*
2  * huffyuv codec for libavcodec
3  *
4  * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7  * the algorithm used
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25
26 /**
27  * @file libavcodec/huffyuv.c
28  * huffyuv codec for libavcodec.
29  */
30
31 #include "avcodec.h"
32 #include "bitstream.h"
33 #include "dsputil.h"
34
35 #define VLC_BITS 11
36
37 #ifdef WORDS_BIGENDIAN
38 #define B 3
39 #define G 2
40 #define R 1
41 #else
42 #define B 0
43 #define G 1
44 #define R 2
45 #endif
46
47 typedef enum Predictor{
48     LEFT= 0,
49     PLANE,
50     MEDIAN,
51 } Predictor;
52
53 typedef struct HYuvContext{
54     AVCodecContext *avctx;
55     Predictor predictor;
56     GetBitContext gb;
57     PutBitContext pb;
58     int interlaced;
59     int decorrelate;
60     int bitstream_bpp;
61     int version;
62     int yuy2;                               //use yuy2 instead of 422P
63     int bgr32;                              //use bgr32 instead of bgr24
64     int width, height;
65     int flags;
66     int context;
67     int picture_number;
68     int last_slice_end;
69     uint8_t *temp[3];
70     uint64_t stats[3][256];
71     uint8_t len[3][256];
72     uint32_t bits[3][256];
73     uint32_t pix_bgr_map[1<<VLC_BITS];
74     VLC vlc[6];                             //Y,U,V,YY,YU,YV
75     AVFrame picture;
76     uint8_t *bitstream_buffer;
77     unsigned int bitstream_buffer_size;
78     DSPContext dsp;
79 }HYuvContext;
80
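/* Built-in tables for version-1 ("classic") HuffYUV streams, which carry no
 * Huffman tables in extradata; they are consumed by read_old_huffman_tables(). */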
81 static const unsigned char classic_shift_luma[] = {
82   34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
83   16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
84   69,68, 0
85 };
86
87 static const unsigned char classic_shift_chroma[] = {
88   66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
89   56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
90   214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
91 };
92
93 static const unsigned char classic_add_luma[256] = {
94     3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
95    73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
96    68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
97    35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
98    37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
99    35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
100    27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
101    15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
102    12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
103    12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
104    18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
105    28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
106    28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
107    62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
108    54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
109    46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
110 };
111
112 static const unsigned char classic_add_chroma[256] = {
113     3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
114     7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
115    11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
116    43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
117   143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
118    80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
119    17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
120   112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
121     0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
122   135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
123    52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
124    19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
125     7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
126    83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
127    14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
128     6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
129 };
130
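/* Reverse the left (delta) prediction: accumulate the src deltas onto acc to
 * reconstruct dst, and return the last value so the caller can carry it into
 * the next call. The first loop is unrolled by two. */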
131 static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
132     int i;
133
134     for(i=0; i<w-1; i++){
135         acc+= src[i];
136         dst[i]= acc;
137         i++;
138         acc+= src[i];
139         dst[i]= acc;
140     }
141
142     for(; i<w; i++){
143         acc+= src[i];
144         dst[i]= acc;
145     }
146
147     return acc;
148 }
149
150 static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
151     int i;
152     int r,g,b;
153     r= *red;
154     g= *green;
155     b= *blue;
156
157     for(i=0; i<w; i++){
158         b+= src[4*i+B];
159         g+= src[4*i+G];
160         r+= src[4*i+R];
161
162         dst[4*i+B]= b;
163         dst[4*i+G]= g;
164         dst[4*i+R]= r;
165     }
166
167     *red= r;
168     *green= g;
169     *blue= b;
170 }
171
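/* Forward left prediction for the encoder: store each byte as the difference
 * to the previous pixel. Rows of 32 or more pixels use diff_bytes() from the
 * DSP context after a 16-pixel scalar prologue. Returns the last source byte
 * as the new left value. */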
172 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
173     int i;
174     if(w<32){
175         for(i=0; i<w; i++){
176             const int temp= src[i];
177             dst[i]= temp - left;
178             left= temp;
179         }
180         return left;
181     }else{
182         for(i=0; i<16; i++){
183             const int temp= src[i];
184             dst[i]= temp - left;
185             left= temp;
186         }
187         s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
188         return src[w-1];
189     }
190 }
191
192 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
193     int i;
194     int r,g,b;
195     r= *red;
196     g= *green;
197     b= *blue;
198     for(i=0; i<FFMIN(w,4); i++){
199         const int rt= src[i*4+R];
200         const int gt= src[i*4+G];
201         const int bt= src[i*4+B];
202         dst[i*4+R]= rt - r;
203         dst[i*4+G]= gt - g;
204         dst[i*4+B]= bt - b;
205         r = rt;
206         g = gt;
207         b = bt;
208     }
209     s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
210     *red=   src[(w-1)*4+R];
211     *green= src[(w-1)*4+G];
212     *blue=  src[(w-1)*4+B];
213 }
214
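/* Read a run-length coded table of code lengths: each run is a 3-bit repeat
 * count followed by a 5-bit length value; a repeat count of 0 means an
 * explicit 8-bit repeat count follows. */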
215 static void read_len_table(uint8_t *dst, GetBitContext *gb){
216     int i, val, repeat;
217
218     for(i=0; i<256;){
219         repeat= get_bits(gb, 3);
220         val   = get_bits(gb, 5);
221         if(repeat==0)
222             repeat= get_bits(gb, 8);
223 //printf("%d %d\n", val, repeat);
224         while (repeat--)
225             dst[i++] = val;
226     }
227 }
228
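/* Assign canonical Huffman codes from the code lengths, starting with the
 * longest codes. The parity check after each length detects length tables
 * that do not describe a valid prefix code. */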
229 static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
230     int len, index;
231     uint32_t bits=0;
232
233     for(len=32; len>0; len--){
234         for(index=0; index<256; index++){
235             if(len_table[index]==len)
236                 dst[index]= bits++;
237         }
238         if(bits & 1){
239             av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
240             return -1;
241         }
242         bits >>= 1;
243     }
244     return 0;
245 }
246
247 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
248 typedef struct {
249     uint64_t val;
250     int name;
251 } HeapElem;
252
253 static void heap_sift(HeapElem *h, int root, int size)
254 {
255     while(root*2+1 < size) {
256         int child = root*2+1;
257         if(child < size-1 && h[child].val > h[child+1].val)
258             child++;
259         if(h[root].val > h[child].val) {
260             FFSWAP(HeapElem, h[root], h[child]);
261             root = child;
262         } else
263             break;
264     }
265 }
266
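/* Build Huffman code lengths from the symbol statistics with a min-heap.
 * If any length reaches 32 bits, the construction is retried with a larger
 * offset added to the scaled stats, flattening the distribution until every
 * code fits. */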
267 static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
268     HeapElem h[size];
269     int up[2*size];
270     int len[2*size];
271     int offset, i, next;
272
273     for(offset=1; ; offset<<=1){
274         for(i=0; i<size; i++){
275             h[i].name = i;
276             h[i].val = (stats[i] << 8) + offset;
277         }
278         for(i=size/2-1; i>=0; i--)
279             heap_sift(h, i, size);
280
281         for(next=size; next<size*2-1; next++){
282             // merge the two smallest entries and put the result back in the heap
283             uint64_t min1v = h[0].val;
284             up[h[0].name] = next;
285             h[0].val = INT64_MAX;
286             heap_sift(h, 0, size);
287             up[h[0].name] = next;
288             h[0].name = next;
289             h[0].val += min1v;
290             heap_sift(h, 0, size);
291         }
292
293         len[2*size-2] = 0;
294         for(i=2*size-3; i>=size; i--)
295             len[i] = len[up[i]] + 1;
296         for(i=0; i<size; i++) {
297             dst[i] = len[up[i]] + 1;
298             if(dst[i] >= 32) break;
299         }
300         if(i==size) break;
301     }
302 }
303 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
304
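/* Build the joint VLC tables used by the decoder to read two symbols (or a
 * whole BGR pixel) per table lookup, by enumerating the symbol combinations
 * whose total code length fits into VLC_BITS. */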
305 static void generate_joint_tables(HYuvContext *s){
306     uint16_t symbols[1<<VLC_BITS];
307     uint16_t bits[1<<VLC_BITS];
308     uint8_t len[1<<VLC_BITS];
309     if(s->bitstream_bpp < 24){
310         int p, i, y, u;
311         for(p=0; p<3; p++){
312             for(i=y=0; y<256; y++){
313                 int len0 = s->len[0][y];
314                 int limit = VLC_BITS - len0;
315                 if(limit <= 0)
316                     continue;
317                 for(u=0; u<256; u++){
318                     int len1 = s->len[p][u];
319                     if(len1 > limit)
320                         continue;
321                     len[i] = len0 + len1;
322                     bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
323                     symbols[i] = (y<<8) + u;
324                     if(symbols[i] != 0xffff) // reserved to mean "invalid"
325                         i++;
326                 }
327             }
328             free_vlc(&s->vlc[3+p]);
329             init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
330         }
331     }else{
332         uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
333         int i, b, g, r, code;
334         int p0 = s->decorrelate;
335         int p1 = !s->decorrelate;
336         // restrict the range to +/-16 because that's pretty much guaranteed to
337         // cover all the combinations that fit in 11 bits total, and it doesn't
338         // matter if we miss a few rare codes.
339         for(i=0, g=-16; g<16; g++){
340             int len0 = s->len[p0][g&255];
341             int limit0 = VLC_BITS - len0;
342             if(limit0 < 2)
343                 continue;
344             for(b=-16; b<16; b++){
345                 int len1 = s->len[p1][b&255];
346                 int limit1 = limit0 - len1;
347                 if(limit1 < 1)
348                     continue;
349                 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
350                 for(r=-16; r<16; r++){
351                     int len2 = s->len[2][r&255];
352                     if(len2 > limit1)
353                         continue;
354                     len[i] = len0 + len1 + len2;
355                     bits[i] = (code << len2) + s->bits[2][r&255];
356                     if(s->decorrelate){
357                         map[i][G] = g;
358                         map[i][B] = g+b;
359                         map[i][R] = g+r;
360                     }else{
361                         map[i][B] = g;
362                         map[i][G] = b;
363                         map[i][R] = r;
364                     }
365                     i++;
366                 }
367             }
368         }
369         free_vlc(&s->vlc[3]);
370         init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
371     }
372 }
373
374 static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
375     GetBitContext gb;
376     int i;
377
378     init_get_bits(&gb, src, length*8);
379
380     for(i=0; i<3; i++){
381         read_len_table(s->len[i], &gb);
382
383         if(generate_bits_table(s->bits[i], s->len[i])<0){
384             return -1;
385         }
386 #if 0
387 for(j=0; j<256; j++){
388 printf("%6X, %2d,  %3d\n", s->bits[i][j], s->len[i][j], j);
389 }
390 #endif
391         free_vlc(&s->vlc[i]);
392         init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
393     }
394
395     generate_joint_tables(s);
396
397     return (get_bits_count(&gb)+7)/8;
398 }
399
400 static int read_old_huffman_tables(HYuvContext *s){
401 #if 1
402     GetBitContext gb;
403     int i;
404
405     init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
406     read_len_table(s->len[0], &gb);
407     init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
408     read_len_table(s->len[1], &gb);
409
410     for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma  [i];
411     for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
412
413     if(s->bitstream_bpp >= 24){
414         memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
415         memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
416     }
417     memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
418     memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
419
420     for(i=0; i<3; i++){
421         free_vlc(&s->vlc[i]);
422         init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
423     }
424
425     generate_joint_tables(s);
426
427     return 0;
428 #else
429     av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported\n");
430     return -1;
431 #endif
432 }
433
434 static av_cold void alloc_temp(HYuvContext *s){
435     int i;
436
437     if(s->bitstream_bpp<24){
438         for(i=0; i<3; i++){
439             s->temp[i]= av_malloc(s->width + 16);
440         }
441     }else{
442         for(i=0; i<2; i++){
443             s->temp[i]= av_malloc(4*s->width + 16);
444         }
445     }
446 }
447
448 static av_cold int common_init(AVCodecContext *avctx){
449     HYuvContext *s = avctx->priv_data;
450
451     s->avctx= avctx;
452     s->flags= avctx->flags;
453
454     dsputil_init(&s->dsp, avctx);
455
456     s->width= avctx->width;
457     s->height= avctx->height;
458     assert(s->width>0 && s->height>0);
459
460     return 0;
461 }
462
463 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
464 static av_cold int decode_init(AVCodecContext *avctx)
465 {
466     HYuvContext *s = avctx->priv_data;
467
468     common_init(avctx);
469     memset(s->vlc, 0, 3*sizeof(VLC));
470
471     avctx->coded_frame= &s->picture;
472     s->interlaced= s->height > 288;
473
474 s->bgr32=1;
475 //if(avctx->extradata)
476 //  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
477     if(avctx->extradata_size){
478         if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
479             s->version=1; // do such files exist at all?
480         else
481             s->version=2;
482     }else
483         s->version=0;
484
485     if(s->version==2){
486         int method, interlace;
487
488         method= ((uint8_t*)avctx->extradata)[0];
489         s->decorrelate= method&64 ? 1 : 0;
490         s->predictor= method&63;
491         s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
492         if(s->bitstream_bpp==0)
493             s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
494         interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
495         s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
496         s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
497
498         if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
499             return -1;
500     }else{
501         switch(avctx->bits_per_coded_sample&7){
502         case 1:
503             s->predictor= LEFT;
504             s->decorrelate= 0;
505             break;
506         case 2:
507             s->predictor= LEFT;
508             s->decorrelate= 1;
509             break;
510         case 3:
511             s->predictor= PLANE;
512             s->decorrelate= avctx->bits_per_coded_sample >= 24;
513             break;
514         case 4:
515             s->predictor= MEDIAN;
516             s->decorrelate= 0;
517             break;
518         default:
519             s->predictor= LEFT; //OLD
520             s->decorrelate= 0;
521             break;
522         }
523         s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
524         s->context= 0;
525
526         if(read_old_huffman_tables(s) < 0)
527             return -1;
528     }
529
530     switch(s->bitstream_bpp){
531     case 12:
532         avctx->pix_fmt = PIX_FMT_YUV420P;
533         break;
534     case 16:
535         if(s->yuy2){
536             avctx->pix_fmt = PIX_FMT_YUYV422;
537         }else{
538             avctx->pix_fmt = PIX_FMT_YUV422P;
539         }
540         break;
541     case 24:
542     case 32:
543         if(s->bgr32){
544             avctx->pix_fmt = PIX_FMT_RGB32;
545         }else{
546             avctx->pix_fmt = PIX_FMT_BGR24;
547         }
548         break;
549     default:
550         assert(0);
551     }
552
553     alloc_temp(s);
554
555 //    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
556
557     return 0;
558 }
559 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
560
561 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
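/* Store a length table in the run-length coded format expected by
 * read_len_table(); returns the number of bytes written. */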
562 static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
563     int i;
564     int index= 0;
565
566     for(i=0; i<256;){
567         int val= len[i];
568         int repeat=0;
569
570         for(; i<256 && len[i]==val && repeat<255; i++)
571             repeat++;
572
573         assert(val < 32 && val >0 && repeat<256 && repeat>0);
574         if(repeat>7){
575             buf[index++]= val;
576             buf[index++]= repeat;
577         }else{
578             buf[index++]= val | (repeat<<5);
579         }
580     }
581
582     return index;
583 }
584
585 static av_cold int encode_init(AVCodecContext *avctx)
586 {
587     HYuvContext *s = avctx->priv_data;
588     int i, j;
589
590     common_init(avctx);
591
592     avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
593     avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
594     s->version=2;
595
596     avctx->coded_frame= &s->picture;
597
598     switch(avctx->pix_fmt){
599     case PIX_FMT_YUV420P:
600         s->bitstream_bpp= 12;
601         break;
602     case PIX_FMT_YUV422P:
603         s->bitstream_bpp= 16;
604         break;
605     case PIX_FMT_RGB32:
606         s->bitstream_bpp= 24;
607         break;
608     default:
609         av_log(avctx, AV_LOG_ERROR, "format not supported\n");
610         return -1;
611     }
612     avctx->bits_per_coded_sample= s->bitstream_bpp;
613     s->decorrelate= s->bitstream_bpp >= 24;
614     s->predictor= avctx->prediction_method;
615     s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
616     if(avctx->context_model==1){
617         s->context= avctx->context_model;
618         if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
619             av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
620             return -1;
621         }
622     }else s->context= 0;
623
624     if(avctx->codec->id==CODEC_ID_HUFFYUV){
625         if(avctx->pix_fmt==PIX_FMT_YUV420P){
626             av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
627             return -1;
628         }
629         if(avctx->context_model){
630             av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
631             return -1;
632         }
633         if(s->interlaced != ( s->height > 288 ))
634             av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
635     }
636
637     if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
638         av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
639         return -1;
640     }
641
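    /* 4-byte extradata header: byte 0 = predictor and decorrelate flag,
     * byte 1 = bitstream bpp, byte 2 = interlace and context flags,
     * byte 3 = 0; the three run-length coded length tables follow. */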
642     ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
643     ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
644     ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
645     if(s->context)
646         ((uint8_t*)avctx->extradata)[2]|= 0x40;
647     ((uint8_t*)avctx->extradata)[3]= 0;
648     s->avctx->extradata_size= 4;
649
650     if(avctx->stats_in){
651         char *p= avctx->stats_in;
652
653         for(i=0; i<3; i++)
654             for(j=0; j<256; j++)
655                 s->stats[i][j]= 1;
656
657         for(;;){
658             for(i=0; i<3; i++){
659                 char *next;
660
661                 for(j=0; j<256; j++){
662                     s->stats[i][j]+= strtol(p, &next, 0);
663                     if(next==p) return -1;
664                     p=next;
665                 }
666             }
667             if(p[0]==0 || p[1]==0 || p[2]==0) break;
668         }
669     }else{
670         for(i=0; i<3; i++)
671             for(j=0; j<256; j++){
672                 int d= FFMIN(j, 256-j);
673
674                 s->stats[i][j]= 100000000/(d+1);
675             }
676     }
677
678     for(i=0; i<3; i++){
679         generate_len_table(s->len[i], s->stats[i], 256);
680
681         if(generate_bits_table(s->bits[i], s->len[i])<0){
682             return -1;
683         }
684
685         s->avctx->extradata_size+=
686         store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
687     }
688
689     if(s->context){
690         for(i=0; i<3; i++){
691             int pels = s->width*s->height / (i?40:10);
692             for(j=0; j<256; j++){
693                 int d= FFMIN(j, 256-j);
694                 s->stats[i][j]= pels/(d+1);
695             }
696         }
697     }else{
698         for(i=0; i<3; i++)
699             for(j=0; j<256; j++)
700                 s->stats[i][j]= 0;
701     }
702
703 //    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
704
705     alloc_temp(s);
706
707     s->picture_number=0;
708
709     return 0;
710 }
711 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
712
713 /* TODO instead of restarting the read when the code isn't in the first level
714  * of the joint table, jump into the 2nd level of the individual table. */
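/* READ_2PIX reads two symbols with one lookup in the joint table; the
 * reserved code 0xffff signals a combination that is not in the joint table,
 * in which case the two symbols are decoded separately. */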
715 #define READ_2PIX(dst0, dst1, plane1){\
716     uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
717     if(code != 0xffff){\
718         dst0 = code>>8;\
719         dst1 = code;\
720     }else{\
721         dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
722         dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
723     }\
724 }
725
726 static void decode_422_bitstream(HYuvContext *s, int count){
727     int i;
728
729     count/=2;
730
731     for(i=0; i<count; i++){
732         READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
733         READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
734     }
735 }
736
737 static void decode_gray_bitstream(HYuvContext *s, int count){
738     int i;
739
740     count/=2;
741
742     for(i=0; i<count; i++){
743         READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
744     }
745 }
746
747 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
748 static int encode_422_bitstream(HYuvContext *s, int count){
749     int i;
750
751     if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
752         av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
753         return -1;
754     }
755
756 #define LOAD4\
757             int y0 = s->temp[0][2*i];\
758             int y1 = s->temp[0][2*i+1];\
759             int u0 = s->temp[1][i];\
760             int v0 = s->temp[2][i];
761
762     count/=2;
763     if(s->flags&CODEC_FLAG_PASS1){
764         for(i=0; i<count; i++){
765             LOAD4;
766             s->stats[0][y0]++;
767             s->stats[1][u0]++;
768             s->stats[0][y1]++;
769             s->stats[2][v0]++;
770         }
771     }
772     if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
773         return 0;
774     if(s->context){
775         for(i=0; i<count; i++){
776             LOAD4;
777             s->stats[0][y0]++;
778             put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
779             s->stats[1][u0]++;
780             put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
781             s->stats[0][y1]++;
782             put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
783             s->stats[2][v0]++;
784             put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
785         }
786     }else{
787         for(i=0; i<count; i++){
788             LOAD4;
789             put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
790             put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
791             put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
792             put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
793         }
794     }
795     return 0;
796 }
797
798 static int encode_gray_bitstream(HYuvContext *s, int count){
799     int i;
800
801     if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
802         av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
803         return -1;
804     }
805
806 #define LOAD2\
807             int y0 = s->temp[0][2*i];\
808             int y1 = s->temp[0][2*i+1];
809 #define STAT2\
810             s->stats[0][y0]++;\
811             s->stats[0][y1]++;
812 #define WRITE2\
813             put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
814             put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
815
816     count/=2;
817     if(s->flags&CODEC_FLAG_PASS1){
818         for(i=0; i<count; i++){
819             LOAD2;
820             STAT2;
821         }
822     }
823     if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
824         return 0;
825
826     if(s->context){
827         for(i=0; i<count; i++){
828             LOAD2;
829             STAT2;
830             WRITE2;
831         }
832     }else{
833         for(i=0; i<count; i++){
834             LOAD2;
835             WRITE2;
836         }
837     }
838     return 0;
839 }
840 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
841
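/* Decode count BGR(A) pixels: first try the joint table, which yields a whole
 * pixel per VLC read; on a miss, fall back to per-component reads, undoing the
 * green decorrelation if it was used. Any alpha code is read and discarded. */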
842 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
843     int i;
844     for(i=0; i<count; i++){
845         int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
846         if(code != -1){
847             *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
848         }else if(decorrelate){
849             s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
850             s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
851             s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
852         }else{
853             s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
854             s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
855             s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
856         }
857         if(alpha)
858             get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
859     }
860 }
861
862 static void decode_bgr_bitstream(HYuvContext *s, int count){
863     if(s->decorrelate){
864         if(s->bitstream_bpp==24)
865             decode_bgr_1(s, count, 1, 0);
866         else
867             decode_bgr_1(s, count, 1, 1);
868     }else{
869         if(s->bitstream_bpp==24)
870             decode_bgr_1(s, count, 0, 0);
871         else
872             decode_bgr_1(s, count, 0, 1);
873     }
874 }
875
876 static int encode_bgr_bitstream(HYuvContext *s, int count){
877     int i;
878
879     if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
880         av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
881         return -1;
882     }
883
884 #define LOAD3\
885             int g= s->temp[0][4*i+G];\
886             int b= (s->temp[0][4*i+B] - g) & 0xff;\
887             int r= (s->temp[0][4*i+R] - g) & 0xff;
888 #define STAT3\
889             s->stats[0][b]++;\
890             s->stats[1][g]++;\
891             s->stats[2][r]++;
892 #define WRITE3\
893             put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
894             put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
895             put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
896
897     if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
898         for(i=0; i<count; i++){
899             LOAD3;
900             STAT3;
901         }
902     }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
903         for(i=0; i<count; i++){
904             LOAD3;
905             STAT3;
906             WRITE3;
907         }
908     }else{
909         for(i=0; i<count; i++){
910             LOAD3;
911             WRITE3;
912         }
913     }
914     return 0;
915 }
916
917 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
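/* Pass the rows decoded since the last call to the application through
 * draw_horiz_band(), if that callback is set. */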
918 static void draw_slice(HYuvContext *s, int y){
919     int h, cy;
920     int offset[4];
921
922     if(s->avctx->draw_horiz_band==NULL)
923         return;
924
925     h= y - s->last_slice_end;
926     y -= h;
927
928     if(s->bitstream_bpp==12){
929         cy= y>>1;
930     }else{
931         cy= y;
932     }
933
934     offset[0] = s->picture.linesize[0]*y;
935     offset[1] = s->picture.linesize[1]*cy;
936     offset[2] = s->picture.linesize[2]*cy;
937     offset[3] = 0;
938     emms_c();
939
940     s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
941
942     s->last_slice_end= y + h;
943 }
944
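/* Decode one frame: byte-swap the input into bitstream_buffer, read per-frame
 * Huffman tables if the context model is used, then undo the prediction
 * (left, plane or median) line by line for the chosen pixel format. */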
945 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
946     const uint8_t *buf = avpkt->data;
947     int buf_size = avpkt->size;
948     HYuvContext *s = avctx->priv_data;
949     const int width= s->width;
950     const int width2= s->width>>1;
951     const int height= s->height;
952     int fake_ystride, fake_ustride, fake_vstride;
953     AVFrame * const p= &s->picture;
954     int table_size= 0;
955
956     AVFrame *picture = data;
957
958     s->bitstream_buffer= av_fast_realloc(s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
959
960     s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
961
962     if(p->data[0])
963         avctx->release_buffer(avctx, p);
964
965     p->reference= 0;
966     if(avctx->get_buffer(avctx, p) < 0){
967         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
968         return -1;
969     }
970
971     if(s->context){
972         table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
973         if(table_size < 0)
974             return -1;
975     }
976
977     if((unsigned)(buf_size-table_size) >= INT_MAX/8)
978         return -1;
979
980     init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
981
982     fake_ystride= s->interlaced ? p->linesize[0]*2  : p->linesize[0];
983     fake_ustride= s->interlaced ? p->linesize[1]*2  : p->linesize[1];
984     fake_vstride= s->interlaced ? p->linesize[2]*2  : p->linesize[2];
985
986     s->last_slice_end= 0;
987
988     if(s->bitstream_bpp<24){
989         int y, cy;
990         int lefty, leftu, leftv;
991         int lefttopy, lefttopu, lefttopv;
992
993         if(s->yuy2){
994             p->data[0][3]= get_bits(&s->gb, 8);
995             p->data[0][2]= get_bits(&s->gb, 8);
996             p->data[0][1]= get_bits(&s->gb, 8);
997             p->data[0][0]= get_bits(&s->gb, 8);
998
999             av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
1000             return -1;
1001         }else{
1002
1003             leftv= p->data[2][0]= get_bits(&s->gb, 8);
1004             lefty= p->data[0][1]= get_bits(&s->gb, 8);
1005             leftu= p->data[1][0]= get_bits(&s->gb, 8);
1006                    p->data[0][0]= get_bits(&s->gb, 8);
1007
1008             switch(s->predictor){
1009             case LEFT:
1010             case PLANE:
1011                 decode_422_bitstream(s, width-2);
1012                 lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1013                 if(!(s->flags&CODEC_FLAG_GRAY)){
1014                     leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1015                     leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1016                 }
1017
1018                 for(cy=y=1; y<s->height; y++,cy++){
1019                     uint8_t *ydst, *udst, *vdst;
1020
1021                     if(s->bitstream_bpp==12){
1022                         decode_gray_bitstream(s, width);
1023
1024                         ydst= p->data[0] + p->linesize[0]*y;
1025
1026                         lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
1027                         if(s->predictor == PLANE){
1028                             if(y>s->interlaced)
1029                                 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1030                         }
1031                         y++;
1032                         if(y>=s->height) break;
1033                     }
1034
1035                     draw_slice(s, y);
1036
1037                     ydst= p->data[0] + p->linesize[0]*y;
1038                     udst= p->data[1] + p->linesize[1]*cy;
1039                     vdst= p->data[2] + p->linesize[2]*cy;
1040
1041                     decode_422_bitstream(s, width);
1042                     lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
1043                     if(!(s->flags&CODEC_FLAG_GRAY)){
1044                         leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
1045                         leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
1046                     }
1047                     if(s->predictor == PLANE){
1048                         if(cy>s->interlaced){
1049                             s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1050                             if(!(s->flags&CODEC_FLAG_GRAY)){
1051                                 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1052                                 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1053                             }
1054                         }
1055                     }
1056                 }
1057                 draw_slice(s, height);
1058
1059                 break;
1060             case MEDIAN:
1061                 /* first line except first 2 pixels is left predicted */
1062                 decode_422_bitstream(s, width-2);
1063                 lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1064                 if(!(s->flags&CODEC_FLAG_GRAY)){
1065                     leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1066                     leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1067                 }
1068
1069                 cy=y=1;
1070
1071                 /* second line is left predicted for interlaced case */
1072                 if(s->interlaced){
1073                     decode_422_bitstream(s, width);
1074                     lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1075                     if(!(s->flags&CODEC_FLAG_GRAY)){
1076                         leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1077                         leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1078                     }
1079                     y++; cy++;
1080                 }
1081
1082                 /* next 4 pixels are left predicted too */
1083                 decode_422_bitstream(s, 4);
1084                 lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1085                 if(!(s->flags&CODEC_FLAG_GRAY)){
1086                     leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1087                     leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1088                 }
1089
1090                 /* next line except the first 4 pixels is median predicted */
1091                 lefttopy= p->data[0][3];
1092                 decode_422_bitstream(s, width-4);
1093                 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1094                 if(!(s->flags&CODEC_FLAG_GRAY)){
1095                     lefttopu= p->data[1][1];
1096                     lefttopv= p->data[2][1];
1097                     s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1098                     s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
1099                 }
1100                 y++; cy++;
1101
1102                 for(; y<height; y++,cy++){
1103                     uint8_t *ydst, *udst, *vdst;
1104
1105                     if(s->bitstream_bpp==12){
1106                         while(2*cy > y){
1107                             decode_gray_bitstream(s, width);
1108                             ydst= p->data[0] + p->linesize[0]*y;
1109                             s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1110                             y++;
1111                         }
1112                         if(y>=height) break;
1113                     }
1114                     draw_slice(s, y);
1115
1116                     decode_422_bitstream(s, width);
1117
1118                     ydst= p->data[0] + p->linesize[0]*y;
1119                     udst= p->data[1] + p->linesize[1]*cy;
1120                     vdst= p->data[2] + p->linesize[2]*cy;
1121
1122                     s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1123                     if(!(s->flags&CODEC_FLAG_GRAY)){
1124                         s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1125                         s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1126                     }
1127                 }
1128
1129                 draw_slice(s, height);
1130                 break;
1131             }
1132         }
1133     }else{
1134         int y;
1135         int leftr, leftg, leftb;
1136         const int last_line= (height-1)*p->linesize[0];
1137
1138         if(s->bitstream_bpp==32){
1139             skip_bits(&s->gb, 8);
1140             leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1141             leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1142             leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1143         }else{
1144             leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1145             leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1146             leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1147             skip_bits(&s->gb, 8);
1148         }
1149
1150         if(s->bgr32){
1151             switch(s->predictor){
1152             case LEFT:
1153             case PLANE:
1154                 decode_bgr_bitstream(s, width-1);
1155                 add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);
1156
1157                 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1158                     decode_bgr_bitstream(s, width);
1159
1160                     add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
1161                     if(s->predictor == PLANE){
1162                         if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1163                             s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1164                                              p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1165                         }
1166                     }
1167                 }
1168                 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1169                 break;
1170             default:
1171                 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1172             }
1173         }else{
1174
1175             av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1176             return -1;
1177         }
1178     }
1179     emms_c();
1180
1181     *picture= *p;
1182     *data_size = sizeof(AVFrame);
1183
1184     return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1185 }
1186 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1187
1188 static int common_end(HYuvContext *s){
1189     int i;
1190
1191     for(i=0; i<3; i++){
1192         av_freep(&s->temp[i]);
1193     }
1194     return 0;
1195 }
1196
1197 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1198 static av_cold int decode_end(AVCodecContext *avctx)
1199 {
1200     HYuvContext *s = avctx->priv_data;
1201     int i;
1202
1203     common_end(s);
1204     av_freep(&s->bitstream_buffer);
1205
1206     for(i=0; i<6; i++){
1207         free_vlc(&s->vlc[i]);
1208     }
1209
1210     return 0;
1211 }
1212 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1213
1214 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
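/* When the context model is enabled, per-frame Huffman tables are built from
 * the running statistics and stored at the start of each frame; the statistics
 * are then halved so that recent frames dominate. */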
1215 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1216     HYuvContext *s = avctx->priv_data;
1217     AVFrame *pict = data;
1218     const int width= s->width;
1219     const int width2= s->width>>1;
1220     const int height= s->height;
1221     const int fake_ystride= s->interlaced ? pict->linesize[0]*2  : pict->linesize[0];
1222     const int fake_ustride= s->interlaced ? pict->linesize[1]*2  : pict->linesize[1];
1223     const int fake_vstride= s->interlaced ? pict->linesize[2]*2  : pict->linesize[2];
1224     AVFrame * const p= &s->picture;
1225     int i, j, size=0;
1226
1227     *p = *pict;
1228     p->pict_type= FF_I_TYPE;
1229     p->key_frame= 1;
1230
1231     if(s->context){
1232         for(i=0; i<3; i++){
1233             generate_len_table(s->len[i], s->stats[i], 256);
1234             if(generate_bits_table(s->bits[i], s->len[i])<0)
1235                 return -1;
1236             size+= store_table(s, s->len[i], &buf[size]);
1237         }
1238
1239         for(i=0; i<3; i++)
1240             for(j=0; j<256; j++)
1241                 s->stats[i][j] >>= 1;
1242     }
1243
1244     init_put_bits(&s->pb, buf+size, buf_size-size);
1245
1246     if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1247         int lefty, leftu, leftv, y, cy;
1248
1249         put_bits(&s->pb, 8, leftv= p->data[2][0]);
1250         put_bits(&s->pb, 8, lefty= p->data[0][1]);
1251         put_bits(&s->pb, 8, leftu= p->data[1][0]);
1252         put_bits(&s->pb, 8,        p->data[0][0]);
1253
1254         lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
1255         leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
1256         leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);
1257
1258         encode_422_bitstream(s, width-2);
1259
1260         if(s->predictor==MEDIAN){
1261             int lefttopy, lefttopu, lefttopv;
1262             cy=y=1;
1263             if(s->interlaced){
1264                 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1265                 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1266                 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1267
1268                 encode_422_bitstream(s, width);
1269                 y++; cy++;
1270             }
1271
1272             lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1273             leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1274             leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1275
1276             encode_422_bitstream(s, 4);
1277
1278             lefttopy= p->data[0][3];
1279             lefttopu= p->data[1][1];
1280             lefttopv= p->data[2][1];
1281             s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1282             s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1283             s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1284             encode_422_bitstream(s, width-4);
1285             y++; cy++;
1286
1287             for(; y<height; y++,cy++){
1288                 uint8_t *ydst, *udst, *vdst;
1289
1290                 if(s->bitstream_bpp==12){
1291                     while(2*cy > y){
1292                         ydst= p->data[0] + p->linesize[0]*y;
1293                         s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1294                         encode_gray_bitstream(s, width);
1295                         y++;
1296                     }
1297                     if(y>=height) break;
1298                 }
1299                 ydst= p->data[0] + p->linesize[0]*y;
1300                 udst= p->data[1] + p->linesize[1]*cy;
1301                 vdst= p->data[2] + p->linesize[2]*cy;
1302
1303                 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1304                 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1305                 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1306
1307                 encode_422_bitstream(s, width);
1308             }
1309         }else{
1310             for(cy=y=1; y<height; y++,cy++){
1311                 uint8_t *ydst, *udst, *vdst;
1312
1313                 /* encode a luma only line & y++ */
1314                 if(s->bitstream_bpp==12){
1315                     ydst= p->data[0] + p->linesize[0]*y;
1316
1317                     if(s->predictor == PLANE && s->interlaced < y){
1318                         s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1319
1320                         lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1321                     }else{
1322                         lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1323                     }
1324                     encode_gray_bitstream(s, width);
1325                     y++;
1326                     if(y>=height) break;
1327                 }
1328
1329                 ydst= p->data[0] + p->linesize[0]*y;
1330                 udst= p->data[1] + p->linesize[1]*cy;
1331                 vdst= p->data[2] + p->linesize[2]*cy;
1332
1333                 if(s->predictor == PLANE && s->interlaced < cy){
1334                     s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1335                     s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1336                     s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1337
1338                     lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1339                     leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1340                     leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1341                 }else{
1342                     lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1343                     leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1344                     leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1345                 }
1346
1347                 encode_422_bitstream(s, width);
1348             }
1349         }
1350     }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1351         uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1352         const int stride = -p->linesize[0];
1353         const int fake_stride = -fake_ystride;
1354         int y;
1355         int leftr, leftg, leftb;
1356
1357         put_bits(&s->pb, 8, leftr= data[R]);
1358         put_bits(&s->pb, 8, leftg= data[G]);
1359         put_bits(&s->pb, 8, leftb= data[B]);
1360         put_bits(&s->pb, 8, 0);
1361
1362         sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1363         encode_bgr_bitstream(s, width-1);
1364
1365         for(y=1; y<s->height; y++){
1366             uint8_t *dst = data + y*stride;
1367             if(s->predictor == PLANE && s->interlaced < y){
1368                 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1369                 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1370             }else{
1371                 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1372             }
1373             encode_bgr_bitstream(s, width);
1374         }
1375     }else{
1376         av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1377     }
1378     emms_c();
1379
1380     size+= (put_bits_count(&s->pb)+31)/8;
1381     put_bits(&s->pb, 16, 0);
1382     put_bits(&s->pb, 15, 0);
1383     size/= 4;
1384
1385     if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1386         int j;
1387         char *p= avctx->stats_out;
1388         char *end= p + 1024*30;
1389         for(i=0; i<3; i++){
1390             for(j=0; j<256; j++){
1391                 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1392                 p+= strlen(p);
1393                 s->stats[i][j]= 0;
1394             }
1395             snprintf(p, end-p, "\n");
1396             p++;
1397         }
1398     } else
1399         avctx->stats_out[0] = '\0';
1400     if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1401         flush_put_bits(&s->pb);
1402         s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1403     }
1404
1405     s->picture_number++;
1406
1407     return size*4;
1408 }
1409
1410 static av_cold int encode_end(AVCodecContext *avctx)
1411 {
1412     HYuvContext *s = avctx->priv_data;
1413
1414     common_end(s);
1415
1416     av_freep(&avctx->extradata);
1417     av_freep(&avctx->stats_out);
1418
1419     return 0;
1420 }
1421 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1422
1423 #if CONFIG_HUFFYUV_DECODER
1424 AVCodec huffyuv_decoder = {
1425     "huffyuv",
1426     CODEC_TYPE_VIDEO,
1427     CODEC_ID_HUFFYUV,
1428     sizeof(HYuvContext),
1429     decode_init,
1430     NULL,
1431     decode_end,
1432     decode_frame,
1433     CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1434     NULL,
1435     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1436 };
1437 #endif
1438
1439 #if CONFIG_FFVHUFF_DECODER
1440 AVCodec ffvhuff_decoder = {
1441     "ffvhuff",
1442     CODEC_TYPE_VIDEO,
1443     CODEC_ID_FFVHUFF,
1444     sizeof(HYuvContext),
1445     decode_init,
1446     NULL,
1447     decode_end,
1448     decode_frame,
1449     CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1450     NULL,
1451     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1452 };
1453 #endif
1454
1455 #if CONFIG_HUFFYUV_ENCODER
1456 AVCodec huffyuv_encoder = {
1457     "huffyuv",
1458     CODEC_TYPE_VIDEO,
1459     CODEC_ID_HUFFYUV,
1460     sizeof(HYuvContext),
1461     encode_init,
1462     encode_frame,
1463     encode_end,
1464     .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1465     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1466 };
1467 #endif
1468
1469 #if CONFIG_FFVHUFF_ENCODER
1470 AVCodec ffvhuff_encoder = {
1471     "ffvhuff",
1472     CODEC_TYPE_VIDEO,
1473     CODEC_ID_FFVHUFF,
1474     sizeof(HYuvContext),
1475     encode_init,
1476     encode_frame,
1477     encode_end,
1478     .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1479     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1480 };
1481 #endif