1 /*
2  * huffyuv codec for libavcodec
3  *
4  * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7  * the algorithm used
8  *
9  * This file is part of Libav.
10  *
11  * Libav is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * Libav is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with Libav; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25
26 /**
27  * @file
28  * huffyuv codec for libavcodec.
29  */
30
31 #include "avcodec.h"
32 #include "get_bits.h"
33 #include "put_bits.h"
34 #include "dsputil.h"
35 #include "thread.h"
36
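/* Number of bits resolved per VLC table lookup; it also bounds the combined
 * code length that can be packed into the joint (multi-symbol) tables built
 * in generate_joint_tables(). */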
37 #define VLC_BITS 11
38
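/* Byte offsets of the blue, green, red and alpha channels inside a packed
 * 32-bit RGB32 pixel; the layout depends on host endianness. */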
39 #if HAVE_BIGENDIAN
40 #define B 3
41 #define G 2
42 #define R 1
43 #define A 0
44 #else
45 #define B 0
46 #define G 1
47 #define R 2
48 #define A 3
49 #endif
50
51 typedef enum Predictor{
52     LEFT= 0,
53     PLANE,
54     MEDIAN,
55 } Predictor;
56
57 typedef struct HYuvContext{
58     AVCodecContext *avctx;
59     Predictor predictor;
60     GetBitContext gb;
61     PutBitContext pb;
62     int interlaced;
63     int decorrelate;
64     int bitstream_bpp;
65     int version;
66     int yuy2;                               //use yuy2 instead of 422P
67     int bgr32;                              //use bgr32 instead of bgr24
68     int width, height;
69     int flags;
70     int context;
71     int picture_number;
72     int last_slice_end;
73     uint8_t *temp[3];
74     uint64_t stats[3][256];
75     uint8_t len[3][256];
76     uint32_t bits[3][256];
77     uint32_t pix_bgr_map[1<<VLC_BITS];
78     VLC vlc[6];                             //Y,U,V,YY,YU,YV
79     AVFrame picture;
80     uint8_t *bitstream_buffer;
81     unsigned int bitstream_buffer_size;
82     DSPContext dsp;
83 }HYuvContext;
84
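/* Built-in "classic" Huffman tables used by version-1 Huffyuv streams that
 * carry no tables in extradata: the shift_* arrays are run-length coded code
 * lengths (parsed by read_len_table()) and the add_* arrays hold the matching
 * code values. */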
85 static const unsigned char classic_shift_luma[] = {
86   34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
87   16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
88   69,68, 0
89 };
90
91 static const unsigned char classic_shift_chroma[] = {
92   66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
93   56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
94   214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
95 };
96
97 static const unsigned char classic_add_luma[256] = {
98     3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
99    73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
100    68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
101    35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
102    37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
103    35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
104    27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
105    15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
106    12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
107    12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
108    18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
109    28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
110    28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
111    62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
112    54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
113    46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
114 };
115
116 static const unsigned char classic_add_chroma[256] = {
117     3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
118     7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
119    11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
120    43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
121   143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
122    80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
123    17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
124   112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
125     0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
126   135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
127    52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
128    19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
129     7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
130    83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
131    14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
132     6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
133 };
134
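/* Left prediction for one plane row: each output byte is the difference to
 * the previous pixel. The first 16 pixels are handled in C, the remainder is
 * delegated to the DSP diff_bytes() routine; the return value is the last
 * source pixel, carried over as the left predictor for the next row. */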
135 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
136     int i;
137     if(w<32){
138         for(i=0; i<w; i++){
139             const int temp= src[i];
140             dst[i]= temp - left;
141             left= temp;
142         }
143         return left;
144     }else{
145         for(i=0; i<16; i++){
146             const int temp= src[i];
147             dst[i]= temp - left;
148             left= temp;
149         }
150         s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
151         return src[w-1];
152     }
153 }
154
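/* Per-channel left prediction for packed RGB32: the first four pixels are
 * differenced in C, the rest through diff_bytes() with a 4-byte (one pixel)
 * offset; the final R/G/B values are returned through the pointers so the
 * caller can carry them into the next row. */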
155 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
156     int i;
157     int r,g,b;
158     r= *red;
159     g= *green;
160     b= *blue;
161     for(i=0; i<FFMIN(w,4); i++){
162         const int rt= src[i*4+R];
163         const int gt= src[i*4+G];
164         const int bt= src[i*4+B];
165         dst[i*4+R]= rt - r;
166         dst[i*4+G]= gt - g;
167         dst[i*4+B]= bt - b;
168         r = rt;
169         g = gt;
170         b = bt;
171     }
172     s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
173     *red=   src[(w-1)*4+R];
174     *green= src[(w-1)*4+G];
175     *blue=  src[(w-1)*4+B];
176 }
177
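/* Parse a run-length coded table of Huffman code lengths, as stored in the
 * extradata or frame header. Each byte holds a 3-bit repeat count and a 5-bit
 * length; a repeat count of 0 is an escape and the real count follows as a
 * full byte:
 *
 *   [rrr vvvvv]            -> "rrr" copies of length "vvvvv" (1 <= rrr <= 7)
 *   [000 vvvvv][rrrrrrrr]  -> "rrrrrrrr" copies of length "vvvvv"
 *
 * store_table() below writes exactly this format. */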
178 static int read_len_table(uint8_t *dst, GetBitContext *gb){
179     int i, val, repeat;
180
181     for(i=0; i<256;){
182         repeat= get_bits(gb, 3);
183         val   = get_bits(gb, 5);
184         if(repeat==0)
185             repeat= get_bits(gb, 8);
186 //printf("%d %d\n", val, repeat);
187         if(i+repeat > 256 || get_bits_left(gb) < 0) {
188             av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
189             return -1;
190         }
191         while (repeat--)
192             dst[i++] = val;
193     }
194     return 0;
195 }
196
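/* Assign canonical Huffman codes from the code lengths: codes are handed out
 * for the longest length first, and after each length the running code must
 * be even, otherwise the lengths do not describe a valid prefix code. */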
197 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
198     int len, index;
199     uint32_t bits=0;
200
201     for(len=32; len>0; len--){
202         for(index=0; index<256; index++){
203             if(len_table[index]==len)
204                 dst[index]= bits++;
205         }
206         if(bits & 1){
207             av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
208             return -1;
209         }
210         bits >>= 1;
211     }
212     return 0;
213 }
214
215 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
216 typedef struct {
217     uint64_t val;
218     int name;
219 } HeapElem;
220
221 static void heap_sift(HeapElem *h, int root, int size)
222 {
223     while(root*2+1 < size) {
224         int child = root*2+1;
225         if(child < size-1 && h[child].val > h[child+1].val)
226             child++;
227         if(h[root].val > h[child].val) {
228             FFSWAP(HeapElem, h[root], h[child]);
229             root = child;
230         } else
231             break;
232     }
233 }
234
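/* Build a length-limited Huffman code from the symbol statistics using a
 * heap-based Huffman construction. If any resulting code would be 32 bits or
 * longer, the construction is retried with a progressively larger bias mixed
 * into the weights, which flattens the distribution until every length fits. */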
235 static void generate_len_table(uint8_t *dst, const uint64_t *stats){
236     HeapElem h[256];
237     int up[2*256];
238     int len[2*256];
239     int offset, i, next;
240     int size = 256;
241
242     for(offset=1; ; offset<<=1){
243         for(i=0; i<size; i++){
244             h[i].name = i;
245             h[i].val = (stats[i] << 8) + offset;
246         }
247         for(i=size/2-1; i>=0; i--)
248             heap_sift(h, i, size);
249
250         for(next=size; next<size*2-1; next++){
251             // merge the two smallest entries and put the merged node back in the heap
252             uint64_t min1v = h[0].val;
253             up[h[0].name] = next;
254             h[0].val = INT64_MAX;
255             heap_sift(h, 0, size);
256             up[h[0].name] = next;
257             h[0].name = next;
258             h[0].val += min1v;
259             heap_sift(h, 0, size);
260         }
261
262         len[2*size-2] = 0;
263         for(i=2*size-3; i>=size; i--)
264             len[i] = len[up[i]] + 1;
265         for(i=0; i<size; i++) {
266             dst[i] = len[up[i]] + 1;
267             if(dst[i] >= 32) break;
268         }
269         if(i==size) break;
270     }
271 }
272 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
273
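/* Build the joint VLC tables used by the decoder fast path: for YUV, a single
 * lookup in vlc[3..5] yields a (luma, chroma) pair whenever the two codes fit
 * into VLC_BITS together (see READ_2PIX()); for RGB, vlc[3] resolves all
 * three channels of a pixel at once, restricted to small signed values so the
 * table stays within VLC_BITS. */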
274 static void generate_joint_tables(HYuvContext *s){
275     uint16_t symbols[1<<VLC_BITS];
276     uint16_t bits[1<<VLC_BITS];
277     uint8_t len[1<<VLC_BITS];
278     if(s->bitstream_bpp < 24){
279         int p, i, y, u;
280         for(p=0; p<3; p++){
281             for(i=y=0; y<256; y++){
282                 int len0 = s->len[0][y];
283                 int limit = VLC_BITS - len0;
284                 if(limit <= 0)
285                     continue;
286                 for(u=0; u<256; u++){
287                     int len1 = s->len[p][u];
288                     if(len1 > limit)
289                         continue;
290                     len[i] = len0 + len1;
291                     bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
292                     symbols[i] = (y<<8) + u;
293                     if(symbols[i] != 0xffff) // reserved to mean "invalid"
294                         i++;
295                 }
296             }
297             ff_free_vlc(&s->vlc[3+p]);
298             ff_init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
299         }
300     }else{
301         uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
302         int i, b, g, r, code;
303         int p0 = s->decorrelate;
304         int p1 = !s->decorrelate;
305         // restrict the range to +/-16 because that's pretty much guaranteed to
306         // cover all the combinations that fit in 11 bits total, and it doesn't
307         // matter if we miss a few rare codes.
308         for(i=0, g=-16; g<16; g++){
309             int len0 = s->len[p0][g&255];
310             int limit0 = VLC_BITS - len0;
311             if(limit0 < 2)
312                 continue;
313             for(b=-16; b<16; b++){
314                 int len1 = s->len[p1][b&255];
315                 int limit1 = limit0 - len1;
316                 if(limit1 < 1)
317                     continue;
318                 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
319                 for(r=-16; r<16; r++){
320                     int len2 = s->len[2][r&255];
321                     if(len2 > limit1)
322                         continue;
323                     len[i] = len0 + len1 + len2;
324                     bits[i] = (code << len2) + s->bits[2][r&255];
325                     if(s->decorrelate){
326                         map[i][G] = g;
327                         map[i][B] = g+b;
328                         map[i][R] = g+r;
329                     }else{
330                         map[i][B] = g;
331                         map[i][G] = b;
332                         map[i][R] = r;
333                     }
334                     i++;
335                 }
336             }
337         }
338         ff_free_vlc(&s->vlc[3]);
339         init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
340     }
341 }
342
343 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
344     GetBitContext gb;
345     int i;
346
347     init_get_bits(&gb, src, length*8);
348
349     for(i=0; i<3; i++){
350         if(read_len_table(s->len[i], &gb)<0)
351             return -1;
352         if(generate_bits_table(s->bits[i], s->len[i])<0){
353             return -1;
354         }
355         ff_free_vlc(&s->vlc[i]);
356         init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
357     }
358
359     generate_joint_tables(s);
360
361     return (get_bits_count(&gb)+7)/8;
362 }
363
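/* Set up the fixed tables of the original (v1) Huffyuv bitstream: luma and
 * chroma code lengths come from the classic_shift_* arrays, the code values
 * from classic_add_*; for RGB streams all three planes share the luma table. */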
364 static int read_old_huffman_tables(HYuvContext *s){
365 #if 1
366     GetBitContext gb;
367     int i;
368
369     init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
370     if(read_len_table(s->len[0], &gb)<0)
371         return -1;
372     init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
373     if(read_len_table(s->len[1], &gb)<0)
374         return -1;
375
376     for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma  [i];
377     for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
378
379     if(s->bitstream_bpp >= 24){
380         memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
381         memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
382     }
383     memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
384     memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
385
386     for(i=0; i<3; i++){
387         ff_free_vlc(&s->vlc[i]);
388         init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
389     }
390
391     generate_joint_tables(s);
392
393     return 0;
394 #else
395     av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported\n");
396     return -1;
397 #endif
398 }
399
400 static av_cold void alloc_temp(HYuvContext *s){
401     int i;
402
403     if(s->bitstream_bpp<24){
404         for(i=0; i<3; i++){
405             s->temp[i]= av_malloc(s->width + 16);
406         }
407     }else{
408         s->temp[0]= av_mallocz(4*s->width + 16);
409     }
410 }
411
412 static av_cold int common_init(AVCodecContext *avctx){
413     HYuvContext *s = avctx->priv_data;
414
415     s->avctx= avctx;
416     s->flags= avctx->flags;
417
418     ff_dsputil_init(&s->dsp, avctx);
419
420     s->width= avctx->width;
421     s->height= avctx->height;
422     assert(s->width>0 && s->height>0);
423
424     return 0;
425 }
426
427 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
428 static av_cold int decode_init(AVCodecContext *avctx)
429 {
430     HYuvContext *s = avctx->priv_data;
431
432     common_init(avctx);
433     memset(s->vlc, 0, 3*sizeof(VLC));
434
435     avctx->coded_frame= &s->picture;
436     s->interlaced= s->height > 288;
437
438     s->bgr32=1;
439 //if(avctx->extradata)
440 //  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
441     if(avctx->extradata_size){
442         if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
443             s->version=1; // do such files exist at all?
444         else
445             s->version=2;
446     }else
447         s->version=0;
448
449     if(s->version==2){
450         int method, interlace;
451
452         if (avctx->extradata_size < 4)
453             return -1;
454
455         method= ((uint8_t*)avctx->extradata)[0];
456         s->decorrelate= method&64 ? 1 : 0;
457         s->predictor= method&63;
458         s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
459         if(s->bitstream_bpp==0)
460             s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
461         interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
462         s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
463         s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
464
465         if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
466             return -1;
467     }else{
468         switch(avctx->bits_per_coded_sample&7){
469         case 1:
470             s->predictor= LEFT;
471             s->decorrelate= 0;
472             break;
473         case 2:
474             s->predictor= LEFT;
475             s->decorrelate= 1;
476             break;
477         case 3:
478             s->predictor= PLANE;
479             s->decorrelate= avctx->bits_per_coded_sample >= 24;
480             break;
481         case 4:
482             s->predictor= MEDIAN;
483             s->decorrelate= 0;
484             break;
485         default:
486             s->predictor= LEFT; //OLD
487             s->decorrelate= 0;
488             break;
489         }
490         s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
491         s->context= 0;
492
493         if(read_old_huffman_tables(s) < 0)
494             return -1;
495     }
496
497     switch(s->bitstream_bpp){
498     case 12:
499         avctx->pix_fmt = PIX_FMT_YUV420P;
500         break;
501     case 16:
502         if(s->yuy2){
503             avctx->pix_fmt = PIX_FMT_YUYV422;
504         }else{
505             avctx->pix_fmt = PIX_FMT_YUV422P;
506         }
507         break;
508     case 24:
509     case 32:
510         if(s->bgr32){
511             avctx->pix_fmt = PIX_FMT_RGB32;
512         }else{
513             avctx->pix_fmt = PIX_FMT_BGR24;
514         }
515         break;
516     default:
517         assert(0);
518     }
519
520     alloc_temp(s);
521
522 //    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
523
524     return 0;
525 }
526
527 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
528 {
529     HYuvContext *s = avctx->priv_data;
530     int i;
531
532     avctx->coded_frame= &s->picture;
533     alloc_temp(s);
534
535     for (i = 0; i < 6; i++)
536         s->vlc[i].table = NULL;
537
538     if(s->version==2){
539         if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
540             return -1;
541     }else{
542         if(read_old_huffman_tables(s) < 0)
543             return -1;
544     }
545
546     return 0;
547 }
548 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
549
550 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
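/* Run-length encode one 256-entry code length table into buf, using the byte
 * format documented above read_len_table(); returns the number of bytes
 * written. */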
551 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
552     int i;
553     int index= 0;
554
555     for(i=0; i<256;){
556         int val= len[i];
557         int repeat=0;
558
559         for(; i<256 && len[i]==val && repeat<255; i++)
560             repeat++;
561
562         assert(val < 32 && val >0 && repeat<256 && repeat>0);
563         if(repeat>7){
564             buf[index++]= val;
565             buf[index++]= repeat;
566         }else{
567             buf[index++]= val | (repeat<<5);
568         }
569     }
570
571     return index;
572 }
573
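/* Initialize the encoder: pick the bitstream bpp from the pixel format, write
 * the 4-byte extradata header (predictor/decorrelate, bpp, interlacing and
 * context flags) followed by the three stored length tables, and seed the
 * symbol statistics either from a first-pass stats file or from a fixed
 * fallback distribution concentrated on small residuals. */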
574 static av_cold int encode_init(AVCodecContext *avctx)
575 {
576     HYuvContext *s = avctx->priv_data;
577     int i, j;
578
579     common_init(avctx);
580
581     avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
582     avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
583     s->version=2;
584
585     avctx->coded_frame= &s->picture;
586
587     switch(avctx->pix_fmt){
588     case PIX_FMT_YUV420P:
589         s->bitstream_bpp= 12;
590         break;
591     case PIX_FMT_YUV422P:
592         s->bitstream_bpp= 16;
593         break;
594     case PIX_FMT_RGB32:
595         s->bitstream_bpp= 24;
596         break;
597     default:
598         av_log(avctx, AV_LOG_ERROR, "format not supported\n");
599         return -1;
600     }
601     avctx->bits_per_coded_sample= s->bitstream_bpp;
602     s->decorrelate= s->bitstream_bpp >= 24;
603     s->predictor= avctx->prediction_method;
604     s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
605     if(avctx->context_model==1){
606         s->context= avctx->context_model;
607         if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
608             av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
609             return -1;
610         }
611     }else s->context= 0;
612
613     if(avctx->codec->id==CODEC_ID_HUFFYUV){
614         if(avctx->pix_fmt==PIX_FMT_YUV420P){
615             av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
616             return -1;
617         }
618         if(avctx->context_model){
619             av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
620             return -1;
621         }
622         if(s->interlaced != ( s->height > 288 ))
623             av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
624     }
625
626     if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
627         av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
628         return -1;
629     }
630
631     ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
632     ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
633     ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
634     if(s->context)
635         ((uint8_t*)avctx->extradata)[2]|= 0x40;
636     ((uint8_t*)avctx->extradata)[3]= 0;
637     s->avctx->extradata_size= 4;
638
639     if(avctx->stats_in){
640         char *p= avctx->stats_in;
641
642         for(i=0; i<3; i++)
643             for(j=0; j<256; j++)
644                 s->stats[i][j]= 1;
645
646         for(;;){
647             for(i=0; i<3; i++){
648                 char *next;
649
650                 for(j=0; j<256; j++){
651                     s->stats[i][j]+= strtol(p, &next, 0);
652                     if(next==p) return -1;
653                     p=next;
654                 }
655             }
656             if(p[0]==0 || p[1]==0 || p[2]==0) break;
657         }
658     }else{
659         for(i=0; i<3; i++)
660             for(j=0; j<256; j++){
661                 int d= FFMIN(j, 256-j);
662
663                 s->stats[i][j]= 100000000/(d+1);
664             }
665     }
666
667     for(i=0; i<3; i++){
668         generate_len_table(s->len[i], s->stats[i]);
669
670         if(generate_bits_table(s->bits[i], s->len[i])<0){
671             return -1;
672         }
673
674         s->avctx->extradata_size+=
675         store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
676     }
677
678     if(s->context){
679         for(i=0; i<3; i++){
680             int pels = s->width*s->height / (i?40:10);
681             for(j=0; j<256; j++){
682                 int d= FFMIN(j, 256-j);
683                 s->stats[i][j]= pels/(d+1);
684             }
685         }
686     }else{
687         for(i=0; i<3; i++)
688             for(j=0; j<256; j++)
689                 s->stats[i][j]= 0;
690     }
691
692 //    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
693
694     alloc_temp(s);
695
696     s->picture_number=0;
697
698     return 0;
699 }
700 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
701
702 /* TODO instead of restarting the read when the code isn't in the first level
703  * of the joint table, jump into the 2nd level of the individual table. */
704 #define READ_2PIX(dst0, dst1, plane1){\
705     uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
706     if(code != 0xffff){\
707         dst0 = code>>8;\
708         dst1 = code;\
709     }else{\
710         dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
711         dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
712     }\
713 }
714
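/* Decode one row of 4:2:2 samples into s->temp[]. When the remaining input
 * could be too short for the worst case (31 bits per symbol), the slower loop
 * also checks the read position on every pair to avoid reading past the end
 * of the bitstream. */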
715 static void decode_422_bitstream(HYuvContext *s, int count){
716     int i;
717
718     count/=2;
719
720     if(count >= (get_bits_left(&s->gb))/(31*4)){
721         for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
722             READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
723             READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
724         }
725     }else{
726         for(i=0; i<count; i++){
727             READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
728             READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
729         }
730     }
731 }
732
733 static void decode_gray_bitstream(HYuvContext *s, int count){
734     int i;
735
736     count/=2;
737
738     if(count >= (get_bits_left(&s->gb))/(31*2)){
739         for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
740             READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
741         }
742     }else{
743         for(i=0; i<count; i++){
744             READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
745         }
746     }
747 }
748
749 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
750 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
751     int i;
752     const uint8_t *y = s->temp[0] + offset;
753     const uint8_t *u = s->temp[1] + offset/2;
754     const uint8_t *v = s->temp[2] + offset/2;
755
756     if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
757         av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
758         return -1;
759     }
760
761 #define LOAD4\
762             int y0 = y[2*i];\
763             int y1 = y[2*i+1];\
764             int u0 = u[i];\
765             int v0 = v[i];
766
767     count/=2;
768     if(s->flags&CODEC_FLAG_PASS1){
769         for(i=0; i<count; i++){
770             LOAD4;
771             s->stats[0][y0]++;
772             s->stats[1][u0]++;
773             s->stats[0][y1]++;
774             s->stats[2][v0]++;
775         }
776     }
777     if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
778         return 0;
779     if(s->context){
780         for(i=0; i<count; i++){
781             LOAD4;
782             s->stats[0][y0]++;
783             put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
784             s->stats[1][u0]++;
785             put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
786             s->stats[0][y1]++;
787             put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
788             s->stats[2][v0]++;
789             put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
790         }
791     }else{
792         for(i=0; i<count; i++){
793             LOAD4;
794             put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
795             put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
796             put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
797             put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
798         }
799     }
800     return 0;
801 }
802
803 static int encode_gray_bitstream(HYuvContext *s, int count){
804     int i;
805
806     if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
807         av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
808         return -1;
809     }
810
811 #define LOAD2\
812             int y0 = s->temp[0][2*i];\
813             int y1 = s->temp[0][2*i+1];
814 #define STAT2\
815             s->stats[0][y0]++;\
816             s->stats[0][y1]++;
817 #define WRITE2\
818             put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
819             put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
820
821     count/=2;
822     if(s->flags&CODEC_FLAG_PASS1){
823         for(i=0; i<count; i++){
824             LOAD2;
825             STAT2;
826         }
827     }
828     if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
829         return 0;
830
831     if(s->context){
832         for(i=0; i<count; i++){
833             LOAD2;
834             STAT2;
835             WRITE2;
836         }
837     }else{
838         for(i=0; i<count; i++){
839             LOAD2;
840             WRITE2;
841         }
842     }
843     return 0;
844 }
845 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
846
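/* Decode count packed RGB(A) pixels: try the joint three-symbol VLC first and
 * fall back to the per-plane tables when the code is not in the joint table.
 * With decorrelate set, B and R are stored as differences from G. The alpha
 * channel, when present, is read with the same table as the third plane
 * (vlc[2]). */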
847 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
848     int i;
849     for(i=0; i<count; i++){
850         int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
851         if(code != -1){
852             *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
853         }else if(decorrelate){
854             s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
855             s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
856             s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
857         }else{
858             s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
859             s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
860             s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
861         }
862         if(alpha)
863             s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
864     }
865 }
866
867 static void decode_bgr_bitstream(HYuvContext *s, int count){
868     if(s->decorrelate){
869         if(s->bitstream_bpp==24)
870             decode_bgr_1(s, count, 1, 0);
871         else
872             decode_bgr_1(s, count, 1, 1);
873     }else{
874         if(s->bitstream_bpp==24)
875             decode_bgr_1(s, count, 0, 0);
876         else
877             decode_bgr_1(s, count, 0, 1);
878     }
879 }
880
881 static int encode_bgr_bitstream(HYuvContext *s, int count){
882     int i;
883
884     if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
885         av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
886         return -1;
887     }
888
889 #define LOAD3\
890             int g= s->temp[0][4*i+G];\
891             int b= (s->temp[0][4*i+B] - g) & 0xff;\
892             int r= (s->temp[0][4*i+R] - g) & 0xff;
893 #define STAT3\
894             s->stats[0][b]++;\
895             s->stats[1][g]++;\
896             s->stats[2][r]++;
897 #define WRITE3\
898             put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
899             put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
900             put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
901
902     if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
903         for(i=0; i<count; i++){
904             LOAD3;
905             STAT3;
906         }
907     }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
908         for(i=0; i<count; i++){
909             LOAD3;
910             STAT3;
911             WRITE3;
912         }
913     }else{
914         for(i=0; i<count; i++){
915             LOAD3;
916             WRITE3;
917         }
918     }
919     return 0;
920 }
921
922 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
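/* Report the rows finished since the last call to the application via
 * draw_horiz_band(); for 12 bpp (4:2:0) content the chroma offset uses half
 * the luma row. */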
923 static void draw_slice(HYuvContext *s, int y){
924     int h, cy, i;
925     int offset[AV_NUM_DATA_POINTERS];
926
927     if(s->avctx->draw_horiz_band==NULL)
928         return;
929
930     h= y - s->last_slice_end;
931     y -= h;
932
933     if(s->bitstream_bpp==12){
934         cy= y>>1;
935     }else{
936         cy= y;
937     }
938
939     offset[0] = s->picture.linesize[0]*y;
940     offset[1] = s->picture.linesize[1]*cy;
941     offset[2] = s->picture.linesize[2]*cy;
942     for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
943         offset[i] = 0;
944     emms_c();
945
946     s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
947
948     s->last_slice_end= y + h;
949 }
950
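/* Decode one packet into an AVFrame. The input is byte-swapped in 32-bit
 * words into a padded temporary buffer before parsing; with context modelling
 * enabled, per-frame Huffman tables precede the picture data. Rows are
 * reconstructed with the left, plane or median predictor chosen at init time,
 * and RGB frames are stored bottom-up. */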
951 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
952     const uint8_t *buf = avpkt->data;
953     int buf_size = avpkt->size;
954     HYuvContext *s = avctx->priv_data;
955     const int width= s->width;
956     const int width2= s->width>>1;
957     const int height= s->height;
958     int fake_ystride, fake_ustride, fake_vstride;
959     AVFrame * const p= &s->picture;
960     int table_size= 0;
961
962     AVFrame *picture = data;
963
964     av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
965     if (!s->bitstream_buffer)
966         return AVERROR(ENOMEM);
967
968     memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
969     s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
970
971     if(p->data[0])
972         ff_thread_release_buffer(avctx, p);
973
974     p->reference= 0;
975     if(ff_thread_get_buffer(avctx, p) < 0){
976         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
977         return -1;
978     }
979
980     if(s->context){
981         table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
982         if(table_size < 0)
983             return -1;
984     }
985
986     if((unsigned)(buf_size-table_size) >= INT_MAX/8)
987         return -1;
988
989     init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
990
991     fake_ystride= s->interlaced ? p->linesize[0]*2  : p->linesize[0];
992     fake_ustride= s->interlaced ? p->linesize[1]*2  : p->linesize[1];
993     fake_vstride= s->interlaced ? p->linesize[2]*2  : p->linesize[2];
994
995     s->last_slice_end= 0;
996
997     if(s->bitstream_bpp<24){
998         int y, cy;
999         int lefty, leftu, leftv;
1000         int lefttopy, lefttopu, lefttopv;
1001
1002         if(s->yuy2){
1003             p->data[0][3]= get_bits(&s->gb, 8);
1004             p->data[0][2]= get_bits(&s->gb, 8);
1005             p->data[0][1]= get_bits(&s->gb, 8);
1006             p->data[0][0]= get_bits(&s->gb, 8);
1007
1008             av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
1009             return -1;
1010         }else{
1011
1012             leftv= p->data[2][0]= get_bits(&s->gb, 8);
1013             lefty= p->data[0][1]= get_bits(&s->gb, 8);
1014             leftu= p->data[1][0]= get_bits(&s->gb, 8);
1015                    p->data[0][0]= get_bits(&s->gb, 8);
1016
1017             switch(s->predictor){
1018             case LEFT:
1019             case PLANE:
1020                 decode_422_bitstream(s, width-2);
1021                 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1022                 if(!(s->flags&CODEC_FLAG_GRAY)){
1023                     leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1024                     leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1025                 }
1026
1027                 for(cy=y=1; y<s->height; y++,cy++){
1028                     uint8_t *ydst, *udst, *vdst;
1029
1030                     if(s->bitstream_bpp==12){
1031                         decode_gray_bitstream(s, width);
1032
1033                         ydst= p->data[0] + p->linesize[0]*y;
1034
1035                         lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1036                         if(s->predictor == PLANE){
1037                             if(y>s->interlaced)
1038                                 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1039                         }
1040                         y++;
1041                         if(y>=s->height) break;
1042                     }
1043
1044                     draw_slice(s, y);
1045
1046                     ydst= p->data[0] + p->linesize[0]*y;
1047                     udst= p->data[1] + p->linesize[1]*cy;
1048                     vdst= p->data[2] + p->linesize[2]*cy;
1049
1050                     decode_422_bitstream(s, width);
1051                     lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1052                     if(!(s->flags&CODEC_FLAG_GRAY)){
1053                         leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1054                         leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
1055                     }
1056                     if(s->predictor == PLANE){
1057                         if(cy>s->interlaced){
1058                             s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1059                             if(!(s->flags&CODEC_FLAG_GRAY)){
1060                                 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1061                                 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1062                             }
1063                         }
1064                     }
1065                 }
1066                 draw_slice(s, height);
1067
1068                 break;
1069             case MEDIAN:
1070                 /* first line except first 2 pixels is left predicted */
1071                 decode_422_bitstream(s, width-2);
1072                 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1073                 if(!(s->flags&CODEC_FLAG_GRAY)){
1074                     leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1075                     leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1076                 }
1077
1078                 cy=y=1;
1079
1080                 /* second line is left predicted for interlaced case */
1081                 if(s->interlaced){
1082                     decode_422_bitstream(s, width);
1083                     lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1084                     if(!(s->flags&CODEC_FLAG_GRAY)){
1085                         leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1086                         leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1087                     }
1088                     y++; cy++;
1089                 }
1090
1091                 /* next 4 pixels are left predicted too */
1092                 decode_422_bitstream(s, 4);
1093                 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1094                 if(!(s->flags&CODEC_FLAG_GRAY)){
1095                     leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1096                     leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1097                 }
1098
1099                 /* next line except the first 4 pixels is median predicted */
1100                 lefttopy= p->data[0][3];
1101                 decode_422_bitstream(s, width-4);
1102                 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1103                 if(!(s->flags&CODEC_FLAG_GRAY)){
1104                     lefttopu= p->data[1][1];
1105                     lefttopv= p->data[2][1];
1106                     s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1107                     s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
1108                 }
1109                 y++; cy++;
1110
1111                 for(; y<height; y++,cy++){
1112                     uint8_t *ydst, *udst, *vdst;
1113
1114                     if(s->bitstream_bpp==12){
1115                         while(2*cy > y){
1116                             decode_gray_bitstream(s, width);
1117                             ydst= p->data[0] + p->linesize[0]*y;
1118                             s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1119                             y++;
1120                         }
1121                         if(y>=height) break;
1122                     }
1123                     draw_slice(s, y);
1124
1125                     decode_422_bitstream(s, width);
1126
1127                     ydst= p->data[0] + p->linesize[0]*y;
1128                     udst= p->data[1] + p->linesize[1]*cy;
1129                     vdst= p->data[2] + p->linesize[2]*cy;
1130
1131                     s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1132                     if(!(s->flags&CODEC_FLAG_GRAY)){
1133                         s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1134                         s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1135                     }
1136                 }
1137
1138                 draw_slice(s, height);
1139                 break;
1140             }
1141         }
1142     }else{
1143         int y;
1144         int leftr, leftg, leftb, lefta;
1145         const int last_line= (height-1)*p->linesize[0];
1146
1147         if(s->bitstream_bpp==32){
1148             lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
1149             leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1150             leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1151             leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1152         }else{
1153             leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1154             leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1155             leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1156             lefta= p->data[0][last_line+A]= 255;
1157             skip_bits(&s->gb, 8);
1158         }
1159
1160         if(s->bgr32){
1161             switch(s->predictor){
1162             case LEFT:
1163             case PLANE:
1164                 decode_bgr_bitstream(s, width-1);
1165                 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
1166
1167                 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1168                     decode_bgr_bitstream(s, width);
1169
1170                     s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1171                     if(s->predictor == PLANE){
1172                         if(s->bitstream_bpp!=32) lefta=0;
1173                         if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1174                             s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1175                                              p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1176                         }
1177                     }
1178                 }
1179                 draw_slice(s, height); // just one large slice, since slices cannot be reported in reverse (bottom-up) order
1180                 break;
1181             default:
1182                 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1183             }
1184         }else{
1185
1186             av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1187             return -1;
1188         }
1189     }
1190     emms_c();
1191
1192     *picture= *p;
1193     *data_size = sizeof(AVFrame);
1194
1195     return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1196 }
1197 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1198
1199 static int common_end(HYuvContext *s){
1200     int i;
1201
1202     for(i=0; i<3; i++){
1203         av_freep(&s->temp[i]);
1204     }
1205     return 0;
1206 }
1207
1208 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1209 static av_cold int decode_end(AVCodecContext *avctx)
1210 {
1211     HYuvContext *s = avctx->priv_data;
1212     int i;
1213
1214     if (s->picture.data[0])
1215         avctx->release_buffer(avctx, &s->picture);
1216
1217     common_end(s);
1218     av_freep(&s->bitstream_buffer);
1219
1220     for(i=0; i<6; i++){
1221         ff_free_vlc(&s->vlc[i]);
1222     }
1223
1224     return 0;
1225 }
1226 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1227
1228 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
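/* Encode one frame. With context modelling enabled the per-frame Huffman
 * tables are stored at the start of the packet and the gathered statistics
 * are halved afterwards; the finished bitstream is byte-swapped in 32-bit
 * words at the end, matching the swap done on the decoder side. */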
1229 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1230                         const AVFrame *pict, int *got_packet)
1231 {
1232     HYuvContext *s = avctx->priv_data;
1233     const int width= s->width;
1234     const int width2= s->width>>1;
1235     const int height= s->height;
1236     const int fake_ystride= s->interlaced ? pict->linesize[0]*2  : pict->linesize[0];
1237     const int fake_ustride= s->interlaced ? pict->linesize[1]*2  : pict->linesize[1];
1238     const int fake_vstride= s->interlaced ? pict->linesize[2]*2  : pict->linesize[2];
1239     AVFrame * const p= &s->picture;
1240     int i, j, size = 0, ret;
1241
1242     if (!pkt->data &&
1243         (ret = av_new_packet(pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0) {
1244         av_log(avctx, AV_LOG_ERROR, "Error allocating output packet.\n");
1245         return ret;
1246     }
1247
1248     *p = *pict;
1249     p->pict_type= AV_PICTURE_TYPE_I;
1250     p->key_frame= 1;
1251
1252     if(s->context){
1253         for(i=0; i<3; i++){
1254             generate_len_table(s->len[i], s->stats[i]);
1255             if(generate_bits_table(s->bits[i], s->len[i])<0)
1256                 return -1;
1257             size += store_table(s, s->len[i], &pkt->data[size]);
1258         }
1259
1260         for(i=0; i<3; i++)
1261             for(j=0; j<256; j++)
1262                 s->stats[i][j] >>= 1;
1263     }
1264
1265     init_put_bits(&s->pb, pkt->data + size, pkt->size - size);
1266
1267     if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1268         int lefty, leftu, leftv, y, cy;
1269
1270         put_bits(&s->pb, 8, leftv= p->data[2][0]);
1271         put_bits(&s->pb, 8, lefty= p->data[0][1]);
1272         put_bits(&s->pb, 8, leftu= p->data[1][0]);
1273         put_bits(&s->pb, 8,        p->data[0][0]);
1274
1275         lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1276         leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1277         leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1278
1279         encode_422_bitstream(s, 2, width-2);
1280
1281         if(s->predictor==MEDIAN){
1282             int lefttopy, lefttopu, lefttopv;
1283             cy=y=1;
1284             if(s->interlaced){
1285                 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1286                 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1287                 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1288
1289                 encode_422_bitstream(s, 0, width);
1290                 y++; cy++;
1291             }
1292
1293             lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1294             leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1295             leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1296
1297             encode_422_bitstream(s, 0, 4);
1298
1299             lefttopy= p->data[0][3];
1300             lefttopu= p->data[1][1];
1301             lefttopv= p->data[2][1];
1302             s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1303             s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1304             s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1305             encode_422_bitstream(s, 0, width-4);
1306             y++; cy++;
1307
1308             for(; y<height; y++,cy++){
1309                 uint8_t *ydst, *udst, *vdst;
1310
1311                 if(s->bitstream_bpp==12){
1312                     while(2*cy > y){
1313                         ydst= p->data[0] + p->linesize[0]*y;
1314                         s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1315                         encode_gray_bitstream(s, width);
1316                         y++;
1317                     }
1318                     if(y>=height) break;
1319                 }
1320                 ydst= p->data[0] + p->linesize[0]*y;
1321                 udst= p->data[1] + p->linesize[1]*cy;
1322                 vdst= p->data[2] + p->linesize[2]*cy;
1323
1324                 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1325                 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1326                 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1327
1328                 encode_422_bitstream(s, 0, width);
1329             }
1330         }else{
1331             for(cy=y=1; y<height; y++,cy++){
1332                 uint8_t *ydst, *udst, *vdst;
1333
1334                 /* encode a luma only line & y++ */
1335                 if(s->bitstream_bpp==12){
1336                     ydst= p->data[0] + p->linesize[0]*y;
1337
1338                     if(s->predictor == PLANE && s->interlaced < y){
1339                         s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1340
1341                         lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1342                     }else{
1343                         lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1344                     }
1345                     encode_gray_bitstream(s, width);
1346                     y++;
1347                     if(y>=height) break;
1348                 }
1349
1350                 ydst= p->data[0] + p->linesize[0]*y;
1351                 udst= p->data[1] + p->linesize[1]*cy;
1352                 vdst= p->data[2] + p->linesize[2]*cy;
1353
1354                 if(s->predictor == PLANE && s->interlaced < cy){
1355                     s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1356                     s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1357                     s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1358
1359                     lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1360                     leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1361                     leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1362                 }else{
1363                     lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1364                     leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1365                     leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1366                 }
1367
1368                 encode_422_bitstream(s, 0, width);
1369             }
1370         }
1371     }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1372         uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1373         const int stride = -p->linesize[0];
1374         const int fake_stride = -fake_ystride;
1375         int y;
1376         int leftr, leftg, leftb;
1377
1378         put_bits(&s->pb, 8, leftr= data[R]);
1379         put_bits(&s->pb, 8, leftg= data[G]);
1380         put_bits(&s->pb, 8, leftb= data[B]);
1381         put_bits(&s->pb, 8, 0);
1382
1383         sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1384         encode_bgr_bitstream(s, width-1);
1385
1386         for(y=1; y<s->height; y++){
1387             uint8_t *dst = data + y*stride;
1388             if(s->predictor == PLANE && s->interlaced < y){
1389                 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1390                 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1391             }else{
1392                 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1393             }
1394             encode_bgr_bitstream(s, width);
1395         }
1396     }else{
1397         av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1398     }
1399     emms_c();
1400
1401     size+= (put_bits_count(&s->pb)+31)/8;
1402     put_bits(&s->pb, 16, 0);
1403     put_bits(&s->pb, 15, 0);
1404     size/= 4;
1405
1406     if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1407         int j;
1408         char *p= avctx->stats_out;
1409         char *end= p + 1024*30;
1410         for(i=0; i<3; i++){
1411             for(j=0; j<256; j++){
1412                 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1413                 p+= strlen(p);
1414                 s->stats[i][j]= 0;
1415             }
1416             snprintf(p, end-p, "\n");
1417             p++;
1418         }
1419     } else
1420         avctx->stats_out[0] = '\0';
1421     if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1422         flush_put_bits(&s->pb);
1423         s->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
1424     }
1425
1426     s->picture_number++;
1427
1428     pkt->size   = size*4;
1429     pkt->flags |= AV_PKT_FLAG_KEY;
1430     *got_packet = 1;
1431
1432     return 0;
1433 }
1434
1435 static av_cold int encode_end(AVCodecContext *avctx)
1436 {
1437     HYuvContext *s = avctx->priv_data;
1438
1439     common_end(s);
1440
1441     av_freep(&avctx->extradata);
1442     av_freep(&avctx->stats_out);
1443
1444     return 0;
1445 }
1446 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1447
1448 #if CONFIG_HUFFYUV_DECODER
1449 AVCodec ff_huffyuv_decoder = {
1450     .name           = "huffyuv",
1451     .type           = AVMEDIA_TYPE_VIDEO,
1452     .id             = CODEC_ID_HUFFYUV,
1453     .priv_data_size = sizeof(HYuvContext),
1454     .init           = decode_init,
1455     .close          = decode_end,
1456     .decode         = decode_frame,
1457     .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1458     .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1459     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1460 };
1461 #endif
1462
1463 #if CONFIG_FFVHUFF_DECODER
1464 AVCodec ff_ffvhuff_decoder = {
1465     .name           = "ffvhuff",
1466     .type           = AVMEDIA_TYPE_VIDEO,
1467     .id             = CODEC_ID_FFVHUFF,
1468     .priv_data_size = sizeof(HYuvContext),
1469     .init           = decode_init,
1470     .close          = decode_end,
1471     .decode         = decode_frame,
1472     .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1473     .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1474     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1475 };
1476 #endif
1477
1478 #if CONFIG_HUFFYUV_ENCODER
1479 AVCodec ff_huffyuv_encoder = {
1480     .name           = "huffyuv",
1481     .type           = AVMEDIA_TYPE_VIDEO,
1482     .id             = CODEC_ID_HUFFYUV,
1483     .priv_data_size = sizeof(HYuvContext),
1484     .init           = encode_init,
1485     .encode2        = encode_frame,
1486     .close          = encode_end,
1487     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1488     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1489 };
1490 #endif
1491
1492 #if CONFIG_FFVHUFF_ENCODER
1493 AVCodec ff_ffvhuff_encoder = {
1494     .name           = "ffvhuff",
1495     .type           = AVMEDIA_TYPE_VIDEO,
1496     .id             = CODEC_ID_FFVHUFF,
1497     .priv_data_size = sizeof(HYuvContext),
1498     .init           = encode_init,
1499     .encode2        = encode_frame,
1500     .close          = encode_end,
1501     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1502     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1503 };
1504 #endif