1 /*
2  * huffyuv codec for libavcodec
3  *
4  * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7  * the algorithm used
8  *
9  * This file is part of Libav.
10  *
11  * Libav is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * Libav is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with Libav; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25
26 /**
27  * @file
28  * huffyuv codec for libavcodec.
29  */
30
31 #include "avcodec.h"
32 #include "get_bits.h"
33 #include "put_bits.h"
34 #include "dsputil.h"
35 #include "thread.h"
36
37 #define VLC_BITS 11
38
39 #if HAVE_BIGENDIAN
40 #define B 3
41 #define G 2
42 #define R 1
43 #define A 0
44 #else
45 #define B 0
46 #define G 1
47 #define R 2
48 #define A 3
49 #endif
50
51 typedef enum Predictor{
52     LEFT= 0,
53     PLANE,
54     MEDIAN,
55 } Predictor;
56
57 typedef struct HYuvContext{
58     AVCodecContext *avctx;
59     Predictor predictor;
60     GetBitContext gb;
61     PutBitContext pb;
62     int interlaced;
63     int decorrelate;
64     int bitstream_bpp;
65     int version;
66     int yuy2;                               //use yuy2 instead of 422P
67     int bgr32;                              //use bgr32 instead of bgr24
68     int width, height;
69     int flags;
70     int context;
71     int picture_number;
72     int last_slice_end;
73     uint8_t *temp[3];
74     uint64_t stats[3][256];
75     uint8_t len[3][256];
76     uint32_t bits[3][256];
77     uint32_t pix_bgr_map[1<<VLC_BITS];
78     VLC vlc[6];                             //Y,U,V,YY,YU,YV
79     AVFrame picture;
80     uint8_t *bitstream_buffer;
81     unsigned int bitstream_buffer_size;
82     DSPContext dsp;
83 }HYuvContext;
84
85 static const unsigned char classic_shift_luma[] = {
86   34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
87   16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
88   69,68, 0
89 };
90
91 static const unsigned char classic_shift_chroma[] = {
92   66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
93   56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
94   214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
95 };
96
97 static const unsigned char classic_add_luma[256] = {
98     3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
99    73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
100    68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
101    35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
102    37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
103    35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
104    27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
105    15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
106    12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
107    12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
108    18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
109    28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
110    28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
111    62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
112    54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
113    46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
114 };
115
116 static const unsigned char classic_add_chroma[256] = {
117     3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
118     7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
119    11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
120    43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
121   143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
122    80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
123    17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
124   112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
125     0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
126   135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
127    52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
128    19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
129     7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
130    83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
131    14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
132     6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
133 };
134
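/* Subtract the left prediction from one row: each output byte is the difference to
 * its left neighbour. For longer rows only the first 16 pixels are done in plain C
 * before handing the rest to diff_bytes(), presumably to keep the (possibly SIMD)
 * version off the unaligned head; the last pixel value is returned so it can seed
 * the next call. */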
135 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
136     int i;
137     if(w<32){
138         for(i=0; i<w; i++){
139             const int temp= src[i];
140             dst[i]= temp - left;
141             left= temp;
142         }
143         return left;
144     }else{
145         for(i=0; i<16; i++){
146             const int temp= src[i];
147             dst[i]= temp - left;
148             left= temp;
149         }
150         s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
151         return src[w-1];
152     }
153 }
154
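/* Same idea for packed 32-bit RGB: difference each channel against the previous
 * pixel. The first 4 pixels are done in C, the remainder via diff_bytes() over the
 * raw byte stream; the last pixel's channels are returned through *red/*green/*blue. */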
155 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
156     int i;
157     int r,g,b;
158     r= *red;
159     g= *green;
160     b= *blue;
161     for(i=0; i<FFMIN(w,4); i++){
162         const int rt= src[i*4+R];
163         const int gt= src[i*4+G];
164         const int bt= src[i*4+B];
165         dst[i*4+R]= rt - r;
166         dst[i*4+G]= gt - g;
167         dst[i*4+B]= bt - b;
168         r = rt;
169         g = gt;
170         b = bt;
171     }
172     s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
173     *red=   src[(w-1)*4+R];
174     *green= src[(w-1)*4+G];
175     *blue=  src[(w-1)*4+B];
176 }
177
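/* Parse a run-length coded table of 256 Huffman code lengths: each entry is a 3-bit
 * repeat count (0 means an explicit 8-bit count follows) plus a 5-bit length value. */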
178 static int read_len_table(uint8_t *dst, GetBitContext *gb){
179     int i, val, repeat;
180
181     for(i=0; i<256;){
182         repeat= get_bits(gb, 3);
183         val   = get_bits(gb, 5);
184         if(repeat==0)
185             repeat= get_bits(gb, 8);
186 //printf("%d %d\n", val, repeat);
187         if(i+repeat > 256) {
188             av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
189             return -1;
190         }
191         while (repeat--)
192             dst[i++] = val;
193     }
194     return 0;
195 }
196
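/* Assign canonical codes from the code lengths, longest codes first. After handing
 * out all codes of a given length the running count must be even, otherwise the
 * length table cannot describe a valid prefix code. */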
197 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
198     int len, index;
199     uint32_t bits=0;
200
201     for(len=32; len>0; len--){
202         for(index=0; index<256; index++){
203             if(len_table[index]==len)
204                 dst[index]= bits++;
205         }
206         if(bits & 1){
207             av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
208             return -1;
209         }
210         bits >>= 1;
211     }
212     return 0;
213 }
214
215 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
216 typedef struct {
217     uint64_t val;
218     int name;
219 } HeapElem;
220
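/* Sift-down for a min-heap ordered by val (smallest element on top). */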
221 static void heap_sift(HeapElem *h, int root, int size)
222 {
223     while(root*2+1 < size) {
224         int child = root*2+1;
225         if(child < size-1 && h[child].val > h[child+1].val)
226             child++;
227         if(h[root].val > h[child].val) {
228             FFSWAP(HeapElem, h[root], h[child]);
229             root = child;
230         } else
231             break;
232     }
233 }
234
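/* Derive Huffman code lengths from the symbol statistics by repeatedly merging the
 * two least frequent nodes (min-heap). If any length reaches 32 bits, the whole pass
 * is retried with a larger bias added to the counts, which flattens the tree. */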
235 static void generate_len_table(uint8_t *dst, const uint64_t *stats){
236     HeapElem h[256];
237     int up[2*256];
238     int len[2*256];
239     int offset, i, next;
240     int size = 256;
241
242     for(offset=1; ; offset<<=1){
243         for(i=0; i<size; i++){
244             h[i].name = i;
245             h[i].val = (stats[i] << 8) + offset;
246         }
247         for(i=size/2-1; i>=0; i--)
248             heap_sift(h, i, size);
249
250         for(next=size; next<size*2-1; next++){
251             // merge the two smallest entries, and put the result back in the heap
252             uint64_t min1v = h[0].val;
253             up[h[0].name] = next;
254             h[0].val = INT64_MAX;
255             heap_sift(h, 0, size);
256             up[h[0].name] = next;
257             h[0].name = next;
258             h[0].val += min1v;
259             heap_sift(h, 0, size);
260         }
261
262         len[2*size-2] = 0;
263         for(i=2*size-3; i>=size; i--)
264             len[i] = len[up[i]] + 1;
265         for(i=0; i<size; i++) {
266             dst[i] = len[up[i]] + 1;
267             if(dst[i] >= 32) break;
268         }
269         if(i==size) break;
270     }
271 }
272 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
273
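/* Build the joint VLC tables used for fast decoding: pairs of symbols (Y+Y, Y+U,
 * Y+V) or whole B,G,R triplets whose combined code fits into VLC_BITS get a single
 * table entry; everything else falls back to the per-plane tables at decode time. */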
274 static void generate_joint_tables(HYuvContext *s){
275     uint16_t symbols[1<<VLC_BITS];
276     uint16_t bits[1<<VLC_BITS];
277     uint8_t len[1<<VLC_BITS];
278     if(s->bitstream_bpp < 24){
279         int p, i, y, u;
280         for(p=0; p<3; p++){
281             for(i=y=0; y<256; y++){
282                 int len0 = s->len[0][y];
283                 int limit = VLC_BITS - len0;
284                 if(limit <= 0)
285                     continue;
286                 for(u=0; u<256; u++){
287                     int len1 = s->len[p][u];
288                     if(len1 > limit)
289                         continue;
290                     len[i] = len0 + len1;
291                     bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
292                     symbols[i] = (y<<8) + u;
293                     if(symbols[i] != 0xffff) // reserved to mean "invalid"
294                         i++;
295                 }
296             }
297             free_vlc(&s->vlc[3+p]);
298             init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
299         }
300     }else{
301         uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
302         int i, b, g, r, code;
303         int p0 = s->decorrelate;
304         int p1 = !s->decorrelate;
305     // restrict the range to +/-16 because that's pretty much guaranteed to
306         // cover all the combinations that fit in 11 bits total, and it doesn't
307         // matter if we miss a few rare codes.
308         for(i=0, g=-16; g<16; g++){
309             int len0 = s->len[p0][g&255];
310             int limit0 = VLC_BITS - len0;
311             if(limit0 < 2)
312                 continue;
313             for(b=-16; b<16; b++){
314                 int len1 = s->len[p1][b&255];
315                 int limit1 = limit0 - len1;
316                 if(limit1 < 1)
317                     continue;
318                 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
319                 for(r=-16; r<16; r++){
320                     int len2 = s->len[2][r&255];
321                     if(len2 > limit1)
322                         continue;
323                     len[i] = len0 + len1 + len2;
324                     bits[i] = (code << len2) + s->bits[2][r&255];
325                     if(s->decorrelate){
326                         map[i][G] = g;
327                         map[i][B] = g+b;
328                         map[i][R] = g+r;
329                     }else{
330                         map[i][B] = g;
331                         map[i][G] = b;
332                         map[i][R] = r;
333                     }
334                     i++;
335                 }
336             }
337         }
338         free_vlc(&s->vlc[3]);
339         init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
340     }
341 }
342
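/* Read the three per-plane length tables (from extradata, or from the frame header
 * when context modelling is enabled), regenerate the codes and rebuild all VLCs.
 * Returns the number of bytes consumed. */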
343 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
344     GetBitContext gb;
345     int i;
346
347     init_get_bits(&gb, src, length*8);
348
349     for(i=0; i<3; i++){
350         if(read_len_table(s->len[i], &gb)<0)
351             return -1;
352         if(generate_bits_table(s->bits[i], s->len[i])<0){
353             return -1;
354         }
355 #if 0
356 for(j=0; j<256; j++){
357 printf("%6X, %2d,  %3d\n", s->bits[i][j], s->len[i][j], j);
358 }
359 #endif
360         free_vlc(&s->vlc[i]);
361         init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
362     }
363
364     generate_joint_tables(s);
365
366     return (get_bits_count(&gb)+7)/8;
367 }
368
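/* Install the fixed tables used by old pre-version-2 HuffYUV files that carry no
 * usable extradata. */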
369 static int read_old_huffman_tables(HYuvContext *s){
370 #if 1
371     GetBitContext gb;
372     int i;
373
374     init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
375     if(read_len_table(s->len[0], &gb)<0)
376         return -1;
377     init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
378     if(read_len_table(s->len[1], &gb)<0)
379         return -1;
380
381     for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma  [i];
382     for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
383
384     if(s->bitstream_bpp >= 24){
385         memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
386         memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
387     }
388     memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
389     memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
390
391     for(i=0; i<3; i++){
392         free_vlc(&s->vlc[i]);
393         init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
394     }
395
396     generate_joint_tables(s);
397
398     return 0;
399 #else
400     av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported\n");
401     return -1;
402 #endif
403 }
404
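/* Allocate the per-row scratch buffers: one per plane for planar YUV, a single
 * packed buffer for RGB. */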
405 static av_cold void alloc_temp(HYuvContext *s){
406     int i;
407
408     if(s->bitstream_bpp<24){
409         for(i=0; i<3; i++){
410             s->temp[i]= av_malloc(s->width + 16);
411         }
412     }else{
413         s->temp[0]= av_mallocz(4*s->width + 16);
414     }
415 }
416
417 static av_cold int common_init(AVCodecContext *avctx){
418     HYuvContext *s = avctx->priv_data;
419
420     s->avctx= avctx;
421     s->flags= avctx->flags;
422
423     dsputil_init(&s->dsp, avctx);
424
425     s->width= avctx->width;
426     s->height= avctx->height;
427     assert(s->width>0 && s->height>0);
428
429     return 0;
430 }
431
432 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
433 static av_cold int decode_init(AVCodecContext *avctx)
434 {
435     HYuvContext *s = avctx->priv_data;
436
437     common_init(avctx);
438     memset(s->vlc, 0, 3*sizeof(VLC));
439
440     avctx->coded_frame= &s->picture;
441     s->interlaced= s->height > 288;
442
443     s->bgr32=1;
444 //if(avctx->extradata)
445 //  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
446     if(avctx->extradata_size){
447         if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
448             s->version=1; // do such files exist at all?
449         else
450             s->version=2;
451     }else
452         s->version=0;
453
454     if(s->version==2){
455         int method, interlace;
456
457         if (avctx->extradata_size < 4)
458             return -1;
459
460         method= ((uint8_t*)avctx->extradata)[0];
461         s->decorrelate= method&64 ? 1 : 0;
462         s->predictor= method&63;
463         s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
464         if(s->bitstream_bpp==0)
465             s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
466         interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
467         s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
468         s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
469
470         if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
471             return -1;
472     }else{
473         switch(avctx->bits_per_coded_sample&7){
474         case 1:
475             s->predictor= LEFT;
476             s->decorrelate= 0;
477             break;
478         case 2:
479             s->predictor= LEFT;
480             s->decorrelate= 1;
481             break;
482         case 3:
483             s->predictor= PLANE;
484             s->decorrelate= avctx->bits_per_coded_sample >= 24;
485             break;
486         case 4:
487             s->predictor= MEDIAN;
488             s->decorrelate= 0;
489             break;
490         default:
491             s->predictor= LEFT; //OLD
492             s->decorrelate= 0;
493             break;
494         }
495         s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
496         s->context= 0;
497
498         if(read_old_huffman_tables(s) < 0)
499             return -1;
500     }
501
502     switch(s->bitstream_bpp){
503     case 12:
504         avctx->pix_fmt = PIX_FMT_YUV420P;
505         break;
506     case 16:
507         if(s->yuy2){
508             avctx->pix_fmt = PIX_FMT_YUYV422;
509         }else{
510             avctx->pix_fmt = PIX_FMT_YUV422P;
511         }
512         break;
513     case 24:
514     case 32:
515         if(s->bgr32){
516             avctx->pix_fmt = PIX_FMT_RGB32;
517         }else{
518             avctx->pix_fmt = PIX_FMT_BGR24;
519         }
520         break;
521     default:
522         assert(0);
523     }
524
525     alloc_temp(s);
526
527 //    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
528
529     return 0;
530 }
531
532 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
533 {
534     HYuvContext *s = avctx->priv_data;
535     int i;
536
537     avctx->coded_frame= &s->picture;
538     alloc_temp(s);
539
540     for (i = 0; i < 6; i++)
541         s->vlc[i].table = NULL;
542
543     if(s->version==2){
544         if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0) // skip the 4-byte header, as in decode_init()
545             return -1;
546     }else{
547         if(read_old_huffman_tables(s) < 0)
548             return -1;
549     }
550
551     return 0;
552 }
553 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
554
555 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
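/* Run-length encode one 256-entry length table in the format read_len_table()
 * expects; returns the number of bytes written to buf. */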
556 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
557     int i;
558     int index= 0;
559
560     for(i=0; i<256;){
561         int val= len[i];
562         int repeat=0;
563
564         for(; i<256 && len[i]==val && repeat<255; i++)
565             repeat++;
566
567         assert(val < 32 && val >0 && repeat<256 && repeat>0);
568         if(repeat>7){
569             buf[index++]= val;
570             buf[index++]= repeat;
571         }else{
572             buf[index++]= val | (repeat<<5);
573         }
574     }
575
576     return index;
577 }
578
579 static av_cold int encode_init(AVCodecContext *avctx)
580 {
581     HYuvContext *s = avctx->priv_data;
582     int i, j;
583
584     common_init(avctx);
585
586     avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
587     avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
588     s->version=2;
589
590     avctx->coded_frame= &s->picture;
591
592     switch(avctx->pix_fmt){
593     case PIX_FMT_YUV420P:
594         s->bitstream_bpp= 12;
595         break;
596     case PIX_FMT_YUV422P:
597         s->bitstream_bpp= 16;
598         break;
599     case PIX_FMT_RGB32:
600         s->bitstream_bpp= 24;
601         break;
602     default:
603         av_log(avctx, AV_LOG_ERROR, "format not supported\n");
604         return -1;
605     }
606     avctx->bits_per_coded_sample= s->bitstream_bpp;
607     s->decorrelate= s->bitstream_bpp >= 24;
608     s->predictor= avctx->prediction_method;
609     s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
610     if(avctx->context_model==1){
611         s->context= avctx->context_model;
612         if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
613             av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
614             return -1;
615         }
616     }else s->context= 0;
617
618     if(avctx->codec->id==CODEC_ID_HUFFYUV){
619         if(avctx->pix_fmt==PIX_FMT_YUV420P){
620             av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
621             return -1;
622         }
623         if(avctx->context_model){
624             av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
625             return -1;
626         }
627         if(s->interlaced != ( s->height > 288 ))
628             av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
629     }
630
631     if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
632         av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
633         return -1;
634     }
635
636     ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
637     ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
638     ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
639     if(s->context)
640         ((uint8_t*)avctx->extradata)[2]|= 0x40;
641     ((uint8_t*)avctx->extradata)[3]= 0;
642     s->avctx->extradata_size= 4;
643
644     if(avctx->stats_in){
645         char *p= avctx->stats_in;
646
647         for(i=0; i<3; i++)
648             for(j=0; j<256; j++)
649                 s->stats[i][j]= 1;
650
651         for(;;){
652             for(i=0; i<3; i++){
653                 char *next;
654
655                 for(j=0; j<256; j++){
656                     s->stats[i][j]+= strtol(p, &next, 0);
657                     if(next==p) return -1;
658                     p=next;
659                 }
660             }
661             if(p[0]==0 || p[1]==0 || p[2]==0) break;
662         }
663     }else{
664         for(i=0; i<3; i++)
665             for(j=0; j<256; j++){
666                 int d= FFMIN(j, 256-j);
667
668                 s->stats[i][j]= 100000000/(d+1);
669             }
670     }
671
672     for(i=0; i<3; i++){
673         generate_len_table(s->len[i], s->stats[i]);
674
675         if(generate_bits_table(s->bits[i], s->len[i])<0){
676             return -1;
677         }
678
679         s->avctx->extradata_size+=
680         store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
681     }
682
683     if(s->context){
684         for(i=0; i<3; i++){
685             int pels = s->width*s->height / (i?40:10);
686             for(j=0; j<256; j++){
687                 int d= FFMIN(j, 256-j);
688                 s->stats[i][j]= pels/(d+1);
689             }
690         }
691     }else{
692         for(i=0; i<3; i++)
693             for(j=0; j<256; j++)
694                 s->stats[i][j]= 0;
695     }
696
697 //    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
698
699     alloc_temp(s);
700
701     s->picture_number=0;
702
703     return 0;
704 }
705 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
706
707 /* TODO instead of restarting the read when the code isn't in the first level
708  * of the joint table, jump into the 2nd level of the individual table. */
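/* Read two symbols at once through the joint table; 0xffff marks combinations that
 * were not stored there, in which case each symbol is read from its own table. */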
709 #define READ_2PIX(dst0, dst1, plane1){\
710     uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
711     if(code != 0xffff){\
712         dst0 = code>>8;\
713         dst1 = code;\
714     }else{\
715         dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
716         dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
717     }\
718 }
719
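/* Decode one row of 4:2:2 samples (YUYV order) into temp[0..2]. Near the end of the
 * buffer the per-pair bounds check guards against truncated bitstreams; the
 * unchecked loop is the fast path. */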
720 static void decode_422_bitstream(HYuvContext *s, int count){
721     int i;
722
723     count/=2;
724
725     if(count >= (get_bits_left(&s->gb))/(31*4)){
726         for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
727             READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
728             READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
729         }
730     }else{
731         for(i=0; i<count; i++){
732             READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
733             READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
734         }
735     }
736 }
737
738 static void decode_gray_bitstream(HYuvContext *s, int count){
739     int i;
740
741     count/=2;
742
743     if(count >= (get_bits_left(&s->gb))/(31*2)){
744         for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
745             READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
746         }
747     }else{
748         for(i=0; i<count; i++){
749             READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
750         }
751     }
752 }
753
754 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
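/* Encode one row of 4:2:2 samples starting at luma offset 'offset'; symbol
 * statistics are gathered for two-pass (PASS1) and context-adaptive coding. */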
755 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
756     int i;
757     const uint8_t *y = s->temp[0] + offset;
758     const uint8_t *u = s->temp[1] + offset/2;
759     const uint8_t *v = s->temp[2] + offset/2;
760
761     if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
762         av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
763         return -1;
764     }
765
766 #define LOAD4\
767             int y0 = y[2*i];\
768             int y1 = y[2*i+1];\
769             int u0 = u[i];\
770             int v0 = v[i];
771
772     count/=2;
773     if(s->flags&CODEC_FLAG_PASS1){
774         for(i=0; i<count; i++){
775             LOAD4;
776             s->stats[0][y0]++;
777             s->stats[1][u0]++;
778             s->stats[0][y1]++;
779             s->stats[2][v0]++;
780         }
781     }
782     if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
783         return 0;
784     if(s->context){
785         for(i=0; i<count; i++){
786             LOAD4;
787             s->stats[0][y0]++;
788             put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
789             s->stats[1][u0]++;
790             put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
791             s->stats[0][y1]++;
792             put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
793             s->stats[2][v0]++;
794             put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
795         }
796     }else{
797         for(i=0; i<count; i++){
798             LOAD4;
799             put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
800             put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
801             put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
802             put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
803         }
804     }
805     return 0;
806 }
807
808 static int encode_gray_bitstream(HYuvContext *s, int count){
809     int i;
810
811     if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
812         av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
813         return -1;
814     }
815
816 #define LOAD2\
817             int y0 = s->temp[0][2*i];\
818             int y1 = s->temp[0][2*i+1];
819 #define STAT2\
820             s->stats[0][y0]++;\
821             s->stats[0][y1]++;
822 #define WRITE2\
823             put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
824             put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
825
826     count/=2;
827     if(s->flags&CODEC_FLAG_PASS1){
828         for(i=0; i<count; i++){
829             LOAD2;
830             STAT2;
831         }
832     }
833     if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
834         return 0;
835
836     if(s->context){
837         for(i=0; i<count; i++){
838             LOAD2;
839             STAT2;
840             WRITE2;
841         }
842     }else{
843         for(i=0; i<count; i++){
844             LOAD2;
845             WRITE2;
846         }
847     }
848     return 0;
849 }
850 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
851
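/* Decode count RGB(A) pixels. A hit in the joint table yields a ready-made pixel
 * from pix_bgr_map; otherwise the channels are decoded one by one, undoing the
 * green decorrelation if used. Alpha, when present, reuses the third code table. */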
852 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
853     int i;
854     for(i=0; i<count; i++){
855         int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
856         if(code != -1){
857             *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
858         }else if(decorrelate){
859             s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
860             s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
861             s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
862         }else{
863             s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
864             s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
865             s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
866         }
867         if(alpha)
868             s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
869     }
870 }
871
872 static void decode_bgr_bitstream(HYuvContext *s, int count){
873     if(s->decorrelate){
874         if(s->bitstream_bpp==24)
875             decode_bgr_1(s, count, 1, 0);
876         else
877             decode_bgr_1(s, count, 1, 1);
878     }else{
879         if(s->bitstream_bpp==24)
880             decode_bgr_1(s, count, 0, 0);
881         else
882             decode_bgr_1(s, count, 0, 1);
883     }
884 }
885
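/* Encode count RGB pixels: green is coded directly, red and blue as differences to
 * green (this encoder always decorrelates RGB). */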
886 static int encode_bgr_bitstream(HYuvContext *s, int count){
887     int i;
888
889     if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
890         av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
891         return -1;
892     }
893
894 #define LOAD3\
895             int g= s->temp[0][4*i+G];\
896             int b= (s->temp[0][4*i+B] - g) & 0xff;\
897             int r= (s->temp[0][4*i+R] - g) & 0xff;
898 #define STAT3\
899             s->stats[0][b]++;\
900             s->stats[1][g]++;\
901             s->stats[2][r]++;
902 #define WRITE3\
903             put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
904             put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
905             put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
906
907     if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
908         for(i=0; i<count; i++){
909             LOAD3;
910             STAT3;
911         }
912     }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
913         for(i=0; i<count; i++){
914             LOAD3;
915             STAT3;
916             WRITE3;
917         }
918     }else{
919         for(i=0; i<count; i++){
920             LOAD3;
921             WRITE3;
922         }
923     }
924     return 0;
925 }
926
927 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
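/* Pass the rows decoded since the last call to the user's draw_horiz_band callback,
 * adjusting the chroma offsets for 4:2:0 content. */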
928 static void draw_slice(HYuvContext *s, int y){
929     int h, cy;
930     int offset[4];
931
932     if(s->avctx->draw_horiz_band==NULL)
933         return;
934
935     h= y - s->last_slice_end;
936     y -= h;
937
938     if(s->bitstream_bpp==12){
939         cy= y>>1;
940     }else{
941         cy= y;
942     }
943
944     offset[0] = s->picture.linesize[0]*y;
945     offset[1] = s->picture.linesize[1]*cy;
946     offset[2] = s->picture.linesize[2]*cy;
947     offset[3] = 0;
948     emms_c();
949
950     s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
951
952     s->last_slice_end= y + h;
953 }
954
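/* Decode one frame: byte-swap the input (the bitstream is stored as little-endian
 * 32-bit words), optionally read per-frame tables when context modelling is on, then
 * reconstruct the planes row by row according to the predictor. */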
955 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
956     const uint8_t *buf = avpkt->data;
957     int buf_size = avpkt->size;
958     HYuvContext *s = avctx->priv_data;
959     const int width= s->width;
960     const int width2= s->width>>1;
961     const int height= s->height;
962     int fake_ystride, fake_ustride, fake_vstride;
963     AVFrame * const p= &s->picture;
964     int table_size= 0;
965
966     AVFrame *picture = data;
967
968     av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
969     if (!s->bitstream_buffer)
970         return AVERROR(ENOMEM);
971
972     memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
973     s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
974
975     if(p->data[0])
976         ff_thread_release_buffer(avctx, p);
977
978     p->reference= 0;
979     if(ff_thread_get_buffer(avctx, p) < 0){
980         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
981         return -1;
982     }
983
984     if(s->context){
985         table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
986         if(table_size < 0)
987             return -1;
988     }
989
990     if((unsigned)(buf_size-table_size) >= INT_MAX/8)
991         return -1;
992
993     init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
994
995     fake_ystride= s->interlaced ? p->linesize[0]*2  : p->linesize[0];
996     fake_ustride= s->interlaced ? p->linesize[1]*2  : p->linesize[1];
997     fake_vstride= s->interlaced ? p->linesize[2]*2  : p->linesize[2];
998
999     s->last_slice_end= 0;
1000
1001     if(s->bitstream_bpp<24){
1002         int y, cy;
1003         int lefty, leftu, leftv;
1004         int lefttopy, lefttopu, lefttopv;
1005
1006         if(s->yuy2){
1007             p->data[0][3]= get_bits(&s->gb, 8);
1008             p->data[0][2]= get_bits(&s->gb, 8);
1009             p->data[0][1]= get_bits(&s->gb, 8);
1010             p->data[0][0]= get_bits(&s->gb, 8);
1011
1012             av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
1013             return -1;
1014         }else{
1015
1016             leftv= p->data[2][0]= get_bits(&s->gb, 8);
1017             lefty= p->data[0][1]= get_bits(&s->gb, 8);
1018             leftu= p->data[1][0]= get_bits(&s->gb, 8);
1019                    p->data[0][0]= get_bits(&s->gb, 8);
1020
1021             switch(s->predictor){
1022             case LEFT:
1023             case PLANE:
1024                 decode_422_bitstream(s, width-2);
1025                 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1026                 if(!(s->flags&CODEC_FLAG_GRAY)){
1027                     leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1028                     leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1029                 }
1030
1031                 for(cy=y=1; y<s->height; y++,cy++){
1032                     uint8_t *ydst, *udst, *vdst;
1033
1034                     if(s->bitstream_bpp==12){
1035                         decode_gray_bitstream(s, width);
1036
1037                         ydst= p->data[0] + p->linesize[0]*y;
1038
1039                         lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1040                         if(s->predictor == PLANE){
1041                             if(y>s->interlaced)
1042                                 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1043                         }
1044                         y++;
1045                         if(y>=s->height) break;
1046                     }
1047
1048                     draw_slice(s, y);
1049
1050                     ydst= p->data[0] + p->linesize[0]*y;
1051                     udst= p->data[1] + p->linesize[1]*cy;
1052                     vdst= p->data[2] + p->linesize[2]*cy;
1053
1054                     decode_422_bitstream(s, width);
1055                     lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1056                     if(!(s->flags&CODEC_FLAG_GRAY)){
1057                         leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1058                         leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
1059                     }
1060                     if(s->predictor == PLANE){
1061                         if(cy>s->interlaced){
1062                             s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1063                             if(!(s->flags&CODEC_FLAG_GRAY)){
1064                                 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1065                                 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1066                             }
1067                         }
1068                     }
1069                 }
1070                 draw_slice(s, height);
1071
1072                 break;
1073             case MEDIAN:
1074                 /* first line except first 2 pixels is left predicted */
1075                 decode_422_bitstream(s, width-2);
1076                 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1077                 if(!(s->flags&CODEC_FLAG_GRAY)){
1078                     leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1079                     leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1080                 }
1081
1082                 cy=y=1;
1083
1084                 /* second line is left predicted for interlaced case */
1085                 if(s->interlaced){
1086                     decode_422_bitstream(s, width);
1087                     lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1088                     if(!(s->flags&CODEC_FLAG_GRAY)){
1089                         leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[1], s->temp[1], width2, leftu);
1090                         leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[2], s->temp[2], width2, leftv);
1091                     }
1092                     y++; cy++;
1093                 }
1094
1095                 /* next 4 pixels are left predicted too */
1096                 decode_422_bitstream(s, 4);
1097                 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1098                 if(!(s->flags&CODEC_FLAG_GRAY)){
1099                     leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1100                     leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1101                 }
1102
1103                 /* next line except the first 4 pixels is median predicted */
1104                 lefttopy= p->data[0][3];
1105                 decode_422_bitstream(s, width-4);
1106                 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1107                 if(!(s->flags&CODEC_FLAG_GRAY)){
1108                     lefttopu= p->data[1][1];
1109                     lefttopv= p->data[2][1];
1110                     s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1111                     s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
1112                 }
1113                 y++; cy++;
1114
1115                 for(; y<height; y++,cy++){
1116                     uint8_t *ydst, *udst, *vdst;
1117
1118                     if(s->bitstream_bpp==12){
1119                         while(2*cy > y){
1120                             decode_gray_bitstream(s, width);
1121                             ydst= p->data[0] + p->linesize[0]*y;
1122                             s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1123                             y++;
1124                         }
1125                         if(y>=height) break;
1126                     }
1127                     draw_slice(s, y);
1128
1129                     decode_422_bitstream(s, width);
1130
1131                     ydst= p->data[0] + p->linesize[0]*y;
1132                     udst= p->data[1] + p->linesize[1]*cy;
1133                     vdst= p->data[2] + p->linesize[2]*cy;
1134
1135                     s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1136                     if(!(s->flags&CODEC_FLAG_GRAY)){
1137                         s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1138                         s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1139                     }
1140                 }
1141
1142                 draw_slice(s, height);
1143                 break;
1144             }
1145         }
1146     }else{
1147         int y;
1148         int leftr, leftg, leftb, lefta;
1149         const int last_line= (height-1)*p->linesize[0];
1150
1151         if(s->bitstream_bpp==32){
1152             lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
1153             leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1154             leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1155             leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1156         }else{
1157             leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1158             leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1159             leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1160             lefta= p->data[0][last_line+A]= 255;
1161             skip_bits(&s->gb, 8);
1162         }
1163
1164         if(s->bgr32){
1165             switch(s->predictor){
1166             case LEFT:
1167             case PLANE:
1168                 decode_bgr_bitstream(s, width-1);
1169                 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
1170
1171                 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1172                     decode_bgr_bitstream(s, width);
1173
1174                     s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1175                     if(s->predictor == PLANE){
1176                         if(s->bitstream_bpp!=32) lefta=0;
1177                         if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1178                             s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1179                                              p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1180                         }
1181                     }
1182                 }
1183                 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1184                 break;
1185             default:
1186                 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1187             }
1188         }else{
1189
1190             av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1191             return -1;
1192         }
1193     }
1194     emms_c();
1195
1196     *picture= *p;
1197     *data_size = sizeof(AVFrame);
1198
1199     return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1200 }
1201 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1202
1203 static int common_end(HYuvContext *s){
1204     int i;
1205
1206     for(i=0; i<3; i++){
1207         av_freep(&s->temp[i]);
1208     }
1209     return 0;
1210 }
1211
1212 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1213 static av_cold int decode_end(AVCodecContext *avctx)
1214 {
1215     HYuvContext *s = avctx->priv_data;
1216     int i;
1217
1218     if (s->picture.data[0])
1219         avctx->release_buffer(avctx, &s->picture);
1220
1221     common_end(s);
1222     av_freep(&s->bitstream_buffer);
1223
1224     for(i=0; i<6; i++){
1225         free_vlc(&s->vlc[i]);
1226     }
1227
1228     return 0;
1229 }
1230 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1231
1232 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
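/* Encode one frame: with context modelling the current adaptive tables are stored
 * first, then each row is predicted (left/plane/median) and entropy coded, and the
 * finished bitstream is byte-swapped back into 32-bit little-endian words. */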
1233 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1234     HYuvContext *s = avctx->priv_data;
1235     AVFrame *pict = data;
1236     const int width= s->width;
1237     const int width2= s->width>>1;
1238     const int height= s->height;
1239     const int fake_ystride= s->interlaced ? pict->linesize[0]*2  : pict->linesize[0];
1240     const int fake_ustride= s->interlaced ? pict->linesize[1]*2  : pict->linesize[1];
1241     const int fake_vstride= s->interlaced ? pict->linesize[2]*2  : pict->linesize[2];
1242     AVFrame * const p= &s->picture;
1243     int i, j, size=0;
1244
1245     *p = *pict;
1246     p->pict_type= FF_I_TYPE;
1247     p->key_frame= 1;
1248
1249     if(s->context){
1250         for(i=0; i<3; i++){
1251             generate_len_table(s->len[i], s->stats[i]);
1252             if(generate_bits_table(s->bits[i], s->len[i])<0)
1253                 return -1;
1254             size+= store_table(s, s->len[i], &buf[size]);
1255         }
1256
1257         for(i=0; i<3; i++)
1258             for(j=0; j<256; j++)
1259                 s->stats[i][j] >>= 1;
1260     }
1261
1262     init_put_bits(&s->pb, buf+size, buf_size-size);
1263
1264     if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1265         int lefty, leftu, leftv, y, cy;
1266
1267         put_bits(&s->pb, 8, leftv= p->data[2][0]);
1268         put_bits(&s->pb, 8, lefty= p->data[0][1]);
1269         put_bits(&s->pb, 8, leftu= p->data[1][0]);
1270         put_bits(&s->pb, 8,        p->data[0][0]);
1271
1272         lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1273         leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1274         leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1275
1276         encode_422_bitstream(s, 2, width-2);
1277
1278         if(s->predictor==MEDIAN){
1279             int lefttopy, lefttopu, lefttopv;
1280             cy=y=1;
1281             if(s->interlaced){
1282                 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1283                 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1284                 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1285
1286                 encode_422_bitstream(s, 0, width);
1287                 y++; cy++;
1288             }
1289
1290             lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1291             leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1292             leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1293
1294             encode_422_bitstream(s, 0, 4);
1295
1296             lefttopy= p->data[0][3];
1297             lefttopu= p->data[1][1];
1298             lefttopv= p->data[2][1];
1299             s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1300             s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1301             s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1302             encode_422_bitstream(s, 0, width-4);
1303             y++; cy++;
1304
1305             for(; y<height; y++,cy++){
1306                 uint8_t *ydst, *udst, *vdst;
1307
1308                 if(s->bitstream_bpp==12){
1309                     while(2*cy > y){
1310                         ydst= p->data[0] + p->linesize[0]*y;
1311                         s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1312                         encode_gray_bitstream(s, width);
1313                         y++;
1314                     }
1315                     if(y>=height) break;
1316                 }
1317                 ydst= p->data[0] + p->linesize[0]*y;
1318                 udst= p->data[1] + p->linesize[1]*cy;
1319                 vdst= p->data[2] + p->linesize[2]*cy;
1320
1321                 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1322                 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1323                 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1324
1325                 encode_422_bitstream(s, 0, width);
1326             }
1327         }else{
1328             for(cy=y=1; y<height; y++,cy++){
1329                 uint8_t *ydst, *udst, *vdst;
1330
1331                 /* encode a luma only line & y++ */
1332                 if(s->bitstream_bpp==12){
1333                     ydst= p->data[0] + p->linesize[0]*y;
1334
1335                     if(s->predictor == PLANE && s->interlaced < y){
1336                         s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1337
1338                         lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1339                     }else{
1340                         lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1341                     }
1342                     encode_gray_bitstream(s, width);
1343                     y++;
1344                     if(y>=height) break;
1345                 }
1346
1347                 ydst= p->data[0] + p->linesize[0]*y;
1348                 udst= p->data[1] + p->linesize[1]*cy;
1349                 vdst= p->data[2] + p->linesize[2]*cy;
1350
1351                 if(s->predictor == PLANE && s->interlaced < cy){
1352                     s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1353                     s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1354                     s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1355
1356                     lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1357                     leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1358                     leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1359                 }else{
1360                     lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1361                     leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1362                     leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1363                 }
1364
1365                 encode_422_bitstream(s, 0, width);
1366             }
1367         }
1368     }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1369         uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1370         const int stride = -p->linesize[0];
1371         const int fake_stride = -fake_ystride;
1372         int y;
1373         int leftr, leftg, leftb;
1374
1375         put_bits(&s->pb, 8, leftr= data[R]);
1376         put_bits(&s->pb, 8, leftg= data[G]);
1377         put_bits(&s->pb, 8, leftb= data[B]);
1378         put_bits(&s->pb, 8, 0);
1379
1380         sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1381         encode_bgr_bitstream(s, width-1);
1382
1383         for(y=1; y<s->height; y++){
1384             uint8_t *dst = data + y*stride;
1385             if(s->predictor == PLANE && s->interlaced < y){
1386                 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1387                 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1388             }else{
1389                 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1390             }
1391             encode_bgr_bitstream(s, width);
1392         }
1393     }else{
1394         av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1395     }
1396     emms_c();
1397
1398     size+= (put_bits_count(&s->pb)+31)/8;
1399     put_bits(&s->pb, 16, 0);
1400     put_bits(&s->pb, 15, 0);
1401     size/= 4;
1402
1403     if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1404         int j;
1405         char *p= avctx->stats_out;
1406         char *end= p + 1024*30;
1407         for(i=0; i<3; i++){
1408             for(j=0; j<256; j++){
1409                 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1410                 p+= strlen(p);
1411                 s->stats[i][j]= 0;
1412             }
1413             snprintf(p, end-p, "\n");
1414             p++;
1415         }
1416     } else
1417         avctx->stats_out[0] = '\0';
1418     if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1419         flush_put_bits(&s->pb);
1420         s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1421     }
1422
1423     s->picture_number++;
1424
1425     return size*4;
1426 }
1427
1428 static av_cold int encode_end(AVCodecContext *avctx)
1429 {
1430     HYuvContext *s = avctx->priv_data;
1431
1432     common_end(s);
1433
1434     av_freep(&avctx->extradata);
1435     av_freep(&avctx->stats_out);
1436
1437     return 0;
1438 }
1439 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1440
1441 #if CONFIG_HUFFYUV_DECODER
1442 AVCodec ff_huffyuv_decoder = {
1443     "huffyuv",
1444     AVMEDIA_TYPE_VIDEO,
1445     CODEC_ID_HUFFYUV,
1446     sizeof(HYuvContext),
1447     decode_init,
1448     NULL,
1449     decode_end,
1450     decode_frame,
1451     CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1452     NULL,
1453     .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1454     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1455 };
1456 #endif
1457
1458 #if CONFIG_FFVHUFF_DECODER
1459 AVCodec ff_ffvhuff_decoder = {
1460     "ffvhuff",
1461     AVMEDIA_TYPE_VIDEO,
1462     CODEC_ID_FFVHUFF,
1463     sizeof(HYuvContext),
1464     decode_init,
1465     NULL,
1466     decode_end,
1467     decode_frame,
1468     CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1469     NULL,
1470     .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1471     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1472 };
1473 #endif
1474
1475 #if CONFIG_HUFFYUV_ENCODER
1476 AVCodec ff_huffyuv_encoder = {
1477     "huffyuv",
1478     AVMEDIA_TYPE_VIDEO,
1479     CODEC_ID_HUFFYUV,
1480     sizeof(HYuvContext),
1481     encode_init,
1482     encode_frame,
1483     encode_end,
1484     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1485     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1486 };
1487 #endif
1488
1489 #if CONFIG_FFVHUFF_ENCODER
1490 AVCodec ff_ffvhuff_encoder = {
1491     "ffvhuff",
1492     AVMEDIA_TYPE_VIDEO,
1493     CODEC_ID_FFVHUFF,
1494     sizeof(HYuvContext),
1495     encode_init,
1496     encode_frame,
1497     encode_end,
1498     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1499     .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1500 };
1501 #endif