2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
28 * huffyuv codec for libavcodec.
// Prediction modes signalled in the stream/extradata (LEFT/PLANE/MEDIAN are
// used below); full enumerator list is elided in this view.
51 typedef enum Predictor{
// Persistent per-instance state shared by the huffyuv encoder and decoder.
// NOTE(review): several fields are elided from this sampled view.
57 typedef struct HYuvContext{
58 AVCodecContext *avctx;
66 int yuy2; //use yuy2 instead of 422P
67 int bgr32; //use bgr32 instead of bgr24
// per-plane symbol frequency counters; feed generate_len_table() when encoding
74 uint64_t stats[3][256];
// per-plane canonical Huffman codes, built by generate_bits_table()
76 uint32_t bits[3][256];
// first-level decode LUT mapping a joint B,G,R VLC code to a packed pixel
// (filled by generate_joint_tables())
77 uint32_t pix_bgr_map[1<<VLC_BITS];
78 VLC vlc[6]; //Y,U,V,YY,YU,YV
// scratch buffer holding the byte-swapped input packet (see decode_frame)
80 uint8_t *bitstream_buffer;
81 unsigned int bitstream_buffer_size;
// Run-length coded Huffman code-length table for the luma plane of "classic"
// (pre-extradata / v1) huffyuv files; consumed via read_len_table() in
// read_old_huffman_tables().
85 static const unsigned char classic_shift_luma[] = {
86 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
87 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
// Run-length coded Huffman code-length table for the chroma planes of
// "classic" huffyuv files (companion of classic_shift_luma above).
91 static const unsigned char classic_shift_chroma[] = {
92 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
93 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
94 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
// Fixed luma code values for "classic" huffyuv: copied verbatim into
// s->bits[0] by read_old_huffman_tables() (the code *lengths* come from
// classic_shift_luma above).
97 static const unsigned char classic_add_luma[256] = {
98 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
99 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
100 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
101 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
102 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
103 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
104 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
105 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
106 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
107 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
108 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
109 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
110 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
111 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
112 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
113 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
// Fixed chroma code values for "classic" huffyuv: copied verbatim into
// s->bits[1] by read_old_huffman_tables() (lengths from classic_shift_chroma).
116 static const unsigned char classic_add_chroma[256] = {
117 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
118 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
119 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
120 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
121 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
122 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
123 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
124 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
125 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
126 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
127 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
128 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
129 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
130 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
131 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
132 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
// Left-predict one row of bytes: writes differences of adjacent samples into
// dst, with the first sample differenced against 'left'. A scalar loop covers
// the first 16 samples, then dsp.diff_bytes handles the remainder.
// NOTE(review): body partially elided here — presumably returns the row's
// last sample so callers can chain it as 'left' for the next row (callers in
// encode_frame assign the result back to lefty/leftu/leftv); confirm.
135 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
139 const int temp= src[i];
146 const int temp= src[i];
150 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
// Left-predict one row of 32-bit BGRA pixels, per channel. The scalar loop
// covers the first 4 pixels (16 bytes); dsp.diff_bytes then differences the
// rest against the previous pixel (stride 4). On return *red/*green/*blue/
// *alpha hold the last pixel's channels so the caller can chain rows.
155 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue, int *alpha){
162 for(i=0; i<FFMIN(w,4); i++){
163 const int rt= src[i*4+R];
164 const int gt= src[i*4+G];
165 const int bt= src[i*4+B];
166 const int at= src[i*4+A];
176 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
177 *red= src[(w-1)*4+R];
178 *green= src[(w-1)*4+G];
179 *blue= src[(w-1)*4+B];
180 *alpha= src[(w-1)*4+A];
// Left-predict one row of packed 24-bit RGB pixels, per channel. The scalar
// loop covers the first 16 pixels (48 bytes); dsp.diff_bytes then differences
// the rest against the previous pixel (stride 3). Last pixel's channels are
// returned through *red/*green/*blue for row chaining (no alpha in 24 bpp).
183 static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
189 for(i=0; i<FFMIN(w,16); i++){
190 const int rt= src[i*3+0];
191 const int gt= src[i*3+1];
192 const int bt= src[i*3+2];
200 s->dsp.diff_bytes(dst+48, src+48, src+48-3, w*3-48);
201 *red= src[(w-1)*3+0];
202 *green= src[(w-1)*3+1];
203 *blue= src[(w-1)*3+2];
// Parse a run-length coded table of 256 Huffman code lengths from the
// bitstream into dst: each record is a 3-bit repeat count and a 5-bit length.
// NOTE(review): the 8-bit repeat read presumably handles the escape when the
// 3-bit count is 0 — the selecting condition is elided in this view; confirm.
// Returns <0 on malformed input (logged as "Error reading huffman table").
206 static int read_len_table(uint8_t *dst, GetBitContext *gb){
210 repeat= get_bits(gb, 3);
211 val = get_bits(gb, 5);
213 repeat= get_bits(gb, 8);
214 //printf("%d %d\n", val, repeat);
216 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
// Build canonical Huffman code values in dst from the per-symbol code lengths
// in len_table, assigning codes longest-first (len counts down from 32).
// Returns <0 if the lengths describe an invalid (over/under-subscribed) code
// space ("Error generating huffman table").
225 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
229 for(len=32; len>0; len--){
230 for(index=0; index<256; index++){
231 if(len_table[index]==len)
235 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
243 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
// Sift the element at 'root' down a binary min-heap of 'size' HeapElems
// ordered by .val (smallest value at h[0]); helper for generate_len_table.
249 static void heap_sift(HeapElem *h, int root, int size)
251 while(root*2+1 < size) {
252 int child = root*2+1;
// pick the smaller of the two children
253 if(child < size-1 && h[child].val > h[child+1].val)
255 if(h[root].val > h[child].val) {
256 FFSWAP(HeapElem, h[root], h[child]);
// Build Huffman code lengths in dst from symbol frequencies in stats, using a
// min-heap to repeatedly merge the two least-frequent nodes. If any resulting
// length reaches 32 (too long to encode), the whole process is retried with a
// doubling 'offset' bias added to every weight, which flattens the tree.
263 static void generate_len_table(uint8_t *dst, const uint64_t *stats){
270 for(offset=1; ; offset<<=1){
271 for(i=0; i<size; i++){
// bias the weight so zero-frequency symbols still get finite lengths
273 h[i].val = (stats[i] << 8) + offset;
// heapify bottom-up
275 for(i=size/2-1; i>=0; i--)
276 heap_sift(h, i, size);
278 for(next=size; next<size*2-1; next++){
279 // merge the two smallest entries, and put it back in the heap
280 uint64_t min1v = h[0].val;
281 up[h[0].name] = next;
282 h[0].val = INT64_MAX;
283 heap_sift(h, 0, size);
284 up[h[0].name] = next;
287 heap_sift(h, 0, size);
// walk the merge tree top-down: depth of each node = its code length
291 for(i=2*size-3; i>=size; i--)
292 len[i] = len[up[i]] + 1;
293 for(i=0; i<size; i++) {
294 dst[i] = len[up[i]] + 1;
// a length >= 32 cannot be represented -> retry with larger offset
295 if(dst[i] >= 32) break;
300 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
// Build the joint (two/three-symbol) VLC decode tables so the bitstream
// readers can fetch pixel pairs in one get_vlc2() call:
//  - YUV (<24 bpp): vlc[3+p] decodes a (Y,next) pair per plane p, but only
//    for pairs whose combined length fits in the first-level table (VLC_BITS).
//  - RGB: vlc[3] decodes a joint (G,B,R) code; pix_bgr_map holds the packed
//    pixel for each joint code.
302 static void generate_joint_tables(HYuvContext *s){
303 uint16_t symbols[1<<VLC_BITS];
304 uint16_t bits[1<<VLC_BITS];
305 uint8_t len[1<<VLC_BITS];
306 if(s->bitstream_bpp < 24){
309 for(i=y=0; y<256; y++){
310 int len0 = s->len[0][y];
// only pairs that fit entirely in one first-level lookup are joined
311 int limit = VLC_BITS - len0;
314 for(u=0; u<256; u++){
315 int len1 = s->len[p][u];
318 len[i] = len0 + len1;
319 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
320 symbols[i] = (y<<8) + u;
321 if(symbols[i] != 0xffff) // reserved to mean "invalid"
325 free_vlc(&s->vlc[3+p]);
326 init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
329 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
330 int i, b, g, r, code;
// when decorrelated, G is coded with table 0 and B/R as deltas (see
// decode_bgr_1); p0/p1 select the matching len/bits tables
331 int p0 = s->decorrelate;
332 int p1 = !s->decorrelate;
333 // restrict the range to +/-16 becaues that's pretty much guaranteed to
334 // cover all the combinations that fit in 11 bits total, and it doesn't
335 // matter if we miss a few rare codes.
336 for(i=0, g=-16; g<16; g++){
337 int len0 = s->len[p0][g&255];
338 int limit0 = VLC_BITS - len0;
341 for(b=-16; b<16; b++){
342 int len1 = s->len[p1][b&255];
343 int limit1 = limit0 - len1;
346 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
347 for(r=-16; r<16; r++){
348 int len2 = s->len[2][r&255];
351 len[i] = len0 + len1 + len2;
352 bits[i] = (code << len2) + s->bits[2][r&255];
366 free_vlc(&s->vlc[3]);
367 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
// Parse the three per-plane Huffman tables from 'src' (extradata or in-band
// header): read code lengths, derive canonical codes, (re)build the single-
// symbol VLCs and then the joint tables. Returns the number of bytes
// consumed, or <0 on error.
371 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
375 init_get_bits(&gb, src, length*8);
378 if(read_len_table(s->len[i], &gb)<0)
380 if(generate_bits_table(s->bits[i], s->len[i])<0){
// free before init: this may be called per-frame when s->context is set
383 free_vlc(&s->vlc[i]);
384 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
387 generate_joint_tables(s);
// round bit position up to whole bytes consumed
389 return (get_bits_count(&gb)+7)/8;
// Install the hard-coded "classic" (v1) huffyuv tables: code lengths come
// from the classic_shift_* RLE blobs, code values verbatim from the
// classic_add_* arrays. Returns 0 on success, <0 on error.
392 static int read_old_huffman_tables(HYuvContext *s){
397 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
398 if(read_len_table(s->len[0], &gb)<0)
400 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
401 if(read_len_table(s->len[1], &gb)<0)
404 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
405 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
// RGB streams reuse the luma table for all planes; YUV copies chroma to V
407 if(s->bitstream_bpp >= 24){
408 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
409 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
411 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
412 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
415 free_vlc(&s->vlc[i]);
416 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
419 generate_joint_tables(s);
423 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
// Allocate the per-plane scratch row buffers: separate rows for planar YUV
// (<24 bpp), one 4x-wide zeroed row for packed RGB(A).
// NOTE(review): allocation failure handling is not visible in this view.
428 static av_cold void alloc_temp(HYuvContext *s){
431 if(s->bitstream_bpp<24){
433 s->temp[i]= av_malloc(s->width + 16);
436 s->temp[0]= av_mallocz(4*s->width + 16);
// Shared encoder/decoder setup: cache flags and dimensions from the
// AVCodecContext and initialize the DSP function pointers.
440 static av_cold int common_init(AVCodecContext *avctx){
441 HYuvContext *s = avctx->priv_data;
444 s->flags= avctx->flags;
446 dsputil_init(&s->dsp, avctx);
448 s->width= avctx->width;
449 s->height= avctx->height;
450 assert(s->width>0 && s->height>0);
455 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
// Decoder init: parse the 4-byte extradata header (method/bpp/flags) when
// present, otherwise fall back to heuristics on bits_per_coded_sample and the
// classic v1 tables; then pick the output pixel format from bitstream_bpp.
456 static av_cold int decode_init(AVCodecContext *avctx)
458 HYuvContext *s = avctx->priv_data;
461 memset(s->vlc, 0, 3*sizeof(VLC));
463 avctx->coded_frame= &s->picture;
464 avcodec_get_frame_defaults(&s->picture);
// heuristic default: material taller than 288 lines is assumed interlaced
465 s->interlaced= s->height > 288;
468 //if(avctx->extradata)
469 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
470 if(avctx->extradata_size){
471 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
472 s->version=1; // do such files exist at all?
479 int method, interlace;
481 if (avctx->extradata_size < 4)
// extradata byte 0: bit6 = decorrelate, low 6 bits = predictor
484 method= ((uint8_t*)avctx->extradata)[0];
485 s->decorrelate= method&64 ? 1 : 0;
486 s->predictor= method&63;
487 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
488 if(s->bitstream_bpp==0)
489 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
// extradata byte 2: bits 4-5 interlace override, bit 6 per-frame tables
490 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
491 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
492 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
494 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
// no usable extradata: infer parameters from bits_per_coded_sample
497 switch(avctx->bits_per_coded_sample&7){
508 s->decorrelate= avctx->bits_per_coded_sample >= 24;
511 s->predictor= MEDIAN;
515 s->predictor= LEFT; //OLD
519 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
522 if(read_old_huffman_tables(s) < 0)
526 switch(s->bitstream_bpp){
528 avctx->pix_fmt = PIX_FMT_YUV420P;
532 avctx->pix_fmt = PIX_FMT_YUYV422;
534 avctx->pix_fmt = PIX_FMT_YUV422P;
540 avctx->pix_fmt = PIX_FMT_RGB32;
542 avctx->pix_fmt = PIX_FMT_BGR24;
551 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
// Frame-threading copy init: the per-thread context was memcpy'd from the
// master, so clear the VLC table pointers (they must not be shared/freed
// twice) and rebuild the tables for this thread.
556 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
558 HYuvContext *s = avctx->priv_data;
561 avctx->coded_frame= &s->picture;
564 for (i = 0; i < 6; i++)
565 s->vlc[i].table = NULL;
568 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
571 if(read_old_huffman_tables(s) < 0)
577 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
579 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
// Run-length encode a 256-entry code-length table into buf using the same
// format read_len_table() parses (5-bit length + 3-bit repeat, with an
// explicit 8-bit count for long runs). Returns the number of bytes written.
580 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
588 for(; i<256 && len[i]==val && repeat<255; i++)
591 assert(val < 32 && val >0 && repeat<256 && repeat>0);
// long run: 8-bit count stored in its own byte
594 buf[index++]= repeat;
596 buf[index++]= val | (repeat<<5);
// Encoder init: choose bitstream bpp from the input pixel format, validate
// option combinations (huffyuv vs. ffvhuff restrictions), write the 4-byte
// extradata header plus the stored Huffman tables, and seed the symbol
// statistics (from stats_in for 2-pass, else with a synthetic 1/(d+1) model).
603 static av_cold int encode_init(AVCodecContext *avctx)
605 HYuvContext *s = avctx->priv_data;
610 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
611 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
614 avctx->coded_frame= &s->picture;
616 switch(avctx->pix_fmt){
617 case PIX_FMT_YUV420P:
618 s->bitstream_bpp= 12;
620 case PIX_FMT_YUV422P:
621 s->bitstream_bpp= 16;
624 s->bitstream_bpp= 32;
627 s->bitstream_bpp= 24;
630 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
633 avctx->bits_per_coded_sample= s->bitstream_bpp;
634 s->decorrelate= s->bitstream_bpp >= 24;
635 s->predictor= avctx->prediction_method;
636 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
// context=1 enables per-frame adaptive tables (ffvhuff extension)
637 if(avctx->context_model==1){
638 s->context= avctx->context_model;
639 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
640 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
// restrictions for the original huffyuv codec (not ffvhuff)
645 if(avctx->codec->id==CODEC_ID_HUFFYUV){
646 if(avctx->pix_fmt==PIX_FMT_YUV420P){
647 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
650 if(avctx->context_model){
651 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
654 if(s->interlaced != ( s->height > 288 ))
655 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
658 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
659 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
// extradata header: [0]=predictor|decorrelate, [1]=bpp, [2]=flags, [3]=0
663 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
664 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
665 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
667 ((uint8_t*)avctx->extradata)[2]|= 0x40;
668 ((uint8_t*)avctx->extradata)[3]= 0;
669 s->avctx->extradata_size= 4;
// 2-pass: accumulate stats parsed back from the pass-1 stats file
672 char *p= avctx->stats_in;
682 for(j=0; j<256; j++){
683 s->stats[i][j]+= strtol(p, &next, 0);
684 if(next==p) return -1;
688 if(p[0]==0 || p[1]==0 || p[2]==0) break;
// no stats: synthetic Laplacian-ish prior, peaked at small residuals
692 for(j=0; j<256; j++){
693 int d= FFMIN(j, 256-j);
695 s->stats[i][j]= 100000000/(d+1);
700 generate_len_table(s->len[i], s->stats[i]);
702 if(generate_bits_table(s->bits[i], s->len[i])<0){
706 s->avctx->extradata_size+=
707 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
712 int pels = s->width*s->height / (i?40:10);
713 for(j=0; j<256; j++){
714 int d= FFMIN(j, 256-j);
715 s->stats[i][j]= pels/(d+1);
724 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
732 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
734 /* TODO instead of restarting the read when the code isn't in the first level
735 * of the joint table, jump into the 2nd level of the individual table. */
// Decode two symbols at once via the joint table vlc[3+plane1]; on a miss
// (code not in the first-level joint table) fall back to two single-symbol
// reads from vlc[0] and vlc[plane1].
736 #define READ_2PIX(dst0, dst1, plane1){\
737 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
742 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
743 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
// Decode 'count' 4:2:2 pixel pairs (Y0 U Y1 V) into s->temp[0..2].
// Near the end of the buffer (worst case ~31 bits per symbol, 4 symbols per
// pair) the slow path re-checks the bit position each iteration to avoid
// overreading truncated input; otherwise the unchecked fast loop is used.
747 static void decode_422_bitstream(HYuvContext *s, int count){
752 if(count >= (get_bits_left(&s->gb))/(31*4)){
753 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
754 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
755 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
758 for(i=0; i<count; i++){
759 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
760 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
// Decode 'count' luma-only pixel pairs into s->temp[0], using the joint YY
// table (plane index 0). Same bounded/unbounded loop split as
// decode_422_bitstream, with 2 symbols per pair.
765 static void decode_gray_bitstream(HYuvContext *s, int count){
770 if(count >= (get_bits_left(&s->gb))/(31*2)){
771 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
772 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
775 for(i=0; i<count; i++){
776 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
781 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
// Entropy-code 'count' 4:2:2 pixel pairs from s->temp (starting at 'offset'
// luma samples in). Three paths: stats-gathering for pass 1 (optionally
// without output), context-adaptive (stats + output), and plain output.
// Returns <0 if the packet buffer would overflow.
782 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
784 const uint8_t *y = s->temp[0] + offset;
785 const uint8_t *u = s->temp[1] + offset/2;
786 const uint8_t *v = s->temp[2] + offset/2;
// worst case 4 bytes per sample, 2 luma + chroma per pair
788 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
789 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
800 if(s->flags&CODEC_FLAG_PASS1){
801 for(i=0; i<count; i++){
809 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
812 for(i=0; i<count; i++){
815 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
817 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
819 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
821 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
824 for(i=0; i<count; i++){
826 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
827 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
828 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
829 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
// Entropy-code 'count' luma-only pixel pairs from s->temp[0]; same three
// paths (pass-1 stats / context / plain) as encode_422_bitstream.
// Returns <0 if the packet buffer would overflow.
835 static int encode_gray_bitstream(HYuvContext *s, int count){
838 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
839 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
// macro bodies shared by the three loops below (LOAD / WRITE pair)
844 int y0 = s->temp[0][2*i];\
845 int y1 = s->temp[0][2*i+1];
850 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
851 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
854 if(s->flags&CODEC_FLAG_PASS1){
855 for(i=0; i<count; i++){
860 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
864 for(i=0; i<count; i++){
870 for(i=0; i<count; i++){
877 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
// Decode 'count' BGR(A) pixels into s->temp[0]. Fast path: one joint-table
// hit yields a whole packed pixel from pix_bgr_map. Slow path: read B, G, R
// individually; with 'decorrelate' set, B and R are stored as deltas from G.
// 'alpha' selects whether an A symbol follows each pixel. Always-inlined so
// the constant decorrelate/alpha flags fold away per call site.
879 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
881 for(i=0; i<count; i++){
882 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
884 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
885 }else if(decorrelate){
886 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
887 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
888 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
890 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
891 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
892 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
895 s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
// Dispatch to the right decode_bgr_1 specialization based on s->decorrelate
// and whether the stream is 24 bpp (no alpha) or 32 bpp (with alpha).
899 static void decode_bgr_bitstream(HYuvContext *s, int count){
901 if(s->bitstream_bpp==24)
902 decode_bgr_1(s, count, 1, 0);
904 decode_bgr_1(s, count, 1, 1);
906 if(s->bitstream_bpp==24)
907 decode_bgr_1(s, count, 0, 0);
909 decode_bgr_1(s, count, 0, 1);
913 static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes){
916 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*planes*count){
917 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
922 int g= s->temp[0][planes==3 ? 3*i+1 : 4*i+G];\
923 int b= (s->temp[0][planes==3 ? 3*i+2 : 4*i+B] - g) & 0xff;\
924 int r= (s->temp[0][planes==3 ? 3*i+0 : 4*i+R] - g) & 0xff;\
925 int a= s->temp[0][planes*i+A];
930 if(planes==4) s->stats[2][a]++;
932 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
933 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
934 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);\
935 if(planes==4) put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
937 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
938 for(i=0; i<count; i++){
942 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
943 for(i=0; i<count; i++){
949 for(i=0; i<count; i++){
957 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
// Notify the application of newly decoded rows up to line 'y' via
// draw_horiz_band (no-op if the callback is unset). Chroma offsets use the
// half-height line for 12 bpp (4:2:0). Tracks progress in last_slice_end.
958 static void draw_slice(HYuvContext *s, int y){
962 if(s->avctx->draw_horiz_band==NULL)
965 h= y - s->last_slice_end;
968 if(s->bitstream_bpp==12){
974 offset[0] = s->picture.linesize[0]*y;
975 offset[1] = s->picture.linesize[1]*cy;
976 offset[2] = s->picture.linesize[2]*cy;
980 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
982 s->last_slice_end= y + h;
// Decode one huffyuv packet into an AVFrame. Steps: byte-swap the input into
// bitstream_buffer, get an output buffer, optionally parse in-band Huffman
// tables (context mode), then undo the entropy coding + prediction per the
// stream layout (planar YUV top-down, or packed BGR(A) bottom-up).
// Returns the number of bytes consumed, or <0 on error.
985 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
986 const uint8_t *buf = avpkt->data;
987 int buf_size = avpkt->size;
988 HYuvContext *s = avctx->priv_data;
989 const int width= s->width;
990 const int width2= s->width>>1;
991 const int height= s->height;
992 int fake_ystride, fake_ustride, fake_vstride;
993 AVFrame * const p= &s->picture;
996 AVFrame *picture = data;
// huffyuv packets are stored as 32-bit little-endian words; swap into a
// padded scratch buffer before bit-reading
998 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
999 if (!s->bitstream_buffer)
1000 return AVERROR(ENOMEM);
1002 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1003 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
1006 ff_thread_release_buffer(avctx, p);
1009 if(ff_thread_get_buffer(avctx, p) < 0){
1010 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
// context mode: per-frame Huffman tables precede the pixel data
1015 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
1020 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
1023 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
// when interlaced, PLANE prediction references the line two rows up
1025 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
1026 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
1027 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
1029 s->last_slice_end= 0;
1031 if(s->bitstream_bpp<24){
1033 int lefty, leftu, leftv;
1034 int lefttopy, lefttopu, lefttopv;
// the first 2 pixels (Y0 U Y1 V) are stored raw as 8-bit values
1037 p->data[0][3]= get_bits(&s->gb, 8);
1038 p->data[0][2]= get_bits(&s->gb, 8);
1039 p->data[0][1]= get_bits(&s->gb, 8);
1040 p->data[0][0]= get_bits(&s->gb, 8);
1042 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
1046 leftv= p->data[2][0]= get_bits(&s->gb, 8);
1047 lefty= p->data[0][1]= get_bits(&s->gb, 8);
1048 leftu= p->data[1][0]= get_bits(&s->gb, 8);
1049 p->data[0][0]= get_bits(&s->gb, 8);
1051 switch(s->predictor){
// LEFT/PLANE: first line is left-predicted after the 2 raw pixels
1054 decode_422_bitstream(s, width-2);
1055 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1056 if(!(s->flags&CODEC_FLAG_GRAY)){
1057 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1058 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1061 for(cy=y=1; y<s->height; y++,cy++){
1062 uint8_t *ydst, *udst, *vdst;
// 4:2:0: every other line is luma-only
1064 if(s->bitstream_bpp==12){
1065 decode_gray_bitstream(s, width);
1067 ydst= p->data[0] + p->linesize[0]*y;
1069 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1070 if(s->predictor == PLANE){
1072 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1075 if(y>=s->height) break;
1080 ydst= p->data[0] + p->linesize[0]*y;
1081 udst= p->data[1] + p->linesize[1]*cy;
1082 vdst= p->data[2] + p->linesize[2]*cy;
1084 decode_422_bitstream(s, width);
1085 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1086 if(!(s->flags&CODEC_FLAG_GRAY)){
1087 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1088 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
// PLANE additionally adds the line above (skipping the first field line(s))
1090 if(s->predictor == PLANE){
1091 if(cy>s->interlaced){
1092 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1093 if(!(s->flags&CODEC_FLAG_GRAY)){
1094 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1095 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1100 draw_slice(s, height);
// MEDIAN predictor path follows
1104 /* first line except first 2 pixels is left predicted */
1105 decode_422_bitstream(s, width-2);
1106 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1107 if(!(s->flags&CODEC_FLAG_GRAY)){
1108 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1109 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1114 /* second line is left predicted for interlaced case */
1116 decode_422_bitstream(s, width);
1117 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1118 if(!(s->flags&CODEC_FLAG_GRAY)){
// NOTE(review): chroma planes use each other's linesize here
// (data[1]+linesize[2], data[2]+linesize[1]) — harmless only if the
// chroma linesizes are equal; looks suspicious, verify upstream.
1119 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1120 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1125 /* next 4 pixels are left predicted too */
1126 decode_422_bitstream(s, 4);
1127 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1128 if(!(s->flags&CODEC_FLAG_GRAY)){
1129 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1130 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1133 /* next line except the first 4 pixels is median predicted */
1134 lefttopy= p->data[0][3];
1135 decode_422_bitstream(s, width-4);
1136 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1137 if(!(s->flags&CODEC_FLAG_GRAY)){
1138 lefttopu= p->data[1][1];
1139 lefttopv= p->data[2][1];
1140 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1141 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
// remaining lines: full median prediction against the row above
1145 for(; y<height; y++,cy++){
1146 uint8_t *ydst, *udst, *vdst;
1148 if(s->bitstream_bpp==12){
1150 decode_gray_bitstream(s, width);
1151 ydst= p->data[0] + p->linesize[0]*y;
1152 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1155 if(y>=height) break;
1159 decode_422_bitstream(s, width);
1161 ydst= p->data[0] + p->linesize[0]*y;
1162 udst= p->data[1] + p->linesize[1]*cy;
1163 vdst= p->data[2] + p->linesize[2]*cy;
1165 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1166 if(!(s->flags&CODEC_FLAG_GRAY)){
1167 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1168 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1172 draw_slice(s, height);
// packed RGB(A) path: image is stored bottom-up, first pixel raw
1178 int leftr, leftg, leftb, lefta;
1179 const int last_line= (height-1)*p->linesize[0];
1181 if(s->bitstream_bpp==32){
1182 lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
1183 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1184 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1185 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1187 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1188 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1189 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1190 lefta= p->data[0][last_line+A]= 255;
1191 skip_bits(&s->gb, 8);
1195 switch(s->predictor){
1198 decode_bgr_bitstream(s, width-1);
1199 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
1201 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1202 decode_bgr_bitstream(s, width);
1204 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1205 if(s->predictor == PLANE){
1206 if(s->bitstream_bpp!=32) lefta=0;
1207 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1208 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1209 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1213 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1216 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1220 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1227 *data_size = sizeof(AVFrame);
// bytes consumed: bit position rounded up to a whole 32-bit word + tables
1229 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1231 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
// Shared teardown: free the per-plane scratch row buffers.
1233 static int common_end(HYuvContext *s){
1237 av_freep(&s->temp[i]);
1242 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
// Decoder teardown: release any held frame, free the swap buffer and the
// VLC tables (scratch rows are freed via common_end).
1243 static av_cold int decode_end(AVCodecContext *avctx)
1245 HYuvContext *s = avctx->priv_data;
1248 if (s->picture.data[0])
1249 avctx->release_buffer(avctx, &s->picture);
1252 av_freep(&s->bitstream_buffer);
1255 free_vlc(&s->vlc[i]);
1260 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1262 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
1263 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1264 HYuvContext *s = avctx->priv_data;
1265 AVFrame *pict = data;
1266 const int width= s->width;
1267 const int width2= s->width>>1;
1268 const int height= s->height;
1269 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1270 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1271 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1272 AVFrame * const p= &s->picture;
1276 p->pict_type= AV_PICTURE_TYPE_I;
1281 generate_len_table(s->len[i], s->stats[i]);
1282 if(generate_bits_table(s->bits[i], s->len[i])<0)
1284 size+= store_table(s, s->len[i], &buf[size]);
1288 for(j=0; j<256; j++)
1289 s->stats[i][j] >>= 1;
1292 init_put_bits(&s->pb, buf+size, buf_size-size);
1294 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1295 int lefty, leftu, leftv, y, cy;
1297 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1298 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1299 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1300 put_bits(&s->pb, 8, p->data[0][0]);
1302 lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1303 leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1304 leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1306 encode_422_bitstream(s, 2, width-2);
1308 if(s->predictor==MEDIAN){
1309 int lefttopy, lefttopu, lefttopv;
1312 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1313 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1314 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1316 encode_422_bitstream(s, 0, width);
1320 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1321 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1322 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1324 encode_422_bitstream(s, 0, 4);
1326 lefttopy= p->data[0][3];
1327 lefttopu= p->data[1][1];
1328 lefttopv= p->data[2][1];
1329 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1330 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1331 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1332 encode_422_bitstream(s, 0, width-4);
1335 for(; y<height; y++,cy++){
1336 uint8_t *ydst, *udst, *vdst;
1338 if(s->bitstream_bpp==12){
1340 ydst= p->data[0] + p->linesize[0]*y;
1341 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1342 encode_gray_bitstream(s, width);
1345 if(y>=height) break;
1347 ydst= p->data[0] + p->linesize[0]*y;
1348 udst= p->data[1] + p->linesize[1]*cy;
1349 vdst= p->data[2] + p->linesize[2]*cy;
1351 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1352 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1353 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1355 encode_422_bitstream(s, 0, width);
1358 for(cy=y=1; y<height; y++,cy++){
1359 uint8_t *ydst, *udst, *vdst;
1361 /* encode a luma only line & y++ */
1362 if(s->bitstream_bpp==12){
1363 ydst= p->data[0] + p->linesize[0]*y;
1365 if(s->predictor == PLANE && s->interlaced < y){
1366 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1368 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1370 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1372 encode_gray_bitstream(s, width);
1374 if(y>=height) break;
1377 ydst= p->data[0] + p->linesize[0]*y;
1378 udst= p->data[1] + p->linesize[1]*cy;
1379 vdst= p->data[2] + p->linesize[2]*cy;
1381 if(s->predictor == PLANE && s->interlaced < cy){
1382 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1383 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1384 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1386 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1387 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1388 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1390 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1391 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1392 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1395 encode_422_bitstream(s, 0, width);
1398 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1399 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1400 const int stride = -p->linesize[0];
1401 const int fake_stride = -fake_ystride;
1403 int leftr, leftg, leftb, lefta;
1405 put_bits(&s->pb, 8, lefta= data[A]);
1406 put_bits(&s->pb, 8, leftr= data[R]);
1407 put_bits(&s->pb, 8, leftg= data[G]);
1408 put_bits(&s->pb, 8, leftb= data[B]);
1410 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb, &lefta);
1411 encode_bgra_bitstream(s, width-1, 4);
1413 for(y=1; y<s->height; y++){
1414 uint8_t *dst = data + y*stride;
1415 if(s->predictor == PLANE && s->interlaced < y){
1416 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1417 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb, &lefta);
1419 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb, &lefta);
1421 encode_bgra_bitstream(s, width, 4);
1423 }else if(avctx->pix_fmt == PIX_FMT_RGB24){
1424 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1425 const int stride = -p->linesize[0];
1426 const int fake_stride = -fake_ystride;
1428 int leftr, leftg, leftb;
1430 put_bits(&s->pb, 8, leftr= data[0]);
1431 put_bits(&s->pb, 8, leftg= data[1]);
1432 put_bits(&s->pb, 8, leftb= data[2]);
1433 put_bits(&s->pb, 8, 0);
1435 sub_left_prediction_rgb24(s, s->temp[0], data+3, width-1, &leftr, &leftg, &leftb);
1436 encode_bgra_bitstream(s, width-1, 3);
1438 for(y=1; y<s->height; y++){
1439 uint8_t *dst = data + y*stride;
1440 if(s->predictor == PLANE && s->interlaced < y){
1441 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*3);
1442 sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1444 sub_left_prediction_rgb24(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1446 encode_bgra_bitstream(s, width, 3);
1449 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1453 size+= (put_bits_count(&s->pb)+31)/8;
1454 put_bits(&s->pb, 16, 0);
1455 put_bits(&s->pb, 15, 0);
1458 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1460 char *p= avctx->stats_out;
1461 char *end= p + 1024*30;
1463 for(j=0; j<256; j++){
1464 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1468 snprintf(p, end-p, "\n");
1472 avctx->stats_out[0] = '\0';
1473 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1474 flush_put_bits(&s->pb);
1475 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1478 s->picture_number++;
/**
 * Encoder teardown: free the buffers that encode_init allocated on the
 * AVCodecContext.
 *
 * NOTE(review): interior lines of this function appear to be missing from
 * this excerpt (no braces or return statement visible); the per-context
 * common cleanup (VLC tables, temp line buffers) is presumably performed by
 * a shared helper — confirm against the complete file.
 */
static av_cold int encode_end(AVCodecContext *avctx)
HYuvContext *s = avctx->priv_data;
/* Huffman-table header that encode_init wrote into extradata */
av_freep(&avctx->extradata);
/* two-pass (CODEC_FLAG_PASS1) statistics string buffer */
av_freep(&avctx->stats_out);
1494 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1496 #if CONFIG_HUFFYUV_DECODER
/**
 * Decoder registration for the original Huffyuv bitstream.
 *
 * NOTE(review): the .name field and the closing brace of this initializer
 * are not visible in this excerpt — confirm against the complete file.
 */
AVCodec ff_huffyuv_decoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_HUFFYUV,
.priv_data_size = sizeof(HYuvContext),
.init = decode_init,
.close = decode_end,
.decode = decode_frame,
/* direct rendering, per-slice draw_horiz_band callbacks, frame-level threading */
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
/* duplicates per-thread decoder state when frame threads are enabled */
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
.long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1511 #if CONFIG_FFVHUFF_DECODER
/**
 * Decoder registration for FFmpeg's extended Huffyuv variant (FFVHuff).
 * Shares all callbacks with the plain Huffyuv decoder; only the codec id
 * differs.
 *
 * NOTE(review): the .name field and the closing brace of this initializer
 * are not visible in this excerpt — confirm against the complete file.
 */
AVCodec ff_ffvhuff_decoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_FFVHUFF,
.priv_data_size = sizeof(HYuvContext),
.init = decode_init,
.close = decode_end,
.decode = decode_frame,
/* direct rendering, per-slice draw_horiz_band callbacks, frame-level threading */
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
/* duplicates per-thread decoder state when frame threads are enabled */
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
.long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1526 #if CONFIG_HUFFYUV_ENCODER
/**
 * Encoder registration for the original Huffyuv format.
 *
 * NOTE(review): the .name field and the closing brace of this initializer
 * are not visible in this excerpt — confirm against the complete file.
 */
AVCodec ff_huffyuv_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_HUFFYUV,
.priv_data_size = sizeof(HYuvContext),
.init = encode_init,
.encode = encode_frame,
.close = encode_end,
/* accepted input formats; the list is PIX_FMT_NONE-terminated */
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1540 #if CONFIG_FFVHUFF_ENCODER
/**
 * Encoder registration for FFmpeg's extended Huffyuv variant (FFVHuff).
 * Same callbacks as the plain Huffyuv encoder, but additionally accepts
 * YUV420P input.
 *
 * NOTE(review): the .name field and the closing brace of this initializer
 * are not visible in this excerpt — confirm against the complete file.
 */
AVCodec ff_ffvhuff_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_FFVHUFF,
.priv_data_size = sizeof(HYuvContext),
.init = encode_init,
.encode = encode_frame,
.close = encode_end,
/* accepted input formats; the list is PIX_FMT_NONE-terminated */
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),