2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
28 * huffyuv codec for libavcodec.
/* Prediction modes and the per-instance codec context.
 * NOTE(review): this excerpt is missing interior lines — the Predictor enum
 * members (LEFT/PLANE/MEDIAN are referenced later) and several HYuvContext
 * fields are not visible here; confirm against the full file. */
51 typedef enum Predictor{
57 typedef struct HYuvContext{
58 AVCodecContext *avctx;
// Output-format switches used on the decode side.
66 int yuy2; //use yuy2 instead of 422P
67 int bgr32; //use bgr32 instead of bgr24
// Per-plane symbol frequencies (encoder) and generated Huffman code words.
74 uint64_t stats[3][256];
76 uint32_t bits[3][256];
// Precomputed pixel values for the joint RGB VLC (filled in generate_joint_tables).
77 uint32_t pix_bgr_map[1<<VLC_BITS];
78 VLC vlc[6]; //Y,U,V,YY,YU,YV
// Byte-swapped copy of the input packet fed to the bitstream reader (see decode_frame).
80 uint8_t *bitstream_buffer;
81 unsigned int bitstream_buffer_size;
/* Run-length coded Huffman code-length table for the classic (v1) huffyuv
 * luma tree; consumed by read_len_table() in read_old_huffman_tables().
 * NOTE(review): trailing rows / closing brace are missing from this excerpt. */
85 static const unsigned char classic_shift_luma[] = {
86 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
87 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
/* Run-length coded Huffman code-length table for the classic (v1) huffyuv
 * chroma tree; consumed by read_len_table() in read_old_huffman_tables(). */
91 static const unsigned char classic_shift_chroma[] = {
92 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
93 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
94 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
/* Huffman code values for the classic (v1) luma table: copied verbatim into
 * s->bits[0][] by read_old_huffman_tables(). One entry per symbol 0..255.
 * NOTE(review): the closing brace line is missing from this excerpt. */
97 static const unsigned char classic_add_luma[256] = {
98 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
99 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
100 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
101 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
102 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
103 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
104 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
105 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
106 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
107 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
108 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
109 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
110 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
111 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
112 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
113 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
/* Huffman code values for the classic (v1) chroma table: copied verbatim into
 * s->bits[1][] by read_old_huffman_tables(). One entry per symbol 0..255.
 * NOTE(review): the closing brace line is missing from this excerpt. */
116 static const unsigned char classic_add_chroma[256] = {
117 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
118 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
119 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
120 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
121 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
122 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
123 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
124 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
125 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
126 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
127 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
128 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
129 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
130 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
131 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
132 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
/* Left-predict (delta-encode) one line of 8-bit samples into dst.
 * Callers feed the return value back in as 'left' for the next line,
 * so this presumably returns the last source sample — TODO confirm,
 * the return statement is not visible in this excerpt.
 * NOTE(review): the scalar-prologue loop headers and the small-width path
 * are missing here; only the two per-sample reads survive. */
135 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
139 const int temp= src[i];
146 const int temp= src[i];
// Bulk of the line is differenced with the (likely SIMD) DSP helper; the
// first 16 samples were handled above so diff_bytes can read src[i-1] freely.
150 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
/* Left-predict one BGR32 line (4 bytes/pixel). Per-channel left values are
 * passed in and updated through the pointer arguments so the caller can
 * carry them across lines. */
155 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue, int *alpha){
// First 4 pixels are differenced scalar so the DSP helper below can safely
// read one whole pixel (4 bytes) behind its src pointer.
162 for(i=0; i<FFMIN(w,4); i++){
163 const int rt= src[i*4+R];
164 const int gt= src[i*4+G];
165 const int bt= src[i*4+B];
166 const int at= src[i*4+A];
// NOTE(review): the per-pixel store/left-update statements are missing here.
176 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
// Report the final pixel's channels back to the caller.
177 *red= src[(w-1)*4+R];
178 *green= src[(w-1)*4+G];
179 *blue= src[(w-1)*4+B];
180 *alpha= src[(w-1)*4+A];
/* Left-predict one RGB24 line (3 bytes/pixel); same scheme as the BGR32
 * variant but without an alpha channel. */
183 static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
// First 16 pixels (48 bytes) scalar so diff_bytes may read src-3 below.
189 for(i=0; i<FFMIN(w,16); i++){
190 const int rt= src[i*3+0];
191 const int gt= src[i*3+1];
192 const int bt= src[i*3+2];
// NOTE(review): the per-pixel store/left-update statements are missing here.
200 s->dsp.diff_bytes(dst+48, src+48, src+48-3, w*3-48);
// Report the final pixel's channels back to the caller.
201 *red= src[(w-1)*3+0];
202 *green= src[(w-1)*3+1];
203 *blue= src[(w-1)*3+2];
/* Decode a run-length coded Huffman length table: each record is a 3-bit
 * repeat count plus a 5-bit length value; a zero short count escapes to an
 * 8-bit extended repeat count. Returns <0 on malformed input.
 * NOTE(review): the loop structure and the fill of dst[] are missing from
 * this excerpt. */
206 static int read_len_table(uint8_t *dst, GetBitContext *gb){
210 repeat= get_bits(gb, 3);
211 val = get_bits(gb, 5);
// Extended repeat count for runs longer than 7.
213 repeat= get_bits(gb, 8);
214 //printf("%d %d\n", val, repeat);
216 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
/* Assign canonical Huffman code words from a length table: scan lengths
 * from the longest (32) down to 1, handing out consecutive codes per level.
 * Returns <0 when the length table is inconsistent.
 * NOTE(review): the code-assignment and validity-check statements are
 * missing from this excerpt. */
225 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
229 for(len=32; len>0; len--){
230 for(index=0; index<256; index++){
231 if(len_table[index]==len)
235 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
243 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Sift the element at 'root' down a binary min-heap of HeapElem keyed on
 * .val (children at 2i+1 / 2i+2); used by generate_len_table(). */
249 static void heap_sift(HeapElem *h, int root, int size)
251 while(root*2+1 < size) {
252 int child = root*2+1;
// Choose the smaller of the two children.
253 if(child < size-1 && h[child].val > h[child+1].val)
255 if(h[root].val > h[child].val) {
256 FFSWAP(HeapElem, h[root], h[child]);
/* Build Huffman code lengths from 64-bit symbol counts with a min-heap.
 * If any symbol would need a code of 32+ bits, the whole computation is
 * retried with a progressively larger 'offset' added to every count,
 * flattening the distribution until all lengths fit in 31 bits. */
263 static void generate_len_table(uint8_t *dst, const uint64_t *stats){
270 for(offset=1; ; offset<<=1){
271 for(i=0; i<size; i++){
// Scale counts and bias by 'offset' so zero-frequency symbols get codes too.
273 h[i].val = (stats[i] << 8) + offset;
// Heapify: sift down every internal node.
275 for(i=size/2-1; i>=0; i--)
276 heap_sift(h, i, size);
278 for(next=size; next<size*2-1; next++){
279 // merge the two smallest entries, and put it back in the heap
280 uint64_t min1v = h[0].val;
281 up[h[0].name] = next;
282 h[0].val = INT64_MAX;
283 heap_sift(h, 0, size);
284 up[h[0].name] = next;
287 heap_sift(h, 0, size);
// Convert the parent links to depths: internal nodes first (top-down)...
291 for(i=2*size-3; i>=size; i--)
292 len[i] = len[up[i]] + 1;
// ...then the leaves, which become the final per-symbol code lengths.
293 for(i=0; i<size; i++) {
294 dst[i] = len[up[i]] + 1;
// A 32-bit code forces another pass with a larger offset.
295 if(dst[i] >= 32) break;
300 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Precompute "joint" VLC tables that decode two (YUV) or three (RGB)
 * symbols with a single table lookup, used by READ_2PIX / decode_bgr_1.
 * Only symbol pairs/triples whose combined length fits in VLC_BITS are
 * entered; everything else falls back to the per-plane tables. */
302 static void generate_joint_tables(HYuvContext *s){
303 uint16_t symbols[1<<VLC_BITS];
304 uint16_t bits[1<<VLC_BITS];
305 uint8_t len[1<<VLC_BITS];
306 if(s->bitstream_bpp < 24){
// YUV path: build Y+U and Y+V pair tables (vlc[4] and vlc[5]).
309 for(i=y=0; y<256; y++){
310 int len0 = s->len[0][y];
311 int limit = VLC_BITS - len0;
314 for(u=0; u<256; u++){
315 int len1 = s->len[p][u];
// Concatenated code word and packed (y<<8)|u symbol for the pair.
318 len[i] = len0 + len1;
319 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
320 symbols[i] = (y<<8) + u;
321 if(symbols[i] != 0xffff) // reserved to mean "invalid"
325 free_vlc(&s->vlc[3+p]);
326 init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
// RGB path: build one triple table (vlc[3]) and the matching pixel map.
329 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
330 int i, b, g, r, code;
// Plane order depends on whether the stream stores decorrelated channels.
331 int p0 = s->decorrelate;
332 int p1 = !s->decorrelate;
333 // restrict the range to +/-16 becaues that's pretty much guaranteed to
334 // cover all the combinations that fit in 11 bits total, and it doesn't
335 // matter if we miss a few rare codes.
336 for(i=0, g=-16; g<16; g++){
337 int len0 = s->len[p0][g&255];
338 int limit0 = VLC_BITS - len0;
341 for(b=-16; b<16; b++){
342 int len1 = s->len[p1][b&255];
343 int limit1 = limit0 - len1;
346 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
347 for(r=-16; r<16; r++){
348 int len2 = s->len[2][r&255];
351 len[i] = len0 + len1 + len2;
352 bits[i] = (code << len2) + s->bits[2][r&255];
// NOTE(review): the map[] pixel-value fills are missing from this excerpt.
366 free_vlc(&s->vlc[3]);
367 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
/* Parse the three per-plane Huffman tables from 'src' (extradata or an
 * in-band table for context mode), build the VLCs and joint tables.
 * Returns the number of bytes consumed, or <0 on error. */
371 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
375 init_get_bits(&gb, src, length*8);
378 if(read_len_table(s->len[i], &gb)<0)
380 if(generate_bits_table(s->bits[i], s->len[i])<0){
// Rebuild the single-symbol VLC for this plane.
383 free_vlc(&s->vlc[i]);
384 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
387 generate_joint_tables(s);
// Bytes consumed, rounded up to whole bytes.
389 return (get_bits_count(&gb)+7)/8;
/* Install the hard-coded classic (v1) huffyuv tables: lengths come from the
 * run-length coded classic_shift_* blobs, code values from classic_add_*. */
392 static int read_old_huffman_tables(HYuvContext *s){
397 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
398 if(read_len_table(s->len[0], &gb)<0)
400 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
401 if(read_len_table(s->len[1], &gb)<0)
404 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
405 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
// RGB streams reuse the luma table for all planes; YUV reuses chroma for V.
407 if(s->bitstream_bpp >= 24){
408 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
409 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
411 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
412 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
415 free_vlc(&s->vlc[i]);
416 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
419 generate_joint_tables(s);
423 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
/* Allocate per-line scratch buffers: one per plane for planar YUV
 * (<24 bpp), or a single 4*width buffer for packed RGB.
 * NOTE(review): allocation-failure handling is not visible in this excerpt. */
428 static av_cold void alloc_temp(HYuvContext *s){
431 if(s->bitstream_bpp<24){
433 s->temp[i]= av_malloc(s->width + 16);
436 s->temp[0]= av_mallocz(4*s->width + 16);
/* Initialization shared by encoder and decoder: cache flags/dimensions
 * and set up the DSP function pointers. */
440 static av_cold int common_init(AVCodecContext *avctx){
441 HYuvContext *s = avctx->priv_data;
444 s->flags= avctx->flags;
446 dsputil_init(&s->dsp, avctx);
448 s->width= avctx->width;
449 s->height= avctx->height;
450 assert(s->width>0 && s->height>0);
455 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder init: parse the 4-byte extradata header (or fall back to the
 * classic v1 tables), derive predictor/bpp/interlacing, build the Huffman
 * tables and pick the output pixel format. */
456 static av_cold int decode_init(AVCodecContext *avctx)
458 HYuvContext *s = avctx->priv_data;
461 memset(s->vlc, 0, 3*sizeof(VLC));
463 avctx->coded_frame= &s->picture;
464 avcodec_get_frame_defaults(&s->picture);
// Heuristic default: tall frames are assumed to be interlaced captures
// unless the extradata below says otherwise.
465 s->interlaced= s->height > 288;
468 //if(avctx->extradata)
469 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
470 if(avctx->extradata_size){
471 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
472 s->version=1; // do such files exist at all?
// Version 2+: extradata header is [method][bpp][flags][0] + Huffman tables.
479 int method, interlace;
481 if (avctx->extradata_size < 4)
484 method= ((uint8_t*)avctx->extradata)[0];
485 s->decorrelate= method&64 ? 1 : 0;
486 s->predictor= method&63;
487 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
488 if(s->bitstream_bpp==0)
489 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
// flags byte: bits 4-5 = interlace override, bit 6 = context (adaptive) mode.
490 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
491 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
492 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
494 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
// Version 1: derive everything from bits_per_coded_sample.
497 switch(avctx->bits_per_coded_sample&7){
508 s->decorrelate= avctx->bits_per_coded_sample >= 24;
511 s->predictor= MEDIAN;
515 s->predictor= LEFT; //OLD
519 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
522 if(read_old_huffman_tables(s) < 0)
// Map stream bpp to an output pixel format.
526 switch(s->bitstream_bpp){
528 avctx->pix_fmt = PIX_FMT_YUV420P;
532 avctx->pix_fmt = PIX_FMT_YUYV422;
534 avctx->pix_fmt = PIX_FMT_YUV422P;
540 avctx->pix_fmt = PIX_FMT_RGB32;
542 avctx->pix_fmt = PIX_FMT_BGR24;
551 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
/* Frame-thread copy init: re-derive only the per-thread VLC state; scalar
 * settings were already copied from the master context. */
556 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
558 HYuvContext *s = avctx->priv_data;
561 avctx->coded_frame= &s->picture;
564 for (i = 0; i < 6; i++)
565 s->vlc[i].table = NULL;
// NOTE(review): decode_init passes extradata_size-4 here (skipping the
// 4-byte header) but this path passes the full extradata_size — looks
// inconsistent; confirm which is intended.
568 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
571 if(read_old_huffman_tables(s) < 0)
577 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
579 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Serialize one 256-entry length table into 'buf' using the run-length
 * format read_len_table() expects (val | repeat<<5, escaped long runs).
 * Returns the number of bytes written.
 * NOTE(review): the outer loop and return are missing from this excerpt. */
580 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
// Count how many consecutive symbols share this length (max 255).
588 for(; i<256 && len[i]==val && repeat<255; i++)
591 assert(val < 32 && val >0 && repeat<256 && repeat>0);
// Long run: escaped form with an explicit repeat byte.
594 buf[index++]= repeat;
// Short run: length in the low 5 bits, count in the top 3.
596 buf[index++]= val | (repeat<<5);
/* Encoder init: choose stream bpp from the input pixel format, validate
 * option combinations, build initial Huffman tables (from two-pass stats
 * if provided, else from a synthetic Laplacian-ish distribution), and
 * write the 4-byte header + tables into extradata. */
603 static av_cold int encode_init(AVCodecContext *avctx)
605 HYuvContext *s = avctx->priv_data;
610 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
611 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
614 avctx->coded_frame= &s->picture;
616 switch(avctx->pix_fmt){
617 case PIX_FMT_YUV420P:
618 s->bitstream_bpp= 12;
620 case PIX_FMT_YUV422P:
621 s->bitstream_bpp= 16;
624 s->bitstream_bpp= 32;
627 s->bitstream_bpp= 24;
630 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
633 avctx->bits_per_coded_sample= s->bitstream_bpp;
634 s->decorrelate= s->bitstream_bpp >= 24;
635 s->predictor= avctx->prediction_method;
636 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
// Context (adaptive-table) mode conflicts with two-pass rate control.
637 if(avctx->context_model==1){
638 s->context= avctx->context_model;
639 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
640 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
// Restrictions for bitstream compatibility with the original huffyuv codec.
645 if(avctx->codec->id==CODEC_ID_HUFFYUV){
646 if(avctx->pix_fmt==PIX_FMT_YUV420P){
647 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
650 if(avctx->context_model){
651 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
654 if(s->interlaced != ( s->height > 288 ))
655 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
658 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
659 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
// Extradata header: [predictor|decorrelate<<6][bpp][interlace|context][0].
663 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
664 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
665 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
667 ((uint8_t*)avctx->extradata)[2]|= 0x40;
668 ((uint8_t*)avctx->extradata)[3]= 0;
669 s->avctx->extradata_size= 4;
// Two-pass: accumulate symbol statistics from stats_in.
672 char *p= avctx->stats_in;
682 for(j=0; j<256; j++){
683 s->stats[i][j]+= strtol(p, &next, 0);
684 if(next==p) return -1;
688 if(p[0]==0 || p[1]==0 || p[2]==0) break;
// No stats: seed with a synthetic distribution favouring small deltas.
692 for(j=0; j<256; j++){
693 int d= FFMIN(j, 256-j);
695 s->stats[i][j]= 100000000/(d+1);
700 generate_len_table(s->len[i], s->stats[i]);
702 if(generate_bits_table(s->bits[i], s->len[i])<0){
// Append each serialized table after the 4-byte header.
706 s->avctx->extradata_size+=
707 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
// Context mode: re-seed running stats scaled to the frame size.
712 int pels = s->width*s->height / (i?40:10);
713 for(j=0; j<256; j++){
714 int d= FFMIN(j, 256-j);
715 s->stats[i][j]= pels/(d+1);
724 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
732 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
734 /* TODO instead of restarting the read when the code isn't in the first level
735 * of the joint table, jump into the 2nd level of the individual table. */
/* Decode two symbols at once via the joint VLC (vlc[3+plane1]); when the
 * joint lookup misses, re-read both symbols from the per-plane tables.
 * (No comments inside the macro — backslash continuations.) */
736 #define READ_2PIX(dst0, dst1, plane1){\
737 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
742 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
743 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
/* Decode 'count' YUYV pixel pairs into s->temp[0..2].
 * Two paths: when the remaining bitstream might run out (worst case ~31
 * bits per symbol, 4 symbols per iteration) use the bounds-checked loop;
 * otherwise the unchecked fast loop. */
747 static void decode_422_bitstream(HYuvContext *s, int count){
752 if(count >= (get_bits_left(&s->gb))/(31*4)){
753 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
754 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
755 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
758 for(i=0; i<count; i++){
759 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
760 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
/* Decode 'count' luma-only pixel pairs into s->temp[0]; same checked/fast
 * split as decode_422_bitstream but two symbols per iteration. */
765 static void decode_gray_bitstream(HYuvContext *s, int count){
770 if(count >= (get_bits_left(&s->gb))/(31*2)){
771 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
772 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
775 for(i=0; i<count; i++){
776 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
781 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Entropy-code 'count' 4:2:2 pixel pairs from s->temp[] starting at
 * 'offset'. Three variants: stats-gathering (PASS1), stats+output, and
 * plain output. Returns <0 if the packet buffer would overflow. */
782 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
784 const uint8_t *y = s->temp[0] + offset;
785 const uint8_t *u = s->temp[1] + offset/2;
786 const uint8_t *v = s->temp[2] + offset/2;
// Worst-case output bound check before writing anything.
788 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
789 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
// Pass 1: only accumulate statistics...
800 if(s->flags&CODEC_FLAG_PASS1){
801 for(i=0; i<count; i++){
// ...and skip bit output entirely when no output is requested.
809 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
// Context mode: update stats and emit bits.
812 for(i=0; i<count; i++){
815 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
817 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
819 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
821 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
// Plain path: emit Y U Y V per pair.
824 for(i=0; i<count; i++){
826 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
827 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
828 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
829 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
/* Entropy-code 'count' luma-only pixel pairs; mirrors the structure of
 * encode_422_bitstream with local LOAD2/STAT2/WRITE2 helper macros. */
835 static int encode_gray_bitstream(HYuvContext *s, int count){
838 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
839 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
844 int y0 = s->temp[0][2*i];\
845 int y1 = s->temp[0][2*i+1];
850 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
851 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
// Pass 1: stats only; optionally skip output.
854 if(s->flags&CODEC_FLAG_PASS1){
855 for(i=0; i<count; i++){
860 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
// Context mode: stats + output.
864 for(i=0; i<count; i++){
// Plain output path.
870 for(i=0; i<count; i++){
877 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
879 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
881 for(i=0; i<count; i++){
882 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
884 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
885 }else if(decorrelate){
886 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
887 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
888 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
890 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
891 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
892 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
895 s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
/* Dispatch to the right decode_bgr_1 specialization so decorrelate/alpha
 * become compile-time constants in the inlined hot loop. */
899 static void decode_bgr_bitstream(HYuvContext *s, int count){
901 if(s->bitstream_bpp==24)
902 decode_bgr_1(s, count, 1, 0);
904 decode_bgr_1(s, count, 1, 1);
906 if(s->bitstream_bpp==24)
907 decode_bgr_1(s, count, 0, 0);
909 decode_bgr_1(s, count, 0, 1);
/* Entropy-code 'count' RGB(A) pixels (planes = 3 or 4) with green
 * decorrelation: G is coded directly, B and R as deltas against G.
 * Same three PASS1/context/plain variants as the YUV encoders. */
913 static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes){
916 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*planes*count){
917 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
922 int g= s->temp[0][planes==3 ? 3*i+1 : 4*i+G];\
923 int b= (s->temp[0][planes==3 ? 3*i+2 : 4*i+B] - g) & 0xff;\
924 int r= (s->temp[0][planes==3 ? 3*i+0 : 4*i+R] - g) & 0xff;\
925 int a= s->temp[0][planes*i+A];
930 if(planes==4) s->stats[2][a]++;
932 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
933 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
934 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);\
935 if(planes==4) put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
// Stats only / stats+bits / bits only, selected once outside the loops.
937 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
938 for(i=0; i<count; i++){
942 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
943 for(i=0; i<count; i++){
949 for(i=0; i<count; i++){
957 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Report the rows decoded since the last call to the user's
 * draw_horiz_band callback; no-op when no callback is installed. */
958 static void draw_slice(HYuvContext *s, int y){
960 int offset[AV_NUM_DATA_POINTERS];
962 if(s->avctx->draw_horiz_band==NULL)
// Height of the newly completed slice.
965 h= y - s->last_slice_end;
// 12 bpp means vertically subsampled chroma (420).
968 if(s->bitstream_bpp==12){
974 offset[0] = s->picture.linesize[0]*y;
975 offset[1] = s->picture.linesize[1]*cy;
976 offset[2] = s->picture.linesize[2]*cy;
977 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
981 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
983 s->last_slice_end= y + h;
/* Decode one frame: byte-swap the packet into a padded scratch buffer,
 * optionally read in-band Huffman tables (context mode), then undo the
 * LEFT / PLANE / MEDIAN prediction per plane. YUV frames are decoded
 * top-down with incremental slice reporting; RGB frames are stored
 * bottom-up and reported as a single slice. Returns bytes consumed. */
986 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
987 const uint8_t *buf = avpkt->data;
988 int buf_size = avpkt->size;
989 HYuvContext *s = avctx->priv_data;
990 const int width= s->width;
991 const int width2= s->width>>1;
992 const int height= s->height;
993 int fake_ystride, fake_ustride, fake_vstride;
994 AVFrame * const p= &s->picture;
997 AVFrame *picture = data;
// Padded, 32-bit byte-swapped copy of the packet for the bit reader.
999 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
1000 if (!s->bitstream_buffer)
1001 return AVERROR(ENOMEM);
1003 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1004 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
1007 ff_thread_release_buffer(avctx, p);
1010 if(ff_thread_get_buffer(avctx, p) < 0){
1011 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
// Context mode ships fresh Huffman tables at the start of every packet.
1016 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
// Guard against (buf_size-table_size)*8 overflowing the bit counter.
1021 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
1024 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
// Interlaced frames predict against the line two rows up (same field).
1026 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
1027 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
1028 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
1030 s->last_slice_end= 0;
1032 if(s->bitstream_bpp<24){
1034 int lefty, leftu, leftv;
1035 int lefttopy, lefttopu, lefttopv;
// First pixels of the frame are stored raw (no prediction possible).
1038 p->data[0][3]= get_bits(&s->gb, 8);
1039 p->data[0][2]= get_bits(&s->gb, 8);
1040 p->data[0][1]= get_bits(&s->gb, 8);
1041 p->data[0][0]= get_bits(&s->gb, 8);
1043 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
1047 leftv= p->data[2][0]= get_bits(&s->gb, 8);
1048 lefty= p->data[0][1]= get_bits(&s->gb, 8);
1049 leftu= p->data[1][0]= get_bits(&s->gb, 8);
1050 p->data[0][0]= get_bits(&s->gb, 8);
1052 switch(s->predictor){
// LEFT / PLANE: first line minus the two raw pixels is left-predicted.
1055 decode_422_bitstream(s, width-2);
1056 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1057 if(!(s->flags&CODEC_FLAG_GRAY)){
1058 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1059 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1062 for(cy=y=1; y<s->height; y++,cy++){
1063 uint8_t *ydst, *udst, *vdst;
// 420: every other row is luma-only.
1065 if(s->bitstream_bpp==12){
1066 decode_gray_bitstream(s, width);
1068 ydst= p->data[0] + p->linesize[0]*y;
1070 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1071 if(s->predictor == PLANE){
1073 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1076 if(y>=s->height) break;
1081 ydst= p->data[0] + p->linesize[0]*y;
1082 udst= p->data[1] + p->linesize[1]*cy;
1083 vdst= p->data[2] + p->linesize[2]*cy;
1085 decode_422_bitstream(s, width);
1086 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1087 if(!(s->flags&CODEC_FLAG_GRAY)){
1088 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1089 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
// PLANE additionally adds the line above (same field when interlaced).
1091 if(s->predictor == PLANE){
1092 if(cy>s->interlaced){
1093 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1094 if(!(s->flags&CODEC_FLAG_GRAY)){
1095 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1096 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1101 draw_slice(s, height);
// MEDIAN predictor.
1105 /* first line except first 2 pixels is left predicted */
1106 decode_422_bitstream(s, width-2);
1107 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1108 if(!(s->flags&CODEC_FLAG_GRAY)){
1109 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1110 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1115 /* second line is left predicted for interlaced case */
1117 decode_422_bitstream(s, width);
1118 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1119 if(!(s->flags&CODEC_FLAG_GRAY)){
// NOTE(review): linesize indices look swapped ([1] vs [2]) — harmless only
// if both chroma planes share the same stride; confirm intent.
1120 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1121 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1126 /* next 4 pixels are left predicted too */
1127 decode_422_bitstream(s, 4);
1128 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1129 if(!(s->flags&CODEC_FLAG_GRAY)){
1130 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1131 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1134 /* next line except the first 4 pixels is median predicted */
1135 lefttopy= p->data[0][3];
1136 decode_422_bitstream(s, width-4);
1137 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1138 if(!(s->flags&CODEC_FLAG_GRAY)){
1139 lefttopu= p->data[1][1];
1140 lefttopv= p->data[2][1];
1141 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1142 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
// Remaining lines are fully median predicted.
1146 for(; y<height; y++,cy++){
1147 uint8_t *ydst, *udst, *vdst;
1149 if(s->bitstream_bpp==12){
1151 decode_gray_bitstream(s, width);
1152 ydst= p->data[0] + p->linesize[0]*y;
1153 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1156 if(y>=height) break;
1160 decode_422_bitstream(s, width);
1162 ydst= p->data[0] + p->linesize[0]*y;
1163 udst= p->data[1] + p->linesize[1]*cy;
1164 vdst= p->data[2] + p->linesize[2]*cy;
1166 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1167 if(!(s->flags&CODEC_FLAG_GRAY)){
1168 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1169 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1173 draw_slice(s, height);
// RGB path: frame is stored bottom-up; first pixel raw.
1179 int leftr, leftg, leftb, lefta;
1180 const int last_line= (height-1)*p->linesize[0];
1182 if(s->bitstream_bpp==32){
1183 lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
1184 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1185 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1186 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1188 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1189 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1190 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
// 24 bpp streams still pad the raw pixel to 32 bits; force opaque alpha.
1191 lefta= p->data[0][last_line+A]= 255;
1192 skip_bits(&s->gb, 8);
1196 switch(s->predictor){
1199 decode_bgr_bitstream(s, width-1);
1200 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
1202 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1203 decode_bgr_bitstream(s, width);
1205 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1206 if(s->predictor == PLANE){
1207 if(s->bitstream_bpp!=32) lefta=0;
1208 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1209 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1210 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1214 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1217 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1221 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1228 *data_size = sizeof(AVFrame);
// Bytes consumed: bitstream rounded up to 32-bit words, plus tables.
1230 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1232 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
/* Teardown shared by encoder and decoder: release the per-line scratch
 * buffers allocated by alloc_temp(). */
1234 static int common_end(HYuvContext *s){
1238 av_freep(&s->temp[i]);
1243 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder teardown: release any held frame, the bitstream scratch buffer,
 * and all VLC tables. */
1244 static av_cold int decode_end(AVCodecContext *avctx)
1246 HYuvContext *s = avctx->priv_data;
1249 if (s->picture.data[0])
1250 avctx->release_buffer(avctx, &s->picture);
1253 av_freep(&s->bitstream_buffer);
1256 free_vlc(&s->vlc[i]);
1261 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1263 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
1264 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1265 HYuvContext *s = avctx->priv_data;
1266 AVFrame *pict = data;
1267 const int width= s->width;
1268 const int width2= s->width>>1;
1269 const int height= s->height;
1270 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1271 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1272 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1273 AVFrame * const p= &s->picture;
1277 p->pict_type= AV_PICTURE_TYPE_I;
1282 generate_len_table(s->len[i], s->stats[i]);
1283 if(generate_bits_table(s->bits[i], s->len[i])<0)
1285 size+= store_table(s, s->len[i], &buf[size]);
1289 for(j=0; j<256; j++)
1290 s->stats[i][j] >>= 1;
1293 init_put_bits(&s->pb, buf+size, buf_size-size);
1295 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1296 int lefty, leftu, leftv, y, cy;
1298 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1299 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1300 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1301 put_bits(&s->pb, 8, p->data[0][0]);
1303 lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1304 leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1305 leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1307 encode_422_bitstream(s, 2, width-2);
1309 if(s->predictor==MEDIAN){
1310 int lefttopy, lefttopu, lefttopv;
1313 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1314 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1315 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1317 encode_422_bitstream(s, 0, width);
1321 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1322 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1323 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1325 encode_422_bitstream(s, 0, 4);
1327 lefttopy= p->data[0][3];
1328 lefttopu= p->data[1][1];
1329 lefttopv= p->data[2][1];
1330 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1331 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1332 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1333 encode_422_bitstream(s, 0, width-4);
1336 for(; y<height; y++,cy++){
1337 uint8_t *ydst, *udst, *vdst;
1339 if(s->bitstream_bpp==12){
1341 ydst= p->data[0] + p->linesize[0]*y;
1342 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1343 encode_gray_bitstream(s, width);
1346 if(y>=height) break;
1348 ydst= p->data[0] + p->linesize[0]*y;
1349 udst= p->data[1] + p->linesize[1]*cy;
1350 vdst= p->data[2] + p->linesize[2]*cy;
1352 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1353 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1354 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1356 encode_422_bitstream(s, 0, width);
1359 for(cy=y=1; y<height; y++,cy++){
1360 uint8_t *ydst, *udst, *vdst;
1362 /* encode a luma only line & y++ */
1363 if(s->bitstream_bpp==12){
1364 ydst= p->data[0] + p->linesize[0]*y;
1366 if(s->predictor == PLANE && s->interlaced < y){
1367 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1369 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1371 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1373 encode_gray_bitstream(s, width);
1375 if(y>=height) break;
1378 ydst= p->data[0] + p->linesize[0]*y;
1379 udst= p->data[1] + p->linesize[1]*cy;
1380 vdst= p->data[2] + p->linesize[2]*cy;
1382 if(s->predictor == PLANE && s->interlaced < cy){
1383 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1384 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1385 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1387 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1388 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1389 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1391 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1392 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1393 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1396 encode_422_bitstream(s, 0, width);
1399 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1400 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1401 const int stride = -p->linesize[0];
1402 const int fake_stride = -fake_ystride;
1404 int leftr, leftg, leftb, lefta;
1406 put_bits(&s->pb, 8, lefta= data[A]);
1407 put_bits(&s->pb, 8, leftr= data[R]);
1408 put_bits(&s->pb, 8, leftg= data[G]);
1409 put_bits(&s->pb, 8, leftb= data[B]);
1411 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb, &lefta);
1412 encode_bgra_bitstream(s, width-1, 4);
1414 for(y=1; y<s->height; y++){
1415 uint8_t *dst = data + y*stride;
1416 if(s->predictor == PLANE && s->interlaced < y){
1417 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1418 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb, &lefta);
1420 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb, &lefta);
1422 encode_bgra_bitstream(s, width, 4);
1424 }else if(avctx->pix_fmt == PIX_FMT_RGB24){
1425 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1426 const int stride = -p->linesize[0];
1427 const int fake_stride = -fake_ystride;
1429 int leftr, leftg, leftb;
1431 put_bits(&s->pb, 8, leftr= data[0]);
1432 put_bits(&s->pb, 8, leftg= data[1]);
1433 put_bits(&s->pb, 8, leftb= data[2]);
1434 put_bits(&s->pb, 8, 0);
1436 sub_left_prediction_rgb24(s, s->temp[0], data+3, width-1, &leftr, &leftg, &leftb);
1437 encode_bgra_bitstream(s, width-1, 3);
1439 for(y=1; y<s->height; y++){
1440 uint8_t *dst = data + y*stride;
1441 if(s->predictor == PLANE && s->interlaced < y){
1442 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*3);
1443 sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1445 sub_left_prediction_rgb24(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1447 encode_bgra_bitstream(s, width, 3);
1450 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1454 size+= (put_bits_count(&s->pb)+31)/8;
1455 put_bits(&s->pb, 16, 0);
1456 put_bits(&s->pb, 15, 0);
1459 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1461 char *p= avctx->stats_out;
1462 char *end= p + 1024*30;
1464 for(j=0; j<256; j++){
1465 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1469 snprintf(p, end-p, "\n");
1473 avctx->stats_out[0] = '\0';
1474 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1475 flush_put_bits(&s->pb);
1476 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1479 s->picture_number++;
// Encoder teardown: releases the allocations made in encode_init.
// av_freep() frees and NULLs the pointer, so a double close is safe.
// NOTE(review): the embedded line numbering jumps (1486 -> 1490), so this
// fragment is missing lines from the original function (at minimum the
// braces, a shared-cleanup call and the return statement) -- the visible
// lines below are preserved byte-identical; confirm against upstream.
1484 static av_cold int encode_end(AVCodecContext *avctx)
1486 HYuvContext *s = avctx->priv_data;
1490 av_freep(&avctx->extradata);
1491 av_freep(&avctx->stats_out);
1495 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
// Registration entry for the Huffyuv decoder, compiled only when the
// decoder is enabled in the build configuration.
// CODEC_CAP_FRAME_THREADS advertises frame-level multithreading, so an
// init_thread_copy callback is supplied (compiled out when threading is
// disabled via ONLY_IF_THREADS_ENABLED).
// NOTE(review): the embedded numbering jumps 1498 -> 1500 and the struct's
// closing "};" is absent -- the .name field and terminator appear to have
// been dropped by extraction; verify against the upstream file.
1497 #if CONFIG_HUFFYUV_DECODER
1498 AVCodec ff_huffyuv_decoder = {
1500 .type = AVMEDIA_TYPE_VIDEO,
1501 .id = CODEC_ID_HUFFYUV,
1502 .priv_data_size = sizeof(HYuvContext),
1503 .init = decode_init,
1504 .close = decode_end,
1505 .decode = decode_frame,
1506 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1507 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1508 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
// Registration entry for the FFVHUFF decoder -- FFmpeg's own variant of
// Huffyuv (CODEC_ID_FFVHUFF). It reuses the same decode callbacks and
// HYuvContext as the stock Huffyuv decoder above.
// NOTE(review): numbering jumps 1513 -> 1515 and the closing "};" is
// absent; the .name field and terminator were likely dropped by
// extraction -- verify against the upstream file.
1512 #if CONFIG_FFVHUFF_DECODER
1513 AVCodec ff_ffvhuff_decoder = {
1515 .type = AVMEDIA_TYPE_VIDEO,
1516 .id = CODEC_ID_FFVHUFF,
1517 .priv_data_size = sizeof(HYuvContext),
1518 .init = decode_init,
1519 .close = decode_end,
1520 .decode = decode_frame,
1521 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1522 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1523 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
// Registration entry for the Huffyuv encoder. The pix_fmts list is the
// set of input pixel formats this encoder accepts, terminated by
// PIX_FMT_NONE (YUV422P plus packed RGB variants; the FFVHUFF encoder
// below additionally accepts YUV420P).
// NOTE(review): numbering jumps 1528 -> 1530 and the closing "};" is
// absent; the .name field and terminator were likely dropped by
// extraction -- verify against the upstream file.
1527 #if CONFIG_HUFFYUV_ENCODER
1528 AVCodec ff_huffyuv_encoder = {
1530 .type = AVMEDIA_TYPE_VIDEO,
1531 .id = CODEC_ID_HUFFYUV,
1532 .priv_data_size = sizeof(HYuvContext),
1533 .init = encode_init,
1534 .encode = encode_frame,
1535 .close = encode_end,
1536 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE},
1537 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
// Registration entry for the FFVHUFF encoder (FFmpeg's Huffyuv variant).
// Shares all encode callbacks with the stock Huffyuv encoder; the only
// differences are the codec id and a wider pix_fmts list that also
// permits YUV420P input.
// NOTE(review): numbering jumps 1542 -> 1544 and the closing "};" is
// absent; the .name field and terminator were likely dropped by
// extraction -- verify against the upstream file.
1541 #if CONFIG_FFVHUFF_ENCODER
1542 AVCodec ff_ffvhuff_encoder = {
1544 .type = AVMEDIA_TYPE_VIDEO,
1545 .id = CODEC_ID_FFVHUFF,
1546 .priv_data_size = sizeof(HYuvContext),
1547 .init = encode_init,
1548 .encode = encode_frame,
1549 .close = encode_end,
1550 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE},
1551 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),