2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
28 * huffyuv codec for libavcodec.
/* NOTE(review): this chunk is a line-numbered extract of huffyuv.c; interior
 * lines of the enum and struct below are missing from this view.
 * Predictor selects the spatial prediction mode; HYuvContext is the shared
 * per-instance codec state for both encoder and decoder. */
52 typedef enum Predictor{
58 typedef struct HYuvContext{
59 AVCodecContext *avctx;
67 int yuy2; //use yuy2 instead of 422P
68 int bgr32; //use bgr32 instead of bgr24
75 uint64_t stats[3][256]; // per-plane symbol frequency counts (encoder 2-pass / context model)
77 uint32_t bits[3][256]; // per-plane Huffman codes, parallel to len[] tables
78 uint32_t pix_bgr_map[1<<VLC_BITS]; // packed BGRA output for joint RGB VLC hits
79 VLC vlc[6]; //Y,U,V,YY,YU,YV
81 uint8_t *bitstream_buffer; // byte-swapped copy of the input packet (decoder)
82 unsigned int bitstream_buffer_size;
/* Run-length-coded code-length tables for the "classic" (v1) HuffYUV
 * bitstream; parsed by read_len_table() in read_old_huffman_tables().
 * NOTE(review): part of the luma table is missing from this extract. */
86 #define classic_shift_luma_table_size 42
87 static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
88 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
89 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
94 #define classic_shift_chroma_table_size 59
95 static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
96 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
97 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
98 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0,
/* Huffman code values for the classic (v1) luma table; used as s->bits[0]
 * in read_old_huffman_tables(). Indexed by symbol (0..255). */
102 static const unsigned char classic_add_luma[256] = {
103 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
104 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
105 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
106 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
107 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
108 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
109 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
110 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
111 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
112 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
113 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
114 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
115 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
116 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
117 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
118 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
/* Huffman code values for the classic (v1) chroma table; used as s->bits[1]
 * in read_old_huffman_tables(). Indexed by symbol (0..255). */
121 static const unsigned char classic_add_chroma[256] = {
122 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
123 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
124 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
125 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
126 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
127 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
128 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
129 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
130 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
131 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
132 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
133 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
134 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
135 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
136 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
137 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
/* Left-predict one plane row: dst[i] = src[i] - previous pixel, seeded with
 * `left`; the first 16 pixels are done scalar, the rest via the DSP helper.
 * Returns the last source pixel (new `left` for the next row) — TODO confirm,
 * the return statement is outside this extract. */
140 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int left){
144 const int temp= src[i];
151 const int temp= src[i];
155 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
/* Left-predict a BGRA row (4 bytes/pixel): first up to 4 pixels scalar,
 * remainder via diff_bytes() against the previous pixel (offset -4 bytes ==
 * -1 pixel). On return *red/*green/*blue/*alpha hold the last pixel's
 * components, to seed the next row. */
160 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha){
167 for(i=0; i<FFMIN(w,4); i++){
168 const int rt= src[i*4+R];
169 const int gt= src[i*4+G];
170 const int bt= src[i*4+B];
171 const int at= src[i*4+A];
181 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
182 *red= src[(w-1)*4+R];
183 *green= src[(w-1)*4+G];
184 *blue= src[(w-1)*4+B];
185 *alpha= src[(w-1)*4+A];
/* Left-predict an RGB24 row (3 bytes/pixel): first up to 16 pixels scalar,
 * remainder via diff_bytes() against the previous pixel (offset -3 bytes).
 * Outputs the last pixel's components through red/green/blue. */
188 static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue){
194 for(i=0; i<FFMIN(w,16); i++){
195 const int rt= src[i*3+0];
196 const int gt= src[i*3+1];
197 const int bt= src[i*3+2];
205 s->dsp.diff_bytes(dst+48, src+48, src+48-3, w*3-48);
206 *red= src[(w-1)*3+0];
207 *green= src[(w-1)*3+1];
208 *blue= src[(w-1)*3+2];
/* Decode a run-length-coded table of 256 Huffman code lengths:
 * each record is 3-bit repeat count (0 => 8-bit extended count) + 5-bit
 * length value. Fails if the runs overflow 256 entries or the bit reader
 * ran out of data. */
211 static int read_len_table(uint8_t *dst, GetBitContext *gb){
215 repeat= get_bits(gb, 3);
216 val = get_bits(gb, 5);
218 repeat= get_bits(gb, 8);
219 //printf("%d %d\n", val, repeat);
220 if(i+repeat > 256 || get_bits_left(gb) < 0) {
221 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
/* Build canonical Huffman codes from a 256-entry length table, assigning
 * codes from the longest length (32) down. Returns an error when the
 * lengths do not form a valid prefix code. */
230 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
234 for(len=32; len>0; len--){
235 for(index=0; index<256; index++){
236 if(len_table[index]==len)
240 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
248 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Restore the min-heap property (by .val) by sifting the element at `root`
 * down; used by generate_len_table() to repeatedly pop the two smallest
 * frequencies. */
254 static void heap_sift(HeapElem *h, int root, int size)
256 while(root*2+1 < size) {
257 int child = root*2+1;
258 if(child < size-1 && h[child].val > h[child+1].val)
260 if(h[root].val > h[child].val) {
261 FFSWAP(HeapElem, h[root], h[child]);
/* Package-merge-style Huffman length generation: build a tree bottom-up with
 * a min-heap over (stats[i] << 8) + offset, then derive code lengths by
 * walking the parent links in up[]. The outer loop retries with a growing
 * `offset` tie-break bias until every length fits in < 32 bits. */
268 static void generate_len_table(uint8_t *dst, const uint64_t *stats){
275 for(offset=1; ; offset<<=1){
276 for(i=0; i<size; i++){
278 h[i].val = (stats[i] << 8) + offset; // low byte biases ties; keeps lengths balanced
280 for(i=size/2-1; i>=0; i--)
281 heap_sift(h, i, size);
283 for(next=size; next<size*2-1; next++){
284 // merge the two smallest entries, and put it back in the heap
285 uint64_t min1v = h[0].val;
286 up[h[0].name] = next;
287 h[0].val = INT64_MAX; // remove popped element from consideration
288 heap_sift(h, 0, size);
289 up[h[0].name] = next;
292 heap_sift(h, 0, size);
296 for(i=2*size-3; i>=size; i--)
297 len[i] = len[up[i]] + 1; // internal nodes: depth = parent depth + 1
298 for(i=0; i<size; i++) {
299 dst[i] = len[up[i]] + 1;
300 if(dst[i] >= 32) break; // too deep -> retry outer loop with larger offset
/* Build the joint VLC tables used for fast decoding:
 *  - YUV path (< 24 bpp): pair tables YY/YU/YV (s->vlc[3..5]) holding every
 *    (sym0,sym1) combination whose combined length fits in VLC_BITS.
 *  - RGB path: a single joint table s->vlc[3] over a +/-16 delta range for
 *    g/b/r, with the decoded pixel precomputed in s->pix_bgr_map.
 * NOTE(review): the code that fills pix_bgr_map and the inner loop exits are
 * missing from this extract. */
307 static void generate_joint_tables(HYuvContext *s){
308 uint16_t symbols[1<<VLC_BITS];
309 uint16_t bits[1<<VLC_BITS];
310 uint8_t len[1<<VLC_BITS];
311 if(s->bitstream_bpp < 24){
314 for(i=y=0; y<256; y++){
315 int len0 = s->len[0][y];
316 int limit = VLC_BITS - len0;
319 for(u=0; u<256; u++){
320 int len1 = s->len[p][u];
323 len[i] = len0 + len1;
324 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
325 symbols[i] = (y<<8) + u;
326 if(symbols[i] != 0xffff) // reserved to mean "invalid"
330 ff_free_vlc(&s->vlc[3+p]);
331 ff_init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
334 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
335 int i, b, g, r, code;
336 int p0 = s->decorrelate;
337 int p1 = !s->decorrelate;
338 // restrict the range to +/-16 becaues that's pretty much guaranteed to
339 // cover all the combinations that fit in 11 bits total, and it doesn't
340 // matter if we miss a few rare codes.
341 for(i=0, g=-16; g<16; g++){
342 int len0 = s->len[p0][g&255];
343 int limit0 = VLC_BITS - len0;
346 for(b=-16; b<16; b++){
347 int len1 = s->len[p1][b&255];
348 int limit1 = limit0 - len1;
351 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
352 for(r=-16; r<16; r++){
353 int len2 = s->len[2][r&255];
356 len[i] = len0 + len1 + len2;
357 bits[i] = (code << len2) + s->bits[2][r&255];
371 ff_free_vlc(&s->vlc[3]);
372 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
/* Parse the three per-plane length tables from the bitstream (v2 header or
 * per-frame context tables), rebuild codes and per-plane VLCs, then the
 * joint tables. Returns the number of bytes consumed. */
376 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
380 init_get_bits(&gb, src, length*8);
383 if(read_len_table(s->len[i], &gb)<0)
385 if(generate_bits_table(s->bits[i], s->len[i])<0){
388 ff_free_vlc(&s->vlc[i]);
389 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
392 generate_joint_tables(s);
394 return (get_bits_count(&gb)+7)/8; // round up to whole bytes consumed
/* Install the hardcoded "classic" (v1) HuffYUV tables: lengths come from the
 * RLE-coded classic_shift_* blobs, code values from classic_add_*. For RGB
 * (>= 24 bpp) the luma table is reused for plane 1; plane 2 always copies
 * plane 1. */
397 static int read_old_huffman_tables(HYuvContext *s){
401 init_get_bits(&gb, classic_shift_luma, classic_shift_luma_table_size*8);
402 if(read_len_table(s->len[0], &gb)<0)
404 init_get_bits(&gb, classic_shift_chroma, classic_shift_chroma_table_size*8);
405 if(read_len_table(s->len[1], &gb)<0)
408 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
409 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
411 if(s->bitstream_bpp >= 24){
412 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
413 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
415 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
416 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
419 ff_free_vlc(&s->vlc[i]);
420 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
423 generate_joint_tables(s);
/* Allocate per-plane scratch rows: one width+16 buffer per plane for YUV
 * modes, a single zeroed 4*width+16 buffer for packed RGB modes.
 * NOTE(review): allocation-failure handling is not visible in this extract. */
428 static av_cold void alloc_temp(HYuvContext *s){
431 if(s->bitstream_bpp<24){
433 s->temp[i]= av_malloc(s->width + 16);
436 s->temp[0]= av_mallocz(4*s->width + 16);
/* Shared encoder/decoder init: cache flags and dimensions, set up DSP
 * function pointers. */
440 static av_cold int common_init(AVCodecContext *avctx){
441 HYuvContext *s = avctx->priv_data;
444 s->flags= avctx->flags;
446 ff_dsputil_init(&s->dsp, avctx);
448 s->width= avctx->width;
449 s->height= avctx->height;
450 assert(s->width>0 && s->height>0);
455 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder init: determine stream version from extradata, parse the v2/v3
 * header (method byte, bpp, interlace/context flags, Huffman tables) or fall
 * back to the hardcoded classic tables, then pick the output pixel format. */
456 static av_cold int decode_init(AVCodecContext *avctx)
458 HYuvContext *s = avctx->priv_data;
461 memset(s->vlc, 0, 3*sizeof(VLC));
463 avctx->coded_frame= &s->picture;
464 avcodec_get_frame_defaults(&s->picture);
465 s->interlaced= s->height > 288; // heuristic default; may be overridden by extradata below
468 //if(avctx->extradata)
469 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
470 if(avctx->extradata_size){
471 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
472 s->version=1; // do such files exist at all?
479 int method, interlace;
481 if (avctx->extradata_size < 4)
484 method= ((uint8_t*)avctx->extradata)[0];
485 s->decorrelate= method&64 ? 1 : 0; // bit 6: RGB decorrelation enabled
486 s->predictor= method&63; // low 6 bits: prediction mode
487 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
488 if(s->bitstream_bpp==0)
489 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
490 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
491 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
492 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0; // per-frame adaptive tables
494 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
497 switch(avctx->bits_per_coded_sample&7){
508 s->decorrelate= avctx->bits_per_coded_sample >= 24;
511 s->predictor= MEDIAN;
515 s->predictor= LEFT; //OLD
519 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
522 if(read_old_huffman_tables(s) < 0)
526 switch(s->bitstream_bpp){
528 avctx->pix_fmt = PIX_FMT_YUV420P;
532 avctx->pix_fmt = PIX_FMT_YUYV422;
534 avctx->pix_fmt = PIX_FMT_YUV422P;
540 avctx->pix_fmt = PIX_FMT_RGB32;
542 avctx->pix_fmt = PIX_FMT_BGR24;
546 return AVERROR_INVALIDDATA;
549 if ((avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P) && avctx->width & 1) {
550 av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
551 return AVERROR_INVALIDDATA;
556 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
/* Frame-threading copy init: re-derive per-thread VLC tables (the copied
 * context must not share vlc[] pointers with the source thread). */
561 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
563 HYuvContext *s = avctx->priv_data;
566 avctx->coded_frame= &s->picture;
569 for (i = 0; i < 6; i++)
570 s->vlc[i].table = NULL;
/* NOTE(review): decode_init passes extradata_size-4 here (skipping the 4-byte
 * header) but this call passes the full extradata_size — looks inconsistent;
 * verify against the upstream fix before changing. */
573 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
576 if(read_old_huffman_tables(s) < 0)
582 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
584 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* RLE-encode one 256-entry length table into buf (inverse of
 * read_len_table): runs >= 8 get a 0 repeat nibble + explicit count byte,
 * shorter runs pack count into the top 3 bits. Returns bytes written —
 * TODO confirm, the return is outside this extract. */
585 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
593 for(; i<256 && len[i]==val && repeat<255; i++)
596 assert(val < 32 && val >0 && repeat<256 && repeat>0);
599 buf[index++]= repeat;
601 buf[index++]= val | (repeat<<5);
/* Encoder init: map pix_fmt to bitstream bpp, validate option combinations
 * (huffyuv vs ffvhuff restrictions, context model vs 2-pass, RGB vs MEDIAN),
 * write the 4-byte extradata header, seed symbol statistics (from stats_in
 * when doing pass 2, otherwise a 1/(d+1) prior), and build/store the Huffman
 * tables into extradata. */
608 static av_cold int encode_init(AVCodecContext *avctx)
610 HYuvContext *s = avctx->priv_data;
615 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
616 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
619 avctx->coded_frame= &s->picture;
621 switch(avctx->pix_fmt){
622 case PIX_FMT_YUV420P:
623 case PIX_FMT_YUV422P:
625 av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
626 return AVERROR(EINVAL);
628 s->bitstream_bpp = avctx->pix_fmt == PIX_FMT_YUV420P ? 12 : 16;
631 s->bitstream_bpp= 32;
634 s->bitstream_bpp= 24;
637 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
640 avctx->bits_per_coded_sample= s->bitstream_bpp;
641 s->decorrelate= s->bitstream_bpp >= 24;
642 s->predictor= avctx->prediction_method;
643 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
644 if(avctx->context_model==1){
645 s->context= avctx->context_model;
646 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
647 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
652 if(avctx->codec->id==CODEC_ID_HUFFYUV){
653 if(avctx->pix_fmt==PIX_FMT_YUV420P){
654 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
657 if(avctx->context_model){
658 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
661 if(s->interlaced != ( s->height > 288 ))
662 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
665 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
666 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
670 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
671 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
672 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
674 ((uint8_t*)avctx->extradata)[2]|= 0x40; // context-model flag
675 ((uint8_t*)avctx->extradata)[3]= 0;
676 s->avctx->extradata_size= 4;
679 char *p= avctx->stats_in;
689 for(j=0; j<256; j++){
690 s->stats[i][j]+= strtol(p, &next, 0);
691 if(next==p) return -1; // malformed stats_in
695 if(p[0]==0 || p[1]==0 || p[2]==0) break;
699 for(j=0; j<256; j++){
700 int d= FFMIN(j, 256-j); // symmetric distance from zero delta
702 s->stats[i][j]= 100000000/(d+1);
707 generate_len_table(s->len[i], s->stats[i]);
709 if(generate_bits_table(s->bits[i], s->len[i])<0){
713 s->avctx->extradata_size+=
714 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
719 int pels = s->width*s->height / (i?40:10);
720 for(j=0; j<256; j++){
721 int d= FFMIN(j, 256-j);
722 s->stats[i][j]= pels/(d+1);
731 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
741 /* TODO instead of restarting the read when the code isn't in the first level
742 * of the joint table, jump into the 2nd level of the individual table. */
/* Decode two symbols at once via the joint table s->vlc[3+plane1]; on a miss
 * (invalid joint code) fall back to two single-symbol reads from vlc[0] and
 * vlc[plane1]. */
743 #define READ_2PIX(dst0, dst1, plane1){\
744 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
749 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
750 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
/* Decode `count` 4:2:2 pixel pairs (Y0 U Y1 V) into s->temp[0..2]. Near the
 * end of the buffer (a symbol pair costs at most 31*4 bits? — the bound is
 * conservative) switch to the slow loop that re-checks remaining bits each
 * iteration to avoid overreads. */
754 static void decode_422_bitstream(HYuvContext *s, int count){
759 if(count >= (get_bits_left(&s->gb))/(31*4)){
760 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
761 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
762 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
765 for(i=0; i<count; i++){
766 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
767 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
/* Decode `count` luma pixel pairs into s->temp[0]; same safe/fast split as
 * decode_422_bitstream but with the YY joint table only. */
772 static void decode_gray_bitstream(HYuvContext *s, int count){
777 if(count >= (get_bits_left(&s->gb))/(31*2)){
778 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
779 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
782 for(i=0; i<count; i++){
783 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
788 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Emit `count` 4:2:2 pixel pairs from s->temp, starting at `offset` luma
 * samples in. Three code paths: pass-1 stats gathering (optionally without
 * output), stats + output (context model), and plain output. */
789 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
791 const uint8_t *y = s->temp[0] + offset;
792 const uint8_t *u = s->temp[1] + offset/2;
793 const uint8_t *v = s->temp[2] + offset/2;
795 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
796 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
807 if(s->flags&CODEC_FLAG_PASS1){
808 for(i=0; i<count; i++){
816 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
819 for(i=0; i<count; i++){
822 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
824 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
826 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
828 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
831 for(i=0; i<count; i++){
833 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
834 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
835 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
836 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
/* Emit `count` luma-only pixel pairs from s->temp[0]; mirrors
 * encode_422_bitstream's pass-1 / context / plain output split via the
 * LOAD2 / STAT2 / WRITE2 helper macros. */
842 static int encode_gray_bitstream(HYuvContext *s, int count){
845 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
846 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
851 int y0 = s->temp[0][2*i];\
852 int y1 = s->temp[0][2*i+1];
857 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
858 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
861 if(s->flags&CODEC_FLAG_PASS1){
862 for(i=0; i<count; i++){
867 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
871 for(i=0; i<count; i++){
877 for(i=0; i<count; i++){
884 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Decode `count` BGR(A) pixels into s->temp[0]. Fast path: a joint-table hit
 * yields a whole precomputed pixel from pix_bgr_map. Slow path: per-component
 * reads, adding G back to B and R when decorrelate is set. Compile-time
 * specialized (always_inline) on decorrelate/alpha. */
886 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
888 for(i=0; i<count; i++){
889 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
891 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
892 }else if(decorrelate){
893 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
894 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
895 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
897 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
898 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
899 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
902 s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); // alpha shares the R-plane table (vlc[2]) — matches encoder
/* Dispatch to the right decode_bgr_1 specialization based on decorrelate
 * and whether the stream carries alpha (32 vs 24 bpp). */
906 static void decode_bgr_bitstream(HYuvContext *s, int count){
908 if(s->bitstream_bpp==24)
909 decode_bgr_1(s, count, 1, 0);
911 decode_bgr_1(s, count, 1, 1);
913 if(s->bitstream_bpp==24)
914 decode_bgr_1(s, count, 0, 0);
916 decode_bgr_1(s, count, 0, 1);
/* Emit `count` RGB24/BGRA pixels from s->temp[0]: G is coded directly, B and
 * R as deltas from G (mod 256), A (4-plane only) through the plane-2 table.
 * Same pass-1 / context / plain-output split as the YUV encoders. */
920 static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes){
923 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*planes*count){
924 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
929 int g= s->temp[0][planes==3 ? 3*i+1 : 4*i+G];\
930 int b= (s->temp[0][planes==3 ? 3*i+2 : 4*i+B] - g) & 0xff;\
931 int r= (s->temp[0][planes==3 ? 3*i+0 : 4*i+R] - g) & 0xff;\
932 int a= s->temp[0][planes*i+A];
937 if(planes==4) s->stats[2][a]++;
939 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
940 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
941 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);\
942 if(planes==4) put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
944 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
945 for(i=0; i<count; i++){
949 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
950 for(i=0; i<count; i++){
956 for(i=0; i<count; i++){
964 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Report freshly decoded rows [last_slice_end, y) to the application via
 * draw_horiz_band; chroma offsets are halved vertically for 420 (12 bpp). */
965 static void draw_slice(HYuvContext *s, int y){
967 int offset[AV_NUM_DATA_POINTERS];
969 if(s->avctx->draw_horiz_band==NULL)
972 h= y - s->last_slice_end;
975 if(s->bitstream_bpp==12){
981 offset[0] = s->picture.linesize[0]*y;
982 offset[1] = s->picture.linesize[1]*cy;
983 offset[2] = s->picture.linesize[2]*cy;
984 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
988 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
990 s->last_slice_end= y + h;
/* Decode one frame: byte-swap the packet into bitstream_buffer, (for the
 * context model) re-read per-frame Huffman tables, then reconstruct each row
 * according to predictor (LEFT / PLANE / MEDIAN) and pixel layout (planar
 * YUV, or packed BGR stored bottom-up). Returns bytes consumed.
 * NOTE(review): several control-flow lines (else branches, loop closers,
 * error returns) are missing from this extract; comments below describe only
 * what the visible lines establish. */
993 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
994 const uint8_t *buf = avpkt->data;
995 int buf_size = avpkt->size;
996 HYuvContext *s = avctx->priv_data;
997 const int width= s->width;
998 const int width2= s->width>>1;
999 const int height= s->height;
1000 int fake_ystride, fake_ustride, fake_vstride;
1001 AVFrame * const p= &s->picture;
1004 AVFrame *picture = data;
1006 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
1007 if (!s->bitstream_buffer)
1008 return AVERROR(ENOMEM);
1010 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1011 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4); // stream is stored as big-endian 32-bit words
1014 ff_thread_release_buffer(avctx, p);
1017 if(ff_thread_get_buffer(avctx, p) < 0){
1018 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1023 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
1028 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
1031 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
1033 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0]; // interlaced: predict from the same field
1034 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
1035 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
1037 s->last_slice_end= 0;
1039 if(s->bitstream_bpp<24){
1041 int lefty, leftu, leftv;
1042 int lefttopy, lefttopu, lefttopv;
1045 p->data[0][3]= get_bits(&s->gb, 8);
1046 p->data[0][2]= get_bits(&s->gb, 8);
1047 p->data[0][1]= get_bits(&s->gb, 8);
1048 p->data[0][0]= get_bits(&s->gb, 8);
1050 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
/* First 2 pixels of the first line are stored raw (V, Y1, U, Y0). */
1054 leftv= p->data[2][0]= get_bits(&s->gb, 8);
1055 lefty= p->data[0][1]= get_bits(&s->gb, 8);
1056 leftu= p->data[1][0]= get_bits(&s->gb, 8);
1057 p->data[0][0]= get_bits(&s->gb, 8);
1059 switch(s->predictor){
1062 decode_422_bitstream(s, width-2);
1063 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1064 if(!(s->flags&CODEC_FLAG_GRAY)){
1065 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1066 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1069 for(cy=y=1; y<s->height; y++,cy++){
1070 uint8_t *ydst, *udst, *vdst;
1072 if(s->bitstream_bpp==12){ // 420: every other line is luma-only
1073 decode_gray_bitstream(s, width);
1075 ydst= p->data[0] + p->linesize[0]*y;
1077 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1078 if(s->predictor == PLANE){
1080 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1083 if(y>=s->height) break;
1088 ydst= p->data[0] + p->linesize[0]*y;
1089 udst= p->data[1] + p->linesize[1]*cy;
1090 vdst= p->data[2] + p->linesize[2]*cy;
1092 decode_422_bitstream(s, width);
1093 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1094 if(!(s->flags&CODEC_FLAG_GRAY)){
1095 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1096 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
1098 if(s->predictor == PLANE){
1099 if(cy>s->interlaced){ // skip the first field line(s): no row above to add
1100 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1101 if(!(s->flags&CODEC_FLAG_GRAY)){
1102 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1103 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1108 draw_slice(s, height);
1112 /* first line except first 2 pixels is left predicted */
1113 decode_422_bitstream(s, width-2);
1114 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1115 if(!(s->flags&CODEC_FLAG_GRAY)){
1116 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1117 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1122 /* second line is left predicted for interlaced case */
1124 decode_422_bitstream(s, width);
1125 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1126 if(!(s->flags&CODEC_FLAG_GRAY)){
/* NOTE(review): chroma strides look swapped here (data[1] with linesize[2]
 * and vice versa) — harmless only if both linesizes are equal; verify. */
1127 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1128 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1133 /* next 4 pixels are left predicted too */
1134 decode_422_bitstream(s, 4);
1135 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1136 if(!(s->flags&CODEC_FLAG_GRAY)){
1137 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1138 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1141 /* next line except the first 4 pixels is median predicted */
1142 lefttopy= p->data[0][3];
1143 decode_422_bitstream(s, width-4);
1144 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1145 if(!(s->flags&CODEC_FLAG_GRAY)){
1146 lefttopu= p->data[1][1];
1147 lefttopv= p->data[2][1];
1148 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1149 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
1153 for(; y<height; y++,cy++){
1154 uint8_t *ydst, *udst, *vdst;
1156 if(s->bitstream_bpp==12){
1158 decode_gray_bitstream(s, width);
1159 ydst= p->data[0] + p->linesize[0]*y;
1160 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1163 if(y>=height) break;
1167 decode_422_bitstream(s, width);
1169 ydst= p->data[0] + p->linesize[0]*y;
1170 udst= p->data[1] + p->linesize[1]*cy;
1171 vdst= p->data[2] + p->linesize[2]*cy;
1173 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1174 if(!(s->flags&CODEC_FLAG_GRAY)){
1175 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1176 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1180 draw_slice(s, height);
1186 int leftr, leftg, leftb, lefta;
1187 const int last_line= (height-1)*p->linesize[0]; // BGR frames are stored bottom-up
1189 if(s->bitstream_bpp==32){
1190 lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
1191 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1192 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1193 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1195 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1196 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1197 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1198 lefta= p->data[0][last_line+A]= 255; // 24 bpp: force opaque alpha
1199 skip_bits(&s->gb, 8);
1203 switch(s->predictor){
1206 decode_bgr_bitstream(s, width-1);
1207 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
1209 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1210 decode_bgr_bitstream(s, width);
1212 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1213 if(s->predictor == PLANE){
1214 if(s->bitstream_bpp!=32) lefta=0;
1215 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1216 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1217 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1221 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1224 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1228 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1235 *data_size = sizeof(AVFrame);
1237 return (get_bits_count(&s->gb)+31)/32*4 + table_size; // consumed: whole 32-bit words + header tables
/* Shared teardown: free the per-plane scratch buffers allocated by
 * alloc_temp(). */
1241 static int common_end(HYuvContext *s){
1245 av_freep(&s->temp[i]);
1250 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder teardown: release any held frame, free the swap buffer and all
 * VLC tables. */
1251 static av_cold int decode_end(AVCodecContext *avctx)
1253 HYuvContext *s = avctx->priv_data;
1256 if (s->picture.data[0])
1257 avctx->release_buffer(avctx, &s->picture);
1260 av_freep(&s->bitstream_buffer);
1263 ff_free_vlc(&s->vlc[i]);
1268 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1270 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Encode one frame. With the context model, regenerate tables from the
 * running stats and store them at the start of the packet (halving stats
 * afterwards for adaptation). Rows are predicted per s->predictor and fed to
 * the per-layout bitstream writers.
 * NOTE(review): this function continues past the end of the visible chunk;
 * its RGB path and epilogue are not shown here. */
1271 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1272 const AVFrame *pict, int *got_packet)
1274 HYuvContext *s = avctx->priv_data;
1275 const int width= s->width;
1276 const int width2= s->width>>1;
1277 const int height= s->height;
1278 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1279 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1280 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1281 AVFrame * const p= &s->picture;
1282 int i, j, size = 0, ret;
1284 if ((ret = ff_alloc_packet2(avctx, pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0)
1288 p->pict_type= AV_PICTURE_TYPE_I;
1293 generate_len_table(s->len[i], s->stats[i]);
1294 if(generate_bits_table(s->bits[i], s->len[i])<0)
1296 size += store_table(s, s->len[i], &pkt->data[size]);
1300 for(j=0; j<256; j++)
1301 s->stats[i][j] >>= 1; // decay stats so tables keep adapting
1304 init_put_bits(&s->pb, pkt->data + size, pkt->size - size);
1306 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1307 int lefty, leftu, leftv, y, cy;
/* First 2 pixels stored raw, mirroring the decoder's V, Y1, U, Y0 order. */
1309 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1310 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1311 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1312 put_bits(&s->pb, 8, p->data[0][0]);
1314 lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1315 leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1316 leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1318 encode_422_bitstream(s, 2, width-2);
1320 if(s->predictor==MEDIAN){
1321 int lefttopy, lefttopu, lefttopv;
1324 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1325 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1326 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1328 encode_422_bitstream(s, 0, width);
1332 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1333 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1334 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1336 encode_422_bitstream(s, 0, 4);
1338 lefttopy= p->data[0][3];
1339 lefttopu= p->data[1][1];
1340 lefttopv= p->data[2][1];
1341 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1342 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1343 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1344 encode_422_bitstream(s, 0, width-4);
1347 for(; y<height; y++,cy++){
1348 uint8_t *ydst, *udst, *vdst;
1350 if(s->bitstream_bpp==12){
1352 ydst= p->data[0] + p->linesize[0]*y;
1353 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1354 encode_gray_bitstream(s, width);
1357 if(y>=height) break;
1359 ydst= p->data[0] + p->linesize[0]*y;
1360 udst= p->data[1] + p->linesize[1]*cy;
1361 vdst= p->data[2] + p->linesize[2]*cy;
1363 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1364 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1365 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1367 encode_422_bitstream(s, 0, width);
1370 for(cy=y=1; y<height; y++,cy++){
1371 uint8_t *ydst, *udst, *vdst;
1373 /* encode a luma only line & y++ */
1374 if(s->bitstream_bpp==12){
1375 ydst= p->data[0] + p->linesize[0]*y;
1377 if(s->predictor == PLANE && s->interlaced < y){
1378 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1380 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1382 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1384 encode_gray_bitstream(s, width);
1386 if(y>=height) break;
1389 ydst= p->data[0] + p->linesize[0]*y;
1390 udst= p->data[1] + p->linesize[1]*cy;
1391 vdst= p->data[2] + p->linesize[2]*cy;
1393 if(s->predictor == PLANE && s->interlaced < cy){
1394 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1395 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1396 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2); // V shares temp[2]'s back half
1398 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1399 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1400 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1402 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1403 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1404 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1407 encode_422_bitstream(s, 0, width);
1410 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1411 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1412 const int stride = -p->linesize[0];
1413 const int fake_stride = -fake_ystride;
1415 int leftr, leftg, leftb, lefta;
1417 put_bits(&s->pb, 8, lefta= data[A]);
1418 put_bits(&s->pb, 8, leftr= data[R]);
1419 put_bits(&s->pb, 8, leftg= data[G]);
1420 put_bits(&s->pb, 8, leftb= data[B]);
1422 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb, &lefta);
1423 encode_bgra_bitstream(s, width-1, 4);
1425 for(y=1; y<s->height; y++){
1426 uint8_t *dst = data + y*stride;
1427 if(s->predictor == PLANE && s->interlaced < y){
1428 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1429 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb, &lefta);
1431 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb, &lefta);
1433 encode_bgra_bitstream(s, width, 4);
1435 }else if(avctx->pix_fmt == PIX_FMT_RGB24){
1436 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1437 const int stride = -p->linesize[0];
1438 const int fake_stride = -fake_ystride;
1440 int leftr, leftg, leftb;
1442 put_bits(&s->pb, 8, leftr= data[0]);
1443 put_bits(&s->pb, 8, leftg= data[1]);
1444 put_bits(&s->pb, 8, leftb= data[2]);
1445 put_bits(&s->pb, 8, 0);
1447 sub_left_prediction_rgb24(s, s->temp[0], data+3, width-1, &leftr, &leftg, &leftb);
1448 encode_bgra_bitstream(s, width-1, 3);
1450 for(y=1; y<s->height; y++){
1451 uint8_t *dst = data + y*stride;
1452 if(s->predictor == PLANE && s->interlaced < y){
1453 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*3);
1454 sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1456 sub_left_prediction_rgb24(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1458 encode_bgra_bitstream(s, width, 3);
1461 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1465 size+= (put_bits_count(&s->pb)+31)/8;
1466 put_bits(&s->pb, 16, 0);
1467 put_bits(&s->pb, 15, 0);
1470 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1472 char *p= avctx->stats_out;
1473 char *end= p + 1024*30;
1475 for(j=0; j<256; j++){
1476 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1480 snprintf(p, end-p, "\n");
1484 avctx->stats_out[0] = '\0';
1485 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1486 flush_put_bits(&s->pb);
1487 s->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
1490 s->picture_number++;
1493 pkt->flags |= AV_PKT_FLAG_KEY;
/**
 * Uninitialize the huffyuv/ffvhuff encoder.
 * Frees the encoder-owned buffers hanging off the AVCodecContext:
 * the Huffman-table extradata and the two-pass statistics string,
 * both allocated during encode_init().
 *
 * NOTE(review): this excerpt shows no opening brace, no cleanup of the
 * HYuvContext itself (upstream calls a common_end() helper here, which
 * would explain why 's' is otherwise unused), and no 'return 0;' —
 * these lines were presumably lost in extraction; confirm against the
 * complete file.
 */
static av_cold int encode_end(AVCodecContext *avctx)
HYuvContext *s = avctx->priv_data;
av_freep(&avctx->extradata);  // Huffman tables written into extradata at init
av_freep(&avctx->stats_out);  // CODEC_FLAG_PASS1 statistics buffer
1510 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1512 #if CONFIG_HUFFYUV_DECODER
/* Registration entry for the original Huffyuv decoder.
 * Declares support for direct rendering (DR1), per-slice draw_horiz_band
 * callbacks, and frame-level multithreading (with a dedicated thread-copy
 * init hook).
 * NOTE(review): the '.name' field and the closing '};' are not visible in
 * this excerpt — presumably dropped by extraction; confirm against the
 * full file. */
AVCodec ff_huffyuv_decoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_HUFFYUV,
.priv_data_size = sizeof(HYuvContext),
.init = decode_init,
.close = decode_end,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),  // no-op when threads disabled
.long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1527 #if CONFIG_FFVHUFF_DECODER
/* Registration entry for FFmpeg's extended Huffyuv variant (FFVHUFF).
 * Identical capability set to ff_huffyuv_decoder; only the codec ID and
 * names differ — both codecs share HYuvContext and the decode_* callbacks.
 * NOTE(review): '.name' and the closing '};' are missing from this
 * excerpt — presumably lost in extraction; confirm against the full file. */
AVCodec ff_ffvhuff_decoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_FFVHUFF,
.priv_data_size = sizeof(HYuvContext),
.init = decode_init,
.close = decode_end,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
.init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),  // no-op when threads disabled
.long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1542 #if CONFIG_HUFFYUV_ENCODER
/* Registration entry for the original Huffyuv encoder.
 * Uses the packet-based encode2 API; accepted input formats match the
 * pix_fmt branches in encode_frame (YUV422P, RGB24, RGB32 — no 420P,
 * which only the FFVHUFF variant advertises).
 * NOTE(review): '.name' and the closing '};' are missing from this
 * excerpt — presumably lost in extraction; confirm against the full file. */
AVCodec ff_huffyuv_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_HUFFYUV,
.priv_data_size = sizeof(HYuvContext),
.init = encode_init,
.encode2 = encode_frame,
.close = encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1556 #if CONFIG_FFVHUFF_ENCODER
/* Registration entry for the FFVHUFF encoder.
 * Same callbacks as ff_huffyuv_encoder, but additionally advertises
 * PIX_FMT_YUV420P input — matching the 420P branch in encode_frame that
 * only the FFmpeg variant's bitstream can represent.
 * NOTE(review): '.name' and the closing '};' are missing from this
 * excerpt — presumably lost in extraction; confirm against the full file. */
AVCodec ff_ffvhuff_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_FFVHUFF,
.priv_data_size = sizeof(HYuvContext),
.init = encode_init,
.encode2 = encode_frame,
.close = encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE},
.long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),