2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * @file libavcodec/huffyuv.c
28 * huffyuv codec for libavcodec.
/* Prediction mode selector (LEFT/PLANE/MEDIAN elsewhere in the file);
 * enumerator list is not visible in this extraction. */
50 typedef enum Predictor{
/* Codec state shared by the huffyuv/ffvhuff encoder and decoder. */
56 typedef struct HYuvContext{
57 AVCodecContext *avctx;
65 int yuy2; //use yuy2 instead of 422P
66 int bgr32; //use bgr32 instead of bgr24
/* Per-plane symbol frequency counters; feed generate_len_table() and the
 * two-pass stats file written by encode_frame(). */
73 uint64_t stats[3][256];
/* Canonical Huffman codes per plane; the matching length table s->len is
 * declared outside this view. */
75 uint32_t bits[3][256];
/* Decoded pixel values for the joint RGB VLC built in generate_joint_tables(). */
76 uint32_t pix_bgr_map[1<<VLC_BITS];
77 VLC vlc[6]; //Y,U,V,YY,YU,YV
/* Byte-swapped copy of the input packet, grown on demand in decode_frame(). */
79 uint8_t *bitstream_buffer;
80 unsigned int bitstream_buffer_size;
/* Run-length coded code-length table for the classic (v1) huffyuv luma
 * Huffman tree; parsed with read_len_table() in read_old_huffman_tables(). */
84 static const unsigned char classic_shift_luma[] = {
85 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
86 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
/* Run-length coded code-length table for the classic (v1) huffyuv chroma
 * Huffman tree; companion to classic_shift_luma above. */
90 static const unsigned char classic_shift_chroma[] = {
91 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
92 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
93 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
/* Huffman code values for the classic (v1) luma table, indexed by symbol;
 * copied into s->bits[0] by read_old_huffman_tables(). */
96 static const unsigned char classic_add_luma[256] = {
97 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
98 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
99 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
100 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
101 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
102 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
103 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
104 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
105 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
106 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
107 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
108 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
109 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
110 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
111 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
112 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
/* Huffman code values for the classic (v1) chroma table, indexed by symbol;
 * copied into s->bits[1] by read_old_huffman_tables(). */
115 static const unsigned char classic_add_chroma[256] = {
116 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
117 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
118 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
119 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
120 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
121 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
122 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
123 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
124 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
125 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
126 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
127 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
128 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
129 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
130 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
131 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
/* Left-predict one scanline: dst[i] = src[i] - preceding sample, seeded with
 * `left`.  A scalar head handles the first 16 bytes, then dsp.diff_bytes
 * differences the remainder.  Presumably returns the last source sample so
 * the caller can carry it into the next line -- return not visible, confirm. */
134 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
138 const int temp= src[i];
145 const int temp= src[i];
149 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
/* Left-predict a packed 32-bit RGB scanline: the first up-to-4 pixels are
 * differenced per channel against *red/*green/*blue, the bulk is handled by
 * dsp.diff_bytes with a 4-byte (one pixel) lag, and the out-params are
 * updated to the last pixel's channel values for the next call. */
154 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
160 for(i=0; i<FFMIN(w,4); i++){
161 const int rt= src[i*4+R];
162 const int gt= src[i*4+G];
163 const int bt= src[i*4+B];
/* Bulk: difference whole pixels against the previous pixel (lag of 4 bytes). */
171 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
172 *red= src[(w-1)*4+R];
173 *green= src[(w-1)*4+G];
174 *blue= src[(w-1)*4+B];
/* Parse a run-length coded Huffman length table from the bitstream into
 * dst[256]: a 3-bit repeat count (0 means an explicit 8-bit count follows)
 * and a 5-bit code length per run.  Logs and fails on malformed tables. */
177 static int read_len_table(uint8_t *dst, GetBitContext *gb){
181 repeat= get_bits(gb, 3);
182 val = get_bits(gb, 5);
/* repeat==0 escapes to an 8-bit run length for runs longer than 7. */
184 repeat= get_bits(gb, 8);
185 //printf("%d %d\n", val, repeat);
187 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
/* Assign canonical Huffman codes from the per-symbol code lengths, walking
 * lengths from longest (32) to shortest and numbering symbols of each length
 * consecutively.  Fails with a log message when the lengths do not form a
 * valid prefix code. */
196 static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
200 for(len=32; len>0; len--){
201 for(index=0; index<256; index++){
202 if(len_table[index]==len)
206 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
214 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Sift the element at `root` down a binary min-heap of `size` entries ordered
 * by .val (the smaller of the two children is chosen, and the swap happens
 * while the parent is larger).  Used by generate_len_table(). */
220 static void heap_sift(HeapElem *h, int root, int size)
222 while(root*2+1 < size) {
223 int child = root*2+1;
224 if(child < size-1 && h[child].val > h[child+1].val)
226 if(h[root].val > h[child].val) {
227 FFSWAP(HeapElem, h[root], h[child]);
/* Build Huffman code lengths for `size` symbols from their frequencies
 * (`stats`) via repeated merging of the two smallest heap entries, recording
 * parent links in up[] and deriving depths from them.  The frequencies are
 * biased by `offset` (doubled per outer pass -- presumably retrying until
 * every length fits below 32; the retry condition is not visible here). */
234 static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
240 for(offset=1; ; offset<<=1){
241 for(i=0; i<size; i++){
243 h[i].val = (stats[i] << 8) + offset;
/* Heapify: sift down every internal node. */
245 for(i=size/2-1; i>=0; i--)
246 heap_sift(h, i, size);
248 for(next=size; next<size*2-1; next++){
249 // merge the two smallest entries, and put it back in the heap
250 uint64_t min1v = h[0].val;
251 up[h[0].name] = next;
252 h[0].val = INT64_MAX;
253 heap_sift(h, 0, size);
254 up[h[0].name] = next;
257 heap_sift(h, 0, size);
/* Walk the merge tree top-down: each node is one level deeper than its parent. */
261 for(i=2*size-3; i>=size; i--)
262 len[i] = len[up[i]] + 1;
263 for(i=0; i<size; i++) {
264 dst[i] = len[up[i]] + 1;
/* A length of 32+ cannot be coded; break out (and presumably retry with a
 * larger offset -- confirm against the full source). */
265 if(dst[i] >= 32) break;
270 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Build joint VLC tables that decode two symbols (YUV) or a whole pixel (RGB)
 * per table lookup: pairs of codes whose combined length fits in VLC_BITS are
 * concatenated and entered into s->vlc[3..5] (YUV) or s->vlc[3] (RGB). */
272 static void generate_joint_tables(HYuvContext *s){
273 uint16_t symbols[1<<VLC_BITS];
274 uint16_t bits[1<<VLC_BITS];
275 uint8_t len[1<<VLC_BITS];
276 if(s->bitstream_bpp < 24){
/* YUV path: combine the luma code with each chroma-plane code. */
279 for(i=y=0; y<256; y++){
280 int len0 = s->len[0][y];
281 int limit = VLC_BITS - len0;
284 for(u=0; u<256; u++){
285 int len1 = s->len[p][u];
288 len[i] = len0 + len1;
289 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
290 symbols[i] = (y<<8) + u;
291 if(symbols[i] != 0xffff) // reserved to mean "invalid"
295 free_vlc(&s->vlc[3+p]);
296 init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
/* RGB path: pack G, B, R codes; decoded pixels land in pix_bgr_map. */
299 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
300 int i, b, g, r, code;
301 int p0 = s->decorrelate;
302 int p1 = !s->decorrelate;
303 // restrict the range to +/-16 becaues that's pretty much guaranteed to
304 // cover all the combinations that fit in 11 bits total, and it doesn't
305 // matter if we miss a few rare codes.
306 for(i=0, g=-16; g<16; g++){
307 int len0 = s->len[p0][g&255];
308 int limit0 = VLC_BITS - len0;
311 for(b=-16; b<16; b++){
312 int len1 = s->len[p1][b&255];
313 int limit1 = limit0 - len1;
316 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
317 for(r=-16; r<16; r++){
318 int len2 = s->len[2][r&255];
321 len[i] = len0 + len1 + len2;
322 bits[i] = (code << len2) + s->bits[2][r&255];
336 free_vlc(&s->vlc[3]);
337 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
/* Read the per-plane Huffman length tables from `src`, derive the canonical
 * codes, (re)build the single-symbol VLCs and the joint tables.  Returns the
 * number of bytes consumed, or a negative value on error. */
341 static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
345 init_get_bits(&gb, src, length*8);
348 if(read_len_table(s->len[i], &gb)<0)
350 if(generate_bits_table(s->bits[i], s->len[i])<0){
/* Debug dump of the generated code tables (guard not visible here). */
354 for(j=0; j<256; j++){
355 printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
358 free_vlc(&s->vlc[i]);
359 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
362 generate_joint_tables(s);
/* Round the consumed bit count up to whole bytes. */
364 return (get_bits_count(&gb)+7)/8;
/* Load the hardcoded classic (v1/v2 original huffyuv) Huffman tables: lengths
 * come from the run-length coded classic_shift_* blobs, code values from
 * classic_add_*; then duplicate planes as needed and rebuild the VLCs. */
367 static int read_old_huffman_tables(HYuvContext *s){
372 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
373 if(read_len_table(s->len[0], &gb)<0)
375 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
376 if(read_len_table(s->len[1], &gb)<0)
379 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
380 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
/* RGB uses the luma table for all planes; YUV reuses chroma for plane 2. */
382 if(s->bitstream_bpp >= 24){
383 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
384 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
386 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
387 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
390 free_vlc(&s->vlc[i]);
391 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
394 generate_joint_tables(s);
/* Fallback branch when the decoder is not compiled in. */
398 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
/* Allocate the per-plane scratch line buffers: one line per plane for YUV
 * (<24 bpp), or a single 4-bytes-per-pixel line for packed RGB. */
403 static av_cold void alloc_temp(HYuvContext *s){
406 if(s->bitstream_bpp<24){
408 s->temp[i]= av_malloc(s->width + 16);
411 s->temp[0]= av_mallocz(4*s->width + 16);
/* Shared encoder/decoder setup: cache flags and dimensions and initialize
 * the DSP function pointers. */
415 static av_cold int common_init(AVCodecContext *avctx){
416 HYuvContext *s = avctx->priv_data;
419 s->flags= avctx->flags;
421 dsputil_init(&s->dsp, avctx);
423 s->width= avctx->width;
424 s->height= avctx->height;
425 assert(s->width>0 && s->height>0);
430 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder init: parse the extradata header (predictor, decorrelate flag,
 * bitstream bpp, interlacing, context model) or fall back to the classic
 * tables, then pick the output pixel format from the bitstream bpp. */
431 static av_cold int decode_init(AVCodecContext *avctx)
433 HYuvContext *s = avctx->priv_data;
436 memset(s->vlc, 0, 3*sizeof(VLC));
438 avctx->coded_frame= &s->picture;
/* Heuristic default: material taller than 288 lines is assumed interlaced. */
439 s->interlaced= s->height > 288;
442 //if(avctx->extradata)
443 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
444 if(avctx->extradata_size){
445 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
446 s->version=1; // do such files exist at all?
/* Version >=2: extradata byte 0 = predictor|decorrelate, byte 1 = bpp,
 * byte 2 = interlace + context flags, then the Huffman tables. */
453 int method, interlace;
455 method= ((uint8_t*)avctx->extradata)[0];
456 s->decorrelate= method&64 ? 1 : 0;
457 s->predictor= method&63;
458 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
459 if(s->bitstream_bpp==0)
460 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
461 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
462 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
463 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
465 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
/* No usable extradata: derive settings from bits_per_coded_sample and use
 * the hardcoded classic tables. */
468 switch(avctx->bits_per_coded_sample&7){
479 s->decorrelate= avctx->bits_per_coded_sample >= 24;
482 s->predictor= MEDIAN;
486 s->predictor= LEFT; //OLD
490 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
493 if(read_old_huffman_tables(s) < 0)
/* Map bitstream bpp to the output pixel format. */
497 switch(s->bitstream_bpp){
499 avctx->pix_fmt = PIX_FMT_YUV420P;
503 avctx->pix_fmt = PIX_FMT_YUYV422;
505 avctx->pix_fmt = PIX_FMT_YUV422P;
511 avctx->pix_fmt = PIX_FMT_RGB32;
513 avctx->pix_fmt = PIX_FMT_BGR24;
522 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
526 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
528 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Serialize one Huffman length table into `buf` using the same run-length
 * format read_len_table() parses (short runs packed as val|(repeat<<5),
 * longer runs with an explicit repeat byte).  Returns bytes written
 * (return statement not visible in this extraction). */
529 static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
537 for(; i<256 && len[i]==val && repeat<255; i++)
540 assert(val < 32 && val >0 && repeat<256 && repeat>0);
543 buf[index++]= repeat;
545 buf[index++]= val | (repeat<<5);
/* Encoder init: choose bitstream bpp from the input pixel format, validate
 * option combinations (huffyuv vs ffvhuff restrictions, context model,
 * median+RGB), write the 4-byte extradata header plus the serialized Huffman
 * tables, and seed the symbol statistics (from stats_in for 2-pass, or from
 * a synthetic 1/(d+1) distribution). */
552 static av_cold int encode_init(AVCodecContext *avctx)
554 HYuvContext *s = avctx->priv_data;
559 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
560 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
563 avctx->coded_frame= &s->picture;
565 switch(avctx->pix_fmt){
566 case PIX_FMT_YUV420P:
567 s->bitstream_bpp= 12;
569 case PIX_FMT_YUV422P:
570 s->bitstream_bpp= 16;
573 s->bitstream_bpp= 24;
576 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
579 avctx->bits_per_coded_sample= s->bitstream_bpp;
580 s->decorrelate= s->bitstream_bpp >= 24;
581 s->predictor= avctx->prediction_method;
582 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
/* Per-frame adaptive Huffman tables ("context model") conflict with 2-pass. */
583 if(avctx->context_model==1){
584 s->context= avctx->context_model;
585 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
586 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
/* The original huffyuv codec is stricter than ffvhuff. */
591 if(avctx->codec->id==CODEC_ID_HUFFYUV){
592 if(avctx->pix_fmt==PIX_FMT_YUV420P){
593 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
596 if(avctx->context_model){
597 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
600 if(s->interlaced != ( s->height > 288 ))
601 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
604 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
605 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
/* 4-byte extradata header, matching what decode_init() parses. */
609 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
610 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
611 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
613 ((uint8_t*)avctx->extradata)[2]|= 0x40;
614 ((uint8_t*)avctx->extradata)[3]= 0;
615 s->avctx->extradata_size= 4;
/* 2-pass: accumulate symbol counts from the stats file of the first pass. */
618 char *p= avctx->stats_in;
628 for(j=0; j<256; j++){
629 s->stats[i][j]+= strtol(p, &next, 0);
630 if(next==p) return -1;
634 if(p[0]==0 || p[1]==0 || p[2]==0) break;
/* No stats: seed with a generic distribution favouring small residuals. */
638 for(j=0; j<256; j++){
639 int d= FFMIN(j, 256-j);
641 s->stats[i][j]= 100000000/(d+1);
646 generate_len_table(s->len[i], s->stats[i], 256);
648 if(generate_bits_table(s->bits[i], s->len[i])<0){
652 s->avctx->extradata_size+=
653 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
/* Context mode: reset stats to a per-plane prior scaled by frame size. */
658 int pels = s->width*s->height / (i?40:10);
659 for(j=0; j<256; j++){
660 int d= FFMIN(j, 256-j);
661 s->stats[i][j]= pels/(d+1);
670 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
678 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
680 /* TODO instead of restarting the read when the code isn't in the first level
681 * of the joint table, jump into the 2nd level of the individual table. */
/* Decode two symbols at once: first try the joint VLC (vlc[3+plane1]); when
 * the code is not in it, fall back to two single-symbol VLC reads. */
682 #define READ_2PIX(dst0, dst1, plane1){\
683 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
688 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
689 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
/* Decode `count` 4:2:2 pixel pairs (Y0 U, Y1 V) into s->temp[0..2].  When the
 * remaining bitstream might run out (31 bits max per symbol, 4 symbols per
 * iteration), use the slower bounds-checked loop. */
693 static void decode_422_bitstream(HYuvContext *s, int count){
698 if(count >= (get_bits_left(&s->gb))/(31*4)){
699 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
700 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
701 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
/* Fast path: enough bits are guaranteed to remain, skip the per-pixel check. */
704 for(i=0; i<count; i++){
705 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
706 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
/* Decode `count` luma-only samples (two per READ_2PIX) into s->temp[0],
 * with the same checked/unchecked split as decode_422_bitstream(). */
711 static void decode_gray_bitstream(HYuvContext *s, int count){
716 if(count >= (get_bits_left(&s->gb))/(31*2)){
717 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
718 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
721 for(i=0; i<count; i++){
722 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
727 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Entropy-code `count` 4:2:2 pixel pairs from s->temp[0..2] starting at
 * `offset`, after checking for output-buffer overflow.  Separate loops handle
 * pass-1 stats gathering (with or without actual output) vs plain encoding. */
728 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
730 const uint8_t *y = s->temp[0] + offset;
731 const uint8_t *u = s->temp[1] + offset/2;
732 const uint8_t *v = s->temp[2] + offset/2;
734 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
735 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
/* Pass 1: update symbol statistics; skip bit output under NO_OUTPUT. */
746 if(s->flags&CODEC_FLAG_PASS1){
747 for(i=0; i<count; i++){
755 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
/* Stats + output variant (context/pass-1 path). */
758 for(i=0; i<count; i++){
761 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
763 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
765 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
767 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
/* Plain encoding: Y0 U Y1 V per pixel pair. */
770 for(i=0; i<count; i++){
772 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
773 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
774 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
775 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
/* Entropy-code `count` luma-only samples from s->temp[0] (two per iteration),
 * mirroring encode_422_bitstream()'s pass-1 / output-variant structure via
 * the LOAD2/STAT2/WRITE2 helper macros. */
781 static int encode_gray_bitstream(HYuvContext *s, int count){
784 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
785 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
790 int y0 = s->temp[0][2*i];\
791 int y1 = s->temp[0][2*i+1];
796 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
797 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
800 if(s->flags&CODEC_FLAG_PASS1){
801 for(i=0; i<count; i++){
806 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
810 for(i=0; i<count; i++){
816 for(i=0; i<count; i++){
823 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Decode `count` packed RGB pixels into s->temp[0].  Fast path: joint VLC hit
 * yields a whole precomputed pixel from pix_bgr_map.  Otherwise decode G, B, R
 * separately; with `decorrelate`, B and R are stored as offsets from G.
 * With `alpha`, a fourth channel is read (note: via vlc[2] -- same table as R). */
825 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
827 for(i=0; i<count; i++){
828 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
830 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
831 }else if(decorrelate){
832 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
833 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
834 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
836 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
837 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
838 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
841 s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
/* Dispatch to decode_bgr_1() with compile-time-constant decorrelate/alpha
 * flags so each of the four combinations gets its own specialized inline. */
845 static void decode_bgr_bitstream(HYuvContext *s, int count){
847 if(s->bitstream_bpp==24)
848 decode_bgr_1(s, count, 1, 0);
850 decode_bgr_1(s, count, 1, 1);
852 if(s->bitstream_bpp==24)
853 decode_bgr_1(s, count, 0, 0);
855 decode_bgr_1(s, count, 0, 1);
/* Entropy-code `count` packed RGB pixels from s->temp[0]: G is coded directly,
 * B and R as offsets from G (decorrelated).  Loop variants mirror the other
 * encoders: stats-only, stats+output, plain output. */
859 static int encode_bgr_bitstream(HYuvContext *s, int count){
862 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
863 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
868 int g= s->temp[0][4*i+G];\
869 int b= (s->temp[0][4*i+B] - g) & 0xff;\
870 int r= (s->temp[0][4*i+R] - g) & 0xff;
876 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
877 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
878 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
880 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
881 for(i=0; i<count; i++){
885 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
886 for(i=0; i<count; i++){
892 for(i=0; i<count; i++){
900 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Report the rows decoded since the last call to the application through
 * draw_horiz_band (if registered), computing per-plane offsets; chroma rows
 * are halved for 4:2:0 (12 bpp). */
901 static void draw_slice(HYuvContext *s, int y){
905 if(s->avctx->draw_horiz_band==NULL)
908 h= y - s->last_slice_end;
911 if(s->bitstream_bpp==12){
917 offset[0] = s->picture.linesize[0]*y;
918 offset[1] = s->picture.linesize[1]*cy;
919 offset[2] = s->picture.linesize[2]*cy;
923 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
925 s->last_slice_end= y + h;
/* Decode one frame: byte-swap the packet into bitstream_buffer, optionally
 * re-read per-frame Huffman tables (context mode), then undo the entropy
 * coding and the chosen predictor (LEFT / PLANE / MEDIAN) plane by plane.
 * Returns the number of bytes consumed or a negative error. */
928 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
929 const uint8_t *buf = avpkt->data;
930 int buf_size = avpkt->size;
931 HYuvContext *s = avctx->priv_data;
932 const int width= s->width;
933 const int width2= s->width>>1;
934 const int height= s->height;
935 int fake_ystride, fake_ustride, fake_vstride;
936 AVFrame * const p= &s->picture;
939 AVFrame *picture = data;
/* The huffyuv bitstream is stored in 32-bit little-endian words; make a
 * padded byte-swapped copy to read from. */
941 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
942 if (!s->bitstream_buffer)
943 return AVERROR(ENOMEM);
945 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
946 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
949 avctx->release_buffer(avctx, p);
952 if(avctx->get_buffer(avctx, p) < 0){
953 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
/* Context mode: the frame starts with fresh Huffman tables. */
958 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
963 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
966 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
/* Interlaced material predicts from two lines up (same field). */
968 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
969 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
970 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
972 s->last_slice_end= 0;
974 if(s->bitstream_bpp<24){
976 int lefty, leftu, leftv;
977 int lefttopy, lefttopu, lefttopv;
/* First pixels are stored raw (8 bits each) to seed the predictors. */
980 p->data[0][3]= get_bits(&s->gb, 8);
981 p->data[0][2]= get_bits(&s->gb, 8);
982 p->data[0][1]= get_bits(&s->gb, 8);
983 p->data[0][0]= get_bits(&s->gb, 8);
985 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
989 leftv= p->data[2][0]= get_bits(&s->gb, 8);
990 lefty= p->data[0][1]= get_bits(&s->gb, 8);
991 leftu= p->data[1][0]= get_bits(&s->gb, 8);
992 p->data[0][0]= get_bits(&s->gb, 8);
994 switch(s->predictor){
/* LEFT / PLANE: first line is left-predicted after the 2 raw pixels. */
997 decode_422_bitstream(s, width-2);
998 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
999 if(!(s->flags&CODEC_FLAG_GRAY)){
1000 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1001 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1004 for(cy=y=1; y<s->height; y++,cy++){
1005 uint8_t *ydst, *udst, *vdst;
/* 4:2:0: every other line carries luma only. */
1007 if(s->bitstream_bpp==12){
1008 decode_gray_bitstream(s, width);
1010 ydst= p->data[0] + p->linesize[0]*y;
1012 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1013 if(s->predictor == PLANE){
1015 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1018 if(y>=s->height) break;
1023 ydst= p->data[0] + p->linesize[0]*y;
1024 udst= p->data[1] + p->linesize[1]*cy;
1025 vdst= p->data[2] + p->linesize[2]*cy;
1027 decode_422_bitstream(s, width);
1028 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1029 if(!(s->flags&CODEC_FLAG_GRAY)){
1030 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1031 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
/* PLANE predictor additionally adds the line above (same field). */
1033 if(s->predictor == PLANE){
1034 if(cy>s->interlaced){
1035 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1036 if(!(s->flags&CODEC_FLAG_GRAY)){
1037 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1038 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1043 draw_slice(s, height);
/* MEDIAN predictor path. */
1047 /* first line except first 2 pixels is left predicted */
1048 decode_422_bitstream(s, width-2);
1049 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1050 if(!(s->flags&CODEC_FLAG_GRAY)){
1051 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1052 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1057 /* second line is left predicted for interlaced case */
1059 decode_422_bitstream(s, width);
1060 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1061 if(!(s->flags&CODEC_FLAG_GRAY)){
1062 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1063 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1068 /* next 4 pixels are left predicted too */
1069 decode_422_bitstream(s, 4);
1070 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1071 if(!(s->flags&CODEC_FLAG_GRAY)){
1072 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1073 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1076 /* next line except the first 4 pixels is median predicted */
1077 lefttopy= p->data[0][3];
1078 decode_422_bitstream(s, width-4);
1079 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1080 if(!(s->flags&CODEC_FLAG_GRAY)){
1081 lefttopu= p->data[1][1];
1082 lefttopv= p->data[2][1];
1083 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1084 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
/* Remaining lines: fully median predicted. */
1088 for(; y<height; y++,cy++){
1089 uint8_t *ydst, *udst, *vdst;
1091 if(s->bitstream_bpp==12){
1093 decode_gray_bitstream(s, width);
1094 ydst= p->data[0] + p->linesize[0]*y;
1095 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1098 if(y>=height) break;
1102 decode_422_bitstream(s, width);
1104 ydst= p->data[0] + p->linesize[0]*y;
1105 udst= p->data[1] + p->linesize[1]*cy;
1106 vdst= p->data[2] + p->linesize[2]*cy;
1108 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1109 if(!(s->flags&CODEC_FLAG_GRAY)){
1110 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1111 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1115 draw_slice(s, height);
/* Packed RGB path (24/32 bpp); frame is stored bottom-up. */
1121 int leftr, leftg, leftb, lefta;
1122 const int last_line= (height-1)*p->linesize[0];
1124 if(s->bitstream_bpp==32){
1125 lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
1126 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1127 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1128 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1130 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1131 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1132 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1133 lefta= p->data[0][last_line+A]= 255;
1134 skip_bits(&s->gb, 8);
1138 switch(s->predictor){
1141 decode_bgr_bitstream(s, width-1);
1142 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
1144 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1145 decode_bgr_bitstream(s, width);
1147 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1148 if(s->predictor == PLANE){
1149 if(s->bitstream_bpp!=32) lefta=0;
1150 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1151 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1152 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1156 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1159 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1163 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1170 *data_size = sizeof(AVFrame);
/* Bytes consumed, rounded up to whole 32-bit words, plus the table bytes. */
1172 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1174 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
/* Shared teardown: release the per-plane scratch line buffers. */
1176 static int common_end(HYuvContext *s){
1180 av_freep(&s->temp[i]);
1185 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder teardown: release the reference frame, the swap buffer, and all
 * VLC tables (common_end() call not visible in this extraction). */
1186 static av_cold int decode_end(AVCodecContext *avctx)
1188 HYuvContext *s = avctx->priv_data;
1191 if (s->picture.data[0])
1192 avctx->release_buffer(avctx, &s->picture);
1195 av_freep(&s->bitstream_buffer);
1198 free_vlc(&s->vlc[i]);
1203 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1205 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Encode one frame: optionally rebuild and store per-frame Huffman tables
 * (context mode), apply the configured predictor per line, entropy-code the
 * residuals, then byte-swap the output into 32-bit little-endian words.
 * Returns the encoded size in bytes. */
1206 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1207 HYuvContext *s = avctx->priv_data;
1208 AVFrame *pict = data;
1209 const int width= s->width;
1210 const int width2= s->width>>1;
1211 const int height= s->height;
1212 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1213 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1214 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1215 AVFrame * const p= &s->picture;
1219 p->pict_type= FF_I_TYPE;
/* Context mode: regenerate the tables from current stats and prepend them
 * to the frame; then halve the stats so they adapt over time. */
1224 generate_len_table(s->len[i], s->stats[i], 256);
1225 if(generate_bits_table(s->bits[i], s->len[i])<0)
1227 size+= store_table(s, s->len[i], &buf[size]);
1231 for(j=0; j<256; j++)
1232 s->stats[i][j] >>= 1;
1235 init_put_bits(&s->pb, buf+size, buf_size-size);
1237 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1238 int lefty, leftu, leftv, y, cy;
/* Seed pixels stored raw, mirroring what decode_frame() reads back. */
1240 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1241 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1242 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1243 put_bits(&s->pb, 8, p->data[0][0]);
1245 lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1246 leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1247 leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1249 encode_422_bitstream(s, 2, width-2);
/* MEDIAN predictor: left-predict the lines/pixels the median pass needs
 * as context, then median-predict the rest of the frame. */
1251 if(s->predictor==MEDIAN){
1252 int lefttopy, lefttopu, lefttopv;
1255 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1256 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1257 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1259 encode_422_bitstream(s, 0, width);
1263 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1264 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1265 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1267 encode_422_bitstream(s, 0, 4);
1269 lefttopy= p->data[0][3];
1270 lefttopu= p->data[1][1];
1271 lefttopv= p->data[2][1];
1272 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1273 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1274 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1275 encode_422_bitstream(s, 0, width-4);
1278 for(; y<height; y++,cy++){
1279 uint8_t *ydst, *udst, *vdst;
1281 if(s->bitstream_bpp==12){
1283 ydst= p->data[0] + p->linesize[0]*y;
1284 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1285 encode_gray_bitstream(s, width);
1288 if(y>=height) break;
1290 ydst= p->data[0] + p->linesize[0]*y;
1291 udst= p->data[1] + p->linesize[1]*cy;
1292 vdst= p->data[2] + p->linesize[2]*cy;
1294 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1295 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1296 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1298 encode_422_bitstream(s, 0, width);
/* LEFT / PLANE predictors for the remaining lines. */
1301 for(cy=y=1; y<height; y++,cy++){
1302 uint8_t *ydst, *udst, *vdst;
1304 /* encode a luma only line & y++ */
1305 if(s->bitstream_bpp==12){
1306 ydst= p->data[0] + p->linesize[0]*y;
1308 if(s->predictor == PLANE && s->interlaced < y){
1309 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1311 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1313 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1315 encode_gray_bitstream(s, width);
1317 if(y>=height) break;
1320 ydst= p->data[0] + p->linesize[0]*y;
1321 udst= p->data[1] + p->linesize[1]*cy;
1322 vdst= p->data[2] + p->linesize[2]*cy;
/* PLANE: vertical diff first (s->temp[2] doubles as U and V scratch,
 * hence the width2 offset for V), then left-predict the residual. */
1324 if(s->predictor == PLANE && s->interlaced < cy){
1325 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1326 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1327 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1329 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1330 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1331 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1333 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1334 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1335 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1338 encode_422_bitstream(s, 0, width);
/* Packed RGB path; the frame is written bottom-up (negative stride). */
1341 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1342 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1343 const int stride = -p->linesize[0];
1344 const int fake_stride = -fake_ystride;
1346 int leftr, leftg, leftb;
1348 put_bits(&s->pb, 8, leftr= data[R]);
1349 put_bits(&s->pb, 8, leftg= data[G]);
1350 put_bits(&s->pb, 8, leftb= data[B]);
1351 put_bits(&s->pb, 8, 0);
1353 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1354 encode_bgr_bitstream(s, width-1);
1356 for(y=1; y<s->height; y++){
1357 uint8_t *dst = data + y*stride;
1358 if(s->predictor == PLANE && s->interlaced < y){
1359 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1360 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1362 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1364 encode_bgr_bitstream(s, width);
1367 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
/* Pad/flush the bit writer and account for the trailing bits. */
1371 size+= (put_bits_count(&s->pb)+31)/8;
1372 put_bits(&s->pb, 16, 0);
1373 put_bits(&s->pb, 15, 0);
/* 2-pass: periodically dump the accumulated stats as text for pass 2. */
1376 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1378 char *p= avctx->stats_out;
1379 char *end= p + 1024*30;
1381 for(j=0; j<256; j++){
1382 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1386 snprintf(p, end-p, "\n");
1390 avctx->stats_out[0] = '\0';
1391 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1392 flush_put_bits(&s->pb);
/* Output words are little-endian 32-bit, so byte-swap in place. */
1393 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1396 s->picture_number++;
/* Encoder teardown: free extradata and the two-pass stats buffer
 * (common_end() call not visible in this extraction). */
1401 static av_cold int encode_end(AVCodecContext *avctx)
1403 HYuvContext *s = avctx->priv_data;
1407 av_freep(&avctx->extradata);
1408 av_freep(&avctx->stats_out);
1412 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1414 #if CONFIG_HUFFYUV_DECODER
/* Decoder registration for the original huffyuv codec; supports direct
 * rendering and slice-based drawing. */
1415 AVCodec huffyuv_decoder = {
1419 sizeof(HYuvContext),
1424 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1426 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1430 #if CONFIG_FFVHUFF_DECODER
/* Decoder registration for the FFmpeg ffvhuff variant (adds YV12 and
 * per-frame table support on top of huffyuv). */
1431 AVCodec ffvhuff_decoder = {
1435 sizeof(HYuvContext),
1440 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1442 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1446 #if CONFIG_HUFFYUV_ENCODER
/* Encoder registration for the original huffyuv codec; note YUV420P is
 * deliberately absent (rejected in encode_init()). */
1447 AVCodec huffyuv_encoder = {
1451 sizeof(HYuvContext),
1455 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1456 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1460 #if CONFIG_FFVHUFF_ENCODER
/* Encoder registration for the ffvhuff variant; additionally accepts
 * YUV420P input. */
1461 AVCodec ffvhuff_encoder = {
1465 sizeof(HYuvContext),
1469 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1470 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),