2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
28 * huffyuv codec for libavcodec.
/* NOTE(review): this chunk is a decimated extract of FFmpeg's huffyuv codec —
 * interior lines of every definition are missing and the original file's line
 * numbers are fused into the text.  Comments describe only what the visible
 * fragments show; confirm against the full file before relying on them. */
50 typedef enum Predictor{
/* Shared encoder/decoder state for the huffyuv codec (fragment). */
56 typedef struct HYuvContext{
57 AVCodecContext *avctx;
65 int yuy2; //use yuy2 instead of 422P
66 int bgr32; //use bgr32 instead of bgr24
/* per-plane symbol frequency counts, accumulated/used by the encode path */
73 uint64_t stats[3][256];
/* per-plane VLC codewords, filled by generate_bits_table() */
75 uint32_t bits[3][256];
/* joint BGR code -> packed pixel lookup, built in generate_joint_tables() */
76 uint32_t pix_bgr_map[1<<VLC_BITS];
77 VLC vlc[6]; //Y,U,V,YY,YU,YV
/* byte-swapped copy of the input packet (see decode_frame) */
79 uint8_t *bitstream_buffer;
80 unsigned int bitstream_buffer_size;
/* Run-length coded VLC length table for the "classic" (v1) huffyuv luma
 * codes; parsed with read_len_table() in read_old_huffman_tables().
 * (Fragment: trailing rows and the closing brace are missing.) */
84 static const unsigned char classic_shift_luma[] = {
85 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
86 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
/* Run-length coded VLC length table for the "classic" (v1) huffyuv chroma
 * codes; parsed with read_len_table() in read_old_huffman_tables().
 * (Fragment: some rows missing.) */
90 static const unsigned char classic_shift_chroma[] = {
91 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
92 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
93 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
/* Codeword values for the classic luma table; copied verbatim into
 * s->bits[0] by read_old_huffman_tables().  (Fragment: closing brace
 * not visible.) */
96 static const unsigned char classic_add_luma[256] = {
97 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
98 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
99 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
100 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
101 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
102 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
103 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
104 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
105 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
106 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
107 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
108 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
109 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
110 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
111 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
112 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
/* Codeword values for the classic chroma table; copied verbatim into
 * s->bits[1] by read_old_huffman_tables().  (Fragment: closing brace
 * not visible.) */
115 static const unsigned char classic_add_chroma[256] = {
116 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
117 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
118 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
119 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
120 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
121 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
122 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
123 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
124 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
125 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
126 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
127 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
128 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
129 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
130 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
131 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
/* Left-predict one row: subtract each sample's left neighbour, seeding the
 * first sample with 'left'.  The visible diff_bytes() call handles the bulk
 * of the row at a 1-byte distance (src vs. src-1); presumably the return
 * value is the row's last sample for chaining — NOTE(review): fragment,
 * head loops and return statement are not visible. */
134 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
138 const int temp= src[i];
145 const int temp= src[i];
149 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
/* Per-channel left prediction for a 32-bit BGR row: the first FFMIN(w,4)
 * pixels are differenced per channel, the rest via diff_bytes() at a 4-byte
 * (one-pixel) distance.  The last pixel's channels are returned through
 * *red/*green/*blue for chaining into the next row.  (Fragment: the
 * per-channel subtraction statements are not visible.) */
154 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
160 for(i=0; i<FFMIN(w,4); i++){
161 const int rt= src[i*4+R];
162 const int gt= src[i*4+G];
163 const int bt= src[i*4+B];
171 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
172 *red= src[(w-1)*4+R];
173 *green= src[(w-1)*4+G];
174 *blue= src[(w-1)*4+B];
/* Parse a run-length coded table of 256 VLC code lengths from the
 * bitstream: a 3-bit repeat count and a 5-bit length per run, with an
 * 8-bit extended repeat in some branch.  Returns <0 on malformed input.
 * NOTE(review): fragment — the loop structure and the condition selecting
 * the 8-bit repeat are not visible here. */
177 static int read_len_table(uint8_t *dst, GetBitContext *gb){
181 repeat= get_bits(gb, 3);
182 val = get_bits(gb, 5);
184 repeat= get_bits(gb, 8);
185 //printf("%d %d\n", val, repeat);
187 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
/* Assign codewords to all 256 symbols from their code lengths, iterating
 * lengths from 32 down to 1 (canonical-style assignment); logs and fails
 * if the length table is inconsistent.  (Fragment: the codeword
 * accumulation statements are not visible.) */
196 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
200 for(len=32; len>0; len--){
201 for(index=0; index<256; index++){
202 if(len_table[index]==len)
206 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
214 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Sift the element at 'root' down a binary min-heap of 'size' HeapElems
 * ordered by .val (smallest at the root).  (Fragment.) */
220 static void heap_sift(HeapElem *h, int root, int size)
222 while(root*2+1 < size) {
223 int child = root*2+1;
/* pick the smaller of the two children */
224 if(child < size-1 && h[child].val > h[child+1].val)
226 if(h[root].val > h[child].val) {
227 FFSWAP(HeapElem, h[root], h[child]);
/* Build Huffman code lengths from symbol statistics using a min-heap to
 * repeatedly merge the two smallest entries; 'up' records each node's
 * parent so lengths can be read back afterwards.  If any resulting length
 * reaches 32 the whole process restarts with a doubled 'offset' bias added
 * to every count, flattening the distribution until all codes fit.
 * (Fragment: heap setup and the merge's second half are not visible.) */
234 static void generate_len_table(uint8_t *dst, const uint64_t *stats, int size){
240 for(offset=1; ; offset<<=1){
241 for(i=0; i<size; i++){
243 h[i].val = (stats[i] << 8) + offset;
/* heapify */
245 for(i=size/2-1; i>=0; i--)
246 heap_sift(h, i, size);
248 for(next=size; next<size*2-1; next++){
249 // merge the two smallest entries, and put it back in the heap
250 uint64_t min1v = h[0].val;
251 up[h[0].name] = next;
252 h[0].val = INT64_MAX;
253 heap_sift(h, 0, size);
254 up[h[0].name] = next;
257 heap_sift(h, 0, size);
/* read back depths: internal nodes first, then the leaves */
261 for(i=2*size-3; i>=size; i--)
262 len[i] = len[up[i]] + 1;
263 for(i=0; i<size; i++) {
264 dst[i] = len[up[i]] + 1;
/* a length >= 32 is unusable: retry with a larger offset */
265 if(dst[i] >= 32) break;
270 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Build joint VLC tables so the decoder can read two symbols (YUV pairs)
 * or a whole BGR pixel in one get_vlc2() lookup.  For <24 bpp it builds
 * vlc[3+p] over (Y,plane-p) pairs whose combined length fits VLC_BITS;
 * for BGR it builds vlc[3] plus pix_bgr_map over a restricted g/b/r range.
 * (Fragment: several loop bodies and bounds checks are not visible.) */
272 static void generate_joint_tables(HYuvContext *s){
273 uint16_t symbols[1<<VLC_BITS];
274 uint16_t bits[1<<VLC_BITS];
275 uint8_t len[1<<VLC_BITS];
276 if(s->bitstream_bpp < 24){
279 for(i=y=0; y<256; y++){
280 int len0 = s->len[0][y];
281 int limit = VLC_BITS - len0;
284 for(u=0; u<256; u++){
285 int len1 = s->len[p][u];
/* only pairs whose joint code fits in the first-level table */
288 len[i] = len0 + len1;
289 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
290 symbols[i] = (y<<8) + u;
291 if(symbols[i] != 0xffff) // reserved to mean "invalid"
295 free_vlc(&s->vlc[3+p]);
296 init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
299 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
300 int i, b, g, r, code;
301 int p0 = s->decorrelate;
302 int p1 = !s->decorrelate;
303 // restrict the range to +/-16 becaues that's pretty much guaranteed to
304 // cover all the combinations that fit in 11 bits total, and it doesn't
305 // matter if we miss a few rare codes.
306 for(i=0, g=-16; g<16; g++){
307 int len0 = s->len[p0][g&255];
308 int limit0 = VLC_BITS - len0;
311 for(b=-16; b<16; b++){
312 int len1 = s->len[p1][b&255];
313 int limit1 = limit0 - len1;
316 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
317 for(r=-16; r<16; r++){
318 int len2 = s->len[2][r&255];
321 len[i] = len0 + len1 + len2;
322 bits[i] = (code << len2) + s->bits[2][r&255];
336 free_vlc(&s->vlc[3]);
337 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
/* Read the three per-plane code-length tables from 'src', derive codewords,
 * (re)build the per-plane and joint VLCs, and return the number of bytes
 * consumed.  (Fragment: the surrounding per-plane loop is not visible.) */
341 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
345 init_get_bits(&gb, src, length*8);
348 if(read_len_table(s->len[i], &gb)<0)
350 if(generate_bits_table(s->bits[i], s->len[i])<0){
/* debug dump of the generated table */
354 for(j=0; j<256; j++){
355 printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
358 free_vlc(&s->vlc[i]);
359 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
362 generate_joint_tables(s);
/* bytes consumed, rounded up to whole bytes */
364 return (get_bits_count(&gb)+7)/8;
/* Set up the fixed "classic" (pre-extradata) huffyuv tables: lengths come
 * from the run-length coded classic_shift_* arrays, codewords from the
 * classic_add_* arrays; for >=24 bpp the luma table is reused for plane 1,
 * and plane 2 mirrors plane 1.  (Fragment: the v1 rejection path at the
 * end is only partially visible.) */
367 static int read_old_huffman_tables(HYuvContext *s){
372 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
373 if(read_len_table(s->len[0], &gb)<0)
375 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
376 if(read_len_table(s->len[1], &gb)<0)
379 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
380 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
382 if(s->bitstream_bpp >= 24){
383 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
384 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
386 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
387 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
390 free_vlc(&s->vlc[i]);
391 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
394 generate_joint_tables(s);
398 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
/* Allocate per-row scratch buffers: one per plane (width+16 bytes) for
 * planar <24 bpp formats, a single 4*width+16 buffer for packed RGB.
 * NOTE(review): fragment — allocation failures are not visibly checked. */
403 static av_cold void alloc_temp(HYuvContext *s){
406 if(s->bitstream_bpp<24){
408 s->temp[i]= av_malloc(s->width + 16);
411 s->temp[0]= av_mallocz(4*s->width + 16);
/* Initialisation shared by encoder and decoder: cache flags/dimensions
 * from the AVCodecContext and set up the DSP function table. */
415 static av_cold int common_init(AVCodecContext *avctx){
416 HYuvContext *s = avctx->priv_data;
419 s->flags= avctx->flags;
421 dsputil_init(&s->dsp, avctx);
423 s->width= avctx->width;
424 s->height= avctx->height;
425 assert(s->width>0 && s->height>0);
430 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder init: parse predictor/bpp/interlace/context flags either from
 * extradata (v2+/ffvhuff) or from bits_per_coded_sample (classic v1
 * files), read the Huffman tables, and pick the output pixel format.
 * (Fragment: error-return paths and some switch arms are not visible.) */
431 static av_cold int decode_init(AVCodecContext *avctx)
433 HYuvContext *s = avctx->priv_data;
436 memset(s->vlc, 0, 3*sizeof(VLC));
438 avctx->coded_frame= &s->picture;
/* default interlacing guess from frame height; may be overridden below */
439 s->interlaced= s->height > 288;
442 //if(avctx->extradata)
443 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
444 if(avctx->extradata_size){
445 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
446 s->version=1; // do such files exist at all?
453 int method, interlace;
455 if (avctx->extradata_size < 4)
/* extradata layout: [0]=predictor|decorrelate, [1]=bpp, [2]=flags, [3]=0 */
458 method= ((uint8_t*)avctx->extradata)[0];
459 s->decorrelate= method&64 ? 1 : 0;
460 s->predictor= method&63;
461 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
462 if(s->bitstream_bpp==0)
463 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
464 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
465 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
466 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
468 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
/* no usable extradata: infer parameters from bits_per_coded_sample */
471 switch(avctx->bits_per_coded_sample&7){
482 s->decorrelate= avctx->bits_per_coded_sample >= 24;
485 s->predictor= MEDIAN;
489 s->predictor= LEFT; //OLD
493 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
496 if(read_old_huffman_tables(s) < 0)
500 switch(s->bitstream_bpp){
502 avctx->pix_fmt = PIX_FMT_YUV420P;
506 avctx->pix_fmt = PIX_FMT_YUYV422;
508 avctx->pix_fmt = PIX_FMT_YUV422P;
514 avctx->pix_fmt = PIX_FMT_RGB32;
516 avctx->pix_fmt = PIX_FMT_BGR24;
525 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
529 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
531 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Serialise a 256-entry code-length table into 'buf' using the same
 * run-length scheme read_len_table() parses: long runs get an explicit
 * repeat byte, short runs pack the count into the top 3 bits.  Returns
 * the number of bytes written (fragment: return not visible). */
532 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
540 for(; i<256 && len[i]==val && repeat<255; i++)
543 assert(val < 32 && val >0 && repeat<256 && repeat>0);
546 buf[index++]= repeat;
548 buf[index++]= val | (repeat<<5);
/* Encoder init: allocate extradata/stats buffers, map the input pixel
 * format to a bitstream bpp, validate option combinations (huffyuv vs
 * ffvhuff restrictions, RGB+median, context+2pass), build the initial
 * Huffman tables from stats_in or a default distribution, and write the
 * 4-byte header plus the serialised tables into extradata.
 * (Fragment: many branches and the final return are not visible.) */
555 static av_cold int encode_init(AVCodecContext *avctx)
557 HYuvContext *s = avctx->priv_data;
562 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
563 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
566 avctx->coded_frame= &s->picture;
568 switch(avctx->pix_fmt){
569 case PIX_FMT_YUV420P:
570 s->bitstream_bpp= 12;
572 case PIX_FMT_YUV422P:
573 s->bitstream_bpp= 16;
576 s->bitstream_bpp= 24;
579 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
582 avctx->bits_per_coded_sample= s->bitstream_bpp;
583 s->decorrelate= s->bitstream_bpp >= 24;
584 s->predictor= avctx->prediction_method;
585 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
586 if(avctx->context_model==1){
587 s->context= avctx->context_model;
588 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
589 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
/* the original Windows huffyuv cannot do 420p or per-frame tables */
594 if(avctx->codec->id==CODEC_ID_HUFFYUV){
595 if(avctx->pix_fmt==PIX_FMT_YUV420P){
596 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
599 if(avctx->context_model){
600 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
603 if(s->interlaced != ( s->height > 288 ))
604 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
607 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
608 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
/* 4-byte header: predictor+decorrelate, bpp, interlace/context flags, 0 */
612 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
613 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
614 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
616 ((uint8_t*)avctx->extradata)[2]|= 0x40;
617 ((uint8_t*)avctx->extradata)[3]= 0;
618 s->avctx->extradata_size= 4;
/* seed stats from a previous pass if provided... */
621 char *p= avctx->stats_in;
631 for(j=0; j<256; j++){
632 s->stats[i][j]+= strtol(p, &next, 0);
633 if(next==p) return -1;
637 if(p[0]==0 || p[1]==0 || p[2]==0) break;
/* ...otherwise use a generic Laplacian-ish default distribution */
641 for(j=0; j<256; j++){
642 int d= FFMIN(j, 256-j);
644 s->stats[i][j]= 100000000/(d+1);
649 generate_len_table(s->len[i], s->stats[i], 256);
651 if(generate_bits_table(s->bits[i], s->len[i])<0){
655 s->avctx->extradata_size+=
656 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
/* reset stats for context-model coding, scaled to the frame size */
661 int pels = s->width*s->height / (i?40:10);
662 for(j=0; j<256; j++){
663 int d= FFMIN(j, 256-j);
664 s->stats[i][j]= pels/(d+1);
673 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
681 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
683 /* TODO instead of restarting the read when the code isn't in the first level
684 * of the joint table, jump into the 2nd level of the individual table. */
/* Decode two samples at once: try the joint table first; on a miss
 * (fragment: the miss test is not visible) fall back to two single-symbol
 * reads from the per-plane tables. */
685 #define READ_2PIX(dst0, dst1, plane1){\
686 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
691 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
692 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
/* Decode 'count' 4:2:2 pixel pairs (Y,U,Y,V) into s->temp[0..2].  Near the
 * end of the buffer (31*4 is presumably the worst-case bits per iteration
 * — confirm) the slow loop re-checks the bit position each iteration;
 * otherwise the unchecked fast loop is used. */
696 static void decode_422_bitstream(HYuvContext *s, int count){
701 if(count >= (get_bits_left(&s->gb))/(31*4)){
702 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
703 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
704 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
707 for(i=0; i<count; i++){
708 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
709 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
/* Decode 'count' luma-only samples (pairs via the joint YY table) into
 * s->temp[0], with the same checked/unchecked split as
 * decode_422_bitstream(). */
714 static void decode_gray_bitstream(HYuvContext *s, int count){
719 if(count >= (get_bits_left(&s->gb))/(31*2)){
720 for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
721 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
724 for(i=0; i<count; i++){
725 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
730 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Entropy-code 'count' 4:2:2 pixel pairs starting at 'offset' from the
 * temp buffers.  Three variants: stats-only (pass 1), stats+output
 * (context model), plain output.  Fails if the packet buffer cannot hold
 * the worst case.  (Fragment: the stats accumulation bodies are missing.) */
731 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
733 const uint8_t *y = s->temp[0] + offset;
734 const uint8_t *u = s->temp[1] + offset/2;
735 const uint8_t *v = s->temp[2] + offset/2;
/* worst case: 2 pixels -> up to 4 codes of <=2 bytes each per iteration */
737 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
738 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
749 if(s->flags&CODEC_FLAG_PASS1){
750 for(i=0; i<count; i++){
758 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
761 for(i=0; i<count; i++){
764 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
766 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
768 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
770 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
773 for(i=0; i<count; i++){
775 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
776 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
777 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
778 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
/* Entropy-code 'count' luma-only pairs from s->temp[0], mirroring the
 * pass1/context/plain split of encode_422_bitstream() via the LOAD2/
 * STAT2/WRITE2 helper macros defined inline.  (Fragment.) */
784 static int encode_gray_bitstream(HYuvContext *s, int count){
787 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
788 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
/* helper macro bodies: load a pair ... */
793 int y0 = s->temp[0][2*i];\
794 int y1 = s->temp[0][2*i+1];
/* ... and emit its two codes */
799 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
800 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
803 if(s->flags&CODEC_FLAG_PASS1){
804 for(i=0; i<count; i++){
809 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
813 for(i=0; i<count; i++){
819 for(i=0; i<count; i++){
826 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Decode 'count' BGR(A) pixels into s->temp[0].  Fast path: one joint-table
 * lookup maps straight to a packed pixel via pix_bgr_map; otherwise read
 * B,G,R (plus optional A) individually, adding G back when 'decorrelate'
 * is set.  always_inline so the constant decorrelate/alpha flags fold away. */
828 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
830 for(i=0; i<count; i++){
831 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
833 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
834 }else if(decorrelate){
835 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
836 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
837 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
839 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
840 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
841 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
844 s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
/* Dispatch to the right decode_bgr_1() specialisation based on
 * s->decorrelate (outer condition not visible in this fragment) and
 * whether an alpha channel is present (bpp != 24). */
848 static void decode_bgr_bitstream(HYuvContext *s, int count){
850 if(s->bitstream_bpp==24)
851 decode_bgr_1(s, count, 1, 0);
853 decode_bgr_1(s, count, 1, 1);
855 if(s->bitstream_bpp==24)
856 decode_bgr_1(s, count, 0, 0);
858 decode_bgr_1(s, count, 0, 1);
/* Entropy-code 'count' BGR pixels from s->temp[0]: G is coded directly,
 * B and R as differences from G (decorrelated), via the LOAD3/STAT3/WRITE3
 * helper macros.  Same pass1/context/plain variants as the YUV encoders.
 * (Fragment: macro heads and stats bodies are missing.) */
862 static int encode_bgr_bitstream(HYuvContext *s, int count){
865 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
866 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
/* load one pixel, decorrelating B and R against G */
871 int g= s->temp[0][4*i+G];\
872 int b= (s->temp[0][4*i+B] - g) & 0xff;\
873 int r= (s->temp[0][4*i+R] - g) & 0xff;
/* emit the three codes */
879 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
880 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
881 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
883 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
884 for(i=0; i<count; i++){
888 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
889 for(i=0; i<count; i++){
895 for(i=0; i<count; i++){
903 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Notify the application of newly-decoded rows [last_slice_end, y) via
 * draw_horiz_band, computing per-plane offsets (chroma rows 'cy' are
 * derived from 'y'; the 420 halving step is not visible in this fragment). */
904 static void draw_slice(HYuvContext *s, int y){
908 if(s->avctx->draw_horiz_band==NULL)
911 h= y - s->last_slice_end;
914 if(s->bitstream_bpp==12){
920 offset[0] = s->picture.linesize[0]*y;
921 offset[1] = s->picture.linesize[1]*cy;
922 offset[2] = s->picture.linesize[2]*cy;
926 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
928 s->last_slice_end= y + h;
/* Decode one huffyuv frame: byte-swap the packet into bitstream_buffer,
 * optionally re-read per-frame Huffman tables (context model), then decode
 * row by row according to predictor (LEFT / PLANE / MEDIAN) and colour
 * space (planar YUV vs packed BGR, which is stored bottom-up).  Returns
 * bytes consumed.  NOTE(review): fragment — many loop headers, error
 * returns and closing braces are missing; comments reflect only the
 * visible statements. */
931 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
932 const uint8_t *buf = avpkt->data;
933 int buf_size = avpkt->size;
934 HYuvContext *s = avctx->priv_data;
935 const int width= s->width;
936 const int width2= s->width>>1;
937 const int height= s->height;
938 int fake_ystride, fake_ustride, fake_vstride;
939 AVFrame * const p= &s->picture;
942 AVFrame *picture = data;
/* huffyuv bitstreams are stored in 32-bit little-endian words: swap once */
944 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
945 if (!s->bitstream_buffer)
946 return AVERROR(ENOMEM);
948 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
949 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
952 avctx->release_buffer(avctx, p);
955 if(avctx->get_buffer(avctx, p) < 0){
956 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
/* context model: tables are re-sent in every frame */
961 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
966 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
969 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
/* interlaced material predicts against the line two rows up */
971 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
972 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
973 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
975 s->last_slice_end= 0;
977 if(s->bitstream_bpp<24){
979 int lefty, leftu, leftv;
980 int lefttopy, lefttopu, lefttopv;
/* first pixel(s) are stored raw to seed the predictors */
983 p->data[0][3]= get_bits(&s->gb, 8);
984 p->data[0][2]= get_bits(&s->gb, 8);
985 p->data[0][1]= get_bits(&s->gb, 8);
986 p->data[0][0]= get_bits(&s->gb, 8);
988 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
992 leftv= p->data[2][0]= get_bits(&s->gb, 8);
993 lefty= p->data[0][1]= get_bits(&s->gb, 8);
994 leftu= p->data[1][0]= get_bits(&s->gb, 8);
995 p->data[0][0]= get_bits(&s->gb, 8);
997 switch(s->predictor){
/* LEFT / PLANE: first line after the raw seed is left-predicted */
1000 decode_422_bitstream(s, width-2);
1001 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1002 if(!(s->flags&CODEC_FLAG_GRAY)){
1003 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1004 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1007 for(cy=y=1; y<s->height; y++,cy++){
1008 uint8_t *ydst, *udst, *vdst;
/* 420: every other luma line carries no chroma */
1010 if(s->bitstream_bpp==12){
1011 decode_gray_bitstream(s, width);
1013 ydst= p->data[0] + p->linesize[0]*y;
1015 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1016 if(s->predictor == PLANE){
1018 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1021 if(y>=s->height) break;
1026 ydst= p->data[0] + p->linesize[0]*y;
1027 udst= p->data[1] + p->linesize[1]*cy;
1028 vdst= p->data[2] + p->linesize[2]*cy;
1030 decode_422_bitstream(s, width);
1031 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1032 if(!(s->flags&CODEC_FLAG_GRAY)){
1033 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1034 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
1036 if(s->predictor == PLANE){
1037 if(cy>s->interlaced){
1038 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1039 if(!(s->flags&CODEC_FLAG_GRAY)){
1040 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1041 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1046 draw_slice(s, height);
/* MEDIAN predictor */
1050 /* first line except first 2 pixels is left predicted */
1051 decode_422_bitstream(s, width-2);
1052 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1053 if(!(s->flags&CODEC_FLAG_GRAY)){
1054 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1055 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1060 /* second line is left predicted for interlaced case */
1062 decode_422_bitstream(s, width);
1063 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1064 if(!(s->flags&CODEC_FLAG_GRAY)){
/* NOTE(review): linesize[2] used for plane 1 and vice versa — looks
 * swapped; confirm against the upstream file before "fixing" */
1065 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1066 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1071 /* next 4 pixels are left predicted too */
1072 decode_422_bitstream(s, 4);
1073 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1074 if(!(s->flags&CODEC_FLAG_GRAY)){
1075 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1076 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1079 /* next line except the first 4 pixels is median predicted */
1080 lefttopy= p->data[0][3];
1081 decode_422_bitstream(s, width-4);
1082 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1083 if(!(s->flags&CODEC_FLAG_GRAY)){
1084 lefttopu= p->data[1][1];
1085 lefttopv= p->data[2][1];
1086 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1087 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
/* remaining lines are fully median predicted */
1091 for(; y<height; y++,cy++){
1092 uint8_t *ydst, *udst, *vdst;
1094 if(s->bitstream_bpp==12){
1096 decode_gray_bitstream(s, width);
1097 ydst= p->data[0] + p->linesize[0]*y;
1098 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1101 if(y>=height) break;
1105 decode_422_bitstream(s, width);
1107 ydst= p->data[0] + p->linesize[0]*y;
1108 udst= p->data[1] + p->linesize[1]*cy;
1109 vdst= p->data[2] + p->linesize[2]*cy;
1111 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1112 if(!(s->flags&CODEC_FLAG_GRAY)){
1113 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1114 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1118 draw_slice(s, height);
/* packed BGR path: frame is stored bottom-up, last line first */
1124 int leftr, leftg, leftb, lefta;
1125 const int last_line= (height-1)*p->linesize[0];
1127 if(s->bitstream_bpp==32){
1128 lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
1129 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1130 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1131 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1133 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1134 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1135 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1136 lefta= p->data[0][last_line+A]= 255;
1137 skip_bits(&s->gb, 8);
1141 switch(s->predictor){
1144 decode_bgr_bitstream(s, width-1);
1145 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
1147 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1148 decode_bgr_bitstream(s, width);
1150 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1151 if(s->predictor == PLANE){
1152 if(s->bitstream_bpp!=32) lefta=0;
1153 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1154 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1155 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1159 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1162 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1166 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1173 *data_size = sizeof(AVFrame);
/* bytes consumed, rounded up to a whole 32-bit word, plus table bytes */
1175 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1177 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
/* Teardown shared by encoder and decoder: free the per-row temp buffers.
 * (Fragment: loop header and return are not visible.) */
1179 static int common_end(HYuvContext *s){
1183 av_freep(&s->temp[i]);
1188 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder teardown: release the last frame buffer, free the byte-swap
 * buffer and the VLC tables.  (Fragment.) */
1189 static av_cold int decode_end(AVCodecContext *avctx)
1191 HYuvContext *s = avctx->priv_data;
1194 if (s->picture.data[0])
1195 avctx->release_buffer(avctx, &s->picture);
1198 av_freep(&s->bitstream_buffer);
1201 free_vlc(&s->vlc[i]);
1206 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1208 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Encode one frame into 'buf': optionally regenerate and store per-frame
 * tables (context model), emit the raw seed pixels, then code each row
 * with the chosen predictor; finally flush, pad, byte-swap the output to
 * little-endian words and optionally dump stats for 2-pass.  NOTE(review):
 * fragment — loop headers, returns and braces are missing throughout. */
1209 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1210 HYuvContext *s = avctx->priv_data;
1211 AVFrame *pict = data;
1212 const int width= s->width;
1213 const int width2= s->width>>1;
1214 const int height= s->height;
1215 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1216 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1217 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1218 AVFrame * const p= &s->picture;
1222 p->pict_type= FF_I_TYPE;
/* context model: rebuild tables from current stats and store them in-band */
1227 generate_len_table(s->len[i], s->stats[i], 256);
1228 if(generate_bits_table(s->bits[i], s->len[i])<0)
1230 size+= store_table(s, s->len[i], &buf[size]);
/* decay stats so they track the recent material */
1234 for(j=0; j<256; j++)
1235 s->stats[i][j] >>= 1;
1238 init_put_bits(&s->pb, buf+size, buf_size-size);
1240 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1241 int lefty, leftu, leftv, y, cy;
/* raw seed pixels, same order the decoder reads them */
1243 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1244 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1245 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1246 put_bits(&s->pb, 8, p->data[0][0]);
1248 lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
1249 leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
1250 leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
1252 encode_422_bitstream(s, 2, width-2);
1254 if(s->predictor==MEDIAN){
1255 int lefttopy, lefttopu, lefttopv;
/* second line (interlaced seed) is left predicted */
1258 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1259 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1260 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1262 encode_422_bitstream(s, 0, width);
/* next 4 pixels left predicted, rest of the line median predicted */
1266 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1267 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1268 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1270 encode_422_bitstream(s, 0, 4);
1272 lefttopy= p->data[0][3];
1273 lefttopu= p->data[1][1];
1274 lefttopv= p->data[2][1];
1275 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1276 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1277 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1278 encode_422_bitstream(s, 0, width-4);
/* remaining lines fully median predicted */
1281 for(; y<height; y++,cy++){
1282 uint8_t *ydst, *udst, *vdst;
1284 if(s->bitstream_bpp==12){
1286 ydst= p->data[0] + p->linesize[0]*y;
1287 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1288 encode_gray_bitstream(s, width);
1291 if(y>=height) break;
1293 ydst= p->data[0] + p->linesize[0]*y;
1294 udst= p->data[1] + p->linesize[1]*cy;
1295 vdst= p->data[2] + p->linesize[2]*cy;
1297 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1298 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1299 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1301 encode_422_bitstream(s, 0, width);
/* LEFT / PLANE predictors */
1304 for(cy=y=1; y<height; y++,cy++){
1305 uint8_t *ydst, *udst, *vdst;
1307 /* encode a luma only line & y++ */
1308 if(s->bitstream_bpp==12){
1309 ydst= p->data[0] + p->linesize[0]*y;
1311 if(s->predictor == PLANE && s->interlaced < y){
1312 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1314 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1316 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1318 encode_gray_bitstream(s, width);
1320 if(y>=height) break;
1323 ydst= p->data[0] + p->linesize[0]*y;
1324 udst= p->data[1] + p->linesize[1]*cy;
1325 vdst= p->data[2] + p->linesize[2]*cy;
1327 if(s->predictor == PLANE && s->interlaced < cy){
1328 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1329 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1330 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1332 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1333 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1334 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1336 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1337 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1338 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1341 encode_422_bitstream(s, 0, width);
/* packed RGB path: iterate bottom-up via a negative stride */
1344 }else if(avctx->pix_fmt == PIX_FMT_RGB32){
1345 uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
1346 const int stride = -p->linesize[0];
1347 const int fake_stride = -fake_ystride;
1349 int leftr, leftg, leftb;
1351 put_bits(&s->pb, 8, leftr= data[R]);
1352 put_bits(&s->pb, 8, leftg= data[G]);
1353 put_bits(&s->pb, 8, leftb= data[B]);
1354 put_bits(&s->pb, 8, 0);
1356 sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
1357 encode_bgr_bitstream(s, width-1);
1359 for(y=1; y<s->height; y++){
1360 uint8_t *dst = data + y*stride;
1361 if(s->predictor == PLANE && s->interlaced < y){
1362 s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
1363 sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
1365 sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
1367 encode_bgr_bitstream(s, width);
1370 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
/* pad to a word boundary (decoder rounds up to 32-bit words) */
1374 size+= (put_bits_count(&s->pb)+31)/8;
1375 put_bits(&s->pb, 16, 0);
1376 put_bits(&s->pb, 15, 0);
/* 2-pass: dump accumulated stats every 32 frames */
1379 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1381 char *p= avctx->stats_out;
1382 char *end= p + 1024*30;
1384 for(j=0; j<256; j++){
1385 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1389 snprintf(p, end-p, "\n");
1393 avctx->stats_out[0] = '\0';
1394 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1395 flush_put_bits(&s->pb);
/* output is stored as little-endian 32-bit words */
1396 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1399 s->picture_number++;
/* Encoder teardown: free the extradata and stats buffers allocated in
 * encode_init().  (Fragment: common_end() call and return not visible.) */
1404 static av_cold int encode_end(AVCodecContext *avctx)
1406 HYuvContext *s = avctx->priv_data;
1410 av_freep(&avctx->extradata);
1411 av_freep(&avctx->stats_out);
1415 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Codec registration tables (fragments: most positional initialiser
 * fields — name, type, id, init/close/decode callbacks — are missing). */
1417 #if CONFIG_HUFFYUV_DECODER
1418 AVCodec huffyuv_decoder = {
1422 sizeof(HYuvContext),
1427 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1429 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1433 #if CONFIG_FFVHUFF_DECODER
1434 AVCodec ffvhuff_decoder = {
1438 sizeof(HYuvContext),
1443 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1445 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1449 #if CONFIG_HUFFYUV_ENCODER
1450 AVCodec huffyuv_encoder = {
1454 sizeof(HYuvContext),
1458 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1459 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1463 #if CONFIG_FFVHUFF_ENCODER
1464 AVCodec ffvhuff_encoder = {
1468 sizeof(HYuvContext),
1472 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1473 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),