2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
28 * huffyuv codec for libavcodec.
32 #include "bitstream.h"
38 #ifdef WORDS_BIGENDIAN
/* NOTE(review): this listing is fragmentary -- the embedded line numbers jump,
 * so declarations between them are not visible here. */
48 typedef enum Predictor{
/* Shared state for the huffyuv decoder and encoder. */
54 typedef struct HYuvContext{
55 AVCodecContext *avctx;
63 int yuy2; //use yuy2 instead of 422P
64 int bgr32; //use bgr32 instead of bgr24
/* per-plane symbol frequency counts; updated while encoding and loaded from
 * stats_in for two-pass / adaptive (context) tables */
71 uint64_t stats[3][256];
/* per-plane Huffman code values indexed by symbol (lengths are in s->len) */
73 uint32_t bits[3][256];
/* per-frame scratch copy of the input packet (reallocated in decode_frame) */
76 uint8_t *bitstream_buffer;
77 unsigned int bitstream_buffer_size;
/* Run-length coded Huffman code-length tables for "classic" (v1) huffyuv
 * streams; expanded by read_len_table() from read_old_huffman_tables(). */
81 static const unsigned char classic_shift_luma[] = {
82 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
83 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
87 static const unsigned char classic_shift_chroma[] = {
88 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
89 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
90 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
/* Huffman code values for the classic (v1) luma table, one entry per symbol;
 * copied into s->bits[0] by read_old_huffman_tables(). */
93 static const unsigned char classic_add_luma[256] = {
94 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
95 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
96 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
97 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
98 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
99 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
100 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
101 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
102 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
103 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
104 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
105 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
106 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
107 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
108 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
109 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
/* Huffman code values for the classic (v1) chroma table, one entry per
 * symbol; copied into s->bits[1] by read_old_huffman_tables(). */
112 static const unsigned char classic_add_chroma[256] = {
113 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
114 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
115 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
116 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
117 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
118 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
119 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
120 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
121 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
122 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
123 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
124 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
125 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
126 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
127 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
128 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
/* Decoder helper: reconstructs a row from left-predicted residuals; `acc`
 * carries the running predictor in, and the final value is returned so the
 * prediction can continue on the next call.
 * NOTE(review): the loop body is elided in this listing -- confirm the exact
 * accumulation against the full source. */
131 static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
134 for(i=0; i<w-1; i++){
/* Decoder helper: median prediction -- each pixel is
 * mid_pred(left, top, left+top-topleft) plus the residual from `diff`;
 * *left and *left_top are in/out running state. */
150 static inline void add_median_prediction(uint8_t *dst, uint8_t *src1, uint8_t *diff, int w, int *left, int *left_top){
158 l= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF) + diff[i];
/* Decoder helper for packed BGR32 rows: per-channel left prediction with the
 * running red/green/blue values kept in the out-parameters (body elided). */
167 static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
/* Encoder counterpart: writes left-prediction residuals for a row.  The first
 * pixels are handled scalar; from offset 16 onward the bulk difference is
 * done by the optimized dsp.diff_bytes(dst, src, src-1) routine. */
189 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
193 const int temp= src[i];
200 const int temp= src[i];
204 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
/* Expands a run-length coded table of Huffman code lengths from the
 * bitstream: pairs of (3-bit repeat, 5-bit value), where a repeat of 0 is
 * presumably followed by an explicit 8-bit repeat count -- the surrounding
 * control flow is elided in this listing, confirm against the full source. */
209 static void read_len_table(uint8_t *dst, GetBitContext *gb){
213 repeat= get_bits(gb, 3);
214 val = get_bits(gb, 5);
216 repeat= get_bits(gb, 8);
217 //printf("%d %d\n", val, repeat);
/* Assigns canonical Huffman code values from the code lengths, walking the
 * lengths from longest (32) to shortest; logs and fails (<0) if the length
 * table is inconsistent. */
223 static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
227 for(len=32; len>0; len--){
228 for(index=0; index<256; index++){
229 if(len_table[index]==len)
233 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
241 #ifdef CONFIG_ENCODERS
/* Builds Huffman code lengths from symbol statistics: repeatedly merges the
 * two smallest counts (min1/min2) into parent nodes, then walks the up[]
 * links to measure each leaf's depth.  The outer loop retries with a growing
 * `offset` bias added to every count -- presumably to flatten the
 * distribution until the maximum code length fits; several lines are elided
 * in this listing, confirm against the full source.
 * NOTE(review): counts[2*size] is a VLA on the stack. */
242 static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
243 uint64_t counts[2*size];
247 for(offset=1; ; offset<<=1){
248 for(i=0; i<size; i++){
249 counts[i]= stats[i] + offset - 1;
252 for(next=size; next<size*2; next++){
256 min1=min2= INT64_MAX;
259 for(i=0; i<next; i++){
260 if(min2 > counts[i]){
261 if(min1 > counts[i]){
/* no mergeable pair left: the tree is complete */
273 if(min2==INT64_MAX) break;
275 counts[next]= min1 + min2;
/* mark a merged node as consumed so it is never picked again */
277 counts[min2_i]= INT64_MAX;
283 for(i=0; i<size; i++){
/* code length = depth of the leaf in the merged tree */
287 for(len=0; up[index] != -1; len++)
297 #endif /* CONFIG_ENCODERS */
/* Reads the three per-plane Huffman length tables from `src` (extradata or a
 * per-frame header), derives the code values, and (re)builds the VLC decode
 * tables.  Returns the number of bytes consumed, or <0 on error. */
299 static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
303 init_get_bits(&gb, src, length*8);
306 read_len_table(s->len[i], &gb);
308 if(generate_bits_table(s->bits[i], s->len[i])<0){
/* debug dump of the generated code/length pairs */
312 for(j=0; j<256; j++){
313 printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
/* free any previous table before rebuilding (tables can change per frame) */
316 free_vlc(&s->vlc[i]);
317 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
320 return (get_bits_count(&gb)+7)/8;
/* Installs the hardcoded "classic" tables: code lengths come from the RLE
 * classic_shift_* blobs, code values from the classic_add_* arrays.  For
 * streams of >= 24 bpp the luma table is reused for the other planes. */
323 static int read_old_huffman_tables(HYuvContext *s){
328 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
329 read_len_table(s->len[0], &gb);
330 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
331 read_len_table(s->len[1], &gb);
333 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
334 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
336 if(s->bitstream_bpp >= 24){
337 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
338 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
340 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
341 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
344 free_vlc(&s->vlc[i]);
345 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
350 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
/* Allocates the per-plane scratch rows: one width+16 buffer per plane for
 * planar (<24 bpp) modes, and a single 4*width+16 buffer for packed RGB. */
355 static void alloc_temp(HYuvContext *s){
358 if(s->bitstream_bpp<24){
360 s->temp[i]= av_malloc(s->width + 16);
363 s->temp[0]= av_malloc(4*s->width + 16);
/* Initialization shared by the decoder and encoder: caches flags and frame
 * dimensions and sets up the DSP function pointers. */
367 static int common_init(AVCodecContext *avctx){
368 HYuvContext *s = avctx->priv_data;
371 s->flags= avctx->flags;
373 dsputil_init(&s->dsp, avctx);
375 s->width= avctx->width;
376 s->height= avctx->height;
377 assert(s->width>0 && s->height>0);
382 #ifdef CONFIG_DECODERS
/* Decoder setup: parses the codec extradata (when present) or falls back to
 * heuristics for old (v1/v2) streams, then picks the output pixel format. */
383 static int decode_init(AVCodecContext *avctx)
385 HYuvContext *s = avctx->priv_data;
388 memset(s->vlc, 0, 3*sizeof(VLC));
390 avctx->coded_frame= &s->picture;
/* historical heuristic: streams taller than 288 lines default to interlaced */
391 s->interlaced= s->height > 288;
394 //if(avctx->extradata)
395 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
396 if(avctx->extradata_size){
397 if((avctx->bits_per_sample&7) && avctx->bits_per_sample != 12)
398 s->version=1; // do such files exist at all?
/* extradata layout (visible here): byte 0 = method (bit 6: decorrelate,
 * bits 0-5: predictor), byte 1 = bpp, byte 2 = interlace (bits 4-5) and
 * context-model flag (bit 6); Huffman tables start at byte 4. */
405 int method, interlace;
407 method= ((uint8_t*)avctx->extradata)[0];
408 s->decorrelate= method&64 ? 1 : 0;
409 s->predictor= method&63;
410 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
411 if(s->bitstream_bpp==0)
412 s->bitstream_bpp= avctx->bits_per_sample&~7;
413 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
/* 1 = interlaced, 2 = progressive, anything else keeps the height heuristic */
414 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
415 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
417 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
/* no extradata: derive mode from bits_per_sample (elided cases in listing) */
420 switch(avctx->bits_per_sample&7){
431 s->decorrelate= avctx->bits_per_sample >= 24;
434 s->predictor= MEDIAN;
438 s->predictor= LEFT; //OLD
442 s->bitstream_bpp= avctx->bits_per_sample & ~7;
445 if(read_old_huffman_tables(s) < 0)
/* map the bitstream bpp onto an output pixel format */
449 switch(s->bitstream_bpp){
451 avctx->pix_fmt = PIX_FMT_YUV420P;
455 avctx->pix_fmt = PIX_FMT_YUV422;
457 avctx->pix_fmt = PIX_FMT_YUV422P;
463 avctx->pix_fmt = PIX_FMT_RGBA32;
465 avctx->pix_fmt = PIX_FMT_BGR24;
474 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);
480 #ifdef CONFIG_ENCODERS
/* Serializes one Huffman length table as (repeat,val) run-length pairs --
 * the inverse of read_len_table().  Returns the number of bytes written.
 * Runs too long for the 3-bit repeat field appear to get an explicit repeat
 * byte (control flow partially elided in this listing). */
481 static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
489 for(; i<256 && len[i]==val && repeat<255; i++)
492 assert(val < 32 && val >0 && repeat<256 && repeat>0);
495 buf[index++]= repeat;
497 buf[index++]= val | (repeat<<5);
/* Encoder setup: validates the pixel format, writes the 4-byte extradata
 * header plus the serialized Huffman tables, and seeds the symbol statistics
 * either from stats_in (2nd pass) or from a synthetic distribution. */
504 static int encode_init(AVCodecContext *avctx)
506 HYuvContext *s = avctx->priv_data;
511 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
512 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
515 avctx->coded_frame= &s->picture;
517 switch(avctx->pix_fmt){
518 case PIX_FMT_YUV420P:
519 s->bitstream_bpp= 12;
521 case PIX_FMT_YUV422P:
522 s->bitstream_bpp= 16;
525 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
528 avctx->bits_per_sample= s->bitstream_bpp;
529 s->decorrelate= s->bitstream_bpp >= 24;
530 s->predictor= avctx->prediction_method;
531 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
/* adaptive per-frame tables (context model) cannot be combined with 2-pass */
532 if(avctx->context_model==1){
533 s->context= avctx->context_model;
534 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
535 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
/* restrictions of the original HUFFYUV codec (the FFVHUFF variant lifts them) */
540 if(avctx->codec->id==CODEC_ID_HUFFYUV){
541 if(avctx->pix_fmt==PIX_FMT_YUV420P){
542 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
545 if(avctx->context_model){
546 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
549 if(s->interlaced != ( s->height > 288 ))
550 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
/* extradata header: [0]=predictor, [1]=bpp, [2]=interlace+context flags */
553 ((uint8_t*)avctx->extradata)[0]= s->predictor;
554 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
555 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
557 ((uint8_t*)avctx->extradata)[2]|= 0x40;
558 ((uint8_t*)avctx->extradata)[3]= 0;
559 s->avctx->extradata_size= 4;
/* 2nd pass: accumulate stats parsed back from the rate-control string */
562 char *p= avctx->stats_in;
572 for(j=0; j<256; j++){
573 s->stats[i][j]+= strtol(p, &next, 0);
574 if(next==p) return -1;
578 if(p[0]==0 || p[1]==0 || p[2]==0) break;
/* no stats available: seed with a symmetric 1/(d+1) distribution */
582 for(j=0; j<256; j++){
583 int d= FFMIN(j, 256-j);
585 s->stats[i][j]= 100000000/(d+1);
590 generate_len_table(s->len[i], s->stats[i], 256);
592 if(generate_bits_table(s->bits[i], s->len[i])<0){
596 s->avctx->extradata_size+=
597 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
/* context mode: reset stats to a frame-size-scaled prior (chroma weighted
 * lower than luma via the i?40:10 divisor) */
602 int pels = s->width*s->height / (i?40:10);
603 for(j=0; j<256; j++){
604 int d= FFMIN(j, 256-j);
605 s->stats[i][j]= pels/(d+1);
614 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);
/* Decodes `count` 4:2:2 sample groups (Y U Y V symbol order) from the
 * bitstream into the per-plane temp row buffers. */
624 static void decode_422_bitstream(HYuvContext *s, int count){
629 for(i=0; i<count; i++){
630 s->temp[0][2*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
631 s->temp[1][ i ]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
632 s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
633 s->temp[2][ i ]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
/* Luma-only variant: decodes 2*count Y samples into temp[0] -- used by
 * decode_frame() for the chroma-less rows of 4:2:0 (bpp==12) streams. */
637 static void decode_gray_bitstream(HYuvContext *s, int count){
642 for(i=0; i<count; i++){
643 s->temp[0][2*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
644 s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
648 #ifdef CONFIG_ENCODERS
/* Encodes `count` 4:2:2 sample groups from the temp buffers.  Three paths:
 * PASS1 gathers statistics only; with the adaptive context model both stats
 * and bits are produced (unless NO_OUTPUT); otherwise bits only. */
649 static int encode_422_bitstream(HYuvContext *s, int count){
/* worst case is 4 codes of up to ~4 bytes each per group */
652 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
653 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
658 if(s->flags&CODEC_FLAG_PASS1){
659 for(i=0; i<count; i++){
660 s->stats[0][ s->temp[0][2*i ] ]++;
661 s->stats[1][ s->temp[1][ i ] ]++;
662 s->stats[0][ s->temp[0][2*i+1] ]++;
663 s->stats[2][ s->temp[2][ i ] ]++;
666 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
/* context model: update stats while emitting codes */
669 for(i=0; i<count; i++){
670 s->stats[0][ s->temp[0][2*i ] ]++;
671 put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
672 s->stats[1][ s->temp[1][ i ] ]++;
673 put_bits(&s->pb, s->len[1][ s->temp[1][ i ] ], s->bits[1][ s->temp[1][ i ] ]);
674 s->stats[0][ s->temp[0][2*i+1] ]++;
675 put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
676 s->stats[2][ s->temp[2][ i ] ]++;
677 put_bits(&s->pb, s->len[2][ s->temp[2][ i ] ], s->bits[2][ s->temp[2][ i ] ]);
/* plain path: codes only */
680 for(i=0; i<count; i++){
681 put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
682 put_bits(&s->pb, s->len[1][ s->temp[1][ i ] ], s->bits[1][ s->temp[1][ i ] ]);
683 put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
684 put_bits(&s->pb, s->len[2][ s->temp[2][ i ] ], s->bits[2][ s->temp[2][ i ] ]);
/* Luma-only variant with the same three paths (PASS1 / context / plain). */
690 static int encode_gray_bitstream(HYuvContext *s, int count){
693 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
694 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
699 if(s->flags&CODEC_FLAG_PASS1){
700 for(i=0; i<count; i++){
701 s->stats[0][ s->temp[0][2*i ] ]++;
702 s->stats[0][ s->temp[0][2*i+1] ]++;
705 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
709 for(i=0; i<count; i++){
710 s->stats[0][ s->temp[0][2*i ] ]++;
711 put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
712 s->stats[0][ s->temp[0][2*i+1] ]++;
713 put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
716 for(i=0; i<count; i++){
717 put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
718 put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
723 #endif /* CONFIG_ENCODERS */
/* Decodes `count` BGR pixels into temp[0], 4 bytes per pixel.  In the first
 * (presumably decorrelated) branch B and R are coded as differences from G
 * and reconstructed here; in the second they are coded directly.  32 bpp
 * streams carry a fourth symbol per pixel that is read and discarded ("?!"
 * per the original author).  The enclosing decorrelate test is elided in
 * this listing -- confirm against the full source. */
725 static void decode_bgr_bitstream(HYuvContext *s, int count){
729 if(s->bitstream_bpp==24){
730 for(i=0; i<count; i++){
731 s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
732 s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
733 s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
736 for(i=0; i<count; i++){
737 s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
738 s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
739 s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
740 get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
744 if(s->bitstream_bpp==24){
745 for(i=0; i<count; i++){
746 s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
747 s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
748 s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
751 for(i=0; i<count; i++){
752 s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
753 s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
754 s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
755 get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
761 #ifdef CONFIG_DECODERS
/* Hands the rows decoded so far ([last_slice_end, y)) to the user's
 * draw_horiz_band() callback, tracking progress in s->last_slice_end.
 * A no-op when no callback is registered. */
762 static void draw_slice(HYuvContext *s, int y){
766 if(s->avctx->draw_horiz_band==NULL)
769 h= y - s->last_slice_end;
/* 4:2:0 (bpp==12) chroma rows are at half vertical resolution (cy) */
772 if(s->bitstream_bpp==12){
778 offset[0] = s->picture.linesize[0]*y;
779 offset[1] = s->picture.linesize[1]*cy;
780 offset[2] = s->picture.linesize[2]*cy;
784 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
786 s->last_slice_end= y + h;
/* Decodes one huffyuv frame.  Copies (and byte-swaps) the packet into a
 * padded scratch buffer, optionally reads per-frame Huffman tables (context
 * mode), then reconstructs the image plane by plane according to the
 * predictor (LEFT / PLANE / MEDIAN) and bitstream bpp.  Returns the number
 * of bytes consumed.  NOTE(review): many lines are elided in this listing. */
789 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size){
790 HYuvContext *s = avctx->priv_data;
791 const int width= s->width;
792 const int width2= s->width>>1;
793 const int height= s->height;
794 int fake_ystride, fake_ustride, fake_vstride;
795 AVFrame * const p= &s->picture;
798 AVFrame *picture = data;
/* padded scratch copy of the packet; the stream is stored as 32-bit
 * big-endian words, hence the word-wise byte swap */
800 s->bitstream_buffer= av_fast_realloc(s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
802 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (uint32_t*)buf, buf_size/4);
805 avctx->release_buffer(avctx, p);
808 if(avctx->get_buffer(avctx, p) < 0){
809 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
/* context mode: each frame carries its own Huffman tables up front */
814 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
/* guard against overflow in the bit-count below */
819 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
822 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
/* interlaced frames predict from the line two rows up (same field) */
824 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
825 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
826 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
828 s->last_slice_end= 0;
830 if(s->bitstream_bpp<24){
832 int lefty, leftu, leftv;
833 int lefttopy, lefttopu, lefttopv;
/* the first 4 samples are stored raw (8 bits each) to seed the predictors */
836 p->data[0][3]= get_bits(&s->gb, 8);
837 p->data[0][2]= get_bits(&s->gb, 8);
838 p->data[0][1]= get_bits(&s->gb, 8);
839 p->data[0][0]= get_bits(&s->gb, 8);
841 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
845 leftv= p->data[2][0]= get_bits(&s->gb, 8);
846 lefty= p->data[0][1]= get_bits(&s->gb, 8);
847 leftu= p->data[1][0]= get_bits(&s->gb, 8);
848 p->data[0][0]= get_bits(&s->gb, 8);
850 switch(s->predictor){
/* LEFT / PLANE: first line minus the seed pixels is left-predicted */
853 decode_422_bitstream(s, width-2);
854 lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
855 if(!(s->flags&CODEC_FLAG_GRAY)){
856 leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
857 leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
860 for(cy=y=1; y<s->height; y++,cy++){
861 uint8_t *ydst, *udst, *vdst;
/* 4:2:0: every other row is luma-only */
863 if(s->bitstream_bpp==12){
864 decode_gray_bitstream(s, width);
866 ydst= p->data[0] + p->linesize[0]*y;
868 lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
869 if(s->predictor == PLANE){
/* PLANE adds the row above (same field) on top of the left prediction */
871 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
874 if(y>=s->height) break;
879 ydst= p->data[0] + p->linesize[0]*y;
880 udst= p->data[1] + p->linesize[1]*cy;
881 vdst= p->data[2] + p->linesize[2]*cy;
883 decode_422_bitstream(s, width);
884 lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
885 if(!(s->flags&CODEC_FLAG_GRAY)){
886 leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
887 leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
889 if(s->predictor == PLANE){
890 if(cy>s->interlaced){
891 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
892 if(!(s->flags&CODEC_FLAG_GRAY)){
893 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
894 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
899 draw_slice(s, height);
/* MEDIAN predictor path */
903 /* first line except first 2 pixels is left predicted */
904 decode_422_bitstream(s, width-2);
905 lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
906 if(!(s->flags&CODEC_FLAG_GRAY)){
907 leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
908 leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
913 /* second line is left predicted for interlaced case */
915 decode_422_bitstream(s, width);
916 lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
917 if(!(s->flags&CODEC_FLAG_GRAY)){
/* NOTE(review): linesize[2] is used for plane 1 and linesize[1] for plane 2
 * here -- looks like swapped indices; harmless only if both strides are
 * equal.  Confirm against the full source / upstream history. */
918 leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
919 leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
924 /* next 4 pixels are left predicted too */
925 decode_422_bitstream(s, 4);
926 lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
927 if(!(s->flags&CODEC_FLAG_GRAY)){
928 leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
929 leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
932 /* next line except the first 4 pixels is median predicted */
933 lefttopy= p->data[0][3];
934 decode_422_bitstream(s, width-4);
935 add_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
936 if(!(s->flags&CODEC_FLAG_GRAY)){
937 lefttopu= p->data[1][1];
938 lefttopv= p->data[2][1];
939 add_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
940 add_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
/* remaining lines are fully median predicted */
944 for(; y<height; y++,cy++){
945 uint8_t *ydst, *udst, *vdst;
947 if(s->bitstream_bpp==12){
949 decode_gray_bitstream(s, width);
950 ydst= p->data[0] + p->linesize[0]*y;
951 add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
958 decode_422_bitstream(s, width);
960 ydst= p->data[0] + p->linesize[0]*y;
961 udst= p->data[1] + p->linesize[1]*cy;
962 vdst= p->data[2] + p->linesize[2]*cy;
964 add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
965 if(!(s->flags&CODEC_FLAG_GRAY)){
966 add_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
967 add_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
971 draw_slice(s, height);
/* packed RGB path: the image is stored bottom-up, last line first */
977 int leftr, leftg, leftb;
978 const int last_line= (height-1)*p->linesize[0];
/* seed pixel: 32 bpp streams lead with a padding byte, 24 bpp trail with one */
980 if(s->bitstream_bpp==32){
981 skip_bits(&s->gb, 8);
982 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
983 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
984 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
986 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
987 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
988 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
989 skip_bits(&s->gb, 8);
993 switch(s->predictor){
996 decode_bgr_bitstream(s, width-1);
997 add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);
999 for(y=s->height-2; y>=0; y--){ //yes its stored upside down
1000 decode_bgr_bitstream(s, width);
1002 add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
1003 if(s->predictor == PLANE){
1004 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1005 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1006 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1010 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1013 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1017 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1024 *data_size = sizeof(AVFrame);
/* consumed bytes, rounded up to whole 32-bit words, plus the table header */
1026 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
/* Teardown shared by decoder and encoder: frees the per-plane temp rows. */
1030 static int common_end(HYuvContext *s){
1034 av_freep(&s->temp[i]);
1039 #ifdef CONFIG_DECODERS
/* Decoder teardown: releases the packet scratch buffer and the VLC tables. */
1040 static int decode_end(AVCodecContext *avctx)
1042 HYuvContext *s = avctx->priv_data;
1046 av_freep(&s->bitstream_buffer);
1049 free_vlc(&s->vlc[i]);
1056 #ifdef CONFIG_ENCODERS
/* Encodes one frame: in context mode first rebuilds and stores the Huffman
 * tables from the running stats, then writes the raw seed pixels and the
 * per-row residuals (LEFT / PLANE / MEDIAN predictors), and finally
 * byte-swaps the output into the stream's 32-bit word order.
 * NOTE(review): many lines are elided in this listing. */
1057 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1058 HYuvContext *s = avctx->priv_data;
1059 AVFrame *pict = data;
1060 const int width= s->width;
1061 const int width2= s->width>>1;
1062 const int height= s->height;
/* interlaced frames predict from two rows up (same field) */
1063 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1064 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1065 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1066 AVFrame * const p= &s->picture;
1070 p->pict_type= FF_I_TYPE;
/* context mode: regenerate tables from stats and prepend them to the frame,
 * then decay the stats so the model adapts over time */
1075 generate_len_table(s->len[i], s->stats[i], 256);
1076 if(generate_bits_table(s->bits[i], s->len[i])<0)
1078 size+= store_table(s, s->len[i], &buf[size]);
1082 for(j=0; j<256; j++)
1083 s->stats[i][j] >>= 1;
1086 init_put_bits(&s->pb, buf+size, buf_size-size);
1088 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1089 int lefty, leftu, leftv, y, cy;
/* raw seed samples, mirrored by the decoder's get_bits() sequence */
1091 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1092 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1093 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1094 put_bits(&s->pb, 8, p->data[0][0]);
/* first line minus the seed pixels is left-predicted */
1096 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
1097 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
1098 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);
1100 encode_422_bitstream(s, width-2);
1102 if(s->predictor==MEDIAN){
1103 int lefttopy, lefttopu, lefttopv;
/* second line left-predicted (covers the interlaced same-field case) */
1106 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1107 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1108 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1110 encode_422_bitstream(s, width);
/* first 4 pixels of the next line are still left-predicted */
1114 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1115 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1116 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1118 encode_422_bitstream(s, 4);
/* rest of that line switches to median prediction */
1120 lefttopy= p->data[0][3];
1121 lefttopu= p->data[1][1];
1122 lefttopv= p->data[2][1];
1123 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1124 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1125 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1126 encode_422_bitstream(s, width-4);
/* remaining lines are fully median predicted */
1129 for(; y<height; y++,cy++){
1130 uint8_t *ydst, *udst, *vdst;
/* 4:2:0: every other row is luma-only */
1132 if(s->bitstream_bpp==12){
1134 ydst= p->data[0] + p->linesize[0]*y;
1135 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1136 encode_gray_bitstream(s, width);
1139 if(y>=height) break;
1141 ydst= p->data[0] + p->linesize[0]*y;
1142 udst= p->data[1] + p->linesize[1]*cy;
1143 vdst= p->data[2] + p->linesize[2]*cy;
1145 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1146 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1147 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1149 encode_422_bitstream(s, width);
/* LEFT / PLANE predictors */
1152 for(cy=y=1; y<height; y++,cy++){
1153 uint8_t *ydst, *udst, *vdst;
1155 /* encode a luma only line & y++ */
1156 if(s->bitstream_bpp==12){
1157 ydst= p->data[0] + p->linesize[0]*y;
/* PLANE first differences against the row above, then left-predicts */
1159 if(s->predictor == PLANE && s->interlaced < y){
1160 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1162 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1164 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1166 encode_gray_bitstream(s, width);
1168 if(y>=height) break;
1171 ydst= p->data[0] + p->linesize[0]*y;
1172 udst= p->data[1] + p->linesize[1]*cy;
1173 vdst= p->data[2] + p->linesize[2]*cy;
1175 if(s->predictor == PLANE && s->interlaced < cy){
1176 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1177 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
/* V differences are packed into the upper half of temp[2] */
1178 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1180 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1181 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1182 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1184 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1185 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1186 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1189 encode_422_bitstream(s, width);
1193 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1197 size+= (put_bits_count(&s->pb)+31)/8;
/* PASS1: periodically dump the accumulated stats for the second pass */
1200 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1202 char *p= avctx->stats_out;
1203 char *end= p + 1024*30;
1205 for(j=0; j<256; j++){
1206 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1210 snprintf(p, end-p, "\n");
/* normal output: flush and byte-swap into the stream's word order */
1214 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1215 flush_put_bits(&s->pb);
1216 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1217 avctx->stats_out[0] = '\0';
1220 s->picture_number++;
/* Encoder teardown: frees the extradata header and the stats_out buffer. */
1225 static int encode_end(AVCodecContext *avctx)
1227 HYuvContext *s = avctx->priv_data;
1231 av_freep(&avctx->extradata);
1232 av_freep(&avctx->stats_out);
1236 #endif /* CONFIG_ENCODERS */
1238 #ifdef CONFIG_DECODERS
/* Codec registration tables.  NOTE(review): the name/id/callback fields are
 * elided in this listing; only sizes, capabilities and pix_fmts are visible. */
1239 AVCodec huffyuv_decoder = {
1243 sizeof(HYuvContext),
1248 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1252 AVCodec ffvhuff_decoder = {
1256 sizeof(HYuvContext),
1261 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1266 #ifdef CONFIG_ENCODERS
1268 AVCodec huffyuv_encoder = {
1272 sizeof(HYuvContext),
1276 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV422P, -1},
1279 AVCodec ffvhuff_encoder = {
1283 sizeof(HYuvContext),
1287 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, -1},
1290 #endif //CONFIG_ENCODERS