2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
26 * huffyuv codec for libavcodec.
30 #include "bitstream.h"
36 #ifdef WORDS_BIGENDIAN
46 typedef enum Predictor{
52 typedef struct HYuvContext{
53 AVCodecContext *avctx;
61 int yuy2; //use yuy2 instead of 422P
62 int bgr32; //use bgr32 instead of bgr24
69 uint64_t stats[3][256];
71 uint32_t bits[3][256];
74 uint8_t *bitstream_buffer;
75 unsigned int bitstream_buffer_size;
/* Run-length coded Huffman code-length table for the "classic" (v1, no
 * extradata) huffyuv luma plane; consumed bit-by-bit by read_len_table().
 * NOTE(review): fragment — the trailing rows and closing brace of this
 * table are missing from this extract. */
79 static const unsigned char classic_shift_luma[] = {
80 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
81 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
/* Run-length coded Huffman code-length table for the "classic" huffyuv
 * chroma planes; paired with classic_shift_luma and decoded by
 * read_len_table(). NOTE(review): fragment — closing brace missing here. */
85 static const unsigned char classic_shift_chroma[] = {
86 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
87 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
88 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
/* Per-symbol code values for the classic luma Huffman table (copied into
 * s->bits[0] in read_old_huffman_tables(), indexed by symbol 0..255).
 * NOTE(review): fragment — closing brace missing from this extract. */
91 static const unsigned char classic_add_luma[256] = {
92 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
93 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
94 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
95 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
96 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
97 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
98 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
99 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
100 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
101 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
102 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
103 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
104 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
105 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
106 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
107 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
/* Per-symbol code values for the classic chroma Huffman table (copied into
 * s->bits[1] in read_old_huffman_tables(), indexed by symbol 0..255).
 * NOTE(review): fragment — closing brace missing from this extract. */
110 static const unsigned char classic_add_chroma[256] = {
111 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
112 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
113 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
114 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
115 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
116 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
117 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
118 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
119 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
120 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
121 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
122 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
123 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
124 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
125 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
126 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
129 static inline int add_left_prediction(uint8_t *dst, uint8_t *src, int w, int acc){
132 for(i=0; i<w-1; i++){
148 static inline void add_median_prediction(uint8_t *dst, uint8_t *src1, uint8_t *diff, int w, int *left, int *left_top){
156 l= mid_pred(l, src1[i], (l + src1[i] - lt)&0xFF) + diff[i];
165 static inline void add_left_prediction_bgr32(uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
187 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
191 const int temp= src[i];
198 const int temp= src[i];
202 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
/* Decode a run-length coded Huffman code-length table into dst[256].
 * NOTE(review): fragment — the loop and run-expansion logic around these
 * lines is missing from this extract. */
207 static void read_len_table(uint8_t *dst, GetBitContext *gb){
211 repeat= get_bits(gb, 3);   /* 3-bit run length; presumably an escape triggers the 8-bit form below — confirm */
212 val = get_bits(gb, 5);     /* 5-bit code length (fits lengths 0..31) */
214 repeat= get_bits(gb, 8);   /* extended 8-bit run length */
215 //printf("%d %d\n", val, repeat);
/* Build canonical Huffman bit patterns in dst[256] from the code lengths,
 * assigning codes from the longest length (32) downward; logs and
 * presumably returns an error on an inconsistent length table — the
 * return paths are missing from this extract. */
221 static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
225 for(len=32; len>0; len--){
226 for(index=0; index<256; index++){
227 if(len_table[index]==len)
231 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
239 #ifdef CONFIG_ENCODERS
/* Encoder: derive Huffman code lengths dst[size] from symbol statistics.
 * Builds the tree by repeatedly merging the two smallest counts (min1/min2);
 * the outer offset loop appears to bias counts upward and retry, presumably
 * to limit the maximum code length — TODO confirm against full source.
 * NOTE(review): fragment — tree bookkeeping (up[]/min indices) is missing. */
240 static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
241 uint64_t counts[2*size];
245 for(offset=1; ; offset<<=1){
246 for(i=0; i<size; i++){
247 counts[i]= stats[i] + offset - 1;
250 for(next=size; next<size*2; next++){
254 min1=min2= INT64_MAX;
257 for(i=0; i<next; i++){
258 if(min2 > counts[i]){
259 if(min1 > counts[i]){
271 if(min2==INT64_MAX) break;  /* fewer than two live nodes left: tree done */
273 counts[next]= min1 + min2;  /* internal node weight = sum of children */
275 counts[min2_i]= INT64_MAX;  /* retire merged leaf */
281 for(i=0; i<size; i++){
285 for(len=0; up[index] != -1; len++)  /* code length = depth walking up the tree */
295 #endif /* CONFIG_ENCODERS */
/* Parse the three per-plane Huffman tables (len + derived bits) from src and
 * (re)build the decoder VLC tables. Returns the number of bytes consumed.
 * NOTE(review): fragment — the i-loop over the 3 planes and error returns
 * are missing from this extract. */
297 static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
301 init_get_bits(&gb, src, length*8);
304 read_len_table(s->len[i], &gb);
306 if(generate_bits_table(s->bits[i], s->len[i])<0){
310 for(j=0; j<256; j++){
311 printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);  /* debug dump — presumably compiled out; confirm */
314 free_vlc(&s->vlc[i]);   /* free any previous table before rebuilding */
315 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
318 return (get_bits_count(&gb)+7)/8;   /* bytes consumed, rounded up */
/* Install the hardcoded "classic" (v1/v2, no extradata) Huffman tables:
 * lengths come from the run-length coded classic_shift_* blobs, code values
 * from classic_add_*, then the VLCs are rebuilt.
 * NOTE(review): fragment — the i-loop and return are missing here. */
321 static int read_old_huffman_tables(HYuvContext *s){
326 init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
327 read_len_table(s->len[0], &gb);
328 init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
329 read_len_table(s->len[1], &gb);
331 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
332 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
334 if(s->bitstream_bpp >= 24){
335 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));  /* RGB: all planes share the luma table */
336 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
338 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
339 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
342 free_vlc(&s->vlc[i]);
343 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
348 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
/* Allocate the per-plane scratch line buffers: one width+16 buffer per plane
 * for planar (<24 bpp) formats, a single 4*width+16 buffer for packed RGB.
 * NOTE(review): fragment — the plane loop bounds are missing here. */
353 static void alloc_temp(HYuvContext *s){
356 if(s->bitstream_bpp<24){
358 s->temp[i]= av_malloc(s->width + 16);     /* +16 padding, presumably for SIMD overread — confirm */
361 s->temp[0]= av_malloc(4*s->width + 16);
/* Shared decoder/encoder init: cache codec context, flags, DSP functions
 * and frame dimensions. NOTE(review): fragment — return missing here. */
365 static int common_init(AVCodecContext *avctx){
366 HYuvContext *s = avctx->priv_data;
369 s->flags= avctx->flags;
371 dsputil_init(&s->dsp, avctx);
373 s->width= avctx->width;
374 s->height= avctx->height;
375 assert(s->width>0 && s->height>0);
380 #ifdef CONFIG_DECODERS
381 static int decode_init(AVCodecContext *avctx)
383 HYuvContext *s = avctx->priv_data;
386 memset(s->vlc, 0, 3*sizeof(VLC));
388 avctx->coded_frame= &s->picture;
389 s->interlaced= s->height > 288;
392 //if(avctx->extradata)
393 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
394 if(avctx->extradata_size){
395 if((avctx->bits_per_sample&7) && avctx->bits_per_sample != 12)
396 s->version=1; // do such files exist at all?
403 int method, interlace;
405 method= ((uint8_t*)avctx->extradata)[0];
406 s->decorrelate= method&64 ? 1 : 0;
407 s->predictor= method&63;
408 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
409 if(s->bitstream_bpp==0)
410 s->bitstream_bpp= avctx->bits_per_sample&~7;
411 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
412 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
413 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
415 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
418 switch(avctx->bits_per_sample&7){
429 s->decorrelate= avctx->bits_per_sample >= 24;
432 s->predictor= MEDIAN;
436 s->predictor= LEFT; //OLD
440 s->bitstream_bpp= avctx->bits_per_sample & ~7;
443 if(read_old_huffman_tables(s) < 0)
447 switch(s->bitstream_bpp){
449 avctx->pix_fmt = PIX_FMT_YUV420P;
453 avctx->pix_fmt = PIX_FMT_YUV422;
455 avctx->pix_fmt = PIX_FMT_YUV422P;
461 avctx->pix_fmt = PIX_FMT_RGBA32;
463 avctx->pix_fmt = PIX_FMT_BGR24;
472 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);
478 #ifdef CONFIG_ENCODERS
/* Encoder: run-length encode one 256-entry code-length table into buf
 * (inverse of read_len_table); returns the number of bytes written —
 * the index/return lines are missing from this extract. */
479 static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
487 for(; i<256 && len[i]==val && repeat<255; i++)   /* count run of identical lengths */
490 assert(val < 32 && val >0 && repeat<256 && repeat>0);
493 buf[index++]= repeat;            /* long-run form: separate repeat byte */
495 buf[index++]= val | (repeat<<5); /* short-run form: 5-bit val + 3-bit repeat packed */
502 static int encode_init(AVCodecContext *avctx)
504 HYuvContext *s = avctx->priv_data;
509 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
510 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
513 avctx->coded_frame= &s->picture;
515 switch(avctx->pix_fmt){
516 case PIX_FMT_YUV420P:
517 s->bitstream_bpp= 12;
519 case PIX_FMT_YUV422P:
520 s->bitstream_bpp= 16;
523 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
526 avctx->bits_per_sample= s->bitstream_bpp;
527 s->decorrelate= s->bitstream_bpp >= 24;
528 s->predictor= avctx->prediction_method;
529 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
530 if(avctx->context_model==1){
531 s->context= avctx->context_model;
532 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
533 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
538 if(avctx->codec->id==CODEC_ID_HUFFYUV){
539 if(avctx->pix_fmt==PIX_FMT_YUV420P){
540 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
543 if(avctx->context_model){
544 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
547 if(s->interlaced != ( s->height > 288 ))
548 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
551 ((uint8_t*)avctx->extradata)[0]= s->predictor;
552 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
553 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
555 ((uint8_t*)avctx->extradata)[2]|= 0x40;
556 ((uint8_t*)avctx->extradata)[3]= 0;
557 s->avctx->extradata_size= 4;
560 char *p= avctx->stats_in;
570 for(j=0; j<256; j++){
571 s->stats[i][j]+= strtol(p, &next, 0);
572 if(next==p) return -1;
576 if(p[0]==0 || p[1]==0 || p[2]==0) break;
580 for(j=0; j<256; j++){
581 int d= FFMIN(j, 256-j);
583 s->stats[i][j]= 100000000/(d+1);
588 generate_len_table(s->len[i], s->stats[i], 256);
590 if(generate_bits_table(s->bits[i], s->len[i])<0){
594 s->avctx->extradata_size+=
595 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
600 int pels = s->width*s->height / (i?40:10);
601 for(j=0; j<256; j++){
602 int d= FFMIN(j, 256-j);
603 s->stats[i][j]= pels/(d+1);
612 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);
620 #endif /* CONFIG_ENCODERS */
/* Decode `count` Y,U,Y,V quadruples (4:2:2 order) into the per-plane
 * temp buffers: 2 luma samples per iteration, 1 of each chroma. */
622 static void decode_422_bitstream(HYuvContext *s, int count){
627 for(i=0; i<count; i++){
628 s->temp[0][2*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
629 s->temp[1][ i ]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
630 s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
631 s->temp[2][ i ]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
/* Decode 2*count luma-only samples into temp[0] (used for the
 * luma-only lines of the 4:2:0 layout and for gray output). */
635 static void decode_gray_bitstream(HYuvContext *s, int count){
640 for(i=0; i<count; i++){
641 s->temp[0][2*i ]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
642 s->temp[0][2*i+1]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
646 #ifdef CONFIG_ENCODERS
/* Encode `count` Y,U,Y,V quadruples from the temp buffers into the bitstream.
 * Three paths: stats-only (pass 1), stats+output (context/2-pass), and
 * plain output. NOTE(review): fragment — the branch structure joining
 * these paths is partially missing from this extract. */
647 static int encode_422_bitstream(HYuvContext *s, int count){
650 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
651 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
656 if(s->flags&CODEC_FLAG_PASS1){
657 for(i=0; i<count; i++){
658 s->stats[0][ s->temp[0][2*i ] ]++;
659 s->stats[1][ s->temp[1][ i ] ]++;
660 s->stats[0][ s->temp[0][2*i+1] ]++;
661 s->stats[2][ s->temp[2][ i ] ]++;
664 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
667 for(i=0; i<count; i++){
668 s->stats[0][ s->temp[0][2*i ] ]++;
669 put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
670 s->stats[1][ s->temp[1][ i ] ]++;
671 put_bits(&s->pb, s->len[1][ s->temp[1][ i ] ], s->bits[1][ s->temp[1][ i ] ]);
672 s->stats[0][ s->temp[0][2*i+1] ]++;
673 put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
674 s->stats[2][ s->temp[2][ i ] ]++;
675 put_bits(&s->pb, s->len[2][ s->temp[2][ i ] ], s->bits[2][ s->temp[2][ i ] ]);
678 for(i=0; i<count; i++){
679 put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
680 put_bits(&s->pb, s->len[1][ s->temp[1][ i ] ], s->bits[1][ s->temp[1][ i ] ]);
681 put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
682 put_bits(&s->pb, s->len[2][ s->temp[2][ i ] ], s->bits[2][ s->temp[2][ i ] ]);
/* Encode 2*count luma-only samples from temp[0]; same three-path
 * structure as encode_422_bitstream (stats-only / stats+output / output).
 * NOTE(review): fragment — branch joins are partially missing here. */
688 static int encode_gray_bitstream(HYuvContext *s, int count){
691 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
692 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
697 if(s->flags&CODEC_FLAG_PASS1){
698 for(i=0; i<count; i++){
699 s->stats[0][ s->temp[0][2*i ] ]++;
700 s->stats[0][ s->temp[0][2*i+1] ]++;
703 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
707 for(i=0; i<count; i++){
708 s->stats[0][ s->temp[0][2*i ] ]++;
709 put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
710 s->stats[0][ s->temp[0][2*i+1] ]++;
711 put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
714 for(i=0; i<count; i++){
715 put_bits(&s->pb, s->len[0][ s->temp[0][2*i ] ], s->bits[0][ s->temp[0][2*i ] ]);
716 put_bits(&s->pb, s->len[0][ s->temp[0][2*i+1] ], s->bits[0][ s->temp[0][2*i+1] ]);
721 #endif /* CONFIG_ENCODERS */
/* Decode `count` BGR pixels into temp[0] (4 bytes per pixel). The first
 * pair of loops is the decorrelated path (B and R stored as deltas from G);
 * the second pair is the plain path. 32 bpp streams carry a 4th coded value
 * per pixel that is read and discarded (the "//?!" lines).
 * NOTE(review): fragment — the s->decorrelate branch joining the two
 * halves is missing from this extract. */
723 static void decode_bgr_bitstream(HYuvContext *s, int count){
727 if(s->bitstream_bpp==24){
728 for(i=0; i<count; i++){
729 s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
730 s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
731 s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
734 for(i=0; i<count; i++){
735 s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
736 s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
737 s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
738 get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
742 if(s->bitstream_bpp==24){
743 for(i=0; i<count; i++){
744 s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
745 s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
746 s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
749 for(i=0; i<count; i++){
750 s->temp[0][4*i+B]= get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
751 s->temp[0][4*i+G]= get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
752 s->temp[0][4*i+R]= get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
753 get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3); //?!
759 #ifdef CONFIG_DECODERS
/* Notify the application of newly decoded rows [last_slice_end, y) via
 * draw_horiz_band; no-op when the callback is unset. `cy` is the chroma
 * row (halved for 4:2:0, i.e. bitstream_bpp==12) — its computation is
 * missing from this extract. */
760 static void draw_slice(HYuvContext *s, int y){
764 if(s->avctx->draw_horiz_band==NULL)
767 h= y - s->last_slice_end;
770 if(s->bitstream_bpp==12){
776 offset[0] = s->picture.linesize[0]*y;
777 offset[1] = s->picture.linesize[1]*cy;
778 offset[2] = s->picture.linesize[2]*cy;
782 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
784 s->last_slice_end= y + h;
787 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size){
788 HYuvContext *s = avctx->priv_data;
789 const int width= s->width;
790 const int width2= s->width>>1;
791 const int height= s->height;
792 int fake_ystride, fake_ustride, fake_vstride;
793 AVFrame * const p= &s->picture;
796 AVFrame *picture = data;
798 s->bitstream_buffer= av_fast_realloc(s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
800 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (uint32_t*)buf, buf_size/4);
803 avctx->release_buffer(avctx, p);
806 if(avctx->get_buffer(avctx, p) < 0){
807 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
812 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
817 if((unsigned)(buf_size-table_size) >= INT_MAX/8)
820 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
822 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
823 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
824 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
826 s->last_slice_end= 0;
828 if(s->bitstream_bpp<24){
830 int lefty, leftu, leftv;
831 int lefttopy, lefttopu, lefttopv;
834 p->data[0][3]= get_bits(&s->gb, 8);
835 p->data[0][2]= get_bits(&s->gb, 8);
836 p->data[0][1]= get_bits(&s->gb, 8);
837 p->data[0][0]= get_bits(&s->gb, 8);
839 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
843 leftv= p->data[2][0]= get_bits(&s->gb, 8);
844 lefty= p->data[0][1]= get_bits(&s->gb, 8);
845 leftu= p->data[1][0]= get_bits(&s->gb, 8);
846 p->data[0][0]= get_bits(&s->gb, 8);
848 switch(s->predictor){
851 decode_422_bitstream(s, width-2);
852 lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
853 if(!(s->flags&CODEC_FLAG_GRAY)){
854 leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
855 leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
858 for(cy=y=1; y<s->height; y++,cy++){
859 uint8_t *ydst, *udst, *vdst;
861 if(s->bitstream_bpp==12){
862 decode_gray_bitstream(s, width);
864 ydst= p->data[0] + p->linesize[0]*y;
866 lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
867 if(s->predictor == PLANE){
869 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
872 if(y>=s->height) break;
877 ydst= p->data[0] + p->linesize[0]*y;
878 udst= p->data[1] + p->linesize[1]*cy;
879 vdst= p->data[2] + p->linesize[2]*cy;
881 decode_422_bitstream(s, width);
882 lefty= add_left_prediction(ydst, s->temp[0], width, lefty);
883 if(!(s->flags&CODEC_FLAG_GRAY)){
884 leftu= add_left_prediction(udst, s->temp[1], width2, leftu);
885 leftv= add_left_prediction(vdst, s->temp[2], width2, leftv);
887 if(s->predictor == PLANE){
888 if(cy>s->interlaced){
889 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
890 if(!(s->flags&CODEC_FLAG_GRAY)){
891 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
892 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
897 draw_slice(s, height);
901 /* first line except first 2 pixels is left predicted */
902 decode_422_bitstream(s, width-2);
903 lefty= add_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
904 if(!(s->flags&CODEC_FLAG_GRAY)){
905 leftu= add_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
906 leftv= add_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
911 /* second line is left predicted for interlaced case */
913 decode_422_bitstream(s, width);
914 lefty= add_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
915 if(!(s->flags&CODEC_FLAG_GRAY)){
916 leftu= add_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
917 leftv= add_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
922 /* next 4 pixels are left predicted too */
923 decode_422_bitstream(s, 4);
924 lefty= add_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
925 if(!(s->flags&CODEC_FLAG_GRAY)){
926 leftu= add_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
927 leftv= add_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
930 /* next line except the first 4 pixels is median predicted */
931 lefttopy= p->data[0][3];
932 decode_422_bitstream(s, width-4);
933 add_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
934 if(!(s->flags&CODEC_FLAG_GRAY)){
935 lefttopu= p->data[1][1];
936 lefttopv= p->data[2][1];
937 add_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
938 add_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
942 for(; y<height; y++,cy++){
943 uint8_t *ydst, *udst, *vdst;
945 if(s->bitstream_bpp==12){
947 decode_gray_bitstream(s, width);
948 ydst= p->data[0] + p->linesize[0]*y;
949 add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
956 decode_422_bitstream(s, width);
958 ydst= p->data[0] + p->linesize[0]*y;
959 udst= p->data[1] + p->linesize[1]*cy;
960 vdst= p->data[2] + p->linesize[2]*cy;
962 add_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
963 if(!(s->flags&CODEC_FLAG_GRAY)){
964 add_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
965 add_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
969 draw_slice(s, height);
975 int leftr, leftg, leftb;
976 const int last_line= (height-1)*p->linesize[0];
978 if(s->bitstream_bpp==32){
979 skip_bits(&s->gb, 8);
980 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
981 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
982 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
984 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
985 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
986 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
987 skip_bits(&s->gb, 8);
991 switch(s->predictor){
994 decode_bgr_bitstream(s, width-1);
995 add_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb);
997 for(y=s->height-2; y>=0; y--){ //yes its stored upside down
998 decode_bgr_bitstream(s, width);
1000 add_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb);
1001 if(s->predictor == PLANE){
1002 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1003 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1004 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1008 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1011 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1015 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1022 *data_size = sizeof(AVFrame);
1024 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
/* Shared teardown: free the per-plane temp buffers (loop bounds missing
 * from this extract). */
1028 static int common_end(HYuvContext *s){
1032 av_freep(&s->temp[i]);
1037 #ifdef CONFIG_DECODERS
/* Decoder teardown: release the bitstream swap buffer and the three VLC
 * tables. NOTE(review): fragment — common_end call/return missing here. */
1038 static int decode_end(AVCodecContext *avctx)
1040 HYuvContext *s = avctx->priv_data;
1044 av_freep(&s->bitstream_buffer);
1047 free_vlc(&s->vlc[i]);
1054 #ifdef CONFIG_ENCODERS
1055 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1056 HYuvContext *s = avctx->priv_data;
1057 AVFrame *pict = data;
1058 const int width= s->width;
1059 const int width2= s->width>>1;
1060 const int height= s->height;
1061 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1062 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1063 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1064 AVFrame * const p= &s->picture;
1068 p->pict_type= FF_I_TYPE;
1073 generate_len_table(s->len[i], s->stats[i], 256);
1074 if(generate_bits_table(s->bits[i], s->len[i])<0)
1076 size+= store_table(s, s->len[i], &buf[size]);
1080 for(j=0; j<256; j++)
1081 s->stats[i][j] >>= 1;
1084 init_put_bits(&s->pb, buf+size, buf_size-size);
1086 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1087 int lefty, leftu, leftv, y, cy;
1089 put_bits(&s->pb, 8, leftv= p->data[2][0]);
1090 put_bits(&s->pb, 8, lefty= p->data[0][1]);
1091 put_bits(&s->pb, 8, leftu= p->data[1][0]);
1092 put_bits(&s->pb, 8, p->data[0][0]);
1094 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+2, width-2 , lefty);
1095 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+1, width2-1, leftu);
1096 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+1, width2-1, leftv);
1098 encode_422_bitstream(s, width-2);
1100 if(s->predictor==MEDIAN){
1101 int lefttopy, lefttopu, lefttopv;
1104 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
1105 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
1106 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
1108 encode_422_bitstream(s, width);
1112 lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
1113 leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
1114 leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
1116 encode_422_bitstream(s, 4);
1118 lefttopy= p->data[0][3];
1119 lefttopu= p->data[1][1];
1120 lefttopv= p->data[2][1];
1121 s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
1122 s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
1123 s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
1124 encode_422_bitstream(s, width-4);
1127 for(; y<height; y++,cy++){
1128 uint8_t *ydst, *udst, *vdst;
1130 if(s->bitstream_bpp==12){
1132 ydst= p->data[0] + p->linesize[0]*y;
1133 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1134 encode_gray_bitstream(s, width);
1137 if(y>=height) break;
1139 ydst= p->data[0] + p->linesize[0]*y;
1140 udst= p->data[1] + p->linesize[1]*cy;
1141 vdst= p->data[2] + p->linesize[2]*cy;
1143 s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
1144 s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
1145 s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
1147 encode_422_bitstream(s, width);
1150 for(cy=y=1; y<height; y++,cy++){
1151 uint8_t *ydst, *udst, *vdst;
1153 /* encode a luma only line & y++ */
1154 if(s->bitstream_bpp==12){
1155 ydst= p->data[0] + p->linesize[0]*y;
1157 if(s->predictor == PLANE && s->interlaced < y){
1158 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1160 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1162 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1164 encode_gray_bitstream(s, width);
1166 if(y>=height) break;
1169 ydst= p->data[0] + p->linesize[0]*y;
1170 udst= p->data[1] + p->linesize[1]*cy;
1171 vdst= p->data[2] + p->linesize[2]*cy;
1173 if(s->predictor == PLANE && s->interlaced < cy){
1174 s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
1175 s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
1176 s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
1178 lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
1179 leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
1180 leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
1182 lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
1183 leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
1184 leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
1187 encode_422_bitstream(s, width);
1191 av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
1195 size+= (put_bits_count(&s->pb)+31)/8;
1198 if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
1200 char *p= avctx->stats_out;
1201 char *end= p + 1024*30;
1203 for(j=0; j<256; j++){
1204 snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
1208 snprintf(p, end-p, "\n");
1212 if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
1213 flush_put_bits(&s->pb);
1214 s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
1215 avctx->stats_out[0] = '\0';
1218 s->picture_number++;
/* Encoder teardown: free the extradata and stats_out buffers allocated in
 * encode_init. NOTE(review): fragment — common_end call/return missing. */
1223 static int encode_end(AVCodecContext *avctx)
1225 HYuvContext *s = avctx->priv_data;
1229 av_freep(&avctx->extradata);
1230 av_freep(&avctx->stats_out);
1234 #endif /* CONFIG_ENCODERS */
1236 #ifdef CONFIG_DECODERS
1237 AVCodec huffyuv_decoder = {
1241 sizeof(HYuvContext),
1246 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1250 AVCodec ffvhuff_decoder = {
1254 sizeof(HYuvContext),
1259 CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
1264 #ifdef CONFIG_ENCODERS
1266 AVCodec huffyuv_encoder = {
1270 sizeof(HYuvContext),
1274 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV422P, -1},
1277 AVCodec ffvhuff_encoder = {
1281 sizeof(HYuvContext),
1285 .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, -1},
1288 #endif //CONFIG_ENCODERS