2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
9 * This file is part of FFmpeg.
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
28 * huffyuv codec for libavcodec.
// NOTE(review): this file is a sparse sample of FFmpeg's huffyuv codec; interior
// lines of the enum/struct below are elided, so only the visible members are annotated.
51 typedef enum Predictor{
57 typedef struct HYuvContext{
58 AVCodecContext *avctx;
66 int yuy2; //use yuy2 instead of 422P
67 int bgr32; //use bgr32 instead of bgr24
74 uint64_t stats[3][256]; // per-plane symbol frequency counters (fed by the encoder / 2-pass stats)
76 uint32_t bits[3][256]; // per-plane VLC code words, filled by generate_bits_table()
77 uint32_t pix_bgr_map[1<<VLC_BITS]; // packed pixels for the joint RGB decode LUT (see generate_joint_tables)
78 VLC vlc[6]; //Y,U,V,YY,YU,YV
80 uint8_t *bitstream_buffer; // byte-swapped, padded copy of the input packet (see decode_frame)
81 unsigned int bitstream_buffer_size;
// Built-in run-length-coded Huffman length table for luma, used for "classic"
// (v1) huffyuv streams; parsed via read_len_table() in read_old_huffman_tables().
// NOTE(review): part of this initializer appears elided in this sampled view.
85 #define classic_shift_luma_table_size 42
86 static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
87 34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
88 16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
// Built-in run-length-coded Huffman length table for chroma ("classic" v1 huffyuv);
// consumed by read_len_table() in read_old_huffman_tables().
93 #define classic_shift_chroma_table_size 59
94 static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
95 66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
96 56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
97 214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0,
// Default luma code values for "classic" (v1) huffyuv; copied into s->bits[0]
// by read_old_huffman_tables().
101 static const unsigned char classic_add_luma[256] = {
102 3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
103 73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
104 68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
105 35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
106 37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
107 35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
108 27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
109 15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
110 12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
111 12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
112 18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
113 28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
114 28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
115 62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
116 54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
117 46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
// Default chroma code values for "classic" (v1) huffyuv; copied into s->bits[1]
// by read_old_huffman_tables().
120 static const unsigned char classic_add_chroma[256] = {
121 3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
122 7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
123 11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
124 43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
125 143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
126 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
127 17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
128 112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
129 0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
130 135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
131 52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
132 19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
133 7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
134 83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
135 14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
136 6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
// Left-predict one plane line into dst (difference of each byte with its left
// neighbour) and return the running "left" value for the next line.  The first
// 16 pixels are handled by scalar loops; the rest is done by the (possibly
// SIMD-accelerated) dsp.diff_bytes over src[16..]-src[15..].
// NOTE(review): the loop headers and return statement are elided in this view.
139 static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int left){
143 const int temp= src[i];
150 const int temp= src[i];
154 s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
// Per-channel left prediction for 4-byte BGR32 pixels: the first FFMIN(w,4)
// pixels are differenced channel-by-channel in scalar code, the tail via
// dsp.diff_bytes with a 4-byte (one-pixel) lag.  On return *red..*alpha hold
// the channels of the line's last pixel, i.e. the left context for the caller.
159 static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue, int *alpha){
166 for(i=0; i<FFMIN(w,4); i++){
167 const int rt= src[i*4+R];
168 const int gt= src[i*4+G];
169 const int bt= src[i*4+B];
170 const int at= src[i*4+A];
180 s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
181 *red= src[(w-1)*4+R];
182 *green= src[(w-1)*4+G];
183 *blue= src[(w-1)*4+B];
184 *alpha= src[(w-1)*4+A];
// RGB24 variant of the per-channel left prediction: 3-byte pixels, scalar head
// of FFMIN(w,16) pixels, then dsp.diff_bytes with a 3-byte lag for the rest.
// *red/*green/*blue receive the last pixel's channels (left context).
187 static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue){
193 for(i=0; i<FFMIN(w,16); i++){
194 const int rt= src[i*3+0];
195 const int gt= src[i*3+1];
196 const int bt= src[i*3+2];
204 s->dsp.diff_bytes(dst+48, src+48, src+48-3, w*3-48);
205 *red= src[(w-1)*3+0];
206 *green= src[(w-1)*3+1];
207 *blue= src[(w-1)*3+2];
// Parse a run-length coded table of 256 Huffman code lengths: each record is a
// 3-bit repeat count plus a 5-bit length; a repeat of 0 escapes to an 8-bit
// repeat count.  Errors out (negative return, presumably — the return lines are
// elided here) when the runs would exceed 256 symbols or the reader ran dry.
210 static int read_len_table(uint8_t *dst, GetBitContext *gb){
214 repeat= get_bits(gb, 3);
215 val = get_bits(gb, 5);
217 repeat= get_bits(gb, 8); // escape: long run, 8-bit repeat count
218 //printf("%d %d\n", val, repeat);
219 if(i+repeat > 256 || get_bits_left(gb) < 0) {
220 av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
// Derive code words from a length table, walking lengths from longest (32) to
// shortest so codes come out canonical; logs an error when the length set is
// not a consistent Huffman code (overfull/underfull).
229 static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
233 for(len=32; len>0; len--){
234 for(index=0; index<256; index++){
235 if(len_table[index]==len)
239 av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
247 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
// Sift-down for a binary min-heap of HeapElem ordered by .val: push the root
// toward the smaller child until the heap property holds.
253 static void heap_sift(HeapElem *h, int root, int size)
255 while(root*2+1 < size) {
256 int child = root*2+1;
257 if(child < size-1 && h[child].val > h[child+1].val)
259 if(h[root].val > h[child].val) {
260 FFSWAP(HeapElem, h[root], h[child]);
// Encoder-side: build Huffman code lengths from symbol statistics using an
// in-place min-heap merge (parent links recorded in up[]).  Stats are scaled
// by (<<8)+offset; if any resulting length reaches 32 the loop presumably
// retries with a doubled offset (the retry/exit lines are elided in this view).
267 static void generate_len_table(uint8_t *dst, const uint64_t *stats){
274 for(offset=1; ; offset<<=1){
275 for(i=0; i<size; i++){
277 h[i].val = (stats[i] << 8) + offset; // bias keeps zero-count symbols codable
279 for(i=size/2-1; i>=0; i--)
280 heap_sift(h, i, size); // heapify
282 for(next=size; next<size*2-1; next++){
283 // merge the two smallest entries, and put it back in the heap
284 uint64_t min1v = h[0].val;
285 up[h[0].name] = next;
286 h[0].val = INT64_MAX;
287 heap_sift(h, 0, size);
288 up[h[0].name] = next;
291 heap_sift(h, 0, size);
295 for(i=2*size-3; i>=size; i--)
296 len[i] = len[up[i]] + 1; // depth of internal nodes
297 for(i=0; i<size; i++) {
298 dst[i] = len[up[i]] + 1; // depth of each leaf = code length
299 if(dst[i] >= 32) break; // too long: caller loop retries with larger offset
304 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
// Build the "joint" VLC tables that decode two symbols per lookup: for YUV
// (<24 bpp) the Y+U / Y+V pair tables vlc[4]/vlc[5] via ff_init_vlc_sparse,
// for RGB a single joint table vlc[3] plus the pix_bgr_map pixel LUT limited
// to +/-16 per-channel deltas (see the comment further down in the body).
306 static void generate_joint_tables(HYuvContext *s){
307 uint16_t symbols[1<<VLC_BITS];
308 uint16_t bits[1<<VLC_BITS];
309 uint8_t len[1<<VLC_BITS];
310 if(s->bitstream_bpp < 24){
313 for(i=y=0; y<256; y++){
314 int len0 = s->len[0][y];
315 int limit = VLC_BITS - len0;
318 for(u=0; u<256; u++){
319 int len1 = s->len[p][u];
322 len[i] = len0 + len1;
323 bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
324 symbols[i] = (y<<8) + u;
325 if(symbols[i] != 0xffff) // reserved to mean "invalid"
329 ff_free_vlc(&s->vlc[3+p]);
330 ff_init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
333 uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
334 int i, b, g, r, code;
335 int p0 = s->decorrelate;
336 int p1 = !s->decorrelate;
337 // restrict the range to +/-16 becaues that's pretty much guaranteed to
338 // cover all the combinations that fit in 11 bits total, and it doesn't
339 // matter if we miss a few rare codes.
340 for(i=0, g=-16; g<16; g++){
341 int len0 = s->len[p0][g&255];
342 int limit0 = VLC_BITS - len0;
345 for(b=-16; b<16; b++){
346 int len1 = s->len[p1][b&255];
347 int limit1 = limit0 - len1;
350 code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
351 for(r=-16; r<16; r++){
352 int len2 = s->len[2][r&255];
355 len[i] = len0 + len1 + len2;
356 bits[i] = (code << len2) + s->bits[2][r&255];
370 ff_free_vlc(&s->vlc[3]);
371 init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
// Read the three per-plane length tables from the stream, derive code words and
// per-plane VLCs, then rebuild the joint tables.  Returns the number of bytes
// consumed (bit count rounded up).
375 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
379 init_get_bits(&gb, src, length*8);
382 if(read_len_table(s->len[i], &gb)<0)
384 if(generate_bits_table(s->bits[i], s->len[i])<0){
387 ff_free_vlc(&s->vlc[i]); // free any previous table before re-init
388 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
391 generate_joint_tables(s);
393 return (get_bits_count(&gb)+7)/8;
// Load the fixed "classic" (v1) huffyuv tables: lengths come from the built-in
// run-length-coded classic_shift_* blobs, code words from classic_add_*.
// For >=24 bpp (RGB) the luma table is duplicated into plane 1; plane 2 always
// mirrors plane 1.  The v1-unsupported path below only logs in this build.
396 static int read_old_huffman_tables(HYuvContext *s){
401 init_get_bits(&gb, classic_shift_luma, classic_shift_luma_table_size*8);
402 if(read_len_table(s->len[0], &gb)<0)
404 init_get_bits(&gb, classic_shift_chroma, classic_shift_chroma_table_size*8);
405 if(read_len_table(s->len[1], &gb)<0)
408 for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
409 for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
411 if(s->bitstream_bpp >= 24){
412 memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t)); // RGB: all planes share the luma table
413 memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
415 memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
416 memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
419 ff_free_vlc(&s->vlc[i]);
420 init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
423 generate_joint_tables(s);
427 av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
// Allocate the per-plane scratch line buffers: one width+16 buffer per plane
// for planar YUV (<24 bpp), or a single 4*width+16 buffer for packed RGB.
432 static av_cold void alloc_temp(HYuvContext *s){
435 if(s->bitstream_bpp<24){
437 s->temp[i]= av_malloc(s->width + 16);
440 s->temp[0]= av_mallocz(4*s->width + 16);
// Shared encoder/decoder init: cache flags and dimensions, set up the DSP
// function pointers.  Dimensions are assumed validated by the caller (assert).
444 static av_cold int common_init(AVCodecContext *avctx){
445 HYuvContext *s = avctx->priv_data;
448 s->flags= avctx->flags;
450 ff_dsputil_init(&s->dsp, avctx);
452 s->width= avctx->width;
453 s->height= avctx->height;
454 assert(s->width>0 && s->height>0);
459 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
// Decoder init: default to interlaced for >288-line material (huffyuv
// convention), then parse extradata for version/predictor/bpp when present.
460 static av_cold int decode_init(AVCodecContext *avctx)
462 HYuvContext *s = avctx->priv_data;
465 memset(s->vlc, 0, 3*sizeof(VLC)); // NOTE(review): vlc[] has 6 entries (see HYuvContext);
                                  // only 3 are cleared here while decode_init_thread_copy
                                  // clears all 6 — confirm vlc[3..5] cannot be read before init.
467 avctx->coded_frame= &s->picture;
468 avcodec_get_frame_defaults(&s->picture);
469 s->interlaced= s->height > 288; // heuristic default, may be overridden by extradata
472 //if(avctx->extradata)
473 // printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
474 if(avctx->extradata_size){
475 if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
476 s->version=1; // do such files exist at all?
483 int method, interlace;
485 if (avctx->extradata_size < 4)
488 method= ((uint8_t*)avctx->extradata)[0];
489 s->decorrelate= method&64 ? 1 : 0;
490 s->predictor= method&63;
491 s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
492 if(s->bitstream_bpp==0)
493 s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
494 interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
495 s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
496 s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
498 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
501 switch(avctx->bits_per_coded_sample&7){
512 s->decorrelate= avctx->bits_per_coded_sample >= 24;
515 s->predictor= MEDIAN;
519 s->predictor= LEFT; //OLD
523 s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
526 if(read_old_huffman_tables(s) < 0)
530 switch(s->bitstream_bpp){
532 avctx->pix_fmt = PIX_FMT_YUV420P;
536 avctx->pix_fmt = PIX_FMT_YUYV422;
538 avctx->pix_fmt = PIX_FMT_YUV422P;
544 avctx->pix_fmt = PIX_FMT_RGB32;
546 avctx->pix_fmt = PIX_FMT_BGR24;
550 return AVERROR_INVALIDDATA;
555 // av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
// Per-thread copy init for frame threading: clear all 6 VLC table pointers so
// the copy re-reads its own tables instead of sharing the parent's.
560 static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
562 HYuvContext *s = avctx->priv_data;
565 avctx->coded_frame= &s->picture;
568 for (i = 0; i < 6; i++)
569 s->vlc[i].table = NULL;
572 if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
    // NOTE(review): decode_init passes extradata_size-4 for the same call; this
    // variant skips 4 header bytes but still passes the full size, letting the
    // parser see 4 bytes past the table data — confirm and align with decode_init.
575 if(read_old_huffman_tables(s) < 0)
581 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
583 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
// Encoder: serialize one 256-entry length table using the same run-length
// format read_len_table() parses — 5-bit value plus 3-bit repeat packed into
// one byte, with an extra byte for runs the 3-bit field can't hold.
584 static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
592 for(; i<256 && len[i]==val && repeat<255; i++)
595 assert(val < 32 && val >0 && repeat<256 && repeat>0);
598 buf[index++]= repeat; // long-run escape byte (repeat field in packed byte is 0)
600 buf[index++]= val | (repeat<<5);
// Encoder init: allocate extradata/stats buffers, map pix_fmt to bitstream bpp,
// configure predictor/decorrelate/context, emit the 4-byte extradata header and
// the serialized Huffman tables, and seed the symbol statistics.
607 static av_cold int encode_init(AVCodecContext *avctx)
609 HYuvContext *s = avctx->priv_data;
614 avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
615 avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
618 avctx->coded_frame= &s->picture;
620 switch(avctx->pix_fmt){
621 case PIX_FMT_YUV420P:
622 s->bitstream_bpp= 12;
624 case PIX_FMT_YUV422P:
625 s->bitstream_bpp= 16;
628 s->bitstream_bpp= 32;
631 s->bitstream_bpp= 24;
634 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
637 avctx->bits_per_coded_sample= s->bitstream_bpp;
638 s->decorrelate= s->bitstream_bpp >= 24;
639 s->predictor= avctx->prediction_method;
640 s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
641 if(avctx->context_model==1){
642 s->context= avctx->context_model;
643 if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
644 av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
649 if(avctx->codec->id==CODEC_ID_HUFFYUV){
650 if(avctx->pix_fmt==PIX_FMT_YUV420P){
651 av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
654 if(avctx->context_model){
655 av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
658 if(s->interlaced != ( s->height > 288 ))
659 av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
662 if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
663 av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
// 4-byte extradata header, mirrored by decode_init's parsing:
// byte0 = predictor | decorrelate<<6, byte1 = bpp,
// byte2 = interlace flag (0x10 interlaced / 0x20 progressive) | 0x40 context bit.
667 ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
668 ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
669 ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
671 ((uint8_t*)avctx->extradata)[2]|= 0x40;
672 ((uint8_t*)avctx->extradata)[3]= 0;
673 s->avctx->extradata_size= 4; // grows as store_table() appends each table below
676 char *p= avctx->stats_in;
686 for(j=0; j<256; j++){
687 s->stats[i][j]+= strtol(p, &next, 0);
688 if(next==p) return -1;
692 if(p[0]==0 || p[1]==0 || p[2]==0) break;
696 for(j=0; j<256; j++){
697 int d= FFMIN(j, 256-j);
699 s->stats[i][j]= 100000000/(d+1);
704 generate_len_table(s->len[i], s->stats[i]);
706 if(generate_bits_table(s->bits[i], s->len[i])<0){
710 s->avctx->extradata_size+=
711 store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
716 int pels = s->width*s->height / (i?40:10);
717 for(j=0; j<256; j++){
718 int d= FFMIN(j, 256-j);
719 s->stats[i][j]= pels/(d+1);
728 // printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);
736 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
738 /* TODO instead of restarting the read when the code isn't in the first level
739 * of the joint table, jump into the 2nd level of the individual table. */
// Decode two symbols at once via the joint pair VLC; the elided middle of the
// macro presumably falls back to the two single-symbol reads below when the
// joint lookup misses (0xffff is reserved as "invalid" in generate_joint_tables).
740 #define READ_2PIX(dst0, dst1, plane1){\
741 uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
746 dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
747 dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
751 static void decode_422_bitstream(HYuvContext *s, int count){
756 if(count >= (get_bits_left(&s->gb))/(31*4)){
757 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
758 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
759 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
762 for(i=0; i<count; i++){
763 READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
764 READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
// Decode `count` luma-only pixel pairs into s->temp[0]; same checked/unchecked
// split as decode_422_bitstream (31*2 worst-case bits per pair).
769 static void decode_gray_bitstream(HYuvContext *s, int count){
774 if(count >= (get_bits_left(&s->gb))/(31*2)){
775 for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
776 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
779 for(i=0; i<count; i++){
780 READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
785 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
// Encode `count` 4:2:2 pixel pairs from s->temp (Y at full rate, U/V at half);
// bails out early if the worst case (2*4 bytes per pair) would overflow the
// output buffer.  Branches below handle stats-gathering / no-output / normal paths.
786 static int encode_422_bitstream(HYuvContext *s, int offset, int count){
788 const uint8_t *y = s->temp[0] + offset;
789 const uint8_t *u = s->temp[1] + offset/2;
790 const uint8_t *v = s->temp[2] + offset/2;
792 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
793 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
804 if(s->flags&CODEC_FLAG_PASS1){
805 for(i=0; i<count; i++){
813 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
816 for(i=0; i<count; i++){
819 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
821 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
823 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
825 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
828 for(i=0; i<count; i++){
830 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
831 put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
832 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
833 put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
// Encode `count` luma-only pixel pairs from s->temp[0], with the same output
// space check (4 bytes worst case per pair) and pass1/no-output/normal branches
// as encode_422_bitstream.
839 static int encode_gray_bitstream(HYuvContext *s, int count){
842 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
843 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
848 int y0 = s->temp[0][2*i];\
849 int y1 = s->temp[0][2*i+1];
854 put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
855 put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
858 if(s->flags&CODEC_FLAG_PASS1){
859 for(i=0; i<count; i++){
864 if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
868 for(i=0; i<count; i++){
874 for(i=0; i<count; i++){
881 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
// Decode one line of BGR pixels into s->temp[0] (4 bytes/pixel).  Fast path:
// the joint vlc[3] lookup yields a whole precomputed pixel from pix_bgr_map;
// otherwise the channels are read individually, with G-based decorrelation
// when enabled.  Alpha shares the vlc[2] (R-plane) table.  decorrelate/alpha
// are compile-time constants per instantiation (av_always_inline).
883 static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
885 for(i=0; i<count; i++){
886 int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
888 *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code]; // joint hit: whole pixel at once
889 }else if(decorrelate){
890 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
891 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
892 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
894 s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
895 s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
896 s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
899 s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
// Dispatch to the four compile-time specializations of decode_bgr_1 based on
// the runtime decorrelate flag and whether alpha is coded (32 vs 24 bpp).
903 static void decode_bgr_bitstream(HYuvContext *s, int count){
905 if(s->bitstream_bpp==24)
906 decode_bgr_1(s, count, 1, 0);
908 decode_bgr_1(s, count, 1, 1);
910 if(s->bitstream_bpp==24)
911 decode_bgr_1(s, count, 0, 0);
913 decode_bgr_1(s, count, 0, 1);
// Encode `count` RGB(A) pixels from s->temp[0] (planes == 3 or 4 bytes/pixel);
// B and R are stored G-decorrelated (mod 256).  Fails early if the worst case
// (4 bytes per channel) would overflow the output buffer.
917 static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes){
920 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*planes*count){
921 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
926 int g= s->temp[0][planes==3 ? 3*i+1 : 4*i+G];\
927 int b= (s->temp[0][planes==3 ? 3*i+2 : 4*i+B] - g) & 0xff;\
928 int r= (s->temp[0][planes==3 ? 3*i+0 : 4*i+R] - g) & 0xff;\
929 int a= s->temp[0][planes*i+A];
934 if(planes==4) s->stats[2][a]++;
936 put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
937 put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
938 put_bits(&s->pb, s->len[2][r], s->bits[2][r]);\
939 if(planes==4) put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
941 if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
942 for(i=0; i<count; i++){
946 }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
947 for(i=0; i<count; i++){
953 for(i=0; i<count; i++){
961 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
// Notify the application of newly decoded rows [last_slice_end, y) through
// draw_horiz_band, if registered.  cy is the chroma row (presumably y/2 for
// 4:2:0 — the assignment is elided in this view).
962 static void draw_slice(HYuvContext *s, int y){
964 int offset[AV_NUM_DATA_POINTERS];
966 if(s->avctx->draw_horiz_band==NULL)
969 h= y - s->last_slice_end;
972 if(s->bitstream_bpp==12){
978 offset[0] = s->picture.linesize[0]*y;
979 offset[1] = s->picture.linesize[1]*cy;
980 offset[2] = s->picture.linesize[2]*cy;
981 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
985 s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
987 s->last_slice_end= y + h;
// Decode one huffyuv frame.  The packet is 32-bit byte-swapped into a padded
// scratch buffer first (huffyuv stores the bitstream in little-endian 32-bit
// words), then per-frame Huffman tables are (re)read — presumably only in
// context mode; the guarding if is elided here — before bit-reading starts.
990 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
991 const uint8_t *buf = avpkt->data;
992 int buf_size = avpkt->size;
993 HYuvContext *s = avctx->priv_data;
994 const int width= s->width;
995 const int width2= s->width>>1; // chroma width for 4:2:2 / 4:2:0
996 const int height= s->height;
997 int fake_ystride, fake_ustride, fake_vstride;
998 AVFrame * const p= &s->picture;
1001 AVFrame *picture = data;
1003 av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
1004 if (!s->bitstream_buffer)
1005 return AVERROR(ENOMEM);
1007 memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE); // zero padding for over-reads
1008 s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
1011 ff_thread_release_buffer(avctx, p);
1014 if(ff_thread_get_buffer(avctx, p) < 0){
1015 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1020 table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
1025 if((unsigned)(buf_size-table_size) >= INT_MAX/8) // avoid overflow in the *8 below
1028 init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
1030 fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
1031 fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
1032 fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
1034 s->last_slice_end= 0;
1036 if(s->bitstream_bpp<24){
1038 int lefty, leftu, leftv;
1039 int lefttopy, lefttopu, lefttopv;
1042 p->data[0][3]= get_bits(&s->gb, 8);
1043 p->data[0][2]= get_bits(&s->gb, 8);
1044 p->data[0][1]= get_bits(&s->gb, 8);
1045 p->data[0][0]= get_bits(&s->gb, 8);
1047 av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
1051 leftv= p->data[2][0]= get_bits(&s->gb, 8);
1052 lefty= p->data[0][1]= get_bits(&s->gb, 8);
1053 leftu= p->data[1][0]= get_bits(&s->gb, 8);
1054 p->data[0][0]= get_bits(&s->gb, 8);
1056 switch(s->predictor){
1059 decode_422_bitstream(s, width-2);
1060 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1061 if(!(s->flags&CODEC_FLAG_GRAY)){
1062 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1063 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1066 for(cy=y=1; y<s->height; y++,cy++){
1067 uint8_t *ydst, *udst, *vdst;
1069 if(s->bitstream_bpp==12){
1070 decode_gray_bitstream(s, width);
1072 ydst= p->data[0] + p->linesize[0]*y;
1074 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1075 if(s->predictor == PLANE){
1077 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1080 if(y>=s->height) break;
1085 ydst= p->data[0] + p->linesize[0]*y;
1086 udst= p->data[1] + p->linesize[1]*cy;
1087 vdst= p->data[2] + p->linesize[2]*cy;
1089 decode_422_bitstream(s, width);
1090 lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
1091 if(!(s->flags&CODEC_FLAG_GRAY)){
1092 leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
1093 leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
1095 if(s->predictor == PLANE){
1096 if(cy>s->interlaced){
1097 s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
1098 if(!(s->flags&CODEC_FLAG_GRAY)){
1099 s->dsp.add_bytes(udst, udst - fake_ustride, width2);
1100 s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
1105 draw_slice(s, height);
1109 /* first line except first 2 pixels is left predicted */
1110 decode_422_bitstream(s, width-2);
1111 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
1112 if(!(s->flags&CODEC_FLAG_GRAY)){
1113 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
1114 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
1119 /* second line is left predicted for interlaced case */
1121 decode_422_bitstream(s, width);
1122 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
1123 if(!(s->flags&CODEC_FLAG_GRAY)){
1124 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1125 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1130 /* next 4 pixels are left predicted too */
1131 decode_422_bitstream(s, 4);
1132 lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
1133 if(!(s->flags&CODEC_FLAG_GRAY)){
1134 leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1135 leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1138 /* next line except the first 4 pixels is median predicted */
1139 lefttopy= p->data[0][3];
1140 decode_422_bitstream(s, width-4);
1141 s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
1142 if(!(s->flags&CODEC_FLAG_GRAY)){
1143 lefttopu= p->data[1][1];
1144 lefttopv= p->data[2][1];
1145 s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
1146 s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
1150 for(; y<height; y++,cy++){
1151 uint8_t *ydst, *udst, *vdst;
1153 if(s->bitstream_bpp==12){
1155 decode_gray_bitstream(s, width);
1156 ydst= p->data[0] + p->linesize[0]*y;
1157 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1160 if(y>=height) break;
1164 decode_422_bitstream(s, width);
1166 ydst= p->data[0] + p->linesize[0]*y;
1167 udst= p->data[1] + p->linesize[1]*cy;
1168 vdst= p->data[2] + p->linesize[2]*cy;
1170 s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
1171 if(!(s->flags&CODEC_FLAG_GRAY)){
1172 s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1173 s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1177 draw_slice(s, height);
// RGB path: the image is stored bottom-up, so seed the predictors from the raw
// first pixel written on the last picture line.  24 bpp streams still pad the
// pixel to 4 coded bytes (alpha forced to 255, one byte skipped).
1183 int leftr, leftg, leftb, lefta;
1184 const int last_line= (height-1)*p->linesize[0];
1186 if(s->bitstream_bpp==32){
1187 lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
1188 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1189 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1190 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1192 leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
1193 leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
1194 leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
1195 lefta= p->data[0][last_line+A]= 255;
1196 skip_bits(&s->gb, 8);
1200 switch(s->predictor){
1203 decode_bgr_bitstream(s, width-1);
1204 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
1206 for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
1207 decode_bgr_bitstream(s, width);
1209 s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
1210 if(s->predictor == PLANE){
1211 if(s->bitstream_bpp!=32) lefta=0;
1212 if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
1213 s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
1214 p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
1218 draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
1221 av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
1225 av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
1232 *data_size = sizeof(AVFrame);
1234 return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1236 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
// Shared teardown: release the per-plane scratch buffers allocated by alloc_temp.
1238 static int common_end(HYuvContext *s){
1242 av_freep(&s->temp[i]);
1247 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
// Decoder teardown: release any held picture buffer, the swap buffer, and all
// VLC tables (loop bounds elided here; the struct declares 6 entries).
1248 static av_cold int decode_end(AVCodecContext *avctx)
1250 HYuvContext *s = avctx->priv_data;
1253 if (s->picture.data[0])
1254 avctx->release_buffer(avctx, &s->picture);
1257 av_freep(&s->bitstream_buffer);
1260 ff_free_vlc(&s->vlc[i]);
1265 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1267 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
// Encode one frame: in context mode, regenerate and store per-frame tables
// into the packet, then left/plane/median-predict each line into s->temp and
// entropy-code it.  NOTE(review): this definition runs past the end of the
// visible chunk; only the head is annotated here.
1268 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1269 const AVFrame *pict, int *got_packet)
1271 HYuvContext *s = avctx->priv_data;
1272 const int width= s->width;
1273 const int width2= s->width>>1;
1274 const int height= s->height;
1275 const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
1276 const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
1277 const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
1278 AVFrame * const p= &s->picture;
1279 int i, j, size = 0, ret;
1282 (ret = av_new_packet(pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0) {
1283 av_log(avctx, AV_LOG_ERROR, "Error allocating output packet.\n");
1288 p->pict_type= AV_PICTURE_TYPE_I;
1293 generate_len_table(s->len[i], s->stats[i]);
1294 if(generate_bits_table(s->bits[i], s->len[i])<0)
1296 size += store_table(s, s->len[i], &pkt->data[size]);
1300 for(j=0; j<256; j++)
1301 s->stats[i][j] >>= 1;
1304 init_put_bits(&s->pb, pkt->data + size, pkt->size - size);
1306 if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
1307 int lefty, leftu, leftv, y, cy;
    /* ------------------------------------------------------------------
     * Tail of encode_frame(): per-pixel-format prediction + entropy
     * coding loops, then packet finalization.
     *
     * NOTE(review): this copy of the file has lost several structural
     * lines (opening/closing braces, "}else{" separators, loop headers).
     * The surviving code tokens are reproduced byte-identically below;
     * each detected gap is marked.  Diff against upstream
     * libavcodec/huffyuv.c before building.
     * ------------------------------------------------------------------ */

    /* --- planar YUV (422P/420P) path --- */

    /* The first pixels of each plane are transmitted raw (8 bits each);
     * the same values seed the left predictors. */
    put_bits(&s->pb, 8, leftv= p->data[2][0]);
    put_bits(&s->pb, 8, lefty= p->data[0][1]);
    put_bits(&s->pb, 8, leftu= p->data[1][0]);
    put_bits(&s->pb, 8, p->data[0][0]);

    /* Line 0 is always coded with plain left prediction (seed 0). */
    lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
    leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
    leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

    /* Start at luma offset 2: the first two luma samples went out raw. */
    encode_422_bitstream(s, 2, width-2);

    if(s->predictor==MEDIAN){
        int lefttopy, lefttopu, lefttopv;

        /* NOTE(review): upstream guards the next three calls with
         * "if(s->interlaced){" (second field line is still
         * left-predicted); that guard line is missing in this copy. */
        lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

        encode_422_bitstream(s, 0, width);

        /* First 4 luma / 2 chroma samples of the next line still use
         * left prediction: median needs a top-left neighbour first. */
        lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

        encode_422_bitstream(s, 0, 4);

        /* Seed the top-left values, then median-code the rest of the line. */
        lefttopy= p->data[0][3];
        lefttopu= p->data[1][1];
        lefttopv= p->data[2][1];
        s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
        s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
        s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
        encode_422_bitstream(s, 0, width-4);

        /* All remaining lines: median prediction throughout. */
        for(; y<height; y++,cy++){
            uint8_t *ydst, *udst, *vdst;

            /* 4:2:0 (12 bpp): extra luma-only lines between chroma lines. */
            if(s->bitstream_bpp==12){
                /* NOTE(review): upstream wraps the next three statements in
                 * a "while(2*cy > y){ ... y++; }" loop; that loop header is
                 * missing in this copy. */
                ydst= p->data[0] + p->linesize[0]*y;
                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                encode_gray_bitstream(s, width);
                if(y>=height) break;
            /* NOTE(review): closing "}" of the bpp==12 block missing here. */

            ydst= p->data[0] + p->linesize[0]*y;
            udst= p->data[1] + p->linesize[1]*cy;
            vdst= p->data[2] + p->linesize[2]*cy;

            s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

            encode_422_bitstream(s, 0, width);
    /* NOTE(review): the "}" / "}else{" separating the MEDIAN branch from
     * the LEFT/PLANE branch is missing in this copy. */
        for(cy=y=1; y<height; y++,cy++){
            uint8_t *ydst, *udst, *vdst;

            /* encode a luma only line & y++ */
            if(s->bitstream_bpp==12){
                ydst= p->data[0] + p->linesize[0]*y;

                /* PLANE predictor: subtract the line above first (skipped
                 * on the first line of each field: interlaced < y). */
                if(s->predictor == PLANE && s->interlaced < y){
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                /* NOTE(review): "}else{" missing here. */
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                encode_gray_bitstream(s, width);
                if(y>=height) break;

            ydst= p->data[0] + p->linesize[0]*y;
            udst= p->data[1] + p->linesize[1]*cy;
            vdst= p->data[2] + p->linesize[2]*cy;

            if(s->predictor == PLANE && s->interlaced < cy){
                s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
            /* NOTE(review): "}else{" missing here. */
                lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);

            encode_422_bitstream(s, 0, width);

    /* --- packed RGB32 path (bottom line first: negative stride) --- */
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int leftr, leftg, leftb, lefta;

        /* First pixel raw (A,R,G,B); also seeds the left predictors. */
        put_bits(&s->pb, 8, lefta= data[A]);
        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);

        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb, &lefta);
        encode_bgra_bitstream(s, width-1, 4);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb, &lefta);
            /* NOTE(review): "}else{" missing here. */
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb, &lefta);
            encode_bgra_bitstream(s, width, 4);

    /* --- packed RGB24 path --- */
    }else if(avctx->pix_fmt == PIX_FMT_RGB24){
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int leftr, leftg, leftb;

        /* First pixel raw (R,G,B) plus one padding byte. */
        put_bits(&s->pb, 8, leftr= data[0]);
        put_bits(&s->pb, 8, leftg= data[1]);
        put_bits(&s->pb, 8, leftb= data[2]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_rgb24(s, s->temp[0], data+3, width-1, &leftr, &leftg, &leftb);
        encode_bgra_bitstream(s, width-1, 3);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*3);
                sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            /* NOTE(review): "}else{" missing here. */
                sub_left_prediction_rgb24(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            encode_bgra_bitstream(s, width, 3);

    /* NOTE(review): the final "}else{" fallback branch marker is missing. */
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");

    /* --- finish the packet --- */
    /* Round the bit count up to bytes, then emit 16+15 = 31 zero bits so
     * the final flush is word-aligned. */
    size+= (put_bits_count(&s->pb)+31)/8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);

    /* Two-pass (PASS1): dump symbol statistics as text every 32 frames. */
    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        char *p= avctx->stats_out;
        char *end= p + 1024*30;
        for(j=0; j<256; j++){
            snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
        /* NOTE(review): upstream advances p and zeroes s->stats[i][j]
         * inside this loop; those lines are missing in this copy. */
        snprintf(p, end-p, "\n");
    /* NOTE(review): a "} else" appears to be missing before this line. */
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        /* Huffyuv output is byte-swapped as 32-bit words in place. */
        s->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);

    s->picture_number++;

    pkt->flags |= AV_PKT_FLAG_KEY;
1499 static av_cold int encode_end(AVCodecContext *avctx)
1501 HYuvContext *s = avctx->priv_data;
1505 av_freep(&avctx->extradata);
1506 av_freep(&avctx->stats_out);
1510 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
#if CONFIG_HUFFYUV_DECODER
/* Decoder registration for the original (Ben Rudiak-Gould) huffyuv codec.
 * The required .name field and the "};"/"#endif" terminators had been
 * lost in this copy; restored per FFmpeg upstream. */
AVCodec ff_huffyuv_decoder = {
    .name             = "huffyuv",
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = CODEC_ID_HUFFYUV,
    .priv_data_size   = sizeof(HYuvContext),
    .init             = decode_init,
    .close            = decode_end,
    .decode           = decode_frame,
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name        = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif
#if CONFIG_FFVHUFF_DECODER
/* Decoder registration for FFmpeg's extended huffyuv variant (adds
 * 4:2:0 support etc.).  The required .name field and the "};"/"#endif"
 * terminators had been lost in this copy; restored per FFmpeg upstream. */
AVCodec ff_ffvhuff_decoder = {
    .name             = "ffvhuff",
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = CODEC_ID_FFVHUFF,
    .priv_data_size   = sizeof(HYuvContext),
    .init             = decode_init,
    .close            = decode_end,
    .decode           = decode_frame,
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name        = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif
#if CONFIG_HUFFYUV_ENCODER
/* Encoder registration for the original huffyuv codec.  The required
 * .name field and the "};"/"#endif" terminators had been lost in this
 * copy; restored per FFmpeg upstream. */
AVCodec ff_huffyuv_encoder = {
    .name           = "huffyuv",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif
1556 #if CONFIG_FFVHUFF_ENCODER
1557 AVCodec ff_ffvhuff_encoder = {
1559 .type = AVMEDIA_TYPE_VIDEO,
1560 .id = CODEC_ID_FFVHUFF,
1561 .priv_data_size = sizeof(HYuvContext),
1562 .init = encode_init,
1563 .encode2 = encode_frame,
1564 .close = encode_end,
1565 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE},
1566 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),