2 * FFV1 codec for libavcodec
4 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * FF Video Codec 1 (a lossless codec)
33 #include "rangecoder.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/crc.h"
39 #include "libavutil/opt.h"
41 #ifdef __INTEL_COMPILER
47 #define CONTEXT_SIZE 32
49 #define MAX_QUANT_TABLES 8
50 #define MAX_CONTEXT_INPUTS 5
52 extern const uint8_t ff_log2_run[41];
/* 5-level quantizer for sample differences on >8-bit ("10bit") content.
 * Index is the difference masked with 0xFF, so indices 128..255 map the
 * negative half; outputs lie in {-2..2} with a wide dead zone around 0.
 * NOTE(review): this dump is truncated — the closing "};" of the table is
 * not visible here. */
54 static const int8_t quant5_10bit[256]={
55  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
56  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
57  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
58  1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
59  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
60  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
61  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
62  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
63 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
64 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
65 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
66 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
67 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,
68 -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
69 -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
70 -1,-1,-1,-1,-1,-1,-0,-0,-0,-0,-0,-0,-0,-0,-0,-0,
/* 5-level quantizer for 8-bit sample differences; same indexing convention
 * as quant5_10bit but with a narrow dead zone (only difference 0 maps to 0).
 * NOTE(review): closing "};" elided in this dump. */
73 static const int8_t quant5[256]={
74  0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
75  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
76  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
77  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
78  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
79  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
80  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
81  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
83 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
84 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
85 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
86 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
87 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
88 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
89 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1,-1,
/* 9-level quantizer {-4..4} for >8-bit content; same masked-difference
 * indexing as the quant5 tables.
 * NOTE(review): closing "};" elided in this dump. */
92 static const int8_t quant9_10bit[256]={
93  0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
94  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
95  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
96  3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
97  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
98  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
99  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
100  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
101 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
102 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
103 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
104 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
105 -4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,
106 -3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,
107 -3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
108 -2,-2,-2,-2,-1,-1,-1,-1,-1,-1,-1,-1,-0,-0,-0,-0,
/* 11-level quantizer {-5..5} for 8-bit sample differences; same masked
 * indexing as above. Used to build the default context quant tables.
 * NOTE(review): closing "};" elided in this dump. */
111 static const int8_t quant11[256]={
112  0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
113  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
114  4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
115  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
116  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
117  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
118  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
119  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
120 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
121 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
122 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
123 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
124 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
125 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-4,-4,
126 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
127 -4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-1,
/* Default range-coder state-transition table for bitstream version >= 2
 * (copied into FFV1Context.state_transition at encode_init and written to
 * the header as deltas against the coder's builtin one_state).
 * NOTE(review): closing "};" elided in this dump. */
130 static const uint8_t ver2_state[256]= {
131    0,  10,  10,  10,  10,  16,  16,  16,  28,  16,  16,  29,  42,  49,  20,  49,
132   59,  25,  26,  26,  27,  31,  33,  33,  33,  34,  34,  37,  67,  38,  39,  39,
133   40,  40,  41,  79,  43,  44,  45,  45,  48,  48,  64,  50,  51,  52,  88,  52,
134   53,  74,  55,  57,  58,  58,  74,  60, 101,  61,  62,  84,  66,  66,  68,  69,
135   87,  82,  71,  97,  73,  73,  82,  75, 111,  77,  94,  78,  87,  81,  83,  97,
136   85,  83,  94,  86,  99,  89,  90,  99, 111,  92,  93, 134,  95,  98, 105,  98,
137  105, 110, 102, 108, 102, 118, 103, 106, 106, 113, 109, 112, 114, 112, 116, 125,
138  115, 116, 117, 117, 126, 119, 125, 121, 121, 123, 145, 124, 126, 131, 127, 129,
139  165, 130, 132, 138, 133, 135, 145, 136, 137, 139, 146, 141, 143, 142, 144, 148,
140  147, 155, 151, 149, 151, 150, 152, 157, 153, 154, 156, 168, 158, 162, 161, 160,
141  172, 163, 169, 164, 166, 184, 167, 170, 177, 174, 171, 173, 182, 176, 180, 178,
142  175, 189, 179, 181, 186, 183, 192, 185, 200, 187, 191, 188, 190, 197, 193, 196,
143  197, 194, 195, 196, 198, 202, 199, 201, 210, 203, 207, 204, 205, 206, 208, 214,
144  209, 211, 221, 212, 213, 215, 224, 216, 217, 218, 219, 220, 222, 228, 223, 225,
145  226, 224, 227, 229, 240, 230, 231, 232, 233, 234, 235, 236, 238, 239, 237, 242,
146  241, 243, 242, 244, 245, 246, 247, 248, 249, 250, 251, 252, 252, 253, 254, 255,
/* Per-context state of the Golomb-Rice ("VLC") coder path.
 * NOTE(review): the member list (drift, error_sum, bias, count — per later
 * uses in this file) is elided from this dump. */
149 typedef struct VlcState{
/* Per-plane coding state: the context quantization tables plus either
 * range-coder context states (state) or VLC states (vlc_state, elided here).
 * NOTE(review): several members and the closing "}PlaneContext;" are elided. */
156 typedef struct PlaneContext{
157     int16_t quant_table[MAX_CONTEXT_INPUTS][256];
158     int quant_table_index;
160     uint8_t (*state)[CONTEXT_SIZE];  /* one CONTEXT_SIZE state vector per context */
162     uint8_t interlace_bit_state[2];
165 #define MAX_SLICES 256
/* Main codec context; one master instance plus one copy per slice
 * (slice_context). Many members are elided from this dump — only the
 * visible ones are annotated below. */
167 typedef struct FFV1Context{
169     AVCodecContext *avctx;
173     uint64_t rc_stat[256][2];                         /* 2-pass: per-state 0/1 bit counts */
174     uint64_t (*rc_stat2[MAX_QUANT_TABLES])[32][2];    /* 2-pass: per-context, per-substate counts */
178     int chroma_h_shift, chroma_v_shift;
185     int ac;                              ///< 1=range coder <-> 0=golomb rice
186     int ac_byte_count;                   ///< number of bytes used for AC coding
187     PlaneContext plane[MAX_PLANES];
188     int16_t quant_table[MAX_CONTEXT_INPUTS][256];
189     int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][256];
190     int context_count[MAX_QUANT_TABLES];
191     uint8_t state_transition[256];
192     uint8_t (*initial_states[MAX_QUANT_TABLES])[32];
195     int16_t *sample_buffer;              /* ring of decoded/encoded sample lines */
201     int quant_table_count;
205     struct FFV1Context *slice_context[MAX_SLICES];
213     int bits_per_raw_sample;
216 static av_always_inline int fold(int diff, int bits){
/**
 * Median predictor: predict the current sample from its left (L), top (T)
 * and top-left (LT) neighbours as median(L, L+T-LT, T).
 * src points at the current sample, last at the sample directly above it.
 * NOTE(review): braces were elided from this dump and have been restored.
 */
static inline int predict(int16_t *src, int16_t *last)
{
    const int LT= last[-1];
    const int T = last[ 0];
    const int L = src[-1];

    return mid_pred(L, L + T - LT, T);
}
237 static inline int get_context(PlaneContext *p, int16_t *src,
238 int16_t *last, int16_t *last2)
240 const int LT= last[-1];
241 const int T= last[ 0];
242 const int RT= last[ 1];
243 const int L = src[-1];
245 if(p->quant_table[3][127]){
246 const int TT= last2[0];
247 const int LL= src[-2];
248 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF]
249 +p->quant_table[3][(LL-L) & 0xFF] + p->quant_table[4][(TT-T) & 0xFF];
251 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF];
/* 2-pass helper: for each target probability (row of best_state) search the
 * range-coder state whose expected code length, iterated through one_state
 * transitions, is minimal. l2tab caches log2(i/256) for the entropy terms.
 * NOTE(review): large parts of the body (occ initialisation, best_len
 * bookkeeping, loop closers) are elided from this dump — do not treat the
 * visible lines as the complete algorithm. */
254 static void find_best_state(uint8_t best_state[256][256], const uint8_t one_state[256]){
259 l2tab[i]= log2(i/256.0);
261 for(i=0; i<256; i++){
262 double best_len[256];
268 for(j=FFMAX(i-10,1); j<FFMIN(i+11,256); j++){
272 for(k=0; k<256; k++){
273 double newocc[256]={0};
274 for(m=0; m<256; m++){
/* accumulate expected bits: -p*log2(p_m) - (1-p)*log2(1-p_m), weighted by occupancy */
276 len -=occ[m]*( p *l2tab[ m]
277 + (1-p)*l2tab[256-m]);
280 if(len < best_len[k]){
284 for(m=0; m<256; m++){
/* propagate occupancy through one transition step */
286 newocc[ one_state[ m]] += occ[m]* p ;
287 newocc[256-one_state[256-m]] += occ[m]*(1-p);
290 memcpy(occ, newocc, sizeof(occ));
/* Encode a (possibly signed) integer with the range coder using the
 * exp-Golomb-like FFV1 layout: state+0 = zero flag, states 1..10 = unary
 * exponent, 11..21 = sign, 22..31 = mantissa bits. The rc_stat/rc_stat2
 * pointers, when non-NULL (2-pass), collect per-state bit statistics via
 * the local put_rac wrapper macro.
 * NOTE(review): the body is elided in this dump (macro tail, the
 * if(2*e < bits) split between the two layouts, loop closers are missing). */
296 static av_always_inline av_flatten void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int is_signed, uint64_t rc_stat[256][2], uint64_t rc_stat2[32][2]){
299 #define put_rac(C,S,B) \
303 rc_stat2[(S)-state][B]++;\
309 const int a= FFABS(v);
310 const int e= av_log2(a);
311 put_rac(c, state+0, 0);
/* small-exponent path: one state per unary/mantissa position */
314 put_rac(c, state+1+i, 1); //1..10
316 put_rac(c, state+1+i, 0);
318 for(i=e-1; i>=0; i--){
319 put_rac(c, state+22+i, (a>>i)&1); //22..31
323 put_rac(c, state+11 + e, v < 0); //11..21
/* large-exponent path: clamp state indices with FFMIN */
326 put_rac(c, state+1+FFMIN(i,9), 1); //1..10
328 put_rac(c, state+1+9, 0);
330 for(i=e-1; i>=0; i--){
331 put_rac(c, state+22+FFMIN(i,9), (a>>i)&1); //22..31
335 put_rac(c, state+11 + 10, v < 0); //11..21
/* v == 0: just the zero flag */
338 put_rac(c, state+0, 1);
343 static av_noinline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed){
344 put_symbol_inline(c, state, v, is_signed, NULL, NULL);
347 static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed){
348 if(get_rac(c, state+0))
353 while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
358 for(i=e-1; i>=0; i--){
359 a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31
362 e= -(is_signed && get_rac(c, state+11 + FFMIN(e, 10))); //11..21
367 static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
368 return get_symbol_inline(c, state, is_signed);
/* Adapt the per-context Golomb-Rice parameters after coding value v:
 * accumulate |v| into error_sum, fold v into drift, halve the counters at
 * count==128, and nudge bias when drift leaves [-count, 0].
 * NOTE(review): the drift accumulation, halving of drift/count and the
 * bias-adjustment arithmetic are elided from this dump — only the visible
 * skeleton is annotated. */
371 static inline void update_vlc_state(VlcState * const state, const int v){
372 int drift= state->drift;
373 int count= state->count;
374 state->error_sum += FFABS(v);
377 if(count == 128){ //FIXME variable
380 state->error_sum >>= 1;
/* drift too negative: decrease bias (clamped to int8 range) */
385 if(state->bias > -128) state->bias--;
/* drift positive: increase bias (clamped to int8 range) */
391 if(state->bias < 127) state->bias++;
/* Golomb-Rice encode one residual: remove the adaptive bias, fold into the
 * bit-depth range, pick k from error_sum, apply the drift-based sign flip,
 * and emit with an escape at 12 bits; finally adapt the context state.
 * NOTE(review): the k-selection loop body and several closers are elided
 * from this dump. */
402 static inline void put_vlc_symbol(PutBitContext *pb, VlcState * const state, int v, int bits){
404 //printf("final: %d ", v);
405 v = fold(v - state->bias, bits);
/* choose smallest k with (count<<k) >= error_sum (interior elided) */
409 while(i < state->error_sum){ //FIXME optimize
/* map v so that likely values get short codes; sign trick mirrors decoder */
417 if(k==0 && 2*state->drift <= - state->count) code= v ^ (-1);
420 code= v ^ ((2*state->drift + state->count)>>31);
423 //printf("v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code, state->bias, state->error_sum, state->drift, state->count, k);
424 set_sr_golomb(pb, code, k, 12, bits);
426 update_vlc_state(state, v);
/* Golomb-Rice decode one residual; exact mirror of put_vlc_symbol:
 * derive k from error_sum, read the code, undo the drift-based sign flip,
 * re-add the bias and fold back, then adapt the context state.
 * NOTE(review): the k-selection loop body, declarations and the return are
 * elided from this dump. */
429 static inline int get_vlc_symbol(GetBitContext *gb, VlcState * const state, int bits){
434 while(i < state->error_sum){ //FIXME optimize
441 v= get_sr_golomb(gb, k, 12, bits);
442 //printf("v:%d bias:%d error:%d drift:%d count:%d k:%d", v, state->bias, state->error_sum, state->drift, state->count, k);
/* undo encoder-side sign mapping */
445 if(k==0 && 2*state->drift <= - state->count) v ^= (-1);
447 v ^= ((2*state->drift + state->count)>>31);
450 ret= fold(v + state->bias, bits);
452 update_vlc_state(state, v);
453 //printf("final: %d\n", ret);
457 #if CONFIG_FFV1_ENCODER
/* Encode one line of samples for a plane: compute the context, code the
 * folded prediction residual with either the range coder (s->ac) or the
 * Golomb-Rice path, and in the VLC path run-length encode runs of zero
 * residuals in context 0 (run_mode/run_count/run_index state machine).
 * NOTE(review): this body is heavily elided in this dump — declarations,
 * sign handling of context/diff, the run-mode branches and loop closers
 * are missing; only the visible skeleton is annotated. */
458 static av_always_inline int encode_line(FFV1Context *s, int w,
460 int plane_index, int bits)
462 PlaneContext * const p= &s->plane[plane_index];
463 RangeCoder * const c= &s->c;
465 int run_index= s->run_index;
/* output-space guard, range-coder path */
470 if(c->bytestream_end - c->bytestream < w*20){
471 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
/* output-space guard, bit-writer path */
475 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < w*4){
476 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
484 context= get_context(p, sample[0]+x, sample[1]+x, sample[2]+x);
485 diff= sample[0][x] - predict(sample[0]+x, sample[1]+x);
492 diff= fold(diff, bits);
495 if(s->flags & CODEC_FLAG_PASS1){
496 put_symbol_inline(c, p->state[context], diff, 1, s->rc_stat, s->rc_stat2[p->quant_table_index][context]);
498 put_symbol_inline(c, p->state[context], diff, 1, NULL, NULL);
/* VLC path: enter run mode on context 0 */
501 if(context == 0) run_mode=1;
506 while(run_count >= 1<<ff_log2_run[run_index]){
507 run_count -= 1<<ff_log2_run[run_index];
509 put_bits(&s->pb, 1, 1);
512 put_bits(&s->pb, 1 + ff_log2_run[run_index], run_count);
513 if(run_index) run_index--;
522 // printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, (int)put_bits_count(&s->pb));
525 put_vlc_symbol(&s->pb, &p->vlc_state[context], diff, bits);
/* flush a pending run at end of line */
529 while(run_count >= 1<<ff_log2_run[run_index]){
530 run_count -= 1<<ff_log2_run[run_index];
532 put_bits(&s->pb, 1, 1);
536 put_bits(&s->pb, 1, 1);
538 s->run_index= run_index;
/* Encode one full plane: maintain a 2- or 3-line ring of int16 sample
 * rows (3 when the large context model needs the line two rows up), load
 * each source row (8-bit, or 16-bit packed at LSB/MSB depending on
 * packed_at_lsb), pad the edges, and call encode_line per row.
 * NOTE(review): the y/x loop headers and edge-padding lines are partially
 * elided from this dump. */
543 static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
545 const int ring_size= s->avctx->context_model ? 3 : 2;
549 memset(s->sample_buffer, 0, ring_size*(w+6)*sizeof(*s->sample_buffer));
552 for(i=0; i<ring_size; i++)
/* +3 margin on each side so context lookups at x-2..x+1 stay in bounds */
553 sample[i]= s->sample_buffer + (w+6)*((h+i-y)%ring_size) + 3;
555 sample[0][-1]= sample[1][0 ];
556 sample[1][ w]= sample[1][w-1];
558 if(s->bits_per_raw_sample<=8){
560 sample[0][x]= src[x + stride*y];
562 encode_line(s, w, sample, plane_index, 8);
564 if(s->packed_at_lsb){
566 sample[0][x]= ((uint16_t*)(src + stride*y))[x];
/* samples stored in the high bits: shift down to bits_per_raw_sample */
570 sample[0][x]= ((uint16_t*)(src + stride*y))[x] >> (16 - s->bits_per_raw_sample);
573 encode_line(s, w, sample, plane_index, s->bits_per_raw_sample);
575 //STOP_TIMER("encode line")}
/* Encode a packed RGB32 frame: unpack each pixel, apply the reversible
 * JPEG2000-style RCT colour transform (elided here), and run encode_line
 * per transformed component (plus alpha when transparency is set) with
 * 9-bit residuals.
 * NOTE(review): the pixel unpack/transform lines (orig. 594-609) are
 * elided from this dump. */
579 static void encode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
581 const int ring_size= s->avctx->context_model ? 3 : 2;
582 int16_t *sample[4][3];
585 memset(s->sample_buffer, 0, ring_size*4*(w+6)*sizeof(*s->sample_buffer));
588 for(i=0; i<ring_size; i++)
590 sample[p][i]= s->sample_buffer + p*ring_size*(w+6) + ((h+i-y)%ring_size)*(w+6) + 3;
593 unsigned v= src[x + stride*y];
605 // assert(g>=0 && b>=0 && r>=0);
606 // assert(g<256 && b<512 && r<512);
612 for(p=0; p<3 + s->transparency; p++){
613 sample[p][0][-1]= sample[p][1][0 ];
614 sample[p][1][ w]= sample[p][1][w-1];
615 encode_line(s, w, sample[p], (p+1)/2, 9);
620 static void write_quant_table(RangeCoder *c, int16_t *quant_table){
623 uint8_t state[CONTEXT_SIZE];
624 memset(state, 128, sizeof(state));
626 for(i=1; i<128 ; i++){
627 if(quant_table[i] != quant_table[i-1]){
628 put_symbol(c, state, i-last-1, 0);
632 put_symbol(c, state, i-last-1, 0);
635 static void write_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
638 write_quant_table(c, quant_table[i]);
/* Write the per-keyframe bitstream header into slice 0's range coder.
 * Version 0/1 carries the full global parameters inline; version 2 (when
 * <3) additionally enumerates per-slice geometry and quant table indices
 * (version >=3 moved this to the extradata header).
 * NOTE(review): the version branches and several closers are elided from
 * this dump. */
641 static void write_header(FFV1Context *f){
642 uint8_t state[CONTEXT_SIZE];
644 RangeCoder * const c= &f->slice_context[0]->c;
646 memset(state, 128, sizeof(state));
649 put_symbol(c, state, f->version, 0);
650 put_symbol(c, state, f->ac, 0);
/* custom state transition table, written as deltas vs builtin */
652 for(i=1; i<256; i++){
653 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
656 put_symbol(c, state, f->colorspace, 0); //YUV cs type
658 put_symbol(c, state, f->bits_per_raw_sample, 0);
659 put_rac(c, state, f->chroma_planes);
660 put_symbol(c, state, f->chroma_h_shift, 0);
661 put_symbol(c, state, f->chroma_v_shift, 0);
662 put_rac(c, state, f->transparency);
664 write_quant_tables(c, f->quant_table);
665 }else if(f->version < 3){
666 put_symbol(c, state, f->slice_count, 0);
667 for(i=0; i<f->slice_count; i++){
668 FFV1Context *fs= f->slice_context[i];
/* slice geometry coded as fractions of the slice grid */
669 put_symbol(c, state, (fs->slice_x +1)*f->num_h_slices / f->width , 0);
670 put_symbol(c, state, (fs->slice_y +1)*f->num_v_slices / f->height , 0);
671 put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
672 put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
673 for(j=0; j<f->plane_count; j++){
674 put_symbol(c, state, f->plane[j].quant_table_index, 0);
675 av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
680 #endif /* CONFIG_FFV1_ENCODER */
/* One-time init shared by encoder and decoder: copy flags/dimensions from
 * the AVCodecContext, set frame defaults and init the DSP context.
 * NOTE(review): several lines (including, presumably, the s->avctx
 * assignment and the return — TODO confirm) are elided from this dump. */
682 static av_cold int common_init(AVCodecContext *avctx){
683 FFV1Context *s = avctx->priv_data;
686 s->flags= avctx->flags;
688 avcodec_get_frame_defaults(&s->picture);
690 ff_dsputil_init(&s->dsp, avctx);
692 s->width = avctx->width;
693 s->height= avctx->height;
695 assert(s->width && s->height);
/* (Re)initialise one slice context from the master: copy plane layout,
 * lazily allocate the per-context range-coder states and VLC states, and
 * refresh the slice's range-coder transition tables from
 * f->state_transition.
 * NOTE(review): the context_count assignment and the NULL checks paired
 * with the ENOMEM returns are elided from this dump. */
704 static int init_slice_state(FFV1Context *f, FFV1Context *fs){
707 fs->plane_count= f->plane_count;
708 fs->transparency= f->transparency;
709 for(j=0; j<f->plane_count; j++){
710 PlaneContext * const p= &fs->plane[j];
/* lazy allocation: only on first init */
713 if(!p-> state) p-> state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t));
715 return AVERROR(ENOMEM);
717 if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(VlcState));
719 return AVERROR(ENOMEM);
724 //FIXME only redo if state_transition changed
725 for(j=1; j<256; j++){
726 fs->c.one_state [ j]= f->state_transition[j];
727 fs->c.zero_state[256-j]= 256-fs->c.one_state [j];
734 static int init_slices_state(FFV1Context *f){
736 for(i=0; i<f->slice_count; i++){
737 FFV1Context *fs= f->slice_context[i];
738 if(init_slice_state(f, fs) < 0)
/* Create one FFV1Context per slice as a memcpy of the master, then give
 * each its own geometry (computed from the slice grid), cleared 2-pass
 * stats and a private sample ring buffer.
 * NOTE(review): the MAX_SLICES bound check, slice_x/slice_y assignments
 * and the fs->slice_count reset are elided from this dump. */
744 static av_cold int init_slice_contexts(FFV1Context *f){
747 f->slice_count= f->num_h_slices * f->num_v_slices;
749 for(i=0; i<f->slice_count; i++){
750 FFV1Context *fs= av_mallocz(sizeof(*fs));
751 int sx= i % f->num_h_slices;
752 int sy= i / f->num_h_slices;
/* integer slice grid: start/end of this slice in pixels */
753 int sxs= f->avctx->width * sx / f->num_h_slices;
754 int sxe= f->avctx->width *(sx+1) / f->num_h_slices;
755 int sys= f->avctx->height* sy / f->num_v_slices;
756 int sye= f->avctx->height*(sy+1) / f->num_v_slices;
757 f->slice_context[i]= fs;
758 memcpy(fs, f, sizeof(*fs));
759 memset(fs->rc_stat2, 0, sizeof(fs->rc_stat2));
761 fs->slice_width = sxe - sxs;
762 fs->slice_height= sye - sys;
/* 3 lines x 4 components worst case (RGB path), +6 for edge margins */
766 fs->sample_buffer = av_malloc(3*4 * (fs->width+6) * sizeof(*fs->sample_buffer));
767 if (!fs->sample_buffer)
768 return AVERROR(ENOMEM);
773 static int allocate_initial_states(FFV1Context *f){
776 for(i=0; i<f->quant_table_count; i++){
777 f->initial_states[i]= av_malloc(f->context_count[i]*sizeof(*f->initial_states[i]));
778 if(!f->initial_states[i])
779 return AVERROR(ENOMEM);
780 memset(f->initial_states[i], 128, f->context_count[i]*sizeof(*f->initial_states[i]));
785 #if CONFIG_FFV1_ENCODER
/* Version >=2: serialise the global configuration record into
 * avctx->extradata (a private range coder): version, coder type + custom
 * state transitions, colourspace/bit depth/chroma layout, slice grid,
 * quant tables, optional non-default initial states, and (v3+) ec flag,
 * finished with a CRC32 over the whole record.
 * NOTE(review): several guards (version checks, ac==2 condition, the
 * extradata allocation failure check) are elided from this dump. */
786 static int write_extra_header(FFV1Context *f){
787 RangeCoder * const c= &f->c;
788 uint8_t state[CONTEXT_SIZE];
790 uint8_t state2[32][CONTEXT_SIZE];
793 memset(state2, 128, sizeof(state2));
794 memset(state, 128, sizeof(state));
/* worst-case extradata size for all tables/states */
796 f->avctx->extradata= av_malloc(f->avctx->extradata_size= 10000 + (11*11*5*5*5+11*11*11)*32);
797 ff_init_range_encoder(c, f->avctx->extradata, f->avctx->extradata_size);
798 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
800 put_symbol(c, state, f->version, 0);
802 put_symbol(c, state, f->minor_version, 0);
803 put_symbol(c, state, f->ac, 0);
805 for(i=1; i<256; i++){
806 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
809 put_symbol(c, state, f->colorspace, 0); //YUV cs type
810 put_symbol(c, state, f->bits_per_raw_sample, 0);
811 put_rac(c, state, f->chroma_planes);
812 put_symbol(c, state, f->chroma_h_shift, 0);
813 put_symbol(c, state, f->chroma_v_shift, 0);
814 put_rac(c, state, f->transparency);
815 put_symbol(c, state, f->num_h_slices-1, 0);
816 put_symbol(c, state, f->num_v_slices-1, 0);
818 put_symbol(c, state, f->quant_table_count, 0);
819 for(i=0; i<f->quant_table_count; i++)
820 write_quant_tables(c, f->quant_tables[i]);
/* write initial states only if any byte differs from the default 128 */
822 for(i=0; i<f->quant_table_count; i++){
823 for(j=0; j<f->context_count[i]*CONTEXT_SIZE; j++)
824 if(f->initial_states[i] && f->initial_states[i][0][j] != 128)
826 if(j<f->context_count[i]*CONTEXT_SIZE){
827 put_rac(c, state, 1);
828 for(j=0; j<f->context_count[i]; j++){
829 for(k=0; k<CONTEXT_SIZE; k++){
/* delta-code each state byte against the previous context's */
830 int pred= j ? f->initial_states[i][j-1][k] : 128;
831 put_symbol(c, state2[k], (int8_t)(f->initial_states[i][j][k]-pred), 1);
835 put_rac(c, state, 0);
840 put_symbol(c, state, f->ec, 0);
843 f->avctx->extradata_size= ff_rac_terminate(c);
844 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, f->avctx->extradata, f->avctx->extradata_size);
845 AV_WL32(f->avctx->extradata + f->avctx->extradata_size, v);
846 f->avctx->extradata_size += 4;
/* 2-pass helper: greedily swap nearby range-coder states (and their
 * collected statistics) when exchanging them reduces the estimated total
 * code size; keeps the table an involution by mirroring each swap at
 * 256-i. Returns whether anything changed (per the `changed` local).
 * NOTE(review): loop scaffolding, the swap bookkeeping after FFSWAPs and
 * the return are elided from this dump. */
851 static int sort_stt(FFV1Context *s, uint8_t stt[256]){
852 int i,i2,changed,print=0;
/* only consider states away from the table edges and near each other */
856 for(i=12; i<244; i++){
857 for(i2=i+1; i2<245 && i2<i+4; i2++){
858 #define COST(old, new) \
859 s->rc_stat[old][0]*-log2((256-(new))/256.0)\
860 +s->rc_stat[old][1]*-log2( (new) /256.0)
862 #define COST2(old, new) \
864 +COST(256-(old), 256-(new))
866 double size0= COST2(i, i ) + COST2(i2, i2);
867 double sizeX= COST2(i, i2) + COST2(i2, i );
/* never move state 128 (the neutral probability) */
868 if(sizeX < size0 && i!=128 && i2!=128){
870 FFSWAP(int, stt[ i], stt[ i2]);
871 FFSWAP(int, s->rc_stat[i ][0],s->rc_stat[ i2][0]);
872 FFSWAP(int, s->rc_stat[i ][1],s->rc_stat[ i2][1]);
874 FFSWAP(int, stt[256-i], stt[256-i2]);
875 FFSWAP(int, s->rc_stat[256-i][0],s->rc_stat[256-i2][0]);
876 FFSWAP(int, s->rc_stat[256-i][1],s->rc_stat[256-i2][1]);
/* retarget transitions that pointed at the swapped states */
878 for(j=1; j<256; j++){
879 if (stt[j] == i ) stt[j] = i2;
880 else if(stt[j] == i2) stt[j] = i ;
882 if (stt[256-j] == 256-i ) stt[256-j] = 256-i2;
883 else if(stt[256-j] == 256-i2) stt[256-j] = 256-i ;
/* Encoder init: choose version/coder from flags and options, validate the
 * pixel format and bit depth, build the default quant tables and plane
 * contexts, allocate initial states, parse optional 2-pass stats to derive
 * a better state-transition table and initial states, set up the slice
 * grid and contexts, and (v2+) write the extradata header.
 * NOTE(review): this body is VERY heavily elided in this dump — version-3
 * option handling, colorspace branches, slice-count defaults, error
 * returns and many closers are missing. Only the visible skeleton is
 * annotated; do not treat it as complete. */
894 static av_cold int encode_init(AVCodecContext *avctx)
896 FFV1Context *s = avctx->priv_data;
/* multi-pass or multi-slice needs at least version 2 */
903 if((avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) || avctx->slices>1)
904 s->version = FFMAX(s->version, 2);
906 if(avctx->level == 3){
911 s->ec = (s->version >= 3);
914 if(s->version >= 2 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
915 av_log(avctx, AV_LOG_ERROR, "Version 2 needed for requested features but version 2 is experimental and not enabled\n");
916 return AVERROR_INVALIDDATA;
919 s->ac= avctx->coder_type > 0 ? 2 : 0;
922 switch(avctx->pix_fmt){
923 case PIX_FMT_YUV444P9:
924 case PIX_FMT_YUV422P9:
925 case PIX_FMT_YUV420P9:
926 if (!avctx->bits_per_raw_sample)
927 s->bits_per_raw_sample = 9;
928 case PIX_FMT_YUV444P10:
929 case PIX_FMT_YUV420P10:
930 case PIX_FMT_YUV422P10:
931 s->packed_at_lsb = 1;
932 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
933 s->bits_per_raw_sample = 10;
935 case PIX_FMT_YUV444P16:
936 case PIX_FMT_YUV422P16:
937 case PIX_FMT_YUV420P16:
938 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) {
939 s->bits_per_raw_sample = 16;
940 } else if (!s->bits_per_raw_sample){
941 s->bits_per_raw_sample = avctx->bits_per_raw_sample;
943 if(s->bits_per_raw_sample <=8){
944 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n");
945 return AVERROR_INVALIDDATA;
/* >8 bit requires the range coder */
947 if(!s->ac && avctx->coder_type == -1) {
948 av_log(avctx, AV_LOG_INFO, "bits_per_raw_sample > 8, forcing coder 1\n");
952 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample of more than 8 needs -coder 1 currently\n");
953 return AVERROR_INVALIDDATA;
955 s->version= FFMAX(s->version, 1);
957 case PIX_FMT_YUV444P:
958 case PIX_FMT_YUV440P:
959 case PIX_FMT_YUV422P:
960 case PIX_FMT_YUV420P:
961 case PIX_FMT_YUV411P:
962 case PIX_FMT_YUV410P:
963 s->chroma_planes= av_pix_fmt_descriptors[avctx->pix_fmt].nb_components < 3 ? 0 : 1;
966 case PIX_FMT_YUVA444P:
967 case PIX_FMT_YUVA422P:
968 case PIX_FMT_YUVA420P:
981 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
982 return AVERROR_INVALIDDATA;
984 if (s->transparency) {
985 av_log(avctx, AV_LOG_WARNING, "Storing alpha plane, this will require a recent FFV1 decoder to playback!\n");
987 if (avctx->context_model > 1U) {
988 av_log(avctx, AV_LOG_ERROR, "Invalid context model %d, valid values are 0 and 1\n", avctx->context_model);
989 return AVERROR(EINVAL);
994 s->state_transition[i]=ver2_state[i];
/* build the two default quant table sets (small: 3 inputs, large: 5) */
996 for(i=0; i<256; i++){
997 s->quant_table_count=2;
998 if(s->bits_per_raw_sample <=8){
999 s->quant_tables[0][0][i]= quant11[i];
1000 s->quant_tables[0][1][i]= 11*quant11[i];
1001 s->quant_tables[0][2][i]= 11*11*quant11[i];
1002 s->quant_tables[1][0][i]= quant11[i];
1003 s->quant_tables[1][1][i]= 11*quant11[i];
1004 s->quant_tables[1][2][i]= 11*11*quant5 [i];
1005 s->quant_tables[1][3][i]= 5*11*11*quant5 [i];
1006 s->quant_tables[1][4][i]= 5*5*11*11*quant5 [i];
1008 s->quant_tables[0][0][i]= quant9_10bit[i];
1009 s->quant_tables[0][1][i]= 11*quant9_10bit[i];
1010 s->quant_tables[0][2][i]= 11*11*quant9_10bit[i];
1011 s->quant_tables[1][0][i]= quant9_10bit[i];
1012 s->quant_tables[1][1][i]= 11*quant9_10bit[i];
1013 s->quant_tables[1][2][i]= 11*11*quant5_10bit[i];
1014 s->quant_tables[1][3][i]= 5*11*11*quant5_10bit[i];
1015 s->quant_tables[1][4][i]= 5*5*11*11*quant5_10bit[i];
/* contexts are symmetric, so only half the product is needed */
1018 s->context_count[0]= (11*11*11+1)/2;
1019 s->context_count[1]= (11*11*5*5*5+1)/2;
1020 memcpy(s->quant_table, s->quant_tables[avctx->context_model], sizeof(s->quant_table));
1022 for(i=0; i<s->plane_count; i++){
1023 PlaneContext * const p= &s->plane[i];
1025 memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table));
1026 p->quant_table_index= avctx->context_model;
1027 p->context_count= s->context_count[p->quant_table_index];
1030 if(allocate_initial_states(s) < 0)
1031 return AVERROR(ENOMEM);
1033 avctx->coded_frame= &s->picture;
1034 if(!s->transparency)
1036 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
1038 s->picture_number=0;
1040 if(avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
1041 for(i=0; i<s->quant_table_count; i++){
1042 s->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*s->rc_stat2[i]));
1044 return AVERROR(ENOMEM);
/* pass 2: parse the stats file produced by pass 1 */
1047 if(avctx->stats_in){
1048 char *p= avctx->stats_in;
1049 uint8_t best_state[256][256];
1053 av_assert0(s->version>=2);
1056 for(j=0; j<256; j++){
1058 s->rc_stat[j][i]= strtol(p, &next, 0);
1060 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d [%s]\n", j,i,p);
1066 for(i=0; i<s->quant_table_count; i++){
1067 for(j=0; j<s->context_count[i]; j++){
1068 for(k=0; k<32; k++){
1070 s->rc_stat2[i][j][k][m]= strtol(p, &next, 0);
1072 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d %d %d [%s]\n", i,j,k,m,p);
1073 return AVERROR_INVALIDDATA;
1080 gob_count= strtol(p, &next, 0);
1081 if(next==p || gob_count <0){
1082 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid\n");
1083 return AVERROR_INVALIDDATA;
1086 while(*p=='\n' || *p==' ') p++;
1089 sort_stt(s, s->state_transition);
1091 find_best_state(best_state, s->state_transition);
/* derive per-context initial states from the measured statistics */
1093 for(i=0; i<s->quant_table_count; i++){
1094 for(j=0; j<s->context_count[i]; j++){
1095 for(k=0; k<32; k++){
1097 if(s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]){
1098 p=256.0*s->rc_stat2[i][j][k][1] / (s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]);
1100 s->initial_states[i][j][k]= best_state[av_clip(round(p), 1, 255)][av_clip((s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1])/gob_count, 0, 255)];
/* search a supported slice-grid factorisation for -slices */
1107 for(s->num_v_slices=2; s->num_v_slices<9; s->num_v_slices++){
1108 for(s->num_h_slices=s->num_v_slices; s->num_h_slices<2*s->num_v_slices; s->num_h_slices++){
1109 if(avctx->slices == s->num_h_slices * s->num_v_slices && avctx->slices <= 64)
1113 av_log(avctx, AV_LOG_ERROR, "Unsupported number %d of slices requested, please specify a supported number with -slices (ex:4,6,9,12,16, ...)\n", avctx->slices);
1116 write_extra_header(s);
1119 if(init_slice_contexts(s) < 0)
1121 if(init_slices_state(s) < 0)
1124 #define STATS_OUT_SIZE 1024*1024*6
1125 if(avctx->flags & CODEC_FLAG_PASS1){
1126 avctx->stats_out= av_mallocz(STATS_OUT_SIZE);
1127 for(i=0; i<s->quant_table_count; i++){
1128 for(j=0; j<s->slice_count; j++){
1129 FFV1Context *sf= s->slice_context[j];
1130 av_assert0(!sf->rc_stat2[i]);
/* per-slice private stat buffers so slices can run in parallel */
1131 sf->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*sf->rc_stat2[i]));
1132 if(!sf->rc_stat2[i])
1133 return AVERROR(ENOMEM);
1140 #endif /* CONFIG_FFV1_ENCODER */
1143 static void clear_slice_state(FFV1Context *f, FFV1Context *fs){
1146 for(i=0; i<f->plane_count; i++){
1147 PlaneContext *p= &fs->plane[i];
1149 p->interlace_bit_state[0]= 128;
1150 p->interlace_bit_state[1]= 128;
1153 if(f->initial_states[p->quant_table_index]){
1154 memcpy(p->state, f->initial_states[p->quant_table_index], CONTEXT_SIZE*p->context_count);
1156 memset(p->state, 128, CONTEXT_SIZE*p->context_count);
1158 for(j=0; j<p->context_count; j++){
1159 p->vlc_state[j].drift= 0;
1160 p->vlc_state[j].error_sum= 4; //FFMAX((RANGE + 32)/64, 2);
1161 p->vlc_state[j].bias= 0;
1162 p->vlc_state[j].count= 1;
1168 #if CONFIG_FFV1_ENCODER
1170 static void encode_slice_header(FFV1Context *f, FFV1Context *fs){
1171 RangeCoder *c = &fs->c;
1172 uint8_t state[CONTEXT_SIZE];
1174 memset(state, 128, sizeof(state));
1176 put_symbol(c, state, (fs->slice_x +1)*f->num_h_slices / f->width , 0);
1177 put_symbol(c, state, (fs->slice_y +1)*f->num_v_slices / f->height , 0);
1178 put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
1179 put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
1180 for(j=0; j<f->plane_count; j++){
1181 put_symbol(c, state, f->plane[j].quant_table_index, 0);
1182 av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
1184 if(!f->picture.interlaced_frame) put_symbol(c, state, 3, 0);
1185 else put_symbol(c, state, 1 + !f->picture.top_field_first, 0);
1186 put_symbol(c, state, f->picture.sample_aspect_ratio.num, 0);
1187 put_symbol(c, state, f->picture.sample_aspect_ratio.den, 0);
/* Thread worker: encode one slice. Clears adaptive state (on keyframes —
 * the guard is elided here), writes the slice header (v2+), switches from
 * range coder to the bit writer for the Golomb path, then encodes the
 * planar (colorspace 0) or RGB path for the slice's rectangle.
 * NOTE(review): the x/y slice-offset declarations, keyframe guard and the
 * return are elided from this dump. */
1190 static int encode_slice(AVCodecContext *c, void *arg){
1191 FFV1Context *fs= *(void**)arg;
1192 FFV1Context *f= fs->avctx->priv_data;
1193 int width = fs->slice_width;
1194 int height= fs->slice_height;
1197 AVFrame * const p= &f->picture;
/* bytes per sample in the source picture */
1198 const int ps= (f->bits_per_raw_sample>8)+1;
1201 clear_slice_state(f, fs);
1203 encode_slice_header(f, fs);
/* golomb path: terminate the range coder and continue with put_bits */
1206 fs->ac_byte_count = f->version > 2 || (!x&&!y) ? ff_rac_terminate(&fs->c) : 0;
1207 init_put_bits(&fs->pb, fs->c.bytestream_start + fs->ac_byte_count, fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count);
1210 if(f->colorspace==0){
1211 const int chroma_width = -((-width )>>f->chroma_h_shift);
1212 const int chroma_height= -((-height)>>f->chroma_v_shift);
1213 const int cx= x>>f->chroma_h_shift;
1214 const int cy= y>>f->chroma_v_shift;
1216 encode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1218 if (f->chroma_planes){
1219 encode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1220 encode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1222 if (fs->transparency)
1223 encode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
1225 encode_rgb_frame(fs, (uint32_t*)(p->data[0]) + ps*x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
/* Top-level frame encoder: allocate the packet, code the keyframe flag,
 * write the global header on keyframes, run encode_slice over all slice
 * contexts via avctx->execute, then pack the slice bitstreams back to
 * back (each trailed by a 24-bit length and, with ec, a CRC32). In pass 1
 * it also aggregates and prints the per-slice statistics every 32 frames.
 * NOTE(review): this body is heavily elided in this dump — declarations
 * (ret, i/j/k/m, used_count, buf_p, state, bytes), the keyframe
 * bookkeeping, buf_p advancement and the final return are missing. */
1232 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1233 const AVFrame *pict, int *got_packet)
1235 FFV1Context *f = avctx->priv_data;
1236 RangeCoder * const c= &f->slice_context[0]->c;
1237 AVFrame * const p= &f->picture;
1239 uint8_t keystate=128;
/* worst-case packet size */
1243 if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width*avctx->height*((8*2+1+1)*4)/8
1244 + FF_MIN_BUFFER_SIZE)) < 0)
1247 ff_init_range_encoder(c, pkt->data, pkt->size);
1248 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1251 p->pict_type= AV_PICTURE_TYPE_I;
1253 if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
1254 put_rac(c, &keystate, 1);
1259 put_rac(c, &keystate, 0);
/* refresh slice-0 coder transition tables from the custom table */
1265 for(i=1; i<256; i++){
1266 c->one_state[i]= f->state_transition[i];
1267 c->zero_state[256-i]= 256-c->one_state[i];
/* give every slice a private region of the packet to encode into */
1271 for(i=1; i<f->slice_count; i++){
1272 FFV1Context *fs= f->slice_context[i];
1273 uint8_t *start = pkt->data + (pkt->size-used_count)*i/f->slice_count;
1274 int len = pkt->size/f->slice_count;
1276 ff_init_range_encoder(&fs->c, start, len);
1278 avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
/* compact the slice outputs into one contiguous bitstream */
1281 for(i=0; i<f->slice_count; i++){
1282 FFV1Context *fs= f->slice_context[i];
1287 put_rac(&fs->c, &state, 0);
1288 bytes= ff_rac_terminate(&fs->c);
1290 flush_put_bits(&fs->pb); //nicer padding FIXME
1291 bytes= fs->ac_byte_count + (put_bits_count(&fs->pb)+7)/8;
1293 if(i>0 || f->version>2){
1294 av_assert0(bytes < pkt->size/f->slice_count);
1295 memmove(buf_p, fs->c.bytestream_start, bytes);
1296 av_assert0(bytes < (1<<24));
1297 AV_WB24(buf_p+bytes, bytes);
/* error-detection: CRC over the slice payload */
1303 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, bytes);
1304 AV_WL32(buf_p + bytes, v); bytes += 4;
/* pass 1: dump aggregated statistics into avctx->stats_out */
1309 if((avctx->flags&CODEC_FLAG_PASS1) && (f->picture_number&31)==0){
1311 char *p= avctx->stats_out;
1312 char *end= p + STATS_OUT_SIZE;
1314 memset(f->rc_stat, 0, sizeof(f->rc_stat));
1315 for(i=0; i<f->quant_table_count; i++)
1316 memset(f->rc_stat2[i], 0, f->context_count[i]*sizeof(*f->rc_stat2[i]));
1318 for(j=0; j<f->slice_count; j++){
1319 FFV1Context *fs= f->slice_context[j];
1320 for(i=0; i<256; i++){
1321 f->rc_stat[i][0] += fs->rc_stat[i][0];
1322 f->rc_stat[i][1] += fs->rc_stat[i][1];
1324 for(i=0; i<f->quant_table_count; i++){
1325 for(k=0; k<f->context_count[i]; k++){
1326 for(m=0; m<32; m++){
1327 f->rc_stat2[i][k][m][0] += fs->rc_stat2[i][k][m][0];
1328 f->rc_stat2[i][k][m][1] += fs->rc_stat2[i][k][m][1];
1334 for(j=0; j<256; j++){
1335 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat[j][0], f->rc_stat[j][1]);
1338 snprintf(p, end-p, "\n");
1340 for(i=0; i<f->quant_table_count; i++){
1341 for(j=0; j<f->context_count[i]; j++){
1342 for(m=0; m<32; m++){
1343 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat2[i][j][m][0], f->rc_stat2[i][j][m][1]);
1348 snprintf(p, end-p, "%d\n", f->gob_count);
1349 } else if(avctx->flags&CODEC_FLAG_PASS1)
1350 avctx->stats_out[0] = '\0';
1352 f->picture_number++;
1353 pkt->size = buf_p - pkt->data;
1354 pkt->flags |= AV_PKT_FLAG_KEY*p->key_frame;
1359 #endif /* CONFIG_FFV1_ENCODER */
/* Shared encoder/decoder teardown: release the reference picture (decoder
 * only), then free all per-slice, per-plane, and per-quant-table
 * allocations owned by the context. */
1361 static av_cold int common_end(AVCodecContext *avctx){
1362 FFV1Context *s = avctx->priv_data;
/* Only the decoder holds a frame buffer that must be released here. */
1365 if (avctx->codec->decode && s->picture.data[0])
1366 avctx->release_buffer(avctx, &s->picture);
1368 for(j=0; j<s->slice_count; j++){
1369 FFV1Context *fs= s->slice_context[j];
1370 for(i=0; i<s->plane_count; i++){
1371 PlaneContext *p= &fs->plane[i];
1373 av_freep(&p->state);
1374 av_freep(&p->vlc_state);
1376 av_freep(&fs->sample_buffer);
1379 av_freep(&avctx->stats_out);
1380 for(j=0; j<s->quant_table_count; j++){
1381 av_freep(&s->initial_states[j]);
/* rc_stat2[j] is allocated per slice context and per main context. */
1382 for(i=0; i<s->slice_count; i++){
1383 FFV1Context *sf= s->slice_context[i];
1384 av_freep(&sf->rc_stat2[j]);
1386 av_freep(&s->rc_stat2[j]);
1389 for(i=0; i<s->slice_count; i++){
1390 av_freep(&s->slice_context[i]);
/* Decode one line of w samples into sample[1], using sample[0] as the
 * previous line for context modelling and prediction.  Handles both the
 * range-coder path (get_symbol_inline) and the Golomb/VLC path with
 * run-length coding (run_mode/run_count).
 * NOTE(review): lines are elided in this listing; the range-coder vs VLC
 * branch structure is only partially visible. */
1396 static av_always_inline void decode_line(FFV1Context *s, int w,
1398 int plane_index, int bits)
1400 PlaneContext * const p= &s->plane[plane_index];
1401 RangeCoder * const c= &s->c;
1405 int run_index= s->run_index;
1408 int diff, context, sign;
/* Only two line buffers are kept, so sample[1] doubles as "two lines
 * up" for the context computation. */
1410 context= get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
1417 av_assert2(context < p->context_count);
1420 diff= get_symbol_inline(c, p->state[context], 1);
/* Context 0 (flat neighbourhood) switches the VLC path into run mode. */
1422 if(context == 0 && run_mode==0) run_mode=1;
1425 if(run_count==0 && run_mode==1){
1426 if(get_bits1(&s->gb)){
/* Full run of 2^log2_run[run_index]; only bump the index if the run
 * fits in the remainder of the line. */
1427 run_count = 1<<ff_log2_run[run_index];
1428 if(x + run_count <= w) run_index++;
1430 if(ff_log2_run[run_index]) run_count = get_bits(&s->gb, ff_log2_run[run_index]);
1432 if(run_index) run_index--;
1440 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1445 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1447 // printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, get_bits_count(&s->gb));
1450 if(sign) diff= -diff;
/* Reconstruct: median-predict from neighbours, add residual, wrap to
 * the sample bit depth. */
1452 sample[1][x]= (predict(sample[1] + x, sample[0] + x) + diff) & ((1<<bits)-1);
1454 s->run_index= run_index;
/* Decode a whole plane: maintains two rolling line buffers (with a 3-sample
 * guard border on each side), calls decode_line per row, and stores the
 * result as 8-bit or 16-bit samples depending on bits_per_raw_sample. */
1457 static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
/* +3 skips the left guard samples of each buffered line. */
1460 sample[0]=s->sample_buffer +3;
1461 sample[1]=s->sample_buffer+w+6+3;
1465 memset(s->sample_buffer, 0, 2*(w+6)*sizeof(*s->sample_buffer));
/* Swap the two line buffers: previous "current" becomes "above". */
1468 int16_t *temp = sample[0]; //FIXME try a normal buffer
1470 sample[0]= sample[1];
/* Edge extension for the predictor at the line borders. */
1473 sample[1][-1]= sample[0][0 ];
1474 sample[0][ w]= sample[0][w-1];
1477 if(s->avctx->bits_per_raw_sample <= 8){
1478 decode_line(s, w, sample, plane_index, 8);
1480 src[x + stride*y]= sample[1][x];
1483 decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
1484 if(s->packed_at_lsb){
1486 ((uint16_t*)(src + stride*y))[x]= sample[1][x];
/* Otherwise left-align the sample in the 16-bit container. */
1490 ((uint16_t*)(src + stride*y))[x]= sample[1][x] << (16 - s->avctx->bits_per_raw_sample);
1494 //STOP_TIMER("decode-line")}
/* Decode an RGB(A) frame: four planes (G, B, R, A) each with a two-line
 * rolling buffer, decoded at 9 bits (JPEG-LS style inter-plane
 * decorrelation leaves residuals one bit wider), then recombined into
 * packed 32-bit pixels. */
1498 static void decode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
1500 int16_t *sample[4][2];
1502 sample[x][0] = s->sample_buffer + x*2 *(w+6) + 3;
1503 sample[x][1] = s->sample_buffer + (x*2+1)*(w+6) + 3;
1508 memset(s->sample_buffer, 0, 8*(w+6)*sizeof(*s->sample_buffer));
/* 3 colour planes, plus alpha when transparency is signalled. */
1511 for(p=0; p<3 + s->transparency; p++){
1512 int16_t *temp = sample[p][0]; //FIXME try a normal buffer
1514 sample[p][0]= sample[p][1];
1517 sample[p][1][-1]= sample[p][0][0 ];
1518 sample[p][0][ w]= sample[p][0][w-1];
/* (p+1)/2 maps planes {G,B,R,A} to plane_index {0,1,1,2}. */
1519 decode_line(s, w, sample[p], (p+1)/2, 9);
1522 int g= sample[0][1][x];
1523 int b= sample[1][1][x];
1524 int r= sample[2][1][x];
1525 int a= sample[3][1][x];
1527 // assert(g>=0 && b>=0 && r>=0);
1528 // assert(g<256 && b<512 && r<512);
1536 src[x + stride*y]= b + (g<<8) + (r<<16) + (a<<24);
/* Parse a version-3+ per-slice header from the slice's range coder:
 * slice geometry (encoded as fractions of the slice grid), per-plane
 * quant table indices, interlacing flags and sample aspect ratio.
 * Returns < 0 on invalid data (visible error paths elided in this
 * listing).
 * FIX: the quant table index bound check below used '>' which let
 * idx == quant_table_count through, reading one entry past the end of
 * f->quant_tables[] / f->context_count[]; valid indices are
 * 0..quant_table_count-1, so the check must be '>='. */
1541 static int decode_slice_header(FFV1Context *f, FFV1Context *fs){
1542 RangeCoder *c = &fs->c;
1543 uint8_t state[CONTEXT_SIZE];
1544 unsigned ps, i, context_count;
1545 memset(state, 128, sizeof(state));
1547 av_assert0(f->version > 2);
/* Geometry is coded in grid units; converted to pixels below. */
1549 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1550 fs->slice_y = get_symbol(c, state, 0) *f->height;
1551 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1552 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
1554 fs->slice_x /= f->num_h_slices;
1555 fs->slice_y /= f->num_v_slices;
1556 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1557 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
/* Reject slices that exceed or overflow the frame dimensions; the
 * uint64_t cast avoids integer overflow in the sum. */
1558 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1560 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1561 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1564 for(i=0; i<f->plane_count; i++){
1565 PlaneContext * const p= &fs->plane[i];
1566 int idx=get_symbol(c, state, 0);
/* '>=' (was '>'): idx == quant_table_count is out of range.  The
 * unsigned comparison also rejects negative idx. */
1567 if(idx >= (unsigned)f->quant_table_count){
1568 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
1571 p->quant_table_index= idx;
1572 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
1573 context_count= f->context_count[idx];
/* Growing the context count invalidates the per-context state arrays;
 * they are reallocated lazily elsewhere. */
1575 if(p->context_count < context_count){
1576 av_freep(&p->state);
1577 av_freep(&p->vlc_state);
1579 p->context_count= context_count;
/* ps: picture structure (interlacing) flag — branches elided here. */
1582 ps = get_symbol(c, state, 0);
1584 f->picture.interlaced_frame = 1;
1585 f->picture.top_field_first = 1;
1587 f->picture.interlaced_frame = 1;
1588 f->picture.top_field_first = 0;
1590 f->picture.interlaced_frame = 0;
1592 f->picture.sample_aspect_ratio.num = get_symbol(c, state, 0);
1593 f->picture.sample_aspect_ratio.den = get_symbol(c, state, 0);
/* Thread worker: decode one slice.  Parses the slice header (v3+),
 * resets contexts on keyframes, positions the Golomb bit reader after
 * the range-coded portion, then decodes either the planar YUV path or
 * the RGB path depending on colorspace. */
1598 static int decode_slice(AVCodecContext *c, void *arg){
1599 FFV1Context *fs= *(void**)arg;
1600 FFV1Context *f= fs->avctx->priv_data;
1601 int width, height, x, y;
/* ps: bytes per sample in the destination (1 or 2). */
1602 const int ps= (c->bits_per_raw_sample>8)+1;
1603 AVFrame * const p= &f->picture;
1606 if(init_slice_state(f, fs) < 0)
1607 return AVERROR(ENOMEM);
1608 if(decode_slice_header(f, fs) < 0)
1609 return AVERROR_INVALIDDATA;
/* Re-init after the header may have changed the context counts. */
1611 if(init_slice_state(f, fs) < 0)
1612 return AVERROR(ENOMEM);
1613 if(f->picture.key_frame)
1614 clear_slice_state(f, fs);
1615 width = fs->slice_width;
1616 height= fs->slice_height;
/* VLC coding starts right after the range-coded bytes; slice 0 of v<=2
 * streams has no separate header, hence the (!x&&!y) special case. */
1621 fs->ac_byte_count = f->version > 2 || (!x&&!y) ? fs->c.bytestream - fs->c.bytestream_start - 1 : 0;
1622 init_get_bits(&fs->gb,
1623 fs->c.bytestream_start + fs->ac_byte_count,
1624 (fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count) * 8);
1627 av_assert1(width && height);
1628 if(f->colorspace==0){
/* Ceiling division by the chroma shift for subsampled plane sizes. */
1629 const int chroma_width = -((-width )>>f->chroma_h_shift);
1630 const int chroma_height= -((-height)>>f->chroma_v_shift);
1631 const int cx= x>>f->chroma_h_shift;
1632 const int cy= y>>f->chroma_v_shift;
1633 decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1635 if (f->chroma_planes){
1636 decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1637 decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1639 if (fs->transparency)
1640 decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
1642 decode_rgb_frame(fs, (uint32_t*)p->data[0] + ps*x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
/* Read one run-length coded quantization table half (entries 0..127),
 * then mirror it anti-symmetrically into entries 128..255.  Returns the
 * scaled context multiplier (return path elided in this listing), or -1
 * on an invalid run length. */
1650 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale){
1653 uint8_t state[CONTEXT_SIZE];
1655 memset(state, 128, sizeof(state));
/* v is the current quant level; each coded run of 'len' entries shares
 * that level (loop over i elided in this listing). */
1657 for(v=0; i<128 ; v++){
1658 unsigned len= get_symbol(c, state, 0) + 1;
1660 if(len > 128 - i) return -1;
1663 quant_table[i] = scale*v;
1666 //if(i%16==0) printf("\n");
/* Second half is the negated mirror of the first. */
1670 for(i=1; i<128; i++){
1671 quant_table[256-i]= -quant_table[i];
1673 quant_table[128]= -quant_table[127];
/* Read all MAX_CONTEXT_INPUTS quant tables; the context count is the
 * product of each table's level count, capped at 32768.  Returns
 * (context_count+1)/2 since contexts come in symmetric +/- pairs. */
1678 static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
1680 int context_count=1;
1683 context_count*= read_quant_table(c, quant_table[i], context_count);
1684 if(context_count > 32768U){
1688 return (context_count+1)/2;
/* Parse the global (extradata) header of version >= 2 streams: version,
 * coder type and state-transition table, colorspace/bit depth/chroma
 * layout, slice grid, quant tables, optional initial context states,
 * error-check mode, and (v3+) a trailing CRC over the extradata. */
1691 static int read_extra_header(FFV1Context *f){
1692 RangeCoder * const c= &f->c;
1693 uint8_t state[CONTEXT_SIZE];
/* One state vector per CONTEXT_SIZE slot for the initial-state deltas. */
1695 uint8_t state2[32][CONTEXT_SIZE];
1697 memset(state2, 128, sizeof(state2));
1698 memset(state, 128, sizeof(state));
1700 ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size);
1701 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1703 f->version= get_symbol(c, state, 0);
1704 if(f->version > 2) {
/* Last 4 extradata bytes are the CRC, not coder payload. */
1705 c->bytestream_end -= 4;
1706 f->minor_version= get_symbol(c, state, 0);
1708 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
/* Custom range-coder state transition table, coded as deltas from the
 * default table. */
1710 for(i=1; i<256; i++){
1711 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1714 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1715 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1716 f->chroma_planes= get_rac(c, state);
1717 f->chroma_h_shift= get_symbol(c, state, 0);
1718 f->chroma_v_shift= get_symbol(c, state, 0);
1719 f->transparency= get_rac(c, state);
1720 f->plane_count= 2 + f->transparency;
1721 f->num_h_slices= 1 + get_symbol(c, state, 0);
1722 f->num_v_slices= 1 + get_symbol(c, state, 0);
/* A slice must cover at least one pixel in each dimension. */
1723 if(f->num_h_slices > (unsigned)f->width || f->num_v_slices > (unsigned)f->height){
1724 av_log(f->avctx, AV_LOG_ERROR, "too many slices\n");
1728 f->quant_table_count= get_symbol(c, state, 0);
1729 if(f->quant_table_count > (unsigned)MAX_QUANT_TABLES)
1731 for(i=0; i<f->quant_table_count; i++){
1732 if((f->context_count[i]= read_quant_tables(c, f->quant_tables[i])) < 0){
1733 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1738 if(allocate_initial_states(f) < 0)
1739 return AVERROR(ENOMEM);
/* Optional per-table initial context states, delta-coded against the
 * previous context (128 for the first). */
1741 for(i=0; i<f->quant_table_count; i++){
1742 if(get_rac(c, state)){
1743 for(j=0; j<f->context_count[i]; j++){
1744 for(k=0; k<CONTEXT_SIZE; k++){
1745 int pred= j ? f->initial_states[i][j-1][k] : 128;
1746 f->initial_states[i][j][k]= (pred+get_symbol(c, state2[k], 1))&0xFF;
1753 f->ec = get_symbol(c, state, 0);
/* v3+: verify the CRC of the whole extradata (must come out 0). */
1758 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, f->avctx->extradata, f->avctx->extradata_size);
1760 av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", v);
1761 return AVERROR_INVALIDDATA;
/* Parse the per-keyframe header.  For version <= 1 the full parameter set
 * is in-band; for later versions it comes from the extradata header.
 * Maps colorspace/bit-depth/chroma-shift onto a pix_fmt, determines the
 * slice count (counting backwards through the slice trailers for v3+),
 * and fills in per-slice geometry and quant tables for v2.
 * FIX: the v2 per-plane quant table index check used '>' which let
 * idx == quant_table_count through, indexing one past the end of
 * f->quant_tables[] / f->context_count[]; it must be '>='. */
1768 static int read_header(FFV1Context *f){
1769 uint8_t state[CONTEXT_SIZE];
1770 int i, j, context_count;
1771 RangeCoder * const c= &f->slice_context[0]->c;
1773 memset(state, 128, sizeof(state));
1776 unsigned v= get_symbol(c, state, 0);
1778 av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
1779 return AVERROR_INVALIDDATA;
1782 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
/* Custom state-transition table, delta-coded against the default. */
1784 for(i=1; i<256; i++){
1785 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1788 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1790 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1791 f->chroma_planes= get_rac(c, state);
1792 f->chroma_h_shift= get_symbol(c, state, 0);
1793 f->chroma_v_shift= get_symbol(c, state, 0);
1794 f->transparency= get_rac(c, state);
1795 f->plane_count= 2 + f->transparency;
/* Map (colorspace, depth, chroma shifts, alpha) to a pix_fmt; the
 * switch key packs h_shift in the high nibble, v_shift in the low. */
1798 if(f->colorspace==0){
1799 if(!f->transparency && !f->chroma_planes){
1800 if (f->avctx->bits_per_raw_sample<=8)
1801 f->avctx->pix_fmt= PIX_FMT_GRAY8;
1803 f->avctx->pix_fmt= PIX_FMT_GRAY16;
1804 }else if(f->avctx->bits_per_raw_sample<=8 && !f->transparency){
1805 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1806 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P; break;
1807 case 0x01: f->avctx->pix_fmt= PIX_FMT_YUV440P; break;
1808 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P; break;
1809 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P; break;
1810 case 0x20: f->avctx->pix_fmt= PIX_FMT_YUV411P; break;
1811 case 0x22: f->avctx->pix_fmt= PIX_FMT_YUV410P; break;
1813 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1816 }else if(f->avctx->bits_per_raw_sample<=8 && f->transparency){
1817 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1818 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUVA444P; break;
1819 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUVA422P; break;
1820 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUVA420P; break;
1822 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1825 }else if(f->avctx->bits_per_raw_sample==9) {
1827 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1828 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P9; break;
1829 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P9; break;
1830 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P9; break;
1832 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1835 }else if(f->avctx->bits_per_raw_sample==10) {
1837 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1838 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P10; break;
1839 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P10; break;
1840 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P10; break;
1842 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1846 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1847 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
1848 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P16; break;
1849 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P16; break;
1851 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1855 }else if(f->colorspace==1){
1856 if(f->chroma_h_shift || f->chroma_v_shift){
1857 av_log(f->avctx, AV_LOG_ERROR, "chroma subsampling not supported in this colorspace\n");
1860 if(f->transparency) f->avctx->pix_fmt= PIX_FMT_RGB32;
1861 else f->avctx->pix_fmt= PIX_FMT_0RGB32;
1863 av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
1867 //printf("%d %d %d\n", f->chroma_h_shift, f->chroma_v_shift,f->avctx->pix_fmt);
1869 context_count= read_quant_tables(c, f->quant_table);
1870 if(context_count < 0){
1871 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1874 }else if(f->version < 3){
1875 f->slice_count= get_symbol(c, state, 0);
/* v3+: slice count is implicit; walk the packet backwards over the
 * (24-bit length + optional CRC) trailers. */
1877 const uint8_t *p= c->bytestream_end;
1878 for(f->slice_count = 0; f->slice_count < MAX_SLICES && 3 < p - c->bytestream_start; f->slice_count++){
1879 int trailer = 3 + 5*!!f->ec;
1880 int size = AV_RB24(p-trailer);
1881 if(size + trailer > p - c->bytestream_start)
1883 p -= size + trailer;
1886 if(f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0){
1887 av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid\n", f->slice_count);
1891 for(j=0; j<f->slice_count; j++){
1892 FFV1Context *fs= f->slice_context[j];
1894 fs->packed_at_lsb= f->packed_at_lsb;
/* v2 carries slice geometry here; v3+ carries it in the slice header. */
1896 if(f->version == 2){
1897 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1898 fs->slice_y = get_symbol(c, state, 0) *f->height;
1899 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1900 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
1902 fs->slice_x /= f->num_h_slices;
1903 fs->slice_y /= f->num_v_slices;
1904 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1905 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
1906 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1908 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1909 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1913 for(i=0; i<f->plane_count; i++){
1914 PlaneContext * const p= &fs->plane[i];
1916 if(f->version == 2){
1917 int idx=get_symbol(c, state, 0);
/* '>=' (was '>'): idx == quant_table_count is out of range; the
 * unsigned comparison also rejects negative idx. */
1918 if(idx >= (unsigned)f->quant_table_count){
1919 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
1922 p->quant_table_index= idx;
1923 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
1924 context_count= f->context_count[idx];
1926 memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
1929 if(f->version <= 2){
1930 if(p->context_count < context_count){
1931 av_freep(&p->state);
1932 av_freep(&p->vlc_state);
1934 p->context_count= context_count;
/* Decoder init: parse the extradata global header (if present) and
 * allocate the per-slice contexts.  Error-return lines are elided in
 * this listing. */
1941 static av_cold int decode_init(AVCodecContext *avctx)
1943 FFV1Context *f = avctx->priv_data;
1947 if(avctx->extradata && read_extra_header(f) < 0)
1950 if(init_slice_contexts(f) < 0)
/* Decode one packet into a frame: read the keyframe flag (and header on
 * keyframes), locate each slice by walking the packet backwards over the
 * slice trailers, verify per-slice CRCs when enabled, then decode all
 * slices in parallel and return the picture. */
1956 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
1957 const uint8_t *buf = avpkt->data;
1958 int buf_size = avpkt->size;
1959 FFV1Context *f = avctx->priv_data;
1960 RangeCoder * const c= &f->slice_context[0]->c;
1961 AVFrame * const p= &f->picture;
1963 uint8_t keystate= 128;
1964 const uint8_t *buf_p;
1966 AVFrame *picture = data;
1968 /* release previously stored data */
1970 avctx->release_buffer(avctx, p);
1972 ff_init_range_decoder(c, buf, buf_size);
1973 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1976 p->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
1977 if(get_rac(c, &keystate)){
/* key_frame_ok stays 0 until the header fully parses, so a broken
 * keyframe poisons all following inter frames until the next good one. */
1979 f->key_frame_ok = 0;
1980 if(read_header(f) < 0)
1982 f->key_frame_ok = 1;
1984 if (!f->key_frame_ok) {
1985 av_log(avctx, AV_LOG_ERROR, "Cant decode non keyframe without valid keyframe\n");
1986 return AVERROR_INVALIDDATA;
1992 if(avctx->get_buffer(avctx, p) < 0){
1993 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1997 if(avctx->debug&FF_DEBUG_PICT_INFO)
1998 av_log(avctx, AV_LOG_DEBUG, "ver:%d keyframe:%d coder:%d ec:%d slices:%d\n",
1999 f->version, p->key_frame, f->ac, f->ec, f->slice_count);
/* Slices are located from the end of the packet: each trailer holds a
 * 24-bit slice length (plus 5 CRC/flag bytes when ec is on). */
2001 buf_p= buf + buf_size;
2002 for(i=f->slice_count-1; i>=0; i--){
2003 FFV1Context *fs= f->slice_context[i];
2004 int trailer = 3 + 5*!!f->ec;
2007 if(i || f->version>2) v = AV_RB24(buf_p-trailer)+trailer;
2008 else v = buf_p - c->bytestream_start;
2009 if(buf_p - c->bytestream_start < v){
2010 av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
/* CRC over the slice including its trailer must come out 0. */
2016 unsigned crc = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, v);
2018 av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", crc);
2023 ff_init_range_decoder(&fs->c, buf_p, v);
2027 avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
2028 f->picture_number++;
2031 *data_size = sizeof(AVFrame);
/* FFV1 decoder registration.  DR1: decoder can use user-supplied
 * get_buffer; slice threading is supported via decode_slice. */
2036 AVCodec ff_ffv1_decoder = {
2038 .type = AVMEDIA_TYPE_VIDEO,
2039 .id = CODEC_ID_FFV1,
2040 .priv_data_size = sizeof(FFV1Context),
2041 .init = decode_init,
2042 .close = common_end,
2043 .decode = decode_frame,
2044 .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ |
2045 CODEC_CAP_SLICE_THREADS,
2046 .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
#if CONFIG_FFV1_ENCODER
/* Encoder AVOptions: "slicecrc" toggles per-slice CRC trailers
 * (-1 = auto by version, 0 = off, 1 = on). */
2051 #define OFFSET(x) offsetof(FFV1Context, x)
2052 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2053 static const AVOption options[] = {
2054 { "slicecrc", "Protect slices with CRCs", OFFSET(ec), AV_OPT_TYPE_INT, {-1}, -1, 1, VE},
/* AVClass wiring so the options above are discoverable via av_opt. */
2058 static const AVClass class = {
2059 .class_name = "ffv1 encoder",
2060 .item_name = av_default_item_name,
2062 .version = LIBAVUTIL_VERSION_INT,
/* Codec-level option defaults (entries elided in this listing). */
2065 static const AVCodecDefault ffv1_defaults[] = {
2070 AVCodec ff_ffv1_encoder = {
2072 .type = AVMEDIA_TYPE_VIDEO,
2073 .id = CODEC_ID_FFV1,
2074 .priv_data_size = sizeof(FFV1Context),
2075 .init = encode_init,
2076 .encode2 = encode_frame,
2077 .close = common_end,
2078 .capabilities = CODEC_CAP_SLICE_THREADS,
2079 .defaults = ffv1_defaults,
2080 .pix_fmts = (const enum PixelFormat[]){
2081 PIX_FMT_YUV420P, PIX_FMT_YUVA420P, PIX_FMT_YUVA422P, PIX_FMT_YUV444P,
2082 PIX_FMT_YUVA444P, PIX_FMT_YUV440P, PIX_FMT_YUV422P, PIX_FMT_YUV411P,
2083 PIX_FMT_YUV410P, PIX_FMT_0RGB32, PIX_FMT_RGB32, PIX_FMT_YUV420P16,
2084 PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_YUV444P9, PIX_FMT_YUV422P9,
2085 PIX_FMT_YUV420P9, PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_YUV444P10,
2086 PIX_FMT_GRAY16, PIX_FMT_GRAY8,
2089 .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
2090 .priv_class = &class,