2 * FFV1 codec for libavcodec
4 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * FF Video Codec 1 (a lossless codec)
33 #include "rangecoder.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/crc.h"
39 #include "libavutil/opt.h"
40 #include "libavutil/imgutils.h"
42 #ifdef __INTEL_COMPILER
/* Range-coder / quantizer-table limits.
 * NOTE(review): the stray leading numbers on the lines below are artifacts
 * of a line-numbered paste; several lines of this file are elided. */
48 #define CONTEXT_SIZE 32
50 #define MAX_QUANT_TABLES 8
51 #define MAX_CONTEXT_INPUTS 5
/* Golomb-Rice run-length code table shared with other codecs (defined elsewhere). */
53 extern const uint8_t ff_log2_run[41];
/* 5-level context quantizer for >8-bit input: maps a wrapped 8-bit sample
 * difference to {-2,-1,0,1,2} with a wide dead zone around 0.
 * (Reconstructed: the pasted line numbers and the missing closing brace
 * made the table invalid; all 256 data values were present and are kept.) */
static const int8_t quant5_10bit[256]={
 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-0,-0,-0,-0,-0,-0,-0,-0,-0,-0,
};
/* 5-level context quantizer for 8-bit input: maps a wrapped 8-bit sample
 * difference to {-2,-1,0,1,2} with a narrow dead zone.
 * (Reconstructed: pasted line numbers and missing closing brace removed;
 * all 256 data values were present and are kept.) */
static const int8_t quant5[256]={
 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1,-1,
};
/* 9-level context quantizer for >8-bit input: maps a wrapped 8-bit sample
 * difference to {-4..4}.
 * (Reconstructed: pasted line numbers and missing closing brace removed;
 * all 256 data values were present and are kept.) */
static const int8_t quant9_10bit[256]={
 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,
-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,
-3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-1,-1,-1,-1,-1,-1,-1,-1,-0,-0,-0,-0,
};
/* 11-level context quantizer for 8-bit input: maps a wrapped 8-bit sample
 * difference to {-5..5}.
 * (Reconstructed: pasted line numbers and missing closing brace removed;
 * all 256 data values were present and are kept.) */
static const int8_t quant11[256]={
 0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-4,-4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
-4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-1,
};
/* Default range-coder state transition table for FFV1 version >= 2
 * (one_state[i]: next state after coding a 1 in state i).
 * (Reconstructed: pasted line numbers and missing closing brace removed;
 * all 256 data values were present and are kept.) */
static const uint8_t ver2_state[256]= {
   0,  10,  10,  10,  10,  16,  16,  16,  28,  16,  16,  29,  42,  49,  20,  49,
  59,  25,  26,  26,  27,  31,  33,  33,  33,  34,  34,  37,  67,  38,  39,  39,
  40,  40,  41,  79,  43,  44,  45,  45,  48,  48,  64,  50,  51,  52,  88,  52,
  53,  74,  55,  57,  58,  58,  74,  60, 101,  61,  62,  84,  66,  66,  68,  69,
  87,  82,  71,  97,  73,  73,  82,  75, 111,  77,  94,  78,  87,  81,  83,  97,
  85,  83,  94,  86,  99,  89,  90,  99, 111,  92,  93, 134,  95,  98, 105,  98,
 105, 110, 102, 108, 102, 118, 103, 106, 106, 113, 109, 112, 114, 112, 116, 125,
 115, 116, 117, 117, 126, 119, 125, 121, 121, 123, 145, 124, 126, 131, 127, 129,
 165, 130, 132, 138, 133, 135, 145, 136, 137, 139, 146, 141, 143, 142, 144, 148,
 147, 155, 151, 149, 151, 150, 152, 157, 153, 154, 156, 168, 158, 162, 161, 160,
 172, 163, 169, 164, 166, 184, 167, 170, 177, 174, 171, 173, 182, 176, 180, 178,
 175, 189, 179, 181, 186, 183, 192, 185, 200, 187, 191, 188, 190, 197, 193, 196,
 197, 194, 195, 196, 198, 202, 199, 201, 210, 203, 207, 204, 205, 206, 208, 214,
 209, 211, 221, 212, 213, 215, 224, 216, 217, 218, 219, 220, 222, 228, 223, 225,
 226, 224, 227, 229, 240, 230, 231, 232, 233, 234, 235, 236, 238, 239, 237, 242,
 241, 243, 242, 244, 245, 246, 247, 248, 249, 250, 251, 252, 252, 253, 254, 255,
};
/* Per-context adaptation state of the Golomb-Rice (VLC) coder.
 * NOTE(review): struct bodies are partially elided in this excerpt; the
 * stray leading numbers are artifacts of a line-numbered paste. */
150 typedef struct VlcState{
/* Per-plane coding state: context quantizer tables plus adaptive models. */
157 typedef struct PlaneContext{
158 int16_t quant_table[MAX_CONTEXT_INPUTS][256];
159 int quant_table_index;
/* one CONTEXT_SIZE-byte range-coder state vector per context */
161 uint8_t (*state)[CONTEXT_SIZE];
163 uint8_t interlace_bit_state[2];
/* upper bound on the number of per-slice contexts */
166 #define MAX_SLICES 256
/* Master codec context; also cloned once per slice (see slice_context[]).
 * NOTE(review): many members are elided in this excerpt. */
168 typedef struct FFV1Context{
170 AVCodecContext *avctx;
/* pass-1 range-coder statistics: [state][bit] hit counters */
174 uint64_t rc_stat[256][2];
175 uint64_t (*rc_stat2[MAX_QUANT_TABLES])[32][2];
179 int chroma_h_shift, chroma_v_shift;
185 AVFrame last_picture;
187 int ac; ///< 1=range coder <-> 0=golomb rice
188 int ac_byte_count; ///< number of bytes used for AC coding
189 PlaneContext plane[MAX_PLANES];
/* active quantizer set (copy of quant_tables[context_model]) */
190 int16_t quant_table[MAX_CONTEXT_INPUTS][256];
191 int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][256];
192 int context_count[MAX_QUANT_TABLES];
193 uint8_t state_transition[256];
194 uint8_t (*initial_states[MAX_QUANT_TABLES])[32];
/* ring buffer of prediction sample lines, allocated per slice */
197 int16_t *sample_buffer;
204 int quant_table_count;
208 struct FFV1Context *slice_context[MAX_SLICES];
216 int bits_per_raw_sample;
/* Fold a prediction residual into the signed range of a bits-wide sample.
 * NOTE(review): the function body is elided in this excerpt. */
219 static av_always_inline int fold(int diff, int bits){
/**
 * Median predictor: predict the current sample from its left (L),
 * top (T) and top-left (LT) neighbours.
 *
 * @param src  pointer to the current sample position in the current line
 * @param last pointer to the co-located sample in the previous line
 * @return mid_pred(L, L + T - LT, T)
 *
 * (Reconstructed: only the stray pasted line numbers and the elided
 * braces/blank line were restored; the visible statements are unchanged.)
 */
static inline int predict(int16_t *src, int16_t *last)
{
    const int LT= last[-1];
    const int T= last[ 0];
    const int L = src[-1];

    return mid_pred(L, L + T - LT, T);
}
240 static inline int get_context(PlaneContext *p, int16_t *src,
241 int16_t *last, int16_t *last2)
243 const int LT= last[-1];
244 const int T= last[ 0];
245 const int RT= last[ 1];
246 const int L = src[-1];
248 if(p->quant_table[3][127]){
249 const int TT= last2[0];
250 const int LL= src[-2];
251 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF]
252 +p->quant_table[3][(LL-L) & 0xFF] + p->quant_table[4][(TT-T) & 0xFF];
254 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF];
/* For each initial probability i, search the state whose steady-state
 * expected code length (under transition table one_state) is minimal;
 * used to derive initial_states from 2-pass statistics.
 * NOTE(review): parts of the loop bodies are elided in this excerpt. */
257 static void find_best_state(uint8_t best_state[256][256], const uint8_t one_state[256]){
/* l2tab[i] = log2(i/256): bit cost of probability i/256 */
262 l2tab[i]= log2(i/256.0);
264 for(i=0; i<256; i++){
265 double best_len[256];
271 for(j=FFMAX(i-10,1); j<FFMIN(i+11,256); j++){
275 for(k=0; k<256; k++){
276 double newocc[256]={0};
277 for(m=0; m<256; m++){
/* accumulate expected bits over the state-occupancy distribution */
279 len -=occ[m]*( p *l2tab[ m]
280 + (1-p)*l2tab[256-m]);
283 if(len < best_len[k]){
/* propagate occupancy one transition step forward */
287 for(m=0; m<256; m++){
289 newocc[ one_state[ m]] += occ[m]* p ;
290 newocc[256-one_state[256-m]] += occ[m]*(1-p);
293 memcpy(occ, newocc, sizeof(occ));
/* Encode one (optionally signed) symbol with the range coder: a zero flag
 * (state 0), unary exponent (states 1..10), sign (states 11..21) and
 * mantissa bits (states 22..31). When rc_stat/rc_stat2 are non-NULL the
 * local put_rac wrapper also gathers pass-1 statistics.
 * NOTE(review): several lines are elided in this excerpt. */
299 static av_always_inline av_flatten void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int is_signed, uint64_t rc_stat[256][2], uint64_t rc_stat2[32][2]){
302 #define put_rac(C,S,B) \
306 rc_stat2[(S)-state][B]++;\
/* non-zero symbol: emit |v| as exponent + mantissa, then the sign bit */
312 const int a= FFABS(v);
313 const int e= av_log2(a);
314 put_rac(c, state+0, 0);
317 put_rac(c, state+1+i, 1); //1..10
319 put_rac(c, state+1+i, 0);
321 for(i=e-1; i>=0; i--){
322 put_rac(c, state+22+i, (a>>i)&1); //22..31
326 put_rac(c, state+11 + e, v < 0); //11..21
/* large-exponent path: clamp context indices to their last slot */
329 put_rac(c, state+1+FFMIN(i,9), 1); //1..10
331 put_rac(c, state+1+9, 0);
333 for(i=e-1; i>=0; i--){
334 put_rac(c, state+22+FFMIN(i,9), (a>>i)&1); //22..31
338 put_rac(c, state+11 + 10, v < 0); //11..21
/* v == 0: a single zero-flag bit */
341 put_rac(c, state+0, 1);
346 static av_noinline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed){
347 put_symbol_inline(c, state, v, is_signed, NULL, NULL);
/* Decode one symbol; mirrors put_symbol_inline (zero flag, unary exponent,
 * mantissa, optional sign).
 * NOTE(review): several lines are elided in this excerpt. */
350 static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed){
351 if(get_rac(c, state+0))
356 while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
361 for(i=e-1; i>=0; i--){
362 a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31
/* e becomes the sign mask: -1 when a set sign bit was read */
365 e= -(is_signed && get_rac(c, state+11 + FFMIN(e, 10))); //11..21
370 static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
371 return get_symbol_inline(c, state, is_signed);
/* Adapt a Golomb-Rice context after coding residual v: accumulate the
 * error sum and drift, and steer the bias; counters are halved every
 * 128 samples so the adaptation stays responsive.
 * NOTE(review): several lines are elided in this excerpt. */
374 static inline void update_vlc_state(VlcState * const state, const int v){
375 int drift= state->drift;
376 int count= state->count;
377 state->error_sum += FFABS(v);
380 if(count == 128){ //FIXME variable
383 state->error_sum >>= 1;
/* bias update keeps the drift centred around zero */
388 if(state->bias > -128) state->bias--;
394 if(state->bias < 127) state->bias++;
/* Encode residual v with an adaptive signed Golomb-Rice code: fold the
 * bias-corrected residual, pick k from the running error sum, apply the
 * drift-based sign mapping, then write and adapt.
 * NOTE(review): several lines are elided in this excerpt. */
405 static inline void put_vlc_symbol(PutBitContext *pb, VlcState * const state, int v, int bits){
407 //printf("final: %d ", v);
408 v = fold(v - state->bias, bits);
/* derive Rice parameter k from the running error sum */
412 while(i < state->error_sum){ //FIXME optimize
420 if(k==0 && 2*state->drift <= - state->count) code= v ^ (-1);
423 code= v ^ ((2*state->drift + state->count)>>31);
426 //printf("v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code, state->bias, state->error_sum, state->drift, state->count, k);
427 set_sr_golomb(pb, code, k, 12, bits);
429 update_vlc_state(state, v);
/* Decode one adaptive signed Golomb-Rice residual; mirror of
 * put_vlc_symbol (same k derivation and drift-based sign mapping).
 * NOTE(review): several lines are elided in this excerpt. */
432 static inline int get_vlc_symbol(GetBitContext *gb, VlcState * const state, int bits){
437 while(i < state->error_sum){ //FIXME optimize
444 v= get_sr_golomb(gb, k, 12, bits);
445 //printf("v:%d bias:%d error:%d drift:%d count:%d k:%d", v, state->bias, state->error_sum, state->drift, state->count, k);
448 if(k==0 && 2*state->drift <= - state->count) v ^= (-1);
450 v ^= ((2*state->drift + state->count)>>31);
/* undo the bias correction applied by the encoder */
453 ret= fold(v + state->bias, bits);
455 update_vlc_state(state, v);
456 //printf("final: %d\n", ret);
460 #if CONFIG_FFV1_ENCODER
/* Encode one line of a plane: per-pixel context modelling plus either
 * range coding (s->ac) or Golomb-Rice with run mode for context 0.
 * NOTE(review): several lines are elided in this excerpt. */
461 static av_always_inline int encode_line(FFV1Context *s, int w,
463 int plane_index, int bits)
465 PlaneContext * const p= &s->plane[plane_index];
466 RangeCoder * const c= &s->c;
468 int run_index= s->run_index;
/* bail out early if the worst case could overflow the output buffer */
473 if(c->bytestream_end - c->bytestream < w*20){
474 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
478 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < w*4){
479 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
487 context= get_context(p, sample[0]+x, sample[1]+x, sample[2]+x);
488 diff= sample[0][x] - predict(sample[0]+x, sample[1]+x);
495 diff= fold(diff, bits);
498 if(s->flags & CODEC_FLAG_PASS1){
499 put_symbol_inline(c, p->state[context], diff, 1, s->rc_stat, s->rc_stat2[p->quant_table_index][context]);
501 put_symbol_inline(c, p->state[context], diff, 1, NULL, NULL);
/* Golomb-Rice path: a zero context enters run mode */
504 if(context == 0) run_mode=1;
509 while(run_count >= 1<<ff_log2_run[run_index]){
510 run_count -= 1<<ff_log2_run[run_index];
512 put_bits(&s->pb, 1, 1);
515 put_bits(&s->pb, 1 + ff_log2_run[run_index], run_count);
516 if(run_index) run_index--;
525 // printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, (int)put_bits_count(&s->pb));
528 put_vlc_symbol(&s->pb, &p->vlc_state[context], diff, bits);
/* flush any pending run at the end of the line */
532 while(run_count >= 1<<ff_log2_run[run_index]){
533 run_count -= 1<<ff_log2_run[run_index];
535 put_bits(&s->pb, 1, 1);
539 put_bits(&s->pb, 1, 1);
541 s->run_index= run_index;
/* Encode one luma/chroma plane: maintain a ring of sample lines (3 lines
 * for the large context model, else 2) with 3-sample borders, and feed
 * each line to encode_line().
 * NOTE(review): several lines are elided in this excerpt. */
546 static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
548 const int ring_size= s->avctx->context_model ? 3 : 2;
552 memset(s->sample_buffer, 0, ring_size*(w+6)*sizeof(*s->sample_buffer));
555 for(i=0; i<ring_size; i++)
556 sample[i]= s->sample_buffer + (w+6)*((h+i-y)%ring_size) + 3;
/* replicate edge samples so the predictors have valid neighbours */
558 sample[0][-1]= sample[1][0 ];
559 sample[1][ w]= sample[1][w-1];
561 if(s->bits_per_raw_sample<=8){
563 sample[0][x]= src[x + stride*y];
565 encode_line(s, w, sample, plane_index, 8);
567 if(s->packed_at_lsb){
569 sample[0][x]= ((uint16_t*)(src + stride*y))[x];
/* MSB-packed input: shift samples down to bits_per_raw_sample */
573 sample[0][x]= ((uint16_t*)(src + stride*y))[x] >> (16 - s->bits_per_raw_sample);
576 encode_line(s, w, sample, plane_index, s->bits_per_raw_sample);
578 //STOP_TIMER("encode line")}
/* Encode a packed RGB(A) frame: each of the 3 (+alpha) transformed planes
 * gets its own sample ring and is coded with encode_line().
 * NOTE(review): the colour transform lines are elided in this excerpt. */
582 static void encode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
584 const int ring_size= s->avctx->context_model ? 3 : 2;
585 int16_t *sample[4][3];
588 memset(s->sample_buffer, 0, ring_size*4*(w+6)*sizeof(*s->sample_buffer));
591 for(i=0; i<ring_size; i++)
593 sample[p][i]= s->sample_buffer + p*ring_size*(w+6) + ((h+i-y)%ring_size)*(w+6) + 3;
596 unsigned v= src[x + stride*y];
608 // assert(g>=0 && b>=0 && r>=0);
609 // assert(g<256 && b<512 && r<512);
615 for(p=0; p<3 + s->transparency; p++){
616 sample[p][0][-1]= sample[p][1][0 ];
617 sample[p][1][ w]= sample[p][1][w-1];
/* chroma-difference planes need one extra bit, hence 9 */
618 encode_line(s, w, sample[p], (p+1)/2, 9);
/* Write one 256-entry quantizer table as run lengths of equal values over
 * the first half; the decoder mirrors the second half.
 * NOTE(review): several lines are elided in this excerpt. */
623 static void write_quant_table(RangeCoder *c, int16_t *quant_table){
626 uint8_t state[CONTEXT_SIZE];
627 memset(state, 128, sizeof(state));
629 for(i=1; i<128 ; i++){
630 if(quant_table[i] != quant_table[i-1]){
631 put_symbol(c, state, i-last-1, 0);
/* final run up to index 127 */
635 put_symbol(c, state, i-last-1, 0);
/* Write all quantizer tables of one MAX_CONTEXT_INPUTS set.
 * NOTE(review): the loop header is elided in this excerpt. */
638 static void write_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
641 write_quant_table(c, quant_table[i]);
/* Write the per-frame header (version < 2) or the version-2 slice table
 * into the range coder of the first slice context.
 * NOTE(review): several lines are elided in this excerpt. */
644 static void write_header(FFV1Context *f){
645 uint8_t state[CONTEXT_SIZE];
647 RangeCoder * const c= &f->slice_context[0]->c;
649 memset(state, 128, sizeof(state));
652 put_symbol(c, state, f->version, 0);
653 put_symbol(c, state, f->ac, 0);
/* custom range-coder transition table, coded as signed deltas */
655 for(i=1; i<256; i++){
656 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
659 put_symbol(c, state, f->colorspace, 0); //YUV cs type
661 put_symbol(c, state, f->bits_per_raw_sample, 0);
662 put_rac(c, state, f->chroma_planes);
663 put_symbol(c, state, f->chroma_h_shift, 0);
664 put_symbol(c, state, f->chroma_v_shift, 0);
665 put_rac(c, state, f->transparency);
667 write_quant_tables(c, f->quant_table);
668 }else if(f->version < 3){
/* version 2: slice geometry in slice-grid units plus per-plane tables */
669 put_symbol(c, state, f->slice_count, 0);
670 for(i=0; i<f->slice_count; i++){
671 FFV1Context *fs= f->slice_context[i];
672 put_symbol(c, state, (fs->slice_x +1)*f->num_h_slices / f->width , 0);
673 put_symbol(c, state, (fs->slice_y +1)*f->num_v_slices / f->height , 0);
674 put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
675 put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
676 for(j=0; j<f->plane_count; j++){
677 put_symbol(c, state, f->plane[j].quant_table_index, 0);
678 av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
683 #endif /* CONFIG_FFV1_ENCODER */
/* Shared encoder/decoder initialisation: copy flags and dimensions and
 * set up the DSP context.
 * NOTE(review): several lines are elided in this excerpt. */
685 static av_cold int common_init(AVCodecContext *avctx){
686 FFV1Context *s = avctx->priv_data;
689 s->flags= avctx->flags;
691 avcodec_get_frame_defaults(&s->picture);
693 ff_dsputil_init(&s->dsp, avctx);
695 s->width = avctx->width;
696 s->height= avctx->height;
698 assert(s->width && s->height);
/* Allocate per-plane model storage for one slice and refresh its
 * range-coder transition tables from the master context.
 * NOTE(review): several lines are elided in this excerpt. */
707 static int init_slice_state(FFV1Context *f, FFV1Context *fs){
710 fs->plane_count= f->plane_count;
711 fs->transparency= f->transparency;
712 for(j=0; j<f->plane_count; j++){
713 PlaneContext * const p= &fs->plane[j];
/* allocate lazily; the contexts survive across frames */
716 if(!p-> state) p-> state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t));
718 return AVERROR(ENOMEM);
720 if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(VlcState));
722 return AVERROR(ENOMEM);
727 //FIXME only redo if state_transition changed
728 for(j=1; j<256; j++){
729 fs->c.one_state [ j]= f->state_transition[j];
730 fs->c.zero_state[256-j]= 256-fs->c.one_state [j];
/* Run init_slice_state() on every slice context.
 * NOTE(review): the loop body is partially elided in this excerpt. */
737 static int init_slices_state(FFV1Context *f){
739 for(i=0; i<f->slice_count; i++){
740 FFV1Context *fs= f->slice_context[i];
741 if(init_slice_state(f, fs) < 0)
/* Create one cloned FFV1Context per slice with its own geometry and
 * sample ring buffer.
 * NOTE(review): several lines are elided in this excerpt. */
747 static av_cold int init_slice_contexts(FFV1Context *f){
750 f->slice_count= f->num_h_slices * f->num_v_slices;
752 for(i=0; i<f->slice_count; i++){
753 FFV1Context *fs= av_mallocz(sizeof(*fs));
754 int sx= i % f->num_h_slices;
755 int sy= i / f->num_h_slices;
/* integer slice grid: pixel rectangle [sxs,sxe) x [sys,sye) */
756 int sxs= f->avctx->width * sx / f->num_h_slices;
757 int sxe= f->avctx->width *(sx+1) / f->num_h_slices;
758 int sys= f->avctx->height* sy / f->num_v_slices;
759 int sye= f->avctx->height*(sy+1) / f->num_v_slices;
760 f->slice_context[i]= fs;
761 memcpy(fs, f, sizeof(*fs));
762 memset(fs->rc_stat2, 0, sizeof(fs->rc_stat2));
764 fs->slice_width = sxe - sxs;
765 fs->slice_height= sye - sys;
/* 3 lines x 4 planes of (width+6) samples for the prediction ring */
769 fs->sample_buffer = av_malloc(3*4 * (fs->width+6) * sizeof(*fs->sample_buffer));
770 if (!fs->sample_buffer)
771 return AVERROR(ENOMEM);
/* Allocate the per-table initial model states and reset them to 128
 * (probability 0.5 for every range-coder context byte).
 * NOTE(review): several lines are elided in this excerpt. */
776 static int allocate_initial_states(FFV1Context *f){
779 for(i=0; i<f->quant_table_count; i++){
780 f->initial_states[i]= av_malloc(f->context_count[i]*sizeof(*f->initial_states[i]));
781 if(!f->initial_states[i])
782 return AVERROR(ENOMEM);
783 memset(f->initial_states[i], 128, f->context_count[i]*sizeof(*f->initial_states[i]));
788 #if CONFIG_FFV1_ENCODER
/* Build the version >= 2 global extradata: configuration record,
 * quantizer table sets, optional custom initial states and a trailing
 * CRC-32 over the whole record.
 * NOTE(review): several lines are elided in this excerpt. */
789 static int write_extra_header(FFV1Context *f){
790 RangeCoder * const c= &f->c;
791 uint8_t state[CONTEXT_SIZE];
793 uint8_t state2[32][CONTEXT_SIZE];
796 memset(state2, 128, sizeof(state2));
797 memset(state, 128, sizeof(state));
/* worst-case size for all tables and initial states */
799 f->avctx->extradata= av_malloc(f->avctx->extradata_size= 10000 + (11*11*5*5*5+11*11*11)*32);
800 ff_init_range_encoder(c, f->avctx->extradata, f->avctx->extradata_size);
801 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
803 put_symbol(c, state, f->version, 0);
805 put_symbol(c, state, f->minor_version, 0);
806 put_symbol(c, state, f->ac, 0);
808 for(i=1; i<256; i++){
809 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
812 put_symbol(c, state, f->colorspace, 0); //YUV cs type
813 put_symbol(c, state, f->bits_per_raw_sample, 0);
814 put_rac(c, state, f->chroma_planes);
815 put_symbol(c, state, f->chroma_h_shift, 0);
816 put_symbol(c, state, f->chroma_v_shift, 0);
817 put_rac(c, state, f->transparency);
818 put_symbol(c, state, f->num_h_slices-1, 0);
819 put_symbol(c, state, f->num_v_slices-1, 0);
821 put_symbol(c, state, f->quant_table_count, 0);
822 for(i=0; i<f->quant_table_count; i++)
823 write_quant_tables(c, f->quant_tables[i]);
/* emit initial states only when any byte differs from the 128 default */
825 for(i=0; i<f->quant_table_count; i++){
826 for(j=0; j<f->context_count[i]*CONTEXT_SIZE; j++)
827 if(f->initial_states[i] && f->initial_states[i][0][j] != 128)
829 if(j<f->context_count[i]*CONTEXT_SIZE){
830 put_rac(c, state, 1);
831 for(j=0; j<f->context_count[i]; j++){
832 for(k=0; k<CONTEXT_SIZE; k++){
/* delta-code each state byte against the previous context */
833 int pred= j ? f->initial_states[i][j-1][k] : 128;
834 put_symbol(c, state2[k], (int8_t)(f->initial_states[i][j][k]-pred), 1);
838 put_rac(c, state, 0);
843 put_symbol(c, state, f->ec, 0);
846 f->avctx->extradata_size= ff_rac_terminate(c);
847 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, f->avctx->extradata, f->avctx->extradata_size);
848 AV_WL32(f->avctx->extradata + f->avctx->extradata_size, v);
849 f->avctx->extradata_size += 4;
/* Greedy reordering of the state transition table: swap nearby states
 * when pass-1 statistics say the expected bit cost shrinks, keeping the
 * table symmetric around state 128.
 * NOTE(review): several lines are elided in this excerpt. */
854 static int sort_stt(FFV1Context *s, uint8_t stt[256]){
855 int i,i2,changed,print=0;
859 for(i=12; i<244; i++){
860 for(i2=i+1; i2<245 && i2<i+4; i2++){
/* COST: expected bits of coding 'old' statistics with probability 'new' */
861 #define COST(old, new) \
862 s->rc_stat[old][0]*-log2((256-(new))/256.0)\
863 +s->rc_stat[old][1]*-log2( (new) /256.0)
865 #define COST2(old, new) \
867 +COST(256-(old), 256-(new))
869 double size0= COST2(i, i ) + COST2(i2, i2);
870 double sizeX= COST2(i, i2) + COST2(i2, i );
871 if(sizeX < size0 && i!=128 && i2!=128){
873 FFSWAP(int, stt[ i], stt[ i2]);
874 FFSWAP(int, s->rc_stat[i ][0],s->rc_stat[ i2][0]);
875 FFSWAP(int, s->rc_stat[i ][1],s->rc_stat[ i2][1]);
/* mirror the swap to keep the table symmetric around 128 */
877 FFSWAP(int, stt[256-i], stt[256-i2]);
878 FFSWAP(int, s->rc_stat[256-i][0],s->rc_stat[256-i2][0]);
879 FFSWAP(int, s->rc_stat[256-i][1],s->rc_stat[256-i2][1]);
881 for(j=1; j<256; j++){
882 if (stt[j] == i ) stt[j] = i2;
883 else if(stt[j] == i2) stt[j] = i ;
885 if (stt[256-j] == 256-i ) stt[256-j] = 256-i2;
886 else if(stt[256-j] == 256-i2) stt[256-j] = 256-i ;
/* Encoder initialisation: choose version/coder, validate the pixel
 * format, build the quantizer tables and per-plane contexts, parse
 * optional 2-pass statistics, pick a slice grid and set up the slice
 * contexts.
 * NOTE(review): many lines are elided in this excerpt. */
897 static av_cold int encode_init(AVCodecContext *avctx)
899 FFV1Context *s = avctx->priv_data;
/* multi-pass or multi-slice operation requires version >= 2 */
906 if((avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) || avctx->slices>1)
907 s->version = FFMAX(s->version, 2);
909 if(avctx->level == 3){
914 s->ec = (s->version >= 3);
917 if(s->version >= 2 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
918 av_log(avctx, AV_LOG_ERROR, "Version 2 needed for requested features but version 2 is experimental and not enabled\n");
919 return AVERROR_INVALIDDATA;
922 s->ac= avctx->coder_type > 0 ? 2 : 0;
/* pixel-format dispatch; >8-bit formats force the range coder */
925 switch(avctx->pix_fmt){
926 case PIX_FMT_YUV444P9:
927 case PIX_FMT_YUV422P9:
928 case PIX_FMT_YUV420P9:
929 if (!avctx->bits_per_raw_sample)
930 s->bits_per_raw_sample = 9;
931 case PIX_FMT_YUV444P10:
932 case PIX_FMT_YUV420P10:
933 case PIX_FMT_YUV422P10:
934 s->packed_at_lsb = 1;
935 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
936 s->bits_per_raw_sample = 10;
938 case PIX_FMT_YUV444P16:
939 case PIX_FMT_YUV422P16:
940 case PIX_FMT_YUV420P16:
941 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) {
942 s->bits_per_raw_sample = 16;
943 } else if (!s->bits_per_raw_sample){
944 s->bits_per_raw_sample = avctx->bits_per_raw_sample;
946 if(s->bits_per_raw_sample <=8){
947 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n");
948 return AVERROR_INVALIDDATA;
950 if(!s->ac && avctx->coder_type == -1) {
951 av_log(avctx, AV_LOG_INFO, "bits_per_raw_sample > 8, forcing coder 1\n");
955 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample of more than 8 needs -coder 1 currently\n");
956 return AVERROR_INVALIDDATA;
958 s->version= FFMAX(s->version, 1);
960 case PIX_FMT_YUV444P:
961 case PIX_FMT_YUV440P:
962 case PIX_FMT_YUV422P:
963 case PIX_FMT_YUV420P:
964 case PIX_FMT_YUV411P:
965 case PIX_FMT_YUV410P:
966 s->chroma_planes= av_pix_fmt_descriptors[avctx->pix_fmt].nb_components < 3 ? 0 : 1;
969 case PIX_FMT_YUVA444P:
970 case PIX_FMT_YUVA422P:
971 case PIX_FMT_YUVA420P:
984 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
985 return AVERROR_INVALIDDATA;
987 if (s->transparency) {
988 av_log(avctx, AV_LOG_WARNING, "Storing alpha plane, this will require a recent FFV1 decoder to playback!\n");
990 if (avctx->context_model > 1U) {
991 av_log(avctx, AV_LOG_ERROR, "Invalid context model %d, valid values are 0 and 1\n", avctx->context_model);
992 return AVERROR(EINVAL);
997 s->state_transition[i]=ver2_state[i];
/* build both quantizer table sets (small 3-input, large 5-input) */
999 for(i=0; i<256; i++){
1000 s->quant_table_count=2;
1001 if(s->bits_per_raw_sample <=8){
1002 s->quant_tables[0][0][i]= quant11[i];
1003 s->quant_tables[0][1][i]= 11*quant11[i];
1004 s->quant_tables[0][2][i]= 11*11*quant11[i];
1005 s->quant_tables[1][0][i]= quant11[i];
1006 s->quant_tables[1][1][i]= 11*quant11[i];
1007 s->quant_tables[1][2][i]= 11*11*quant5 [i];
1008 s->quant_tables[1][3][i]= 5*11*11*quant5 [i];
1009 s->quant_tables[1][4][i]= 5*5*11*11*quant5 [i];
1011 s->quant_tables[0][0][i]= quant9_10bit[i];
1012 s->quant_tables[0][1][i]= 11*quant9_10bit[i];
1013 s->quant_tables[0][2][i]= 11*11*quant9_10bit[i];
1014 s->quant_tables[1][0][i]= quant9_10bit[i];
1015 s->quant_tables[1][1][i]= 11*quant9_10bit[i];
1016 s->quant_tables[1][2][i]= 11*11*quant5_10bit[i];
1017 s->quant_tables[1][3][i]= 5*11*11*quant5_10bit[i];
1018 s->quant_tables[1][4][i]= 5*5*11*11*quant5_10bit[i];
1021 s->context_count[0]= (11*11*11+1)/2;
1022 s->context_count[1]= (11*11*5*5*5+1)/2;
1023 memcpy(s->quant_table, s->quant_tables[avctx->context_model], sizeof(s->quant_table));
1025 for(i=0; i<s->plane_count; i++){
1026 PlaneContext * const p= &s->plane[i];
1028 memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table));
1029 p->quant_table_index= avctx->context_model;
1030 p->context_count= s->context_count[p->quant_table_index];
1033 if(allocate_initial_states(s) < 0)
1034 return AVERROR(ENOMEM);
1036 avctx->coded_frame= &s->picture;
1037 if(!s->transparency)
1039 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
1041 s->picture_number=0;
/* multi-pass: allocate statistics storage and parse stats_in */
1043 if(avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
1044 for(i=0; i<s->quant_table_count; i++){
1045 s->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*s->rc_stat2[i]));
1047 return AVERROR(ENOMEM);
1050 if(avctx->stats_in){
1051 char *p= avctx->stats_in;
1052 uint8_t best_state[256][256];
1056 av_assert0(s->version>=2);
1059 for(j=0; j<256; j++){
1061 s->rc_stat[j][i]= strtol(p, &next, 0);
1063 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d [%s]\n", j,i,p);
1069 for(i=0; i<s->quant_table_count; i++){
1070 for(j=0; j<s->context_count[i]; j++){
1071 for(k=0; k<32; k++){
1073 s->rc_stat2[i][j][k][m]= strtol(p, &next, 0);
1075 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d %d %d [%s]\n", i,j,k,m,p);
1076 return AVERROR_INVALIDDATA;
1083 gob_count= strtol(p, &next, 0);
1084 if(next==p || gob_count <0){
1085 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid\n");
1086 return AVERROR_INVALIDDATA;
1089 while(*p=='\n' || *p==' ') p++;
/* optimise the transition table and initial states from the stats */
1092 sort_stt(s, s->state_transition);
1094 find_best_state(best_state, s->state_transition);
1096 for(i=0; i<s->quant_table_count; i++){
1097 for(j=0; j<s->context_count[i]; j++){
1098 for(k=0; k<32; k++){
1100 if(s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]){
1101 p=256.0*s->rc_stat2[i][j][k][1] / (s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]);
1103 s->initial_states[i][j][k]= best_state[av_clip(round(p), 1, 255)][av_clip((s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1])/gob_count, 0, 255)];
/* choose a slice grid matching the requested slice count */
1110 for(s->num_v_slices=2; s->num_v_slices<9; s->num_v_slices++){
1111 for(s->num_h_slices=s->num_v_slices; s->num_h_slices<2*s->num_v_slices; s->num_h_slices++){
1112 if(avctx->slices == s->num_h_slices * s->num_v_slices && avctx->slices <= 64 || !avctx->slices)
1116 av_log(avctx, AV_LOG_ERROR, "Unsupported number %d of slices requested, please specify a supported number with -slices (ex:4,6,9,12,16, ...)\n", avctx->slices);
1119 write_extra_header(s);
1122 if(init_slice_contexts(s) < 0)
1124 if(init_slices_state(s) < 0)
1127 #define STATS_OUT_SIZE 1024*1024*6
1128 if(avctx->flags & CODEC_FLAG_PASS1){
1129 avctx->stats_out= av_mallocz(STATS_OUT_SIZE);
1130 for(i=0; i<s->quant_table_count; i++){
1131 for(j=0; j<s->slice_count; j++){
1132 FFV1Context *sf= s->slice_context[j];
1133 av_assert0(!sf->rc_stat2[i]);
1134 sf->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*sf->rc_stat2[i]));
1135 if(!sf->rc_stat2[i])
1136 return AVERROR(ENOMEM);
1143 #endif /* CONFIG_FFV1_ENCODER */
/* Reset all adaptive models of one slice to their initial values
 * (custom initial_states when present, otherwise 128).
 * NOTE(review): several lines are elided in this excerpt. */
1146 static void clear_slice_state(FFV1Context *f, FFV1Context *fs){
1149 for(i=0; i<f->plane_count; i++){
1150 PlaneContext *p= &fs->plane[i];
1152 p->interlace_bit_state[0]= 128;
1153 p->interlace_bit_state[1]= 128;
1156 if(f->initial_states[p->quant_table_index]){
1157 memcpy(p->state, f->initial_states[p->quant_table_index], CONTEXT_SIZE*p->context_count);
1159 memset(p->state, 128, CONTEXT_SIZE*p->context_count);
/* reset the Golomb-Rice adaptation state */
1161 for(j=0; j<p->context_count; j++){
1162 p->vlc_state[j].drift= 0;
1163 p->vlc_state[j].error_sum= 4; //FFMAX((RANGE + 32)/64, 2);
1164 p->vlc_state[j].bias= 0;
1165 p->vlc_state[j].count= 1;
1171 #if CONFIG_FFV1_ENCODER
/* Write one slice header: geometry in slice-grid units, per-plane
 * quantizer indices, field order and sample aspect ratio.
 * NOTE(review): a few lines are elided in this excerpt. */
1173 static void encode_slice_header(FFV1Context *f, FFV1Context *fs){
1174 RangeCoder *c = &fs->c;
1175 uint8_t state[CONTEXT_SIZE];
1177 memset(state, 128, sizeof(state));
1179 put_symbol(c, state, (fs->slice_x +1)*f->num_h_slices / f->width , 0);
1180 put_symbol(c, state, (fs->slice_y +1)*f->num_v_slices / f->height , 0);
1181 put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
1182 put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
1183 for(j=0; j<f->plane_count; j++){
1184 put_symbol(c, state, f->plane[j].quant_table_index, 0);
1185 av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
1187 if(!f->picture.interlaced_frame) put_symbol(c, state, 3, 0);
1188 else put_symbol(c, state, 1 + !f->picture.top_field_first, 0);
1189 put_symbol(c, state, f->picture.sample_aspect_ratio.num, 0);
1190 put_symbol(c, state, f->picture.sample_aspect_ratio.den, 0);
/* Thread worker: encode one slice — reset its models, write the slice
 * header, then code the planes (or the RGB path).
 * NOTE(review): several lines are elided in this excerpt. */
1193 static int encode_slice(AVCodecContext *c, void *arg){
1194 FFV1Context *fs= *(void**)arg;
1195 FFV1Context *f= fs->avctx->priv_data;
1196 int width = fs->slice_width;
1197 int height= fs->slice_height;
1200 AVFrame * const p= &f->picture;
/* bytes per sample: 2 for >8-bit input, else 1 */
1201 const int ps= (f->bits_per_raw_sample>8)+1;
1204 clear_slice_state(f, fs);
1206 encode_slice_header(f, fs);
/* Golomb-Rice slices terminate the range coder after the header */
1209 fs->ac_byte_count = f->version > 2 || (!x&&!y) ? ff_rac_terminate(&fs->c) : 0;
1210 init_put_bits(&fs->pb, fs->c.bytestream_start + fs->ac_byte_count, fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count);
1213 if(f->colorspace==0){
1214 const int chroma_width = -((-width )>>f->chroma_h_shift);
1215 const int chroma_height= -((-height)>>f->chroma_v_shift);
1216 const int cx= x>>f->chroma_h_shift;
1217 const int cy= y>>f->chroma_v_shift;
1219 encode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1221 if (f->chroma_planes){
1222 encode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1223 encode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1225 if (fs->transparency)
1226 encode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
1228 encode_rgb_frame(fs, (uint32_t*)(p->data[0]) + ps*x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
/* Encode one frame: write the key-frame flag and (version < 2) header,
 * run all slice workers, pack the slice payloads (with length/CRC
 * trailers as required), and emit pass-1 statistics every 32 frames.
 * NOTE(review): several lines are elided in this excerpt. */
1235 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1236 const AVFrame *pict, int *got_packet)
1238 FFV1Context *f = avctx->priv_data;
1239 RangeCoder * const c= &f->slice_context[0]->c;
1240 AVFrame * const p= &f->picture;
1242 uint8_t keystate=128;
1246 if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width*avctx->height*((8*2+1+1)*4)/8
1247 + FF_MIN_BUFFER_SIZE)) < 0)
1250 ff_init_range_encoder(c, pkt->data, pkt->size);
1251 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1254 p->pict_type= AV_PICTURE_TYPE_I;
/* key frame at GOP boundaries (gop_size 0 => all-intra) */
1256 if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
1257 put_rac(c, &keystate, 1);
1262 put_rac(c, &keystate, 0);
1268 for(i=1; i<256; i++){
1269 c->one_state[i]= f->state_transition[i];
1270 c->zero_state[256-i]= 256-c->one_state[i];
/* give each remaining slice its own share of the packet buffer */
1274 for(i=1; i<f->slice_count; i++){
1275 FFV1Context *fs= f->slice_context[i];
1276 uint8_t *start = pkt->data + (pkt->size-used_count)*i/f->slice_count;
1277 int len = pkt->size/f->slice_count;
1279 ff_init_range_encoder(&fs->c, start, len);
1281 avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
/* compact the slice payloads into a contiguous packet */
1284 for(i=0; i<f->slice_count; i++){
1285 FFV1Context *fs= f->slice_context[i];
1290 put_rac(&fs->c, &state, 0);
1291 bytes= ff_rac_terminate(&fs->c);
1293 flush_put_bits(&fs->pb); //nicer padding FIXME
1294 bytes= fs->ac_byte_count + (put_bits_count(&fs->pb)+7)/8;
1296 if(i>0 || f->version>2){
1297 av_assert0(bytes < pkt->size/f->slice_count);
1298 memmove(buf_p, fs->c.bytestream_start, bytes);
1299 av_assert0(bytes < (1<<24));
1300 AV_WB24(buf_p+bytes, bytes);
/* error-check mode: append CRC-32 of the slice */
1306 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, bytes);
1307 AV_WL32(buf_p + bytes, v); bytes += 4;
/* pass 1: aggregate slice statistics and serialise to stats_out */
1312 if((avctx->flags&CODEC_FLAG_PASS1) && (f->picture_number&31)==0){
1314 char *p= avctx->stats_out;
1315 char *end= p + STATS_OUT_SIZE;
1317 memset(f->rc_stat, 0, sizeof(f->rc_stat));
1318 for(i=0; i<f->quant_table_count; i++)
1319 memset(f->rc_stat2[i], 0, f->context_count[i]*sizeof(*f->rc_stat2[i]));
1321 for(j=0; j<f->slice_count; j++){
1322 FFV1Context *fs= f->slice_context[j];
1323 for(i=0; i<256; i++){
1324 f->rc_stat[i][0] += fs->rc_stat[i][0];
1325 f->rc_stat[i][1] += fs->rc_stat[i][1];
1327 for(i=0; i<f->quant_table_count; i++){
1328 for(k=0; k<f->context_count[i]; k++){
1329 for(m=0; m<32; m++){
1330 f->rc_stat2[i][k][m][0] += fs->rc_stat2[i][k][m][0];
1331 f->rc_stat2[i][k][m][1] += fs->rc_stat2[i][k][m][1];
1337 for(j=0; j<256; j++){
1338 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat[j][0], f->rc_stat[j][1]);
1341 snprintf(p, end-p, "\n");
1343 for(i=0; i<f->quant_table_count; i++){
1344 for(j=0; j<f->context_count[i]; j++){
1345 for(m=0; m<32; m++){
1346 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat2[i][j][m][0], f->rc_stat2[i][j][m][1]);
1351 snprintf(p, end-p, "%d\n", f->gob_count);
1352 } else if(avctx->flags&CODEC_FLAG_PASS1)
1353 avctx->stats_out[0] = '\0';
1355 f->picture_number++;
1356 pkt->size = buf_p - pkt->data;
1357 pkt->flags |= AV_PKT_FLAG_KEY*p->key_frame;
1362 #endif /* CONFIG_FFV1_ENCODER */
/* Shared close() for encoder and decoder: release picture buffers (decoder
 * only) and free all per-slice, per-plane and rate-control allocations. */
1364 static av_cold int common_end(AVCodecContext *avctx){
1365 FFV1Context *s = avctx->priv_data;
/* Only the decoder owns get_buffer()-allocated pictures to release. */
1368 if (avctx->codec->decode && s->picture.data[0])
1369 avctx->release_buffer(avctx, &s->picture);
1370 if (avctx->codec->decode && s->last_picture.data[0])
1371 avctx->release_buffer(avctx, &s->last_picture);
/* Per-slice: free each plane's context state and the line sample buffer. */
1373 for(j=0; j<s->slice_count; j++){
1374 FFV1Context *fs= s->slice_context[j];
1375 for(i=0; i<s->plane_count; i++){
1376 PlaneContext *p= &fs->plane[i];
1378 av_freep(&p->state);
1379 av_freep(&p->vlc_state);
1381 av_freep(&fs->sample_buffer);
1384 av_freep(&avctx->stats_out);
/* Per quant table: initial states plus per-slice and global rc stats. */
1385 for(j=0; j<s->quant_table_count; j++){
1386 av_freep(&s->initial_states[j]);
1387 for(i=0; i<s->slice_count; i++){
1388 FFV1Context *sf= s->slice_context[i];
1389 av_freep(&sf->rc_stat2[j]);
1391 av_freep(&s->rc_stat2[j]);
/* Finally free the slice contexts themselves. */
1394 for(i=0; i<s->slice_count; i++){
1395 av_freep(&s->slice_context[i]);
/* Decode one line of samples for one plane.
 * Residuals are context-modeled; in range-coder mode they come from
 * get_symbol_inline(), in Golomb/VLC mode from get_vlc_symbol() with
 * run-length coding of zero contexts. `bits` is the sample depth; the
 * prediction is wrapped into [0, 2^bits). */
1401 static av_always_inline void decode_line(FFV1Context *s, int w,
1403 int plane_index, int bits)
1405 PlaneContext * const p= &s->plane[plane_index];
1406 RangeCoder * const c= &s->c;
/* Run index persists across lines via the slice context. */
1410 int run_index= s->run_index;
1413 int diff, context, sign;
1415 context= get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
1422 av_assert2(context < p->context_count);
1425 diff= get_symbol_inline(c, p->state[context], 1);
/* VLC path: a zero context switches into run mode. */
1427 if(context == 0 && run_mode==0) run_mode=1;
1430 if(run_count==0 && run_mode==1){
1431 if(get_bits1(&s->gb)){
/* '1' bit: a full run of 2^log2_run[run_index] samples. */
1432 run_count = 1<<ff_log2_run[run_index];
1433 if(x + run_count <= w) run_index++;
/* '0' bit: explicit残 run length follows, and the run index backs off. */
1435 if(ff_log2_run[run_index]) run_count = get_bits(&s->gb, ff_log2_run[run_index]);
1437 if(run_index) run_index--;
1445 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1450 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1452 // printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, get_bits_count(&s->gb));
/* Contexts with negative quantized gradient are coded sign-flipped. */
1455 if(sign) diff= -diff;
/* Reconstruct: median-style prediction + residual, masked to `bits`. */
1457 sample[1][x]= (predict(sample[1] + x, sample[0] + x) + diff) & ((1<<bits)-1);
1459 s->run_index= run_index;
/* Decode a full plane into `src` (stride in bytes): keeps two padded line
 * buffers (previous/current) and ping-pongs them per row, then stores the
 * decoded line as 8-bit or 16-bit pixels depending on bits_per_raw_sample. */
1462 static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
/* +3 left padding so context lookups at x=0..2 stay in bounds; each line
 * buffer is w+6 samples wide. */
1465 sample[0]=s->sample_buffer +3;
1466 sample[1]=s->sample_buffer+w+6+3;
1470 memset(s->sample_buffer, 0, 2*(w+6)*sizeof(*s->sample_buffer));
1473 int16_t *temp = sample[0]; //FIXME try a normal buffer
/* Swap: last decoded line becomes the "previous" line. */
1475 sample[0]= sample[1];
/* Edge extension for the predictor: left of previous line, right of it too. */
1478 sample[1][-1]= sample[0][0 ];
1479 sample[0][ w]= sample[0][w-1];
1482 if(s->avctx->bits_per_raw_sample <= 8){
1483 decode_line(s, w, sample, plane_index, 8);
1485 src[x + stride*y]= sample[1][x];
1488 decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
1489 if(s->packed_at_lsb){
1491 ((uint16_t*)(src + stride*y))[x]= sample[1][x];
/* MSB-packed output: shift up to fill the 16-bit container. */
1495 ((uint16_t*)(src + stride*y))[x]= sample[1][x] << (16 - s->avctx->bits_per_raw_sample);
1499 //STOP_TIMER("decode-line")}
/* Decode an RGB(A) frame (colorspace 1): planes are coded as JPEG2000-RCT
 * G, B-G, R-G (and A), each with 9-bit residual range, then inverse-RCT'd
 * into packed 32-bit pixels on elided lines. */
1503 static void decode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
/* Four planes x two lines (previous/current), each w+6 wide with +3 pad. */
1505 int16_t *sample[4][2];
1507 sample[x][0] = s->sample_buffer + x*2 *(w+6) + 3;
1508 sample[x][1] = s->sample_buffer + (x*2+1)*(w+6) + 3;
1513 memset(s->sample_buffer, 0, 8*(w+6)*sizeof(*s->sample_buffer));
/* Alpha plane only when transparency is signalled. */
1516 for(p=0; p<3 + s->transparency; p++){
1517 int16_t *temp = sample[p][0]; //FIXME try a normal buffer
1519 sample[p][0]= sample[p][1];
/* Edge extension as in decode_plane. */
1522 sample[p][1][-1]= sample[p][0][0 ];
1523 sample[p][0][ w]= sample[p][0][w-1];
/* (p+1)/2 maps planes {G,B,R,A} -> plane_index {0,1,1,2}; 9 bits because
 * chroma differences of 8-bit RCT need one extra bit. */
1524 decode_line(s, w, sample[p], (p+1)/2, 9);
1527 int g= sample[0][1][x];
1528 int b= sample[1][1][x];
1529 int r= sample[2][1][x];
1530 int a= sample[3][1][x];
1532 // assert(g>=0 && b>=0 && r>=0);
1533 // assert(g<256 && b<512 && r<512);
/* Pack as B | G<<8 | R<<16 | A<<24 (inverse RCT applied on elided lines). */
1541 src[x + stride*y]= b + (g<<8) + (r<<16) + (a<<24);
/* Parse a version-3+ per-slice header: slice geometry (in units of
 * width/num_h_slices etc.), per-plane quant table index, field order and
 * sample aspect ratio. Returns <0 on invalid data (returns elided here). */
1546 static int decode_slice_header(FFV1Context *f, FFV1Context *fs){
1547 RangeCoder *c = &fs->c;
1548 uint8_t state[CONTEXT_SIZE];
1549 unsigned ps, i, context_count;
1550 memset(state, 128, sizeof(state));
1552 av_assert0(f->version > 2);
/* Geometry is coded in slice-grid units, scaled by frame size first... */
1554 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1555 fs->slice_y = get_symbol(c, state, 0) *f->height;
1556 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1557 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
/* ...then divided by the slice grid so rounding matches the encoder. */
1559 fs->slice_x /= f->num_h_slices;
1560 fs->slice_y /= f->num_v_slices;
1561 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1562 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
/* Bounds checks: size within frame, and offset+size without overflow. */
1563 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1565 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1566 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1569 for(i=0; i<f->plane_count; i++){
1570 PlaneContext * const p= &fs->plane[i];
1571 int idx=get_symbol(c, state, 0);
/* NOTE(review): valid indices are 0..quant_table_count-1, so this check
 * looks off-by-one — idx == quant_table_count passes and would read one
 * past quant_tables[]. Should probably be >= ; confirm against spec.
 * (The int/unsigned mix does catch negative idx.) */
1572 if(idx > (unsigned)f->quant_table_count){
1573 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
1576 p->quant_table_index= idx;
1577 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
1578 context_count= f->context_count[idx];
/* Grow-only: reallocate state arrays only if the table needs more contexts. */
1580 if(p->context_count < context_count){
1581 av_freep(&p->state);
1582 av_freep(&p->vlc_state);
1584 p->context_count= context_count;
/* ps: picture structure (progressive / tff / bff), then SAR. */
1587 ps = get_symbol(c, state, 0);
1589 f->picture.interlaced_frame = 1;
1590 f->picture.top_field_first = 1;
1592 f->picture.interlaced_frame = 1;
1593 f->picture.top_field_first = 0;
1595 f->picture.interlaced_frame = 0;
1597 f->picture.sample_aspect_ratio.num = get_symbol(c, state, 0);
1598 f->picture.sample_aspect_ratio.den = get_symbol(c, state, 0);
/* Worker for avctx->execute(): decode one slice into f->picture.
 * Parses the slice header (v3+), clears context state on key frames, then
 * dispatches to decode_plane()/decode_rgb_frame() per colorspace. */
1603 static int decode_slice(AVCodecContext *c, void *arg){
1604 FFV1Context *fs= *(void**)arg;
1605 FFV1Context *f= fs->avctx->priv_data;
1606 int width, height, x, y;
/* Bytes per sample in the output picture (1 or 2). */
1607 const int ps= (c->bits_per_raw_sample>8)+1;
1608 AVFrame * const p= &f->picture;
1611 if(init_slice_state(f, fs) < 0)
1612 return AVERROR(ENOMEM);
/* A bad slice header marks the slice damaged for later concealment. */
1613 if(decode_slice_header(f, fs) < 0) {
1614 fs->slice_damaged = 1;
1615 return AVERROR_INVALIDDATA;
/* Re-init after the header may have changed context counts. */
1618 if(init_slice_state(f, fs) < 0)
1619 return AVERROR(ENOMEM);
1620 if(f->picture.key_frame)
1621 clear_slice_state(f, fs);
1622 width = fs->slice_width;
1623 height= fs->slice_height;
/* Golomb mode: switch from the range coder to a bit reader positioned just
 * after the RAC-coded part of the slice. */
1628 fs->ac_byte_count = f->version > 2 || (!x&&!y) ? fs->c.bytestream - fs->c.bytestream_start - 1 : 0;
1629 init_get_bits(&fs->gb,
1630 fs->c.bytestream_start + fs->ac_byte_count,
1631 (fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count) * 8);
1634 av_assert1(width && height);
1635 if(f->colorspace==0){
/* Ceiling division for subsampled chroma dimensions. */
1636 const int chroma_width = -((-width )>>f->chroma_h_shift);
1637 const int chroma_height= -((-height)>>f->chroma_v_shift);
1638 const int cx= x>>f->chroma_h_shift;
1639 const int cy= y>>f->chroma_v_shift;
1640 decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1642 if (f->chroma_planes){
1643 decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1644 decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1646 if (fs->transparency)
1647 decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
1649 decode_rgb_frame(fs, (uint32_t*)p->data[0] + ps*x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
/* Sanity check (v3+): the coder must have consumed exactly the slice minus
 * the 3-byte size trailer and optional 5-byte CRC area. */
1651 if(fs->ac && f->version > 2) {
1652 int v = fs->c.bytestream_end - fs->c.bytestream - 3 - 5*f->ec;
1653 if(v != -1 && v!= 0) {
1654 av_log(f->avctx, AV_LOG_ERROR, "bytestream end mismatching by %d\n", v);
1655 fs->slice_damaged = 1;
/* Read one run-length coded quantization table half (128 entries) and mirror
 * it to the negative side. Returns the scale for the next table dimension
 * (returns and the i += len advance are on elided lines). */
1664 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale){
1667 uint8_t state[CONTEXT_SIZE];
1669 memset(state, 128, sizeof(state));
/* Note: loop variable is v (the level) but termination is on i, which is
 * advanced by `len` inside the body (elided) — intentional, not a typo. */
1671 for(v=0; i<128 ; v++){
1672 unsigned len= get_symbol(c, state, 0) + 1;
/* A run longer than the remaining entries is corrupt data. */
1674 if(len > 128 - i) return -1;
1677 quant_table[i] = scale*v;
1680 //if(i%16==0) printf("\n");
/* Odd symmetry: entries 129..255 mirror 127..1 negated. */
1684 for(i=1; i<128; i++){
1685 quant_table[256-i]= -quant_table[i];
1687 quant_table[128]= -quant_table[127];
/* Read all MAX_CONTEXT_INPUTS quant tables; the context count is the product
 * of the per-table scales, capped at 32768, and halved (+1) because contexts
 * come in sign-symmetric pairs. A read_quant_table() failure (-1) makes the
 * product negative, which the caller rejects. */
1692 static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
1694 int context_count=1;
1697 context_count*= read_quant_table(c, quant_table[i], context_count);
1698 if(context_count > 32768U){
1702 return (context_count+1)/2;
/* Parse the global header from extradata (FFV1 version >= 2): version, coder
 * type, RAC transition table, colorspace/bit depth/chroma layout, slice grid,
 * quant tables, optional initial context states, error-check flag and a
 * trailing CRC over the whole extradata. */
1705 static int read_extra_header(FFV1Context *f){
1706 RangeCoder * const c= &f->c;
1707 uint8_t state[CONTEXT_SIZE];
/* One state vector per CONTEXT_SIZE slot for the initial-state deltas. */
1709 uint8_t state2[32][CONTEXT_SIZE];
1711 memset(state2, 128, sizeof(state2));
1712 memset(state, 128, sizeof(state));
1714 ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size);
1715 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1717 f->version= get_symbol(c, state, 0);
1718 if(f->version > 2) {
/* v3+: last 4 extradata bytes are the CRC, not coder payload. */
1719 c->bytestream_end -= 4;
1720 f->minor_version= get_symbol(c, state, 0);
1722 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
/* Custom RAC table coded as deltas from the default one_state. */
1724 for(i=1; i<256; i++){
1725 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1728 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1729 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1730 f->chroma_planes= get_rac(c, state);
1731 f->chroma_h_shift= get_symbol(c, state, 0);
1732 f->chroma_v_shift= get_symbol(c, state, 0);
1733 f->transparency= get_rac(c, state);
1734 f->plane_count= 2 + f->transparency;
1735 f->num_h_slices= 1 + get_symbol(c, state, 0);
1736 f->num_v_slices= 1 + get_symbol(c, state, 0);
/* More slices than pixels in either dimension is nonsense. */
1737 if(f->num_h_slices > (unsigned)f->width || f->num_v_slices > (unsigned)f->height){
1738 av_log(f->avctx, AV_LOG_ERROR, "too many slices\n");
1742 f->quant_table_count= get_symbol(c, state, 0);
1743 if(f->quant_table_count > (unsigned)MAX_QUANT_TABLES)
1745 for(i=0; i<f->quant_table_count; i++){
1746 if((f->context_count[i]= read_quant_tables(c, f->quant_tables[i])) < 0){
1747 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1752 if(allocate_initial_states(f) < 0)
1753 return AVERROR(ENOMEM);
/* Optional initial context states, DPCM-coded down the context axis. */
1755 for(i=0; i<f->quant_table_count; i++){
1756 if(get_rac(c, state)){
1757 for(j=0; j<f->context_count[i]; j++){
1758 for(k=0; k<CONTEXT_SIZE; k++){
1759 int pred= j ? f->initial_states[i][j-1][k] : 128;
1760 f->initial_states[i][j][k]= (pred+get_symbol(c, state2[k], 1))&0xFF;
1767 f->ec = get_symbol(c, state, 0);
/* Verify the extradata CRC (must checksum to the expected residue). */
1772 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, f->avctx->extradata, f->avctx->extradata_size);
1774 av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", v);
1775 return AVERROR_INVALIDDATA;
/* Parse the per-keyframe header (versions 0/1 carry the full parameter set
 * here; v2 adds per-slice geometry; v3+ read it from extradata instead),
 * select the output pixel format, and discover/validate the slice layout. */
1782 static int read_header(FFV1Context *f){
1783 uint8_t state[CONTEXT_SIZE];
1784 int i, j, context_count;
1785 RangeCoder * const c= &f->slice_context[0]->c;
1787 memset(state, 128, sizeof(state));
1790 unsigned v= get_symbol(c, state, 0);
1792 av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
1793 return AVERROR_INVALIDDATA;
1796 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
/* Custom RAC transition table, delta-coded against the default. */
1798 for(i=1; i<256; i++){
1799 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1802 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1804 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1805 f->chroma_planes= get_rac(c, state);
1806 f->chroma_h_shift= get_symbol(c, state, 0);
1807 f->chroma_v_shift= get_symbol(c, state, 0);
1808 f->transparency= get_rac(c, state);
1809 f->plane_count= 2 + f->transparency;
/* Map (colorspace, depth, chroma shifts, alpha) to an output pix_fmt. */
1812 if(f->colorspace==0){
1813 if(!f->transparency && !f->chroma_planes){
1814 if (f->avctx->bits_per_raw_sample<=8)
1815 f->avctx->pix_fmt= PIX_FMT_GRAY8;
1817 f->avctx->pix_fmt= PIX_FMT_GRAY16;
1818 }else if(f->avctx->bits_per_raw_sample<=8 && !f->transparency){
/* Key: 0xHV where H/V are the chroma shifts. */
1819 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1820 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P; break;
1821 case 0x01: f->avctx->pix_fmt= PIX_FMT_YUV440P; break;
1822 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P; break;
1823 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P; break;
1824 case 0x20: f->avctx->pix_fmt= PIX_FMT_YUV411P; break;
1825 case 0x22: f->avctx->pix_fmt= PIX_FMT_YUV410P; break;
1827 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1830 }else if(f->avctx->bits_per_raw_sample<=8 && f->transparency){
1831 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1832 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUVA444P; break;
1833 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUVA422P; break;
1834 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUVA420P; break;
1836 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1839 }else if(f->avctx->bits_per_raw_sample==9) {
1841 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1842 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P9; break;
1843 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P9; break;
1844 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P9; break;
1846 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1849 }else if(f->avctx->bits_per_raw_sample==10) {
1851 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1852 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P10; break;
1853 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P10; break;
1854 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P10; break;
1856 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1860 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1861 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
1862 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P16; break;
1863 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P16; break;
1865 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1869 }else if(f->colorspace==1){
1870 if(f->chroma_h_shift || f->chroma_v_shift){
1871 av_log(f->avctx, AV_LOG_ERROR, "chroma subsampling not supported in this colorspace\n");
1874 if(f->transparency) f->avctx->pix_fmt= PIX_FMT_RGB32;
1875 else f->avctx->pix_fmt= PIX_FMT_0RGB32;
1877 av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
1881 //printf("%d %d %d\n", f->chroma_h_shift, f->chroma_v_shift,f->avctx->pix_fmt);
1883 context_count= read_quant_tables(c, f->quant_table);
1884 if(context_count < 0){
1885 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1888 }else if(f->version < 3){
1889 f->slice_count= get_symbol(c, state, 0);
/* v3+: count slices by walking the 24-bit size trailers backwards from the
 * end of the buffer. */
1891 const uint8_t *p= c->bytestream_end;
1892 for(f->slice_count = 0; f->slice_count < MAX_SLICES && 3 < p - c->bytestream_start; f->slice_count++){
1893 int trailer = 3 + 5*!!f->ec;
1894 int size = AV_RB24(p-trailer);
1895 if(size + trailer > p - c->bytestream_start)
1897 p -= size + trailer;
1900 if(f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0){
1901 av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid\n", f->slice_count);
/* Propagate global parameters into each slice context. */
1905 for(j=0; j<f->slice_count; j++){
1906 FFV1Context *fs= f->slice_context[j];
1908 fs->packed_at_lsb= f->packed_at_lsb;
1910 fs->slice_damaged = 0;
/* v2 only: slice geometry lives in this header (v3+ uses the slice header;
 * same scale-then-divide scheme as decode_slice_header()). */
1912 if(f->version == 2){
1913 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1914 fs->slice_y = get_symbol(c, state, 0) *f->height;
1915 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1916 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
1918 fs->slice_x /= f->num_h_slices;
1919 fs->slice_y /= f->num_v_slices;
1920 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1921 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
1922 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1924 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1925 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1929 for(i=0; i<f->plane_count; i++){
1930 PlaneContext * const p= &fs->plane[i];
1932 if(f->version == 2){
1933 int idx=get_symbol(c, state, 0);
/* NOTE(review): same off-by-one as in decode_slice_header() — idx ==
 * quant_table_count slips past this check; should probably be >= . */
1934 if(idx > (unsigned)f->quant_table_count){
1935 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
1938 p->quant_table_index= idx;
1939 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
1940 context_count= f->context_count[idx];
/* v0/v1: single shared quant table. */
1942 memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
1945 if(f->version <= 2){
/* Grow-only reallocation of per-context state. */
1946 if(p->context_count < context_count){
1947 av_freep(&p->state);
1948 av_freep(&p->vlc_state);
1950 p->context_count= context_count;
/* Decoder init: parse the global extradata header (v2+ files) and allocate
 * the slice contexts (error returns elided from this listing). */
1957 static av_cold int decode_init(AVCodecContext *avctx)
1959 FFV1Context *f = avctx->priv_data;
1963 if(avctx->extradata && read_extra_header(f) < 0)
1966 if(init_slice_contexts(f) < 0)
/* Decode one packet into a frame: parse the keyframe flag (and header on key
 * frames), locate each slice via its trailing 24-bit size, verify optional
 * CRCs, decode slices in parallel, and conceal damaged slices from the
 * previous picture. */
1972 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
1973 const uint8_t *buf = avpkt->data;
1974 int buf_size = avpkt->size;
1975 FFV1Context *f = avctx->priv_data;
1976 RangeCoder * const c= &f->slice_context[0]->c;
1977 AVFrame * const p= &f->picture;
1979 uint8_t keystate= 128;
1980 const uint8_t *buf_p;
1982 AVFrame *picture = data;
1984 /* release previously stored data */
1986 avctx->release_buffer(avctx, p);
1988 ff_init_range_decoder(c, buf, buf_size);
1989 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1992 p->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
1993 if(get_rac(c, &keystate)){
/* Key frame: (re)parse the header; remember whether it was valid so later
 * inter frames can be refused after a broken keyframe. */
1995 f->key_frame_ok = 0;
1996 if(read_header(f) < 0)
1998 f->key_frame_ok = 1;
2000 if (!f->key_frame_ok) {
2001 av_log(avctx, AV_LOG_ERROR, "Cant decode non keyframe without valid keyframe\n");
2002 return AVERROR_INVALIDDATA;
2007 p->reference= 3; //for error concealment
2008 if(avctx->get_buffer(avctx, p) < 0){
2009 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
2013 if(avctx->debug&FF_DEBUG_PICT_INFO)
2014 av_log(avctx, AV_LOG_DEBUG, "ver:%d keyframe:%d coder:%d ec:%d slices:%d\n",
2015 f->version, p->key_frame, f->ac, f->ec, f->slice_count);
/* Walk slices from the end of the packet using the size trailers. */
2017 buf_p= buf + buf_size;
2018 for(i=f->slice_count-1; i>=0; i--){
2019 FFV1Context *fs= f->slice_context[i];
2020 int trailer = 3 + 5*!!f->ec;
/* Slice 0 in old versions has no trailer: it spans to the header's end. */
2023 if(i || f->version>2) v = AV_RB24(buf_p-trailer)+trailer;
2024 else v = buf_p - c->bytestream_start;
2025 if(buf_p - c->bytestream_start < v){
2026 av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
/* Per-slice CRC check; on mismatch log (with timestamp if available) and
 * mark the slice damaged rather than aborting the frame. */
2032 unsigned crc = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, v);
2034 int64_t ts = avpkt->pts != AV_NOPTS_VALUE ? avpkt->pts : avpkt->dts;
2035 av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!", crc);
2036 if(ts != AV_NOPTS_VALUE && avctx->pkt_timebase.num) {
2037 av_log(f->avctx, AV_LOG_ERROR, "at %f seconds\n",ts*av_q2d(avctx->pkt_timebase));
2038 } else if(ts != AV_NOPTS_VALUE) {
2039 av_log(f->avctx, AV_LOG_ERROR, "at %"PRId64"\n", ts);
2041 av_log(f->avctx, AV_LOG_ERROR, "\n");
2043 fs->slice_damaged = 1;
2048 ff_init_range_decoder(&fs->c, buf_p, v);
2050 fs->c.bytestream_end = (uint8_t *)(buf_p + v);
/* Decode all slices (possibly threaded). */
2053 avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
/* Conceal damaged slices by copying the co-located area from last_picture. */
2055 for(i=f->slice_count-1; i>=0; i--){
2056 FFV1Context *fs= f->slice_context[i];
2058 if(fs->slice_damaged && f->last_picture.data[0]){
2059 uint8_t *dst[4], *src[4];
2061 int sh = (j==1 || j==2) ? f->chroma_h_shift : 0;
2062 int sv = (j==1 || j==2) ? f->chroma_v_shift : 0;
2063 dst[j] = f->picture .data[j] + f->picture .linesize[j]*
2064 (fs->slice_y>>sv) + (fs->slice_x>>sh);
2065 src[j] = f->last_picture.data[j] + f->last_picture.linesize[j]*
2066 (fs->slice_y>>sv) + (fs->slice_x>>sh);
2068 av_image_copy(dst, f->picture.linesize, (const uint8_t **)src, f->last_picture.linesize,
2069 avctx->pix_fmt, fs->slice_width, fs->slice_height);
2073 f->picture_number++;
2076 *data_size = sizeof(AVFrame);
/* Keep the just-decoded picture as the reference for concealment. */
2078 FFSWAP(AVFrame, f->picture, f->last_picture);
/* FFV1 decoder registration: direct rendering + slice threading. */
2083 AVCodec ff_ffv1_decoder = {
2085 .type = AVMEDIA_TYPE_VIDEO,
2086 .id = AV_CODEC_ID_FFV1,
2087 .priv_data_size = sizeof(FFV1Context),
2088 .init = decode_init,
2089 .close = common_end,
2090 .decode = decode_frame,
2091 .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ |
2092 CODEC_CAP_SLICE_THREADS,
2093 .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
2096 #if CONFIG_FFV1_ENCODER
2098 #define OFFSET(x) offsetof(FFV1Context, x)
2099 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Encoder options: slicecrc -1 = auto (decided by version), 0/1 = off/on. */
2100 static const AVOption options[] = {
2101 { "slicecrc", "Protect slices with CRCs", OFFSET(ec), AV_OPT_TYPE_INT, {-1}, -1, 1, VE},
/* AVClass exposing the options above.
 * NOTE(review): `class` is a C++ keyword — fine in C, but renaming (e.g.
 * ffv1_class) would keep the file C++-safe; FFmpeg later did exactly that. */
2105 static const AVClass class = {
2106 .class_name = "ffv1 encoder",
2107 .item_name = av_default_item_name,
2109 .version = LIBAVUTIL_VERSION_INT,
2112 static const AVCodecDefault ffv1_defaults[] = {
2117 AVCodec ff_ffv1_encoder = {
2119 .type = AVMEDIA_TYPE_VIDEO,
2120 .id = AV_CODEC_ID_FFV1,
2121 .priv_data_size = sizeof(FFV1Context),
2122 .init = encode_init,
2123 .encode2 = encode_frame,
2124 .close = common_end,
2125 .capabilities = CODEC_CAP_SLICE_THREADS,
2126 .defaults = ffv1_defaults,
2127 .pix_fmts = (const enum PixelFormat[]){
2128 PIX_FMT_YUV420P, PIX_FMT_YUVA420P, PIX_FMT_YUVA422P, PIX_FMT_YUV444P,
2129 PIX_FMT_YUVA444P, PIX_FMT_YUV440P, PIX_FMT_YUV422P, PIX_FMT_YUV411P,
2130 PIX_FMT_YUV410P, PIX_FMT_0RGB32, PIX_FMT_RGB32, PIX_FMT_YUV420P16,
2131 PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_YUV444P9, PIX_FMT_YUV422P9,
2132 PIX_FMT_YUV420P9, PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_YUV444P10,
2133 PIX_FMT_GRAY16, PIX_FMT_GRAY8,
2136 .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
2137 .priv_class = &class,