2 * FFV1 codec for libavcodec
4 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * FF Video Codec 1 (a lossless codec)
33 #include "rangecoder.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/crc.h"
39 #include "libavutil/opt.h"
41 #ifdef __INTEL_COMPILER
/* Per-context range-coder state size in bytes (see put_symbol_inline: states 0..31 used). */
47 #define CONTEXT_SIZE 32
/* Upper bound on distinct quantization tables signaled in the extradata header. */
49 #define MAX_QUANT_TABLES 8
/* Number of quantized neighbour differences feeding context selection (see get_context). */
50 #define MAX_CONTEXT_INPUTS 5
/* Run-length code lengths for Golomb-Rice run mode, defined elsewhere in FFmpeg. */
52 extern const uint8_t ff_log2_run[41];
/* 5-level signed quantizer LUT, indexed by an 8-bit (two's-complement wrapped)
 * sample difference; selected in encode_init for the >8-bit sample path. */
54 static const int8_t quant5_10bit[256]={
55 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
56 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
57 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
58 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
59 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
60 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
61 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
62 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* Second half mirrors the first with negated levels (differences >= 128 are negative). */
63 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
64 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
65 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
66 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
67 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,
68 -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
69 -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
70 -1,-1,-1,-1,-1,-1,-0,-0,-0,-0,-0,-0,-0,-0,-0,-0,
/* 5-level signed quantizer LUT for 8-bit samples (see encode_init, quant_tables[1]). */
73 static const int8_t quant5[256]={
74 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
75 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
76 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
77 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
78 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
79 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
80 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* Negative-difference half (indices 128..255 wrap to negative deltas). */
82 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
83 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
84 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
85 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
86 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
87 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
88 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
89 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1,-1,
/* 9-level signed quantizer LUT for the >8-bit sample path (see encode_init, quant_tables[0]). */
92 static const int8_t quant9_10bit[256]={
93 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
95 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
96 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
97 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
98 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
99 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
100 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* Negative-difference half. */
101 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
102 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
103 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
104 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
105 -4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,
106 -3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,
107 -3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
108 -2,-2,-2,-2,-1,-1,-1,-1,-1,-1,-1,-1,-0,-0,-0,-0,
/* 11-level signed quantizer LUT for 8-bit samples (see encode_init, quant_tables[0..1]). */
111 static const int8_t quant11[256]={
112 0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
113 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
114 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
115 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
116 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
117 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
118 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
119 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
/* Negative-difference half. */
120 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
121 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
122 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
123 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
124 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
125 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-4,-4,
126 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
127 -4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-1,
130 static const uint8_t ver2_state[256]= {
131 0, 10, 10, 10, 10, 16, 16, 16, 28, 16, 16, 29, 42, 49, 20, 49,
132 59, 25, 26, 26, 27, 31, 33, 33, 33, 34, 34, 37, 67, 38, 39, 39,
133 40, 40, 41, 79, 43, 44, 45, 45, 48, 48, 64, 50, 51, 52, 88, 52,
134 53, 74, 55, 57, 58, 58, 74, 60, 101, 61, 62, 84, 66, 66, 68, 69,
135 87, 82, 71, 97, 73, 73, 82, 75, 111, 77, 94, 78, 87, 81, 83, 97,
136 85, 83, 94, 86, 99, 89, 90, 99, 111, 92, 93, 134, 95, 98, 105, 98,
137 105, 110, 102, 108, 102, 118, 103, 106, 106, 113, 109, 112, 114, 112, 116, 125,
138 115, 116, 117, 117, 126, 119, 125, 121, 121, 123, 145, 124, 126, 131, 127, 129,
139 165, 130, 132, 138, 133, 135, 145, 136, 137, 139, 146, 141, 143, 142, 144, 148,
140 147, 155, 151, 149, 151, 150, 152, 157, 153, 154, 156, 168, 158, 162, 161, 160,
141 172, 163, 169, 164, 166, 184, 167, 170, 177, 174, 171, 173, 182, 176, 180, 178,
142 175, 189, 179, 181, 186, 183, 192, 185, 200, 187, 191, 188, 190, 197, 193, 196,
143 197, 194, 195, 196, 198, 202, 199, 201, 210, 203, 207, 204, 205, 206, 208, 214,
144 209, 211, 221, 212, 213, 215, 224, 216, 217, 218, 219, 220, 222, 228, 223, 225,
145 226, 224, 227, 229, 240, 230, 231, 232, 233, 234, 235, 236, 238, 239, 237, 242,
146 241, 243, 242, 244, 245, 246, 247, 248, 249, 250, 251, 252, 252, 253, 254, 255,
/* Adaptive Golomb-Rice coder state per context: drift/error_sum/bias/count
 * fields are used by update_vlc_state and {put,get}_vlc_symbol below. */
149 typedef struct VlcState{
/* Per-plane coding state: quantization table, range-coder contexts and
 * (for Golomb-Rice mode) VLC state, reset by clear_slice_state. */
156 typedef struct PlaneContext{
157 int16_t quant_table[MAX_CONTEXT_INPUTS][256];
158 int quant_table_index;
/* One CONTEXT_SIZE-byte range-coder state per context; allocated in init_slice_state. */
160 uint8_t (*state)[CONTEXT_SIZE];
/* Range-coder state for the per-line "interlace bit" (decoder side). */
162 uint8_t interlace_bit_state[2];
/* Hard upper bound on the number of slice contexts (slice_context array below). */
165 #define MAX_SLICES 256
/* Main codec context; also reused per-slice (each slice gets its own copy,
 * see init_slice_contexts). */
167 typedef struct FFV1Context{
169 AVCodecContext *avctx;
/* 2-pass statistics: per-state 0/1 symbol counts, and per-quant-table
 * per-context counters used to derive initial states. */
173 uint64_t rc_stat[256][2];
174 uint64_t (*rc_stat2[MAX_QUANT_TABLES])[32][2];
178 int chroma_h_shift, chroma_v_shift;
185 int ac; ///< 1=range coder <-> 0=golomb rice
186 int ac_byte_count; ///< number of bytes used for AC coding
187 PlaneContext plane[MAX_PLANES];
188 int16_t quant_table[MAX_CONTEXT_INPUTS][256];
189 int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][256];
190 int context_count[MAX_QUANT_TABLES];
191 uint8_t state_transition[256];
192 uint8_t (*initial_states[MAX_QUANT_TABLES])[32];
/* Ring buffer of decoded/encoded sample lines (see encode_plane/encode_rgb_frame). */
195 int16_t *sample_buffer;
201 int quant_table_count;
205 struct FFV1Context *slice_context[MAX_SLICES];
213 int bits_per_raw_sample;
/* Folds a prediction residual into the representable signed range for
 * 'bits'-wide samples (body not visible here). */
216 static av_always_inline int fold(int diff, int bits){
/* Median predictor: predicts the current sample from left (L), top (T) and
 * top-left (LT) neighbours as median(L, L+T-LT, T). */
228 static inline int predict(int16_t *src, int16_t *last)
230 const int LT= last[-1];
231 const int T= last[ 0];
232 const int L = src[-1];
234 return mid_pred(L, L + T - LT, T);
/* Computes the coding context from quantized neighbour differences.
 * Uses 3 inputs (L-LT, LT-T, T-RT) in the small model, plus LL-L and TT-T
 * when quant_table[3] is non-trivial (5-input "large" context model). */
237 static inline int get_context(PlaneContext *p, int16_t *src,
238 int16_t *last, int16_t *last2)
240 const int LT= last[-1];
241 const int T= last[ 0];
242 const int RT= last[ 1];
243 const int L = src[-1];
/* quant_table[3][127] != 0 signals the 5-input model is in use. */
245 if(p->quant_table[3][127]){
246 const int TT= last2[0];
247 const int LL= src[-2];
248 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF]
249 +p->quant_table[3][(LL-L) & 0xFF] + p->quant_table[4][(TT-T) & 0xFF];
251 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF];
/* For each (probability, occupancy) pair, searches over candidate initial
 * range-coder states and keeps the one minimizing expected code length,
 * iterating the state-transition chain and accumulating entropy via l2tab.
 * Used by encode_init when 2-pass stats are available. */
254 static void find_best_state(uint8_t best_state[256][256], const uint8_t one_state[256]){
/* Precomputed log2(i/256) table for entropy cost evaluation. */
259 l2tab[i]= log2(i/256.0);
261 for(i=0; i<256; i++){
262 double best_len[256];
/* Only probe start states near i; farther states are presumably never better. */
268 for(j=FFMAX(i-10,1); j<FFMIN(i+11,256); j++){
272 for(k=0; k<256; k++){
273 double newocc[256]={0};
274 for(m=0; m<256; m++){
/* Expected bits: -p*log2(P(one)) - (1-p)*log2(P(zero)), weighted by occupancy. */
276 len -=occ[m]*( p *l2tab[ m]
277 + (1-p)*l2tab[256-m]);
280 if(len < best_len[k]){
/* Advance occupancy one step through the state machine. */
284 for(m=0; m<256; m++){
286 newocc[ one_state[ m]] += occ[m]* p ;
287 newocc[256-one_state[256-m]] += occ[m]*(1-p);
290 memcpy(occ, newocc, sizeof(occ));
/* Range-codes one (optionally signed) integer using a unary exponent prefix
 * (states 1..10), sign bit (states 11..21) and mantissa bits (states 22..31).
 * When rc_stat/rc_stat2 are non-NULL (pass-1), the put_rac wrapper below also
 * accumulates per-state symbol statistics. */
296 static av_always_inline av_flatten void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int is_signed, uint64_t rc_stat[256][2], uint64_t rc_stat2[32][2]){
299 #define put_rac(C,S,B) \
303 rc_stat2[(S)-state][B]++;\
309 const int a= FFABS(v);
310 const int e= av_log2(a);
/* Zero flag: 0 means "non-zero value follows". */
311 put_rac(c, state+0, 0);
314 put_rac(c, state+1+i, 1); //1..10
316 put_rac(c, state+1+i, 0);
318 for(i=e-1; i>=0; i--){
319 put_rac(c, state+22+i, (a>>i)&1); //22..31
323 put_rac(c, state+11 + e, v < 0); //11..21
/* Large-exponent path: clamp state indices with FFMIN so they stay in range. */
326 put_rac(c, state+1+FFMIN(i,9), 1); //1..10
328 put_rac(c, state+1+9, 0);
330 for(i=e-1; i>=0; i--){
331 put_rac(c, state+22+FFMIN(i,9), (a>>i)&1); //22..31
335 put_rac(c, state+11 + 10, v < 0); //11..21
/* v == 0: single "zero" flag. */
338 put_rac(c, state+0, 1);
/* Non-inlined wrapper around put_symbol_inline without statistics collection
 * (used for header fields, not per-sample data). */
343 static av_noinline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed){
344 put_symbol_inline(c, state, v, is_signed, NULL, NULL);
/* Decoder-side counterpart of put_symbol_inline: reads the zero flag, unary
 * exponent (states 1..10), mantissa bits (22..31) and sign (11..21). */
347 static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed){
348 if(get_rac(c, state+0))
353 while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
358 for(i=e-1; i>=0; i--){
359 a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31
/* e becomes 0 or -1 (all-ones mask) used to conditionally negate the value. */
362 e= -(is_signed && get_rac(c, state+11 + FFMIN(e, 10))); //11..21
/* Non-inlined wrapper around get_symbol_inline (used for header parsing). */
367 static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
368 return get_symbol_inline(c, state, is_signed);
/* Updates adaptive Golomb-Rice state after coding residual v: accumulates
 * |v| into error_sum and v into drift, halving both when count reaches 128,
 * and nudges the bias toward the running drift sign. */
371 static inline void update_vlc_state(VlcState * const state, const int v){
372 int drift= state->drift;
373 int count= state->count;
374 state->error_sum += FFABS(v);
/* Periodic rescale keeps the adaptation responsive to local statistics. */
377 if(count == 128){ //FIXME variable
380 state->error_sum >>= 1;
/* Bias clamped to int8 range [-128, 127]. */
385 if(state->bias > -128) state->bias--;
391 if(state->bias < 127) state->bias++;
/* Writes one residual with an adaptive signed Golomb-Rice code: removes the
 * bias, folds into range, picks Rice parameter k from error_sum, applies the
 * drift-based sign mapping, then updates the adaptation state. */
402 static inline void put_vlc_symbol(PutBitContext *pb, VlcState * const state, int v, int bits){
404 //printf("final: %d ", v);
405 v = fold(v - state->bias, bits);
/* k grows until 2^k covers the average error magnitude. */
409 while(i < state->error_sum){ //FIXME optimize
/* For k==0, a strongly negative drift flips the mapping (v ^ -1). */
417 if(k==0 && 2*state->drift <= - state->count) code= v ^ (-1);
420 code= v ^ ((2*state->drift + state->count)>>31);
423 //printf("v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code, state->bias, state->error_sum, state->drift, state->count, k);
424 set_sr_golomb(pb, code, k, 12, bits);
426 update_vlc_state(state, v);
/* Decoder-side counterpart of put_vlc_symbol: derives the same Rice parameter
 * k from error_sum, reads the code, undoes the drift-based sign mapping and
 * the bias, and updates the shared adaptation state. */
429 static inline int get_vlc_symbol(GetBitContext *gb, VlcState * const state, int bits){
434 while(i < state->error_sum){ //FIXME optimize
441 v= get_sr_golomb(gb, k, 12, bits);
442 //printf("v:%d bias:%d error:%d drift:%d count:%d k:%d", v, state->bias, state->error_sum, state->drift, state->count, k);
/* Mirror of the encoder's sign mapping — must match put_vlc_symbol exactly. */
445 if(k==0 && 2*state->drift <= - state->count) v ^= (-1);
447 v ^= ((2*state->drift + state->count)>>31);
450 ret= fold(v + state->bias, bits);
452 update_vlc_state(state, v);
453 //printf("final: %d\n", ret);
457 #if CONFIG_FFV1_ENCODER
/* Encodes one line of residuals for a plane: per-pixel context selection,
 * median prediction, residual folding, then either range coding (s->ac) or
 * Golomb-Rice with run-length mode for zero contexts. Returns an error when
 * the output buffer may overflow. */
458 static av_always_inline int encode_line(FFV1Context *s, int w,
460 int plane_index, int bits)
462 PlaneContext * const p= &s->plane[plane_index];
463 RangeCoder * const c= &s->c;
465 int run_index= s->run_index;
/* Conservative worst-case space checks before encoding the line. */
470 if(c->bytestream_end - c->bytestream < w*20){
471 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
475 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < w*4){
476 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
/* sample[0] is the current line, sample[1] the previous, sample[2] two above. */
484 context= get_context(p, sample[0]+x, sample[1]+x, sample[2]+x);
485 diff= sample[0][x] - predict(sample[0]+x, sample[1]+x);
492 diff= fold(diff, bits);
/* Pass-1 collects range-coder statistics via the rc_stat pointers. */
495 if(s->flags & CODEC_FLAG_PASS1){
496 put_symbol_inline(c, p->state[context], diff, 1, s->rc_stat, s->rc_stat2[p->quant_table_index][context]);
498 put_symbol_inline(c, p->state[context], diff, 1, NULL, NULL);
/* Golomb-Rice path: context 0 switches into run mode. */
501 if(context == 0) run_mode=1;
506 while(run_count >= 1<<ff_log2_run[run_index]){
507 run_count -= 1<<ff_log2_run[run_index];
509 put_bits(&s->pb, 1, 1);
512 put_bits(&s->pb, 1 + ff_log2_run[run_index], run_count);
513 if(run_index) run_index--;
522 // printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, (int)put_bits_count(&s->pb));
525 put_vlc_symbol(&s->pb, &p->vlc_state[context], diff, bits);
/* Flush any pending run at end of line. */
529 while(run_count >= 1<<ff_log2_run[run_index]){
530 run_count -= 1<<ff_log2_run[run_index];
532 put_bits(&s->pb, 1, 1);
536 put_bits(&s->pb, 1, 1);
538 s->run_index= run_index;
/* Encodes one plane: maintains a 2- or 3-line ring buffer of int16 samples
 * (3 lines when the large context model needs the line two above), loads each
 * source row (8-bit, or 16-bit packed at LSB/MSB), and calls encode_line. */
543 static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
545 const int ring_size= s->avctx->context_model ? 3 : 2;
/* +6: 3 guard samples on each side so neighbour accesses never go OOB. */
549 memset(s->sample_buffer, 0, ring_size*(w+6)*sizeof(*s->sample_buffer));
552 for(i=0; i<ring_size; i++)
553 sample[i]= s->sample_buffer + (w+6)*((h+i-y)%ring_size) + 3;
/* Edge extension: left edge mirrors the sample above, right edge repeats. */
555 sample[0][-1]= sample[1][0 ];
556 sample[1][ w]= sample[1][w-1];
558 if(s->bits_per_raw_sample<=8){
560 sample[0][x]= src[x + stride*y];
562 encode_line(s, w, sample, plane_index, 8);
564 if(s->packed_at_lsb){
566 sample[0][x]= ((uint16_t*)(src + stride*y))[x];
/* MSB-packed input: shift down to the effective bit depth. */
570 sample[0][x]= ((uint16_t*)(src + stride*y))[x] >> (16 - s->bits_per_raw_sample);
573 encode_line(s, w, sample, plane_index, s->bits_per_raw_sample);
575 //STOP_TIMER("encode line")}
/* Encodes a packed 32-bit RGB(A) frame: splits each pixel into per-component
 * planes (with its own ring buffer each), then encodes each component line
 * with 9-bit residuals (the extra bit comes from the JPEG2000-style RCT). */
579 static void encode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
581 const int ring_size= s->avctx->context_model ? 3 : 2;
582 int16_t *sample[4][3];
585 memset(s->sample_buffer, 0, ring_size*4*(w+6)*sizeof(*s->sample_buffer));
588 for(i=0; i<ring_size; i++)
590 sample[p][i]= s->sample_buffer + p*ring_size*(w+6) + ((h+i-y)%ring_size)*(w+6) + 3;
593 unsigned v= src[x + stride*y];
605 // assert(g>=0 && b>=0 && r>=0);
606 // assert(g<256 && b<512 && r<512);
/* Encode G, then B/R (sharing plane_index via (p+1)/2), then optional alpha. */
612 for(p=0; p<3 + s->transparency; p++){
613 sample[p][0][-1]= sample[p][1][0 ];
614 sample[p][1][ w]= sample[p][1][w-1];
615 encode_line(s, w, sample[p], (p+1)/2, 9);
/* Serializes one 256-entry quantization table as run lengths between value
 * changes over the first half (the second half is implied by symmetry). */
620 static void write_quant_table(RangeCoder *c, int16_t *quant_table){
623 uint8_t state[CONTEXT_SIZE];
624 memset(state, 128, sizeof(state));
626 for(i=1; i<128 ; i++){
627 if(quant_table[i] != quant_table[i-1]){
628 put_symbol(c, state, i-last-1, 0);
/* Final run up to index 127. */
632 put_symbol(c, state, i-last-1, 0);
/* Writes all context-input quantization tables of one table set. */
635 static void write_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
638 write_quant_table(c, quant_table[i]);
/* Writes the per-frame header into the first slice's range coder: version,
 * coder type, colorspace/bit-depth/chroma layout, and — depending on version —
 * inline quant tables (v0/1) or per-slice geometry and table indices (v2). */
641 static void write_header(FFV1Context *f){
642 uint8_t state[CONTEXT_SIZE];
644 RangeCoder * const c= &f->slice_context[0]->c;
646 memset(state, 128, sizeof(state));
649 put_symbol(c, state, f->version, 0);
650 put_symbol(c, state, f->ac, 0);
/* Custom state-transition table signaled as deltas from the default. */
652 for(i=1; i<256; i++){
653 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
656 put_symbol(c, state, f->colorspace, 0); //YUV cs type
658 put_symbol(c, state, f->bits_per_raw_sample, 0);
659 put_rac(c, state, f->chroma_planes);
660 put_symbol(c, state, f->chroma_h_shift, 0);
661 put_symbol(c, state, f->chroma_v_shift, 0);
662 put_rac(c, state, f->transparency);
664 write_quant_tables(c, f->quant_table);
665 }else if(f->version < 3){
/* Version 2: slice grid geometry encoded as fractions of frame size. */
666 put_symbol(c, state, f->slice_count, 0);
667 for(i=0; i<f->slice_count; i++){
668 FFV1Context *fs= f->slice_context[i];
669 put_symbol(c, state, (fs->slice_x +1)*f->num_h_slices / f->width , 0);
670 put_symbol(c, state, (fs->slice_y +1)*f->num_v_slices / f->height , 0);
671 put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
672 put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
673 for(j=0; j<f->plane_count; j++){
674 put_symbol(c, state, f->plane[j].quant_table_index, 0);
675 av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
680 #endif /* CONFIG_FFV1_ENCODER */
/* Shared encoder/decoder initialization: caches flags and dimensions,
 * initializes the picture defaults and DSP context. */
682 static av_cold int common_init(AVCodecContext *avctx){
683 FFV1Context *s = avctx->priv_data;
686 s->flags= avctx->flags;
688 avcodec_get_frame_defaults(&s->picture);
690 ff_dsputil_init(&s->dsp, avctx);
692 s->width = avctx->width;
693 s->height= avctx->height;
695 assert(s->width && s->height);
/* Lazily allocates per-plane range-coder and VLC state for one slice and
 * rebuilds the slice's range-coder transition tables from state_transition.
 * Returns AVERROR(ENOMEM) on allocation failure. */
704 static int init_slice_state(FFV1Context *f, FFV1Context *fs){
707 fs->plane_count= f->plane_count;
708 fs->transparency= f->transparency;
709 for(j=0; j<f->plane_count; j++){
710 PlaneContext * const p= &fs->plane[j];
713 if(!p-> state) p-> state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t));
715 return AVERROR(ENOMEM);
717 if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(VlcState));
719 return AVERROR(ENOMEM);
724 //FIXME only redo if state_transition changed
725 for(j=1; j<256; j++){
726 fs->c.one_state [ j]= fs->state_transition[j];
/* zero_state is the mirror of one_state by construction. */
727 fs->c.zero_state[256-j]= 256-fs->c.one_state [j];
/* Initializes state for every slice context; propagates the first failure. */
734 static int init_slices_state(FFV1Context *f){
736 for(i=0; i<f->slice_count; i++){
737 FFV1Context *fs= f->slice_context[i];
738 if(init_slice_state(f, fs) < 0)
/* Creates one FFV1Context per slice as a copy of the main context, computes
 * each slice's rectangle from the h x v grid, and allocates its sample ring
 * buffer (3 lines x 4 components worst case). */
744 static av_cold int init_slice_contexts(FFV1Context *f){
747 f->slice_count= f->num_h_slices * f->num_v_slices;
749 for(i=0; i<f->slice_count; i++){
750 FFV1Context *fs= av_mallocz(sizeof(*fs));
751 int sx= i % f->num_h_slices;
752 int sy= i / f->num_h_slices;
/* Slice edges as integer fractions of the frame, matching write_header. */
753 int sxs= f->avctx->width * sx / f->num_h_slices;
754 int sxe= f->avctx->width *(sx+1) / f->num_h_slices;
755 int sys= f->avctx->height* sy / f->num_v_slices;
756 int sye= f->avctx->height*(sy+1) / f->num_v_slices;
757 f->slice_context[i]= fs;
758 memcpy(fs, f, sizeof(*fs));
/* Slice copies must not share the parent's rc_stat2 pointers. */
759 memset(fs->rc_stat2, 0, sizeof(fs->rc_stat2));
761 fs->slice_width = sxe - sxs;
762 fs->slice_height= sye - sys;
766 fs->sample_buffer = av_malloc(3*4 * (fs->width+6) * sizeof(*fs->sample_buffer));
767 if (!fs->sample_buffer)
768 return AVERROR(ENOMEM);
/* Allocates the per-quant-table initial-state arrays and fills them with the
 * neutral range-coder probability (128). Returns AVERROR(ENOMEM) on failure. */
773 static int allocate_initial_states(FFV1Context *f){
776 for(i=0; i<f->quant_table_count; i++){
777 f->initial_states[i]= av_malloc(f->context_count[i]*sizeof(*f->initial_states[i]));
778 if(!f->initial_states[i])
779 return AVERROR(ENOMEM);
780 memset(f->initial_states[i], 128, f->context_count[i]*sizeof(*f->initial_states[i]));
785 #if CONFIG_FFV1_ENCODER
/* Builds the version >= 2 global extradata: version, coder parameters,
 * quant tables, optional non-default initial states, and (v3) the error
 * correction flag plus a trailing CRC32 over the extradata. */
786 static int write_extra_header(FFV1Context *f){
787 RangeCoder * const c= &f->c;
788 uint8_t state[CONTEXT_SIZE];
790 uint8_t state2[32][CONTEXT_SIZE];
793 memset(state2, 128, sizeof(state2));
794 memset(state, 128, sizeof(state));
/* Worst-case extradata size: NOTE(review) allocation result is not checked here. */
796 f->avctx->extradata= av_malloc(f->avctx->extradata_size= 10000 + (11*11*5*5*5+11*11*11)*32);
797 ff_init_range_encoder(c, f->avctx->extradata, f->avctx->extradata_size);
798 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
800 put_symbol(c, state, f->version, 0);
802 put_symbol(c, state, f->minor_version, 0);
803 put_symbol(c, state, f->ac, 0);
/* Custom state transitions as deltas from the default table. */
805 for(i=1; i<256; i++){
806 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
809 put_symbol(c, state, f->colorspace, 0); //YUV cs type
810 put_symbol(c, state, f->bits_per_raw_sample, 0);
811 put_rac(c, state, f->chroma_planes);
812 put_symbol(c, state, f->chroma_h_shift, 0);
813 put_symbol(c, state, f->chroma_v_shift, 0);
814 put_rac(c, state, f->transparency);
815 put_symbol(c, state, f->num_h_slices-1, 0);
816 put_symbol(c, state, f->num_v_slices-1, 0);
818 put_symbol(c, state, f->quant_table_count, 0);
819 for(i=0; i<f->quant_table_count; i++)
820 write_quant_tables(c, f->quant_tables[i]);
/* Only transmit initial states if any byte differs from the 128 default. */
822 for(i=0; i<f->quant_table_count; i++){
823 for(j=0; j<f->context_count[i]*CONTEXT_SIZE; j++)
824 if(f->initial_states[i] && f->initial_states[i][0][j] != 128)
826 if(j<f->context_count[i]*CONTEXT_SIZE){
827 put_rac(c, state, 1);
828 for(j=0; j<f->context_count[i]; j++){
829 for(k=0; k<CONTEXT_SIZE; k++){
/* Predict each state byte from the previous context's same byte. */
830 int pred= j ? f->initial_states[i][j-1][k] : 128;
831 put_symbol(c, state2[k], (int8_t)(f->initial_states[i][j][k]-pred), 1);
835 put_rac(c, state, 0);
840 put_symbol(c, state, f->ec, 0);
/* Terminate the coder, then append a CRC32 of the finished extradata. */
843 f->avctx->extradata_size= ff_rac_terminate(c);
844 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, f->avctx->extradata, f->avctx->extradata_size);
845 AV_WL32(f->avctx->extradata + f->avctx->extradata_size, v);
846 f->avctx->extradata_size += 4;
/* Greedy optimization of the state-transition table from pass-1 statistics:
 * tries swapping nearby states when the estimated coded size shrinks,
 * keeping the table symmetric (entry i mirrored at 256-i) and remapping all
 * transitions that referenced the swapped states. */
851 static int sort_stt(FFV1Context *s, uint8_t stt[256]){
852 int i,i2,changed,print=0;
/* States 0..11 and 244..255 are reserved/endpoint states and left untouched. */
856 for(i=12; i<244; i++){
857 for(i2=i+1; i2<245 && i2<i+4; i2++){
/* Expected bits for coding with probability state 'new' given counts at 'old'. */
858 #define COST(old, new) \
859 s->rc_stat[old][0]*-log2((256-(new))/256.0)\
860 +s->rc_stat[old][1]*-log2( (new) /256.0)
862 #define COST2(old, new) \
864 +COST(256-(old), 256-(new))
866 double size0= COST2(i, i ) + COST2(i2, i2);
867 double sizeX= COST2(i, i2) + COST2(i2, i );
/* State 128 (the neutral midpoint) is pinned and never swapped. */
868 if(sizeX < size0 && i!=128 && i2!=128){
870 FFSWAP(int, stt[ i], stt[ i2]);
871 FFSWAP(int, s->rc_stat[i ][0],s->rc_stat[ i2][0]);
872 FFSWAP(int, s->rc_stat[i ][1],s->rc_stat[ i2][1]);
874 FFSWAP(int, stt[256-i], stt[256-i2]);
875 FFSWAP(int, s->rc_stat[256-i][0],s->rc_stat[256-i2][0]);
876 FFSWAP(int, s->rc_stat[256-i][1],s->rc_stat[256-i2][1]);
/* Repoint every transition that targeted a swapped state. */
878 for(j=1; j<256; j++){
879 if (stt[j] == i ) stt[j] = i2;
880 else if(stt[j] == i2) stt[j] = i ;
882 if (stt[256-j] == 256-i ) stt[256-j] = 256-i2;
883 else if(stt[256-j] == 256-i2) stt[256-j] = 256-i ;
/* Encoder init: chooses the bitstream version from requested features,
 * validates the pixel format and bit depth, builds the quantization tables
 * and plane contexts, optionally loads 2-pass statistics to tune the state
 * transition table and initial states, sets up the slice grid, writes the
 * extradata (v >= 2) and allocates per-slice pass-1 statistics buffers. */
894 static av_cold int encode_init(AVCodecContext *avctx)
896 FFV1Context *s = avctx->priv_data;
/* 2-pass or multi-slice encoding requires at least version 2. */
903 if((avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) || avctx->slices>1)
904 s->version = FFMAX(s->version, 2);
906 if(avctx->level == 3){
911 s->ec = (s->version >= 3);
914 if(s->version >= 2 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
915 av_log(avctx, AV_LOG_ERROR, "Version 2 needed for requested features but version 2 is experimental and not enabled\n");
919 s->ac= avctx->coder_type ? 2:0;
/* ac==2: custom state transition table, seeded from ver2_state. */
923 s->state_transition[i]=ver2_state[i];
926 switch(avctx->pix_fmt){
/* 9/10/16-bit planar YUV: remember the effective depth and LSB packing.
 * NOTE: the 9-bit cases fall through into the 10-bit cases. */
927 case PIX_FMT_YUV444P9:
928 case PIX_FMT_YUV422P9:
929 case PIX_FMT_YUV420P9:
930 if (!avctx->bits_per_raw_sample)
931 s->bits_per_raw_sample = 9;
932 case PIX_FMT_YUV444P10:
933 case PIX_FMT_YUV420P10:
934 case PIX_FMT_YUV422P10:
935 s->packed_at_lsb = 1;
936 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
937 s->bits_per_raw_sample = 10;
939 case PIX_FMT_YUV444P16:
940 case PIX_FMT_YUV422P16:
941 case PIX_FMT_YUV420P16:
942 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) {
943 s->bits_per_raw_sample = 16;
944 } else if (!s->bits_per_raw_sample){
945 s->bits_per_raw_sample = avctx->bits_per_raw_sample;
947 if(s->bits_per_raw_sample <=8){
948 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n");
/* >8-bit depths require the range coder (Golomb path is 8-bit only here). */
952 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample of more than 8 needs -coder 1 currently\n");
955 s->version= FFMAX(s->version, 1);
957 case PIX_FMT_YUV444P:
958 case PIX_FMT_YUV440P:
959 case PIX_FMT_YUV422P:
960 case PIX_FMT_YUV420P:
961 case PIX_FMT_YUV411P:
962 case PIX_FMT_YUV410P:
963 s->chroma_planes= av_pix_fmt_descriptors[avctx->pix_fmt].nb_components < 3 ? 0 : 1;
966 case PIX_FMT_YUVA444P:
967 case PIX_FMT_YUVA420P:
980 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
983 if (s->transparency) {
984 av_log(avctx, AV_LOG_WARNING, "Storing alpha plane, this will require a recent FFV1 decoder to playback!\n");
986 if (avctx->context_model > 1U) {
987 av_log(avctx, AV_LOG_ERROR, "Invalid context model %d, valid values are 0 and 1\n", avctx->context_model);
988 return AVERROR(EINVAL);
/* Build table set 0 (3-input, 11-level) and set 1 (5-input, 11/5-level);
 * >8-bit depths use the *_10bit variants. */
991 for(i=0; i<256; i++){
992 s->quant_table_count=2;
993 if(s->bits_per_raw_sample <=8){
994 s->quant_tables[0][0][i]= quant11[i];
995 s->quant_tables[0][1][i]= 11*quant11[i];
996 s->quant_tables[0][2][i]= 11*11*quant11[i];
997 s->quant_tables[1][0][i]= quant11[i];
998 s->quant_tables[1][1][i]= 11*quant11[i];
999 s->quant_tables[1][2][i]= 11*11*quant5 [i];
1000 s->quant_tables[1][3][i]= 5*11*11*quant5 [i];
1001 s->quant_tables[1][4][i]= 5*5*11*11*quant5 [i];
1003 s->quant_tables[0][0][i]= quant9_10bit[i];
1004 s->quant_tables[0][1][i]= 11*quant9_10bit[i];
1005 s->quant_tables[0][2][i]= 11*11*quant9_10bit[i];
1006 s->quant_tables[1][0][i]= quant9_10bit[i];
1007 s->quant_tables[1][1][i]= 11*quant9_10bit[i];
1008 s->quant_tables[1][2][i]= 11*11*quant5_10bit[i];
1009 s->quant_tables[1][3][i]= 5*11*11*quant5_10bit[i];
1010 s->quant_tables[1][4][i]= 5*5*11*11*quant5_10bit[i];
/* Context counts: half the product of levels (sign symmetry). */
1013 s->context_count[0]= (11*11*11+1)/2;
1014 s->context_count[1]= (11*11*5*5*5+1)/2;
1015 memcpy(s->quant_table, s->quant_tables[avctx->context_model], sizeof(s->quant_table));
1017 for(i=0; i<s->plane_count; i++){
1018 PlaneContext * const p= &s->plane[i];
1020 memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table));
1021 p->quant_table_index= avctx->context_model;
1022 p->context_count= s->context_count[p->quant_table_index];
1025 if(allocate_initial_states(s) < 0)
1026 return AVERROR(ENOMEM);
1028 avctx->coded_frame= &s->picture;
1029 if(!s->transparency)
1031 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
1033 s->picture_number=0;
/* 2-pass: allocate per-table stat arrays and parse stats_in if provided. */
1035 if(avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
1036 for(i=0; i<s->quant_table_count; i++){
1037 s->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*s->rc_stat2[i]));
1039 return AVERROR(ENOMEM);
1042 if(avctx->stats_in){
1043 char *p= avctx->stats_in;
1044 uint8_t best_state[256][256];
1048 av_assert0(s->version>=2);
1051 for(j=0; j<256; j++){
1053 s->rc_stat[j][i]= strtol(p, &next, 0);
1055 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d [%s]\n", j,i,p);
1061 for(i=0; i<s->quant_table_count; i++){
1062 for(j=0; j<s->context_count[i]; j++){
1063 for(k=0; k<32; k++){
1065 s->rc_stat2[i][j][k][m]= strtol(p, &next, 0);
1067 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d %d %d [%s]\n", i,j,k,m,p);
1075 gob_count= strtol(p, &next, 0);
1076 if(next==p || gob_count <0){
1077 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid\n");
1081 while(*p=='\n' || *p==' ') p++;
/* Re-tune transition table from stats, then derive per-context initial states. */
1084 sort_stt(s, s->state_transition);
1086 find_best_state(best_state, s->state_transition);
1088 for(i=0; i<s->quant_table_count; i++){
1089 for(j=0; j<s->context_count[i]; j++){
1090 for(k=0; k<32; k++){
1092 if(s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]){
1093 p=256.0*s->rc_stat2[i][j][k][1] / (s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]);
1095 s->initial_states[i][j][k]= best_state[av_clip(round(p), 1, 255)][av_clip((s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1])/gob_count, 0, 255)];
/* Find an h x v grid matching the requested slice count (h in [v, 2v)). */
1102 for(s->num_v_slices=2; s->num_v_slices<9; s->num_v_slices++){
1103 for(s->num_h_slices=s->num_v_slices; s->num_h_slices<2*s->num_v_slices; s->num_h_slices++){
1104 if(avctx->slices == s->num_h_slices * s->num_v_slices && avctx->slices <= 64)
1108 av_log(avctx, AV_LOG_ERROR, "Unsupported number %d of slices requested, please specify a supported number with -slices (ex:4,6,9,12,16, ...)\n", avctx->slices);
1111 write_extra_header(s);
1114 if(init_slice_contexts(s) < 0)
1116 if(init_slices_state(s) < 0)
1119 #define STATS_OUT_SIZE 1024*1024*6
1120 if(avctx->flags & CODEC_FLAG_PASS1){
1121 avctx->stats_out= av_mallocz(STATS_OUT_SIZE);
1122 for(i=0; i<s->quant_table_count; i++){
1123 for(j=0; j<s->slice_count; j++){
1124 FFV1Context *sf= s->slice_context[j];
1125 av_assert0(!sf->rc_stat2[i]);
1126 sf->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*sf->rc_stat2[i]));
1127 if(!sf->rc_stat2[i])
1128 return AVERROR(ENOMEM);
1135 #endif /* CONFIG_FFV1_ENCODER */
/* Resets a slice's per-plane state to the start-of-keyframe condition:
 * range-coder states from initial_states (or the neutral 128), and VLC
 * state to its documented defaults. */
1138 static void clear_slice_state(FFV1Context *f, FFV1Context *fs){
1141 for(i=0; i<f->plane_count; i++){
1142 PlaneContext *p= &fs->plane[i];
1144 p->interlace_bit_state[0]= 128;
1145 p->interlace_bit_state[1]= 128;
1148 if(f->initial_states[p->quant_table_index]){
1149 memcpy(p->state, f->initial_states[p->quant_table_index], CONTEXT_SIZE*p->context_count);
1151 memset(p->state, 128, CONTEXT_SIZE*p->context_count);
1153 for(j=0; j<p->context_count; j++){
1154 p->vlc_state[j].drift= 0;
1155 p->vlc_state[j].error_sum= 4; //FFMAX((RANGE + 32)/64, 2);
1156 p->vlc_state[j].bias= 0;
1157 p->vlc_state[j].count= 1;
1163 #if CONFIG_FFV1_ENCODER
/* Writes the version >= 3 per-slice header: slice position/size in grid
 * units, per-plane quant table index, field order and sample aspect ratio. */
1165 static void encode_slice_header(FFV1Context *f, FFV1Context *fs){
1166 RangeCoder *c = &fs->c;
1167 uint8_t state[CONTEXT_SIZE];
1169 memset(state, 128, sizeof(state));
1171 put_symbol(c, state, (fs->slice_x +1)*f->num_h_slices / f->width , 0);
1172 put_symbol(c, state, (fs->slice_y +1)*f->num_v_slices / f->height , 0);
1173 put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
1174 put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
1175 for(j=0; j<f->plane_count; j++){
1176 put_symbol(c, state, f->plane[j].quant_table_index, 0);
1177 av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
/* 3 = progressive; 1/2 encode field order for interlaced content. */
1179 if(!f->picture.interlaced_frame) put_symbol(c, state, 3, 0);
1180 else put_symbol(c, state, 1 + !f->picture.top_field_first, 0);
1181 put_symbol(c, state, f->picture.sample_aspect_ratio.num, 0);
1182 put_symbol(c, state, f->picture.sample_aspect_ratio.den, 0);
/* Worker run via avctx->execute for each slice: clears state on keyframes,
 * writes the slice header (v >= 3), terminates/bypasses the range coder for
 * Golomb mode, then encodes the planes (YUV path) or the packed RGB frame. */
1185 static int encode_slice(AVCodecContext *c, void *arg){
1186 FFV1Context *fs= *(void**)arg;
1187 FFV1Context *f= fs->avctx->priv_data;
1188 int width = fs->slice_width;
1189 int height= fs->slice_height;
1192 AVFrame * const p= &f->picture;
/* Bytes per sample in the source picture (2 for >8-bit depths). */
1193 const int ps= (f->bits_per_raw_sample>8)+1;
1196 clear_slice_state(f, fs);
1198 encode_slice_header(f, fs);
/* Golomb mode: finish the header's range-coded part, then bit-write after it. */
1201 fs->ac_byte_count = f->version > 2 || (!x&&!y) ? ff_rac_terminate(&fs->c) : 0;
1202 init_put_bits(&fs->pb, fs->c.bytestream_start + fs->ac_byte_count, fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count);
1205 if(f->colorspace==0){
/* Ceiling division for odd-sized chroma. */
1206 const int chroma_width = -((-width )>>f->chroma_h_shift);
1207 const int chroma_height= -((-height)>>f->chroma_v_shift);
1208 const int cx= x>>f->chroma_h_shift;
1209 const int cy= y>>f->chroma_v_shift;
1211 encode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1213 if (f->chroma_planes){
1214 encode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1215 encode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1217 if (fs->transparency)
1218 encode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
1220 encode_rgb_frame(fs, (uint32_t*)(p->data[0]) + ps*x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
/* Top-level encode entry: allocates the packet, writes keyframe flag and
 * (first frame) the global header, dispatches slice encoding in parallel,
 * packs slice bytestreams back-to-back with length/CRC trailers, and on
 * pass 1 aggregates and emits the statistics file every 32 frames. */
1227 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1228 const AVFrame *pict, int *got_packet)
1230 FFV1Context *f = avctx->priv_data;
1231 RangeCoder * const c= &f->slice_context[0]->c;
1232 AVFrame * const p= &f->picture;
1234 uint8_t keystate=128;
/* Worst-case packet size for the whole frame. */
1238 if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width*avctx->height*((8*2+1+1)*4)/8
1239 + FF_MIN_BUFFER_SIZE)) < 0)
1242 ff_init_range_encoder(c, pkt->data, pkt->size);
1243 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1246 p->pict_type= AV_PICTURE_TYPE_I;
/* Keyframe decision from GOP position; coded as a single rac bit. */
1248 if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
1249 put_rac(c, &keystate, 1);
1254 put_rac(c, &keystate, 0);
/* Refresh the main coder's transition tables (custom table path). */
1260 for(i=1; i<256; i++){
1261 c->one_state[i]= f->state_transition[i];
1262 c->zero_state[256-i]= 256-c->one_state[i];
/* Give each slice after the first its own region of the packet buffer. */
1266 for(i=1; i<f->slice_count; i++){
1267 FFV1Context *fs= f->slice_context[i];
1268 uint8_t *start = pkt->data + (pkt->size-used_count)*i/f->slice_count;
1269 int len = pkt->size/f->slice_count;
1271 ff_init_range_encoder(&fs->c, start, len);
1273 avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
/* Compact slices into a contiguous bytestream, appending 24-bit length
 * (and CRC when error correction is on) after each slice. */
1276 for(i=0; i<f->slice_count; i++){
1277 FFV1Context *fs= f->slice_context[i];
1282 put_rac(&fs->c, &state, 0);
1283 bytes= ff_rac_terminate(&fs->c);
1285 flush_put_bits(&fs->pb); //nicer padding FIXME
1286 bytes= fs->ac_byte_count + (put_bits_count(&fs->pb)+7)/8;
1288 if(i>0 || f->version>2){
1289 av_assert0(bytes < pkt->size/f->slice_count);
1290 memmove(buf_p, fs->c.bytestream_start, bytes);
1291 av_assert0(bytes < (1<<24));
1292 AV_WB24(buf_p+bytes, bytes);
1298 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, bytes);
1299 AV_WL32(buf_p + bytes, v); bytes += 4;
/* Pass 1: merge per-slice stats into the main context and serialize them. */
1304 if((avctx->flags&CODEC_FLAG_PASS1) && (f->picture_number&31)==0){
1306 char *p= avctx->stats_out;
1307 char *end= p + STATS_OUT_SIZE;
1309 memset(f->rc_stat, 0, sizeof(f->rc_stat));
1310 for(i=0; i<f->quant_table_count; i++)
1311 memset(f->rc_stat2[i], 0, f->context_count[i]*sizeof(*f->rc_stat2[i]));
1313 for(j=0; j<f->slice_count; j++){
1314 FFV1Context *fs= f->slice_context[j];
1315 for(i=0; i<256; i++){
1316 f->rc_stat[i][0] += fs->rc_stat[i][0];
1317 f->rc_stat[i][1] += fs->rc_stat[i][1];
1319 for(i=0; i<f->quant_table_count; i++){
1320 for(k=0; k<f->context_count[i]; k++){
1321 for(m=0; m<32; m++){
1322 f->rc_stat2[i][k][m][0] += fs->rc_stat2[i][k][m][0];
1323 f->rc_stat2[i][k][m][1] += fs->rc_stat2[i][k][m][1];
1329 for(j=0; j<256; j++){
1330 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat[j][0], f->rc_stat[j][1]);
1333 snprintf(p, end-p, "\n");
1335 for(i=0; i<f->quant_table_count; i++){
1336 for(j=0; j<f->context_count[i]; j++){
1337 for(m=0; m<32; m++){
1338 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat2[i][j][m][0], f->rc_stat2[i][j][m][1]);
1343 snprintf(p, end-p, "%d\n", f->gob_count);
1344 } else if(avctx->flags&CODEC_FLAG_PASS1)
1345 avctx->stats_out[0] = '\0';
1347 f->picture_number++;
1348 pkt->size = buf_p - pkt->data;
1349 pkt->flags |= AV_PKT_FLAG_KEY*p->key_frame;
/* Shared encoder/decoder teardown: release the reference picture (decoder
 * only), free all per-slice per-plane state, stats buffers and the slice
 * contexts themselves. */
1356 static av_cold int common_end(AVCodecContext *avctx){
1357 FFV1Context *s = avctx->priv_data;
/* Only the decoder owns a get_buffer()'d picture that must be released. */
1360 if (avctx->codec->decode && s->picture.data[0])
1361 avctx->release_buffer(avctx, &s->picture);
1363 for(j=0; j<s->slice_count; j++){
1364 FFV1Context *fs= s->slice_context[j];
1365 for(i=0; i<s->plane_count; i++){
1366 PlaneContext *p= &fs->plane[i];
/* Range-coder context states and Golomb-Rice VLC states, per plane. */
1368 av_freep(&p->state);
1369 av_freep(&p->vlc_state);
1371 av_freep(&fs->sample_buffer);
1374 av_freep(&avctx->stats_out);
1375 for(j=0; j<s->quant_table_count; j++){
1376 av_freep(&s->initial_states[j]);
/* Per-slice pass-1 stat tables also exist per quant table. */
1377 for(i=0; i<s->slice_count; i++){
1378 FFV1Context *sf= s->slice_context[i];
1379 av_freep(&sf->rc_stat2[j]);
1381 av_freep(&s->rc_stat2[j]);
1384 for(i=0; i<s->slice_count; i++){
1385 av_freep(&s->slice_context[i]);
/* Decode one line of samples for one plane.
 * Two coding modes: range coder (AC) with per-context states, or Golomb-Rice
 * with a run-length escape (run_mode) when context 0 repeats.
 * sample[0] is the previous line, sample[1] the line being decoded; both are
 * offset so that [-1] and [w] neighbours are valid. */
1391 static av_always_inline void decode_line(FFV1Context *s, int w,
1393 int plane_index, int bits)
1395 PlaneContext * const p= &s->plane[plane_index];
1396 RangeCoder * const c= &s->c;
1400 int run_index= s->run_index;
1403 int diff, context, sign;
/* Context from the quantized neighbour gradients (left / top / top-left etc.). */
1405 context= get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
1412 av_assert2(context < p->context_count);
/* AC path: residual via the range coder. */
1415 diff= get_symbol_inline(c, p->state[context], 1);
/* Golomb-Rice path: entering run mode when the flat context 0 is hit. */
1417 if(context == 0 && run_mode==0) run_mode=1;
1420 if(run_count==0 && run_mode==1){
/* A set bit means a full run of 2^ff_log2_run[run_index] identical samples. */
1421 if(get_bits1(&s->gb)){
1422 run_count = 1<<ff_log2_run[run_index];
1423 if(x + run_count <= w) run_index++;
/* Otherwise read the explicit (shorter) remaining run length. */
1425 if(ff_log2_run[run_index]) run_count = get_bits(&s->gb, ff_log2_run[run_index]);
1427 if(run_index) run_index--;
1435 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1440 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1442 // printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, get_bits_count(&s->gb));
/* Undo the context sign folding, then add the residual to the median
 * prediction, wrapping modulo 2^bits. */
1445 if(sign) diff= -diff;
1447 sample[1][x]= (predict(sample[1] + x, sample[0] + x) + diff) & ((1<<bits)-1);
1449 s->run_index= run_index;
/* Decode one full plane: maintain a two-line rolling sample buffer (previous
 * and current line, each padded by 3 on the left and mirrored on the right),
 * call decode_line() per row and store samples as 8- or 16-bit pixels.
 * For >8 bpp, packed_at_lsb selects LSB- vs MSB-aligned 16-bit output. */
1452 static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
1455 sample[0]=s->sample_buffer +3;
1456 sample[1]=s->sample_buffer+w+6+3;
1460 memset(s->sample_buffer, 0, 2*(w+6)*sizeof(*s->sample_buffer));
/* Swap the two line buffers: last decoded line becomes the predictor line. */
1463 int16_t *temp = sample[0]; //FIXME try a normal buffer
1465 sample[0]= sample[1];
/* Edge handling: left neighbour of the new line is the top sample; the
 * sample one past the right edge repeats the last column. */
1468 sample[1][-1]= sample[0][0 ];
1469 sample[0][ w]= sample[0][w-1];
1472 if(s->avctx->bits_per_raw_sample <= 8){
1473 decode_line(s, w, sample, plane_index, 8);
1475 src[x + stride*y]= sample[1][x];
1478 decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
1479 if(s->packed_at_lsb){
1481 ((uint16_t*)(src + stride*y))[x]= sample[1][x];
/* MSB-aligned: shift samples up so the used bits occupy the top of 16. */
1485 ((uint16_t*)(src + stride*y))[x]= sample[1][x] << (16 - s->avctx->bits_per_raw_sample);
1489 //STOP_TIMER("decode-line")}
/* Decode an RGB(A) slice in JPEG2000-style reversible colorspace: planes are
 * coded in G, B-G, R-G (and A) order at 9 bits, then inverse-transformed and
 * packed into 32-bit BGRA words. Four plane pairs of rolling line buffers. */
1493 static void decode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
1495 int16_t *sample[4][2];
1497 sample[x][0] = s->sample_buffer + x*2 *(w+6) + 3;
1498 sample[x][1] = s->sample_buffer + (x*2+1)*(w+6) + 3;
1503 memset(s->sample_buffer, 0, 8*(w+6)*sizeof(*s->sample_buffer));
/* 3 colour planes, plus one more when transparency is present. */
1506 for(p=0; p<3 + s->transparency; p++){
1507 int16_t *temp = sample[p][0]; //FIXME try a normal buffer
1509 sample[p][0]= sample[p][1];
1512 sample[p][1][-1]= sample[p][0][0 ];
1513 sample[p][0][ w]= sample[p][0][w-1];
/* (p+1)/2 maps planes {G,B,R,A} -> plane_index {0,1,1,2}: B and R share contexts. */
1514 decode_line(s, w, sample[p], (p+1)/2, 9);
1517 int g= sample[0][1][x];
1518 int b= sample[1][1][x];
1519 int r= sample[2][1][x];
1520 int a= sample[3][1][x];
1522 // assert(g>=0 && b>=0 && r>=0);
1523 // assert(g<256 && b<512 && r<512);
/* Pack as BGRA little-endian 32-bit; the inverse colour transform happens in
 * the (missing) lines just above — see the full source. */
1531 src[x + stride*y]= b + (g<<8) + (r<<16) + (a<<24);
/* Parse a version-3+ per-slice header: slice geometry (in units of
 * width/num_h_slices etc.), per-plane quant table index, field order and
 * sample aspect ratio. Returns negative on invalid data (error paths are in
 * the missing lines). */
1536 static int decode_slice_header(FFV1Context *f, FFV1Context *fs){
1537 RangeCoder *c = &fs->c;
1538 uint8_t state[CONTEXT_SIZE];
1539 unsigned ps, i, context_count;
1540 memset(state, 128, sizeof(state));
1542 av_assert0(f->version > 2);
/* Geometry is coded as multiples of the slice grid; the divisions below
 * convert to pixel coordinates and sizes. */
1544 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1545 fs->slice_y = get_symbol(c, state, 0) *f->height;
1546 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1547 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
1549 fs->slice_x /= f->num_h_slices;
1550 fs->slice_y /= f->num_v_slices;
1551 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1552 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
/* Bounds checks: slice must fit inside the frame (64-bit sum avoids overflow). */
1553 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1555 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1556 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1559 for(i=0; i<f->plane_count; i++){
1560 PlaneContext * const p= &fs->plane[i];
1561 int idx=get_symbol(c, state, 0);
/* NOTE(review): valid indices are 0..quant_table_count-1, so this check looks
 * like it should be >= (or the comparison made unsigned on idx) — a negative
 * or == count idx slips through. Confirm against upstream fix. */
1562 if(idx > (unsigned)f->quant_table_count){
1563 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
1566 p->quant_table_index= idx;
1567 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
1568 context_count= f->context_count[idx];
/* Grow per-plane state arrays if this quant table needs more contexts. */
1570 if(p->context_count < context_count){
1571 av_freep(&p->state);
1572 av_freep(&p->vlc_state);
1574 p->context_count= context_count;
/* ps: picture structure — 1 = TFF interlaced, 2 = BFF interlaced, 3 = progressive
 * (presumed from the assignments below; the selecting conditions are in missing lines). */
1577 ps = get_symbol(c, state, 0);
1579 f->picture.interlaced_frame = 1;
1580 f->picture.top_field_first = 1;
1582 f->picture.interlaced_frame = 1;
1583 f->picture.top_field_first = 0;
1585 f->picture.interlaced_frame = 0;
1587 f->picture.sample_aspect_ratio.num = get_symbol(c, state, 0);
1588 f->picture.sample_aspect_ratio.den = get_symbol(c, state, 0);
/* Worker run by avctx->execute(): decode one slice. Initializes slice state,
 * parses the slice header (v3+), clears contexts on keyframes, switches the
 * bit reader for the Golomb-Rice payload, then decodes either planar YUV(A)
 * or packed RGB depending on colorspace. */
1593 static int decode_slice(AVCodecContext *c, void *arg){
1594 FFV1Context *fs= *(void**)arg;
1595 FFV1Context *f= fs->avctx->priv_data;
1596 int width, height, x, y;
/* ps: bytes per sample in the output picture (1 for <=8 bpp, else 2). */
1597 const int ps= (c->bits_per_raw_sample>8)+1;
1598 AVFrame * const p= &f->picture;
1601 if(init_slice_state(f, fs) < 0)
1602 return AVERROR(ENOMEM);
1603 if(decode_slice_header(f, fs) < 0)
1604 return AVERROR_INVALIDDATA;
/* Second init: the header may have changed context counts (v3+ path). */
1606 if(init_slice_state(f, fs) < 0)
1607 return AVERROR(ENOMEM);
1608 if(f->picture.key_frame)
1609 clear_slice_state(f, fs);
1610 width = fs->slice_width;
1611 height= fs->slice_height;
/* Golomb-Rice mode: payload after the range-coded header is bit-packed;
 * point the get-bits reader past the consumed range-coder bytes. */
1616 fs->ac_byte_count = f->version > 2 || (!x&&!y) ? fs->c.bytestream - fs->c.bytestream_start - 1 : 0;
1617 init_get_bits(&fs->gb,
1618 fs->c.bytestream_start + fs->ac_byte_count,
1619 (fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count) * 8);
1622 av_assert1(width && height);
1623 if(f->colorspace==0){
/* Ceil-divide slice size by the chroma subsampling factors. */
1624 const int chroma_width = -((-width )>>f->chroma_h_shift);
1625 const int chroma_height= -((-height)>>f->chroma_v_shift);
1626 const int cx= x>>f->chroma_h_shift;
1627 const int cy= y>>f->chroma_v_shift;
1628 decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1630 if (f->chroma_planes){
1631 decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1632 decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1634 if (fs->transparency)
1635 decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
/* RGB path: 32-bit packed pixels, so stride is in uint32_t units. */
1637 decode_rgb_frame(fs, (uint32_t*)p->data[0] + ps*x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
/* Read one run-length coded quantization table half (indices 0..127) and
 * mirror it with negated values into 129..255 so the table maps signed
 * differences symmetrically. Returns -1 if the runs overflow 128 entries. */
1645 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale){
1648 uint8_t state[CONTEXT_SIZE];
1650 memset(state, 128, sizeof(state));
/* v counts distinct levels; each level covers `len` consecutive inputs. */
1652 for(v=0; i<128 ; v++){
1653 int len= get_symbol(c, state, 0) + 1;
1655 if(len + i > 128) return -1;
/* scale stacks this table onto the previously-read dimensions' context count. */
1658 quant_table[i] = scale*v;
1661 //if(i%16==0) printf("\n");
/* Mirror: entry 256-i is the negation of entry i; 128 mirrors 127. */
1665 for(i=1; i<128; i++){
1666 quant_table[256-i]= -quant_table[i];
1668 quant_table[128]= -quant_table[127];
/* Read all quant tables for the context model; the per-dimension counts
 * multiply into the total context_count, rejected when it exceeds 32768.
 * Returns (context_count+1)/2 because contexts come in sign-symmetric pairs. */
1673 static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
1675 int context_count=1;
1678 context_count*= read_quant_table(c, quant_table[i], context_count);
1679 if(context_count > 32768U){
1683 return (context_count+1)/2;
/* Parse the global out-of-band header (version >= 2) from avctx->extradata:
 * version, coder type with optional custom state-transition table, colorspace
 * and bit depth, chroma subsampling, transparency, slice grid, quantization
 * tables, optional per-context initial states, error-correction mode and a
 * trailing CRC over the whole extradata. */
1686 static int read_extra_header(FFV1Context *f){
1687 RangeCoder * const c= &f->c;
1688 uint8_t state[CONTEXT_SIZE];
/* state2: one independent context per CONTEXT_SIZE slot for initial-state deltas. */
1690 uint8_t state2[32][CONTEXT_SIZE];
1692 memset(state2, 128, sizeof(state2));
1693 memset(state, 128, sizeof(state));
1695 ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size);
1696 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1698 f->version= get_symbol(c, state, 0);
1700 f->minor_version= get_symbol(c, state, 0);
1701 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
/* Custom range-coder transition table, coded as deltas from the default. */
1703 for(i=1; i<256; i++){
1704 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1707 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1708 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1709 f->chroma_planes= get_rac(c, state);
1710 f->chroma_h_shift= get_symbol(c, state, 0);
1711 f->chroma_v_shift= get_symbol(c, state, 0);
1712 f->transparency= get_rac(c, state);
1713 f->plane_count= 2 + f->transparency;
1714 f->num_h_slices= 1 + get_symbol(c, state, 0);
1715 f->num_v_slices= 1 + get_symbol(c, state, 0);
/* Sanity: at most one slice per pixel row/column. */
1716 if(f->num_h_slices > (unsigned)f->width || f->num_v_slices > (unsigned)f->height){
1717 av_log(f->avctx, AV_LOG_ERROR, "too many slices\n");
1721 f->quant_table_count= get_symbol(c, state, 0);
1722 if(f->quant_table_count > (unsigned)MAX_QUANT_TABLES)
1724 for(i=0; i<f->quant_table_count; i++){
1725 if((f->context_count[i]= read_quant_tables(c, f->quant_tables[i])) < 0){
1726 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1731 if(allocate_initial_states(f) < 0)
1732 return AVERROR(ENOMEM);
/* Optional explicit initial context states, delta-coded against the previous
 * context (pred=128 for the first), reduced mod 256. */
1734 for(i=0; i<f->quant_table_count; i++){
1735 if(get_rac(c, state)){
1736 for(j=0; j<f->context_count[i]; j++){
1737 for(k=0; k<CONTEXT_SIZE; k++){
1738 int pred= j ? f->initial_states[i][j-1][k] : 128;
1739 f->initial_states[i][j][k]= (pred+get_symbol(c, state2[k], 1))&0xFF;
1746 f->ec = get_symbol(c, state, 0);
/* Whole-extradata CRC check (the comparison itself is in missing lines). */
1751 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, f->avctx->extradata, f->avctx->extradata_size);
1753 av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", v);
1754 return AVERROR_INVALIDDATA;
/* Parse the per-keyframe header (in-band for version <= 1; mostly redundant
 * for 2+): coder parameters, colorspace -> pix_fmt mapping, quant tables and
 * slice layout, then per-slice, per-plane context setup. */
1761 static int read_header(FFV1Context *f){
1762 uint8_t state[CONTEXT_SIZE];
1763 int i, j, context_count;
1764 RangeCoder * const c= &f->slice_context[0]->c;
1766 memset(state, 128, sizeof(state));
1769 f->version= get_symbol(c, state, 0);
1770 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
/* Custom state-transition table, delta-coded against the defaults. */
1772 for(i=1; i<256; i++){
1773 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1776 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1778 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1779 f->chroma_planes= get_rac(c, state);
1780 f->chroma_h_shift= get_symbol(c, state, 0);
1781 f->chroma_v_shift= get_symbol(c, state, 0);
1782 f->transparency= get_rac(c, state);
1783 f->plane_count= 2 + f->transparency;
/* Map (colorspace, bit depth, subsampling, alpha) to an output pixel format.
 * The switch key packs h-shift in the high nibble, v-shift in the low one. */
1786 if(f->colorspace==0){
1787 if(!f->transparency && !f->chroma_planes){
1788 if (f->avctx->bits_per_raw_sample<=8)
1789 f->avctx->pix_fmt= PIX_FMT_GRAY8;
1791 f->avctx->pix_fmt= PIX_FMT_GRAY16;
1792 }else if(f->avctx->bits_per_raw_sample<=8 && !f->transparency){
1793 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1794 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P; break;
1795 case 0x01: f->avctx->pix_fmt= PIX_FMT_YUV440P; break;
1796 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P; break;
1797 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P; break;
1798 case 0x20: f->avctx->pix_fmt= PIX_FMT_YUV411P; break;
1799 case 0x22: f->avctx->pix_fmt= PIX_FMT_YUV410P; break;
1801 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1804 }else if(f->avctx->bits_per_raw_sample<=8 && f->transparency){
1805 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1806 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUVA444P; break;
1807 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUVA420P; break;
1809 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1812 }else if(f->avctx->bits_per_raw_sample==9) {
1814 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1815 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P9; break;
1816 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P9; break;
1817 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P9; break;
1819 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1822 }else if(f->avctx->bits_per_raw_sample==10) {
1824 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1825 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P10; break;
1826 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P10; break;
1827 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P10; break;
1829 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1833 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1834 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
1835 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P16; break;
1836 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P16; break;
1838 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1842 }else if(f->colorspace==1){
/* JPEG2000-RCT RGB path has no chroma subsampling. */
1843 if(f->chroma_h_shift || f->chroma_v_shift){
1844 av_log(f->avctx, AV_LOG_ERROR, "chroma subsampling not supported in this colorspace\n");
1847 if(f->transparency) f->avctx->pix_fmt= PIX_FMT_RGB32;
1848 else f->avctx->pix_fmt= PIX_FMT_0RGB32;
1850 av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
1854 //printf("%d %d %d\n", f->chroma_h_shift, f->chroma_v_shift,f->avctx->pix_fmt);
1856 context_count= read_quant_tables(c, f->quant_table);
1857 if(context_count < 0){
1858 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1861 }else if(f->version < 3){
1862 f->slice_count= get_symbol(c, state, 0);
/* Version 3+: slice count is recovered by walking the size trailers backward
 * from the end of the packet (3-byte size + optional 5-byte EC trailer). */
1864 const uint8_t *p= c->bytestream_end;
1865 for(f->slice_count = 0; f->slice_count < MAX_SLICES && 3 < p - c->bytestream_start; f->slice_count++){
1866 int trailer = 3 + 5*!!f->ec;
1867 int size = AV_RB24(p-trailer);
1868 if(size + trailer > p - c->bytestream_start)
1870 p -= size + trailer;
1873 if(f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0){
1874 av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid\n", f->slice_count);
1878 for(j=0; j<f->slice_count; j++){
1879 FFV1Context *fs= f->slice_context[j];
1881 fs->packed_at_lsb= f->packed_at_lsb;
/* Version 2 carries the slice geometry here instead of per-slice headers. */
1883 if(f->version == 2){
1884 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1885 fs->slice_y = get_symbol(c, state, 0) *f->height;
1886 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1887 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
1889 fs->slice_x /= f->num_h_slices;
1890 fs->slice_y /= f->num_v_slices;
1891 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1892 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
1893 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1895 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1896 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1900 for(i=0; i<f->plane_count; i++){
1901 PlaneContext * const p= &fs->plane[i];
1903 if(f->version == 2){
1904 int idx=get_symbol(c, state, 0);
/* NOTE(review): same suspected off-by-one as decode_slice_header — valid
 * indices are 0..quant_table_count-1, so this likely wants >=. Confirm. */
1905 if(idx > (unsigned)f->quant_table_count){
1906 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
1909 p->quant_table_index= idx;
1910 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
1911 context_count= f->context_count[idx];
1913 memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
/* v <= 2: (re)allocate per-plane states when the context count grew. */
1916 if(f->version <= 2){
1917 if(p->context_count < context_count){
1918 av_freep(&p->state);
1919 av_freep(&p->vlc_state);
1921 p->context_count= context_count;
/* Decoder init: parse the out-of-band global header if extradata is present,
 * then allocate the slice contexts. */
1928 static av_cold int decode_init(AVCodecContext *avctx)
1930 FFV1Context *f = avctx->priv_data;
1934 if(avctx->extradata && read_extra_header(f) < 0)
1937 if(init_slice_contexts(f) < 0)
/* Decode one packet into a frame: read the keyframe flag, (re)parse the
 * header on keyframes, then walk the slice table backward from the end of
 * the packet (24-bit sizes + optional CRC trailers), hand each slice its own
 * range decoder and run them in parallel. */
1943 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
1944 const uint8_t *buf = avpkt->data;
1945 int buf_size = avpkt->size;
1946 FFV1Context *f = avctx->priv_data;
1947 RangeCoder * const c= &f->slice_context[0]->c;
1948 AVFrame * const p= &f->picture;
1950 uint8_t keystate= 128;
1951 const uint8_t *buf_p;
1953 AVFrame *picture = data;
1955 /* release previously stored data */
1957 avctx->release_buffer(avctx, p);
1959 ff_init_range_decoder(c, buf, buf_size);
1960 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1963 p->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
1964 if(get_rac(c, &keystate)){
/* Keyframe: key_frame_ok guards later inter frames against a corrupt header. */
1966 f->key_frame_ok = 0;
1967 if(read_header(f) < 0)
1969 f->key_frame_ok = 1;
/* Non-keyframe without a previously valid keyframe: nothing to predict from.
 * NOTE(review): "Cant" in the message is a typo for "Can't" — a string change,
 * so out of scope for a comment-only pass. */
1971 if (!f->key_frame_ok) {
1972 av_log(avctx, AV_LOG_ERROR, "Cant decode non keyframe without valid keyframe\n");
1973 return AVERROR_INVALIDDATA;
1979 if(avctx->get_buffer(avctx, p) < 0){
1980 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1984 if(avctx->debug&FF_DEBUG_PICT_INFO)
1985 av_log(avctx, AV_LOG_ERROR, "keyframe:%d coder:%d\n", p->key_frame, f->ac);
/* Walk slices last-to-first: each trailer holds the slice size (big-endian
 * 24-bit) plus 5 extra bytes (CRC etc.) when error correction is on. */
1987 buf_p= buf + buf_size;
1988 for(i=f->slice_count-1; i>=0; i--){
1989 FFV1Context *fs= f->slice_context[i];
1990 int trailer = 3 + 5*!!f->ec;
/* Slice 0 of version <= 2 has no trailer; it occupies whatever remains. */
1993 if(i || f->version>2) v = AV_RB24(buf_p-trailer)+trailer;
1994 else v = buf_p - c->bytestream_start;
1995 if(buf_p - c->bytestream_start < v){
1996 av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
/* Per-slice CRC over payload+trailer; a matching stream yields crc == 0
 * (presumed — the comparison is in missing lines). */
2002 unsigned crc = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, v);
2004 av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", crc);
2009 ff_init_range_decoder(&fs->c, buf_p, v);
2013 avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
2014 f->picture_number++;
2017 *data_size = sizeof(AVFrame);
/* Decoder registration: direct rendering plus slice-threaded decode. */
2022 AVCodec ff_ffv1_decoder = {
2024 .type = AVMEDIA_TYPE_VIDEO,
2025 .id = CODEC_ID_FFV1,
2026 .priv_data_size = sizeof(FFV1Context),
2027 .init = decode_init,
2028 .close = common_end,
2029 .decode = decode_frame,
2030 .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ |
2031 CODEC_CAP_SLICE_THREADS,
2032 .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
2035 #if CONFIG_FFV1_ENCODER
2037 #define OFFSET(x) offsetof(FFV1Context, x)
2038 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Encoder options: slicecrc -1 = auto (decided by version), 0 = off, 1 = on. */
2039 static const AVOption options[] = {
2040 { "slicecrc", "Protect slices with CRCs", OFFSET(ec), AV_OPT_TYPE_INT, {-1}, -1, 1, VE},
/* AVClass hooking the options table into the generic option system. */
2044 static const AVClass class = {
2045 .class_name = "ffv1 encoder",
2046 .item_name = av_default_item_name,
2048 .version = LIBAVUTIL_VERSION_INT,
2051 AVCodec ff_ffv1_encoder = {
2053 .type = AVMEDIA_TYPE_VIDEO,
2054 .id = CODEC_ID_FFV1,
2055 .priv_data_size = sizeof(FFV1Context),
2056 .init = encode_init,
2057 .encode2 = encode_frame,
2058 .close = common_end,
2059 .capabilities = CODEC_CAP_SLICE_THREADS,
2060 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUVA420P, PIX_FMT_YUV444P, PIX_FMT_YUVA444P, PIX_FMT_YUV440P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_0RGB32, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_YUV444P9, PIX_FMT_YUV422P9, PIX_FMT_YUV420P9, PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_YUV444P10, PIX_FMT_GRAY16, PIX_FMT_GRAY8, PIX_FMT_NONE},
2061 .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
2062 .priv_class = &class,