2 * FFV1 codec for libavcodec
4 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * FF Video Codec 1 (a lossless codec)
32 #include "rangecoder.h"
35 #include "libavutil/avassert.h"
38 #define CONTEXT_SIZE 32
40 #define MAX_QUANT_TABLES 8
41 #define MAX_CONTEXT_INPUTS 5
43 extern const uint8_t ff_log2_run[41];
/* Context quantizer: maps a two's-complement 8-bit sample difference
 * ((d) & 0xFF, see get_context()) into one of 5 signed buckets {-2..2}.
 * "10bit" variant: wider zero bucket to suit larger-range samples. */
45 static const int8_t quant5_10bit[256]={
46 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
47 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
48 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
49 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
50 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
51 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
52 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
53 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
54 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
55 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
56 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
57 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
58 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,
59 -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
60 -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
61 -1,-1,-1,-1,-1,-1,-0,-0,-0,-0,-0,-0,-0,-0,-0,-0,
/* Context quantizer for 8-bit content: 5 buckets {-2..2}, narrow zero
 * bucket (only index 0 maps to 0). Indexed with (d) & 0xFF. */
64 static const int8_t quant5[256]={
65 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
66 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
67 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
68 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
69 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
70 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
71 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
72 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
73 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
74 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
75 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
76 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
77 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
78 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
79 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
80 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1,-1,
/* Context quantizer: 9 buckets {-4..4}, wide zero bucket; used for the
 * >8-bit quant tables built in encode_init(). Indexed with (d) & 0xFF. */
83 static const int8_t quant9_10bit[256]={
84 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
86 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
87 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
93 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
94 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
95 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
96 -4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,
97 -3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,
98 -3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
99 -2,-2,-2,-2,-1,-1,-1,-1,-1,-1,-1,-1,-0,-0,-0,-0,
/* Context quantizer for 8-bit content: 11 buckets {-5..5}, narrow zero
 * bucket. The 11-way fan-out matches the 11*11*... multipliers used when
 * building quant_tables in encode_init(). Indexed with (d) & 0xFF. */
102 static const int8_t quant11[256]={
103 0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
104 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
105 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
106 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
107 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
108 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
109 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
110 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
111 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
112 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
113 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
114 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
115 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
116 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-4,-4,
117 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
118 -4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-1,
/* Default range-coder state-transition table for version >= 2 streams;
 * copied into s->state_transition[] in encode_init() and may later be
 * permuted by sort_stt() during two-pass encoding. */
121 static const uint8_t ver2_state[256]= {
122 0, 10, 10, 10, 10, 16, 16, 16, 28, 16, 16, 29, 42, 49, 20, 49,
123 59, 25, 26, 26, 27, 31, 33, 33, 33, 34, 34, 37, 67, 38, 39, 39,
124 40, 40, 41, 79, 43, 44, 45, 45, 48, 48, 64, 50, 51, 52, 88, 52,
125 53, 74, 55, 57, 58, 58, 74, 60, 101, 61, 62, 84, 66, 66, 68, 69,
126 87, 82, 71, 97, 73, 73, 82, 75, 111, 77, 94, 78, 87, 81, 83, 97,
127 85, 83, 94, 86, 99, 89, 90, 99, 111, 92, 93, 134, 95, 98, 105, 98,
128 105, 110, 102, 108, 102, 118, 103, 106, 106, 113, 109, 112, 114, 112, 116, 125,
129 115, 116, 117, 117, 126, 119, 125, 121, 121, 123, 145, 124, 126, 131, 127, 129,
130 165, 130, 132, 138, 133, 135, 145, 136, 137, 139, 146, 141, 143, 142, 144, 148,
131 147, 155, 151, 149, 151, 150, 152, 157, 153, 154, 156, 168, 158, 162, 161, 160,
132 172, 163, 169, 164, 166, 184, 167, 170, 177, 174, 171, 173, 182, 176, 180, 178,
133 175, 189, 179, 181, 186, 183, 192, 185, 200, 187, 191, 188, 190, 197, 193, 196,
134 197, 194, 195, 196, 198, 202, 199, 201, 210, 203, 207, 204, 205, 206, 208, 214,
135 209, 211, 221, 212, 213, 215, 224, 216, 217, 218, 219, 220, 222, 228, 223, 225,
136 226, 224, 227, 229, 240, 230, 231, 232, 233, 234, 235, 236, 238, 239, 237, 242,
137 241, 243, 242, 244, 245, 246, 247, 248, 249, 250, 251, 252, 252, 253, 254, 255,
/* Per-context adaptive state for Golomb-Rice (VLC) coding; the fields
 * (drift, error_sum, bias, count — see update_vlc_state()) are on lines
 * outside this chunk. */
140 typedef struct VlcState{
/* Per-plane coding state. NOTE(review): additional members (context_count,
 * vlc_state, ...) are on lines outside this chunk. */
147 typedef struct PlaneContext{
148 int16_t quant_table[MAX_CONTEXT_INPUTS][256]; // difference->bucket maps used by get_context()
149 int quant_table_index;                        // which FFV1Context.quant_tables[] entry this is
151 uint8_t (*state)[CONTEXT_SIZE];               // range-coder states, one CONTEXT_SIZE row per context
153 uint8_t interlace_bit_state[2];               // reset to 128 in clear_state()
156 #define MAX_SLICES 256
/* Main codec context; also used per slice (slice_context[] holds shallow
 * copies made in init_slice_contexts()). Many members are on lines outside
 * this chunk. */
158 typedef struct FFV1Context{
159 AVCodecContext *avctx;
163 uint64_t rc_stat[256][2];                     // pass-1 zero/one counts per range-coder state
164 uint64_t (*rc_stat2[MAX_QUANT_TABLES])[32][2]; // pass-1 counts per quant table/context/sub-state
167 int chroma_h_shift, chroma_v_shift;
173 int ac; ///< 1=range coder <-> 0=golomb rice
174 PlaneContext plane[MAX_PLANES];
175 int16_t quant_table[MAX_CONTEXT_INPUTS][256]; // active table (copy of quant_tables[context_model])
176 int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][256];
177 int context_count[MAX_QUANT_TABLES];          // contexts per quant table, set in encode_init()
178 uint8_t state_transition[256];                // range-coder transition table (ver2_state based)
179 uint8_t (*initial_states[MAX_QUANT_TABLES])[32]; // optional non-128 initial states (2-pass)
182 int16_t *sample_buffer;                       // ring buffer of decoded/encoded sample lines
186 int quant_table_count;
190 struct FFV1Context *slice_context[MAX_SLICES];
/* Folds a prediction residual into the `bits`-wide signed range (callers:
 * encode_line()/put_vlc_symbol()/get_vlc_symbol()). Body lies outside this
 * chunk — behavior not verifiable here. */
200 static av_always_inline int fold(int diff, int bits){
/* Median predictor: predicts the current sample from the left (L), top (T)
 * and top-left (LT) neighbours via mid_pred(L, L+T-LT, T). `src` points at
 * the current position in the current line, `last` at the line above. */
212 static inline int predict(int16_t *src, int16_t *last)
214 const int LT= last[-1];
215 const int T= last[ 0];
216 const int L = src[-1];
218 return mid_pred(L, L + T - LT, T);
/* Computes the coding context from quantized neighbour differences.
 * If quant_table[3][127] is non-zero the 5-input model is active and the
 * second-previous line (last2) and second-left sample also contribute;
 * otherwise only the 3 gradients L-LT, LT-T, T-RT are used. Differences
 * are wrapped to 8 bits before table lookup. */
221 static inline int get_context(PlaneContext *p, int16_t *src,
222 int16_t *last, int16_t *last2)
224 const int LT= last[-1];
225 const int T= last[ 0];
226 const int RT= last[ 1];
227 const int L = src[-1];
229 if(p->quant_table[3][127]){
230 const int TT= last2[0];
231 const int LL= src[-2];
232 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF]
233 +p->quant_table[3][(LL-L) & 0xFF] + p->quant_table[4][(TT-T) & 0xFF];
235 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF];
/* For every (probability, occupancy-spread) pair, searches for the initial
 * range-coder state whose expected code length (estimated via log2 of the
 * state probabilities, iterated through one_state[] transitions) is
 * minimal; results land in best_state[][]. Used by encode_init() in 2-pass
 * mode. NOTE(review): several loop headers/brackets are outside this chunk. */
238 static void find_best_state(uint8_t best_state[256][256], const uint8_t one_state[256]){
243 l2tab[i]= log2(i/256.0);
245 for(i=0; i<256; i++){
246 double best_len[256];
252 for(j=FFMAX(i-10,1); j<FFMIN(i+11,256); j++){
256 for(k=0; k<256; k++){
257 double newocc[256]={0};
258 for(m=0; m<256; m++){
260 len -=occ[m]*( p *l2tab[ m]
261 + (1-p)*l2tab[256-m]);
264 if(len < best_len[k]){
268 for(m=0; m<256; m++){
270 newocc[ one_state[ m]] += occ[m]* p ;
271 newocc[256-one_state[256-m]] += occ[m]*(1-p);
274 memcpy(occ, newocc, sizeof(occ));
/* Writes one (optionally signed) integer with the range coder using the
 * standard FFV1 exp-golomb-like state layout: state 0 = zero flag, states
 * 1..10 unary exponent, 11..21 sign, 22..31 mantissa bits. put_rac is
 * redefined locally so that, when rc_stat/rc_stat2 are non-NULL (2-pass),
 * every coded bit is also counted into the statistics arrays. */
280 static av_always_inline av_flatten void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int is_signed, uint64_t rc_stat[256][2], uint64_t rc_stat2[32][2]){
283 #define put_rac(C,S,B) \
287 rc_stat2[(S)-state][B]++;\
293 const int a= FFABS(v);
294 const int e= av_log2(a);
295 put_rac(c, state+0, 0);
298 put_rac(c, state+1+i, 1); //1..10
300 put_rac(c, state+1+i, 0);
302 for(i=e-1; i>=0; i--){
303 put_rac(c, state+22+i, (a>>i)&1); //22..31
307 put_rac(c, state+11 + e, v < 0); //11..21
310 put_rac(c, state+1+FFMIN(i,9), 1); //1..10
312 put_rac(c, state+1+9, 0);
314 for(i=e-1; i>=0; i--){
315 put_rac(c, state+22+FFMIN(i,9), (a>>i)&1); //22..31
319 put_rac(c, state+11 + 10, v < 0); //11..21
322 put_rac(c, state+0, 1);
/* Out-of-line wrapper around put_symbol_inline() without statistics
 * gathering; used for header/table fields where speed does not matter. */
327 static void av_noinline put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed){
328 put_symbol_inline(c, state, v, is_signed, NULL, NULL);
/* Decoder counterpart of put_symbol_inline(): reads the zero flag (state
 * 0), unary exponent (1..10), mantissa (22..31) and, if is_signed, the
 * sign (11..21) from the range coder. */
331 static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed){
332 if(get_rac(c, state+0))
337 while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
342 for(i=e-1; i>=0; i--){
343 a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31
346 e= -(is_signed && get_rac(c, state+11 + FFMIN(e, 10))); //11..21
/* Out-of-line wrapper around get_symbol_inline(); used on cold paths
 * (header parsing, quant table reading). */
351 static int av_noinline get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
352 return get_symbol_inline(c, state, is_signed);
/* Updates the adaptive Golomb-Rice state after coding residual v:
 * accumulates |v| into error_sum, and when count reaches 128 halves the
 * accumulated sums (forgetting factor). bias is nudged toward the running
 * drift, clamped to [-128, 127]. */
355 static inline void update_vlc_state(VlcState * const state, const int v){
356 int drift= state->drift;
357 int count= state->count;
358 state->error_sum += FFABS(v);
361 if(count == 128){ //FIXME variable
364 state->error_sum >>= 1;
369 if(state->bias > -128) state->bias--;
375 if(state->bias < 127) state->bias++;
/* Writes a bias-corrected, folded residual as a signed Golomb-Rice code.
 * k is chosen adaptively so that (count << k) covers error_sum; the sign
 * mapping for k==0 with strongly negative drift flips the code. Updates
 * the adaptive state afterwards. */
386 static inline void put_vlc_symbol(PutBitContext *pb, VlcState * const state, int v, int bits){
388 //printf("final: %d ", v);
389 v = fold(v - state->bias, bits);
393 while(i < state->error_sum){ //FIXME optimize
401 if(k==0 && 2*state->drift <= - state->count) code= v ^ (-1);
404 code= v ^ ((2*state->drift + state->count)>>31);
407 //printf("v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code, state->bias, state->error_sum, state->drift, state->count, k);
408 set_sr_golomb(pb, code, k, 12, bits);
410 update_vlc_state(state, v);
/* Decoder counterpart of put_vlc_symbol(): derives the same adaptive k
 * from error_sum/count, reads a signed Golomb-Rice code, undoes the sign
 * mapping and the bias, folds back into range and updates the state. */
413 static inline int get_vlc_symbol(GetBitContext *gb, VlcState * const state, int bits){
418 while(i < state->error_sum){ //FIXME optimize
425 v= get_sr_golomb(gb, k, 12, bits);
426 //printf("v:%d bias:%d error:%d drift:%d count:%d k:%d", v, state->bias, state->error_sum, state->drift, state->count, k);
429 if(k==0 && 2*state->drift <= - state->count) v ^= (-1);
431 v ^= ((2*state->drift + state->count)>>31);
434 ret= fold(v + state->bias, bits);
436 update_vlc_state(state, v);
437 //printf("final: %d\n", ret);
441 #if CONFIG_FFV1_ENCODER
/* Encodes one line of `w` samples for the given plane. With the range
 * coder (s->ac) each residual goes through put_symbol_inline(), gathering
 * statistics in pass 1; with Golomb-Rice, zero-context samples switch to
 * run-length mode driven by ff_log2_run[]. Errors out early if the output
 * buffer could overflow. */
442 static av_always_inline int encode_line(FFV1Context *s, int w,
444 int plane_index, int bits)
446 PlaneContext * const p= &s->plane[plane_index];
447 RangeCoder * const c= &s->c;
449 int run_index= s->run_index;
454 if(c->bytestream_end - c->bytestream < w*20){
455 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
459 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < w*4){
460 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
468 context= get_context(p, sample[0]+x, sample[1]+x, sample[2]+x);
469 diff= sample[0][x] - predict(sample[0]+x, sample[1]+x);
476 diff= fold(diff, bits);
479 if(s->flags & CODEC_FLAG_PASS1){
480 put_symbol_inline(c, p->state[context], diff, 1, s->rc_stat, s->rc_stat2[p->quant_table_index][context]);
482 put_symbol_inline(c, p->state[context], diff, 1, NULL, NULL);
485 if(context == 0) run_mode=1;
490 while(run_count >= 1<<ff_log2_run[run_index]){
491 run_count -= 1<<ff_log2_run[run_index];
493 put_bits(&s->pb, 1, 1);
496 put_bits(&s->pb, 1 + ff_log2_run[run_index], run_count);
497 if(run_index) run_index--;
506 // printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, (int)put_bits_count(&s->pb));
509 put_vlc_symbol(&s->pb, &p->vlc_state[context], diff, bits);
513 while(run_count >= 1<<ff_log2_run[run_index]){
514 run_count -= 1<<ff_log2_run[run_index];
516 put_bits(&s->pb, 1, 1);
520 put_bits(&s->pb, 1, 1);
522 s->run_index= run_index;
/* Encodes one plane: maintains a small ring of previous sample lines
 * (2 lines, or 3 when context_model needs last2), pads the line edges,
 * loads source pixels (8-bit bytes, or 16-bit words packed at LSB or MSB
 * depending on packed_at_lsb) and hands each line to encode_line(). */
527 static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
529 const int ring_size= s->avctx->context_model ? 3 : 2;
533 memset(s->sample_buffer, 0, ring_size*(w+6)*sizeof(*s->sample_buffer));
536 for(i=0; i<ring_size; i++)
537 sample[i]= s->sample_buffer + (w+6)*((h+i-y)%ring_size) + 3;
539 sample[0][-1]= sample[1][0 ];  // left-edge padding from the line above
540 sample[1][ w]= sample[1][w-1]; // right-edge padding
542 if(s->avctx->bits_per_raw_sample<=8){
544 sample[0][x]= src[x + stride*y];
546 encode_line(s, w, sample, plane_index, 8);
548 if(s->packed_at_lsb){
550 sample[0][x]= ((uint16_t*)(src + stride*y))[x];
554 sample[0][x]= ((uint16_t*)(src + stride*y))[x] >> (16 - s->avctx->bits_per_raw_sample);
557 encode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
559 //STOP_TIMER("encode line")}
/* Encodes a packed 32-bit RGB(A) frame as up to 4 component planes (G is
 * coded as plane 0; B/R share plane index 1; alpha only when
 * s->transparency). Each component keeps its own line ring in
 * sample_buffer; lines are coded at 9 bits via encode_line(). */
563 static void encode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
565 const int ring_size= s->avctx->context_model ? 3 : 2;
566 int16_t *sample[4][3];
569 memset(s->sample_buffer, 0, ring_size*4*(w+6)*sizeof(*s->sample_buffer));
572 for(i=0; i<ring_size; i++)
574 sample[p][i]= s->sample_buffer + p*ring_size*(w+6) + ((h+i-y)%ring_size)*(w+6) + 3;
577 unsigned v= src[x + stride*y];
589 // assert(g>=0 && b>=0 && r>=0);
590 // assert(g<256 && b<512 && r<512);
596 for(p=0; p<3 + s->transparency; p++){
597 sample[p][0][-1]= sample[p][1][0 ];
598 sample[p][1][ w]= sample[p][1][w-1];
599 encode_line(s, w, sample[p], (p+1)/2, 9);
/* Writes one 256-entry quant table as run lengths: only the distances
 * between points where the (symmetric) table value changes in [1,128) are
 * coded, using a local 128-initialized coder state. */
604 static void write_quant_table(RangeCoder *c, int16_t *quant_table){
607 uint8_t state[CONTEXT_SIZE];
608 memset(state, 128, sizeof(state));
610 for(i=1; i<128 ; i++){
611 if(quant_table[i] != quant_table[i-1]){
612 put_symbol(c, state, i-last-1, 0);
616 put_symbol(c, state, i-last-1, 0); // final run up to index 127
/* Writes all MAX_CONTEXT_INPUTS per-input tables of one quant table set. */
619 static void write_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
622 write_quant_table(c, quant_table[i]);
/* Writes the in-stream (keyframe) header into slice 0's range coder:
 * version, coder type, state transition deltas (ac>1), colorspace,
 * bit depth, chroma shifts, transparency, the active quant table and —
 * for version >= 2 — the slice grid with each slice's position/size and
 * per-plane quant table index. */
625 static void write_header(FFV1Context *f){
626 uint8_t state[CONTEXT_SIZE];
628 RangeCoder * const c= &f->slice_context[0]->c;
630 memset(state, 128, sizeof(state));
633 put_symbol(c, state, f->version, 0);
634 put_symbol(c, state, f->ac, 0);
636 for(i=1; i<256; i++){
637 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
640 put_symbol(c, state, f->colorspace, 0); //YUV cs type
642 put_symbol(c, state, f->avctx->bits_per_raw_sample, 0);
643 put_rac(c, state, 1); //chroma planes
644 put_symbol(c, state, f->chroma_h_shift, 0);
645 put_symbol(c, state, f->chroma_v_shift, 0);
646 put_rac(c, state, f->transparency);
648 write_quant_tables(c, f->quant_table);
650 put_symbol(c, state, f->slice_count, 0);
651 for(i=0; i<f->slice_count; i++){
652 FFV1Context *fs= f->slice_context[i];
653 put_symbol(c, state, (fs->slice_x +1)*f->num_h_slices / f->width , 0);
654 put_symbol(c, state, (fs->slice_y +1)*f->num_v_slices / f->height , 0);
655 put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
656 put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
657 for(j=0; j<f->plane_count; j++){
658 put_symbol(c, state, f->plane[j].quant_table_index, 0);
659 av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
664 #endif /* CONFIG_FFV1_ENCODER */
/* Init shared by encoder and decoder: caches flags and dimensions,
 * initializes the picture defaults and DSP context. */
666 static av_cold int common_init(AVCodecContext *avctx){
667 FFV1Context *s = avctx->priv_data;
670 s->flags= avctx->flags;
672 avcodec_get_frame_defaults(&s->picture);
674 dsputil_init(&s->dsp, avctx);
676 s->width = avctx->width;
677 s->height= avctx->height;
679 assert(s->width && s->height);
/* Allocates per-slice, per-plane coder state (range-coder contexts and VLC
 * states, lazily, only if not yet allocated) and rebuilds each slice's
 * one_state/zero_state tables from state_transition. Returns
 * AVERROR(ENOMEM) on allocation failure. */
688 static int init_slice_state(FFV1Context *f){
691 for(i=0; i<f->slice_count; i++){
692 FFV1Context *fs= f->slice_context[i];
693 fs->plane_count= f->plane_count;
694 fs->transparency= f->transparency;
695 for(j=0; j<f->plane_count; j++){
696 PlaneContext * const p= &fs->plane[j];
699 if(!p-> state) p-> state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t));
701 return AVERROR(ENOMEM);
703 if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(VlcState));
705 return AVERROR(ENOMEM);
710 //FIXME only redo if state_transition changed
711 for(j=1; j<256; j++){
712 fs->c.one_state [ j]= fs->state_transition[j];
713 fs->c.zero_state[256-j]= 256-fs->c.one_state [j];
/* Creates one FFV1Context per slice as a shallow copy of the main context,
 * computes its rectangle from the num_h_slices x num_v_slices grid, clears
 * its rc_stat2 pointers (not shared) and allocates its sample ring buffer.
 * NOTE(review): fs from av_mallocz() is dereferenced by memcpy() before
 * any visible NULL check — confirm against the hidden lines. */
721 static av_cold int init_slice_contexts(FFV1Context *f){
724 f->slice_count= f->num_h_slices * f->num_v_slices;
726 for(i=0; i<f->slice_count; i++){
727 FFV1Context *fs= av_mallocz(sizeof(*fs));
728 int sx= i % f->num_h_slices;
729 int sy= i / f->num_h_slices;
730 int sxs= f->avctx->width * sx / f->num_h_slices;
731 int sxe= f->avctx->width *(sx+1) / f->num_h_slices;
732 int sys= f->avctx->height* sy / f->num_v_slices;
733 int sye= f->avctx->height*(sy+1) / f->num_v_slices;
734 f->slice_context[i]= fs;
735 memcpy(fs, f, sizeof(*fs));
736 memset(fs->rc_stat2, 0, sizeof(fs->rc_stat2));
738 fs->slice_width = sxe - sxs;
739 fs->slice_height= sye - sys;
743 fs->sample_buffer = av_malloc(3*4 * (fs->width+6) * sizeof(*fs->sample_buffer));
744 if (!fs->sample_buffer)
745 return AVERROR(ENOMEM);
/* Allocates one initial-state array per quant table and fills it with the
 * neutral range-coder state value 128. Returns AVERROR(ENOMEM) on
 * failure (earlier allocations are freed later in common_end()). */
750 static int allocate_initial_states(FFV1Context *f){
753 for(i=0; i<f->quant_table_count; i++){
754 f->initial_states[i]= av_malloc(f->context_count[i]*sizeof(*f->initial_states[i]));
755 if(!f->initial_states[i])
756 return AVERROR(ENOMEM);
757 memset(f->initial_states[i], 128, f->context_count[i]*sizeof(*f->initial_states[i]));
762 #if CONFIG_FFV1_ENCODER
/* Builds the global extradata header (version >= 2): allocates an
 * extradata buffer, range-encodes version, coder type, state transition
 * deltas, colorspace/depth/chroma/transparency, the slice grid, all quant
 * table sets and — when any initial state differs from 128 — the per-
 * context initial states, delta-coded against the previous context.
 * NOTE(review): the av_malloc() of extradata is used without a visible
 * NULL check — confirm against the hidden lines. */
763 static int write_extra_header(FFV1Context *f){
764 RangeCoder * const c= &f->c;
765 uint8_t state[CONTEXT_SIZE];
767 uint8_t state2[32][CONTEXT_SIZE];
769 memset(state2, 128, sizeof(state2));
770 memset(state, 128, sizeof(state));
772 f->avctx->extradata= av_malloc(f->avctx->extradata_size= 10000 + (11*11*5*5*5+11*11*11)*32);
773 ff_init_range_encoder(c, f->avctx->extradata, f->avctx->extradata_size);
774 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
776 put_symbol(c, state, f->version, 0);
777 put_symbol(c, state, f->ac, 0);
779 for(i=1; i<256; i++){
780 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
783 put_symbol(c, state, f->colorspace, 0); //YUV cs type
784 put_symbol(c, state, f->avctx->bits_per_raw_sample, 0);
785 put_rac(c, state, 1); //chroma planes
786 put_symbol(c, state, f->chroma_h_shift, 0);
787 put_symbol(c, state, f->chroma_v_shift, 0);
788 put_rac(c, state, f->transparency);
789 put_symbol(c, state, f->num_h_slices-1, 0);
790 put_symbol(c, state, f->num_v_slices-1, 0);
792 put_symbol(c, state, f->quant_table_count, 0);
793 for(i=0; i<f->quant_table_count; i++)
794 write_quant_tables(c, f->quant_tables[i]);
796 for(i=0; i<f->quant_table_count; i++){
797 for(j=0; j<f->context_count[i]*CONTEXT_SIZE; j++)
798 if(f->initial_states[i] && f->initial_states[i][0][j] != 128)
800 if(j<f->context_count[i]*CONTEXT_SIZE){
801 put_rac(c, state, 1);
802 for(j=0; j<f->context_count[i]; j++){
803 for(k=0; k<CONTEXT_SIZE; k++){
804 int pred= j ? f->initial_states[i][j-1][k] : 128;
805 put_symbol(c, state2[k], (int8_t)(f->initial_states[i][j][k]-pred), 1);
809 put_rac(c, state, 0);
813 f->avctx->extradata_size= ff_rac_terminate(c);
/* Greedy optimization of the state-transition table using the pass-1
 * rc_stat counts: for nearby state pairs (i, i2) it compares the modeled
 * code cost of keeping vs. swapping them (COST2 accounts for the mirrored
 * states 256-i) and swaps table entries, statistics and all references in
 * stt[] when the swap is cheaper. State 128 is never moved. */
818 static int sort_stt(FFV1Context *s, uint8_t stt[256]){
819 int i,i2,changed,print=0;
823 for(i=12; i<244; i++){
824 for(i2=i+1; i2<245 && i2<i+4; i2++){
825 #define COST(old, new) \
826 s->rc_stat[old][0]*-log2((256-(new))/256.0)\
827 +s->rc_stat[old][1]*-log2( (new) /256.0)
829 #define COST2(old, new) \
831 +COST(256-(old), 256-(new))
833 double size0= COST2(i, i ) + COST2(i2, i2);
834 double sizeX= COST2(i, i2) + COST2(i2, i );
835 if(sizeX < size0 && i!=128 && i2!=128){
837 FFSWAP(int, stt[ i], stt[ i2]);
838 FFSWAP(int, s->rc_stat[i ][0],s->rc_stat[ i2][0]);
839 FFSWAP(int, s->rc_stat[i ][1],s->rc_stat[ i2][1]);
841 FFSWAP(int, stt[256-i], stt[256-i2]);
842 FFSWAP(int, s->rc_stat[256-i][0],s->rc_stat[256-i2][0]);
843 FFSWAP(int, s->rc_stat[256-i][1],s->rc_stat[256-i2][1]);
845 for(j=1; j<256; j++){
846 if (stt[j] == i ) stt[j] = i2;
847 else if(stt[j] == i2) stt[j] = i ;
849 if (stt[256-j] == 256-i ) stt[256-j] = 256-i2;
850 else if(stt[256-j] == 256-i2) stt[256-j] = 256-i ;
/* Encoder init: builds the two quant table sets (11-bucket 3-input model
 * and 11x11x5x5x5 5-input model; "_10bit" variants for >8-bit input),
 * configures planes, validates the pixel format / coder combination,
 * parses the 2-pass statistics file (rc_stat, rc_stat2, gob_count) to
 * derive a sorted state-transition table and per-context initial states,
 * writes the extradata and sets up slice contexts and pass-1 buffers.
 * NOTE(review): av_mallocz(STATS_OUT_SIZE) for stats_out is not visibly
 * NULL-checked — confirm against the hidden lines. */
861 static av_cold int encode_init(AVCodecContext *avctx)
863 FFV1Context *s = avctx->priv_data;
869 s->ac= avctx->coder_type ? 2:0;
873 s->state_transition[i]=ver2_state[i];
876 for(i=0; i<256; i++){
877 s->quant_table_count=2;
878 if(avctx->bits_per_raw_sample <=8){
879 s->quant_tables[0][0][i]= quant11[i];
880 s->quant_tables[0][1][i]= 11*quant11[i];
881 s->quant_tables[0][2][i]= 11*11*quant11[i];
882 s->quant_tables[1][0][i]= quant11[i];
883 s->quant_tables[1][1][i]= 11*quant11[i];
884 s->quant_tables[1][2][i]= 11*11*quant5 [i];
885 s->quant_tables[1][3][i]= 5*11*11*quant5 [i];
886 s->quant_tables[1][4][i]= 5*5*11*11*quant5 [i];
888 s->quant_tables[0][0][i]= quant9_10bit[i];
889 s->quant_tables[0][1][i]= 11*quant9_10bit[i];
890 s->quant_tables[0][2][i]= 11*11*quant9_10bit[i];
891 s->quant_tables[1][0][i]= quant9_10bit[i];
892 s->quant_tables[1][1][i]= 11*quant9_10bit[i];
893 s->quant_tables[1][2][i]= 11*11*quant5_10bit[i];
894 s->quant_tables[1][3][i]= 5*11*11*quant5_10bit[i];
895 s->quant_tables[1][4][i]= 5*5*11*11*quant5_10bit[i];
898 s->context_count[0]= (11*11*11+1)/2;
899 s->context_count[1]= (11*11*5*5*5+1)/2;
900 memcpy(s->quant_table, s->quant_tables[avctx->context_model], sizeof(s->quant_table));
902 for(i=0; i<s->plane_count; i++){
903 PlaneContext * const p= &s->plane[i];
905 memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table));
906 p->quant_table_index= avctx->context_model;
907 p->context_count= s->context_count[p->quant_table_index];
910 if(allocate_initial_states(s) < 0)
911 return AVERROR(ENOMEM);
913 avctx->coded_frame= &s->picture;
914 switch(avctx->pix_fmt){
915 case PIX_FMT_YUV420P9:
916 case PIX_FMT_YUV420P10:
917 case PIX_FMT_YUV422P10:
918 s->packed_at_lsb = 1;
919 case PIX_FMT_YUV444P16:
920 case PIX_FMT_YUV422P16:
921 case PIX_FMT_YUV420P16:
922 if(avctx->bits_per_raw_sample <=8){
923 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n");
927 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample of more than 8 needs -coder 1 currently\n");
930 s->version= FFMAX(s->version, 1);
931 case PIX_FMT_YUV444P:
932 case PIX_FMT_YUV422P:
933 case PIX_FMT_YUV420P:
934 case PIX_FMT_YUV411P:
935 case PIX_FMT_YUV410P:
946 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
951 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
955 if(avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
956 for(i=0; i<s->quant_table_count; i++){
957 s->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*s->rc_stat2[i]));
959 return AVERROR(ENOMEM);
963 char *p= avctx->stats_in;
964 uint8_t best_state[256][256];
968 av_assert0(s->version>=2);
971 for(j=0; j<256; j++){
973 s->rc_stat[j][i]= strtol(p, &next, 0);
975 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d [%s]\n", j,i,p);
981 for(i=0; i<s->quant_table_count; i++){
982 for(j=0; j<s->context_count[i]; j++){
985 s->rc_stat2[i][j][k][m]= strtol(p, &next, 0);
987 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d %d %d [%s]\n", i,j,k,m,p);
995 gob_count= strtol(p, &next, 0);
996 if(next==p || gob_count <0){
997 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid\n");
1001 while(*p=='\n' || *p==' ') p++;
1004 sort_stt(s, s->state_transition);
1006 find_best_state(best_state, s->state_transition);
1008 for(i=0; i<s->quant_table_count; i++){
1009 for(j=0; j<s->context_count[i]; j++){
1010 for(k=0; k<32; k++){
1012 if(s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]){
1013 p=256.0*s->rc_stat2[i][j][k][1] / (s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]);
1015 s->initial_states[i][j][k]= best_state[av_clip(round(p), 1, 255)][av_clip((s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1])/gob_count, 0, 255)];
1024 write_extra_header(s);
1027 if(init_slice_contexts(s) < 0)
1029 if(init_slice_state(s) < 0)
1032 #define STATS_OUT_SIZE 1024*1024*6
1033 if(avctx->flags & CODEC_FLAG_PASS1){
1034 avctx->stats_out= av_mallocz(STATS_OUT_SIZE);
1035 for(i=0; i<s->quant_table_count; i++){
1036 for(j=0; j<s->slice_count; j++){
1037 FFV1Context *sf= s->slice_context[j];
1038 av_assert0(!sf->rc_stat2[i]);
1039 sf->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*sf->rc_stat2[i]));
1040 if(!sf->rc_stat2[i])
1041 return AVERROR(ENOMEM);
1048 #endif /* CONFIG_FFV1_ENCODER */
/* Resets all slice/plane coder state for a keyframe: range-coder contexts
 * go back to the per-quant-table initial states (or the neutral 128), and
 * each VLC state is reset to its documented start values. */
1051 static void clear_state(FFV1Context *f){
1054 for(si=0; si<f->slice_count; si++){
1055 FFV1Context *fs= f->slice_context[si];
1056 for(i=0; i<f->plane_count; i++){
1057 PlaneContext *p= &fs->plane[i];
1059 p->interlace_bit_state[0]= 128;
1060 p->interlace_bit_state[1]= 128;
1063 if(f->initial_states[p->quant_table_index]){
1064 memcpy(p->state, f->initial_states[p->quant_table_index], CONTEXT_SIZE*p->context_count);
1066 memset(p->state, 128, CONTEXT_SIZE*p->context_count);
1068 for(j=0; j<p->context_count; j++){
1069 p->vlc_state[j].drift= 0;
1070 p->vlc_state[j].error_sum= 4; //FFMAX((RANGE + 32)/64, 2);
1071 p->vlc_state[j].bias= 0;
1072 p->vlc_state[j].count= 1;
1079 #if CONFIG_FFV1_ENCODER
/* Threads entry point: encodes one slice. In YUV mode (colorspace==0) the
 * luma plane and both (sub-sampled, rounded-up) chroma planes are encoded
 * with encode_plane(); otherwise the slice is encoded as packed RGB.
 * ps is the byte size of one sample (1 or 2). */
1080 static int encode_slice(AVCodecContext *c, void *arg){
1081 FFV1Context *fs= *(void**)arg;
1082 FFV1Context *f= fs->avctx->priv_data;
1083 int width = fs->slice_width;
1084 int height= fs->slice_height;
1087 AVFrame * const p= &f->picture;
1088 const int ps= (c->bits_per_raw_sample>8)+1;
1090 if(f->colorspace==0){
1091 const int chroma_width = -((-width )>>f->chroma_h_shift); // ceil division
1092 const int chroma_height= -((-height)>>f->chroma_v_shift);
1093 const int cx= x>>f->chroma_h_shift;
1094 const int cy= y>>f->chroma_v_shift;
1096 encode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1098 encode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1099 encode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1101 encode_rgb_frame(fs, (uint32_t*)(p->data[0]) + ps*x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
/* Top-level frame encoder: decides keyframe (gop boundary), writes the
 * header on keyframes, partitions the output buffer between slices, runs
 * encode_slice() in parallel via avctx->execute, then compacts each
 * slice's output (appending a 24-bit length trailer for slices > 0) and,
 * every 32 frames in pass 1, aggregates all slices' statistics into the
 * textual stats_out report. */
1108 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1109 FFV1Context *f = avctx->priv_data;
1110 RangeCoder * const c= &f->slice_context[0]->c;
1111 AVFrame *pict = data;
1112 AVFrame * const p= &f->picture;
1114 uint8_t keystate=128;
1118 ff_init_range_encoder(c, buf, buf_size);
1119 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1122 p->pict_type= AV_PICTURE_TYPE_I;
1124 if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
1125 put_rac(c, &keystate, 1);
1131 put_rac(c, &keystate, 0);
1136 used_count += ff_rac_terminate(c);
1137 //printf("pos=%d\n", used_count);
1138 init_put_bits(&f->slice_context[0]->pb, buf + used_count, buf_size - used_count);
1141 for(i=1; i<256; i++){
1142 c->one_state[i]= f->state_transition[i];
1143 c->zero_state[256-i]= 256-c->one_state[i];
1147 for(i=1; i<f->slice_count; i++){
1148 FFV1Context *fs= f->slice_context[i];
1149 uint8_t *start= buf + (buf_size-used_count)*i/f->slice_count;
1150 int len= buf_size/f->slice_count;
1153 ff_init_range_encoder(&fs->c, start, len);
1155 init_put_bits(&fs->pb, start, len);
1158 avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
1161 for(i=0; i<f->slice_count; i++){
1162 FFV1Context *fs= f->slice_context[i];
1167 put_rac(&fs->c, &state, 0);
1168 bytes= ff_rac_terminate(&fs->c);
1170 flush_put_bits(&fs->pb); //nicer padding FIXME
1171 bytes= used_count + (put_bits_count(&fs->pb)+7)/8;
1175 av_assert0(bytes < buf_size/f->slice_count);
1176 memmove(buf_p, fs->ac ? fs->c.bytestream_start : fs->pb.buf, bytes);
1177 av_assert0(bytes < (1<<24));
1178 AV_WB24(buf_p+bytes, bytes); // 24-bit slice size trailer
1184 if((avctx->flags&CODEC_FLAG_PASS1) && (f->picture_number&31)==0){
1186 char *p= avctx->stats_out;
1187 char *end= p + STATS_OUT_SIZE;
1189 memset(f->rc_stat, 0, sizeof(f->rc_stat));
1190 for(i=0; i<f->quant_table_count; i++)
1191 memset(f->rc_stat2[i], 0, f->context_count[i]*sizeof(*f->rc_stat2[i]));
1193 for(j=0; j<f->slice_count; j++){
1194 FFV1Context *fs= f->slice_context[j];
1195 for(i=0; i<256; i++){
1196 f->rc_stat[i][0] += fs->rc_stat[i][0];
1197 f->rc_stat[i][1] += fs->rc_stat[i][1];
1199 for(i=0; i<f->quant_table_count; i++){
1200 for(k=0; k<f->context_count[i]; k++){
1201 for(m=0; m<32; m++){
1202 f->rc_stat2[i][k][m][0] += fs->rc_stat2[i][k][m][0];
1203 f->rc_stat2[i][k][m][1] += fs->rc_stat2[i][k][m][1];
1209 for(j=0; j<256; j++){
1210 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat[j][0], f->rc_stat[j][1]);
1213 snprintf(p, end-p, "\n");
1215 for(i=0; i<f->quant_table_count; i++){
1216 for(j=0; j<f->context_count[i]; j++){
1217 for(m=0; m<32; m++){
1218 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat2[i][j][m][0], f->rc_stat2[i][j][m][1]);
1223 snprintf(p, end-p, "%d\n", f->gob_count);
1224 } else if(avctx->flags&CODEC_FLAG_PASS1)
1225 avctx->stats_out[0] = '\0';
1227 f->picture_number++;
1230 #endif /* CONFIG_FFV1_ENCODER */
/* Teardown shared by encoder and decoder: releases the reference frame
 * (decoder only), then frees per-slice plane states, sample buffers,
 * stats output, initial states, rc_stat2 arrays and finally the slice
 * contexts themselves. */
1232 static av_cold int common_end(AVCodecContext *avctx){
1233 FFV1Context *s = avctx->priv_data;
1236 if (avctx->codec->decode && s->picture.data[0])
1237 avctx->release_buffer(avctx, &s->picture);
1239 for(j=0; j<s->slice_count; j++){
1240 FFV1Context *fs= s->slice_context[j];
1241 for(i=0; i<s->plane_count; i++){
1242 PlaneContext *p= &fs->plane[i];
1244 av_freep(&p->state);
1245 av_freep(&p->vlc_state);
1247 av_freep(&fs->sample_buffer);
1250 av_freep(&avctx->stats_out);
1251 for(j=0; j<s->quant_table_count; j++){
1252 av_freep(&s->initial_states[j]);
1253 for(i=0; i<s->slice_count; i++){
1254 FFV1Context *sf= s->slice_context[i];
1255 av_freep(&sf->rc_stat2[j]);
1257 av_freep(&s->rc_stat2[j]);
1260 for(i=0; i<s->slice_count; i++){
1261 av_freep(&s->slice_context[i]);
/* Decodes one line of `w` samples (counterpart of encode_line): range-
 * coded residuals via get_symbol_inline(), or Golomb-Rice with run-length
 * decoding of zero-context runs. The reconstructed sample is the median
 * prediction plus the residual, masked to `bits`. Note the argument order:
 * sample[1] is the current line, sample[0] the previous. */
1267 static av_always_inline void decode_line(FFV1Context *s, int w,
1269 int plane_index, int bits)
1271 PlaneContext * const p= &s->plane[plane_index];
1272 RangeCoder * const c= &s->c;
1276 int run_index= s->run_index;
1279 int diff, context, sign;
1281 context= get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
1288 av_assert2(context < p->context_count);
1291 diff= get_symbol_inline(c, p->state[context], 1);
1293 if(context == 0 && run_mode==0) run_mode=1;
1296 if(run_count==0 && run_mode==1){
1297 if(get_bits1(&s->gb)){
1298 run_count = 1<<ff_log2_run[run_index];
1299 if(x + run_count <= w) run_index++;
1301 if(ff_log2_run[run_index]) run_count = get_bits(&s->gb, ff_log2_run[run_index]);
1303 if(run_index) run_index--;
1311 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1316 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1318 // printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, get_bits_count(&s->gb));
1321 if(sign) diff= -diff;
1323 sample[1][x]= (predict(sample[1] + x, sample[0] + x) + diff) & ((1<<bits)-1);
1325 s->run_index= run_index;
/* Decodes one plane line by line: swaps the two sample-line pointers each
 * row, pads the edges, decodes into sample[1] and stores the result into
 * the destination as bytes (<=8 bit) or 16-bit words packed at the LSB or
 * MSB depending on packed_at_lsb. */
1328 static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
1331 sample[0]=s->sample_buffer +3;
1332 sample[1]=s->sample_buffer+w+6+3;
1336 memset(s->sample_buffer, 0, 2*(w+6)*sizeof(*s->sample_buffer));
1339 int16_t *temp = sample[0]; //FIXME try a normal buffer
1341 sample[0]= sample[1];
1344 sample[1][-1]= sample[0][0 ];  // left-edge padding from line above
1345 sample[0][ w]= sample[0][w-1]; // right-edge padding
1348 if(s->avctx->bits_per_raw_sample <= 8){
1349 decode_line(s, w, sample, plane_index, 8);
1351 src[x + stride*y]= sample[1][x];
1354 decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
1355 if(s->packed_at_lsb){
1357 ((uint16_t*)(src + stride*y))[x]= sample[1][x];
1361 ((uint16_t*)(src + stride*y))[x]= sample[1][x] << (16 - s->avctx->bits_per_raw_sample);
1365 //STOP_TIMER("decode-line")}
/* Decodes a packed RGB(A) frame (counterpart of encode_rgb_frame): each
 * of the up to 4 component planes keeps a previous/current line pair in
 * sample_buffer; lines are decoded at 9 bits, then the g/b/r/a components
 * are repacked into 32-bit BGRA words. */
1369 static void decode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
1371 int16_t *sample[4][2];
1373 sample[x][0] = s->sample_buffer + x*2 *(w+6) + 3;
1374 sample[x][1] = s->sample_buffer + (x*2+1)*(w+6) + 3;
1379 memset(s->sample_buffer, 0, 8*(w+6)*sizeof(*s->sample_buffer));
1382 for(p=0; p<3 + s->transparency; p++){
1383 int16_t *temp = sample[p][0]; //FIXME try a normal buffer
1385 sample[p][0]= sample[p][1];
1388 sample[p][1][-1]= sample[p][0][0 ];
1389 sample[p][0][ w]= sample[p][0][w-1];
1390 decode_line(s, w, sample[p], (p+1)/2, 9);
1393 int g= sample[0][1][x];
1394 int b= sample[1][1][x];
1395 int r= sample[2][1][x];
1396 int a= sample[3][1][x];
1398 // assert(g>=0 && b>=0 && r>=0);
1399 // assert(g<256 && b<512 && r<512);
1407 src[x + stride*y]= b + (g<<8) + (r<<16) + (a<<24);
/*
 * Per-slice worker, invoked through avctx->execute() with a pointer to the
 * slice's FFV1Context.  Dispatches to planar YUV decoding (colorspace 0)
 * or packed RGB decoding (colorspace 1).
 * NOTE(review): elided excerpt — return statements and some braces are not
 * visible here.
 */
1412 static int decode_slice(AVCodecContext *c, void *arg){
1413 FFV1Context *fs= *(void**)arg;
     /* the main context lives in avctx->priv_data; fs is one slice of it */
1414 FFV1Context *f= fs->avctx->priv_data;
1415 int width = fs->slice_width;
1416 int height= fs->slice_height;
     /* pixel size in bytes: 2 for >8-bit samples, else 1 */
1419 const int ps= (c->bits_per_raw_sample>8)+1;
1420 AVFrame * const p= &f->picture;
1422 av_assert1(width && height);
1423 if(f->colorspace==0){
     /* round chroma dimensions up (ceil division via negation) */
1424 const int chroma_width = -((-width )>>f->chroma_h_shift);
1425 const int chroma_height= -((-height)>>f->chroma_v_shift);
1426 const int cx= x>>f->chroma_h_shift;
1427 const int cy= y>>f->chroma_v_shift;
1428 decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1430 decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
     /* NOTE(review): the offset into data[2] is computed with linesize[1]
        while the stride argument is linesize[2]; this is only harmless if
        both chroma pitches are equal — confirm, looks like a typo */
1431 decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[2], 1);
1433 decode_rgb_frame(fs, (uint32_t*)p->data[0] + ps*x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
/*
 * Read one run-length-coded quantisation table half (128 entries) from the
 * range coder and mirror it into the negative half, producing a 256-entry
 * symmetric table scaled by 'scale'.  Returns -1 when a run overflows the
 * table.
 * NOTE(review): elided excerpt — the declarations of i/v, the inner
 * while(len--) loop that advances i, and the final return value are not
 * visible here.
 */
1441 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale){
1444 uint8_t state[CONTEXT_SIZE];
1446 memset(state, 128, sizeof(state));
     /* loop condition is on i (the table write position, advanced in an
        elided inner loop) while v counts distinct quantisation levels */
1448 for(v=0; i<128 ; v++){
     /* run length of identical table entries */
1449 int len= get_symbol(c, state, 0) + 1;
     /* reject runs that would write past the 128-entry half */
1451 if(len + i > 128) return -1;
1454 quant_table[i] = scale*v;
1457 //if(i%16==0) printf("\n");
     /* mirror: entry 256-i is the negation of entry i */
1461 for(i=1; i<128; i++){
1462 quant_table[256-i]= -quant_table[i];
1464 quant_table[128]= -quant_table[127];
/*
 * Read all quantisation tables for one table set; the total context count
 * is the product of the per-table level counts.  Returns the folded
 * context count (contexts are symmetric, hence the /2).
 * NOTE(review): elided excerpt — the loop header over i and the error
 * return inside the size check are not visible here.
 */
1469 static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
1471 int context_count=1;
     /* each input dimension multiplies the number of contexts; a -1 error
        from read_quant_table propagates as a negative product */
1474 context_count*= read_quant_table(c, quant_table[i], context_count);
     /* unsigned comparison also rejects a negative (error) count, since a
        negative int converts to a huge unsigned value */
1475 if(context_count > 32768U){
     /* symmetric contexts are folded together */
1479 return (context_count+1)/2;
/*
 * Parse the out-of-band global header (version >= 2) from
 * avctx->extradata using a fresh range decoder: codec version, coder
 * type, custom range-coder state transition table, colorspace, bit depth,
 * chroma subsampling, transparency, slice grid dimensions, and the
 * quantisation tables plus optional per-context initial model states.
 * NOTE(review): elided excerpt — variable declarations, several error
 * returns and the closing braces are not visible here.
 */
1482 static int read_extra_header(FFV1Context *f){
1483 RangeCoder * const c= &f->c;
1484 uint8_t state[CONTEXT_SIZE];
     /* one state set per CONTEXT_SIZE entry for the initial-state deltas */
1486 uint8_t state2[32][CONTEXT_SIZE];
1488 memset(state2, 128, sizeof(state2));
1489 memset(state, 128, sizeof(state));
1491 ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size);
1492 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1494 f->version= get_symbol(c, state, 0);
1495 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
     /* custom state transition table, stored as signed deltas against the
        default one_state table */
1497 for(i=1; i<256; i++){
1498 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1501 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1502 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1503 get_rac(c, state); //no chroma = false
1504 f->chroma_h_shift= get_symbol(c, state, 0);
1505 f->chroma_v_shift= get_symbol(c, state, 0);
1506 f->transparency= get_rac(c, state);
     /* luma + chroma (+ alpha when transparency is signalled) */
1507 f->plane_count= 2 + f->transparency;
1508 f->num_h_slices= 1 + get_symbol(c, state, 0);
1509 f->num_v_slices= 1 + get_symbol(c, state, 0);
     /* each slice must cover at least one pixel in each direction */
1510 if(f->num_h_slices > (unsigned)f->width || f->num_v_slices > (unsigned)f->height){
1511 av_log(f->avctx, AV_LOG_ERROR, "too many slices\n");
1515 f->quant_table_count= get_symbol(c, state, 0);
1516 if(f->quant_table_count > (unsigned)MAX_QUANT_TABLES)
1518 for(i=0; i<f->quant_table_count; i++){
1519 if((f->context_count[i]= read_quant_tables(c, f->quant_tables[i])) < 0){
1520 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1525 if(allocate_initial_states(f) < 0)
1526 return AVERROR(ENOMEM);
     /* optional per-table initial model states, delta-coded against the
        previous context (128 for the first) */
1528 for(i=0; i<f->quant_table_count; i++){
1529 if(get_rac(c, state)){
1530 for(j=0; j<f->context_count[i]; j++){
1531 for(k=0; k<CONTEXT_SIZE; k++){
1532 int pred= j ? f->initial_states[i][j-1][k] : 128;
1533 f->initial_states[i][j][k]= (pred+get_symbol(c, state2[k], 1))&0xFF;
/*
 * Parse the per-keyframe header from the first slice's range coder:
 * stream parameters (duplicated from the extra header for version < 2),
 * pixel format selection, quantisation tables, and per-slice / per-plane
 * configuration (geometry, quant table index, context state reset).
 * NOTE(review): elided excerpt — version checks, error returns, state
 * allocation and many braces are not visible here.
 */
1542 static int read_header(FFV1Context *f){
1543 uint8_t state[CONTEXT_SIZE];
1544 int i, j, context_count;
1545 RangeCoder * const c= &f->slice_context[0]->c;
1547 memset(state, 128, sizeof(state));
1550 f->version= get_symbol(c, state, 0);
1551 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
     /* custom range-coder state transition table (deltas vs. default) */
1553 for(i=1; i<256; i++){
1554 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1557 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1559 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1560 get_rac(c, state); //no chroma = false
1561 f->chroma_h_shift= get_symbol(c, state, 0);
1562 f->chroma_v_shift= get_symbol(c, state, 0);
1563 f->transparency= get_rac(c, state);
1564 f->plane_count= 2 + f->transparency;
     /* map (bit depth, chroma subsampling) to an output pixel format */
1567 if(f->colorspace==0){
1568 if(f->avctx->bits_per_raw_sample<=8){
     /* key = h_shift in the high nibble, v_shift in the low nibble */
1569 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1570 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P; break;
1571 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P; break;
1572 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P; break;
1573 case 0x20: f->avctx->pix_fmt= PIX_FMT_YUV411P; break;
1574 case 0x22: f->avctx->pix_fmt= PIX_FMT_YUV410P; break;
1576 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1579 }else if(f->avctx->bits_per_raw_sample==9) {
1580 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1581 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
1582 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P16; break;
     /* 9-bit 4:2:0 has a native format; samples sit at the LSB end */
1583 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P9 ; f->packed_at_lsb=1; break;
1585 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1588 }else if(f->avctx->bits_per_raw_sample==10) {
1589 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1590 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
1591 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P10; f->packed_at_lsb=1; break;
1592 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P10; f->packed_at_lsb=1; break;
1594 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
     /* any other depth: fall back to 16-bit planar formats */
1598 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1599 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
1600 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P16; break;
1601 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P16; break;
1603 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1607 }else if(f->colorspace==1){
1608 if(f->chroma_h_shift || f->chroma_v_shift){
1609 av_log(f->avctx, AV_LOG_ERROR, "chroma subsampling not supported in this colorspace\n");
1612 if(f->transparency) f->avctx->pix_fmt= PIX_FMT_RGB32;
1613 else f->avctx->pix_fmt= PIX_FMT_0RGB32;
1615 av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
1619 //printf("%d %d %d\n", f->chroma_h_shift, f->chroma_v_shift,f->avctx->pix_fmt);
1621 context_count= read_quant_tables(c, f->quant_table);
1622 if(context_count < 0){
1623 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1627 f->slice_count= get_symbol(c, state, 0);
1628 if(f->slice_count > (unsigned)MAX_SLICES)
1632 for(j=0; j<f->slice_count; j++){
1633 FFV1Context *fs= f->slice_context[j];
1635 fs->packed_at_lsb= f->packed_at_lsb;
     /* version >= 2: explicit per-slice geometry in slice-grid units,
        scaled by the slice counts below */
1637 if(f->version >= 2){
1638 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1639 fs->slice_y = get_symbol(c, state, 0) *f->height;
1640 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1641 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
1643 fs->slice_x /= f->num_h_slices;
1644 fs->slice_y /= f->num_v_slices;
1645 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1646 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
     /* validate the slice rectangle against the frame (uint64_t guards
        against overflow in the sum) */
1647 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1649 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1650 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1654 for(i=0; i<f->plane_count; i++){
1655 PlaneContext * const p= &fs->plane[i];
1657 if(f->version >= 2){
1658 int idx=get_symbol(c, state, 0);
     /* NOTE(review): valid indices are 0..quant_table_count-1, so this
        check looks off by one — idx == quant_table_count slips through
        and would index one past the last table; later FFmpeg changed this
        to '>=' — confirm and fix upstream */
1659 if(idx > (unsigned)f->quant_table_count){
1660 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
1663 p->quant_table_index= idx;
1664 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
1665 context_count= f->context_count[idx];
     /* version < 2: single global quant table */
1667 memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
     /* grow per-plane state arrays when the context count increased */
1670 if(p->context_count < context_count){
1671 av_freep(&p->state);
1672 av_freep(&p->vlc_state);
1674 p->context_count= context_count;
/*
 * Decoder init: common context setup (elided), optional parsing of the
 * out-of-band global header from extradata, then slice-context creation.
 * NOTE(review): elided excerpt — the common init call, error return
 * values and the final return are not visible here.
 */
1681 static av_cold int decode_init(AVCodecContext *avctx)
1683 FFV1Context *f = avctx->priv_data;
     /* extradata carries the version >= 2 global header */
1687 if(avctx->extradata && read_extra_header(f) < 0)
1690 if(init_slice_contexts(f) < 0)
/*
 * Decode one packet into one frame.  Reads the keyframe flag with the
 * range coder, (re)parses the header on keyframes, splits the packet into
 * per-slice byte ranges using the 24-bit size fields stored at the end of
 * each slice, and decodes all slices (possibly threaded via execute()).
 * NOTE(review): elided excerpt — declarations (i, bytes_read), the
 * keyframe else-branch, error returns and the final return are not
 * visible here.
 */
1696 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
1697 const uint8_t *buf = avpkt->data;
1698 int buf_size = avpkt->size;
1699 FFV1Context *f = avctx->priv_data;
1700 RangeCoder * const c= &f->slice_context[0]->c;
1701 AVFrame * const p= &f->picture;
     /* dedicated range-coder state for the keyframe bit */
1703 uint8_t keystate= 128;
1704 const uint8_t *buf_p;
1706 AVFrame *picture = data;
1708 /* release previously stored data */
1710 avctx->release_buffer(avctx, p);
1712 ff_init_range_decoder(c, buf, buf_size);
1713 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1716 p->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
     /* keyframes carry a full header */
1717 if(get_rac(c, &keystate)){
1719 if(read_header(f) < 0)
1721 if(init_slice_state(f) < 0)
     /* install the (possibly custom) state transition table; the zero
        state is the mirror of the one state */
1730 for(i=1; i<256; i++){
1731 c->one_state[i]= f->state_transition[i];
1732 c->zero_state[256-i]= 256-c->one_state[i];
1737 if(avctx->get_buffer(avctx, p) < 0){
1738 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1742 if(avctx->debug&FF_DEBUG_PICT_INFO)
1743 av_log(avctx, AV_LOG_ERROR, "keyframe:%d coder:%d\n", p->key_frame, f->ac);
     /* AC coder: find where the range-coded header ended so the Golomb
        bit reader can start right after it */
1746 bytes_read = c->bytestream - c->bytestream_start - 1;
1747 if(bytes_read ==0) av_log(avctx, AV_LOG_ERROR, "error at end of AC stream\n"); //FIXME
1748 //printf("pos=%d\n", bytes_read);
1749 init_get_bits(&f->slice_context[0]->gb, buf + bytes_read, (buf_size - bytes_read) * 8);
1751 bytes_read = 0; /* avoid warning */
     /* walk the slice chain backwards: each slice ends with a 3-byte
        big-endian length (the +3 includes that trailer itself) */
1754 buf_p= buf + buf_size;
1755 for(i=f->slice_count-1; i>0; i--){
1756 FFV1Context *fs= f->slice_context[i];
1757 int v= AV_RB24(buf_p-3)+3;
1758 if(buf_p - buf <= v){
1759 av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
1764 ff_init_range_decoder(&fs->c, buf_p, v);
1766 init_get_bits(&fs->gb, buf_p, v * 8);
     /* decode all slices, one thread per slice context */
1770 avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
1771 f->picture_number++;
1774 *data_size = sizeof(AVFrame);
/*
 * Decoder registration (pre-AVCodec-v55 API: CODEC_ID_*, CODEC_CAP_*).
 * NOTE(review): elided excerpt — the .name field and closing brace are
 * not visible here.
 */
1779 AVCodec ff_ffv1_decoder = {
1781 .type = AVMEDIA_TYPE_VIDEO,
1782 .id = CODEC_ID_FFV1,
1783 .priv_data_size = sizeof(FFV1Context),
1784 .init = decode_init,
1785 .close = common_end,
1786 .decode = decode_frame,
     /* direct rendering + one-thread-per-slice decoding */
1787 .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ | CODEC_CAP_SLICE_THREADS,
1788 .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
1791 #if CONFIG_FFV1_ENCODER
1792 AVCodec ff_ffv1_encoder = {
1794 .type = AVMEDIA_TYPE_VIDEO,
1795 .id = CODEC_ID_FFV1,
1796 .priv_data_size = sizeof(FFV1Context),
1797 .init = encode_init,
1798 .encode = encode_frame,
1799 .close = common_end,
1800 .capabilities = CODEC_CAP_SLICE_THREADS,
1801 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_0RGB32, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_YUV420P9, PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_NONE},
1802 .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),