2 * FFV1 codec for libavcodec
4 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * FF Video Codec 1 (a lossless codec)
32 #include "rangecoder.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/avassert.h"
39 #define CONTEXT_SIZE 32
41 #define MAX_QUANT_TABLES 8
42 #define MAX_CONTEXT_INPUTS 5
44 extern const uint8_t ff_log2_run[41];
/* 5-level quantizer for 9/10-bit content: maps a sample difference
 * (taken mod 256) to one of {-2,-1,0,1,2}; odd-symmetric around 0
 * with a wider dead zone than quant5 to suppress noise in high-depth
 * sources. */
static const int8_t quant5_10bit[256]={
 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-0,-0,-0,-0,-0,-0,-0,-0,-0,-0,
/* 5-level quantizer for 8-bit content: maps a sample difference
 * (mod 256) to one of {-2,-1,0,1,2}; odd-symmetric around 0. */
static const int8_t quant5[256]={
 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1,-1,
/* 9-level quantizer for 9/10-bit content: maps a sample difference
 * (mod 256) to one of {-4..4}; odd-symmetric around 0. */
static const int8_t quant9_10bit[256]={
 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,
-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,
-3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-1,-1,-1,-1,-1,-1,-1,-1,-0,-0,-0,-0,
/* 11-level quantizer for 8-bit content: maps a sample difference
 * (mod 256) to one of {-5..5}; odd-symmetric around 0. */
static const int8_t quant11[256]={
 0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-4,-4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
-4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-1,
/* Default range-coder state-transition table for bitstream version 2:
 * ver2_state[s] is the next "one" state after coding a 1 bit in state s
 * (the "zero" side is derived by symmetry, see init_slice_state()). */
static const uint8_t ver2_state[256]= {
   0,  10,  10,  10,  10,  16,  16,  16, 28,   16,  16,  29,  42,  49,  20,  49,
  59,  25,  26,  26,  27,  31,  33,  33, 33,   34,  34,  37,  67,  38,  39,  39,
  40,  40,  41,  79,  43,  44,  45,  45, 48,   48,  64,  50,  51,  52,  88,  52,
  53,  74,  55,  57,  58,  58,  74,  60101,   61,  62,  84,  66,  66,  68,  69,
  87,  82,  71,  97,  73,  73,  82,  75111,   77,  94,  78,  87,  81,  83,  97,
  85,  83,  94,  86,  99,  89,  90,  99111,   92,  93, 134,  95,  98, 105,  98,
 105, 110, 102, 108, 102, 118, 103, 106, 106, 113, 109, 112, 114, 112, 116, 125,
 115, 116, 117, 117, 126, 119, 125, 121, 121, 123, 145, 124, 126, 131, 127, 129,
 165, 130, 132, 138, 133, 135, 145, 136, 137, 139, 146, 141, 143, 142, 144, 148,
 147, 155, 151, 149, 151, 150, 152, 157, 153, 154, 156, 168, 158, 162, 161, 160,
 172, 163, 169, 164, 166, 184, 167, 170, 177, 174, 171, 173, 182, 176, 180, 178,
 175, 189, 179, 181, 186, 183, 192, 185, 200, 187, 191, 188, 190, 197, 193, 196,
 197, 194, 195, 196, 198, 202, 199, 201, 210, 203, 207, 204, 205, 206, 208, 214,
 209, 211, 221, 212, 213, 215, 224, 216, 217, 218, 219, 220, 222, 228, 223, 225,
 226, 224, 227, 229, 240, 230, 231, 232, 233, 234, 235, 236, 238, 239, 237, 242,
 241, 243, 242, 244, 245, 246, 247, 248, 249, 250, 251, 252, 252, 253, 254, 255,
/* State of the adaptive Golomb-Rice (VLC) coder for one context. */
typedef struct VlcState{

/* Per-plane coding state. */
typedef struct PlaneContext{
    int16_t quant_table[MAX_CONTEXT_INPUTS][256];  // maps each (diff & 0xFF) to a quantized context input
    int quant_table_index;                         // which entry of FFV1Context.quant_tables is used
    uint8_t (*state)[CONTEXT_SIZE];                // range-coder states, one CONTEXT_SIZE row per context
    uint8_t interlace_bit_state[2];
#define MAX_SLICES 256

/* Global codec context; also cloned once per slice (see slice_context). */
typedef struct FFV1Context{
    AVCodecContext *avctx;
    uint64_t rc_stat[256][2];                       // pass-1 statistics: 0/1 bit counts per coder state
    uint64_t (*rc_stat2[MAX_QUANT_TABLES])[32][2];  // pass-1 statistics per quant table / context / state
    int chroma_h_shift, chroma_v_shift;
    int ac;                                         ///< 1=range coder <-> 0=golomb rice
    PlaneContext plane[MAX_PLANES];
    int16_t quant_table[MAX_CONTEXT_INPUTS][256];   // quant table in active use
    int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][256];
    int context_count[MAX_QUANT_TABLES];            // number of contexts each quant table produces
    uint8_t state_transition[256];                  // custom range-coder transition table (ver >= 2)
    uint8_t (*initial_states[MAX_QUANT_TABLES])[32];// optional non-128 initial coder states
    int16_t *sample_buffer;                         // ring of padded line buffers
    int quant_table_count;
    struct FFV1Context *slice_context[MAX_SLICES];  // per-slice clones; [0] is also used for headers
    int bits_per_raw_sample;
203 static av_always_inline int fold(int diff, int bits){
/**
 * Median predictor: predict the current sample from its left (L),
 * top (T) and top-left (LT) neighbours as the median of
 * (L, L + T - LT, T) — the classic gradient-adjusted predictor.
 *
 * @param src  pointer to the current sample position in the current line
 * @param last pointer to the same position in the previous line
 * @return the predicted value
 */
static inline int predict(int16_t *src, int16_t *last)
{
    const int LT = last[-1];
    const int T  = last[0];
    const int L  = src[-1];

    return mid_pred(L, L + T - LT, T);
}
224 static inline int get_context(PlaneContext *p, int16_t *src,
225 int16_t *last, int16_t *last2)
227 const int LT= last[-1];
228 const int T= last[ 0];
229 const int RT= last[ 1];
230 const int L = src[-1];
232 if(p->quant_table[3][127]){
233 const int TT= last2[0];
234 const int LL= src[-2];
235 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF]
236 +p->quant_table[3][(LL-L) & 0xFF] + p->quant_table[4][(TT-T) & 0xFF];
238 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF];
/*
 * For each target probability i/256, search the coder state whose
 * long-run expected code length under the one_state transition table is
 * minimal, filling best_state[probability][occupancy-bucket].
 * Used by the 2-pass encoder to pick initial states (see encode_init()).
 */
static void find_best_state(uint8_t best_state[256][256], const uint8_t one_state[256]){
        l2tab[i]= log2(i/256.0);                     // log2 of probability i/256

    for(i=0; i<256; i++){
        double best_len[256];
        // only consider start states near i — TODO confirm rationale (local search window)
        for(j=FFMAX(i-10,1); j<FFMIN(i+11,256); j++){
            for(k=0; k<256; k++){
                double newocc[256]={0};
                for(m=0; m<256; m++){
                    // accumulate the expected bits of coding one symbol from state m
                    len -=occ[m]*( p *l2tab[ m]
                                  + (1-p)*l2tab[256-m]);
                if(len < best_len[k]){
                for(m=0; m<256; m++){
                    // advance the state-occupancy distribution one coding step
                    newocc[ one_state[ m]] += occ[m]* p ;
                    newocc[256-one_state[256-m]] += occ[m]*(1-p);
                memcpy(occ, newocc, sizeof(occ));
/*
 * Range-code one symbol: first a zero/non-zero flag (state 0), then for
 * non-zero values a unary-coded exponent (states 1..10), the mantissa
 * bits MSB-first (states 22..31) and, if is_signed, a sign bit
 * (states 11..21). The local put_rac wrapper additionally records 0/1
 * statistics into rc_stat / rc_stat2 when those are non-NULL (pass 1).
 */
static av_always_inline av_flatten void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int is_signed, uint64_t rc_stat[256][2], uint64_t rc_stat2[32][2]){
#define put_rac(C,S,B) \
        rc_stat2[(S)-state][B]++;\
        const int a= FFABS(v);
        const int e= av_log2(a);
        put_rac(c, state+0, 0);                       // flag: value is non-zero
            put_rac(c, state+1+i, 1); //1..10         // unary exponent, continuation bits
            put_rac(c, state+1+i, 0);                 // unary exponent terminator
            for(i=e-1; i>=0; i--){
                put_rac(c, state+22+i, (a>>i)&1); //22..31   mantissa, MSB first
            put_rac(c, state+11 + e, v < 0); //11..21 // sign bit, state indexed by exponent
            put_rac(c, state+1+FFMIN(i,9), 1); //1..10       // large exponents share state 10
            put_rac(c, state+1+9, 0);
            for(i=e-1; i>=0; i--){
                put_rac(c, state+22+FFMIN(i,9), (a>>i)&1); //22..31
            put_rac(c, state+11 + 10, v < 0); //11..21
        put_rac(c, state+0, 1);                       // flag: value is zero
330 static void av_noinline put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed){
331 put_symbol_inline(c, state, v, is_signed, NULL, NULL);
/*
 * Decoder counterpart of put_symbol_inline(): read the zero flag, then
 * the unary exponent, mantissa bits and optional sign, using the same
 * state layout (0 / 1..10 / 22..31 / 11..21).
 */
static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed){
    if(get_rac(c, state+0))
        while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10        unary exponent
        for(i=e-1; i>=0; i--){
            a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31  mantissa, MSB first
        e= -(is_signed && get_rac(c, state+11 + FFMIN(e, 10))); //11..21  0 or -1 sign mask
354 static int av_noinline get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
355 return get_symbol_inline(c, state, is_signed);
/*
 * Update the adaptive Golomb-Rice state after coding residual v:
 * |v| feeds error_sum (used to derive the Rice parameter k), and
 * drift/bias adapt the residual mapping. Counters are halved once
 * count reaches 128 so adaptation stays responsive.
 */
static inline void update_vlc_state(VlcState * const state, const int v){
    int drift= state->drift;
    int count= state->count;
    state->error_sum += FFABS(v);

    if(count == 128){ //FIXME variable
        state->error_sum >>= 1;                       // periodic rescale
            if(state->bias > -128) state->bias--;     // drift too negative: lower bias (clamped to int8 range)
            if(state->bias < 127) state->bias++;      // drift positive: raise bias (clamped)
/*
 * Golomb-Rice encode one residual: fold (v - bias) into range, derive
 * the Rice parameter k from error_sum (the visible loop presumably
 * computes k such that count<<k covers error_sum — TODO confirm against
 * full source), apply the drift-based sign remap, emit a signed
 * sub-exponential Golomb code, then update the adaptive state.
 */
static inline void put_vlc_symbol(PutBitContext *pb, VlcState * const state, int v, int bits){
//printf("final: %d ", v);
    v = fold(v - state->bias, bits);

    while(i < state->error_sum){ //FIXME optimize

    if(k==0 && 2*state->drift <= - state->count) code= v ^ (-1);   // k==0 special case: flip mapping
        code= v ^ ((2*state->drift + state->count)>>31);           // sign remap from drift

//printf("v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code, state->bias, state->error_sum, state->drift, state->count, k);
    set_sr_golomb(pb, code, k, 12, bits);

    update_vlc_state(state, v);
/*
 * Decoder counterpart of put_vlc_symbol(): derive k the same way,
 * read a signed sub-exponential Golomb code, undo the drift-based sign
 * remap and bias fold, and update the adaptive state identically so
 * encoder and decoder stay in sync.
 */
static inline int get_vlc_symbol(GetBitContext *gb, VlcState * const state, int bits){

    while(i < state->error_sum){ //FIXME optimize

    v= get_sr_golomb(gb, k, 12, bits);
//printf("v:%d bias:%d error:%d drift:%d count:%d k:%d", v, state->bias, state->error_sum, state->drift, state->count, k);

    if(k==0 && 2*state->drift <= - state->count) v ^= (-1);    // mirror of the encoder's k==0 case
        v ^= ((2*state->drift + state->count)>>31);            // undo sign remap

    ret= fold(v + state->bias, bits);                          // re-apply bias, fold into range

    update_vlc_state(state, v);
//printf("final: %d\n", ret);
444 #if CONFIG_FFV1_ENCODER
/*
 * Encode one line of a plane. With s->ac the residuals go through the
 * range coder; otherwise they are Golomb-Rice coded, with run-length
 * coding of zero residuals whenever the context is 0 (run lengths are
 * governed by ff_log2_run[]). Logs and bails out if the remaining
 * output buffer is too small for a worst-case line.
 */
static av_always_inline int encode_line(FFV1Context *s, int w,
                                        int plane_index, int bits)
{
    PlaneContext * const p= &s->plane[plane_index];
    RangeCoder * const c= &s->c;
    int run_index= s->run_index;

        // worst-case space check, range-coder path
        if(c->bytestream_end - c->bytestream < w*20){
            av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        // worst-case space check, Golomb-Rice path
        if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < w*4){
            av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");

        context= get_context(p, sample[0]+x, sample[1]+x, sample[2]+x);
        diff= sample[0][x] - predict(sample[0]+x, sample[1]+x);

        diff= fold(diff, bits);

            if(s->flags & CODEC_FLAG_PASS1){
                // pass 1: gather range-coder statistics alongside coding
                put_symbol_inline(c, p->state[context], diff, 1, s->rc_stat, s->rc_stat2[p->quant_table_index][context]);
                put_symbol_inline(c, p->state[context], diff, 1, NULL, NULL);
            if(context == 0) run_mode=1;               // zero context enables run-length mode
                while(run_count >= 1<<ff_log2_run[run_index]){
                    run_count -= 1<<ff_log2_run[run_index];
                    put_bits(&s->pb, 1, 1);            // one full run of zeros
                // partial run: terminator + literal remainder
                put_bits(&s->pb, 1 + ff_log2_run[run_index], run_count);
                if(run_index) run_index--;
// printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, (int)put_bits_count(&s->pb));
                put_vlc_symbol(&s->pb, &p->vlc_state[context], diff, bits);
        // flush any pending run at end of line
        while(run_count >= 1<<ff_log2_run[run_index]){
            run_count -= 1<<ff_log2_run[run_index];
            put_bits(&s->pb, 1, 1);
            put_bits(&s->pb, 1, 1);
    s->run_index= run_index;
/*
 * Encode one plane: a ring of 2 line buffers (3 when context_model uses
 * two previous lines), each padded by 3 samples on either side, is
 * filled from the source (shifting down when 16-bit samples are packed
 * at the MSB) and handed to encode_line() row by row.
 */
static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
    const int ring_size= s->avctx->context_model ? 3 : 2;

    memset(s->sample_buffer, 0, ring_size*(w+6)*sizeof(*s->sample_buffer));

        for(i=0; i<ring_size; i++)
            sample[i]= s->sample_buffer + (w+6)*((h+i-y)%ring_size) + 3;

        sample[0][-1]= sample[1][0 ];     // left edge: replicate from line above
        sample[1][ w]= sample[1][w-1];    // right edge: replicate last sample

        if(s->bits_per_raw_sample<=8){
                sample[0][x]= src[x + stride*y];
            encode_line(s, w, sample, plane_index, 8);
            if(s->packed_at_lsb){
                    sample[0][x]= ((uint16_t*)(src + stride*y))[x];
                    // samples stored in the high bits: shift down to LSB
                    sample[0][x]= ((uint16_t*)(src + stride*y))[x] >> (16 - s->bits_per_raw_sample);
            encode_line(s, w, sample, plane_index, s->bits_per_raw_sample);
//STOP_TIMER("encode line")}
/*
 * Encode a packed RGB32 frame: per pixel the color transform (elided
 * here) produces g/b/r (and optionally alpha) planes that are encoded
 * with 9-bit lines; plane index (p+1)/2 shares contexts between b and r.
 */
static void encode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
    const int ring_size= s->avctx->context_model ? 3 : 2;
    int16_t *sample[4][3];

    memset(s->sample_buffer, 0, ring_size*4*(w+6)*sizeof(*s->sample_buffer));

        for(i=0; i<ring_size; i++)
                sample[p][i]= s->sample_buffer + p*ring_size*(w+6) + ((h+i-y)%ring_size)*(w+6) + 3;

            unsigned v= src[x + stride*y];
//            assert(g>=0 && b>=0 && r>=0);
//            assert(g<256 && b<512 && r<512);
        for(p=0; p<3 + s->transparency; p++){
            sample[p][0][-1]= sample[p][1][0 ];   // left-edge padding
            sample[p][1][ w]= sample[p][1][w-1];  // right-edge padding
            encode_line(s, w, sample[p], (p+1)/2, 9);
/*
 * Store one 256-entry quantization sub-table compactly: only the run
 * lengths of its constant segments over the first half (indices 0..127)
 * are coded; the second half follows by the table's odd symmetry.
 */
static void write_quant_table(RangeCoder *c, int16_t *quant_table){
    uint8_t state[CONTEXT_SIZE];
    memset(state, 128, sizeof(state));

    for(i=1; i<128 ; i++){
        if(quant_table[i] != quant_table[i-1]){
            put_symbol(c, state, i-last-1, 0);     // length of the previous constant run
    put_symbol(c, state, i-last-1, 0);             // final run, up to index 127
622 static void write_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
625 write_quant_table(c, quant_table[i]);
/*
 * Write the per-frame (keyframe) header through slice 0's range coder:
 * version, coder type, custom state-transition deltas, colorspace,
 * bit depth, chroma layout, transparency, quant tables and, for
 * version >= 2, per-slice geometry and quant-table indices.
 */
static void write_header(FFV1Context *f){
    uint8_t state[CONTEXT_SIZE];
    RangeCoder * const c= &f->slice_context[0]->c;

    memset(state, 128, sizeof(state));

        put_symbol(c, state, f->version, 0);
        put_symbol(c, state, f->ac, 0);
            // custom transition table, stored as signed deltas vs the default
            for(i=1; i<256; i++){
                put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
        put_symbol(c, state, f->colorspace, 0); //YUV cs type
            put_symbol(c, state, f->bits_per_raw_sample, 0);
        put_rac(c, state, f->chroma_planes);
        put_symbol(c, state, f->chroma_h_shift, 0);
        put_symbol(c, state, f->chroma_v_shift, 0);
        put_rac(c, state, f->transparency);

        write_quant_tables(c, f->quant_table);
            put_symbol(c, state, f->slice_count, 0);
            for(i=0; i<f->slice_count; i++){
                FFV1Context *fs= f->slice_context[i];
                // slice geometry coded as fractions of the slice grid
                put_symbol(c, state, (fs->slice_x     +1)*f->num_h_slices / f->width   , 0);
                put_symbol(c, state, (fs->slice_y     +1)*f->num_v_slices / f->height  , 0);
                put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
                put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
                for(j=0; j<f->plane_count; j++){
                    put_symbol(c, state, f->plane[j].quant_table_index, 0);
                    av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
667 #endif /* CONFIG_FFV1_ENCODER */
/*
 * Initialisation shared by encoder and decoder: copy flags and
 * dimensions from the AVCodecContext and set up DSP/frame defaults.
 */
static av_cold int common_init(AVCodecContext *avctx){
    FFV1Context *s = avctx->priv_data;

    s->flags= avctx->flags;

    avcodec_get_frame_defaults(&s->picture);

    ff_dsputil_init(&s->dsp, avctx);

    s->width = avctx->width;
    s->height= avctx->height;

    assert(s->width && s->height);
/*
 * Allocate per-plane coder state for every slice context and rebuild
 * each slice's range-coder transition tables from state_transition.
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int init_slice_state(FFV1Context *f){

    for(i=0; i<f->slice_count; i++){
        FFV1Context *fs= f->slice_context[i];
        fs->plane_count= f->plane_count;
        fs->transparency= f->transparency;
        for(j=0; j<f->plane_count; j++){
            PlaneContext * const p= &fs->plane[j];

                // lazily (re)allocate; sized by the plane's context count
                if(!p-> state) p-> state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t));
                    return AVERROR(ENOMEM);
                if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(VlcState));
                    return AVERROR(ENOMEM);

//FIXME only redo if state_transition changed
            // zero side of the transition table is the mirror of the one side
            for(j=1; j<256; j++){
                fs->c.one_state [    j]= fs->state_transition[j];
                fs->c.zero_state[256-j]= 256-fs->c.one_state [j];
/*
 * Create one FFV1Context clone per slice on an num_h_slices x
 * num_v_slices grid, compute each slice's rectangle and allocate its
 * sample ring buffer.
 * NOTE(review): the av_mallocz() for fs is used (via memcpy) without a
 * NULL check in this view — confirm against the full source.
 */
static av_cold int init_slice_contexts(FFV1Context *f){

    f->slice_count= f->num_h_slices * f->num_v_slices;

    for(i=0; i<f->slice_count; i++){
        FFV1Context *fs= av_mallocz(sizeof(*fs));
        int sx= i % f->num_h_slices;
        int sy= i / f->num_h_slices;
        // slice bounds: evenly split the frame, rounding via integer division
        int sxs= f->avctx->width * sx    / f->num_h_slices;
        int sxe= f->avctx->width *(sx+1) / f->num_h_slices;
        int sys= f->avctx->height* sy    / f->num_v_slices;
        int sye= f->avctx->height*(sy+1) / f->num_v_slices;
        f->slice_context[i]= fs;
        memcpy(fs, f, sizeof(*fs));           // slice inherits all global settings
        memset(fs->rc_stat2, 0, sizeof(fs->rc_stat2));

        fs->slice_width = sxe - sxs;
        fs->slice_height= sye - sys;

        // 3 planes x (2 lines + padding) worth of samples (RGB needs 4x)
        fs->sample_buffer = av_malloc(3*4 * (fs->width+6) * sizeof(*fs->sample_buffer));
        if (!fs->sample_buffer)
            return AVERROR(ENOMEM);
753 static int allocate_initial_states(FFV1Context *f){
756 for(i=0; i<f->quant_table_count; i++){
757 f->initial_states[i]= av_malloc(f->context_count[i]*sizeof(*f->initial_states[i]));
758 if(!f->initial_states[i])
759 return AVERROR(ENOMEM);
760 memset(f->initial_states[i], 128, f->context_count[i]*sizeof(*f->initial_states[i]));
765 #if CONFIG_FFV1_ENCODER
/*
 * Write the version-2 global header (extradata): version, coder type,
 * state-transition deltas, format description, slice grid, all quant
 * table sets and, when present, the non-default initial states (coded
 * as deltas against the previous context's states).
 * NOTE(review): the av_malloc() of extradata is not NULL-checked in
 * this view — confirm against the full source.
 */
static int write_extra_header(FFV1Context *f){
    RangeCoder * const c= &f->c;
    uint8_t state[CONTEXT_SIZE];
    uint8_t state2[32][CONTEXT_SIZE];

    memset(state2, 128, sizeof(state2));
    memset(state, 128, sizeof(state));

    // worst-case size for all quant tables + initial states
    f->avctx->extradata= av_malloc(f->avctx->extradata_size= 10000 + (11*11*5*5*5+11*11*11)*32);
    ff_init_range_encoder(c, f->avctx->extradata, f->avctx->extradata_size);
    ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);

    put_symbol(c, state, f->version, 0);
    put_symbol(c, state, f->ac, 0);
        for(i=1; i<256; i++){
            put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
    put_symbol(c, state, f->colorspace, 0); //YUV cs type
    put_symbol(c, state, f->bits_per_raw_sample, 0);
    put_rac(c, state, f->chroma_planes);
    put_symbol(c, state, f->chroma_h_shift, 0);
    put_symbol(c, state, f->chroma_v_shift, 0);
    put_rac(c, state, f->transparency);
    put_symbol(c, state, f->num_h_slices-1, 0);
    put_symbol(c, state, f->num_v_slices-1, 0);

    put_symbol(c, state, f->quant_table_count, 0);
    for(i=0; i<f->quant_table_count; i++)
        write_quant_tables(c, f->quant_tables[i]);

    for(i=0; i<f->quant_table_count; i++){
        // does this table have any non-neutral (!=128) initial states?
        for(j=0; j<f->context_count[i]*CONTEXT_SIZE; j++)
            if(f->initial_states[i] && f->initial_states[i][0][j] != 128)
        if(j<f->context_count[i]*CONTEXT_SIZE){
            put_rac(c, state, 1);
            for(j=0; j<f->context_count[i]; j++){
                for(k=0; k<CONTEXT_SIZE; k++){
                    // delta-code each state against the previous context's
                    int pred= j ? f->initial_states[i][j-1][k] : 128;
                    put_symbol(c, state2[k], (int8_t)(f->initial_states[i][j][k]-pred), 1);
            put_rac(c, state, 0);

    f->avctx->extradata_size= ff_rac_terminate(c);
/*
 * Greedy optimisation of the state-transition table from pass-1
 * statistics: repeatedly swap nearby states i and i2 (and their mirror
 * states 256-i / 256-i2) whenever the swapped assignment costs fewer
 * expected bits, remapping all references in stt[] accordingly.
 */
static int sort_stt(FFV1Context *s, uint8_t stt[256]){
    int i,i2,changed,print=0;

        for(i=12; i<244; i++){
            for(i2=i+1; i2<245 && i2<i+4; i2++){
// expected bits of coding the observed 0/1 counts of `old` with probability new/256
#define COST(old, new) \
    s->rc_stat[old][0]*-log2((256-(new))/256.0)\
   +s->rc_stat[old][1]*-log2(     (new) /256.0)

// same, summed with the mirrored state
#define COST2(old, new) \
   +COST(256-(old), 256-(new))

                double size0= COST2(i, i ) + COST2(i2, i2);
                double sizeX= COST2(i, i2) + COST2(i2, i );
                if(sizeX < size0 && i!=128 && i2!=128){   // 128 is the symmetry pivot, never moved
                    FFSWAP(int, stt[    i], stt[    i2]);
                    FFSWAP(int, s->rc_stat[i    ][0],s->rc_stat[    i2][0]);
                    FFSWAP(int, s->rc_stat[i    ][1],s->rc_stat[    i2][1]);
                        FFSWAP(int, stt[256-i], stt[256-i2]);
                        FFSWAP(int, s->rc_stat[256-i][0],s->rc_stat[256-i2][0]);
                        FFSWAP(int, s->rc_stat[256-i][1],s->rc_stat[256-i2][1]);
                    // remap every transition that pointed at the swapped states
                    for(j=1; j<256; j++){
                        if     (stt[j] == i ) stt[j] = i2;
                        else if(stt[j] == i2) stt[j] = i ;
                            if     (stt[256-j] == 256-i ) stt[256-j] = 256-i2;
                            else if(stt[256-j] == 256-i2) stt[256-j] = 256-i ;
/*
 * Encoder init: choose coder type, derive bits_per_raw_sample /
 * colorspace / chroma layout from the pixel format, build the
 * quantization tables and per-plane contexts, optionally parse 2-pass
 * statistics (deriving an optimized transition table and initial
 * states), write the extradata header and set up slice contexts.
 */
static av_cold int encode_init(AVCodecContext *avctx)
    FFV1Context *s = avctx->priv_data;

    s->ac= avctx->coder_type ? 2:0;     // 2 selects the range coder with custom state transitions

        s->state_transition[i]=ver2_state[i];

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV444P9:
    case PIX_FMT_YUV422P9:
    case PIX_FMT_YUV420P9:
        if (!avctx->bits_per_raw_sample)
            s->bits_per_raw_sample = 9;
        /* fallthrough */
    case PIX_FMT_YUV444P10:
    case PIX_FMT_YUV420P10:
    case PIX_FMT_YUV422P10:
        s->packed_at_lsb = 1;
        if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
            s->bits_per_raw_sample = 10;
        /* fallthrough */
    case PIX_FMT_YUV444P16:
    case PIX_FMT_YUV422P16:
    case PIX_FMT_YUV420P16:
        if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) {
            s->bits_per_raw_sample = 16;
        } else if (!s->bits_per_raw_sample){
            s->bits_per_raw_sample = avctx->bits_per_raw_sample;
        if(s->bits_per_raw_sample <=8){
            av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n");
            av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample of more than 8 needs -coder 1 currently\n");
        s->version= FFMAX(s->version, 1);    // >8 bit needs at least bitstream version 1
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV440P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV411P:
    case PIX_FMT_YUV410P:
        s->chroma_planes= av_pix_fmt_descriptors[avctx->pix_fmt].nb_components < 3 ? 0 : 1;
    case PIX_FMT_YUVA444P:
    case PIX_FMT_YUVA420P:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
    if (s->transparency) {
        av_log(avctx, AV_LOG_WARNING, "Storing alpha plane, this will require a recent FFV1 decoder to playback!\n");

    // table set 0: 3 context inputs; table set 1: 5 context inputs
    for(i=0; i<256; i++){
        s->quant_table_count=2;
        if(s->bits_per_raw_sample <=8){
            s->quant_tables[0][0][i]=           quant11[i];
            s->quant_tables[0][1][i]=        11*quant11[i];
            s->quant_tables[0][2][i]=     11*11*quant11[i];
            s->quant_tables[1][0][i]=           quant11[i];
            s->quant_tables[1][1][i]=        11*quant11[i];
            s->quant_tables[1][2][i]=     11*11*quant5 [i];
            s->quant_tables[1][3][i]=   5*11*11*quant5 [i];
            s->quant_tables[1][4][i]= 5*5*11*11*quant5 [i];
            s->quant_tables[0][0][i]=           quant9_10bit[i];
            s->quant_tables[0][1][i]=        11*quant9_10bit[i];
            s->quant_tables[0][2][i]=     11*11*quant9_10bit[i];
            s->quant_tables[1][0][i]=           quant9_10bit[i];
            s->quant_tables[1][1][i]=        11*quant9_10bit[i];
            s->quant_tables[1][2][i]=     11*11*quant5_10bit[i];
            s->quant_tables[1][3][i]=   5*11*11*quant5_10bit[i];
            s->quant_tables[1][4][i]= 5*5*11*11*quant5_10bit[i];
    // context counts halved: sign symmetry folds the context space
    s->context_count[0]= (11*11*11+1)/2;
    s->context_count[1]= (11*11*5*5*5+1)/2;
    memcpy(s->quant_table, s->quant_tables[avctx->context_model], sizeof(s->quant_table));

    for(i=0; i<s->plane_count; i++){
        PlaneContext * const p= &s->plane[i];

        memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table));
        p->quant_table_index= avctx->context_model;
        p->context_count= s->context_count[p->quant_table_index];

    if(allocate_initial_states(s) < 0)
        return AVERROR(ENOMEM);

    avctx->coded_frame= &s->picture;

    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);

    if(avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
        for(i=0; i<s->quant_table_count; i++){
            s->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*s->rc_stat2[i]));
                return AVERROR(ENOMEM);
        // parse the pass-1 statistics file
        char *p= avctx->stats_in;
        uint8_t best_state[256][256];
        av_assert0(s->version>=2);
            for(j=0; j<256; j++){
                    s->rc_stat[j][i]= strtol(p, &next, 0);
                        av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d [%s]\n", j,i,p);
            for(i=0; i<s->quant_table_count; i++){
                for(j=0; j<s->context_count[i]; j++){
                    for(k=0; k<32; k++){
                            s->rc_stat2[i][j][k][m]= strtol(p, &next, 0);
                                av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d %d %d [%s]\n", i,j,k,m,p);
            gob_count= strtol(p, &next, 0);
            if(next==p || gob_count <0){
                av_log(avctx, AV_LOG_ERROR, "2Pass file invalid\n");
            while(*p=='\n' || *p==' ') p++;

        sort_stt(s, s->state_transition);

        find_best_state(best_state, s->state_transition);

        // derive initial coder states from the gathered 0/1 statistics
        for(i=0; i<s->quant_table_count; i++){
            for(j=0; j<s->context_count[i]; j++){
                for(k=0; k<32; k++){
                    if(s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]){
                        p=256.0*s->rc_stat2[i][j][k][1] / (s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]);
                        s->initial_states[i][j][k]= best_state[av_clip(round(p), 1, 255)][av_clip((s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1])/gob_count, 0, 255)];

        write_extra_header(s);

    if(init_slice_contexts(s) < 0)
    if(init_slice_state(s) < 0)

#define STATS_OUT_SIZE 1024*1024*6
    if(avctx->flags & CODEC_FLAG_PASS1){
        avctx->stats_out= av_mallocz(STATS_OUT_SIZE);
        // every slice needs its own statistics buffers for pass 1
        for(i=0; i<s->quant_table_count; i++){
            for(j=0; j<s->slice_count; j++){
                FFV1Context *sf= s->slice_context[j];
                av_assert0(!sf->rc_stat2[i]);
                sf->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*sf->rc_stat2[i]));
                if(!sf->rc_stat2[i])
                    return AVERROR(ENOMEM);
1077 #endif /* CONFIG_FFV1_ENCODER */
/*
 * Reset all per-slice coder state to the start-of-keyframe condition:
 * range-coder states from initial_states (or neutral 128), and the
 * Golomb-Rice VLC states to their documented defaults.
 */
static void clear_state(FFV1Context *f){

    for(si=0; si<f->slice_count; si++){
        FFV1Context *fs= f->slice_context[si];
        for(i=0; i<f->plane_count; i++){
            PlaneContext *p= &fs->plane[i];

            p->interlace_bit_state[0]= 128;
            p->interlace_bit_state[1]= 128;

                if(f->initial_states[p->quant_table_index]){
                    memcpy(p->state, f->initial_states[p->quant_table_index], CONTEXT_SIZE*p->context_count);
                    memset(p->state, 128, CONTEXT_SIZE*p->context_count);   // neutral state
                for(j=0; j<p->context_count; j++){
                    p->vlc_state[j].drift= 0;
                    p->vlc_state[j].error_sum= 4; //FFMAX((RANGE + 32)/64, 2);
                    p->vlc_state[j].bias= 0;
                    p->vlc_state[j].count= 1;
1108 #if CONFIG_FFV1_ENCODER
/*
 * Thread worker: encode one slice. YUV path (colorspace 0) encodes
 * luma, optionally subsampled chroma, and optionally alpha via
 * encode_plane(); otherwise the slice goes through encode_rgb_frame().
 */
static int encode_slice(AVCodecContext *c, void *arg){
    FFV1Context *fs= *(void**)arg;
    FFV1Context *f= fs->avctx->priv_data;
    int width = fs->slice_width;
    int height= fs->slice_height;
    AVFrame * const p= &f->picture;
    const int ps= (f->bits_per_raw_sample>8)+1;  // bytes per sample

    if(f->colorspace==0){
        // ceil-divide the slice size by the chroma subsampling factors
        const int chroma_width = -((-width )>>f->chroma_h_shift);
        const int chroma_height= -((-height)>>f->chroma_v_shift);
        const int cx= x>>f->chroma_h_shift;
        const int cy= y>>f->chroma_v_shift;

        encode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);

        if (f->chroma_planes){
            encode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
            encode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
        if (fs->transparency)
            encode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
        encode_rgb_frame(fs, (uint32_t*)(p->data[0]) + ps*x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
/*
 * Encode one frame: write the keyframe flag (and header) through slice
 * 0's coder, encode all slices in parallel, concatenate the slice
 * payloads (each tail-tagged with its 24-bit length), and on pass 1
 * periodically serialise the gathered statistics into stats_out.
 */
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    FFV1Context *f = avctx->priv_data;
    RangeCoder * const c= &f->slice_context[0]->c;
    AVFrame * const p= &f->picture;
    uint8_t keystate=128;

        // worst-case packet size for all planes
        (ret = av_new_packet(pkt, avctx->width*avctx->height*((8*2+1+1)*4)/8
                                  + FF_MIN_BUFFER_SIZE)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");

    ff_init_range_encoder(c, pkt->data, pkt->size);
    ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);

    p->pict_type= AV_PICTURE_TYPE_I;

    if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
        put_rac(c, &keystate, 1);       // keyframe: full header + state reset follow
        put_rac(c, &keystate, 0);       // inter frame: previous state is kept

        // Golomb-Rice mode: switch slice 0 from range coder to bit writer
        used_count += ff_rac_terminate(c);
//printf("pos=%d\n", used_count);
        init_put_bits(&f->slice_context[0]->pb, pkt->data + used_count, pkt->size - used_count);
        // install the (possibly optimized) custom transition table
        for(i=1; i<256; i++){
            c->one_state[i]= f->state_transition[i];
            c->zero_state[256-i]= 256-c->one_state[i];

    // give each remaining slice its own output region
    for(i=1; i<f->slice_count; i++){
        FFV1Context *fs= f->slice_context[i];
        uint8_t *start = pkt->data + (pkt->size-used_count)*i/f->slice_count;
        int len = pkt->size/f->slice_count;

            ff_init_range_encoder(&fs->c, start, len);
            init_put_bits(&fs->pb, start, len);

    avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));

    // pack the slices back-to-back, appending each slice's byte length
    for(i=0; i<f->slice_count; i++){
        FFV1Context *fs= f->slice_context[i];

            put_rac(&fs->c, &state, 0);
            bytes= ff_rac_terminate(&fs->c);
            flush_put_bits(&fs->pb); //nicer padding FIXME
            bytes= used_count + (put_bits_count(&fs->pb)+7)/8;
            av_assert0(bytes < pkt->size/f->slice_count);
            memmove(buf_p, fs->ac ? fs->c.bytestream_start : fs->pb.buf, bytes);
            av_assert0(bytes < (1<<24));
            AV_WB24(buf_p+bytes, bytes);   // 24-bit slice size trailer

    if((avctx->flags&CODEC_FLAG_PASS1) && (f->picture_number&31)==0){
        // every 32 frames: merge per-slice statistics and dump them as text
        char *p= avctx->stats_out;
        char *end= p + STATS_OUT_SIZE;

        memset(f->rc_stat, 0, sizeof(f->rc_stat));
        for(i=0; i<f->quant_table_count; i++)
            memset(f->rc_stat2[i], 0, f->context_count[i]*sizeof(*f->rc_stat2[i]));

        for(j=0; j<f->slice_count; j++){
            FFV1Context *fs= f->slice_context[j];
            for(i=0; i<256; i++){
                f->rc_stat[i][0] += fs->rc_stat[i][0];
                f->rc_stat[i][1] += fs->rc_stat[i][1];
            for(i=0; i<f->quant_table_count; i++){
                for(k=0; k<f->context_count[i]; k++){
                    for(m=0; m<32; m++){
                        f->rc_stat2[i][k][m][0] += fs->rc_stat2[i][k][m][0];
                        f->rc_stat2[i][k][m][1] += fs->rc_stat2[i][k][m][1];

        for(j=0; j<256; j++){
            snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat[j][0], f->rc_stat[j][1]);
        snprintf(p, end-p, "\n");

        for(i=0; i<f->quant_table_count; i++){
            for(j=0; j<f->context_count[i]; j++){
                for(m=0; m<32; m++){
                    snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat2[i][j][m][0], f->rc_stat2[i][j][m][1]);
        snprintf(p, end-p, "%d\n", f->gob_count);
    } else if(avctx->flags&CODEC_FLAG_PASS1)
        avctx->stats_out[0] = '\0';

    f->picture_number++;
    pkt->size   = buf_p - pkt->data;
    pkt->flags |= AV_PKT_FLAG_KEY*p->key_frame;
1275 #endif /* CONFIG_FFV1_ENCODER */
/*
 * Teardown shared by encoder and decoder: release the last decoded
 * frame, then free all per-slice plane states, sample buffers,
 * statistics arrays, initial states and the slice contexts themselves.
 */
static av_cold int common_end(AVCodecContext *avctx){
    FFV1Context *s = avctx->priv_data;

    if (avctx->codec->decode && s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    for(j=0; j<s->slice_count; j++){
        FFV1Context *fs= s->slice_context[j];
        for(i=0; i<s->plane_count; i++){
            PlaneContext *p= &fs->plane[i];

            av_freep(&p->state);
            av_freep(&p->vlc_state);
        av_freep(&fs->sample_buffer);

    av_freep(&avctx->stats_out);
    for(j=0; j<s->quant_table_count; j++){
        av_freep(&s->initial_states[j]);
        for(i=0; i<s->slice_count; i++){
            FFV1Context *sf= s->slice_context[i];
            av_freep(&sf->rc_stat2[j]);
        av_freep(&s->rc_stat2[j]);

    for(i=0; i<s->slice_count; i++){
        av_freep(&s->slice_context[i]);
/*
 * Decode one line; exact mirror of encode_line(). Range-coder path
 * reads symbols directly; Golomb-Rice path first expands zero runs
 * (lengths governed by ff_log2_run[]) and then reads residuals.
 * The reconstructed sample is prediction + diff, wrapped to bits.
 */
static av_always_inline void decode_line(FFV1Context *s, int w,
                                         int plane_index, int bits)
{
    PlaneContext * const p= &s->plane[plane_index];
    RangeCoder * const c= &s->c;
    int run_index= s->run_index;
    int diff, context, sign;

        context= get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);

        av_assert2(context < p->context_count);

            diff= get_symbol_inline(c, p->state[context], 1);

        if(context == 0 && run_mode==0) run_mode=1;   // zero context enables run-length mode

            if(run_count==0 && run_mode==1){
                if(get_bits1(&s->gb)){
                    run_count = 1<<ff_log2_run[run_index];   // full run of zeros
                    if(x + run_count <= w) run_index++;
                    // partial run: read the literal remainder
                    if(ff_log2_run[run_index]) run_count = get_bits(&s->gb, ff_log2_run[run_index]);
                    if(run_index) run_index--;
                diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
            diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
// printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, get_bits_count(&s->gb));

        if(sign) diff= -diff;   // negative contexts carry a flipped sign

        sample[1][x]= (predict(sample[1] + x, sample[0] + x) + diff) & ((1<<bits)-1);

    s->run_index= run_index;
/* decode_plane(): decode a w x h plane into src (stride in bytes).
 * Uses a two-line int16_t ring buffer (previous / current line), each
 * line padded by 3 guard samples on the left and extra samples on the
 * right. Output is 8-bit bytes, or 16-bit words for deeper content —
 * left-shifted to the MSBs unless the stream packs samples at the LSB.
 * NOTE(review): elided listing — the y/x loops and line-swap tail are
 * on lines not visible here. */
1373 static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
/* +3: skip the left guard samples of each buffered line */
1376 sample[0]=s->sample_buffer +3;
1377 sample[1]=s->sample_buffer+w+6+3;
/* top border: both lines start as zeros */
1381 memset(s->sample_buffer, 0, 2*(w+6)*sizeof(*s->sample_buffer));
1384 int16_t *temp = sample[0]; //FIXME try a normal buffer
1386 sample[0]= sample[1];
/* replicate edge samples so the predictor has valid neighbours */
1389 sample[1][-1]= sample[0][0 ];
1390 sample[0][ w]= sample[0][w-1];
1393 if(s->avctx->bits_per_raw_sample <= 8){
1394 decode_line(s, w, sample, plane_index, 8);
1396 src[x + stride*y]= sample[1][x];
1399 decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
1400 if(s->packed_at_lsb){
1402 ((uint16_t*)(src + stride*y))[x]= sample[1][x];
/* scale e.g. 9/10-bit samples up to full 16-bit range */
1406 ((uint16_t*)(src + stride*y))[x]= sample[1][x] << (16 - s->avctx->bits_per_raw_sample);
1410 //STOP_TIMER("decode-line")}
/* decode_rgb_frame(): decode a colorspace-1 (RGB) frame into packed
 * 32-bit words. Up to four planes (G, B, R, optional A) are decoded
 * with 9-bit lines — the chroma-difference transform widens B/R by one
 * bit — then recombined per pixel.
 * NOTE(review): elided listing — the y/x loops and the inverse
 * color-transform arithmetic (between lines 1444 and 1452) are not
 * visible here. */
1414 static void decode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
1416 int16_t *sample[4][2];
/* per-plane previous/current line pairs inside one shared buffer */
1418 sample[x][0] = s->sample_buffer + x*2 *(w+6) + 3;
1419 sample[x][1] = s->sample_buffer + (x*2+1)*(w+6) + 3;
1424 memset(s->sample_buffer, 0, 8*(w+6)*sizeof(*s->sample_buffer));
1427 for(p=0; p<3 + s->transparency; p++){
1428 int16_t *temp = sample[p][0]; //FIXME try a normal buffer
1430 sample[p][0]= sample[p][1];
/* edge replication for the predictor, as in decode_plane() */
1433 sample[p][1][-1]= sample[p][0][0 ];
1434 sample[p][0][ w]= sample[p][0][w-1];
/* plane_index (p+1)/2: G uses 0, B and R share 1, A uses 2 */
1435 decode_line(s, w, sample[p], (p+1)/2, 9);
1438 int g= sample[0][1][x];
1439 int b= sample[1][1][x];
1440 int r= sample[2][1][x];
1441 int a= sample[3][1][x];
1443 // assert(g>=0 && b>=0 && r>=0);
1444 // assert(g<256 && b<512 && r<512);
/* pack as BGRA into one little-endian 32-bit word */
1452 src[x + stride*y]= b + (g<<8) + (r<<16) + (a<<24);
/* decode_slice(): avctx->execute() worker decoding one slice.
 * arg points at this slice's FFV1Context pointer (see the execute call
 * in decode_frame with stride sizeof(void*)). Dispatches per colorspace:
 * planar YUV(A) via decode_plane, RGB via decode_rgb_frame.
 * NOTE(review): elided listing — x/y slice offsets and the return are
 * on lines not visible here. */
1457 static int decode_slice(AVCodecContext *c, void *arg){
1458 FFV1Context *fs= *(void**)arg;
1459 FFV1Context *f= fs->avctx->priv_data;
1460 int width = fs->slice_width;
1461 int height= fs->slice_height;
/* ps: bytes per sample in the output picture (1 or 2) */
1464 const int ps= (c->bits_per_raw_sample>8)+1;
1465 AVFrame * const p= &f->picture;
1467 av_assert1(width && height);
1468 if(f->colorspace==0){
/* ceil-divide the slice dimensions by the chroma shifts */
1469 const int chroma_width = -((-width )>>f->chroma_h_shift);
1470 const int chroma_height= -((-height)>>f->chroma_v_shift);
1471 const int cx= x>>f->chroma_h_shift;
1472 const int cy= y>>f->chroma_v_shift;
1473 decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1475 if (f->chroma_planes){
1476 decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1477 decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1479 if (fs->transparency)
1480 decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
/* RGB path: linesize is in bytes, the buffer is addressed in uint32_t */
1482 decode_rgb_frame(fs, (uint32_t*)p->data[0] + ps*x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
/* read_quant_table(): read one run-length-coded quantization table.
 * The first 128 entries are decoded as runs of equal value v (scaled by
 * `scale`); the negative half is then mirrored from the positive half.
 * Returns -1 if a run would overflow the 128-entry half; on success,
 * presumably returns the number of distinct levels for context-count
 * accumulation — the return statement is elided from this listing. */
1490 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale){
1493 uint8_t state[CONTEXT_SIZE];
1495 memset(state, 128, sizeof(state));
/* v counts distinct levels; i (advanced in the elided run loop) walks
 * the table until the positive half is filled */
1497 for(v=0; i<128 ; v++){
1498 int len= get_symbol(c, state, 0) + 1;
1500 if(len + i > 128) return -1;
1503 quant_table[i] = scale*v;
1506 //if(i%16==0) printf("\n");
/* mirror to the negative half: q[-x] == -q[x] */
1510 for(i=1; i<128; i++){
1511 quant_table[256-i]= -quant_table[i];
1513 quant_table[128]= -quant_table[127];
/* read_quant_tables(): read all MAX_CONTEXT_INPUTS quantization tables
 * and accumulate the total context count as the product of the per-table
 * level counts. Rejects streams whose context count exceeds 32768.
 * Returns (context_count+1)/2 — contexts come in sign-symmetric pairs.
 * NOTE(review): the loop header and error return are elided here. */
1518 static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
1520 int context_count=1;
1523 context_count*= read_quant_table(c, quant_table[i], context_count);
1524 if(context_count > 32768U){
1528 return (context_count+1)/2;
/* read_extra_header(): parse the range-coded global header stored in
 * avctx->extradata (FFV1 version >= 2): version, coder type, optional
 * custom state-transition table, colorspace, bit depth, chroma shifts,
 * transparency, slice grid, quantization tables and optional per-table
 * initial context states.
 * NOTE(review): elided listing — version checks, error returns and
 * closing braces are on lines not visible here. */
1531 static int read_extra_header(FFV1Context *f){
1532 RangeCoder * const c= &f->c;
1533 uint8_t state[CONTEXT_SIZE];
/* one state vector per CONTEXT_SIZE position for the initial-state deltas */
1535 uint8_t state2[32][CONTEXT_SIZE];
1537 memset(state2, 128, sizeof(state2));
1538 memset(state, 128, sizeof(state));
1540 ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size);
1541 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1543 f->version= get_symbol(c, state, 0);
1544 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
/* custom range-coder state transition table, coded as deltas against
 * the default one_state table */
1546 for(i=1; i<256; i++){
1547 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1550 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1551 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1552 get_rac(c, state); //no chroma = false
1553 f->chroma_h_shift= get_symbol(c, state, 0);
1554 f->chroma_v_shift= get_symbol(c, state, 0);
1555 f->transparency= get_rac(c, state);
1556 f->plane_count= 2 + f->transparency;
1557 f->num_h_slices= 1 + get_symbol(c, state, 0);
1558 f->num_v_slices= 1 + get_symbol(c, state, 0);
/* sanity: at least one pixel per slice in each dimension */
1559 if(f->num_h_slices > (unsigned)f->width || f->num_v_slices > (unsigned)f->height){
1560 av_log(f->avctx, AV_LOG_ERROR, "too many slices\n");
1564 f->quant_table_count= get_symbol(c, state, 0);
1565 if(f->quant_table_count > (unsigned)MAX_QUANT_TABLES)
1567 for(i=0; i<f->quant_table_count; i++){
1568 if((f->context_count[i]= read_quant_tables(c, f->quant_tables[i])) < 0){
1569 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1574 if(allocate_initial_states(f) < 0)
1575 return AVERROR(ENOMEM);
/* optional per-table initial states, delta-coded against the previous
 * context (128 for the first one) */
1577 for(i=0; i<f->quant_table_count; i++){
1578 if(get_rac(c, state)){
1579 for(j=0; j<f->context_count[i]; j++){
1580 for(k=0; k<CONTEXT_SIZE; k++){
1581 int pred= j ? f->initial_states[i][j-1][k] : 128;
1582 f->initial_states[i][j][k]= (pred+get_symbol(c, state2[k], 1))&0xFF;
/* read_header(): parse the range-coded per-frame (keyframe) header from
 * slice context 0: version, coder type, optional custom state-transition
 * table, colorspace parameters, the resulting pix_fmt, the version-1
 * quantization tables, the slice count and, per slice, geometry and
 * per-plane quant-table selection / context-state (re)allocation.
 * NOTE(review): elided listing — error returns, closing braces and the
 * version-gated branches between the numbered lines are not visible. */
1591 static int read_header(FFV1Context *f){
1592 uint8_t state[CONTEXT_SIZE];
1593 int i, j, context_count;
1594 RangeCoder * const c= &f->slice_context[0]->c;
1596 memset(state, 128, sizeof(state));
1599 f->version= get_symbol(c, state, 0);
1600 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
/* custom state-transition table, delta-coded against one_state */
1602 for(i=1; i<256; i++){
1603 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1606 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1608 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1609 f->chroma_planes= get_rac(c, state);
1610 f->chroma_h_shift= get_symbol(c, state, 0);
1611 f->chroma_v_shift= get_symbol(c, state, 0);
1612 f->transparency= get_rac(c, state);
1613 f->plane_count= 2 + f->transparency;
/* map (depth, chroma shifts, alpha) to a pix_fmt; key 0xHV packs the
 * horizontal shift in the high nibble, vertical in the low nibble */
1616 if(f->colorspace==0){
1617 if(!f->transparency && !f->chroma_planes){
1618 if (f->avctx->bits_per_raw_sample<=8)
1619 f->avctx->pix_fmt= PIX_FMT_GRAY8;
1621 f->avctx->pix_fmt= PIX_FMT_GRAY16;
1622 }else if(f->avctx->bits_per_raw_sample<=8 && !f->transparency){
1623 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1624 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P; break;
1625 case 0x01: f->avctx->pix_fmt= PIX_FMT_YUV440P; break;
1626 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P; break;
1627 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P; break;
1628 case 0x20: f->avctx->pix_fmt= PIX_FMT_YUV411P; break;
1629 case 0x22: f->avctx->pix_fmt= PIX_FMT_YUV410P; break;
1631 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1634 }else if(f->avctx->bits_per_raw_sample<=8 && f->transparency){
1635 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1636 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUVA444P; break;
1637 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUVA420P; break;
1639 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1642 }else if(f->avctx->bits_per_raw_sample==9) {
1644 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1645 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P9; break;
1646 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P9; break;
1647 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P9; break;
1649 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1652 }else if(f->avctx->bits_per_raw_sample==10) {
1654 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1655 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P10; break;
1656 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P10; break;
1657 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P10; break;
1659 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1663 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1664 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
1665 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P16; break;
1666 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P16; break;
1668 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1672 }else if(f->colorspace==1){
1673 if(f->chroma_h_shift || f->chroma_v_shift){
1674 av_log(f->avctx, AV_LOG_ERROR, "chroma subsampling not supported in this colorspace\n");
1677 if(f->transparency) f->avctx->pix_fmt= PIX_FMT_RGB32;
1678 else f->avctx->pix_fmt= PIX_FMT_0RGB32;
1680 av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
1684 //printf("%d %d %d\n", f->chroma_h_shift, f->chroma_v_shift,f->avctx->pix_fmt);
1686 context_count= read_quant_tables(c, f->quant_table);
1687 if(context_count < 0){
1688 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1692 f->slice_count= get_symbol(c, state, 0);
1693 if(f->slice_count > (unsigned)MAX_SLICES)
1697 for(j=0; j<f->slice_count; j++){
1698 FFV1Context *fs= f->slice_context[j];
1700 fs->packed_at_lsb= f->packed_at_lsb;
/* version >= 2: explicit slice geometry in grid units, converted to
 * pixel coordinates by dividing through the slice grid */
1702 if(f->version >= 2){
1703 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1704 fs->slice_y = get_symbol(c, state, 0) *f->height;
1705 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1706 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
1708 fs->slice_x /= f->num_h_slices;
1709 fs->slice_y /= f->num_v_slices;
1710 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1711 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
/* reject slices that lie outside the frame (uint64_t avoids overflow) */
1712 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1714 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1715 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1719 for(i=0; i<f->plane_count; i++){
1720 PlaneContext * const p= &fs->plane[i];
1722 if(f->version >= 2){
1723 int idx=get_symbol(c, state, 0);
/* FIX: was `idx > (unsigned)f->quant_table_count`, which let
 * idx == quant_table_count through and read one element past the
 * end of quant_tables[] / context_count[] from untrusted input.
 * Valid indices are 0 .. quant_table_count-1, so reject >=.
 * (The unsigned comparison also rejects negative idx.) */
1724 if(idx >= (unsigned)f->quant_table_count){
1725 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
1728 p->quant_table_index= idx;
1729 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
1730 context_count= f->context_count[idx];
1732 memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
/* grow (reallocate) per-context state if this frame needs more
 * contexts than currently allocated */
1735 if(p->context_count < context_count){
1736 av_freep(&p->state);
1737 av_freep(&p->vlc_state);
1739 p->context_count= context_count;
/* decode_init(): decoder init. Parses the global extradata header if
 * present, then sets up the slice contexts.
 * NOTE(review): elided listing — common init, error returns and the
 * final return are on lines not visible here. */
1746 static av_cold int decode_init(AVCodecContext *avctx)
1748 FFV1Context *f = avctx->priv_data;
1752 if(avctx->extradata && read_extra_header(f) < 0)
1755 if(init_slice_contexts(f) < 0)
/* decode_frame(): decode one FFV1 access unit into f->picture.
 * Keyframes re-read the frame header; a custom state-transition table,
 * if present, is copied into the slice-0 range coder. Slices are located
 * by walking 24-bit size fields backwards from the end of the packet,
 * then decoded in parallel via avctx->execute(decode_slice).
 * NOTE(review): elided listing — the ac/VLC branch structure, the
 * slice-0 setup and the final return are on lines not visible here. */
1761 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
1762 const uint8_t *buf = avpkt->data;
1763 int buf_size = avpkt->size;
1764 FFV1Context *f = avctx->priv_data;
1765 RangeCoder * const c= &f->slice_context[0]->c;
1766 AVFrame * const p= &f->picture;
1768 uint8_t keystate= 128;
1769 const uint8_t *buf_p;
1771 AVFrame *picture = data;
1773 /* release previously stored data */
1775 avctx->release_buffer(avctx, p);
1777 ff_init_range_decoder(c, buf, buf_size);
1778 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1781 p->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
/* first range-coded bit: keyframe flag */
1782 if(get_rac(c, &keystate)){
1784 if(read_header(f) < 0)
1786 if(init_slice_state(f) < 0)
/* install the (possibly custom) state-transition table into the coder */
1795 for(i=1; i<256; i++){
1796 c->one_state[i]= f->state_transition[i];
1797 c->zero_state[256-i]= 256-c->one_state[i];
1802 if(avctx->get_buffer(avctx, p) < 0){
1803 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1807 if(avctx->debug&FF_DEBUG_PICT_INFO)
1808 av_log(avctx, AV_LOG_ERROR, "keyframe:%d coder:%d\n", p->key_frame, f->ac);
/* VLC coder: hand the remaining bytes to the bitreader; -1 for the
 * range coder's terminating byte */
1811 bytes_read = c->bytestream - c->bytestream_start - 1;
1812 if(bytes_read ==0) av_log(avctx, AV_LOG_ERROR, "error at end of AC stream\n"); //FIXME
1813 //printf("pos=%d\n", bytes_read);
1814 init_get_bits(&f->slice_context[0]->gb, buf + bytes_read, (buf_size - bytes_read) * 8);
1816 bytes_read = 0; /* avoid warning */
/* locate slices 1..n-1 by walking trailing 24-bit sizes backwards;
 * slice 0 occupies whatever remains at the front */
1819 buf_p= buf + buf_size;
1820 for(i=f->slice_count-1; i>0; i--){
1821 FFV1Context *fs= f->slice_context[i];
1822 int v= AV_RB24(buf_p-3)+3;
1823 if(buf_p - buf <= v){
1824 av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
1829 ff_init_range_decoder(&fs->c, buf_p, v);
1831 init_get_bits(&fs->gb, buf_p, v * 8);
1835 avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
1836 f->picture_number++;
1839 *data_size = sizeof(AVFrame);
/* FFV1 decoder registration. Supports direct rendering (DR1) and
 * slice-threaded decoding; draw_horiz_band is intentionally disabled.
 * NOTE(review): the .name member and closing brace are elided from this
 * listing. */
1844 AVCodec ff_ffv1_decoder = {
1846     .type           = AVMEDIA_TYPE_VIDEO,
1847     .id             = CODEC_ID_FFV1,
1848     .priv_data_size = sizeof(FFV1Context),
1849     .init           = decode_init,
1850     .close          = common_end,
1851     .decode         = decode_frame,
1852     .capabilities   = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ | CODEC_CAP_SLICE_THREADS,
1853     .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
1856 #if CONFIG_FFV1_ENCODER
1857 AVCodec ff_ffv1_encoder = {
1859 .type = AVMEDIA_TYPE_VIDEO,
1860 .id = CODEC_ID_FFV1,
1861 .priv_data_size = sizeof(FFV1Context),
1862 .init = encode_init,
1863 .encode2 = encode_frame,
1864 .close = common_end,
1865 .capabilities = CODEC_CAP_SLICE_THREADS,
1866 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUVA420P, PIX_FMT_YUV444P, PIX_FMT_YUVA444P, PIX_FMT_YUV440P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_0RGB32, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_YUV444P9, PIX_FMT_YUV422P9, PIX_FMT_YUV420P9, PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_YUV444P10, PIX_FMT_GRAY16, PIX_FMT_GRAY8, PIX_FMT_NONE},
1867 .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),