2 * FFV1 codec for libavcodec
4 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * FF Video Codec 1 (a lossless codec)
32 #include "rangecoder.h"
35 #include "libavutil/avassert.h"
38 #define CONTEXT_SIZE 32
40 #define MAX_QUANT_TABLES 8
41 #define MAX_CONTEXT_INPUTS 5
43 extern const uint8_t ff_log2_run[41];
/* 5-level quantizer for sample differences, tuned for >8-bit (10-bit) content:
 * maps a signed 8-bit wrapped difference to one of {-2,-1,0,1,2}.
 * NOTE(review): extraction appears truncated — closing "};" is not visible. */
45 static const int8_t quant5_10bit[256]={
46 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
47 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
48 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
49 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
50 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
51 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
52 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
53 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
54 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
55 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
56 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
57 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
58 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,
59 -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
60 -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
61 -1,-1,-1,-1,-1,-1,-0,-0,-0,-0,-0,-0,-0,-0,-0,-0,
/* 5-level quantizer for sample differences (8-bit content):
 * maps a signed 8-bit wrapped difference to one of {-2,-1,0,1,2}.
 * NOTE(review): extraction appears truncated — closing "};" is not visible. */
64 static const int8_t quant5[256]={
65 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
66 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
67 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
68 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
69 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
70 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
71 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
72 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
73 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
74 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
75 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
76 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
77 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
78 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
79 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
80 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1,-1,
/* 9-level quantizer for sample differences, tuned for >8-bit (10-bit) content:
 * maps a signed 8-bit wrapped difference to one of {-4..4}.
 * NOTE(review): extraction appears truncated — closing "};" is not visible. */
83 static const int8_t quant9_10bit[256]={
84 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
86 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
87 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
93 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
94 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
95 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
96 -4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,
97 -3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,
98 -3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
99 -2,-2,-2,-2,-1,-1,-1,-1,-1,-1,-1,-1,-0,-0,-0,-0,
/* 11-level quantizer for sample differences (8-bit content):
 * maps a signed 8-bit wrapped difference to one of {-5..5}.
 * NOTE(review): extraction appears truncated — closing "};" is not visible. */
102 static const int8_t quant11[256]={
103 0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
104 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
105 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
106 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
107 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
108 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
109 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
110 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
111 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
112 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
113 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
114 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
115 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
116 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-4,-4,
117 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
118 -4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-1,
/* Default range-coder state-transition table used for version >= 2 bitstreams;
 * copied into FFV1Context.state_transition at encode_init (see its use below).
 * NOTE(review): extraction appears truncated — closing "};" is not visible. */
121 static const uint8_t ver2_state[256]= {
122 0, 10, 10, 10, 10, 16, 16, 16, 28, 16, 16, 29, 42, 49, 20, 49,
123 59, 25, 26, 26, 27, 31, 33, 33, 33, 34, 34, 37, 67, 38, 39, 39,
124 40, 40, 41, 79, 43, 44, 45, 45, 48, 48, 64, 50, 51, 52, 88, 52,
125 53, 74, 55, 57, 58, 58, 74, 60, 101, 61, 62, 84, 66, 66, 68, 69,
126 87, 82, 71, 97, 73, 73, 82, 75, 111, 77, 94, 78, 87, 81, 83, 97,
127 85, 83, 94, 86, 99, 89, 90, 99, 111, 92, 93, 134, 95, 98, 105, 98,
128 105, 110, 102, 108, 102, 118, 103, 106, 106, 113, 109, 112, 114, 112, 116, 125,
129 115, 116, 117, 117, 126, 119, 125, 121, 121, 123, 145, 124, 126, 131, 127, 129,
130 165, 130, 132, 138, 133, 135, 145, 136, 137, 139, 146, 141, 143, 142, 144, 148,
131 147, 155, 151, 149, 151, 150, 152, 157, 153, 154, 156, 168, 158, 162, 161, 160,
132 172, 163, 169, 164, 166, 184, 167, 170, 177, 174, 171, 173, 182, 176, 180, 178,
133 175, 189, 179, 181, 186, 183, 192, 185, 200, 187, 191, 188, 190, 197, 193, 196,
134 197, 194, 195, 196, 198, 202, 199, 201, 210, 203, 207, 204, 205, 206, 208, 214,
135 209, 211, 221, 212, 213, 215, 224, 216, 217, 218, 219, 220, 222, 228, 223, 225,
136 226, 224, 227, 229, 240, 230, 231, 232, 233, 234, 235, 236, 238, 239, 237, 242,
137 241, 243, 242, 244, 245, 246, 247, 248, 249, 250, 251, 252, 252, 253, 254, 255,
/* Per-context state for the Golomb-Rice (VLC) coding path.
 * NOTE(review): struct members (drift, error_sum, bias, count) are not visible
 * in this extraction — body lines are missing. */
140 typedef struct VlcState{
/* Per-plane coding state: the quantization tables, the range-coder context
 * states and (elsewhere, not visible here) the VLC states.
 * NOTE(review): several members and the closing "}PlaneContext;" are missing. */
147 typedef struct PlaneContext{
148 int16_t quant_table[MAX_CONTEXT_INPUTS][256];
149 int quant_table_index;
/* one set of CONTEXT_SIZE range-coder states per context */
151 uint8_t (*state)[CONTEXT_SIZE];
153 uint8_t interlace_bit_state[2];
/* upper bound on slices per frame (slice_count is coded in the header) */
156 #define MAX_SLICES 256
/* Main codec context; also used per-slice (each slice gets its own copy, see
 * init_slice_contexts below). NOTE(review): many members and the closing
 * "}FFV1Context;" are missing from this extraction. */
158 typedef struct FFV1Context{
159 AVCodecContext *avctx;
/* 2-pass statistics: [symbol][0/1 bit] counts for sort_stt/find_best_state */
163 uint64_t rc_stat[256][2];
164 uint64_t (*rc_stat2[MAX_QUANT_TABLES])[32][2];
167 int chroma_h_shift, chroma_v_shift;
174 int ac; ///< 1=range coder <-> 0=golomb rice
175 PlaneContext plane[MAX_PLANES];
176 int16_t quant_table[MAX_CONTEXT_INPUTS][256];
177 int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][256];
178 int context_count[MAX_QUANT_TABLES];
179 uint8_t state_transition[256];
180 uint8_t (*initial_states[MAX_QUANT_TABLES])[32];
/* scratch ring buffer of sample lines used by encode/decode_plane */
183 int16_t *sample_buffer;
187 int quant_table_count;
191 struct FFV1Context *slice_context[MAX_SLICES];
/* Folds a difference into the representable range of a 'bits'-wide sample.
 * NOTE(review): the function body is missing from this extraction. */
201 static av_always_inline int fold(int diff, int bits){
/* Median predictor: predicts the current sample from its left (L), top (T)
 * and top-left (LT) neighbours as mid_pred(L, L+T-LT, T).
 * NOTE(review): the surrounding braces are missing from this extraction. */
213 static inline int predict(int16_t *src, int16_t *last)
215 const int LT= last[-1];
216 const int T= last[ 0];
217 const int L = src[-1];
219 return mid_pred(L, L + T - LT, T);
/* Computes the coding context from quantized neighbour differences.
 * Uses 3 inputs (L-LT, LT-T, T-RT) for the small model; when
 * quant_table[3][127] is nonzero the large 5-input model additionally uses
 * LL-L and TT-T from two samples/lines back.
 * NOTE(review): braces and some lines are missing from this extraction. */
222 static inline int get_context(PlaneContext *p, int16_t *src,
223 int16_t *last, int16_t *last2)
225 const int LT= last[-1];
226 const int T= last[ 0];
227 const int RT= last[ 1];
228 const int L = src[-1];
230 if(p->quant_table[3][127]){
231 const int TT= last2[0];
232 const int LL= src[-2];
233 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF]
234 +p->quant_table[3][(LL-L) & 0xFF] + p->quant_table[4][(TT-T) & 0xFF];
236 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF];
239 static void find_best_state(uint8_t best_state[256][256], const uint8_t one_state[256]){
244 l2tab[i]= log2(i/256.0);
246 for(i=0; i<256; i++){
247 double best_len[256];
253 for(j=FFMAX(i-10,1); j<FFMIN(i+11,256); j++){
257 for(k=0; k<256; k++){
258 double newocc[256]={0};
259 for(m=0; m<256; m++){
261 len -=occ[m]*( p *l2tab[ m]
262 + (1-p)*l2tab[256-m]);
265 if(len < best_len[k]){
269 for(m=0; m<256; m++){
271 newocc[ one_state[ m]] += occ[m]* p ;
272 newocc[256-one_state[256-m]] += occ[m]*(1-p);
275 memcpy(occ, newocc, sizeof(occ));
/* Range-codes one (optionally signed) integer using an exp-golomb-like
 * decomposition: a zero flag, unary exponent (states 1..10), mantissa bits
 * (states 22..31) and sign (states 11..21). When rc_stat/rc_stat2 are
 * non-NULL the put_rac macro additionally gathers 2-pass statistics.
 * NOTE(review): several lines (incl. macro body and if/else structure) are
 * missing from this extraction. */
281 static av_always_inline av_flatten void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int is_signed, uint64_t rc_stat[256][2], uint64_t rc_stat2[32][2]){
284 #define put_rac(C,S,B) \
288 rc_stat2[(S)-state][B]++;\
294 const int a= FFABS(v);
295 const int e= av_log2(a);
296 put_rac(c, state+0, 0);
299 put_rac(c, state+1+i, 1); //1..10
301 put_rac(c, state+1+i, 0);
303 for(i=e-1; i>=0; i--){
304 put_rac(c, state+22+i, (a>>i)&1); //22..31
308 put_rac(c, state+11 + e, v < 0); //11..21
311 put_rac(c, state+1+FFMIN(i,9), 1); //1..10
313 put_rac(c, state+1+9, 0);
315 for(i=e-1; i>=0; i--){
316 put_rac(c, state+22+FFMIN(i,9), (a>>i)&1); //22..31
320 put_rac(c, state+11 + 10, v < 0); //11..21
323 put_rac(c, state+0, 1);
/* Non-inlined wrapper around put_symbol_inline without statistics gathering;
 * used on cold paths (header writing). */
328 static void av_noinline put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed){
329 put_symbol_inline(c, state, v, is_signed, NULL, NULL);
/* Decodes one (optionally signed) integer — the exact inverse of
 * put_symbol_inline: zero flag, unary exponent, mantissa bits, then sign.
 * NOTE(review): several lines are missing from this extraction. */
332 static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed){
333 if(get_rac(c, state+0))
338 while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
343 for(i=e-1; i>=0; i--){
344 a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31
347 e= -(is_signed && get_rac(c, state+11 + FFMIN(e, 10))); //11..21
/* Non-inlined wrapper around get_symbol_inline; used on cold paths. */
352 static int av_noinline get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
353 return get_symbol_inline(c, state, is_signed);
/* Updates the adaptive Golomb-Rice state after coding value v: accumulates
 * |v| into error_sum, adjusts bias/drift, and halves all counters when
 * count reaches 128 to keep the adaptation responsive.
 * NOTE(review): several lines are missing from this extraction. */
356 static inline void update_vlc_state(VlcState * const state, const int v){
357 int drift= state->drift;
358 int count= state->count;
359 state->error_sum += FFABS(v);
362 if(count == 128){ //FIXME variable
365 state->error_sum >>= 1;
370 if(state->bias > -128) state->bias--;
376 if(state->bias < 127) state->bias++;
/* Codes one residual with an adaptive signed Golomb-Rice code: picks the
 * Rice parameter k from error_sum, maps v through the bias/drift-dependent
 * sign flip, then updates the per-context VLC state.
 * NOTE(review): some lines (incl. the k-selection loop body) are missing. */
387 static inline void put_vlc_symbol(PutBitContext *pb, VlcState * const state, int v, int bits){
389 //printf("final: %d ", v);
390 v = fold(v - state->bias, bits);
394 while(i < state->error_sum){ //FIXME optimize
402 if(k==0 && 2*state->drift <= - state->count) code= v ^ (-1);
405 code= v ^ ((2*state->drift + state->count)>>31);
408 //printf("v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code, state->bias, state->error_sum, state->drift, state->count, k);
409 set_sr_golomb(pb, code, k, 12, bits);
411 update_vlc_state(state, v);
/* Decodes one residual coded by put_vlc_symbol: derives the same Rice
 * parameter k from error_sum, undoes the drift-dependent sign flip, adds
 * the bias and folds back into range, then updates the VLC state.
 * NOTE(review): some lines are missing from this extraction. */
414 static inline int get_vlc_symbol(GetBitContext *gb, VlcState * const state, int bits){
419 while(i < state->error_sum){ //FIXME optimize
426 v= get_sr_golomb(gb, k, 12, bits);
427 //printf("v:%d bias:%d error:%d drift:%d count:%d k:%d", v, state->bias, state->error_sum, state->drift, state->count, k);
430 if(k==0 && 2*state->drift <= - state->count) v ^= (-1);
432 v ^= ((2*state->drift + state->count)>>31);
435 ret= fold(v + state->bias, bits);
437 update_vlc_state(state, v);
438 //printf("final: %d\n", ret);
442 #if CONFIG_FFV1_ENCODER
/* Encodes one line of 'bits'-deep samples: for each sample computes the
 * context and the prediction residual, then codes it either with the range
 * coder (s->ac, optionally gathering PASS1 statistics) or with run-length +
 * Golomb-Rice coding. Also performs a worst-case output-size check up front.
 * NOTE(review): many lines of the loop structure are missing. */
443 static av_always_inline int encode_line(FFV1Context *s, int w,
445 int plane_index, int bits)
447 PlaneContext * const p= &s->plane[plane_index];
448 RangeCoder * const c= &s->c;
450 int run_index= s->run_index;
/* conservative bound: refuse to continue if the buffer could overflow */
455 if(c->bytestream_end - c->bytestream < w*20){
456 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
460 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < w*4){
461 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
469 context= get_context(p, sample[0]+x, sample[1]+x, sample[2]+x);
470 diff= sample[0][x] - predict(sample[0]+x, sample[1]+x);
477 diff= fold(diff, bits);
480 if(s->flags & CODEC_FLAG_PASS1){
481 put_symbol_inline(c, p->state[context], diff, 1, s->rc_stat, s->rc_stat2[p->quant_table_index][context]);
483 put_symbol_inline(c, p->state[context], diff, 1, NULL, NULL);
486 if(context == 0) run_mode=1;
491 while(run_count >= 1<<ff_log2_run[run_index]){
492 run_count -= 1<<ff_log2_run[run_index];
494 put_bits(&s->pb, 1, 1);
497 put_bits(&s->pb, 1 + ff_log2_run[run_index], run_count);
498 if(run_index) run_index--;
507 // printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, (int)put_bits_count(&s->pb));
510 put_vlc_symbol(&s->pb, &p->vlc_state[context], diff, bits);
514 while(run_count >= 1<<ff_log2_run[run_index]){
515 run_count -= 1<<ff_log2_run[run_index];
517 put_bits(&s->pb, 1, 1);
521 put_bits(&s->pb, 1, 1);
523 s->run_index= run_index;
/* Encodes one plane line by line: copies source samples (8-bit, or 16-bit
 * packed at LSB or MSB depending on packed_at_lsb) into a small ring of
 * padded sample lines and calls encode_line per row. The ring has 3 lines
 * when the large context model is used (it needs two previous lines).
 * NOTE(review): loop headers and some lines are missing. */
528 static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
530 const int ring_size= s->avctx->context_model ? 3 : 2;
534 memset(s->sample_buffer, 0, ring_size*(w+6)*sizeof(*s->sample_buffer));
537 for(i=0; i<ring_size; i++)
538 sample[i]= s->sample_buffer + (w+6)*((h+i-y)%ring_size) + 3;
/* replicate edge samples into the padding */
540 sample[0][-1]= sample[1][0 ];
541 sample[1][ w]= sample[1][w-1];
543 if(s->avctx->bits_per_raw_sample<=8){
545 sample[0][x]= src[x + stride*y];
547 encode_line(s, w, sample, plane_index, 8);
549 if(s->packed_at_lsb){
551 sample[0][x]= ((uint16_t*)(src + stride*y))[x];
555 sample[0][x]= ((uint16_t*)(src + stride*y))[x] >> (16 - s->avctx->bits_per_raw_sample);
558 encode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
560 //STOP_TIMER("encode line")}
/* Encodes a packed 32-bit RGB(A) frame: splits each pixel into per-channel
 * sample lines (plus alpha when s->transparency) and encodes each channel
 * with encode_line at 9 bits (JPEG-LS-style RCT residuals can exceed 8 bits).
 * NOTE(review): the pixel decorrelation lines are missing from this
 * extraction. */
564 static void encode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
566 const int ring_size= s->avctx->context_model ? 3 : 2;
567 int16_t *sample[4][3];
570 memset(s->sample_buffer, 0, ring_size*4*(w+6)*sizeof(*s->sample_buffer));
573 for(i=0; i<ring_size; i++)
575 sample[p][i]= s->sample_buffer + p*ring_size*(w+6) + ((h+i-y)%ring_size)*(w+6) + 3;
578 unsigned v= src[x + stride*y];
590 // assert(g>=0 && b>=0 && r>=0);
591 // assert(g<256 && b<512 && r<512);
597 for(p=0; p<3 + s->transparency; p++){
598 sample[p][0][-1]= sample[p][1][0 ];
599 sample[p][1][ w]= sample[p][1][w-1];
600 encode_line(s, w, sample[p], (p+1)/2, 9);
/* Writes one 256-entry quantization table to the bitstream as run lengths:
 * only the distances between value changes over the first half (the table is
 * antisymmetric) are coded.
 * NOTE(review): some lines ('last' bookkeeping) are missing. */
605 static void write_quant_table(RangeCoder *c, int16_t *quant_table){
608 uint8_t state[CONTEXT_SIZE];
609 memset(state, 128, sizeof(state));
611 for(i=1; i<128 ; i++){
612 if(quant_table[i] != quant_table[i-1]){
613 put_symbol(c, state, i-last-1, 0);
617 put_symbol(c, state, i-last-1, 0);
/* Writes all MAX_CONTEXT_INPUTS quantization tables of one table set.
 * NOTE(review): the surrounding loop header is missing from this
 * extraction. */
620 static void write_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
623 write_quant_table(c, quant_table[i]);
/* Writes the per-frame (keyframe) header with the first slice's range coder:
 * version, coder type, custom state transition deltas, colorspace, bit
 * depth, chroma layout, transparency, quant tables, and per-slice geometry
 * plus quant table indices.
 * NOTE(review): version-dependent branches are missing from this
 * extraction. */
626 static void write_header(FFV1Context *f){
627 uint8_t state[CONTEXT_SIZE];
629 RangeCoder * const c= &f->slice_context[0]->c;
631 memset(state, 128, sizeof(state));
634 put_symbol(c, state, f->version, 0);
635 put_symbol(c, state, f->ac, 0);
/* deltas against the default transition table, so defaults code cheaply */
637 for(i=1; i<256; i++){
638 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
641 put_symbol(c, state, f->colorspace, 0); //YUV cs type
643 put_symbol(c, state, f->avctx->bits_per_raw_sample, 0);
644 put_rac(c, state, f->chroma_planes);
645 put_symbol(c, state, f->chroma_h_shift, 0);
646 put_symbol(c, state, f->chroma_v_shift, 0);
647 put_rac(c, state, f->transparency);
649 write_quant_tables(c, f->quant_table);
651 put_symbol(c, state, f->slice_count, 0);
652 for(i=0; i<f->slice_count; i++){
653 FFV1Context *fs= f->slice_context[i];
654 put_symbol(c, state, (fs->slice_x +1)*f->num_h_slices / f->width , 0);
655 put_symbol(c, state, (fs->slice_y +1)*f->num_v_slices / f->height , 0);
656 put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
657 put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
658 for(j=0; j<f->plane_count; j++){
659 put_symbol(c, state, f->plane[j].quant_table_index, 0);
660 av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
665 #endif /* CONFIG_FFV1_ENCODER */
/* Initialization shared by encoder and decoder: caches flags and picture
 * defaults, initializes DSP helpers and copies the frame dimensions.
 * NOTE(review): the return statement and some lines are missing. */
667 static av_cold int common_init(AVCodecContext *avctx){
668 FFV1Context *s = avctx->priv_data;
671 s->flags= avctx->flags;
673 avcodec_get_frame_defaults(&s->picture);
675 dsputil_init(&s->dsp, avctx);
677 s->width = avctx->width;
678 s->height= avctx->height;
680 assert(s->width && s->height);
/* Allocates per-slice, per-plane coding state (range-coder states and VLC
 * states) and rebuilds each slice's range-coder transition tables from
 * state_transition. Returns AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): some lines (incl. the p->state NULL check condition) are
 * missing from this extraction. */
689 static int init_slice_state(FFV1Context *f){
692 for(i=0; i<f->slice_count; i++){
693 FFV1Context *fs= f->slice_context[i];
694 fs->plane_count= f->plane_count;
695 fs->transparency= f->transparency;
696 for(j=0; j<f->plane_count; j++){
697 PlaneContext * const p= &fs->plane[j];
/* lazily allocated: may already exist from a previous init */
700 if(!p-> state) p-> state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t));
702 return AVERROR(ENOMEM);
704 if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(VlcState));
706 return AVERROR(ENOMEM);
711 //FIXME only redo if state_transition changed
712 for(j=1; j<256; j++){
713 fs->c.one_state [ j]= fs->state_transition[j];
714 fs->c.zero_state[256-j]= 256-fs->c.one_state [j];
/* Creates one FFV1Context per slice as a copy of the main context, computes
 * each slice's rectangle from the slice grid, and allocates its sample ring
 * buffer. Returns AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): the fs NULL check and slice_x/y assignments are missing
 * from this extraction. */
722 static av_cold int init_slice_contexts(FFV1Context *f){
725 f->slice_count= f->num_h_slices * f->num_v_slices;
727 for(i=0; i<f->slice_count; i++){
728 FFV1Context *fs= av_mallocz(sizeof(*fs));
729 int sx= i % f->num_h_slices;
730 int sy= i / f->num_h_slices;
/* integer slice grid: slice j spans [w*j/n, w*(j+1)/n) */
731 int sxs= f->avctx->width * sx / f->num_h_slices;
732 int sxe= f->avctx->width *(sx+1) / f->num_h_slices;
733 int sys= f->avctx->height* sy / f->num_v_slices;
734 int sye= f->avctx->height*(sy+1) / f->num_v_slices;
735 f->slice_context[i]= fs;
736 memcpy(fs, f, sizeof(*fs));
/* statistics pointers were copied from f; clear so each slice owns its own */
737 memset(fs->rc_stat2, 0, sizeof(fs->rc_stat2));
739 fs->slice_width = sxe - sxs;
740 fs->slice_height= sye - sys;
/* 3 lines (largest ring) x 4 channels, padded by 6 samples per line */
744 fs->sample_buffer = av_malloc(3*4 * (fs->width+6) * sizeof(*fs->sample_buffer));
745 if (!fs->sample_buffer)
746 return AVERROR(ENOMEM);
/* Allocates one initial-state table per quant table set, filled with the
 * neutral range-coder state 128. Returns AVERROR(ENOMEM) on failure. */
751 static int allocate_initial_states(FFV1Context *f){
754 for(i=0; i<f->quant_table_count; i++){
755 f->initial_states[i]= av_malloc(f->context_count[i]*sizeof(*f->initial_states[i]));
756 if(!f->initial_states[i])
757 return AVERROR(ENOMEM);
758 memset(f->initial_states[i], 128, f->context_count[i]*sizeof(*f->initial_states[i]));
763 #if CONFIG_FFV1_ENCODER
/* Writes the global (extradata) header for version >= 2 streams: stream
 * parameters, slice grid, all quant table sets and — when any initial state
 * differs from the default 128 — the delta-coded initial context states.
 * The range coder is terminated into avctx->extradata.
 * NOTE(review): the extradata allocation is unchecked in the visible code,
 * and several lines are missing from this extraction. */
764 static int write_extra_header(FFV1Context *f){
765 RangeCoder * const c= &f->c;
766 uint8_t state[CONTEXT_SIZE];
768 uint8_t state2[32][CONTEXT_SIZE];
770 memset(state2, 128, sizeof(state2));
771 memset(state, 128, sizeof(state));
773 f->avctx->extradata= av_malloc(f->avctx->extradata_size= 10000 + (11*11*5*5*5+11*11*11)*32);
774 ff_init_range_encoder(c, f->avctx->extradata, f->avctx->extradata_size);
775 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
777 put_symbol(c, state, f->version, 0);
778 put_symbol(c, state, f->ac, 0);
780 for(i=1; i<256; i++){
781 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
784 put_symbol(c, state, f->colorspace, 0); //YUV cs type
785 put_symbol(c, state, f->avctx->bits_per_raw_sample, 0);
786 put_rac(c, state, f->chroma_planes);
787 put_symbol(c, state, f->chroma_h_shift, 0);
788 put_symbol(c, state, f->chroma_v_shift, 0);
789 put_rac(c, state, f->transparency);
790 put_symbol(c, state, f->num_h_slices-1, 0);
791 put_symbol(c, state, f->num_v_slices-1, 0);
793 put_symbol(c, state, f->quant_table_count, 0);
794 for(i=0; i<f->quant_table_count; i++)
795 write_quant_tables(c, f->quant_tables[i]);
/* scan for any non-default initial state; only then code the whole table */
797 for(i=0; i<f->quant_table_count; i++){
798 for(j=0; j<f->context_count[i]*CONTEXT_SIZE; j++)
799 if(f->initial_states[i] && f->initial_states[i][0][j] != 128)
801 if(j<f->context_count[i]*CONTEXT_SIZE){
802 put_rac(c, state, 1);
803 for(j=0; j<f->context_count[i]; j++){
804 for(k=0; k<CONTEXT_SIZE; k++){
/* delta against the previous context's state (or 128 for the first) */
805 int pred= j ? f->initial_states[i][j-1][k] : 128;
806 put_symbol(c, state2[k], (int8_t)(f->initial_states[i][j][k]-pred), 1);
810 put_rac(c, state, 0);
814 f->avctx->extradata_size= ff_rac_terminate(c);
/* Greedy optimization of the state-transition table from 2-pass statistics:
 * tries swapping nearby states i/i2 (and their mirrors 256-i/256-i2) when
 * the estimated coded size improves, remapping all references in stt.
 * NOTE(review): outer iteration/termination lines are missing from this
 * extraction. */
819 static int sort_stt(FFV1Context *s, uint8_t stt[256]){
820 int i,i2,changed,print=0;
824 for(i=12; i<244; i++){
825 for(i2=i+1; i2<245 && i2<i+4; i2++){
/* expected bits to code the observed 0/1 counts if state 'old' behaved
 * like probability new/256 */
826 #define COST(old, new) \
827 s->rc_stat[old][0]*-log2((256-(new))/256.0)\
828 +s->rc_stat[old][1]*-log2( (new) /256.0)
830 #define COST2(old, new) \
832 +COST(256-(old), 256-(new))
834 double size0= COST2(i, i ) + COST2(i2, i2);
835 double sizeX= COST2(i, i2) + COST2(i2, i );
836 if(sizeX < size0 && i!=128 && i2!=128){
838 FFSWAP(int, stt[ i], stt[ i2]);
839 FFSWAP(int, s->rc_stat[i ][0],s->rc_stat[ i2][0]);
840 FFSWAP(int, s->rc_stat[i ][1],s->rc_stat[ i2][1]);
842 FFSWAP(int, stt[256-i], stt[256-i2]);
843 FFSWAP(int, s->rc_stat[256-i][0],s->rc_stat[256-i2][0]);
844 FFSWAP(int, s->rc_stat[256-i][1],s->rc_stat[256-i2][1]);
846 for(j=1; j<256; j++){
847 if (stt[j] == i ) stt[j] = i2;
848 else if(stt[j] == i2) stt[j] = i ;
850 if (stt[256-j] == 256-i ) stt[256-j] = 256-i2;
851 else if(stt[256-j] == 256-i2) stt[256-j] = 256-i ;
/* Encoder init: selects coder type and quant table sets (8-bit vs 10-bit
 * variants, small 3-input and large 5-input models), configures planes per
 * pix_fmt, parses optional 2-pass statistics (rc_stat / rc_stat2 / gob
 * count) to derive an optimized state-transition table and initial states,
 * writes the global header for version >= 2, and sets up slice contexts.
 * NOTE(review): many lines (error returns, loop headers, some case labels)
 * are missing from this extraction. */
862 static av_cold int encode_init(AVCodecContext *avctx)
864 FFV1Context *s = avctx->priv_data;
870 s->ac= avctx->coder_type ? 2:0;
874 s->state_transition[i]=ver2_state[i];
877 for(i=0; i<256; i++){
878 s->quant_table_count=2;
879 if(avctx->bits_per_raw_sample <=8){
/* table set 0: small 3-input model (11*11*11 contexts) */
880 s->quant_tables[0][0][i]= quant11[i];
881 s->quant_tables[0][1][i]= 11*quant11[i];
882 s->quant_tables[0][2][i]= 11*11*quant11[i];
/* table set 1: large 5-input model (11*11*5*5*5 contexts) */
883 s->quant_tables[1][0][i]= quant11[i];
884 s->quant_tables[1][1][i]= 11*quant11[i];
885 s->quant_tables[1][2][i]= 11*11*quant5 [i];
886 s->quant_tables[1][3][i]= 5*11*11*quant5 [i];
887 s->quant_tables[1][4][i]= 5*5*11*11*quant5 [i];
889 s->quant_tables[0][0][i]= quant9_10bit[i];
890 s->quant_tables[0][1][i]= 11*quant9_10bit[i];
891 s->quant_tables[0][2][i]= 11*11*quant9_10bit[i];
892 s->quant_tables[1][0][i]= quant9_10bit[i];
893 s->quant_tables[1][1][i]= 11*quant9_10bit[i];
894 s->quant_tables[1][2][i]= 11*11*quant5_10bit[i];
895 s->quant_tables[1][3][i]= 5*11*11*quant5_10bit[i];
896 s->quant_tables[1][4][i]= 5*5*11*11*quant5_10bit[i];
/* tables are antisymmetric, so only half the contexts are distinct */
899 s->context_count[0]= (11*11*11+1)/2;
900 s->context_count[1]= (11*11*5*5*5+1)/2;
901 memcpy(s->quant_table, s->quant_tables[avctx->context_model], sizeof(s->quant_table));
903 for(i=0; i<s->plane_count; i++){
904 PlaneContext * const p= &s->plane[i];
906 memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table));
907 p->quant_table_index= avctx->context_model;
908 p->context_count= s->context_count[p->quant_table_index];
911 if(allocate_initial_states(s) < 0)
912 return AVERROR(ENOMEM);
914 avctx->coded_frame= &s->picture;
915 switch(avctx->pix_fmt){
916 case PIX_FMT_YUV420P9:
917 case PIX_FMT_YUV420P10:
918 case PIX_FMT_YUV422P10:
919 s->packed_at_lsb = 1;
921 case PIX_FMT_YUV444P16:
922 case PIX_FMT_YUV422P16:
923 case PIX_FMT_YUV420P16:
924 if(avctx->bits_per_raw_sample <=8){
925 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n");
929 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample of more than 8 needs -coder 1 currently\n");
932 s->version= FFMAX(s->version, 1);
933 case PIX_FMT_YUV444P:
934 case PIX_FMT_YUV440P:
935 case PIX_FMT_YUV422P:
936 case PIX_FMT_YUV420P:
937 case PIX_FMT_YUV411P:
938 case PIX_FMT_YUV410P:
939 s->chroma_planes= avctx->pix_fmt == PIX_FMT_GRAY16 ? 0 : 1;
942 case PIX_FMT_YUVA444P:
943 case PIX_FMT_YUVA420P:
956 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
961 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
965 if(avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
966 for(i=0; i<s->quant_table_count; i++){
967 s->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*s->rc_stat2[i]));
969 return AVERROR(ENOMEM);
/* parse 2-pass statistics written by a previous PASS1 run */
973 char *p= avctx->stats_in;
974 uint8_t best_state[256][256];
978 av_assert0(s->version>=2);
981 for(j=0; j<256; j++){
983 s->rc_stat[j][i]= strtol(p, &next, 0);
985 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d [%s]\n", j,i,p);
991 for(i=0; i<s->quant_table_count; i++){
992 for(j=0; j<s->context_count[i]; j++){
995 s->rc_stat2[i][j][k][m]= strtol(p, &next, 0);
997 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d %d %d [%s]\n", i,j,k,m,p);
1005 gob_count= strtol(p, &next, 0);
1006 if(next==p || gob_count <0){
1007 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid\n");
1011 while(*p=='\n' || *p==' ') p++;
1014 sort_stt(s, s->state_transition);
1016 find_best_state(best_state, s->state_transition);
1018 for(i=0; i<s->quant_table_count; i++){
1019 for(j=0; j<s->context_count[i]; j++){
1020 for(k=0; k<32; k++){
1022 if(s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]){
1023 p=256.0*s->rc_stat2[i][j][k][1] / (s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]);
1025 s->initial_states[i][j][k]= best_state[av_clip(round(p), 1, 255)][av_clip((s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1])/gob_count, 0, 255)];
1034 write_extra_header(s);
1037 if(init_slice_contexts(s) < 0)
1039 if(init_slice_state(s) < 0)
1042 #define STATS_OUT_SIZE 1024*1024*6
1043 if(avctx->flags & CODEC_FLAG_PASS1){
1044 avctx->stats_out= av_mallocz(STATS_OUT_SIZE)
1045 for(i=0; i<s->quant_table_count; i++){
1046 for(j=0; j<s->slice_count; j++){
1047 FFV1Context *sf= s->slice_context[j];
1048 av_assert0(!sf->rc_stat2[i]);
1049 sf->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*sf->rc_stat2[i]));
1050 if(!sf->rc_stat2[i])
1051 return AVERROR(ENOMEM);
1058 #endif /* CONFIG_FFV1_ENCODER */
/* Resets all per-slice, per-plane adaptive state to the start-of-keyframe
 * defaults: range-coder contexts to the initial states (or the neutral 128),
 * and all Golomb-Rice VLC contexts to their initial bias/drift/count.
 * NOTE(review): some loop headers are missing from this extraction. */
1061 static void clear_state(FFV1Context *f){
1064 for(si=0; si<f->slice_count; si++){
1065 FFV1Context *fs= f->slice_context[si];
1066 for(i=0; i<f->plane_count; i++){
1067 PlaneContext *p= &fs->plane[i];
1069 p->interlace_bit_state[0]= 128;
1070 p->interlace_bit_state[1]= 128;
1073 if(f->initial_states[p->quant_table_index]){
1074 memcpy(p->state, f->initial_states[p->quant_table_index], CONTEXT_SIZE*p->context_count);
1076 memset(p->state, 128, CONTEXT_SIZE*p->context_count);
1078 for(j=0; j<p->context_count; j++){
1079 p->vlc_state[j].drift= 0;
1080 p->vlc_state[j].error_sum= 4; //FFMAX((RANGE + 32)/64, 2);
1081 p->vlc_state[j].bias= 0;
1082 p->vlc_state[j].count= 1;
1089 #if CONFIG_FFV1_ENCODER
/* Per-slice encode callback for avctx->execute: encodes luma (and alpha if
 * transparency) at full resolution and chroma at the subsampled size for
 * planar YUV (colorspace 0), or the whole slice via encode_rgb_frame for
 * packed RGB. The -((-x)>>s) idiom rounds the chroma size up.
 * NOTE(review): the x/y slice-offset setup lines are missing. */
1090 static int encode_slice(AVCodecContext *c, void *arg){
1091 FFV1Context *fs= *(void**)arg;
1092 FFV1Context *f= fs->avctx->priv_data;
1093 int width = fs->slice_width;
1094 int height= fs->slice_height;
1097 AVFrame * const p= &f->picture;
/* pixel step in bytes: 2 for >8-bit formats, else 1 */
1098 const int ps= (c->bits_per_raw_sample>8)+1;
1100 if(f->colorspace==0){
1101 const int chroma_width = -((-width )>>f->chroma_h_shift);
1102 const int chroma_height= -((-height)>>f->chroma_v_shift);
1103 const int cx= x>>f->chroma_h_shift;
1104 const int cy= y>>f->chroma_v_shift;
1106 encode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1108 if (f->chroma_planes){
1109 encode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1110 encode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1112 if (fs->transparency)
1113 encode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
1115 encode_rgb_frame(fs, (uint32_t*)(p->data[0]) + ps*x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
/* Top-level frame encode: writes the keyframe flag (and on keyframes the
 * frame header after clearing the adaptive state), hands each slice its own
 * sub-buffer, runs encode_slice in parallel via avctx->execute, concatenates
 * the slice outputs with 24-bit length trailers, and every 32 frames in
 * PASS1 aggregates per-slice statistics into avctx->stats_out.
 * NOTE(review): numerous lines (buffer bookkeeping, returns) are missing
 * from this extraction. */
1122 static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
1123 FFV1Context *f = avctx->priv_data;
1124 RangeCoder * const c= &f->slice_context[0]->c;
1125 AVFrame *pict = data;
1126 AVFrame * const p= &f->picture;
1128 uint8_t keystate=128;
1132 ff_init_range_encoder(c, buf, buf_size);
1133 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1136 p->pict_type= AV_PICTURE_TYPE_I;
1138 if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
1139 put_rac(c, &keystate, 1);
1145 put_rac(c, &keystate, 0);
1150 used_count += ff_rac_terminate(c);
1151 //printf("pos=%d\n", used_count);
1152 init_put_bits(&f->slice_context[0]->pb, buf + used_count, buf_size - used_count);
/* apply the (possibly optimized) transition table to the coder */
1155 for(i=1; i<256; i++){
1156 c->one_state[i]= f->state_transition[i];
1157 c->zero_state[256-i]= 256-c->one_state[i];
1161 for(i=1; i<f->slice_count; i++){
1162 FFV1Context *fs= f->slice_context[i];
1163 uint8_t *start= buf + (buf_size-used_count)*i/f->slice_count;
1164 int len= buf_size/f->slice_count;
1167 ff_init_range_encoder(&fs->c, start, len);
1169 init_put_bits(&fs->pb, start, len);
1172 avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
1175 for(i=0; i<f->slice_count; i++){
1176 FFV1Context *fs= f->slice_context[i];
1181 put_rac(&fs->c, &state, 0);
1182 bytes= ff_rac_terminate(&fs->c);
1184 flush_put_bits(&fs->pb); //nicer padding FIXME
1185 bytes= used_count + (put_bits_count(&fs->pb)+7)/8;
1189 av_assert0(bytes < buf_size/f->slice_count);
1190 memmove(buf_p, fs->ac ? fs->c.bytestream_start : fs->pb.buf, bytes);
1191 av_assert0(bytes < (1<<24));
/* trailing 24-bit big-endian slice size, as the decoder expects */
1192 AV_WB24(buf_p+bytes, bytes);
1198 if((avctx->flags&CODEC_FLAG_PASS1) && (f->picture_number&31)==0){
1200 char *p= avctx->stats_out;
1201 char *end= p + STATS_OUT_SIZE;
1203 memset(f->rc_stat, 0, sizeof(f->rc_stat));
1204 for(i=0; i<f->quant_table_count; i++)
1205 memset(f->rc_stat2[i], 0, f->context_count[i]*sizeof(*f->rc_stat2[i]));
1207 for(j=0; j<f->slice_count; j++){
1208 FFV1Context *fs= f->slice_context[j];
1209 for(i=0; i<256; i++){
1210 f->rc_stat[i][0] += fs->rc_stat[i][0];
1211 f->rc_stat[i][1] += fs->rc_stat[i][1];
1213 for(i=0; i<f->quant_table_count; i++){
1214 for(k=0; k<f->context_count[i]; k++){
1215 for(m=0; m<32; m++){
1216 f->rc_stat2[i][k][m][0] += fs->rc_stat2[i][k][m][0];
1217 f->rc_stat2[i][k][m][1] += fs->rc_stat2[i][k][m][1];
1223 for(j=0; j<256; j++){
1224 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat[j][0], f->rc_stat[j][1]);
1227 snprintf(p, end-p, "\n");
1229 for(i=0; i<f->quant_table_count; i++){
1230 for(j=0; j<f->context_count[i]; j++){
1231 for(m=0; m<32; m++){
1232 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat2[i][j][m][0], f->rc_stat2[i][j][m][1]);
1237 snprintf(p, end-p, "%d\n", f->gob_count);
1238 } else if(avctx->flags&CODEC_FLAG_PASS1)
1239 avctx->stats_out[0] = '\0';
1241 f->picture_number++;
/* Shared teardown: releases the reference frame (decoder only), then frees
 * per-slice plane states, sample buffers, stats buffers, initial-state
 * tables and finally the slice contexts themselves.
 * NOTE(review): the return statement and some lines are missing. */
1246 static av_cold int common_end(AVCodecContext *avctx){
1247 FFV1Context *s = avctx->priv_data;
1250 if (avctx->codec->decode && s->picture.data[0])
1251 avctx->release_buffer(avctx, &s->picture);
1253 for(j=0; j<s->slice_count; j++){
1254 FFV1Context *fs= s->slice_context[j];
1255 for(i=0; i<s->plane_count; i++){
1256 PlaneContext *p= &fs->plane[i];
1258 av_freep(&p->state);
1259 av_freep(&p->vlc_state);
1261 av_freep(&fs->sample_buffer);
1264 av_freep(&avctx->stats_out);
1265 for(j=0; j<s->quant_table_count; j++){
1266 av_freep(&s->initial_states[j]);
1267 for(i=0; i<s->slice_count; i++){
1268 FFV1Context *sf= s->slice_context[i];
1269 av_freep(&sf->rc_stat2[j]);
1271 av_freep(&s->rc_stat2[j]);
1274 for(i=0; i<s->slice_count; i++){
1275 av_freep(&s->slice_context[i]);
/* Decodes one line of 'bits'-deep samples into sample[1], using sample[0]
 * as the previous line: per sample, computes the context, decodes the
 * residual via the range coder or run-length + Golomb-Rice, applies the
 * median predictor and masks to the sample range.
 * NOTE(review): the x loop and parts of the run-mode logic are missing. */
1281 static av_always_inline void decode_line(FFV1Context *s, int w,
1283 int plane_index, int bits)
1285 PlaneContext * const p= &s->plane[plane_index];
1286 RangeCoder * const c= &s->c;
1290 int run_index= s->run_index;
1293 int diff, context, sign;
1295 context= get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
1302 av_assert2(context < p->context_count);
1305 diff= get_symbol_inline(c, p->state[context], 1);
1307 if(context == 0 && run_mode==0) run_mode=1;
1310 if(run_count==0 && run_mode==1){
1311 if(get_bits1(&s->gb)){
1312 run_count = 1<<ff_log2_run[run_index];
1313 if(x + run_count <= w) run_index++;
1315 if(ff_log2_run[run_index]) run_count = get_bits(&s->gb, ff_log2_run[run_index]);
1317 if(run_index) run_index--;
1325 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1330 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1332 // printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, get_bits_count(&s->gb));
1335 if(sign) diff= -diff;
1337 sample[1][x]= (predict(sample[1] + x, sample[0] + x) + diff) & ((1<<bits)-1);
1339 s->run_index= run_index;
/* Decodes one plane line by line: swaps two padded sample-line buffers,
 * replicates edge samples into the padding, calls decode_line, then writes
 * the decoded samples to the destination as 8-bit or 16-bit values (shifted
 * to MSB position unless packed_at_lsb).
 * NOTE(review): loop headers and some lines are missing. */
1342 static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
1345 sample[0]=s->sample_buffer +3;
1346 sample[1]=s->sample_buffer+w+6+3;
1350 memset(s->sample_buffer, 0, 2*(w+6)*sizeof(*s->sample_buffer));
1353 int16_t *temp = sample[0]; //FIXME try a normal buffer
1355 sample[0]= sample[1];
1358 sample[1][-1]= sample[0][0 ];
1359 sample[0][ w]= sample[0][w-1];
1362 if(s->avctx->bits_per_raw_sample <= 8){
1363 decode_line(s, w, sample, plane_index, 8);
1365 src[x + stride*y]= sample[1][x];
1368 decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
1369 if(s->packed_at_lsb){
1371 ((uint16_t*)(src + stride*y))[x]= sample[1][x];
1375 ((uint16_t*)(src + stride*y))[x]= sample[1][x] << (16 - s->avctx->bits_per_raw_sample);
1379 //STOP_TIMER("decode-line")}
/*
 * Decode an RGB(A) frame: up to four planes (G, B, R, and optionally A),
 * each with its own two-row sample window, then pack into 32-bit pixels.
 * NOTE(review): truncated excerpt — several loop headers and the inverse
 * color transform lines are elided from this view.
 */
1383 static void decode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
1385 int16_t *sample[4][2];
/* per-plane row pair inside the shared sample buffer, 3-sample guard band */
1387 sample[x][0] = s->sample_buffer + x*2 *(w+6) + 3;
1388 sample[x][1] = s->sample_buffer + (x*2+1)*(w+6) + 3;
1393 memset(s->sample_buffer, 0, 8*(w+6)*sizeof(*s->sample_buffer));
/* decode G,B,R (+A when transparency) */
1396 for(p=0; p<3 + s->transparency; p++){
1397 int16_t *temp = sample[p][0]; //FIXME try a normal buffer
1399 sample[p][0]= sample[p][1];
1402 sample[p][1][-1]= sample[p][0][0 ];
1403 sample[p][0][ w]= sample[p][0][w-1];
/* (p+1)/2 maps G->plane 0, B/R->plane 1, A->plane 2; 9 bits per sample */
1404 decode_line(s, w, sample[p], (p+1)/2, 9);
1407 int g= sample[0][1][x];
1408 int b= sample[1][1][x];
1409 int r= sample[2][1][x];
1410 int a= sample[3][1][x];
1412 // assert(g>=0 && b>=0 && r>=0);
1413 // assert(g<256 && b<512 && r<512);
/* pack as BGRA in a native 32-bit word */
1421 src[x + stride*y]= b + (g<<8) + (r<<16) + (a<<24);
/*
 * Slice worker invoked via avctx->execute(); `arg` is a pointer to the
 * per-slice FFV1Context pointer. Dispatches to the YUV plane decoder
 * (colorspace 0) or the RGB frame decoder.
 * NOTE(review): truncated excerpt — the x/y slice-origin setup and the
 * return statement are elided from this view.
 */
1426 static int decode_slice(AVCodecContext *c, void *arg){
1427 FFV1Context *fs= *(void**)arg;
1428 FFV1Context *f= fs->avctx->priv_data;
1429 int width = fs->slice_width;
1430 int height= fs->slice_height;
/* bytes per sample in the output: 2 for >8-bit, else 1 */
1433 const int ps= (c->bits_per_raw_sample>8)+1;
1434 AVFrame * const p= &f->picture;
1436 av_assert1(width && height);
1437 if(f->colorspace==0){
/* -((-w)>>shift) is ceiling division by 2^shift */
1438 const int chroma_width = -((-width )>>f->chroma_h_shift);
1439 const int chroma_height= -((-height)>>f->chroma_v_shift);
1440 const int cx= x>>f->chroma_h_shift;
1441 const int cy= y>>f->chroma_v_shift;
1442 decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1444 if (f->chroma_planes){
1445 decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1446 decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1448 if (fs->transparency)
1449 decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
/* packed 32-bit RGB path; linesize converted from bytes to pixels */
1451 decode_rgb_frame(fs, (uint32_t*)p->data[0] + ps*x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
/*
 * Read one 256-entry quantization table: the first half [0..127] is coded
 * as runs of equal values; the second half is the mirrored negation.
 * Returns -1 if a run overflows the table; presumably returns the number of
 * distinct levels on success — TODO confirm (return line elided here).
 */
1459 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale){
1462 uint8_t state[CONTEXT_SIZE];
1464 memset(state, 128, sizeof(state));
/* v counts levels; i (advanced inside the loop, elided) walks 0..127 */
1466 for(v=0; i<128 ; v++){
1467 int len= get_symbol(c, state, 0) + 1;
1469 if(len + i > 128) return -1;
/* scale folds the context counts of previously-read tables into the value */
1472 quant_table[i] = scale*v;
1475 //if(i%16==0) printf("\n");
/* mirror: entry 256-i is the negation of entry i */
1479 for(i=1; i<128; i++){
1480 quant_table[256-i]= -quant_table[i];
1482 quant_table[128]= -quant_table[127];
/*
 * Read all MAX_CONTEXT_INPUTS quantization sub-tables; the total context
 * count is the product of the per-table level counts, rejected above 32768.
 * Returns the folded context count ((product+1)/2, since contexts come in
 * +/- pairs). NOTE(review): the loop header and error return are elided in
 * this truncated excerpt.
 */
1487 static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
1489 int context_count=1;
1492 context_count*= read_quant_table(c, quant_table[i], context_count);
1493 if(context_count > 32768U){
1497 return (context_count+1)/2;
/*
 * Parse the global out-of-band header from avctx->extradata (version >= 1
 * files): version, coder type, custom state-transition table, colorspace,
 * bit depth, chroma shifts, transparency, slice grid, quant tables, and
 * optional per-table initial context states.
 * NOTE(review): truncated excerpt — several returns/braces are elided.
 */
1500 static int read_extra_header(FFV1Context *f){
1501 RangeCoder * const c= &f->c;
1502 uint8_t state[CONTEXT_SIZE];
1504 uint8_t state2[32][CONTEXT_SIZE];
1506 memset(state2, 128, sizeof(state2));
1507 memset(state, 128, sizeof(state));
1509 ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size);
1510 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1512 f->version= get_symbol(c, state, 0);
1513 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
/* custom range-coder state transition table, coded as deltas */
1515 for(i=1; i<256; i++){
1516 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1519 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1520 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
/* chroma-planes flag is read but the value is discarded here */
1521 get_rac(c, state); //no chroma = false
1522 f->chroma_h_shift= get_symbol(c, state, 0);
1523 f->chroma_v_shift= get_symbol(c, state, 0);
1524 f->transparency= get_rac(c, state);
1525 f->plane_count= 2 + f->transparency;
1526 f->num_h_slices= 1 + get_symbol(c, state, 0);
1527 f->num_v_slices= 1 + get_symbol(c, state, 0);
/* a slice must be at least one pixel wide/tall */
1528 if(f->num_h_slices > (unsigned)f->width || f->num_v_slices > (unsigned)f->height){
1529 av_log(f->avctx, AV_LOG_ERROR, "too many slices\n");
1533 f->quant_table_count= get_symbol(c, state, 0);
1534 if(f->quant_table_count > (unsigned)MAX_QUANT_TABLES)
1536 for(i=0; i<f->quant_table_count; i++){
1537 if((f->context_count[i]= read_quant_tables(c, f->quant_tables[i])) < 0){
1538 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1543 if(allocate_initial_states(f) < 0)
1544 return AVERROR(ENOMEM);
/* optional per-table initial context states, delta-coded against the
 * previous context (or 128 for the first), wrapped to a byte */
1546 for(i=0; i<f->quant_table_count; i++){
1547 if(get_rac(c, state)){
1548 for(j=0; j<f->context_count[i]; j++){
1549 for(k=0; k<CONTEXT_SIZE; k++){
1550 int pred= j ? f->initial_states[i][j-1][k] : 128;
1551 f->initial_states[i][j][k]= (pred+get_symbol(c, state2[k], 1))&0xFF;
/*
 * Parse the per-keyframe header from the first slice's range coder:
 * stream parameters, pixel format selection, per-slice geometry, and
 * per-plane quantization table / context setup.
 * NOTE(review): truncated excerpt — many braces, returns and version
 * branches are elided; comments cover only the visible code.
 */
1560 static int read_header(FFV1Context *f){
1561 uint8_t state[CONTEXT_SIZE];
1562 int i, j, context_count;
1563 RangeCoder * const c= &f->slice_context[0]->c;
1565 memset(state, 128, sizeof(state));
/* version 0/1 files repeat the global parameters in-band */
1568 f->version= get_symbol(c, state, 0);
1569 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
1571 for(i=1; i<256; i++){
1572 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1575 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1577 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1578 f->chroma_planes= get_rac(c, state);
1579 f->chroma_h_shift= get_symbol(c, state, 0);
1580 f->chroma_v_shift= get_symbol(c, state, 0);
1581 f->transparency= get_rac(c, state);
1582 f->plane_count= 2 + f->transparency;
/* map (depth, chroma planes, transparency, subsampling) to a pix_fmt;
 * 16*h_shift + v_shift packs both shifts into one switch key */
1585 if(f->colorspace==0){
1586 if(f->avctx->bits_per_raw_sample>8 && !f->transparency && !f->chroma_planes){
1587 f->avctx->pix_fmt= PIX_FMT_GRAY16;
1588 }else if(f->avctx->bits_per_raw_sample<=8 && !f->transparency){
1589 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1590 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P; break;
1591 case 0x01: f->avctx->pix_fmt= PIX_FMT_YUV440P; break;
1592 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P; break;
1593 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P; break;
1594 case 0x20: f->avctx->pix_fmt= PIX_FMT_YUV411P; break;
1595 case 0x22: f->avctx->pix_fmt= PIX_FMT_YUV410P; break;
1597 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1600 }else if(f->avctx->bits_per_raw_sample<=8 && f->transparency){
1601 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1602 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUVA444P; break;
1603 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUVA420P; break;
1605 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1608 }else if(f->avctx->bits_per_raw_sample==9) {
1609 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1610 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
1611 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P16; break;
1612 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P9 ; f->packed_at_lsb=1; break;
1614 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1617 }else if(f->avctx->bits_per_raw_sample==10) {
1618 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1619 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
1620 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P10; f->packed_at_lsb=1; break;
1621 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P10; f->packed_at_lsb=1; break;
1623 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1627 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1628 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
1629 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P16; break;
1630 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P16; break;
1632 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1636 }else if(f->colorspace==1){
1637 if(f->chroma_h_shift || f->chroma_v_shift){
1638 av_log(f->avctx, AV_LOG_ERROR, "chroma subsampling not supported in this colorspace\n");
1641 if(f->transparency) f->avctx->pix_fmt= PIX_FMT_RGB32;
1642 else f->avctx->pix_fmt= PIX_FMT_0RGB32;
1644 av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
1648 //printf("%d %d %d\n", f->chroma_h_shift, f->chroma_v_shift,f->avctx->pix_fmt);
1650 context_count= read_quant_tables(c, f->quant_table);
1651 if(context_count < 0){
1652 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1656 f->slice_count= get_symbol(c, state, 0);
1657 if(f->slice_count > (unsigned)MAX_SLICES)
1661 for(j=0; j<f->slice_count; j++){
1662 FFV1Context *fs= f->slice_context[j];
1664 fs->packed_at_lsb= f->packed_at_lsb;
/* version >= 2 carries explicit slice geometry in grid units; the raw
 * values are scaled by width/height, then divided by the slice grid */
1666 if(f->version >= 2){
1667 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1668 fs->slice_y = get_symbol(c, state, 0) *f->height;
1669 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1670 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
1672 fs->slice_x /= f->num_h_slices;
1673 fs->slice_y /= f->num_v_slices;
1674 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1675 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
/* reject slices extending outside the frame (uint64_t avoids overflow) */
1676 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1678 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1679 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1683 for(i=0; i<f->plane_count; i++){
1684 PlaneContext * const p= &fs->plane[i];
1686 if(f->version >= 2){
1687 int idx=get_symbol(c, state, 0);
/* NOTE(review): suspected off-by-one — idx == quant_table_count passes
 * this check yet indexes one past the last table; upstream later changed
 * the comparison to >=. Cannot patch safely from this truncated view. */
1688 if(idx > (unsigned)f->quant_table_count){
1689 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
1692 p->quant_table_index= idx;
1693 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
1694 context_count= f->context_count[idx];
1696 memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
/* grow-only: drop state arrays so they are reallocated at the new size */
1699 if(p->context_count < context_count){
1700 av_freep(&p->state);
1701 av_freep(&p->vlc_state);
1703 p->context_count= context_count;
/*
 * Decoder init: parse the out-of-band global header (if extradata is
 * present) and set up per-slice contexts.
 * NOTE(review): truncated excerpt — error returns are elided.
 */
1710 static av_cold int decode_init(AVCodecContext *avctx)
1712 FFV1Context *f = avctx->priv_data;
1716 if(avctx->extradata && read_extra_header(f) < 0)
1719 if(init_slice_contexts(f) < 0)
/*
 * Decode one packet into an AVFrame. Keyframes re-read the in-band header;
 * multi-slice packets carry a 3-byte size trailer per slice, walked from
 * the end of the buffer. Slices are decoded in parallel via avctx->execute.
 * NOTE(review): truncated excerpt — several braces/returns are elided.
 */
1725 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
1726 const uint8_t *buf = avpkt->data;
1727 int buf_size = avpkt->size;
1728 FFV1Context *f = avctx->priv_data;
1729 RangeCoder * const c= &f->slice_context[0]->c;
1730 AVFrame * const p= &f->picture;
1732 uint8_t keystate= 128;
1733 const uint8_t *buf_p;
1735 AVFrame *picture = data;
1737 /* release previously stored data */
1739 avctx->release_buffer(avctx, p);
1741 ff_init_range_decoder(c, buf, buf_size);
1742 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1745 p->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
/* one rac bit with its own state distinguishes key from inter frames */
1746 if(get_rac(c, &keystate)){
1748 if(read_header(f) < 0)
1750 if(init_slice_state(f) < 0)
1759 for(i=1; i<256; i++){
/* install the (possibly custom) state transition table symmetrically */
1760 c->one_state[i]= f->state_transition[i];
1761 c->zero_state[256-i]= 256-c->one_state[i];
1766 if(avctx->get_buffer(avctx, p) < 0){
1767 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1771 if(avctx->debug&FF_DEBUG_PICT_INFO)
1772 av_log(avctx, AV_LOG_ERROR, "keyframe:%d coder:%d\n", p->key_frame, f->ac);
/* AC coder: find where the range-coded header ended to start the VLC data */
1775 bytes_read = c->bytestream - c->bytestream_start - 1;
1776 if(bytes_read ==0) av_log(avctx, AV_LOG_ERROR, "error at end of AC stream\n"); //FIXME
1777 //printf("pos=%d\n", bytes_read);
1778 init_get_bits(&f->slice_context[0]->gb, buf + bytes_read, (buf_size - bytes_read) * 8);
1780 bytes_read = 0; /* avoid warning */
/* walk the slice chain backwards: each slice ends with a 3-byte size */
1783 buf_p= buf + buf_size;
1784 for(i=f->slice_count-1; i>0; i--){
1785 FFV1Context *fs= f->slice_context[i];
1786 int v= AV_RB24(buf_p-3)+3;
1787 if(buf_p - buf <= v){
1788 av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
1793 ff_init_range_decoder(&fs->c, buf_p, v);
1795 init_get_bits(&fs->gb, buf_p, v * 8);
1799 avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
1800 f->picture_number++;
1803 *data_size = sizeof(AVFrame);
/* Decoder registration: FFV1 supports direct rendering and slice threads. */
1808 AVCodec ff_ffv1_decoder = {
1810 .type = AVMEDIA_TYPE_VIDEO,
1811 .id = CODEC_ID_FFV1,
1812 .priv_data_size = sizeof(FFV1Context),
1813 .init = decode_init,
1814 .close = common_end,
1815 .decode = decode_frame,
1816 .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ | CODEC_CAP_SLICE_THREADS,
1817 .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
1820 #if CONFIG_FFV1_ENCODER
1821 AVCodec ff_ffv1_encoder = {
1823 .type = AVMEDIA_TYPE_VIDEO,
1824 .id = CODEC_ID_FFV1,
1825 .priv_data_size = sizeof(FFV1Context),
1826 .init = encode_init,
1827 .encode = encode_frame,
1828 .close = common_end,
1829 .capabilities = CODEC_CAP_SLICE_THREADS,
1830 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUVA420P, PIX_FMT_YUV444P, PIX_FMT_YUVA444P, PIX_FMT_YUV440P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_0RGB32, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_YUV420P9, PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_GRAY16, PIX_FMT_NONE},
1831 .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),