2 * FFV1 codec for libavcodec
4 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * FF Video Codec 1 (a lossless codec)
33 #include "rangecoder.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/avassert.h"
40 #define CONTEXT_SIZE 32
42 #define MAX_QUANT_TABLES 8
43 #define MAX_CONTEXT_INPUTS 5
45 extern const uint8_t ff_log2_run[41];
/*
 * 5-level quantizer for 10-bit sample differences (indexed by the low 8
 * bits of the difference; positive half maps to {0,1,2}, negative half
 * mirrors to {-2,-1,-0}).
 * NOTE(review): this excerpt had stray line-number prefixes on every row
 * and was missing the closing brace; data values kept byte-identical.
 */
static const int8_t quant5_10bit[256]={
 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-0,-0,-0,-0,-0,-0,-0,-0,-0,-0,
};
/*
 * 5-level quantizer for 8-bit sample differences; symmetric around 0
 * (quant5[256-x] == -quant5[x] for the interior of the table).
 * NOTE(review): this excerpt had stray line-number prefixes on every row
 * and was missing the closing brace; data values kept byte-identical.
 */
static const int8_t quant5[256]={
 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1,-1,
};
/*
 * 9-level quantizer for 10-bit sample differences ({-4..4}, with the
 * negative half mirroring the positive half).
 * NOTE(review): this excerpt had stray line-number prefixes on every row
 * and was missing the closing brace; data values kept byte-identical.
 */
static const int8_t quant9_10bit[256]={
 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,
-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,
-3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
-2,-2,-2,-2,-1,-1,-1,-1,-1,-1,-1,-1,-0,-0,-0,-0,
};
/*
 * 11-level quantizer for 8-bit sample differences ({-5..5}, with the
 * negative half mirroring the positive half).
 * NOTE(review): this excerpt had stray line-number prefixes on every row
 * and was missing the closing brace; data values kept byte-identical.
 */
static const int8_t quant11[256]={
 0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-4,-4,
-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
-4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-1,
};
/*
 * Default range-coder state transition table used by FFV1 version 2
 * streams (index = current state byte, value = next state byte).
 * NOTE(review): this excerpt had stray line-number prefixes on every row
 * and was missing the closing brace; data values kept byte-identical.
 */
static const uint8_t ver2_state[256]= {
   0,  10,  10,  10,  10,  16,  16,  16,  28,  16,  16,  29,  42,  49,  20,  49,
  59,  25,  26,  26,  27,  31,  33,  33,  33,  34,  34,  37,  67,  38,  39,  39,
  40,  40,  41,  79,  43,  44,  45,  45,  48,  48,  64,  50,  51,  52,  88,  52,
  53,  74,  55,  57,  58,  58,  74,  60, 101,  61,  62,  84,  66,  66,  68,  69,
  87,  82,  71,  97,  73,  73,  82,  75, 111,  77,  94,  78,  87,  81,  83,  97,
  85,  83,  94,  86,  99,  89,  90,  99, 111,  92,  93, 134,  95,  98, 105,  98,
 105, 110, 102, 108, 102, 118, 103, 106, 106, 113, 109, 112, 114, 112, 116, 125,
 115, 116, 117, 117, 126, 119, 125, 121, 121, 123, 145, 124, 126, 131, 127, 129,
 165, 130, 132, 138, 133, 135, 145, 136, 137, 139, 146, 141, 143, 142, 144, 148,
 147, 155, 151, 149, 151, 150, 152, 157, 153, 154, 156, 168, 158, 162, 161, 160,
 172, 163, 169, 164, 166, 184, 167, 170, 177, 174, 171, 173, 182, 176, 180, 178,
 175, 189, 179, 181, 186, 183, 192, 185, 200, 187, 191, 188, 190, 197, 193, 196,
 197, 194, 195, 196, 198, 202, 199, 201, 210, 203, 207, 204, 205, 206, 208, 214,
 209, 211, 221, 212, 213, 215, 224, 216, 217, 218, 219, 220, 222, 228, 223, 225,
 226, 224, 227, 229, 240, 230, 231, 232, 233, 234, 235, 236, 238, 239, 237, 242,
 241, 243, 242, 244, 245, 246, 247, 248, 249, 250, 251, 252, 252, 253, 254, 255,
};
/*
 * Per-context adaptive Golomb-Rice coder state.
 * NOTE(review): the struct members (original lines 143..147) are missing
 * from this excerpt — the fields referenced elsewhere in the file are
 * drift, error_sum, bias and count; verify against the full file.
 */
142 typedef struct VlcState{
/*
 * State for one coded plane (luma, chroma, or alpha).
 * NOTE(review): the excerpt is incomplete — the embedded original line
 * numbers jump (152, 154 and the closing brace are missing, including
 * the context_count and vlc_state fields used elsewhere in this file).
 */
149 typedef struct PlaneContext{
/* quantization tables for the MAX_CONTEXT_INPUTS neighbour differences */
150     int16_t quant_table[MAX_CONTEXT_INPUTS][256];
151     int quant_table_index;
/* per-context range-coder state bytes, CONTEXT_SIZE per context */
153     uint8_t (*state)[CONTEXT_SIZE];
155     uint8_t interlace_bit_state[2];
158 #define MAX_SLICES 256
/*
 * Main codec context; one top-level instance per codec, plus one cloned
 * instance per slice (see slice_context / init_slice_contexts).
 * NOTE(review): incomplete in this excerpt — the embedded original line
 * numbers jump, so several members (e.g. the range coder, picture, width,
 * height, slice geometry) are missing here; verify against the full file.
 */
160 typedef struct FFV1Context{
161     AVCodecContext *avctx;
/* two-pass rate statistics: [symbol][0]=zero count, [1]=one count */
165     uint64_t rc_stat[256][2];
166     uint64_t (*rc_stat2[MAX_QUANT_TABLES])[32][2];
169     int chroma_h_shift, chroma_v_shift;
176     int ac; ///< 1=range coder <-> 0=golomb rice
177     PlaneContext plane[MAX_PLANES];
/* active quant table set, plus all candidate sets */
178     int16_t quant_table[MAX_CONTEXT_INPUTS][256];
179     int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][256];
180     int context_count[MAX_QUANT_TABLES];
/* range-coder one_state transition table written into the header */
181     uint8_t state_transition[256];
182     uint8_t (*initial_states[MAX_QUANT_TABLES])[32];
/* ring buffer of decoded/encoded sample lines (see encode/decode_plane) */
185     int16_t *sample_buffer;
189     int quant_table_count;
193     struct FFV1Context *slice_context[MAX_SLICES];
201     int bits_per_raw_sample;
/**
 * Fold a prediction residual into the signed range representable in
 * 'bits' bits, so residuals wrap modularly instead of enlarging the
 * symbol alphabet.
 *
 * @param diff prediction residual
 * @param bits bits per sample
 * @return diff reduced to [-(1<<(bits-1)), (1<<(bits-1))-1]
 *
 * NOTE(review): the body was missing from this excerpt; restored from the
 * upstream FFmpeg implementation (av_always_inline relaxed to inline) —
 * confirm against the full project tree.
 */
static inline int fold(int diff, int bits){
    if(bits==8)
        diff= (int8_t)diff;          /* 8-bit case: plain wrap via int8_t */
    else{
        diff+= 1<<(bits-1);          /* shift to unsigned range ...       */
        diff&=(1<<bits)-1;           /* ... wrap modulo 2^bits ...        */
        diff-= 1<<(bits-1);          /* ... shift back to signed range    */
    }

    return diff;
}
/**
 * Median (MED / LOCO-I style) predictor: predicts the current sample
 * from its left (L), top (T) and top-left (LT) neighbours as the median
 * of L, T and the gradient L+T-LT.
 *
 * @param src  pointer to the current sample position in the current line
 * @param last pointer to the same x position in the previous line
 * @return predicted sample value
 *
 * NOTE(review): only the braces were missing from this excerpt; the
 * visible statements are kept unchanged.
 */
static inline int predict(int16_t *src, int16_t *last)
{
    const int LT= last[-1];
    const int T= last[ 0];
    const int L = src[-1];

    return mid_pred(L, L + T - LT, T);
}
/*
 * Compute the modeling context index from quantized neighbour
 * differences: 3 inputs (L-LT, LT-T, T-RT) for the small model, 5 inputs
 * (adding LL-L and TT-T) when quant_table[3] is non-trivial.
 * NOTE(review): excerpt is incomplete (braces/lines missing per the
 * embedded original line numbers); code kept byte-identical.
 */
225 static inline int get_context(PlaneContext *p, int16_t *src,
226                               int16_t *last, int16_t *last2)
228     const int LT= last[-1];
229     const int T= last[ 0];
230     const int RT= last[ 1];
231     const int L = src[-1];
/* quant_table[3][127] != 0 means the 5-input quant table set is active */
233     if(p->quant_table[3][127]){
234         const int TT= last2[0];
235         const int LL= src[-2];
236         return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF]
237               +p->quant_table[3][(LL-L) & 0xFF] + p->quant_table[4][(TT-T) & 0xFF];
239         return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF];
/*
 * For every target probability i, search (via an occupancy/entropy
 * simulation over the state machine) the initial state byte that
 * minimizes expected code length; result goes into best_state[i][...].
 * NOTE(review): heavily truncated in this excerpt — loop headers,
 * variable declarations and closing braces are missing (embedded
 * original line numbers jump); code kept byte-identical.
 */
242 static void find_best_state(uint8_t best_state[256][256], const uint8_t one_state[256]){
/* log2 of p=i/256, used to price a bit coded at probability p */
247         l2tab[i]= log2(i/256.0);
249     for(i=0; i<256; i++){
250         double best_len[256];
256         for(j=FFMAX(i-10,1); j<FFMIN(i+11,256); j++){
260             for(k=0; k<256; k++){
261                 double newocc[256]={0};
/* accumulate expected bits over the state-occupancy distribution */
262                 for(m=0; m<256; m++){
264                         len -=occ[m]*( p *l2tab[ m]
265                                       + (1-p)*l2tab[256-m]);
268                 if(len < best_len[k]){
/* propagate occupancy through one_state for the next iteration */
272                 for(m=0; m<256; m++){
274                         newocc[    one_state[    m]] += occ[m]*   p ;
275                         newocc[256-one_state[256-m]] += occ[m]*(1-p);
278                 memcpy(occ, newocc, sizeof(occ));
/*
 * Range-code one (optionally signed) integer using the FFV1 exp-golomb
 * style state layout: state[0] = zero flag, state[1..10] = exponent
 * unary bits, state[11..21] = sign, state[22..31] = mantissa bits.
 * rc_stat/rc_stat2, when non-NULL, collect two-pass statistics.
 * NOTE(review): truncated excerpt — the #define body, branch structure
 * and closing braces are partly missing; code kept byte-identical.
 * Do not insert anything inside the backslash-continued macro lines.
 */
284 static av_always_inline av_flatten void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int is_signed, uint64_t rc_stat[256][2], uint64_t rc_stat2[32][2]){
287 #define put_rac(C,S,B) \
291         rc_stat2[(S)-state][B]++;\
297         const int a= FFABS(v);
298         const int e= av_log2(a);
299         put_rac(c, state+0, 0);
302                 put_rac(c, state+1+i, 1);  //1..10
304             put_rac(c, state+1+i, 0);
306             for(i=e-1; i>=0; i--){
307                 put_rac(c, state+22+i, (a>>i)&1); //22..31
311                 put_rac(c, state+11 + e, v < 0); //11..21
314                 put_rac(c, state+1+FFMIN(i,9), 1);  //1..10
316             put_rac(c, state+1+9, 0);
318             for(i=e-1; i>=0; i--){
319                 put_rac(c, state+22+FFMIN(i,9), (a>>i)&1); //22..31
323                 put_rac(c, state+11 + 10, v < 0); //11..21
326         put_rac(c, state+0, 1);
/* Non-inlined wrapper around put_symbol_inline without statistics
 * collection (used for header fields, not the hot sample loop).
 * NOTE(review): closing brace missing from this excerpt. */
331 static void av_noinline put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed){
332     put_symbol_inline(c, state, v, is_signed, NULL, NULL);
/*
 * Decoder counterpart of put_symbol_inline: reads zero flag, unary
 * exponent (states 1..10), mantissa bits (states 22..31) and optional
 * sign (states 11..21).
 * NOTE(review): truncated excerpt — declarations, the zero branch and
 * closing braces are missing; code kept byte-identical.
 */
335 static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed){
336     if(get_rac(c, state+0))
341         while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
346         for(i=e-1; i>=0; i--){
347             a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31
/* e becomes -1 (all ones) when negative, 0 when positive */
350         e= -(is_signed && get_rac(c, state+11 + FFMIN(e, 10))); //11..21
/* Non-inlined wrapper around get_symbol_inline (cold-path use).
 * NOTE(review): closing brace missing from this excerpt. */
355 static int av_noinline get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
356     return get_symbol_inline(c, state, is_signed);
/*
 * Adapt the per-context Golomb-Rice state after coding residual v:
 * accumulate |v| into error_sum, update drift, and nudge bias so the
 * residual distribution stays centered; counters are halved when count
 * reaches 128 to give an exponential forgetting window.
 * NOTE(review): truncated excerpt — several statements and braces are
 * missing (embedded original line numbers jump); code kept byte-identical.
 */
359 static inline void update_vlc_state(VlcState * const state, const int v){
360     int drift= state->drift;
361     int count= state->count;
362     state->error_sum += FFABS(v);
365     if(count == 128){ //FIXME variable
368         state->error_sum >>= 1;
/* bias saturates at the int8 range */
373         if(state->bias > -128) state->bias--;
379         if(state->bias < 127) state->bias++;
/*
 * Golomb-Rice encode one residual: remove bias, fold into range, pick
 * the Rice parameter k from error_sum, apply the sign-mapping trick,
 * write the code, then adapt the context state.
 * NOTE(review): truncated excerpt — k computation and braces are
 * partially missing; code kept byte-identical.
 */
390 static inline void put_vlc_symbol(PutBitContext *pb, VlcState * const state, int v, int bits){
392 //printf("final: %d ", v);
393     v = fold(v - state->bias, bits);
/* choose k ~ log2(error_sum/count) */
397     while(i < state->error_sum){ //FIXME optimize
/* map v so the more probable sign costs less; >>31 yields 0 or -1 */
405     if(k==0 && 2*state->drift <= - state->count) code= v ^ (-1);
408         code= v ^ ((2*state->drift + state->count)>>31);
411 //printf("v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code, state->bias, state->error_sum, state->drift, state->count, k);
412     set_sr_golomb(pb, code, k, 12, bits);
414     update_vlc_state(state, v);
/*
 * Golomb-Rice decode one residual: mirror of put_vlc_symbol — derive k
 * from error_sum, read the code, undo the sign mapping, re-add bias and
 * fold, then adapt the context state.
 * NOTE(review): truncated excerpt — declarations and braces are
 * partially missing; code kept byte-identical.
 */
417 static inline int get_vlc_symbol(GetBitContext *gb, VlcState * const state, int bits){
422     while(i < state->error_sum){ //FIXME optimize
429     v= get_sr_golomb(gb, k, 12, bits);
430 //printf("v:%d bias:%d error:%d drift:%d count:%d k:%d", v, state->bias, state->error_sum, state->drift, state->count, k);
/* undo the encoder's sign mapping (see put_vlc_symbol) */
433     if(k==0 && 2*state->drift <= - state->count) v ^= (-1);
435         v ^= ((2*state->drift + state->count)>>31);
438     ret= fold(v + state->bias, bits);
440     update_vlc_state(state, v);
441 //printf("final: %d\n", ret);
445 #if CONFIG_FFV1_ENCODER
/*
 * Encode one line of 'w' samples: compute the context, median-predict,
 * fold the residual, then either range-code it (s->ac) or Golomb-Rice
 * code it with run-length coding of zero-context runs.
 * NOTE(review): heavily truncated excerpt — the x loop, run handling
 * and return statements are partly missing (embedded original line
 * numbers jump); code kept byte-identical.
 */
446 static av_always_inline int encode_line(FFV1Context *s, int w,
448                                         int plane_index, int bits)
450     PlaneContext * const p= &s->plane[plane_index];
451     RangeCoder * const c= &s->c;
453     int run_index= s->run_index;
/* worst-case output space checks before coding the line */
458         if(c->bytestream_end - c->bytestream < w*20){
459             av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
463         if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < w*4){
464             av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
472         context= get_context(p, sample[0]+x, sample[1]+x, sample[2]+x);
473         diff= sample[0][x] - predict(sample[0]+x, sample[1]+x);
480         diff= fold(diff, bits);
/* range-coder path; PASS1 additionally gathers rc statistics */
483             if(s->flags & CODEC_FLAG_PASS1){
484                 put_symbol_inline(c, p->state[context], diff, 1, s->rc_stat, s->rc_stat2[p->quant_table_index][context]);
486                 put_symbol_inline(c, p->state[context], diff, 1, NULL, NULL);
/* Golomb-Rice path with run-mode for zero contexts */
489                 if(context == 0) run_mode=1;
494                     while(run_count >= 1<<ff_log2_run[run_index]){
495                         run_count -= 1<<ff_log2_run[run_index];
497                         put_bits(&s->pb, 1, 1);
500                         put_bits(&s->pb, 1 + ff_log2_run[run_index], run_count);
501                         if(run_index) run_index--;
510 //                printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, (int)put_bits_count(&s->pb));
513                 put_vlc_symbol(&s->pb, &p->vlc_state[context], diff, bits);
/* flush a pending run at the end of the line */
517         while(run_count >= 1<<ff_log2_run[run_index]){
518             run_count -= 1<<ff_log2_run[run_index];
520             put_bits(&s->pb, 1, 1);
524             put_bits(&s->pb, 1, 1);
526     s->run_index= run_index;
/*
 * Encode one full plane: maintain a ring of 2 (or 3 with the large
 * context model) sample lines in sample_buffer, load each source line
 * (8-bit, or 16-bit packed at LSB/MSB) and call encode_line.
 * NOTE(review): truncated excerpt — the y/x loops and braces are partly
 * missing (embedded original line numbers jump); code kept byte-identical.
 */
531 static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
/* 3 lines needed when the 5-input context model references line y-2 */
533     const int ring_size= s->avctx->context_model ? 3 : 2;
537     memset(s->sample_buffer, 0, ring_size*(w+6)*sizeof(*s->sample_buffer));
540         for(i=0; i<ring_size; i++)
541             sample[i]= s->sample_buffer + (w+6)*((h+i-y)%ring_size) + 3;
/* edge extension: left column and right column duplicated */
543         sample[0][-1]= sample[1][0  ];
544         sample[1][ w]= sample[1][w-1];
546         if(s->bits_per_raw_sample<=8){
548                 sample[0][x]= src[x + stride*y];
550             encode_line(s, w, sample, plane_index, 8);
552             if(s->packed_at_lsb){
554                     sample[0][x]= ((uint16_t*)(src + stride*y))[x];
558                     sample[0][x]= ((uint16_t*)(src + stride*y))[x] >> (16 - s->bits_per_raw_sample);
561             encode_line(s, w, sample, plane_index, s->bits_per_raw_sample);
563 //STOP_TIMER("encode line")}
/*
 * Encode a packed 32-bit RGB(A) frame: split each pixel into the
 * JPEG2000-RCT planes and encode up to 3+transparency planes at 9 bits
 * per sample via encode_line.
 * NOTE(review): truncated excerpt — the RCT transform statements and
 * loop braces are partly missing; code kept byte-identical.
 */
567 static void encode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
569     const int ring_size= s->avctx->context_model ? 3 : 2;
570     int16_t *sample[4][3];
573     memset(s->sample_buffer, 0, ring_size*4*(w+6)*sizeof(*s->sample_buffer));
576         for(i=0; i<ring_size; i++)
578                 sample[p][i]= s->sample_buffer + p*ring_size*(w+6) + ((h+i-y)%ring_size)*(w+6) + 3;
581             unsigned v= src[x + stride*y];
593 //            assert(g>=0 && b>=0 && r>=0);
594 //            assert(g<256 && b<512 && r<512);
600         for(p=0; p<3 + s->transparency; p++){
/* same edge extension as encode_plane, per component plane */
601             sample[p][0][-1]= sample[p][1][0  ];
602             sample[p][1][ w]= sample[p][1][w-1];
603             encode_line(s, w, sample[p], (p+1)/2, 9);
/*
 * Serialize one 256-entry quant table as run lengths between value
 * changes over the first half (the second half is the mirrored
 * negation and is not transmitted).
 * NOTE(review): truncated excerpt — 'last' bookkeeping and braces are
 * missing; code kept byte-identical.
 */
608 static void write_quant_table(RangeCoder *c, int16_t *quant_table){
611     uint8_t state[CONTEXT_SIZE];
612     memset(state, 128, sizeof(state));
614     for(i=1; i<128 ; i++){
615         if(quant_table[i] != quant_table[i-1]){
616             put_symbol(c, state, i-last-1, 0);
/* final run up to index 127 */
620     put_symbol(c, state, i-last-1, 0);
/* Serialize all MAX_CONTEXT_INPUTS quant tables of one set.
 * NOTE(review): the loop header and closing brace are missing from this
 * excerpt; code kept byte-identical. */
623 static void write_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
626         write_quant_table(c, quant_table[i]);
/*
 * Write the per-frame (keyframe) header into the first slice's range
 * coder: version, coder type, state transition deltas, colorspace,
 * bit depth, chroma subsampling, transparency, quant tables, and (for
 * version>=2 style headers) the slice grid and per-plane table indices.
 * NOTE(review): truncated excerpt — version branches and braces are
 * partly missing (embedded original line numbers jump); code kept
 * byte-identical.
 */
629 static void write_header(FFV1Context *f){
630     uint8_t state[CONTEXT_SIZE];
632     RangeCoder * const c= &f->slice_context[0]->c;
634     memset(state, 128, sizeof(state));
637         put_symbol(c, state, f->version, 0);
638         put_symbol(c, state, f->ac, 0);
/* transmit custom state transition as deltas vs the default table */
640             for(i=1; i<256; i++){
641                 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
644         put_symbol(c, state, f->colorspace, 0); //YUV cs type
646             put_symbol(c, state, f->bits_per_raw_sample, 0);
647         put_rac(c, state, f->chroma_planes);
648         put_symbol(c, state, f->chroma_h_shift, 0);
649         put_symbol(c, state, f->chroma_v_shift, 0);
650         put_rac(c, state, f->transparency);
652         write_quant_tables(c, f->quant_table);
654         put_symbol(c, state, f->slice_count, 0);
655         for(i=0; i<f->slice_count; i++){
656             FFV1Context *fs= f->slice_context[i];
/* slice geometry coded as grid fractions of the frame size */
657             put_symbol(c, state, (fs->slice_x     +1)*f->num_h_slices / f->width   , 0);
658             put_symbol(c, state, (fs->slice_y     +1)*f->num_v_slices / f->height  , 0);
659             put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
660             put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
661             for(j=0; j<f->plane_count; j++){
662                 put_symbol(c, state, f->plane[j].quant_table_index, 0);
663                 av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
668 #endif /* CONFIG_FFV1_ENCODER */
/*
 * Shared encoder/decoder initialization: cache flags and dimensions,
 * set frame defaults and init DSP helpers.
 * NOTE(review): truncated excerpt — several statements and the return
 * are missing; code kept byte-identical.
 */
670 static av_cold int common_init(AVCodecContext *avctx){
671     FFV1Context *s = avctx->priv_data;
674     s->flags= avctx->flags;
676     avcodec_get_frame_defaults(&s->picture);
678     ff_dsputil_init(&s->dsp, avctx);
680     s->width = avctx->width;
681     s->height= avctx->height;
683     assert(s->width && s->height);
/*
 * Allocate per-slice, per-plane context state (range-coder state bytes
 * and VLC state) and refresh each slice's range-coder transition tables
 * from state_transition.
 * NOTE(review): truncated excerpt — context_count propagation and
 * braces are partly missing; code kept byte-identical.
 */
692 static int init_slice_state(FFV1Context *f){
695     for(i=0; i<f->slice_count; i++){
696         FFV1Context *fs= f->slice_context[i];
697         fs->plane_count= f->plane_count;
698         fs->transparency= f->transparency;
699         for(j=0; j<f->plane_count; j++){
700             PlaneContext * const p= &fs->plane[j];
/* lazily allocated; reused across frames */
703                 if(!p->    state) p->    state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t));
705                     return AVERROR(ENOMEM);
707                 if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(VlcState));
709                     return AVERROR(ENOMEM);
714 //FIXME only redo if state_transition changed
715             for(j=1; j<256; j++){
716                 fs->c.one_state [    j]= fs->state_transition[j];
717                 fs->c.zero_state[256-j]= 256-fs->c.one_state [j];
/*
 * Create one cloned FFV1Context per slice of the num_h_slices x
 * num_v_slices grid, compute its geometry, and allocate its sample
 * ring buffer.
 * NOTE(review): truncated excerpt — slice_x/slice_y assignments,
 * allocation checks and the return are partly missing; code kept
 * byte-identical. Missing av_mallocz NULL-check for fs may also be in
 * the elided lines — verify against the full file.
 */
725 static av_cold int init_slice_contexts(FFV1Context *f){
728     f->slice_count= f->num_h_slices * f->num_v_slices;
730     for(i=0; i<f->slice_count; i++){
731         FFV1Context *fs= av_mallocz(sizeof(*fs));
732         int sx= i % f->num_h_slices;
733         int sy= i / f->num_h_slices;
/* slice bounds by integer grid division of the frame */
734         int sxs= f->avctx->width * sx    / f->num_h_slices;
735         int sxe= f->avctx->width *(sx+1) / f->num_h_slices;
736         int sys= f->avctx->height* sy    / f->num_v_slices;
737         int sye= f->avctx->height*(sy+1) / f->num_v_slices;
738         f->slice_context[i]= fs;
/* clone the parent context, then clear pointers owned per-slice */
739         memcpy(fs, f, sizeof(*fs));
740         memset(fs->rc_stat2, 0, sizeof(fs->rc_stat2));
742         fs->slice_width = sxe - sxs;
743         fs->slice_height= sye - sys;
/* 3 lines x 4 planes worst case (RGB path) */
747         fs->sample_buffer = av_malloc(3*4 * (fs->width+6) * sizeof(*fs->sample_buffer));
748         if (!fs->sample_buffer)
749             return AVERROR(ENOMEM);
/*
 * Allocate the per-quant-table initial range-coder states and fill them
 * with the neutral probability 128.
 * NOTE(review): closing braces/return missing from this excerpt; code
 * kept byte-identical.
 */
754 static int allocate_initial_states(FFV1Context *f){
757     for(i=0; i<f->quant_table_count; i++){
758         f->initial_states[i]= av_malloc(f->context_count[i]*sizeof(*f->initial_states[i]));
759         if(!f->initial_states[i])
760             return AVERROR(ENOMEM);
761         memset(f->initial_states[i], 128, f->context_count[i]*sizeof(*f->initial_states[i]));
766 #if CONFIG_FFV1_ENCODER
/*
 * Build the version>=2 global extradata header: stream parameters,
 * custom state transition deltas, slice grid, all quant table sets and
 * (optionally) non-default initial states, range-coded into
 * avctx->extradata.
 * NOTE(review): truncated excerpt — allocation checks and braces are
 * partly missing (embedded original line numbers jump); code kept
 * byte-identical. The av_malloc result is not NULL-checked in the
 * visible lines — verify against the full file.
 */
767 static int write_extra_header(FFV1Context *f){
768     RangeCoder * const c=&f->c;
769     uint8_t state[CONTEXT_SIZE];
771     uint8_t state2[32][CONTEXT_SIZE];
773     memset(state2, 128, sizeof(state2));
774     memset(state, 128, sizeof(state));
/* worst-case extradata size for all tables and initial states */
776     f->avctx->extradata= av_malloc(f->avctx->extradata_size= 10000 + (11*11*5*5*5+11*11*11)*32);
777     ff_init_range_encoder(c, f->avctx->extradata, f->avctx->extradata_size);
778     ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
780     put_symbol(c, state, f->version, 0);
781     put_symbol(c, state, f->ac, 0);
783         for(i=1; i<256; i++){
784             put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
787     put_symbol(c, state, f->colorspace, 0); //YUV cs type
788     put_symbol(c, state, f->bits_per_raw_sample, 0);
789     put_rac(c, state, f->chroma_planes);
790     put_symbol(c, state, f->chroma_h_shift, 0);
791     put_symbol(c, state, f->chroma_v_shift, 0);
792     put_rac(c, state, f->transparency);
793     put_symbol(c, state, f->num_h_slices-1, 0);
794     put_symbol(c, state, f->num_v_slices-1, 0);
796     put_symbol(c, state, f->quant_table_count, 0);
797     for(i=0; i<f->quant_table_count; i++)
798         write_quant_tables(c, f->quant_tables[i]);
/* only transmit initial states that differ from the neutral 128 */
800     for(i=0; i<f->quant_table_count; i++){
801         for(j=0; j<f->context_count[i]*CONTEXT_SIZE; j++)
802             if(f->initial_states[i] && f->initial_states[i][0][j] != 128)
804         if(j<f->context_count[i]*CONTEXT_SIZE){
805             put_rac(c, state, 1);
806             for(j=0; j<f->context_count[i]; j++){
807                 for(k=0; k<CONTEXT_SIZE; k++){
/* delta-predict each state byte from the previous context */
808                     int pred= j ? f->initial_states[i][j-1][k] : 128;
809                     put_symbol(c, state2[k], (int8_t)(f->initial_states[i][j][k]-pred), 1);
813             put_rac(c, state, 0);
817     f->avctx->extradata_size= ff_rac_terminate(c);
/*
 * Greedy optimization of the state transition table from collected
 * two-pass statistics: try swapping nearby state values and keep a swap
 * when the modeled cost (COST2) decreases, remapping all references in
 * stt and the symmetric half. State 128 is pinned.
 * NOTE(review): truncated excerpt — the COST/COST2 macro bodies are
 * split by missing continuation lines, and several braces/returns are
 * gone; code kept byte-identical. Do not insert anything inside the
 * backslash-continued macro lines.
 */
822 static int sort_stt(FFV1Context *s, uint8_t stt[256]){
823     int i,i2,changed,print=0;
827         for(i=12; i<244; i++){
828             for(i2=i+1; i2<245 && i2<i+4; i2++){
829 #define COST(old, new) \
830     s->rc_stat[old][0]*-log2((256-(new))/256.0)\
831    +s->rc_stat[old][1]*-log2(     (new) /256.0)
833 #define COST2(old, new) \
835    +COST(256-(old), 256-(new))
837                 double size0= COST2(i, i ) + COST2(i2, i2);
838                 double sizeX= COST2(i, i2) + COST2(i2, i );
839                 if(sizeX < size0 && i!=128 && i2!=128){
/* swap the pair and its mirrored counterpart, then fix references */
841                     FFSWAP(int, stt[    i], stt[    i2]);
842                     FFSWAP(int, s->rc_stat[i    ][0],s->rc_stat[    i2][0]);
843                     FFSWAP(int, s->rc_stat[i    ][1],s->rc_stat[    i2][1]);
845                         FFSWAP(int, stt[256-i], stt[256-i2]);
846                         FFSWAP(int, s->rc_stat[256-i][0],s->rc_stat[256-i2][0]);
847                         FFSWAP(int, s->rc_stat[256-i][1],s->rc_stat[256-i2][1]);
849                     for(j=1; j<256; j++){
850                         if     (stt[j] == i ) stt[j] = i2;
851                         else if(stt[j] == i2) stt[j] = i ;
853                             if     (stt[256-j] == 256-i ) stt[256-j] = 256-i2;
854                             else if(stt[256-j] == 256-i2) stt[256-j] = 256-i ;
/*
 * Encoder initialization: choose coder type, set the state transition
 * table, validate/derive bit depth and plane layout from pix_fmt, build
 * both quant table sets, set up planes, initial states, slice contexts
 * and (for two-pass) parse stats_in and optimize the state machine.
 * NOTE(review): heavily truncated excerpt — many statements, the switch
 * breaks/defaults and error paths are missing (embedded original line
 * numbers jump); code kept byte-identical. The 9/10-bit cases appear to
 * fall through intentionally to accumulate format properties — confirm
 * against the full file.
 */
865 static av_cold int encode_init(AVCodecContext *avctx)
867     FFV1Context *s = avctx->priv_data;
/* coder_type!=0 selects the range coder (ac=2 = custom state table) */
873     s->ac= avctx->coder_type ? 2:0;
877         s->state_transition[i]=ver2_state[i];
880     switch(avctx->pix_fmt){
881     case PIX_FMT_YUV444P9:
882     case PIX_FMT_YUV422P9:
883     case PIX_FMT_YUV420P9:
884         if (!avctx->bits_per_raw_sample)
885             s->bits_per_raw_sample = 9;
886     case PIX_FMT_YUV444P10:
887     case PIX_FMT_YUV420P10:
888     case PIX_FMT_YUV422P10:
889         s->packed_at_lsb = 1;
890         if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
891             s->bits_per_raw_sample = 10;
893     case PIX_FMT_YUV444P16:
894     case PIX_FMT_YUV422P16:
895     case PIX_FMT_YUV420P16:
896         if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) {
897             s->bits_per_raw_sample = 16;
898         } else if (!s->bits_per_raw_sample){
899             s->bits_per_raw_sample = avctx->bits_per_raw_sample;
901         if(s->bits_per_raw_sample <=8){
902             av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n");
906             av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample of more than 8 needs -coder 1 currently\n");
909         s->version= FFMAX(s->version, 1);
911     case PIX_FMT_YUV444P:
912     case PIX_FMT_YUV440P:
913     case PIX_FMT_YUV422P:
914     case PIX_FMT_YUV420P:
915     case PIX_FMT_YUV411P:
916     case PIX_FMT_YUV410P:
917         s->chroma_planes= av_pix_fmt_descriptors[avctx->pix_fmt].nb_components < 3 ? 0 : 1;
920     case PIX_FMT_YUVA444P:
921     case PIX_FMT_YUVA420P:
934         av_log(avctx, AV_LOG_ERROR, "format not supported\n");
937     if (s->transparency) {
938         av_log(avctx, AV_LOG_WARNING, "Storing alpha plane, this will require a recent FFV1 decoder to playback!\n");
940     if (avctx->context_model > 1U) {
941         av_log(avctx, AV_LOG_ERROR, "Invalid context model %d, valid values are 0 and 1\n", avctx->context_model);
942         return AVERROR(EINVAL);
/* build both quant table sets: [0]=small (3 inputs), [1]=large (5) */
945     for(i=0; i<256; i++){
946         s->quant_table_count=2;
947         if(s->bits_per_raw_sample <=8){
948             s->quant_tables[0][0][i]=           quant11[i];
949             s->quant_tables[0][1][i]=        11*quant11[i];
950             s->quant_tables[0][2][i]=     11*11*quant11[i];
951             s->quant_tables[1][0][i]=           quant11[i];
952             s->quant_tables[1][1][i]=        11*quant11[i];
953             s->quant_tables[1][2][i]=     11*11*quant5 [i];
954             s->quant_tables[1][3][i]=   5*11*11*quant5 [i];
955             s->quant_tables[1][4][i]= 5*5*11*11*quant5 [i];
957             s->quant_tables[0][0][i]=           quant9_10bit[i];
958             s->quant_tables[0][1][i]=        11*quant9_10bit[i];
959             s->quant_tables[0][2][i]=     11*11*quant9_10bit[i];
960             s->quant_tables[1][0][i]=           quant9_10bit[i];
961             s->quant_tables[1][1][i]=        11*quant9_10bit[i];
962             s->quant_tables[1][2][i]=     11*11*quant5_10bit[i];
963             s->quant_tables[1][3][i]=   5*11*11*quant5_10bit[i];
964             s->quant_tables[1][4][i]= 5*5*11*11*quant5_10bit[i];
/* (n^k+1)/2 contexts: symmetric halves share a context */
967     s->context_count[0]= (11*11*11+1)/2;
968     s->context_count[1]= (11*11*5*5*5+1)/2;
969     memcpy(s->quant_table, s->quant_tables[avctx->context_model], sizeof(s->quant_table));
971     for(i=0; i<s->plane_count; i++){
972         PlaneContext * const p= &s->plane[i];
974         memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table));
975         p->quant_table_index= avctx->context_model;
976         p->context_count= s->context_count[p->quant_table_index];
979     if(allocate_initial_states(s) < 0)
980         return AVERROR(ENOMEM);
982     avctx->coded_frame= &s->picture;
985     avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
989     if(avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
990         for(i=0; i<s->quant_table_count; i++){
991             s->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*s->rc_stat2[i]));
993                 return AVERROR(ENOMEM);
/* parse the PASS1 stats file back in for PASS2 */
997         char *p= avctx->stats_in;
998         uint8_t best_state[256][256];
1002         av_assert0(s->version>=2);
1005         for(j=0; j<256; j++){
1007                 s->rc_stat[j][i]= strtol(p, &next, 0);
1009                     av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d [%s]\n", j,i,p);
1015         for(i=0; i<s->quant_table_count; i++){
1016             for(j=0; j<s->context_count[i]; j++){
1017                 for(k=0; k<32; k++){
1019                         s->rc_stat2[i][j][k][m]= strtol(p, &next, 0);
1021                             av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d %d %d [%s]\n", i,j,k,m,p);
1029         gob_count= strtol(p, &next, 0);
1030         if(next==p || gob_count <0){
1031             av_log(avctx, AV_LOG_ERROR, "2Pass file invalid\n");
1035         while(*p=='\n' || *p==' ') p++;
/* optimize the state machine for the measured distribution */
1038         sort_stt(s, s->state_transition);
1040             find_best_state(best_state, s->state_transition);
1042         for(i=0; i<s->quant_table_count; i++){
1043             for(j=0; j<s->context_count[i]; j++){
1044                 for(k=0; k<32; k++){
1046                     if(s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]){
1047                         p=256.0*s->rc_stat2[i][j][k][1] / (s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]);
1049                     s->initial_states[i][j][k]= best_state[av_clip(round(p), 1, 255)][av_clip((s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1])/gob_count, 0, 255)];
1058         write_extra_header(s);
1061     if(init_slice_contexts(s) < 0)
1063     if(init_slice_state(s) < 0)
1066 #define STATS_OUT_SIZE 1024*1024*6
1067     if(avctx->flags & CODEC_FLAG_PASS1){
1068         avctx->stats_out= av_mallocz(STATS_OUT_SIZE);
1069         for(i=0; i<s->quant_table_count; i++){
1070             for(j=0; j<s->slice_count; j++){
1071                 FFV1Context *sf= s->slice_context[j];
1072                 av_assert0(!sf->rc_stat2[i]);
1073                 sf->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*sf->rc_stat2[i]));
1074                 if(!sf->rc_stat2[i])
1075                     return AVERROR(ENOMEM);
1082 #endif /* CONFIG_FFV1_ENCODER */
/*
 * Reset all adaptive state (range-coder contexts or VLC contexts) of
 * every slice to the initial values; called at keyframes.
 * NOTE(review): truncated excerpt — the ac/vlc branch structure and
 * braces are partly missing; code kept byte-identical.
 */
1085 static void clear_state(FFV1Context *f){
1088     for(si=0; si<f->slice_count; si++){
1089         FFV1Context *fs= f->slice_context[si];
1090         for(i=0; i<f->plane_count; i++){
1091             PlaneContext *p= &fs->plane[i];
1093             p->interlace_bit_state[0]= 128;
1094             p->interlace_bit_state[1]= 128;
/* use trained initial states when available, else neutral 128 */
1097                 if(f->initial_states[p->quant_table_index]){
1098                     memcpy(p->state, f->initial_states[p->quant_table_index], CONTEXT_SIZE*p->context_count);
1100                     memset(p->state, 128, CONTEXT_SIZE*p->context_count);
1102                 for(j=0; j<p->context_count; j++){
1103                     p->vlc_state[j].drift= 0;
1104                     p->vlc_state[j].error_sum= 4; //FFMAX((RANGE + 32)/64, 2);
1105                     p->vlc_state[j].bias= 0;
1106                     p->vlc_state[j].count= 1;
1113 #if CONFIG_FFV1_ENCODER
/*
 * Thread worker (avctx->execute callback): encode one slice — planar
 * YUV(+alpha) path via encode_plane, or packed RGB path via
 * encode_rgb_frame.
 * NOTE(review): truncated excerpt — x/y slice-origin declarations and
 * the return are missing; code kept byte-identical.
 */
1114 static int encode_slice(AVCodecContext *c, void *arg){
1115     FFV1Context *fs= *(void**)arg;
1116     FFV1Context *f= fs->avctx->priv_data;
1117     int width = fs->slice_width;
1118     int height= fs->slice_height;
1121     AVFrame * const p= &f->picture;
/* bytes per sample: 2 for >8-bit content, else 1 */
1122     const int ps= (f->bits_per_raw_sample>8)+1;
1124     if(f->colorspace==0){
/* ceil-divide by the chroma shift */
1125         const int chroma_width = -((-width )>>f->chroma_h_shift);
1126         const int chroma_height= -((-height)>>f->chroma_v_shift);
1127         const int cx= x>>f->chroma_h_shift;
1128         const int cy= y>>f->chroma_v_shift;
1130         encode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1132         if (f->chroma_planes){
1133             encode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1134             encode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1136         if (fs->transparency)
1137             encode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
1139         encode_rgb_frame(fs, (uint32_t*)(p->data[0]) + ps*x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
/*
 * Top-level frame encoder: allocate the packet, write the keyframe flag
 * and (on keyframes) the header, carve the packet into per-slice
 * buffers, run encode_slice in parallel, then concatenate slice
 * payloads (with 3-byte length suffixes) and optionally emit PASS1
 * statistics into avctx->stats_out.
 * NOTE(review): heavily truncated excerpt — declarations (ret, i,
 * used_count, buf_p, state, bytes...), branch structure and the return
 * are missing (embedded original line numbers jump); code kept
 * byte-identical.
 */
1146 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1147                         const AVFrame *pict, int *got_packet)
1149     FFV1Context *f = avctx->priv_data;
1150     RangeCoder * const c= &f->slice_context[0]->c;
1151     AVFrame * const p= &f->picture;
1153     uint8_t keystate=128;
/* worst-case packet size for lossless content */
1157     if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width*avctx->height*((8*2+1+1)*4)/8
1158                                   + FF_MIN_BUFFER_SIZE)) < 0) {
1162     ff_init_range_encoder(c, pkt->data, pkt->size);
1163     ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1166     p->pict_type= AV_PICTURE_TYPE_I;
1168     if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
1169         put_rac(c, &keystate, 1);
1175         put_rac(c, &keystate, 0);
1180         used_count += ff_rac_terminate(c);
1181 //printf("pos=%d\n", used_count);
1182         init_put_bits(&f->slice_context[0]->pb, pkt->data + used_count, pkt->size - used_count);
/* refresh the first slice's transition tables (ac==2 custom table) */
1185         for(i=1; i<256; i++){
1186             c->one_state[i]= f->state_transition[i];
1187             c->zero_state[256-i]= 256-c->one_state[i];
/* give each remaining slice an even share of the packet buffer */
1191     for(i=1; i<f->slice_count; i++){
1192         FFV1Context *fs= f->slice_context[i];
1193         uint8_t *start = pkt->data + (pkt->size-used_count)*i/f->slice_count;
1194         int len = pkt->size/f->slice_count;
1197             ff_init_range_encoder(&fs->c, start, len);
1199             init_put_bits(&fs->pb, start, len);
1202     avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
/* pack the slices back-to-back, appending a 24-bit length per slice */
1205     for(i=0; i<f->slice_count; i++){
1206         FFV1Context *fs= f->slice_context[i];
1211             put_rac(&fs->c, &state, 0);
1212             bytes= ff_rac_terminate(&fs->c);
1214             flush_put_bits(&fs->pb); //nicer padding FIXME
1215             bytes= used_count + (put_bits_count(&fs->pb)+7)/8;
1219             av_assert0(bytes < pkt->size/f->slice_count);
1220             memmove(buf_p, fs->ac ? fs->c.bytestream_start : fs->pb.buf, bytes);
1221             av_assert0(bytes < (1<<24));
1222             AV_WB24(buf_p+bytes, bytes);
/* every 32 frames of PASS1, aggregate slice stats and serialize them */
1228     if((avctx->flags&CODEC_FLAG_PASS1) && (f->picture_number&31)==0){
1230         char *p= avctx->stats_out;
1231         char *end= p + STATS_OUT_SIZE;
1233         memset(f->rc_stat, 0, sizeof(f->rc_stat));
1234         for(i=0; i<f->quant_table_count; i++)
1235             memset(f->rc_stat2[i], 0, f->context_count[i]*sizeof(*f->rc_stat2[i]));
1237         for(j=0; j<f->slice_count; j++){
1238             FFV1Context *fs= f->slice_context[j];
1239             for(i=0; i<256; i++){
1240                 f->rc_stat[i][0] += fs->rc_stat[i][0];
1241                 f->rc_stat[i][1] += fs->rc_stat[i][1];
1243             for(i=0; i<f->quant_table_count; i++){
1244                 for(k=0; k<f->context_count[i]; k++){
1245                     for(m=0; m<32; m++){
1246                         f->rc_stat2[i][k][m][0] += fs->rc_stat2[i][k][m][0];
1247                         f->rc_stat2[i][k][m][1] += fs->rc_stat2[i][k][m][1];
1253         for(j=0; j<256; j++){
1254             snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat[j][0], f->rc_stat[j][1]);
1257         snprintf(p, end-p, "\n");
1259         for(i=0; i<f->quant_table_count; i++){
1260             for(j=0; j<f->context_count[i]; j++){
1261                 for(m=0; m<32; m++){
1262                     snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat2[i][j][m][0], f->rc_stat2[i][j][m][1]);
1267         snprintf(p, end-p, "%d\n", f->gob_count);
1268     } else if(avctx->flags&CODEC_FLAG_PASS1)
1269         avctx->stats_out[0] = '\0';
1271     f->picture_number++;
1272     pkt->size   = buf_p - pkt->data;
1273     pkt->flags |= AV_PKT_FLAG_KEY*p->key_frame;
1278 #endif /* CONFIG_FFV1_ENCODER */
/*
 * Shared encoder/decoder teardown: release the reference picture (on
 * the decoder side), free per-slice plane state, sample buffers,
 * statistics buffers, initial states and the slice contexts themselves.
 * NOTE(review): truncated excerpt — loop braces and the return are
 * partly missing; code kept byte-identical.
 */
1280 static av_cold int common_end(AVCodecContext *avctx){
1281     FFV1Context *s = avctx->priv_data;
1284     if (avctx->codec->decode && s->picture.data[0])
1285         avctx->release_buffer(avctx, &s->picture);
1287     for(j=0; j<s->slice_count; j++){
1288         FFV1Context *fs= s->slice_context[j];
1289         for(i=0; i<s->plane_count; i++){
1290             PlaneContext *p= &fs->plane[i];
1292             av_freep(&p->state);
1293             av_freep(&p->vlc_state);
1295         av_freep(&fs->sample_buffer);
1298     av_freep(&avctx->stats_out);
1299     for(j=0; j<s->quant_table_count; j++){
1300         av_freep(&s->initial_states[j]);
1301         for(i=0; i<s->slice_count; i++){
1302             FFV1Context *sf= s->slice_context[i];
1303             av_freep(&sf->rc_stat2[j]);
1305         av_freep(&s->rc_stat2[j]);
1308     for(i=0; i<s->slice_count; i++){
1309         av_freep(&s->slice_context[i]);
/*
 * Decode one line of 'w' samples — mirror of encode_line: compute the
 * context, read the residual (range coder or run-length + Golomb-Rice),
 * add it to the median prediction and mask to 'bits'.
 * NOTE(review): truncated excerpt — the x loop, run-mode branch
 * structure and braces are partly missing (embedded original line
 * numbers jump); code kept byte-identical.
 */
1315 static av_always_inline void decode_line(FFV1Context *s, int w,
1317                                          int plane_index, int bits)
1319     PlaneContext * const p= &s->plane[plane_index];
1320     RangeCoder * const c= &s->c;
1324     int run_index= s->run_index;
1327         int diff, context, sign;
1329         context= get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
1336         av_assert2(context < p->context_count);
1339             diff= get_symbol_inline(c, p->state[context], 1);
1341             if(context == 0 && run_mode==0) run_mode=1;
1344                 if(run_count==0 && run_mode==1){
/* 1-bit: full run of 2^log2_run samples; 0-bit: explicit remainder */
1345                     if(get_bits1(&s->gb)){
1346                         run_count = 1<<ff_log2_run[run_index];
1347                         if(x + run_count <= w) run_index++;
1349                         if(ff_log2_run[run_index]) run_count = get_bits(&s->gb, ff_log2_run[run_index]);
1351                         if(run_index) run_index--;
1359                     diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1364                 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1366 //            printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, get_bits_count(&s->gb));
/* contexts with negated differences carry an implicit sign flip */
1369         if(sign) diff= -diff;
1371         sample[1][x]= (predict(sample[1] + x, sample[0] + x) + diff) & ((1<<bits)-1);
1373     s->run_index= run_index;
/* Decode a whole plane of w x h samples into src (stride in bytes).
 * Uses two sample rows inside s->sample_buffer, each padded with a
 * 3-sample left border (+3 offsets) and swapped every line so that
 * sample[0] is always the previous line and sample[1] the current one. */
1376 static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
1379 sample[0]=s->sample_buffer +3;
1380 sample[1]=s->sample_buffer+w+6+3;
/* Zero both padded rows so the first line predicts from 0 neighbours. */
1384 memset(s->sample_buffer, 0, 2*(w+6)*sizeof(*s->sample_buffer));
1387 int16_t *temp = sample[0]; //FIXME try a normal buffer
1389 sample[0]= sample[1];
/* Replicate edge samples into the border for the edge predictors. */
1392 sample[1][-1]= sample[0][0 ];
1393 sample[0][ w]= sample[0][w-1];
/* <=8 bit: decode and store bytes directly. */
1396 if(s->avctx->bits_per_raw_sample <= 8){
1397 decode_line(s, w, sample, plane_index, 8);
1399 src[x + stride*y]= sample[1][x];
/* >8 bit: 16-bit output, either packed at the LSB or shifted so the
 * significant bits sit at the MSB of the 16-bit word. */
1402 decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
1403 if(s->packed_at_lsb){
1405 ((uint16_t*)(src + stride*y))[x]= sample[1][x];
1409 ((uint16_t*)(src + stride*y))[x]= sample[1][x] << (16 - s->avctx->bits_per_raw_sample);
1413 //STOP_TIMER("decode-line")}
/* Decode an RGB(A) frame (colorspace 1) into packed 32-bit words.
 * Four component planes (order g, b, r, a) each get a previous/current
 * row pair in s->sample_buffer, 3-sample left border included.
 * Components are decoded with bits=9 to leave headroom for the
 * reversible colour transform; alpha is only decoded when
 * s->transparency is set. */
1417 static void decode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
1419 int16_t *sample[4][2];
1421 sample[x][0] = s->sample_buffer + x*2 *(w+6) + 3;
1422 sample[x][1] = s->sample_buffer + (x*2+1)*(w+6) + 3;
/* 4 components x 2 rows of (w+6) samples, zeroed for the first line. */
1427 memset(s->sample_buffer, 0, 8*(w+6)*sizeof(*s->sample_buffer));
1430 for(p=0; p<3 + s->transparency; p++){
1431 int16_t *temp = sample[p][0]; //FIXME try a normal buffer
1433 sample[p][0]= sample[p][1];
1436 sample[p][1][-1]= sample[p][0][0 ];
1437 sample[p][0][ w]= sample[p][0][w-1];
/* (p+1)/2 maps g->0, b/r->1, a->2: b and r share plane context 1. */
1438 decode_line(s, w, sample[p], (p+1)/2, 9);
1441 int g= sample[0][1][x];
1442 int b= sample[1][1][x];
1443 int r= sample[2][1][x];
1444 int a= sample[3][1][x];
1446 // assert(g>=0 && b>=0 && r>=0);
1447 // assert(g<256 && b<512 && r<512);
/* Pack as B | G<<8 | R<<16 | A<<24 into the output word. */
1455 src[x + stride*y]= b + (g<<8) + (r<<16) + (a<<24);
/* Thread entry point: decode one slice (arg points at its FFV1Context).
 * Dispatches per-plane decoding for YUV (colorspace 0) or whole-frame
 * RGB decoding (otherwise), addressing the slice's sub-rectangle inside
 * the shared output picture. */
1460 static int decode_slice(AVCodecContext *c, void *arg){
1461 FFV1Context *fs= *(void**)arg;
1462 FFV1Context *f= fs->avctx->priv_data;
1463 int width = fs->slice_width;
1464 int height= fs->slice_height;
/* ps = bytes per sample in the output (2 for >8-bit content). */
1467 const int ps= (c->bits_per_raw_sample>8)+1;
1468 AVFrame * const p= &f->picture;
1470 av_assert1(width && height);
1471 if(f->colorspace==0){
/* -((-w)>>s) is ceiling division by 2^s for the chroma extents. */
1472 const int chroma_width = -((-width )>>f->chroma_h_shift);
1473 const int chroma_height= -((-height)>>f->chroma_v_shift);
1474 const int cx= x>>f->chroma_h_shift;
1475 const int cy= y>>f->chroma_v_shift;
1476 decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
/* Chroma planes share plane context 1; alpha uses context 2. */
1478 if (f->chroma_planes){
1479 decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1480 decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1482 if (fs->transparency)
1483 decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
1485 decode_rgb_frame(fs, (uint32_t*)p->data[0] + ps*x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
/* Read one run-length-coded quantization sub-table from the range coder.
 * The positive half (indices 0..127) is transmitted as run lengths per
 * quantized value v, scaled by 'scale'; the negative half is the mirror
 * image.  Returns the number of distinct values (v) on success, -1 if
 * the runs overflow the 128-entry half. */
1493 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale){
1496 uint8_t state[CONTEXT_SIZE];
1498 memset(state, 128, sizeof(state));
/* Each iteration fills a run of 'len' entries with the value scale*v. */
1500 for(v=0; i<128 ; v++){
1501 int len= get_symbol(c, state, 0) + 1;
1503 if(len + i > 128) return -1;
1506 quant_table[i] = scale*v;
1509 //if(i%16==0) printf("\n");
/* Mirror: entry 256-i (i.e. index -i mod 256) is -quant_table[i]. */
1513 for(i=1; i<128; i++){
1514 quant_table[256-i]= -quant_table[i];
1516 quant_table[128]= -quant_table[127];
/* Read all MAX_CONTEXT_INPUTS quantization sub-tables of one table set.
 * The context count is the product of the per-input value counts; the
 * final count is halved (+1) because contexts are sign-folded.  Returns
 * the folded context count, or an error for oversized tables. */
1521 static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
1523 int context_count=1;
1526 context_count*= read_quant_table(c, quant_table[i], context_count);
/* Unsigned compare: also catches a negative product caused by a -1
 * error return from read_quant_table(). */
1527 if(context_count > 32768U){
1531 return (context_count+1)/2;
/* Parse the global header stored in avctx->extradata (version >= 1
 * files): version, coder type and optional custom state-transition
 * table, colorspace, bit depth, chroma subsampling, transparency, the
 * slice grid, all quantization table sets and, optionally, explicit
 * initial context states (delta-coded mod 256 against the previous
 * context, default 128). */
1534 static int read_extra_header(FFV1Context *f){
1535 RangeCoder * const c= &f->c;
1536 uint8_t state[CONTEXT_SIZE];
1538 uint8_t state2[32][CONTEXT_SIZE];
1540 memset(state2, 128, sizeof(state2));
1541 memset(state, 128, sizeof(state));
1543 ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size);
1544 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1546 f->version= get_symbol(c, state, 0);
1547 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
/* Custom range-coder state transition table, delta-coded against the
 * default one_state table. */
1549 for(i=1; i<256; i++){
1550 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1553 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1554 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1555 get_rac(c, state); //no chroma = false
1556 f->chroma_h_shift= get_symbol(c, state, 0);
1557 f->chroma_v_shift= get_symbol(c, state, 0);
1558 f->transparency= get_rac(c, state);
1559 f->plane_count= 2 + f->transparency;
/* Slice grid: at most one slice per pixel row/column. */
1560 f->num_h_slices= 1 + get_symbol(c, state, 0);
1561 f->num_v_slices= 1 + get_symbol(c, state, 0);
1562 if(f->num_h_slices > (unsigned)f->width || f->num_v_slices > (unsigned)f->height){
1563 av_log(f->avctx, AV_LOG_ERROR, "too many slices\n");
1567 f->quant_table_count= get_symbol(c, state, 0);
1568 if(f->quant_table_count > (unsigned)MAX_QUANT_TABLES)
1570 for(i=0; i<f->quant_table_count; i++){
1571 if((f->context_count[i]= read_quant_tables(c, f->quant_tables[i])) < 0){
1572 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1577 if(allocate_initial_states(f) < 0)
1578 return AVERROR(ENOMEM);
/* Optional per-table initial states: each context's 32 state bytes are
 * predicted from the previous context (128 for the first). */
1580 for(i=0; i<f->quant_table_count; i++){
1581 if(get_rac(c, state)){
1582 for(j=0; j<f->context_count[i]; j++){
1583 for(k=0; k<CONTEXT_SIZE; k++){
1584 int pred= j ? f->initial_states[i][j-1][k] : 128;
1585 f->initial_states[i][j][k]= (pred+get_symbol(c, state2[k], 1))&0xFF;
/* Parse the per-keyframe header through slice context 0's range coder:
 * coder type (with optional custom state-transition table), colorspace,
 * bit depth, chroma subsampling and transparency; map those onto an
 * AVPixelFormat; then read the quantization tables and the per-slice
 * geometry / per-plane quant table assignment, (re)allocating plane
 * context state as needed.  Returns 0 on success, negative on error. */
1594 static int read_header(FFV1Context *f){
1595 uint8_t state[CONTEXT_SIZE];
1596 int i, j, context_count;
1597 RangeCoder * const c= &f->slice_context[0]->c;
1599 memset(state, 128, sizeof(state));
1602 f->version= get_symbol(c, state, 0);
1603 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
/* Custom state-transition table, delta-coded against the default. */
1605 for(i=1; i<256; i++){
1606 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1609 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1611 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1612 f->chroma_planes= get_rac(c, state);
1613 f->chroma_h_shift= get_symbol(c, state, 0);
1614 f->chroma_v_shift= get_symbol(c, state, 0);
1615 f->transparency= get_rac(c, state);
1616 f->plane_count= 2 + f->transparency;
/* Pixel format selection: colorspace 0 is YUV/gray, keyed on bit depth,
 * transparency and the 16*h+v chroma shift code. */
1619 if(f->colorspace==0){
1620 if(!f->transparency && !f->chroma_planes){
1621 if (f->avctx->bits_per_raw_sample<=8)
1622 f->avctx->pix_fmt= PIX_FMT_GRAY8;
1624 f->avctx->pix_fmt= PIX_FMT_GRAY16;
1625 }else if(f->avctx->bits_per_raw_sample<=8 && !f->transparency){
1626 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1627 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P; break;
1628 case 0x01: f->avctx->pix_fmt= PIX_FMT_YUV440P; break;
1629 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P; break;
1630 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P; break;
1631 case 0x20: f->avctx->pix_fmt= PIX_FMT_YUV411P; break;
1632 case 0x22: f->avctx->pix_fmt= PIX_FMT_YUV410P; break;
1634 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1637 }else if(f->avctx->bits_per_raw_sample<=8 && f->transparency){
1638 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1639 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUVA444P; break;
1640 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUVA420P; break;
1642 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1645 }else if(f->avctx->bits_per_raw_sample==9) {
1647 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1648 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P9; break;
1649 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P9; break;
1650 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P9; break;
1652 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1655 }else if(f->avctx->bits_per_raw_sample==10) {
1657 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1658 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P10; break;
1659 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P10; break;
1660 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P10; break;
1662 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1666 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1667 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
1668 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P16; break;
1669 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P16; break;
1671 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1675 }else if(f->colorspace==1){
1676 if(f->chroma_h_shift || f->chroma_v_shift){
1677 av_log(f->avctx, AV_LOG_ERROR, "chroma subsampling not supported in this colorspace\n");
1680 if(f->transparency) f->avctx->pix_fmt= PIX_FMT_RGB32;
1681 else f->avctx->pix_fmt= PIX_FMT_0RGB32;
1683 av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
1687 //printf("%d %d %d\n", f->chroma_h_shift, f->chroma_v_shift,f->avctx->pix_fmt);
1689 context_count= read_quant_tables(c, f->quant_table);
1690 if(context_count < 0){
1691 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1695 f->slice_count= get_symbol(c, state, 0);
1696 if(f->slice_count > (unsigned)MAX_SLICES)
1700 for(j=0; j<f->slice_count; j++){
1701 FFV1Context *fs= f->slice_context[j];
1703 fs->packed_at_lsb= f->packed_at_lsb;
/* Version >= 2 signals slice geometry in slice-grid units; scale by
 * width/height first, then divide by the grid to get pixel extents. */
1705 if(f->version >= 2){
1706 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1707 fs->slice_y = get_symbol(c, state, 0) *f->height;
1708 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1709 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
1711 fs->slice_x /= f->num_h_slices;
1712 fs->slice_y /= f->num_v_slices;
1713 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1714 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
1715 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1717 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1718 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1722 for(i=0; i<f->plane_count; i++){
1723 PlaneContext * const p= &fs->plane[i];
1725 if(f->version >= 2){
1726 int idx=get_symbol(c, state, 0);
/* Fix: was '>'; idx == quant_table_count passed the check but would
 * index one past the last table actually read (valid indices are
 * 0..quant_table_count-1), reading uninitialized quant_tables[] and
 * context_count[].  The unsigned cast also rejects negative idx. */
1727 if(idx >= (unsigned)f->quant_table_count){
1728 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
1731 p->quant_table_index= idx;
1732 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
1733 context_count= f->context_count[idx];
1735 memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
/* Grow (reallocate) model state if this table needs more contexts. */
1738 if(p->context_count < context_count){
1739 av_freep(&p->state);
1740 av_freep(&p->vlc_state);
1742 p->context_count= context_count;
/* Decoder init: parse the global extradata header if present, then
 * allocate and initialize the per-slice contexts. */
1749 static av_cold int decode_init(AVCodecContext *avctx)
1751 FFV1Context *f = avctx->priv_data;
1755 if(avctx->extradata && read_extra_header(f) < 0)
1758 if(init_slice_contexts(f) < 0)
/* Decode one FFV1 access unit into f->picture.
 * A single rac-coded bit (keystate) marks keyframes; keyframes re-read
 * the full header and reset slice state.  Slices are located by walking
 * backwards from the end of the packet: each slice's byte size is stored
 * big-endian in its last 3 bytes.  Actual decoding is fanned out over
 * avctx->execute (slice threading). */
1764 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
1765 const uint8_t *buf = avpkt->data;
1766 int buf_size = avpkt->size;
1767 FFV1Context *f = avctx->priv_data;
1768 RangeCoder * const c= &f->slice_context[0]->c;
1769 AVFrame * const p= &f->picture;
1771 uint8_t keystate= 128;
1772 const uint8_t *buf_p;
1774 AVFrame *picture = data;
1776 /* release previously stored data */
1778 avctx->release_buffer(avctx, p);
1780 ff_init_range_decoder(c, buf, buf_size);
1781 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1784 p->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
/* Keyframe: re-parse header and reinitialize slice state. */
1785 if(get_rac(c, &keystate)){
1787 if(read_header(f) < 0)
1789 if(init_slice_state(f) < 0)
/* Load the (possibly custom) state-transition table into the coder;
 * zero_state is the mirror of one_state. */
1798 for(i=1; i<256; i++){
1799 c->one_state[i]= f->state_transition[i];
1800 c->zero_state[256-i]= 256-c->one_state[i];
1805 if(avctx->get_buffer(avctx, p) < 0){
1806 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1810 if(avctx->debug&FF_DEBUG_PICT_INFO)
1811 av_log(avctx, AV_LOG_ERROR, "keyframe:%d coder:%d\n", p->key_frame, f->ac);
/* VLC (non-ac) mode: skip the range-coded header bytes and continue
 * slice 0 with a bit reader on the remainder. */
1814 bytes_read = c->bytestream - c->bytestream_start - 1;
1815 if(bytes_read ==0) av_log(avctx, AV_LOG_ERROR, "error at end of AC stream\n"); //FIXME
1816 //printf("pos=%d\n", bytes_read);
1817 init_get_bits(&f->slice_context[0]->gb, buf + bytes_read, (buf_size - bytes_read) * 8);
1819 bytes_read = 0; /* avoid warning */
/* Walk slice footers from the packet end; slice 0 needs no footer. */
1822 buf_p= buf + buf_size;
1823 for(i=f->slice_count-1; i>0; i--){
1824 FFV1Context *fs= f->slice_context[i];
/* v = slice payload size + 3-byte footer. */
1825 int v= AV_RB24(buf_p-3)+3;
1826 if(buf_p - buf <= v){
1827 av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
1832 ff_init_range_decoder(&fs->c, buf_p, v);
1834 init_get_bits(&fs->gb, buf_p, v * 8);
1838 avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
1839 f->picture_number++;
1842 *data_size = sizeof(AVFrame);
/* FFV1 decoder registration: direct rendering and slice threading
 * supported; teardown shared with the encoder via common_end(). */
1847 AVCodec ff_ffv1_decoder = {
1849 .type = AVMEDIA_TYPE_VIDEO,
1850 .id = CODEC_ID_FFV1,
1851 .priv_data_size = sizeof(FFV1Context),
1852 .init = decode_init,
1853 .close = common_end,
1854 .decode = decode_frame,
1855 .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ | CODEC_CAP_SLICE_THREADS,
1856 .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
1859 #if CONFIG_FFV1_ENCODER
1860 AVCodec ff_ffv1_encoder = {
1862 .type = AVMEDIA_TYPE_VIDEO,
1863 .id = CODEC_ID_FFV1,
1864 .priv_data_size = sizeof(FFV1Context),
1865 .init = encode_init,
1866 .encode2 = encode_frame,
1867 .close = common_end,
1868 .capabilities = CODEC_CAP_SLICE_THREADS,
1869 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUVA420P, PIX_FMT_YUV444P, PIX_FMT_YUVA444P, PIX_FMT_YUV440P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_0RGB32, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_YUV444P9, PIX_FMT_YUV422P9, PIX_FMT_YUV420P9, PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_YUV444P10, PIX_FMT_GRAY16, PIX_FMT_GRAY8, PIX_FMT_NONE},
1870 .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),