2 * FFV1 codec for libavcodec
4 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * FF Video Codec 1 (a lossless codec)
33 #include "rangecoder.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/avassert.h"
40 #define CONTEXT_SIZE 32
42 #define MAX_QUANT_TABLES 8
43 #define MAX_CONTEXT_INPUTS 5
45 extern const uint8_t ff_log2_run[41];
/* 5-level quantizer for a signed 8-bit sample difference (index is the
 * difference masked with 0xFF), mapping it to a context bucket in -2..2.
 * The "10bit" variant has a wider dead zone around 0 than quant5 below
 * (11 zero entries vs 1); selected in encode_init when
 * bits_per_raw_sample > 8.
 * NOTE(review): the closing "};" is not visible in this excerpt. */
47 static const int8_t quant5_10bit[256]={
48 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
49 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
50 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
51 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
52 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
53 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
54 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
55 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
56 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
57 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
58 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
59 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
60 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,
61 -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
62 -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
63 -1,-1,-1,-1,-1,-1,-0,-0,-0,-0,-0,-0,-0,-0,-0,-0,
/* 5-level quantizer for 8-bit content: signed difference (masked to 0..255)
 * -> context bucket in -2..2, with a narrow dead zone (only index 0 maps
 * to 0).  Used for the 4th/5th context inputs of quant table 1 in
 * encode_init.  Closing "};" not visible in this excerpt. */
66 static const int8_t quant5[256]={
67 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
68 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
69 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
70 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
71 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
72 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
73 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
74 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
75 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
76 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
77 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
78 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
79 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
80 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
81 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
82 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1,-1,
/* 9-level quantizer (-4..4) for >8-bit content; used for the first three
 * context inputs in encode_init when bits_per_raw_sample > 8.
 * Closing "};" not visible in this excerpt. */
85 static const int8_t quant9_10bit[256]={
86 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
88 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
89 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
95 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
96 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
97 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
98 -4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,
99 -3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,
100 -3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
101 -2,-2,-2,-2,-1,-1,-1,-1,-1,-1,-1,-1,-0,-0,-0,-0,
/* 11-level quantizer (-5..5) for 8-bit content; used for the first three
 * context inputs (multipliers 1, 11, 11*11 in encode_init).
 * Closing "};" not visible in this excerpt. */
104 static const int8_t quant11[256]={
105 0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
106 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
107 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
108 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
109 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
110 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
111 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
112 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
113 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
114 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
115 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
116 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
117 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
118 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-4,-4,
119 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
120 -4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-1,
123 static const uint8_t ver2_state[256]= {
124 0, 10, 10, 10, 10, 16, 16, 16, 28, 16, 16, 29, 42, 49, 20, 49,
125 59, 25, 26, 26, 27, 31, 33, 33, 33, 34, 34, 37, 67, 38, 39, 39,
126 40, 40, 41, 79, 43, 44, 45, 45, 48, 48, 64, 50, 51, 52, 88, 52,
127 53, 74, 55, 57, 58, 58, 74, 60, 101, 61, 62, 84, 66, 66, 68, 69,
128 87, 82, 71, 97, 73, 73, 82, 75, 111, 77, 94, 78, 87, 81, 83, 97,
129 85, 83, 94, 86, 99, 89, 90, 99, 111, 92, 93, 134, 95, 98, 105, 98,
130 105, 110, 102, 108, 102, 118, 103, 106, 106, 113, 109, 112, 114, 112, 116, 125,
131 115, 116, 117, 117, 126, 119, 125, 121, 121, 123, 145, 124, 126, 131, 127, 129,
132 165, 130, 132, 138, 133, 135, 145, 136, 137, 139, 146, 141, 143, 142, 144, 148,
133 147, 155, 151, 149, 151, 150, 152, 157, 153, 154, 156, 168, 158, 162, 161, 160,
134 172, 163, 169, 164, 166, 184, 167, 170, 177, 174, 171, 173, 182, 176, 180, 178,
135 175, 189, 179, 181, 186, 183, 192, 185, 200, 187, 191, 188, 190, 197, 193, 196,
136 197, 194, 195, 196, 198, 202, 199, 201, 210, 203, 207, 204, 205, 206, 208, 214,
137 209, 211, 221, 212, 213, 215, 224, 216, 217, 218, 219, 220, 222, 228, 223, 225,
138 226, 224, 227, 229, 240, 230, 231, 232, 233, 234, 235, 236, 238, 239, 237, 242,
139 241, 243, 242, 244, 245, 246, 247, 248, 249, 250, 251, 252, 252, 253, 254, 255,
/* Per-context state for the Golomb-Rice (VLC) coding path; the field list
 * (drift, error_sum, bias, count — see update_vlc_state) is not visible in
 * this excerpt. */
142 typedef struct VlcState{
/* Per-plane coding state: context quantization tables and the adaptive
 * range-coder / VLC state arrays (one entry per context). Several members
 * are missing from this excerpt. */
149 typedef struct PlaneContext{
150 int16_t quant_table[MAX_CONTEXT_INPUTS][256];
151 int quant_table_index;
/* one CONTEXT_SIZE-byte range-coder state set per context */
153 uint8_t (*state)[CONTEXT_SIZE];
155 uint8_t interlace_bit_state[2];
/* upper bound on slices per frame (slice_count is written with put_symbol) */
158 #define MAX_SLICES 256
/* Main codec context. One master context plus one clone per slice
 * (slice_context[]); each slice clone carries its own RangeCoder,
 * PutBitContext and sample ring buffer. Many members are missing from this
 * excerpt. */
160 typedef struct FFV1Context{
161 AVCodecContext *avctx;
/* 2-pass statistics: global bit counts and per-quant-table, per-context
 * counters (allocated lazily in encode_init) */
165 uint64_t rc_stat[256][2];
166 uint64_t (*rc_stat2[MAX_QUANT_TABLES])[32][2];
169 int chroma_h_shift, chroma_v_shift;
176 int ac; ///< 1=range coder <-> 0=golomb rice
177 PlaneContext plane[MAX_PLANES];
178 int16_t quant_table[MAX_CONTEXT_INPUTS][256];
179 int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][256];
180 int context_count[MAX_QUANT_TABLES];
181 uint8_t state_transition[256];
182 uint8_t (*initial_states[MAX_QUANT_TABLES])[32];
/* ring buffer of decoded/encoded sample lines (allocated per slice) */
185 int16_t *sample_buffer;
189 int quant_table_count;
193 struct FFV1Context *slice_context[MAX_SLICES];
201 int bits_per_raw_sample;
/* Folds a residual into the signed range representable in 'bits' bits
 * (used by put_vlc_symbol/get_vlc_symbol and encode_line); the body is not
 * visible in this excerpt. */
204 static av_always_inline int fold(int diff, int bits){
/* Median predictor: predicts the current sample from its left (L), top (T)
 * and top-left (LT) neighbours as median(L, L+T-LT, T). 'last' points at
 * the same x position in the previous line. */
216 static inline int predict(int16_t *src, int16_t *last)
218 const int LT= last[-1];
219 const int T= last[ 0];
220 const int L = src[-1];
222 return mid_pred(L, L + T - LT, T);
/* Computes the context index for the current sample from quantized
 * neighbour differences. If quant_table[3] is non-trivial (checked via
 * entry 127), the large 5-input model is used, which additionally quantizes
 * the LL (left-left) and TT (top-top) differences from last2/src[-2]. */
225 static inline int get_context(PlaneContext *p, int16_t *src,
226 int16_t *last, int16_t *last2)
228 const int LT= last[-1];
229 const int T= last[ 0];
230 const int RT= last[ 1];
231 const int L = src[-1];
233 if(p->quant_table[3][127]){
234 const int TT= last2[0];
235 const int LL= src[-2];
236 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF]
237 +p->quant_table[3][(LL-L) & 0xFF] + p->quant_table[4][(TT-T) & 0xFF];
239 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF];
/* For every (probability, occupancy) pair, searches the range-coder state k
 * that minimizes expected code length, using a log2 lookup table and
 * iterating the state-occupancy distribution through one_state[].
 * Used by encode_init in 2-pass mode to pick initial_states.
 * Several loop/brace lines are missing from this excerpt. */
242 static void find_best_state(uint8_t best_state[256][256], const uint8_t one_state[256]){
247 l2tab[i]= log2(i/256.0);
249 for(i=0; i<256; i++){
250 double best_len[256];
/* only neighbouring probabilities of i are considered as candidates */
256 for(j=FFMAX(i-10,1); j<FFMIN(i+11,256); j++){
260 for(k=0; k<256; k++){
261 double newocc[256]={0};
262 for(m=0; m<256; m++){
/* expected -log2 cost of coding with probability p from state m */
264 len -=occ[m]*( p *l2tab[ m]
265 + (1-p)*l2tab[256-m]);
268 if(len < best_len[k]){
272 for(m=0; m<256; m++){
/* propagate occupancy through the state transition table */
274 newocc[ one_state[ m]] += occ[m]* p ;
275 newocc[256-one_state[256-m]] += occ[m]*(1-p);
278 memcpy(occ, newocc, sizeof(occ));
/* Writes an (optionally signed) integer with the range coder: a zero flag,
 * then a unary-coded exponent e = log2(|v|) in states 1..10, the sign in
 * states 11..21 and the e mantissa bits in states 22..31. When rc_stat /
 * rc_stat2 are non-NULL (2-pass), the local put_rac wrapper also counts
 * bit decisions per state. Parts of both branches are missing from this
 * excerpt. */
284 static av_always_inline av_flatten void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int is_signed, uint64_t rc_stat[256][2], uint64_t rc_stat2[32][2]){
287 #define put_rac(C,S,B) \
291 rc_stat2[(S)-state][B]++;\
297 const int a= FFABS(v);
298 const int e= av_log2(a);
299 put_rac(c, state+0, 0);
302 put_rac(c, state+1+i, 1); //1..10
304 put_rac(c, state+1+i, 0);
306 for(i=e-1; i>=0; i--){
307 put_rac(c, state+22+i, (a>>i)&1); //22..31
311 put_rac(c, state+11 + e, v < 0); //11..21
/* large-exponent path: clamp state offsets with FFMIN so e > 9 reuses
 * the last state of each group */
314 put_rac(c, state+1+FFMIN(i,9), 1); //1..10
316 put_rac(c, state+1+9, 0);
318 for(i=e-1; i>=0; i--){
319 put_rac(c, state+22+FFMIN(i,9), (a>>i)&1); //22..31
323 put_rac(c, state+11 + 10, v < 0); //11..21
/* v == 0: single "zero" decision */
326 put_rac(c, state+0, 1);
/* Out-of-line wrapper around put_symbol_inline without statistics
 * gathering; used for header/table fields where speed does not matter. */
331 static av_noinline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed){
332 put_symbol_inline(c, state, v, is_signed, NULL, NULL);
/* Decoder counterpart of put_symbol_inline: reads the zero flag, the unary
 * exponent (states 1..10), the mantissa bits (states 22..31) and, for
 * signed symbols, the sign (states 11..21). Some lines are missing from
 * this excerpt. */
335 static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed){
336 if(get_rac(c, state+0))
341 while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
346 for(i=e-1; i>=0; i--){
347 a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31
/* e becomes an all-ones mask when the sign bit is set, 0 otherwise */
350 e= -(is_signed && get_rac(c, state+11 + FFMIN(e, 10))); //11..21
/* Out-of-line wrapper around get_symbol_inline, for non-hot paths. */
355 static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
356 return get_symbol_inline(c, state, is_signed);
/* Updates the adaptive Golomb-Rice context after coding residual v:
 * accumulates |v| into error_sum, and (not fully visible here) adjusts
 * drift/bias; when count reaches 128 the counters are halved so the
 * adaptation stays responsive. */
359 static inline void update_vlc_state(VlcState * const state, const int v){
360 int drift= state->drift;
361 int count= state->count;
362 state->error_sum += FFABS(v);
365 if(count == 128){ //FIXME variable
366 state->error_sum >>= 1;
/* bias is clamped to the int8 range -128..127 */
373 if(state->bias > -128) state->bias--;
379 if(state->bias < 127) state->bias++;
/* Codes residual v with a signed Rice code: subtracts the adaptive bias,
 * folds into 'bits' bits, derives the Rice parameter k from error_sum,
 * remaps v according to the drift sign, and emits it with set_sr_golomb.
 * Finally feeds v back into the adaptation state. */
390 static inline void put_vlc_symbol(PutBitContext *pb, VlcState * const state, int v, int bits){
392 //printf("final: %d ", v);
393 v = fold(v - state->bias, bits);
/* k = smallest value with (1<<k)*count >= error_sum (loop body elided) */
397 while(i < state->error_sum){ //FIXME optimize
405 if(k==0 && 2*state->drift <= - state->count) code= v ^ (-1);
/* sign of (2*drift + count) selects whether v is bit-inverted */
408 code= v ^ ((2*state->drift + state->count)>>31);
411 //printf("v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code, state->bias, state->error_sum, state->drift, state->count, k);
412 set_sr_golomb(pb, code, k, 12, bits);
414 update_vlc_state(state, v);
/* Decoder counterpart of put_vlc_symbol: derives the same Rice parameter k
 * from error_sum, reads the code with get_sr_golomb, undoes the drift-based
 * remapping and the bias, and updates the adaptation state. Returns the
 * reconstructed (folded) residual. */
417 static inline int get_vlc_symbol(GetBitContext *gb, VlcState * const state, int bits){
422 while(i < state->error_sum){ //FIXME optimize
429 v= get_sr_golomb(gb, k, 12, bits);
430 //printf("v:%d bias:%d error:%d drift:%d count:%d k:%d", v, state->bias, state->error_sum, state->drift, state->count, k);
433 if(k==0 && 2*state->drift <= - state->count) v ^= (-1);
435 v ^= ((2*state->drift + state->count)>>31);
438 ret= fold(v + state->bias, bits);
440 update_vlc_state(state, v);
441 //printf("final: %d\n", ret);
445 #if CONFIG_FFV1_ENCODER
/* Encodes one line of w residuals for plane 'plane_index'.
 * sample[0] is the current line, sample[1]/sample[2] the previous lines
 * (for context/prediction). Two paths: s->ac uses the range coder
 * (put_symbol_inline, with 2-pass stats under CODEC_FLAG_PASS1), otherwise
 * Golomb-Rice with a zero-run mode keyed on context 0. Returns an error
 * when the output buffer could overflow. Many loop/brace lines are missing
 * from this excerpt. */
446 static av_always_inline int encode_line(FFV1Context *s, int w,
448 int plane_index, int bits)
450 PlaneContext * const p= &s->plane[plane_index];
451 RangeCoder * const c= &s->c;
453 int run_index= s->run_index;
/* worst-case output size checks before encoding the line */
458 if(c->bytestream_end - c->bytestream < w*20){
459 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
463 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < w*4){
464 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
472 context= get_context(p, sample[0]+x, sample[1]+x, sample[2]+x);
473 diff= sample[0][x] - predict(sample[0]+x, sample[1]+x);
480 diff= fold(diff, bits);
483 if(s->flags & CODEC_FLAG_PASS1){
484 put_symbol_inline(c, p->state[context], diff, 1, s->rc_stat, s->rc_stat2[p->quant_table_index][context]);
486 put_symbol_inline(c, p->state[context], diff, 1, NULL, NULL);
/* Golomb path: context 0 switches into run-length mode */
489 if(context == 0) run_mode=1;
494 while(run_count >= 1<<ff_log2_run[run_index]){
495 run_count -= 1<<ff_log2_run[run_index];
497 put_bits(&s->pb, 1, 1);
500 put_bits(&s->pb, 1 + ff_log2_run[run_index], run_count);
501 if(run_index) run_index--;
510 // printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, (int)put_bits_count(&s->pb));
513 put_vlc_symbol(&s->pb, &p->vlc_state[context], diff, bits);
/* flush any pending run at end of line */
517 while(run_count >= 1<<ff_log2_run[run_index]){
518 run_count -= 1<<ff_log2_run[run_index];
520 put_bits(&s->pb, 1, 1);
524 put_bits(&s->pb, 1, 1);
526 s->run_index= run_index;
/* Encodes one luma/chroma/alpha plane: copies each source line into the
 * int16 sample ring buffer (2 or 3 lines deep depending on context_model),
 * with a 3-sample guard border, then calls encode_line. 16-bit input is
 * either taken as-is (packed_at_lsb) or shifted down to
 * bits_per_raw_sample. Loop headers are missing from this excerpt. */
531 static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
533 const int ring_size= s->avctx->context_model ? 3 : 2;
537 memset(s->sample_buffer, 0, ring_size*(w+6)*sizeof(*s->sample_buffer));
540 for(i=0; i<ring_size; i++)
541 sample[i]= s->sample_buffer + (w+6)*((h+i-y)%ring_size) + 3;
/* replicate edge samples so predict/get_context can read past the line */
543 sample[0][-1]= sample[1][0 ];
544 sample[1][ w]= sample[1][w-1];
546 if(s->bits_per_raw_sample<=8){
548 sample[0][x]= src[x + stride*y];
550 encode_line(s, w, sample, plane_index, 8);
552 if(s->packed_at_lsb){
554 sample[0][x]= ((uint16_t*)(src + stride*y))[x];
558 sample[0][x]= ((uint16_t*)(src + stride*y))[x] >> (16 - s->bits_per_raw_sample);
561 encode_line(s, w, sample, plane_index, s->bits_per_raw_sample);
563 //STOP_TIMER("encode line")}
/* Encodes a packed 32-bit RGB(A) frame: splits each pixel into up to four
 * component planes held in per-plane sample rings and encodes each with
 * encode_line at 9 bits (the RGB transform produces 9-bit residuals for
 * the non-G planes — the transform lines are elided in this excerpt). */
567 static void encode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
569 const int ring_size= s->avctx->context_model ? 3 : 2;
570 int16_t *sample[4][3];
573 memset(s->sample_buffer, 0, ring_size*4*(w+6)*sizeof(*s->sample_buffer));
576 for(i=0; i<ring_size; i++)
578 sample[p][i]= s->sample_buffer + p*ring_size*(w+6) + ((h+i-y)%ring_size)*(w+6) + 3;
581 unsigned v= src[x + stride*y];
593 // assert(g>=0 && b>=0 && r>=0);
594 // assert(g<256 && b<512 && r<512);
600 for(p=0; p<3 + s->transparency; p++){
601 sample[p][0][-1]= sample[p][1][0 ];
602 sample[p][1][ w]= sample[p][1][w-1];
603 encode_line(s, w, sample[p], (p+1)/2, 9);
/* Serializes one 256-entry quant table by writing the run lengths between
 * value changes over the first half (the table is symmetric, so only
 * indices 1..127 are scanned), each as an unsigned put_symbol. */
608 static void write_quant_table(RangeCoder *c, int16_t *quant_table){
611 uint8_t state[CONTEXT_SIZE];
612 memset(state, 128, sizeof(state));
614 for(i=1; i<128 ; i++){
615 if(quant_table[i] != quant_table[i-1]){
616 put_symbol(c, state, i-last-1, 0);
/* final run up to index 127 */
620 put_symbol(c, state, i-last-1, 0);
/* Writes all MAX_CONTEXT_INPUTS sub-tables of one quant table set
 * (loop header elided in this excerpt). */
623 static void write_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
626 write_quant_table(c, quant_table[i]);
/* Writes the per-frame (version <= 1 style) header into slice 0's range
 * coder: version, coder type, state transition deltas, colorspace,
 * bit depth, chroma layout, transparency, quant tables, and per-slice
 * geometry plus quant table indices. Some conditional lines are missing
 * from this excerpt. */
629 static void write_header(FFV1Context *f){
630 uint8_t state[CONTEXT_SIZE];
632 RangeCoder * const c= &f->slice_context[0]->c;
634 memset(state, 128, sizeof(state));
637 put_symbol(c, state, f->version, 0);
638 put_symbol(c, state, f->ac, 0);
/* custom state transition table stored as signed deltas vs. defaults */
640 for(i=1; i<256; i++){
641 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
644 put_symbol(c, state, f->colorspace, 0); //YUV cs type
646 put_symbol(c, state, f->bits_per_raw_sample, 0);
647 put_rac(c, state, f->chroma_planes);
648 put_symbol(c, state, f->chroma_h_shift, 0);
649 put_symbol(c, state, f->chroma_v_shift, 0);
650 put_rac(c, state, f->transparency);
652 write_quant_tables(c, f->quant_table);
654 put_symbol(c, state, f->slice_count, 0);
655 for(i=0; i<f->slice_count; i++){
656 FFV1Context *fs= f->slice_context[i];
/* slice geometry in units of the h/v slice grid, not pixels */
657 put_symbol(c, state, (fs->slice_x +1)*f->num_h_slices / f->width , 0);
658 put_symbol(c, state, (fs->slice_y +1)*f->num_v_slices / f->height , 0);
659 put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
660 put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
661 for(j=0; j<f->plane_count; j++){
662 put_symbol(c, state, f->plane[j].quant_table_index, 0);
663 av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
668 #endif /* CONFIG_FFV1_ENCODER */
/* Initialization shared by encoder and decoder: caches flags and frame
 * geometry from the AVCodecContext, sets up the picture and DSP context.
 * Return statement not visible in this excerpt. */
670 static av_cold int common_init(AVCodecContext *avctx){
671 FFV1Context *s = avctx->priv_data;
674 s->flags= avctx->flags;
676 avcodec_get_frame_defaults(&s->picture);
678 ff_dsputil_init(&s->dsp, avctx);
680 s->width = avctx->width;
681 s->height= avctx->height;
683 assert(s->width && s->height);
/* Allocates per-slice, per-plane context state arrays (range-coder states
 * and VLC states) and rebuilds each slice's range-coder transition tables
 * from state_transition. Returns AVERROR(ENOMEM) on allocation failure.
 * Some conditional lines are missing from this excerpt. */
692 static int init_slice_state(FFV1Context *f){
695 for(i=0; i<f->slice_count; i++){
696 FFV1Context *fs= f->slice_context[i];
697 fs->plane_count= f->plane_count;
698 fs->transparency= f->transparency;
699 for(j=0; j<f->plane_count; j++){
700 PlaneContext * const p= &fs->plane[j];
703 if(!p-> state) p-> state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t));
705 return AVERROR(ENOMEM);
707 if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(VlcState));
709 return AVERROR(ENOMEM);
714 //FIXME only redo if state_transition changed
715 for(j=1; j<256; j++){
716 fs->c.one_state [ j]= fs->state_transition[j];
717 fs->c.zero_state[256-j]= 256-fs->c.one_state [j];
/* Creates one FFV1Context clone per slice: copies the master context,
 * computes the slice rectangle from the h/v slice grid, clears per-slice
 * stats pointers and allocates the slice's sample ring buffer.
 * Returns AVERROR(ENOMEM) on allocation failure; the av_mallocz(fs)
 * result check is not visible in this excerpt. */
725 static av_cold int init_slice_contexts(FFV1Context *f){
728 f->slice_count= f->num_h_slices * f->num_v_slices;
730 for(i=0; i<f->slice_count; i++){
731 FFV1Context *fs= av_mallocz(sizeof(*fs));
732 int sx= i % f->num_h_slices;
733 int sy= i / f->num_h_slices;
734 int sxs= f->avctx->width * sx / f->num_h_slices;
735 int sxe= f->avctx->width *(sx+1) / f->num_h_slices;
736 int sys= f->avctx->height* sy / f->num_v_slices;
737 int sye= f->avctx->height*(sy+1) / f->num_v_slices;
738 f->slice_context[i]= fs;
/* slice starts as a byte copy of the master; stats must not be shared */
739 memcpy(fs, f, sizeof(*fs));
740 memset(fs->rc_stat2, 0, sizeof(fs->rc_stat2));
742 fs->slice_width = sxe - sxs;
743 fs->slice_height= sye - sys;
/* 3 lines deep, 4 planes wide: enough for RGB(A) + context_model */
747 fs->sample_buffer = av_malloc(3*4 * (fs->width+6) * sizeof(*fs->sample_buffer));
748 if (!fs->sample_buffer)
749 return AVERROR(ENOMEM);
/* Allocates the per-quant-table initial range-coder states and fills them
 * with the neutral probability 128. Returns AVERROR(ENOMEM) on failure. */
754 static int allocate_initial_states(FFV1Context *f){
757 for(i=0; i<f->quant_table_count; i++){
758 f->initial_states[i]= av_malloc(f->context_count[i]*sizeof(*f->initial_states[i]));
759 if(!f->initial_states[i])
760 return AVERROR(ENOMEM);
761 memset(f->initial_states[i], 128, f->context_count[i]*sizeof(*f->initial_states[i]));
766 #if CONFIG_FFV1_ENCODER
/* Writes the global (version >= 2) extradata header: version, coder type,
 * state transition deltas, colorspace/bit depth/chroma/transparency,
 * slice grid, all quant table sets, and optionally the trained initial
 * states (delta-coded against the previous context). Shrinks
 * extradata_size to the range coder's terminated length.
 * NOTE(review): the av_malloc result is not checked on the visible lines —
 * verify against the full file. */
767 static int write_extra_header(FFV1Context *f){
768 RangeCoder * const c= &f->c;
769 uint8_t state[CONTEXT_SIZE];
771 uint8_t state2[32][CONTEXT_SIZE];
773 memset(state2, 128, sizeof(state2));
774 memset(state, 128, sizeof(state));
/* worst-case extradata size for all tables + initial states */
776 f->avctx->extradata= av_malloc(f->avctx->extradata_size= 10000 + (11*11*5*5*5+11*11*11)*32);
777 ff_init_range_encoder(c, f->avctx->extradata, f->avctx->extradata_size);
778 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
780 put_symbol(c, state, f->version, 0);
781 put_symbol(c, state, f->ac, 0);
783 for(i=1; i<256; i++){
784 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
787 put_symbol(c, state, f->colorspace, 0); //YUV cs type
788 put_symbol(c, state, f->bits_per_raw_sample, 0);
789 put_rac(c, state, f->chroma_planes);
790 put_symbol(c, state, f->chroma_h_shift, 0);
791 put_symbol(c, state, f->chroma_v_shift, 0);
792 put_rac(c, state, f->transparency);
793 put_symbol(c, state, f->num_h_slices-1, 0);
794 put_symbol(c, state, f->num_v_slices-1, 0);
796 put_symbol(c, state, f->quant_table_count, 0);
797 for(i=0; i<f->quant_table_count; i++)
798 write_quant_tables(c, f->quant_tables[i]);
800 for(i=0; i<f->quant_table_count; i++){
/* scan for any non-default initial state; only then store the table */
801 for(j=0; j<f->context_count[i]*CONTEXT_SIZE; j++)
802 if(f->initial_states[i] && f->initial_states[i][0][j] != 128)
804 if(j<f->context_count[i]*CONTEXT_SIZE){
805 put_rac(c, state, 1);
806 for(j=0; j<f->context_count[i]; j++){
807 for(k=0; k<CONTEXT_SIZE; k++){
808 int pred= j ? f->initial_states[i][j-1][k] : 128;
809 put_symbol(c, state2[k], (int8_t)(f->initial_states[i][j][k]-pred), 1);
813 put_rac(c, state, 0);
817 f->avctx->extradata_size= ff_rac_terminate(c);
/* Greedy optimizer for the state transition table using the gathered
 * rc_stat bit counts: tries swapping nearby states i/i2 (and their mirror
 * entries 256-i/256-i2) whenever the estimated coded size shrinks, then
 * relabels all references in stt. State 128 is pinned. Outer loop and
 * return are not visible in this excerpt. */
822 static int sort_stt(FFV1Context *s, uint8_t stt[256]){
823 int i,i2,changed,print=0;
827 for(i=12; i<244; i++){
828 for(i2=i+1; i2<245 && i2<i+4; i2++){
/* expected bits of coding 'old' state's decisions with probability 'new' */
829 #define COST(old, new) \
830 s->rc_stat[old][0]*-log2((256-(new))/256.0)\
831 +s->rc_stat[old][1]*-log2( (new) /256.0)
833 #define COST2(old, new) \
835 +COST(256-(old), 256-(new))
837 double size0= COST2(i, i ) + COST2(i2, i2);
838 double sizeX= COST2(i, i2) + COST2(i2, i );
839 if(sizeX < size0 && i!=128 && i2!=128){
841 FFSWAP(int, stt[ i], stt[ i2]);
842 FFSWAP(int, s->rc_stat[i ][0],s->rc_stat[ i2][0]);
843 FFSWAP(int, s->rc_stat[i ][1],s->rc_stat[ i2][1]);
845 FFSWAP(int, stt[256-i], stt[256-i2]);
846 FFSWAP(int, s->rc_stat[256-i][0],s->rc_stat[256-i2][0]);
847 FFSWAP(int, s->rc_stat[256-i][1],s->rc_stat[256-i2][1]);
/* fix up every table entry that pointed at the swapped states */
849 for(j=1; j<256; j++){
850 if (stt[j] == i ) stt[j] = i2;
851 else if(stt[j] == i2) stt[j] = i ;
853 if (stt[256-j] == 256-i ) stt[256-j] = 256-i2;
854 else if(stt[256-j] == 256-i2) stt[256-j] = 256-i ;
/* Encoder init: selects coder type and bit depth from the pixel format,
 * builds the two quant table sets (quant11/quant5 for 8-bit,
 * quant9_10bit/quant5_10bit otherwise), sets up planes, initial states,
 * slice contexts, and — for 2-pass encoding — parses stats_in, re-sorts
 * the state transition table and derives trained initial states.
 * Many lines (fallthroughs, default case, error returns) are missing from
 * this excerpt; the switch relies on case fallthrough in the full file. */
865 static av_cold int encode_init(AVCodecContext *avctx)
867 FFV1Context *s = avctx->priv_data;
873 s->ac= avctx->coder_type ? 2:0;
877 s->state_transition[i]=ver2_state[i];
880 switch(avctx->pix_fmt){
881 case PIX_FMT_YUV444P9:
882 case PIX_FMT_YUV422P9:
883 case PIX_FMT_YUV420P9:
884 if (!avctx->bits_per_raw_sample)
885 s->bits_per_raw_sample = 9;
886 case PIX_FMT_YUV444P10:
887 case PIX_FMT_YUV420P10:
888 case PIX_FMT_YUV422P10:
889 s->packed_at_lsb = 1;
890 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
891 s->bits_per_raw_sample = 10;
893 case PIX_FMT_YUV444P16:
894 case PIX_FMT_YUV422P16:
895 case PIX_FMT_YUV420P16:
896 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) {
897 s->bits_per_raw_sample = 16;
898 } else if (!s->bits_per_raw_sample){
899 s->bits_per_raw_sample = avctx->bits_per_raw_sample;
901 if(s->bits_per_raw_sample <=8){
902 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n");
906 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample of more than 8 needs -coder 1 currently\n");
909 s->version= FFMAX(s->version, 1);
911 case PIX_FMT_YUV444P:
912 case PIX_FMT_YUV440P:
913 case PIX_FMT_YUV422P:
914 case PIX_FMT_YUV420P:
915 case PIX_FMT_YUV411P:
916 case PIX_FMT_YUV410P:
917 s->chroma_planes= av_pix_fmt_descriptors[avctx->pix_fmt].nb_components < 3 ? 0 : 1;
920 case PIX_FMT_YUVA444P:
921 case PIX_FMT_YUVA420P:
934 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
937 if (s->transparency) {
938 av_log(avctx, AV_LOG_WARNING, "Storing alpha plane, this will require a recent FFV1 decoder to playback!\n");
940 if (avctx->context_model > 1U) {
941 av_log(avctx, AV_LOG_ERROR, "Invalid context model %d, valid values are 0 and 1\n", avctx->context_model);
942 return AVERROR(EINVAL);
/* table 0: small 3-input model, table 1: large 5-input model */
945 for(i=0; i<256; i++){
946 s->quant_table_count=2;
947 if(s->bits_per_raw_sample <=8){
948 s->quant_tables[0][0][i]= quant11[i];
949 s->quant_tables[0][1][i]= 11*quant11[i];
950 s->quant_tables[0][2][i]= 11*11*quant11[i];
951 s->quant_tables[1][0][i]= quant11[i];
952 s->quant_tables[1][1][i]= 11*quant11[i];
953 s->quant_tables[1][2][i]= 11*11*quant5 [i];
954 s->quant_tables[1][3][i]= 5*11*11*quant5 [i];
955 s->quant_tables[1][4][i]= 5*5*11*11*quant5 [i];
957 s->quant_tables[0][0][i]= quant9_10bit[i];
958 s->quant_tables[0][1][i]= 11*quant9_10bit[i];
959 s->quant_tables[0][2][i]= 11*11*quant9_10bit[i];
960 s->quant_tables[1][0][i]= quant9_10bit[i];
961 s->quant_tables[1][1][i]= 11*quant9_10bit[i];
962 s->quant_tables[1][2][i]= 11*11*quant5_10bit[i];
963 s->quant_tables[1][3][i]= 5*11*11*quant5_10bit[i];
964 s->quant_tables[1][4][i]= 5*5*11*11*quant5_10bit[i];
/* contexts are folded in half (symmetric), hence the (N+1)/2 */
967 s->context_count[0]= (11*11*11+1)/2;
968 s->context_count[1]= (11*11*5*5*5+1)/2;
969 memcpy(s->quant_table, s->quant_tables[avctx->context_model], sizeof(s->quant_table));
971 for(i=0; i<s->plane_count; i++){
972 PlaneContext * const p= &s->plane[i];
974 memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table));
975 p->quant_table_index= avctx->context_model;
976 p->context_count= s->context_count[p->quant_table_index];
979 if(allocate_initial_states(s) < 0)
980 return AVERROR(ENOMEM);
982 avctx->coded_frame= &s->picture;
985 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
989 if(avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
990 for(i=0; i<s->quant_table_count; i++){
991 s->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*s->rc_stat2[i]));
993 return AVERROR(ENOMEM);
/* 2-pass: parse the stats file produced by a PASS1 run */
997 char *p= avctx->stats_in;
998 uint8_t best_state[256][256];
1002 av_assert0(s->version>=2);
1005 for(j=0; j<256; j++){
1007 s->rc_stat[j][i]= strtol(p, &next, 0);
1009 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d [%s]\n", j,i,p);
1015 for(i=0; i<s->quant_table_count; i++){
1016 for(j=0; j<s->context_count[i]; j++){
1017 for(k=0; k<32; k++){
1019 s->rc_stat2[i][j][k][m]= strtol(p, &next, 0);
1021 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d %d %d [%s]\n", i,j,k,m,p);
1029 gob_count= strtol(p, &next, 0);
1030 if(next==p || gob_count <0){
1031 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid\n");
1035 while(*p=='\n' || *p==' ') p++;
1038 sort_stt(s, s->state_transition);
1040 find_best_state(best_state, s->state_transition);
1042 for(i=0; i<s->quant_table_count; i++){
1043 for(j=0; j<s->context_count[i]; j++){
1044 for(k=0; k<32; k++){
1046 if(s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]){
1047 p=256.0*s->rc_stat2[i][j][k][1] / (s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]);
1049 s->initial_states[i][j][k]= best_state[av_clip(round(p), 1, 255)][av_clip((s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1])/gob_count, 0, 255)];
1058 write_extra_header(s);
1061 if(init_slice_contexts(s) < 0)
1063 if(init_slice_state(s) < 0)
1066 #define STATS_OUT_SIZE 1024*1024*6
1067 if(avctx->flags & CODEC_FLAG_PASS1){
1068 avctx->stats_out= av_mallocz(STATS_OUT_SIZE);
1069 for(i=0; i<s->quant_table_count; i++){
1070 for(j=0; j<s->slice_count; j++){
1071 FFV1Context *sf= s->slice_context[j];
1072 av_assert0(!sf->rc_stat2[i]);
1073 sf->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*sf->rc_stat2[i]));
1074 if(!sf->rc_stat2[i])
1075 return AVERROR(ENOMEM);
1082 #endif /* CONFIG_FFV1_ENCODER */
/* Resets all adaptive coding state at a keyframe: for every slice and
 * plane, reloads the range-coder context states from initial_states (or
 * the neutral 128) and re-initializes the Golomb-Rice VLC contexts. */
1085 static void clear_state(FFV1Context *f){
1088 for(si=0; si<f->slice_count; si++){
1089 FFV1Context *fs= f->slice_context[si];
1090 for(i=0; i<f->plane_count; i++){
1091 PlaneContext *p= &fs->plane[i];
1093 p->interlace_bit_state[0]= 128;
1094 p->interlace_bit_state[1]= 128;
1097 if(f->initial_states[p->quant_table_index]){
1098 memcpy(p->state, f->initial_states[p->quant_table_index], CONTEXT_SIZE*p->context_count);
1100 memset(p->state, 128, CONTEXT_SIZE*p->context_count);
1102 for(j=0; j<p->context_count; j++){
1103 p->vlc_state[j].drift= 0;
1104 p->vlc_state[j].error_sum= 4; //FFMAX((RANGE + 32)/64, 2);
1105 p->vlc_state[j].bias= 0;
1106 p->vlc_state[j].count= 1;
1113 #if CONFIG_FFV1_ENCODER
/* Per-slice worker run via avctx->execute: encodes the slice's rectangle
 * plane by plane (YUV path with chroma subsampling) or as packed RGB.
 * 'arg' is a pointer to the slice's FFV1Context. x/y (slice origin) are
 * initialized on lines not visible in this excerpt. */
1114 static int encode_slice(AVCodecContext *c, void *arg){
1115 FFV1Context *fs= *(void**)arg;
1116 FFV1Context *f= fs->avctx->priv_data;
1117 int width = fs->slice_width;
1118 int height= fs->slice_height;
1121 AVFrame * const p= &f->picture;
/* bytes per sample: 2 for >8-bit input, 1 otherwise */
1122 const int ps= (f->bits_per_raw_sample>8)+1;
1124 if(f->colorspace==0){
1125 const int chroma_width = -((-width )>>f->chroma_h_shift);
1126 const int chroma_height= -((-height)>>f->chroma_v_shift);
1127 const int cx= x>>f->chroma_h_shift;
1128 const int cy= y>>f->chroma_v_shift;
1130 encode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1132 if (f->chroma_planes){
1133 encode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1134 encode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1136 if (fs->transparency)
1137 encode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
1139 encode_rgb_frame(fs, (uint32_t*)(p->data[0]) + ps*x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
/* Top-level encode callback: allocates the packet, writes the keyframe
 * flag (and header/clear_state on keyframes — those lines are elided),
 * sets up per-slice range coders / bit writers over disjoint packet
 * regions, runs encode_slice in parallel, then compacts the slice
 * bitstreams (each suffixed with its 24-bit length) and, in PASS1 mode,
 * aggregates and prints rc statistics into stats_out every 32 frames.
 * Many lines are missing from this excerpt. */
1146 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1147 const AVFrame *pict, int *got_packet)
1149 FFV1Context *f = avctx->priv_data;
1150 RangeCoder * const c= &f->slice_context[0]->c;
1151 AVFrame * const p= &f->picture;
1153 uint8_t keystate=128;
/* generous worst-case packet size */
1157 if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width*avctx->height*((8*2+1+1)*4)/8
1158 + FF_MIN_BUFFER_SIZE)) < 0)
1161 ff_init_range_encoder(c, pkt->data, pkt->size);
1162 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1165 p->pict_type= AV_PICTURE_TYPE_I;
1167 if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
1168 put_rac(c, &keystate, 1);
1174 put_rac(c, &keystate, 0);
/* golomb path: header range coder is terminated, bit writer follows it */
1179 used_count += ff_rac_terminate(c);
1180 //printf("pos=%d\n", used_count);
1181 init_put_bits(&f->slice_context[0]->pb, pkt->data + used_count, pkt->size - used_count);
1184 for(i=1; i<256; i++){
1185 c->one_state[i]= f->state_transition[i];
1186 c->zero_state[256-i]= 256-c->one_state[i];
/* give each remaining slice its own disjoint output region */
1190 for(i=1; i<f->slice_count; i++){
1191 FFV1Context *fs= f->slice_context[i];
1192 uint8_t *start = pkt->data + (pkt->size-used_count)*i/f->slice_count;
1193 int len = pkt->size/f->slice_count;
1196 ff_init_range_encoder(&fs->c, start, len);
1198 init_put_bits(&fs->pb, start, len);
1201 avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
1204 for(i=0; i<f->slice_count; i++){
1205 FFV1Context *fs= f->slice_context[i];
1210 put_rac(&fs->c, &state, 0);
1211 bytes= ff_rac_terminate(&fs->c);
1213 flush_put_bits(&fs->pb); //nicer padding FIXME
1214 bytes= used_count + (put_bits_count(&fs->pb)+7)/8;
1218 av_assert0(bytes < pkt->size/f->slice_count);
/* pack slices back-to-back; trailing 24-bit length per slice */
1219 memmove(buf_p, fs->ac ? fs->c.bytestream_start : fs->pb.buf, bytes);
1220 av_assert0(bytes < (1<<24));
1221 AV_WB24(buf_p+bytes, bytes);
1227 if((avctx->flags&CODEC_FLAG_PASS1) && (f->picture_number&31)==0){
1229 char *p= avctx->stats_out;
1230 char *end= p + STATS_OUT_SIZE;
1232 memset(f->rc_stat, 0, sizeof(f->rc_stat));
1233 for(i=0; i<f->quant_table_count; i++)
1234 memset(f->rc_stat2[i], 0, f->context_count[i]*sizeof(*f->rc_stat2[i]));
1236 for(j=0; j<f->slice_count; j++){
1237 FFV1Context *fs= f->slice_context[j];
1238 for(i=0; i<256; i++){
1239 f->rc_stat[i][0] += fs->rc_stat[i][0];
1240 f->rc_stat[i][1] += fs->rc_stat[i][1];
1242 for(i=0; i<f->quant_table_count; i++){
1243 for(k=0; k<f->context_count[i]; k++){
1244 for(m=0; m<32; m++){
1245 f->rc_stat2[i][k][m][0] += fs->rc_stat2[i][k][m][0];
1246 f->rc_stat2[i][k][m][1] += fs->rc_stat2[i][k][m][1];
1252 for(j=0; j<256; j++){
1253 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat[j][0], f->rc_stat[j][1]);
1256 snprintf(p, end-p, "\n");
1258 for(i=0; i<f->quant_table_count; i++){
1259 for(j=0; j<f->context_count[i]; j++){
1260 for(m=0; m<32; m++){
1261 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat2[i][j][m][0], f->rc_stat2[i][j][m][1]);
1266 snprintf(p, end-p, "%d\n", f->gob_count);
1267 } else if(avctx->flags&CODEC_FLAG_PASS1)
1268 avctx->stats_out[0] = '\0';
1270 f->picture_number++;
1271 pkt->size = buf_p - pkt->data;
1272 pkt->flags |= AV_PKT_FLAG_KEY*p->key_frame;
1277 #endif /* CONFIG_FFV1_ENCODER */
/* Teardown shared by encoder and decoder: releases the reference picture
 * (decoder only), then frees per-slice plane states, sample buffers,
 * stats_out, per-quant-table initial states and rc stats, and finally the
 * slice contexts themselves. */
1279 static av_cold int common_end(AVCodecContext *avctx){
1280 FFV1Context *s = avctx->priv_data;
1283 if (avctx->codec->decode && s->picture.data[0])
1284 avctx->release_buffer(avctx, &s->picture);
1286 for(j=0; j<s->slice_count; j++){
1287 FFV1Context *fs= s->slice_context[j];
1288 for(i=0; i<s->plane_count; i++){
1289 PlaneContext *p= &fs->plane[i];
1291 av_freep(&p->state);
1292 av_freep(&p->vlc_state);
1294 av_freep(&fs->sample_buffer);
1297 av_freep(&avctx->stats_out);
1298 for(j=0; j<s->quant_table_count; j++){
1299 av_freep(&s->initial_states[j]);
1300 for(i=0; i<s->slice_count; i++){
1301 FFV1Context *sf= s->slice_context[i];
1302 av_freep(&sf->rc_stat2[j]);
1304 av_freep(&s->rc_stat2[j]);
1307 for(i=0; i<s->slice_count; i++){
1308 av_freep(&s->slice_context[i]);
/* Decodes one line of w samples into sample[1] (sample[0] is the previous
 * line): AC path reads range-coded residuals; Golomb path first handles
 * the context-0 run mode (run length read from the bitstream), then VLC
 * residuals. Reconstruction adds the median prediction and wraps modulo
 * 2^bits. Several branch lines are missing from this excerpt. */
1314 static av_always_inline void decode_line(FFV1Context *s, int w,
1316 int plane_index, int bits)
1318 PlaneContext * const p= &s->plane[plane_index];
1319 RangeCoder * const c= &s->c;
1323 int run_index= s->run_index;
1326 int diff, context, sign;
/* note argument order vs. encoder: current line is sample[1] here */
1328 context= get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
1335 av_assert2(context < p->context_count);
1338 diff= get_symbol_inline(c, p->state[context], 1);
1340 if(context == 0 && run_mode==0) run_mode=1;
1343 if(run_count==0 && run_mode==1){
1344 if(get_bits1(&s->gb)){
1345 run_count = 1<<ff_log2_run[run_index];
1346 if(x + run_count <= w) run_index++;
1348 if(ff_log2_run[run_index]) run_count = get_bits(&s->gb, ff_log2_run[run_index]);
1350 if(run_index) run_index--;
1358 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1363 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1365 // printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, get_bits_count(&s->gb));
1368 if(sign) diff= -diff;
1370 sample[1][x]= (predict(sample[1] + x, sample[0] + x) + diff) & ((1<<bits)-1);
1372 s->run_index= run_index;
/**
 * Decode a full plane by repeatedly calling decode_line().
 * Two line buffers are double-buffered (swapped each row); the +3 / w+6
 * margins give the context model valid neighbours at the borders.
 */
1375 static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
1378 sample[0]=s->sample_buffer +3;
1379 sample[1]=s->sample_buffer+w+6+3;
1383 memset(s->sample_buffer, 0, 2*(w+6)*sizeof(*s->sample_buffer));
/* Swap current/previous line buffers for the next row. */
1386 int16_t *temp = sample[0]; //FIXME try a normal buffer
1388 sample[0]= sample[1];
/* Replicate edge samples so border contexts read valid data. */
1391 sample[1][-1]= sample[0][0 ];
1392 sample[0][ w]= sample[0][w-1];
1395 if(s->avctx->bits_per_raw_sample <= 8){
1396 decode_line(s, w, sample, plane_index, 8);
1398 src[x + stride*y]= sample[1][x];
/* >8 bit content is written as 16-bit samples, either packed at the
 * LSB or shifted up to occupy the most significant bits. */
1401 decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
1402 if(s->packed_at_lsb){
1404 ((uint16_t*)(src + stride*y))[x]= sample[1][x];
1408 ((uint16_t*)(src + stride*y))[x]= sample[1][x] << (16 - s->avctx->bits_per_raw_sample);
1412 //STOP_TIMER("decode-line")}
/**
 * Decode an RGB(A) frame (colorspace 1, JPEG2000-RCT style).
 * Planes are decoded in G, B, R (and optionally A) order; each plane
 * has its own pair of double-buffered lines in sample_buffer.
 * decode_line() is called with 9 bits — presumably one extra bit for
 * the RCT residual range (see the commented range asserts below);
 * NOTE(review): confirm against the FFV1 specification.
 */
1416 static void decode_rgb_frame(FFV1Context *s, uint32_t *src, int w, int h, int stride){
1418 int16_t *sample[4][2];
1420 sample[x][0] = s->sample_buffer + x*2 *(w+6) + 3;
1421 sample[x][1] = s->sample_buffer + (x*2+1)*(w+6) + 3;
1426 memset(s->sample_buffer, 0, 8*(w+6)*sizeof(*s->sample_buffer));
1429 for(p=0; p<3 + s->transparency; p++){
/* Swap current/previous line buffers for this plane. */
1430 int16_t *temp = sample[p][0]; //FIXME try a normal buffer
1432 sample[p][0]= sample[p][1];
/* Replicate edges for border contexts. */
1435 sample[p][1][-1]= sample[p][0][0 ];
1436 sample[p][0][ w]= sample[p][0][w-1];
/* (p+1)/2 maps planes G,B,R,A to plane_index 0,1,1,2. */
1437 decode_line(s, w, sample[p], (p+1)/2, 9);
1440 int g= sample[0][1][x];
1441 int b= sample[1][1][x];
1442 int r= sample[2][1][x];
1443 int a= sample[3][1][x];
1445 // assert(g>=0 && b>=0 && r>=0);
1446 // assert(g<256 && b<512 && r<512);
/* Pack as BGRA into a 32-bit word (little-endian byte order). */
1454 src[x + stride*y]= b + (g<<8) + (r<<16) + (a<<24);
/**
 * Decode one slice; entry point for avctx->execute() slice threading.
 * arg points at the slice's FFV1Context pointer.
 */
1459 static int decode_slice(AVCodecContext *c, void *arg){
1460 FFV1Context *fs= *(void**)arg;
1461 FFV1Context *f= fs->avctx->priv_data;
1462 int width = fs->slice_width;
1463 int height= fs->slice_height;
/* Bytes per sample in the output: 2 for >8-bit content, else 1. */
1466 const int ps= (c->bits_per_raw_sample>8)+1;
1467 AVFrame * const p= &f->picture;
1469 av_assert1(width && height);
1470 if(f->colorspace==0){
/* -((-w)>>shift) is ceiling division by 1<<shift. */
1471 const int chroma_width = -((-width )>>f->chroma_h_shift);
1472 const int chroma_height= -((-height)>>f->chroma_v_shift);
1473 const int cx= x>>f->chroma_h_shift;
1474 const int cy= y>>f->chroma_v_shift;
1475 decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1477 if (f->chroma_planes){
1478 decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1479 decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1481 if (fs->transparency)
1482 decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
/* RGB path: src pointer and stride are in 32-bit pixel units. */
1484 decode_rgb_frame(fs, (uint32_t*)p->data[0] + ps*x + y*(p->linesize[0]/4), width, height, p->linesize[0]/4);
/**
 * Read one 256-entry quantization table from the range coder.
 * Only the first half (0..127) is coded as run lengths of increasing
 * levels; the second half is reconstructed by symmetry (negated mirror).
 * Returns the number of levels (for context counting) or -1 on a
 * malformed table (run lengths exceeding 128 entries).
 */
1492 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale){
1495 uint8_t state[CONTEXT_SIZE];
1497 memset(state, 128, sizeof(state));
1499 for(v=0; i<128 ; v++){
1500 int len= get_symbol(c, state, 0) + 1;
1502 if(len + i > 128) return -1;
/* scale stacks this table's contribution on top of the previous ones. */
1505 quant_table[i] = scale*v;
1508 //if(i%16==0) printf("\n");
/* Mirror the positive half into the negative half. */
1512 for(i=1; i<128; i++){
1513 quant_table[256-i]= -quant_table[i];
1515 quant_table[128]= -quant_table[127];
/**
 * Read all MAX_CONTEXT_INPUTS quant tables for one table set.
 * The total context count is the product of the per-input level counts;
 * values above 32768 are rejected (a negative read_quant_table() result
 * also trips this check after the unsigned comparison).
 * Returns (context_count+1)/2 — contexts come in sign-symmetric pairs.
 */
1520 static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
1522 int context_count=1;
1525 context_count*= read_quant_table(c, quant_table[i], context_count);
1526 if(context_count > 32768U){
1530 return (context_count+1)/2;
/**
 * Parse the version-2 global header stored in avctx->extradata:
 * version, coder type, custom state transitions, colorspace/bit depth,
 * chroma subsampling, transparency, slice grid and quant table sets,
 * plus optional per-table initial context states.
 */
1533 static int read_extra_header(FFV1Context *f){
1534 RangeCoder * const c= &f->c;
1535 uint8_t state[CONTEXT_SIZE];
1537 uint8_t state2[32][CONTEXT_SIZE];
1539 memset(state2, 128, sizeof(state2));
1540 memset(state, 128, sizeof(state));
1542 ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size);
1543 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1545 f->version= get_symbol(c, state, 0);
1546 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
/* Custom range-coder state transition table, delta-coded vs default. */
1548 for(i=1; i<256; i++){
1549 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1552 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1553 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1554 get_rac(c, state); //no chroma = false
1555 f->chroma_h_shift= get_symbol(c, state, 0);
1556 f->chroma_v_shift= get_symbol(c, state, 0);
1557 f->transparency= get_rac(c, state);
1558 f->plane_count= 2 + f->transparency;
1559 f->num_h_slices= 1 + get_symbol(c, state, 0);
1560 f->num_v_slices= 1 + get_symbol(c, state, 0);
/* Sanity: cannot have more slices than pixels in either dimension. */
1561 if(f->num_h_slices > (unsigned)f->width || f->num_v_slices > (unsigned)f->height){
1562 av_log(f->avctx, AV_LOG_ERROR, "too many slices\n");
1566 f->quant_table_count= get_symbol(c, state, 0);
1567 if(f->quant_table_count > (unsigned)MAX_QUANT_TABLES)
1569 for(i=0; i<f->quant_table_count; i++){
1570 if((f->context_count[i]= read_quant_tables(c, f->quant_tables[i])) < 0){
1571 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1576 if(allocate_initial_states(f) < 0)
1577 return AVERROR(ENOMEM);
/* Optional per-table initial states, delta-coded against the previous
 * context (or 128 for the first one). */
1579 for(i=0; i<f->quant_table_count; i++){
1580 if(get_rac(c, state)){
1581 for(j=0; j<f->context_count[i]; j++){
1582 for(k=0; k<CONTEXT_SIZE; k++){
1583 int pred= j ? f->initial_states[i][j-1][k] : 128;
1584 f->initial_states[i][j][k]= (pred+get_symbol(c, state2[k], 1))&0xFF;
/**
 * Parse the per-keyframe header from the first slice's range coder:
 * codec version, coder type, colorspace and bit depth (mapped to an
 * AVPixelFormat), slice geometry and per-plane quant table selection.
 * Returns 0 on success, a negative value on a malformed header.
 */
1593 static int read_header(FFV1Context *f){
1594 uint8_t state[CONTEXT_SIZE];
1595 int i, j, context_count;
1596 RangeCoder * const c= &f->slice_context[0]->c;
1598 memset(state, 128, sizeof(state));
1601 f->version= get_symbol(c, state, 0);
1602 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
/* Custom range-coder state transitions, delta-coded vs the default. */
1604 for(i=1; i<256; i++){
1605 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1608 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1610 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1611 f->chroma_planes= get_rac(c, state);
1612 f->chroma_h_shift= get_symbol(c, state, 0);
1613 f->chroma_v_shift= get_symbol(c, state, 0);
1614 f->transparency= get_rac(c, state);
1615 f->plane_count= 2 + f->transparency;
/* Map colorspace + subsampling + depth to a pixel format.
 * The switch key packs h_shift in the high nibble, v_shift in the low. */
1618 if(f->colorspace==0){
1619 if(!f->transparency && !f->chroma_planes){
1620 if (f->avctx->bits_per_raw_sample<=8)
1621 f->avctx->pix_fmt= PIX_FMT_GRAY8;
1623 f->avctx->pix_fmt= PIX_FMT_GRAY16;
1624 }else if(f->avctx->bits_per_raw_sample<=8 && !f->transparency){
1625 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1626 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P; break;
1627 case 0x01: f->avctx->pix_fmt= PIX_FMT_YUV440P; break;
1628 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P; break;
1629 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P; break;
1630 case 0x20: f->avctx->pix_fmt= PIX_FMT_YUV411P; break;
1631 case 0x22: f->avctx->pix_fmt= PIX_FMT_YUV410P; break;
1633 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1636 }else if(f->avctx->bits_per_raw_sample<=8 && f->transparency){
1637 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1638 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUVA444P; break;
1639 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUVA420P; break;
1641 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1644 }else if(f->avctx->bits_per_raw_sample==9) {
1646 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1647 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P9; break;
1648 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P9; break;
1649 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P9; break;
1651 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1654 }else if(f->avctx->bits_per_raw_sample==10) {
1656 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1657 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P10; break;
1658 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P10; break;
1659 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P10; break;
1661 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1665 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1666 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
1667 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P16; break;
1668 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P16; break;
1670 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1674 }else if(f->colorspace==1){
1675 if(f->chroma_h_shift || f->chroma_v_shift){
1676 av_log(f->avctx, AV_LOG_ERROR, "chroma subsampling not supported in this colorspace\n");
1679 if(f->transparency) f->avctx->pix_fmt= PIX_FMT_RGB32;
1680 else f->avctx->pix_fmt= PIX_FMT_0RGB32;
1682 av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
1686 //printf("%d %d %d\n", f->chroma_h_shift, f->chroma_v_shift,f->avctx->pix_fmt);
1688 context_count= read_quant_tables(c, f->quant_table);
1689 if(context_count < 0){
1690 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1694 f->slice_count= get_symbol(c, state, 0);
1695 if(f->slice_count > (unsigned)MAX_SLICES)
/* Per-slice geometry and per-plane context setup. */
1699 for(j=0; j<f->slice_count; j++){
1700 FFV1Context *fs= f->slice_context[j];
1702 fs->packed_at_lsb= f->packed_at_lsb;
1704 if(f->version >= 2){
/* Geometry is coded in grid units; scale by frame size first, divide
 * by the slice grid afterwards so rounding matches the encoder. */
1705 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1706 fs->slice_y = get_symbol(c, state, 0) *f->height;
1707 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1708 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
1710 fs->slice_x /= f->num_h_slices;
1711 fs->slice_y /= f->num_v_slices;
1712 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1713 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
/* Unsigned compares reject both oversized and negative dimensions. */
1714 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1716 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1717 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1721 for(i=0; i<f->plane_count; i++){
1722 PlaneContext * const p= &fs->plane[i];
1724 if(f->version >= 2){
1725 int idx=get_symbol(c, state, 0);
/* Valid table indices are 0..quant_table_count-1; '>=' rejects the
 * off-by-one that would read past the last table ('>' allowed
 * idx == quant_table_count, an out-of-bounds quant_tables[] access). */
1726 if(idx >= (unsigned)f->quant_table_count){
1727 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
1730 p->quant_table_index= idx;
1731 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
1732 context_count= f->context_count[idx];
1734 memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
/* Growing context count invalidates old state arrays; they are
 * reallocated lazily by the slice-state init. */
1737 if(p->context_count < context_count){
1738 av_freep(&p->state);
1739 av_freep(&p->vlc_state);
1741 p->context_count= context_count;
/**
 * Decoder init: parse the global header from extradata (if present,
 * i.e. version >= 2 streams) and allocate the slice contexts.
 */
1748 static av_cold int decode_init(AVCodecContext *avctx)
1750 FFV1Context *f = avctx->priv_data;
1754 if(avctx->extradata && read_extra_header(f) < 0)
1757 if(init_slice_contexts(f) < 0)
/**
 * Decode one packet into one frame.
 * Keyframes re-parse the frame header; the packet tail carries a chain
 * of 3-byte big-endian slice sizes that is walked backwards to locate
 * each slice's coded data, after which all slices decode in parallel.
 */
1763 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
1764 const uint8_t *buf = avpkt->data;
1765 int buf_size = avpkt->size;
1766 FFV1Context *f = avctx->priv_data;
1767 RangeCoder * const c= &f->slice_context[0]->c;
1768 AVFrame * const p= &f->picture;
1770 uint8_t keystate= 128;
1771 const uint8_t *buf_p;
1773 AVFrame *picture = data;
1775 /* release previously stored data */
1777 avctx->release_buffer(avctx, p);
1779 ff_init_range_decoder(c, buf, buf_size);
1780 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1783 p->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
/* First coded bit: keyframe flag. Keyframes carry a full header. */
1784 if(get_rac(c, &keystate)){
1786 if(read_header(f) < 0)
1788 if(init_slice_state(f) < 0)
/* Apply the (possibly custom) state transition table to this coder. */
1797 for(i=1; i<256; i++){
1798 c->one_state[i]= f->state_transition[i];
1799 c->zero_state[256-i]= 256-c->one_state[i];
1804 if(avctx->get_buffer(avctx, p) < 0){
1805 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1809 if(avctx->debug&FF_DEBUG_PICT_INFO)
1810 av_log(avctx, AV_LOG_ERROR, "keyframe:%d coder:%d\n", p->key_frame, f->ac);
/* Golomb-Rice path: skip past the range-coded header and bit-align. */
1813 bytes_read = c->bytestream - c->bytestream_start - 1;
1814 if(bytes_read ==0) av_log(avctx, AV_LOG_ERROR, "error at end of AC stream\n"); //FIXME
1815 //printf("pos=%d\n", bytes_read);
1816 init_get_bits(&f->slice_context[0]->gb, buf + bytes_read, (buf_size - bytes_read) * 8);
1818 bytes_read = 0; /* avoid warning */
/* Walk the slice-size chain backwards from the end of the packet;
 * slice 0 implicitly owns whatever remains at the front. */
1821 buf_p= buf + buf_size;
1822 for(i=f->slice_count-1; i>0; i--){
1823 FFV1Context *fs= f->slice_context[i];
1824 int v= AV_RB24(buf_p-3)+3;
1825 if(buf_p - buf <= v){
1826 av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
1831 ff_init_range_decoder(&fs->c, buf_p, v);
1833 init_get_bits(&fs->gb, buf_p, v * 8);
/* Decode all slices, potentially threaded. */
1837 avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
1838 f->picture_number++;
1841 *data_size = sizeof(AVFrame);
/* FFV1 decoder registration: direct rendering plus slice threading. */
1846 AVCodec ff_ffv1_decoder = {
1848 .type = AVMEDIA_TYPE_VIDEO,
1849 .id = CODEC_ID_FFV1,
1850 .priv_data_size = sizeof(FFV1Context),
1851 .init = decode_init,
1852 .close = common_end,
1853 .decode = decode_frame,
1854 .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ | CODEC_CAP_SLICE_THREADS,
1855 .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
1858 #if CONFIG_FFV1_ENCODER
1859 AVCodec ff_ffv1_encoder = {
1861 .type = AVMEDIA_TYPE_VIDEO,
1862 .id = CODEC_ID_FFV1,
1863 .priv_data_size = sizeof(FFV1Context),
1864 .init = encode_init,
1865 .encode2 = encode_frame,
1866 .close = common_end,
1867 .capabilities = CODEC_CAP_SLICE_THREADS,
1868 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUVA420P, PIX_FMT_YUV444P, PIX_FMT_YUVA444P, PIX_FMT_YUV440P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_0RGB32, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_YUV444P9, PIX_FMT_YUV422P9, PIX_FMT_YUV420P9, PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_YUV444P10, PIX_FMT_GRAY16, PIX_FMT_GRAY8, PIX_FMT_NONE},
1869 .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),