2 * FFV1 codec for libavcodec
4 * Copyright (c) 2003-2012 Michael Niedermayer <michaelni@gmx.at>
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * FF Video Codec 1 (a lossless codec)
33 #include "rangecoder.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/crc.h"
39 #include "libavutil/opt.h"
40 #include "libavutil/imgutils.h"
41 #include "libavutil/timer.h"
43 #ifdef __INTEL_COMPILER
/* Number of range-coder probability states kept per context. */
49 #define CONTEXT_SIZE 32
/* Upper bound on distinct quantization tables a stream may carry. */
51 #define MAX_QUANT_TABLES 8
/* Maximum number of neighbour-difference inputs used for context selection. */
52 #define MAX_CONTEXT_INPUTS 5
/* Golomb run-length table shared across codecs; defined in another file. */
54 extern const uint8_t ff_log2_run[41];
/* 5-level quantizer for neighbour-sample differences of >8-bit content.
 * Indexed by (difference & 0xFF); antisymmetric around zero so that
 * negative differences map to the negated level of the mirrored index. */
56 static const int8_t quant5_10bit[256]={
57 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
58 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
59 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
60 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
61 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
62 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
63 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
64 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
65 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
66 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
67 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
68 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
69 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,
70 -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
71 -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
72 -1,-1,-1,-1,-1,-1,-0,-0,-0,-0,-0,-0,-0,-0,-0,-0,
/* 5-level quantizer for neighbour-sample differences of 8-bit content.
 * Same indexing scheme as quant5_10bit but with steeper small-value bins. */
75 static const int8_t quant5[256]={
76 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
77 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
78 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
79 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
80 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
85 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
86 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
87 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
88 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
89 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
90 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
91 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1,-1,
/* 9-level quantizer for neighbour-sample differences of >8-bit content.
 * Indexed by (difference & 0xFF); antisymmetric around zero. */
94 static const int8_t quant9_10bit[256]={
95 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
97 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
98 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
99 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
100 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
101 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
102 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
103 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
104 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
105 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
106 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
107 -4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,
108 -3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,
109 -3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
110 -2,-2,-2,-2,-1,-1,-1,-1,-1,-1,-1,-1,-0,-0,-0,-0,
/* 11-level quantizer for neighbour-sample differences of 8-bit content.
 * Indexed by (difference & 0xFF); antisymmetric around zero. */
113 static const int8_t quant11[256]={
114 0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
115 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
116 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
117 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
118 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
119 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
120 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
121 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
122 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
123 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
124 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
125 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
126 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
127 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-4,-4,
128 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
129 -4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-1,
/* Default range-coder state-transition table used by version >= 2
 * bitstreams; maps each of the 256 probability states to its successor. */
132 static const uint8_t ver2_state[256]= {
133 0, 10, 10, 10, 10, 16, 16, 16, 28, 16, 16, 29, 42, 49, 20, 49,
134 59, 25, 26, 26, 27, 31, 33, 33, 33, 34, 34, 37, 67, 38, 39, 39,
135 40, 40, 41, 79, 43, 44, 45, 45, 48, 48, 64, 50, 51, 52, 88, 52,
136 53, 74, 55, 57, 58, 58, 74, 60, 101, 61, 62, 84, 66, 66, 68, 69,
137 87, 82, 71, 97, 73, 73, 82, 75, 111, 77, 94, 78, 87, 81, 83, 97,
138 85, 83, 94, 86, 99, 89, 90, 99, 111, 92, 93, 134, 95, 98, 105, 98,
139 105, 110, 102, 108, 102, 118, 103, 106, 106, 113, 109, 112, 114, 112, 116, 125,
140 115, 116, 117, 117, 126, 119, 125, 121, 121, 123, 145, 124, 126, 131, 127, 129,
141 165, 130, 132, 138, 133, 135, 145, 136, 137, 139, 146, 141, 143, 142, 144, 148,
142 147, 155, 151, 149, 151, 150, 152, 157, 153, 154, 156, 168, 158, 162, 161, 160,
143 172, 163, 169, 164, 166, 184, 167, 170, 177, 174, 171, 173, 182, 176, 180, 178,
144 175, 189, 179, 181, 186, 183, 192, 185, 200, 187, 191, 188, 190, 197, 193, 196,
145 197, 194, 195, 196, 198, 202, 199, 201, 210, 203, 207, 204, 205, 206, 208, 214,
146 209, 211, 221, 212, 213, 215, 224, 216, 217, 218, 219, 220, 222, 228, 223, 225,
147 226, 224, 227, 229, 240, 230, 231, 232, 233, 234, 235, 236, 238, 239, 237, 242,
148 241, 243, 242, 244, 245, 246, 247, 248, 249, 250, 251, 252, 252, 253, 254, 255,
/* Per-context state for the Golomb-Rice (VLC) coding path. */
151 typedef struct VlcState{
/* Per-plane coding state: quantization tables plus per-context
 * range-coder or VLC state. */
158 typedef struct PlaneContext{
/* One quantization sub-table per context input, indexed by (diff & 0xFF). */
159 int16_t quant_table[MAX_CONTEXT_INPUTS][256];
160 int quant_table_index;
/* One CONTEXT_SIZE-byte state vector per context (range-coder path). */
162 uint8_t (*state)[CONTEXT_SIZE];
164 uint8_t interlace_bit_state[2];
/* Hard limit on slices per frame. */
167 #define MAX_SLICES 256
/* Main codec context; also used per-slice (each slice gets its own copy,
 * see slice_context[]). */
169 typedef struct FFV1Context{
171 AVCodecContext *avctx;
/* Symbol statistics gathered for two-pass (PASS1) rate optimization. */
175 uint64_t rc_stat[256][2];
176 uint64_t (*rc_stat2[MAX_QUANT_TABLES])[32][2];
180 int chroma_h_shift, chroma_v_shift;
186 AVFrame last_picture;
188 int ac; ///< 1=range coder <-> 0=golomb rice
189 int ac_byte_count; ///< number of bytes used for AC coding
190 PlaneContext plane[MAX_PLANES];
/* Currently selected quantization tables (copied from quant_tables[]). */
191 int16_t quant_table[MAX_CONTEXT_INPUTS][256];
192 int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][256];
193 int context_count[MAX_QUANT_TABLES];
194 uint8_t state_transition[256];
195 uint8_t (*initial_states[MAX_QUANT_TABLES])[32];
/* Scratch rows of decoded/encoded samples (ring of 2 or 3 lines). */
198 int16_t *sample_buffer;
205 int quant_table_count;
/* Per-slice sub-contexts; slice 0 shares buffers with the main context. */
209 struct FFV1Context *slice_context[MAX_SLICES];
217 int bits_per_raw_sample;
/* Fold a prediction residual into the signed range representable in
 * 'bits' bits (body elided here; wraps diff modulo 2^bits). */
220 static av_always_inline int fold(int diff, int bits){
/* Median predictor: predicts the current sample from its left (L),
 * top (T) and top-left (LT) neighbours, clamped via mid_pred to the
 * median of {L, L+T-LT, T} (the classic LOCO-I/JPEG-LS predictor). */
232 static inline int predict(int16_t *src, int16_t *last)
234 const int LT= last[-1];
235 const int T= last[ 0];
236 const int L = src[-1];
238 return mid_pred(L, L + T - LT, T);
/* Compute the coding context from quantized neighbour differences.
 * Uses 3 inputs (L-LT, LT-T, T-RT) for the small model, or 5 inputs
 * (adding LL-L and TT-T from two rows/columns back) when the large
 * context model is active — detected by quant_table[3] being nonzero. */
241 static inline int get_context(PlaneContext *p, int16_t *src,
242 int16_t *last, int16_t *last2)
244 const int LT= last[-1];
245 const int T= last[ 0];
246 const int RT= last[ 1];
247 const int L = src[-1];
/* quant_table[3][127] != 0 signals the 5-input context model. */
249 if(p->quant_table[3][127]){
250 const int TT= last2[0];
251 const int LL= src[-2];
252 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF]
253 +p->quant_table[3][(LL-L) & 0xFF] + p->quant_table[4][(TT-T) & 0xFF];
255 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF];
/* For every (probability p, occupancy distribution) pair, search for the
 * initial range-coder state that minimizes the expected code length,
 * iterating the state-transition chain and accumulating -log2 costs.
 * Fills best_state[p][occ]. Parts of the loop bodies are elided here. */
258 static void find_best_state(uint8_t best_state[256][256], const uint8_t one_state[256]){
/* Precomputed log2(i/256) table for code-length estimation. */
263 l2tab[i]= log2(i/256.0);
265 for(i=0; i<256; i++){
266 double best_len[256];
/* Only probabilities near i are considered as candidate start states. */
272 for(j=FFMAX(i-10,1); j<FFMIN(i+11,256); j++){
276 for(k=0; k<256; k++){
277 double newocc[256]={0};
278 for(m=0; m<256; m++){
/* Expected bits contributed by state m weighted by its occupancy. */
280 len -=occ[m]*( p *l2tab[ m]
281 + (1-p)*l2tab[256-m]);
284 if(len < best_len[k]){
/* Propagate occupancy through one state transition step. */
288 for(m=0; m<256; m++){
290 newocc[ one_state[ m]] += occ[m]* p ;
291 newocc[256-one_state[256-m]] += occ[m]*(1-p);
294 memcpy(occ, newocc, sizeof(occ));
/* Encode one (optionally signed) integer with the range coder using the
 * FFV1 unary-exponent / mantissa scheme. When rc_stat/rc_stat2 are
 * non-NULL, per-bit statistics are gathered for two-pass optimization.
 * State layout: [0]=zero flag, [1..10]=exponent, [11..21]=sign,
 * [22..31]=mantissa bits (larger exponents share the last state). */
300 static av_always_inline av_flatten void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int is_signed, uint64_t rc_stat[256][2], uint64_t rc_stat2[32][2]){
/* Local wrapper that also updates the statistics tables. */
303 #define put_rac(C,S,B) \
307 rc_stat2[(S)-state][B]++;\
313 const int a= FFABS(v);
314 const int e= av_log2(a);
315 put_rac(c, state+0, 0);
/* Small-exponent fast path: one state per exponent bit. */
318 put_rac(c, state+1+i, 1); //1..10
320 put_rac(c, state+1+i, 0);
322 for(i=e-1; i>=0; i--){
323 put_rac(c, state+22+i, (a>>i)&1); //22..31
327 put_rac(c, state+11 + e, v < 0); //11..21
/* Large-exponent path: exponent/mantissa states saturate at index 9/10. */
330 put_rac(c, state+1+FFMIN(i,9), 1); //1..10
332 put_rac(c, state+1+9, 0);
334 for(i=e-1; i>=0; i--){
335 put_rac(c, state+22+FFMIN(i,9), (a>>i)&1); //22..31
339 put_rac(c, state+11 + 10, v < 0); //11..21
/* v == 0 is coded as a single bit. */
342 put_rac(c, state+0, 1);
/* Out-of-line wrapper around put_symbol_inline without statistics
 * gathering; used by header/table writers where speed is irrelevant. */
347 static av_noinline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed){
348 put_symbol_inline(c, state, v, is_signed, NULL, NULL);
/* Decode one integer written by put_symbol_inline: read the zero flag,
 * then the unary exponent, mantissa bits, and optional sign, using the
 * same saturated state indices as the encoder. */
351 static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed){
352 if(get_rac(c, state+0))
/* Unary exponent; states saturate at index 9 to mirror the encoder. */
357 while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
362 for(i=e-1; i>=0; i--){
363 a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31
/* Sign bit only for signed symbols; e becomes 0 or -1 as a mask. */
366 e= -(is_signed && get_rac(c, state+11 + FFMIN(e, 10))); //11..21
/* Out-of-line wrapper around get_symbol_inline for non-hot callers. */
371 static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
372 return get_symbol_inline(c, state, is_signed);
/* Update the adaptive Golomb-Rice context after coding residual v:
 * accumulate |v| into error_sum, halve the counters every 128 symbols,
 * and nudge the bias toward the running drift (details partly elided). */
375 static inline void update_vlc_state(VlcState * const state, const int v){
376 int drift= state->drift;
377 int count= state->count;
378 state->error_sum += FFABS(v);
/* Periodic rescale keeps adaptation responsive to local statistics. */
381 if(count == 128){ //FIXME variable
384 state->error_sum >>= 1;
/* Bias is clamped to the int8 range [-128, 127]. */
389 if(state->bias > -128) state->bias--;
395 if(state->bias < 127) state->bias++;
/* Encode residual v with signed Golomb-Rice coding: fold out the bias,
 * derive the Rice parameter k from the accumulated error_sum, apply the
 * sign-flip mapping driven by drift/count, then update the context. */
406 static inline void put_vlc_symbol(PutBitContext *pb, VlcState * const state, int v, int bits){
408 //printf("final: %d ", v);
409 v = fold(v - state->bias, bits);
/* k grows until 2^k reaches the mean absolute error. */
413 while(i < state->error_sum){ //FIXME optimize
/* Flip the code when negative residuals are more likely (drift-based). */
421 if(k==0 && 2*state->drift <= - state->count) code= v ^ (-1);
424 code= v ^ ((2*state->drift + state->count)>>31);
427 //printf("v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code, state->bias, state->error_sum, state->drift, state->count, k);
428 set_sr_golomb(pb, code, k, 12, bits);
430 update_vlc_state(state, v);
/* Decode one Golomb-Rice residual: mirror of put_vlc_symbol — derive k,
 * read the code, undo the drift-based sign flip, re-apply the bias via
 * fold(), and update the adaptive context. */
433 static inline int get_vlc_symbol(GetBitContext *gb, VlcState * const state, int bits){
/* Same k derivation as the encoder so both stay in sync. */
438 while(i < state->error_sum){ //FIXME optimize
445 v= get_sr_golomb(gb, k, 12, bits);
446 //printf("v:%d bias:%d error:%d drift:%d count:%d k:%d", v, state->bias, state->error_sum, state->drift, state->count, k);
449 if(k==0 && 2*state->drift <= - state->count) v ^= (-1);
451 v ^= ((2*state->drift + state->count)>>31);
454 ret= fold(v + state->bias, bits);
456 update_vlc_state(state, v);
457 //printf("final: %d\n", ret);
461 #if CONFIG_FFV1_ENCODER
/* Encode one line of width w for the given plane: per-sample context
 * selection, median prediction, residual folding, then either range
 * coding (s->ac) or Golomb-Rice with run-mode for zero contexts. */
462 static av_always_inline int encode_line(FFV1Context *s, int w,
464 int plane_index, int bits)
466 PlaneContext * const p= &s->plane[plane_index];
467 RangeCoder * const c= &s->c;
469 int run_index= s->run_index;
/* Worst-case output-space checks before touching the buffers. */
474 if(c->bytestream_end - c->bytestream < w*20){
475 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
479 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < w*4){
480 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
/* sample[0]=current row, sample[1]=previous, sample[2]=two rows back. */
488 context= get_context(p, sample[0]+x, sample[1]+x, sample[2]+x);
489 diff= sample[0][x] - predict(sample[0]+x, sample[1]+x);
496 diff= fold(diff, bits);
/* In pass 1, also accumulate per-bit statistics. */
499 if(s->flags & CODEC_FLAG_PASS1){
500 put_symbol_inline(c, p->state[context], diff, 1, s->rc_stat, s->rc_stat2[p->quant_table_index][context]);
502 put_symbol_inline(c, p->state[context], diff, 1, NULL, NULL);
/* Golomb path: context 0 enters run mode (runs of zero residuals). */
505 if(context == 0) run_mode=1;
510 while(run_count >= 1<<ff_log2_run[run_index]){
511 run_count -= 1<<ff_log2_run[run_index];
513 put_bits(&s->pb, 1, 1);
516 put_bits(&s->pb, 1 + ff_log2_run[run_index], run_count);
517 if(run_index) run_index--;
526 // printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, (int)put_bits_count(&s->pb));
529 put_vlc_symbol(&s->pb, &p->vlc_state[context], diff, bits);
/* Flush any pending run at end of line. */
533 while(run_count >= 1<<ff_log2_run[run_index]){
534 run_count -= 1<<ff_log2_run[run_index];
536 put_bits(&s->pb, 1, 1);
540 put_bits(&s->pb, 1, 1);
542 s->run_index= run_index;
/* Encode one luma/chroma/alpha plane: maintain a 2- or 3-row ring of
 * int16 sample lines (3 rows when the large context model needs two
 * previous rows), load each source row, and call encode_line per row. */
547 static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
549 const int ring_size= s->avctx->context_model ? 3 : 2;
/* +6: 3 guard samples on each side so neighbour reads never go OOB. */
553 memset(s->sample_buffer, 0, ring_size*(w+6)*sizeof(*s->sample_buffer));
556 for(i=0; i<ring_size; i++)
557 sample[i]= s->sample_buffer + (w+6)*((h+i-y)%ring_size) + 3;
/* Edge extension: left neighbour from row above, right edge replicated. */
559 sample[0][-1]= sample[1][0 ];
560 sample[1][ w]= sample[1][w-1];
562 if(s->bits_per_raw_sample<=8){
564 sample[0][x]= src[x + stride*y];
566 encode_line(s, w, sample, plane_index, 8);
/* >8-bit input: samples may sit at the LSB or be MSB-aligned. */
568 if(s->packed_at_lsb){
570 sample[0][x]= ((uint16_t*)(src + stride*y))[x];
574 sample[0][x]= ((uint16_t*)(src + stride*y))[x] >> (16 - s->bits_per_raw_sample);
577 encode_line(s, w, sample, plane_index, s->bits_per_raw_sample);
579 //STOP_TIMER("encode line")}
/* Encode an RGB(A) frame: load packed 8-bit RGBA or planar 16-bit GBR,
 * (color transform elided here), then encode each of the up-to-4
 * transformed planes line by line with one extra bit of range. */
583 static void encode_rgb_frame(FFV1Context *s, uint8_t *src[3], int w, int h, int stride[3]){
585 const int ring_size= s->avctx->context_model ? 3 : 2;
586 int16_t *sample[4][3];
587 int lbd= s->avctx->bits_per_raw_sample <= 8;
588 int bits= s->avctx->bits_per_raw_sample > 0 ? s->avctx->bits_per_raw_sample : 8;
589 int offset= 1 << bits;
/* 4 planes (G,B,R,A) each with their own line ring. */
592 memset(s->sample_buffer, 0, ring_size*4*(w+6)*sizeof(*s->sample_buffer));
595 for(i=0; i<ring_size; i++)
597 sample[p][i]= s->sample_buffer + p*ring_size*(w+6) + ((h+i-y)%ring_size)*(w+6) + 3;
600 int b,g,r,av_uninit(a);
/* Packed 8-bit path: one 32-bit BGRA word per pixel. */
602 unsigned v= *((uint32_t*)(src[0] + x*4 + stride[0]*y));
/* Planar 16-bit path: separate b/g/r planes. */
608 b= *((uint16_t*)(src[0] + x*2 + stride[0]*y));
609 g= *((uint16_t*)(src[1] + x*2 + stride[1]*y));
610 r= *((uint16_t*)(src[2] + x*2 + stride[2]*y));
619 // assert(g>=0 && b>=0 && r>=0);
620 // assert(g<256 && b<512 && r<512);
626 for(p=0; p<3 + s->transparency; p++){
627 sample[p][0][-1]= sample[p][1][0 ];
628 sample[p][1][ w]= sample[p][1][w-1];
/* (p+1)/2 maps planes G,B,R,A to plane_index 0,1,1,2. */
630 encode_line(s, w, sample[p], (p+1)/2, 9);
632 encode_line(s, w, sample[p], (p+1)/2, bits+1);
/* Serialize one 256-entry quantization sub-table as run lengths: only
 * the distances between value changes over the first 128 entries are
 * coded (the table is antisymmetric, so the rest is implied). */
637 static void write_quant_table(RangeCoder *c, int16_t *quant_table){
640 uint8_t state[CONTEXT_SIZE];
641 memset(state, 128, sizeof(state));
643 for(i=1; i<128 ; i++){
644 if(quant_table[i] != quant_table[i-1]){
645 put_symbol(c, state, i-last-1, 0);
/* Final run up to index 127. */
649 put_symbol(c, state, i-last-1, 0);
/* Serialize all context-input sub-tables of one quantization table. */
652 static void write_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
655 write_quant_table(c, quant_table[i]);
/* Write the per-frame (keyframe) header into slice 0's range coder:
 * version, coder type, optional custom state-transition deltas, color
 * properties, quant tables, and for version-2 streams the slice layout. */
658 static void write_header(FFV1Context *f){
659 uint8_t state[CONTEXT_SIZE];
661 RangeCoder * const c= &f->slice_context[0]->c;
663 memset(state, 128, sizeof(state));
666 put_symbol(c, state, f->version, 0);
667 put_symbol(c, state, f->ac, 0);
/* Custom state-transition table, stored as signed deltas vs default. */
669 for(i=1; i<256; i++){
670 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
673 put_symbol(c, state, f->colorspace, 0); //YUV cs type
675 put_symbol(c, state, f->bits_per_raw_sample, 0);
676 put_rac(c, state, f->chroma_planes);
677 put_symbol(c, state, f->chroma_h_shift, 0);
678 put_symbol(c, state, f->chroma_v_shift, 0);
679 put_rac(c, state, f->transparency);
681 write_quant_tables(c, f->quant_table);
682 }else if(f->version < 3){
/* Version 2: slice geometry is expressed in slice-grid units. */
683 put_symbol(c, state, f->slice_count, 0);
684 for(i=0; i<f->slice_count; i++){
685 FFV1Context *fs= f->slice_context[i];
686 put_symbol(c, state, (fs->slice_x +1)*f->num_h_slices / f->width , 0);
687 put_symbol(c, state, (fs->slice_y +1)*f->num_v_slices / f->height , 0);
688 put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
689 put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
690 for(j=0; j<f->plane_count; j++){
691 put_symbol(c, state, f->plane[j].quant_table_index, 0);
692 av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
697 #endif /* CONFIG_FFV1_ENCODER */
/* Shared encoder/decoder initialization: copy flags and dimensions from
 * the AVCodecContext and set up the DSP helpers. */
699 static av_cold int common_init(AVCodecContext *avctx){
700 FFV1Context *s = avctx->priv_data;
703 s->flags= avctx->flags;
705 avcodec_get_frame_defaults(&s->picture);
707 ff_dsputil_init(&s->dsp, avctx);
709 s->width = avctx->width;
710 s->height= avctx->height;
712 assert(s->width && s->height);
/* Allocate per-slice, per-plane coding state (range-coder states and/or
 * VLC states) and seed the slice's range-coder transition tables from
 * the frame-level state_transition table. */
721 static int init_slice_state(FFV1Context *f, FFV1Context *fs){
724 fs->plane_count= f->plane_count;
725 fs->transparency= f->transparency;
726 for(j=0; j<f->plane_count; j++){
727 PlaneContext * const p= &fs->plane[j];
/* Lazy allocation: buffers survive across frames once created. */
730 if(!p-> state) p-> state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t));
732 return AVERROR(ENOMEM);
734 if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(VlcState));
736 return AVERROR(ENOMEM);
741 //FIXME only redo if state_transition changed
742 for(j=1; j<256; j++){
743 fs->c.one_state [ j]= f->state_transition[j];
744 fs->c.zero_state[256-j]= 256-fs->c.one_state [j];
/* Initialize coding state for every slice context; fails on first error. */
751 static int init_slices_state(FFV1Context *f){
753 for(i=0; i<f->slice_count; i++){
754 FFV1Context *fs= f->slice_context[i];
755 if(init_slice_state(f, fs) < 0)
/* Create one FFV1Context clone per slice, compute each slice's pixel
 * rectangle from its grid position, and allocate its sample ring buffer. */
761 static av_cold int init_slice_contexts(FFV1Context *f){
764 f->slice_count= f->num_h_slices * f->num_v_slices;
766 for(i=0; i<f->slice_count; i++){
767 FFV1Context *fs= av_mallocz(sizeof(*fs));
768 int sx= i % f->num_h_slices;
769 int sy= i / f->num_h_slices;
/* Slice edges are computed so adjacent slices tile exactly. */
770 int sxs= f->avctx->width * sx / f->num_h_slices;
771 int sxe= f->avctx->width *(sx+1) / f->num_h_slices;
772 int sys= f->avctx->height* sy / f->num_v_slices;
773 int sye= f->avctx->height*(sy+1) / f->num_v_slices;
774 f->slice_context[i]= fs;
/* Start from a copy of the main context, but with private statistics. */
775 memcpy(fs, f, sizeof(*fs));
776 memset(fs->rc_stat2, 0, sizeof(fs->rc_stat2));
778 fs->slice_width = sxe - sxs;
779 fs->slice_height= sye - sys;
/* 3 rings x 4 planes worth of (width+6) int16 lines. */
783 fs->sample_buffer = av_malloc(3*4 * (fs->width+6) * sizeof(*fs->sample_buffer));
784 if (!fs->sample_buffer)
785 return AVERROR(ENOMEM);
/* Allocate the per-quant-table initial range-coder state arrays and
 * fill them with the neutral probability 128. */
790 static int allocate_initial_states(FFV1Context *f){
793 for(i=0; i<f->quant_table_count; i++){
794 f->initial_states[i]= av_malloc(f->context_count[i]*sizeof(*f->initial_states[i]));
795 if(!f->initial_states[i])
796 return AVERROR(ENOMEM);
797 memset(f->initial_states[i], 128, f->context_count[i]*sizeof(*f->initial_states[i]));
802 #if CONFIG_FFV1_ENCODER
/* Serialize the version >= 2 global header into avctx->extradata:
 * version, coder setup, color properties, slice grid, all quantization
 * tables, optional non-default initial states, and (version >= 3) the
 * error-check flag plus a trailing CRC-32 over the extradata. */
803 static int write_extra_header(FFV1Context *f){
804 RangeCoder * const c= &f->c;
805 uint8_t state[CONTEXT_SIZE];
/* Separate state per initial-state byte position for better modelling. */
807 uint8_t state2[32][CONTEXT_SIZE];
810 memset(state2, 128, sizeof(state2));
811 memset(state, 128, sizeof(state));
/* Worst-case size: fixed overhead + initial states for both models. */
813 f->avctx->extradata= av_malloc(f->avctx->extradata_size= 10000 + (11*11*5*5*5+11*11*11)*32);
814 ff_init_range_encoder(c, f->avctx->extradata, f->avctx->extradata_size);
815 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
817 put_symbol(c, state, f->version, 0);
820 f->minor_version = 2;
821 put_symbol(c, state, f->minor_version, 0);
823 put_symbol(c, state, f->ac, 0);
824 for(i=1; i<256; i++){
826 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
829 put_symbol(c, state, f->colorspace, 0); //YUV cs type
830 put_symbol(c, state, f->bits_per_raw_sample, 0);
831 put_rac(c, state, f->chroma_planes);
832 put_symbol(c, state, f->chroma_h_shift, 0);
833 put_symbol(c, state, f->chroma_v_shift, 0);
834 put_rac(c, state, f->transparency);
835 put_symbol(c, state, f->num_h_slices-1, 0);
836 put_symbol(c, state, f->num_v_slices-1, 0);
838 put_symbol(c, state, f->quant_table_count, 0);
839 for(i=0; i<f->quant_table_count; i++)
840 write_quant_tables(c, f->quant_tables[i]);
/* Only emit initial states that differ from the default 128. */
842 for(i=0; i<f->quant_table_count; i++){
843 for(j=0; j<f->context_count[i]*CONTEXT_SIZE; j++)
844 if(f->initial_states[i] && f->initial_states[i][0][j] != 128)
846 if(j<f->context_count[i]*CONTEXT_SIZE){
847 put_rac(c, state, 1);
848 for(j=0; j<f->context_count[i]; j++){
849 for(k=0; k<CONTEXT_SIZE; k++){
/* Delta-coded against the previous context's same byte. */
850 int pred= j ? f->initial_states[i][j-1][k] : 128;
851 put_symbol(c, state2[k], (int8_t)(f->initial_states[i][j][k]-pred), 1);
855 put_rac(c, state, 0);
860 put_symbol(c, state, f->ec, 0);
/* Append CRC-32 (IEEE) over the finished extradata. */
863 f->avctx->extradata_size= ff_rac_terminate(c);
864 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, f->avctx->extradata, f->avctx->extradata_size);
865 AV_WL32(f->avctx->extradata + f->avctx->extradata_size, v);
866 f->avctx->extradata_size += 4;
/* Greedy optimization of the state-transition table using gathered
 * rc_stat frequencies: swap nearby states (and their mirrored halves)
 * whenever the estimated code length shrinks, remapping references. */
871 static int sort_stt(FFV1Context *s, uint8_t stt[256]){
872 int i,i2,changed,print=0;
876 for(i=12; i<244; i++){
/* Only consider swapping with close neighbours (i+1 .. i+3). */
877 for(i2=i+1; i2<245 && i2<i+4; i2++){
/* Expected bits if 'old' state's events were coded with 'new' probability. */
878 #define COST(old, new) \
879 s->rc_stat[old][0]*-log2((256-(new))/256.0)\
880 +s->rc_stat[old][1]*-log2( (new) /256.0)
/* Mirrored-pair cost: state p and its complement 256-p move together. */
882 #define COST2(old, new) \
884 +COST(256-(old), 256-(new))
886 double size0= COST2(i, i ) + COST2(i2, i2);
887 double sizeX= COST2(i, i2) + COST2(i2, i );
/* 128 is the fixed midpoint and must never move. */
888 if(sizeX < size0 && i!=128 && i2!=128){
890 FFSWAP(int, stt[ i], stt[ i2]);
891 FFSWAP(int, s->rc_stat[i ][0],s->rc_stat[ i2][0]);
892 FFSWAP(int, s->rc_stat[i ][1],s->rc_stat[ i2][1]);
894 FFSWAP(int, stt[256-i], stt[256-i2]);
895 FFSWAP(int, s->rc_stat[256-i][0],s->rc_stat[256-i2][0]);
896 FFSWAP(int, s->rc_stat[256-i][1],s->rc_stat[256-i2][1]);
/* Re-target all transitions that pointed at the swapped states. */
898 for(j=1; j<256; j++){
899 if (stt[j] == i ) stt[j] = i2;
900 else if(stt[j] == i2) stt[j] = i ;
902 if (stt[256-j] == 256-i ) stt[256-j] = 256-i2;
903 else if(stt[256-j] == 256-i2) stt[256-j] = 256-i ;
/* Encoder initialization: pick the bitstream version from the requested
 * features, validate the pixel format and derive bits_per_raw_sample,
 * build the quantization tables and default state-transition table,
 * parse optional two-pass stats, choose a slice grid, write the global
 * header (version >= 2), and set up all slice contexts. */
914 static av_cold int encode_init(AVCodecContext *avctx)
916 FFV1Context *s = avctx->priv_data;
/* Two-pass coding and multi-slice both require version >= 2. */
923 if((avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) || avctx->slices>1)
924 s->version = FFMAX(s->version, 2);
926 if(avctx->level == 3){
/* Error-check CRCs are available from version 3 on. */
931 s->ec = (s->version >= 3);
934 if(s->version >= 2 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
935 av_log(avctx, AV_LOG_ERROR, "Version 2 needed for requested features but version 2 is experimental and not enabled\n");
936 return AVERROR_INVALIDDATA;
/* coder_type>0 selects the range coder (ac=2: custom state table). */
939 s->ac= avctx->coder_type > 0 ? 2 : 0;
942 switch(avctx->pix_fmt){
943 case PIX_FMT_YUV444P9:
944 case PIX_FMT_YUV422P9:
945 case PIX_FMT_YUV420P9:
946 if (!avctx->bits_per_raw_sample)
947 s->bits_per_raw_sample = 9;
/* fallthrough: 9-bit formats share the 10/16-bit setup below. */
948 case PIX_FMT_YUV444P10:
949 case PIX_FMT_YUV420P10:
950 case PIX_FMT_YUV422P10:
951 s->packed_at_lsb = 1;
952 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
953 s->bits_per_raw_sample = 10;
955 case PIX_FMT_YUV444P16:
956 case PIX_FMT_YUV422P16:
957 case PIX_FMT_YUV420P16:
958 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) {
959 s->bits_per_raw_sample = 16;
960 } else if (!s->bits_per_raw_sample){
961 s->bits_per_raw_sample = avctx->bits_per_raw_sample;
963 if(s->bits_per_raw_sample <=8){
964 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n");
965 return AVERROR_INVALIDDATA;
/* >8 bit depths currently require the range coder. */
967 if(!s->ac && avctx->coder_type == -1) {
968 av_log(avctx, AV_LOG_INFO, "bits_per_raw_sample > 8, forcing coder 1\n");
972 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample of more than 8 needs -coder 1 currently\n");
973 return AVERROR_INVALIDDATA;
975 s->version= FFMAX(s->version, 1);
977 case PIX_FMT_YUV444P:
978 case PIX_FMT_YUV440P:
979 case PIX_FMT_YUV422P:
980 case PIX_FMT_YUV420P:
981 case PIX_FMT_YUV411P:
982 case PIX_FMT_YUV410P:
983 s->chroma_planes= av_pix_fmt_descriptors[avctx->pix_fmt].nb_components < 3 ? 0 : 1;
986 case PIX_FMT_YUVA444P:
987 case PIX_FMT_YUVA422P:
988 case PIX_FMT_YUVA420P:
1001 if (!avctx->bits_per_raw_sample)
1002 s->bits_per_raw_sample = 9;
1003 case PIX_FMT_GBRP10:
1004 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
1005 s->bits_per_raw_sample = 10;
1006 case PIX_FMT_GBRP12:
1007 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
1008 s->bits_per_raw_sample = 12;
1009 case PIX_FMT_GBRP14:
1010 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
1011 s->bits_per_raw_sample = 14;
1012 else if (!s->bits_per_raw_sample)
1013 s->bits_per_raw_sample = avctx->bits_per_raw_sample;
1015 s->chroma_planes= 1;
1016 s->version= FFMAX(s->version, 1);
1019 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
1020 return AVERROR_INVALIDDATA;
1022 if (s->transparency) {
1023 av_log(avctx, AV_LOG_WARNING, "Storing alpha plane, this will require a recent FFV1 decoder to playback!\n");
1025 if (avctx->context_model > 1U) {
1026 av_log(avctx, AV_LOG_ERROR, "Invalid context model %d, valid values are 0 and 1\n", avctx->context_model);
1027 return AVERROR(EINVAL);
/* Default state-transition table (version 2 variant). */
1031 for(i=1; i<256; i++)
1032 s->state_transition[i]=ver2_state[i];
/* Table 0: small (3-input) model; table 1: large (5-input) model. */
1034 for(i=0; i<256; i++){
1035 s->quant_table_count=2;
1036 if(s->bits_per_raw_sample <=8){
1037 s->quant_tables[0][0][i]= quant11[i];
1038 s->quant_tables[0][1][i]= 11*quant11[i];
1039 s->quant_tables[0][2][i]= 11*11*quant11[i];
1040 s->quant_tables[1][0][i]= quant11[i];
1041 s->quant_tables[1][1][i]= 11*quant11[i];
1042 s->quant_tables[1][2][i]= 11*11*quant5 [i];
1043 s->quant_tables[1][3][i]= 5*11*11*quant5 [i];
1044 s->quant_tables[1][4][i]= 5*5*11*11*quant5 [i];
1046 s->quant_tables[0][0][i]= quant9_10bit[i];
1047 s->quant_tables[0][1][i]= 11*quant9_10bit[i];
1048 s->quant_tables[0][2][i]= 11*11*quant9_10bit[i];
1049 s->quant_tables[1][0][i]= quant9_10bit[i];
1050 s->quant_tables[1][1][i]= 11*quant9_10bit[i];
1051 s->quant_tables[1][2][i]= 11*11*quant5_10bit[i];
1052 s->quant_tables[1][3][i]= 5*11*11*quant5_10bit[i];
1053 s->quant_tables[1][4][i]= 5*5*11*11*quant5_10bit[i];
/* Context counts halve because of the antisymmetric table values. */
1056 s->context_count[0]= (11*11*11+1)/2;
1057 s->context_count[1]= (11*11*5*5*5+1)/2;
1058 memcpy(s->quant_table, s->quant_tables[avctx->context_model], sizeof(s->quant_table));
1060 for(i=0; i<s->plane_count; i++){
1061 PlaneContext * const p= &s->plane[i];
1063 memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table));
1064 p->quant_table_index= avctx->context_model;
1065 p->context_count= s->context_count[p->quant_table_index];
1068 if(allocate_initial_states(s) < 0)
1069 return AVERROR(ENOMEM);
1071 avctx->coded_frame= &s->picture;
1072 if(!s->transparency)
1074 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
1076 s->picture_number=0;
1078 if(avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
1079 for(i=0; i<s->quant_table_count; i++){
1080 s->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*s->rc_stat2[i]));
1082 return AVERROR(ENOMEM);
/* Pass 2: re-load the statistics written in pass 1. */
1085 if(avctx->stats_in){
1086 char *p= avctx->stats_in;
1087 uint8_t best_state[256][256];
1091 av_assert0(s->version>=2);
1094 for(j=0; j<256; j++){
1096 s->rc_stat[j][i]= strtol(p, &next, 0);
1098 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d [%s]\n", j,i,p);
1104 for(i=0; i<s->quant_table_count; i++){
1105 for(j=0; j<s->context_count[i]; j++){
1106 for(k=0; k<32; k++){
1108 s->rc_stat2[i][j][k][m]= strtol(p, &next, 0);
1110 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d %d %d [%s]\n", i,j,k,m,p);
1111 return AVERROR_INVALIDDATA;
1118 gob_count= strtol(p, &next, 0);
1119 if(next==p || gob_count <0){
1120 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid\n");
1121 return AVERROR_INVALIDDATA;
1124 while(*p=='\n' || *p==' ') p++;
/* Optimize the transition table, then derive optimal initial states. */
1127 sort_stt(s, s->state_transition);
1129 find_best_state(best_state, s->state_transition);
1131 for(i=0; i<s->quant_table_count; i++){
1132 for(j=0; j<s->context_count[i]; j++){
1133 for(k=0; k<32; k++){
1135 if(s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]){
1136 p=256.0*s->rc_stat2[i][j][k][1] / (s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]);
1138 s->initial_states[i][j][k]= best_state[av_clip(round(p), 1, 255)][av_clip((s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1])/gob_count, 0, 255)];
/* Choose a slice grid matching the requested slice count (or default). */
1145 for(s->num_v_slices=2; s->num_v_slices<9; s->num_v_slices++){
1146 for(s->num_h_slices=s->num_v_slices; s->num_h_slices<2*s->num_v_slices; s->num_h_slices++){
1147 if(avctx->slices == s->num_h_slices * s->num_v_slices && avctx->slices <= 64 || !avctx->slices)
1151 av_log(avctx, AV_LOG_ERROR, "Unsupported number %d of slices requested, please specify a supported number with -slices (ex:4,6,9,12,16, ...)\n", avctx->slices);
1154 write_extra_header(s);
1157 if(init_slice_contexts(s) < 0)
1159 if(init_slices_state(s) < 0)
1162 #define STATS_OUT_SIZE 1024*1024*6
/* Pass 1: allocate output stats buffer and per-slice statistic arrays. */
1163 if(avctx->flags & CODEC_FLAG_PASS1){
1164 avctx->stats_out= av_mallocz(STATS_OUT_SIZE);
1165 for(i=0; i<s->quant_table_count; i++){
1166 for(j=0; j<s->slice_count; j++){
1167 FFV1Context *sf= s->slice_context[j];
1168 av_assert0(!sf->rc_stat2[i]);
1169 sf->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*sf->rc_stat2[i]));
1170 if(!sf->rc_stat2[i])
1171 return AVERROR(ENOMEM);
1178 #endif /* CONFIG_FFV1_ENCODER */
/* Reset a slice's per-plane coding state to its initial values: copy
 * the stored initial range-coder states (or neutral 128) and reset the
 * adaptive VLC contexts. Called at the start of each keyframe slice. */
1181 static void clear_slice_state(FFV1Context *f, FFV1Context *fs){
1184 for(i=0; i<f->plane_count; i++){
1185 PlaneContext *p= &fs->plane[i];
1187 p->interlace_bit_state[0]= 128;
1188 p->interlace_bit_state[1]= 128;
/* Prefer trained initial states when available (two-pass / extradata). */
1191 if(f->initial_states[p->quant_table_index]){
1192 memcpy(p->state, f->initial_states[p->quant_table_index], CONTEXT_SIZE*p->context_count);
1194 memset(p->state, 128, CONTEXT_SIZE*p->context_count);
1196 for(j=0; j<p->context_count; j++){
1197 p->vlc_state[j].drift= 0;
1198 p->vlc_state[j].error_sum= 4; //FFMAX((RANGE + 32)/64, 2);
1199 p->vlc_state[j].bias= 0;
1200 p->vlc_state[j].count= 1;
1206 #if CONFIG_FFV1_ENCODER
/* Write the version >= 3 per-slice header: slice geometry in grid
 * units, per-plane quant table indices, field order, and the sample
 * aspect ratio. */
1208 static void encode_slice_header(FFV1Context *f, FFV1Context *fs){
1209 RangeCoder *c = &fs->c;
1210 uint8_t state[CONTEXT_SIZE];
1212 memset(state, 128, sizeof(state));
1214 put_symbol(c, state, (fs->slice_x +1)*f->num_h_slices / f->width , 0);
1215 put_symbol(c, state, (fs->slice_y +1)*f->num_v_slices / f->height , 0);
1216 put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
1217 put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
1218 for(j=0; j<f->plane_count; j++){
1219 put_symbol(c, state, f->plane[j].quant_table_index, 0);
1220 av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
/* 3 = progressive, 1/2 = interlaced with top/bottom field first. */
1222 if(!f->picture.interlaced_frame) put_symbol(c, state, 3, 0);
1223 else put_symbol(c, state, 1 + !f->picture.top_field_first, 0);
1224 put_symbol(c, state, f->picture.sample_aspect_ratio.num, 0);
1225 put_symbol(c, state, f->picture.sample_aspect_ratio.den, 0);
/* Thread entry point: encode one slice. Resets slice state on
 * keyframes, writes the slice header (version >= 3), selects the
 * coding backend, then encodes either YUV planes or an RGB frame. */
1228 static int encode_slice(AVCodecContext *c, void *arg){
1229 FFV1Context *fs= *(void**)arg;
1230 FFV1Context *f= fs->avctx->priv_data;
1231 int width = fs->slice_width;
1232 int height= fs->slice_height;
1235 AVFrame * const p= &f->picture;
/* Bytes per sample in the source: 2 for >8-bit input, else 1. */
1236 const int ps= (f->bits_per_raw_sample>8)+1;
1239 clear_slice_state(f, fs);
1241 encode_slice_header(f, fs);
/* Golomb path: terminate the range coder and bit-write after it. */
1245 put_rac(&fs->c, (int[]){129}, 0);
1246 fs->ac_byte_count = f->version > 2 || (!x&&!y) ? ff_rac_terminate(&fs->c) : 0;
1247 init_put_bits(&fs->pb, fs->c.bytestream_start + fs->ac_byte_count, fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count);
1250 if(f->colorspace==0){
/* Ceiling division by the chroma subsampling factors. */
1251 const int chroma_width = -((-width )>>f->chroma_h_shift);
1252 const int chroma_height= -((-height)>>f->chroma_v_shift);
1253 const int cx= x>>f->chroma_h_shift;
1254 const int cy= y>>f->chroma_v_shift;
1256 encode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1258 if (f->chroma_planes){
1259 encode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1260 encode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1262 if (fs->transparency)
1263 encode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
1265 uint8_t *planes[3] = {p->data[0] + ps*x + y*p->linesize[0],
1266 p->data[1] + ps*x + y*p->linesize[1],
1267 p->data[2] + ps*x + y*p->linesize[2]};
1268 encode_rgb_frame(fs, planes, width, height, p->linesize);
/*
 * encode_frame(): top-level encoder entry (AVCodec.encode2).
 * Allocates the output packet, writes the keyframe flag (and full header
 * on keyframes), splits the packet between slice range coders, runs the
 * slice workers, then packs the slices back-to-back appending a 24-bit
 * size trailer (and optional 32-bit CRC when f->ec) after each slice.
 * Also aggregates 2-pass rate-control statistics into avctx->stats_out.
 * NOTE(review): this chunk is gapped -- declarations of ret/i/j/k/m,
 * used_count, buf_p, and several loop/brace lines are not visible here.
 */
1275 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1276 const AVFrame *pict, int *got_packet)
1278 FFV1Context *f = avctx->priv_data;
1279 RangeCoder * const c= &f->slice_context[0]->c;
1280 AVFrame * const p= &f->picture;
1282 uint8_t keystate=128;
/* worst-case packet size: full-depth samples plus header margin */
1286 if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width*avctx->height*((8*2+1+1)*4)/8
1287 + FF_MIN_BUFFER_SIZE)) < 0)
1290 ff_init_range_encoder(c, pkt->data, pkt->size);
1291 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1294 p->pict_type= AV_PICTURE_TYPE_I;
/* keyframe cadence: gop_size 0 means every frame is a keyframe */
1296 if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
1297 put_rac(c, &keystate, 1);
1302 put_rac(c, &keystate, 0);
/* custom range-coder state transition table (non-default coder_type) */
1308 for(i=1; i<256; i++){
1309 c->one_state[i]= f->state_transition[i];
1310 c->zero_state[256-i]= 256-c->one_state[i];
/* give each slice an equal share of the remaining packet buffer */
1314 for(i=1; i<f->slice_count; i++){
1315 FFV1Context *fs= f->slice_context[i];
1316 uint8_t *start = pkt->data + (pkt->size-used_count)*i/f->slice_count;
1317 int len = pkt->size/f->slice_count;
1319 ff_init_range_encoder(&fs->c, start, len);
1321 avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
/* compact the slices and append per-slice trailers */
1324 for(i=0; i<f->slice_count; i++){
1325 FFV1Context *fs= f->slice_context[i];
1330 put_rac(&fs->c, &state, 0);
1331 bytes= ff_rac_terminate(&fs->c);
1333 flush_put_bits(&fs->pb); //nicer padding FIXME
1334 bytes= fs->ac_byte_count + (put_bits_count(&fs->pb)+7)/8;
1336 if(i>0 || f->version>2){
1337 av_assert0(bytes < pkt->size/f->slice_count);
1338 memmove(buf_p, fs->c.bytestream_start, bytes);
1339 av_assert0(bytes < (1<<24));
/* 24-bit big-endian slice size trailer */
1340 AV_WB24(buf_p+bytes, bytes);
/* optional error-check CRC over the slice, stored little-endian */
1346 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, bytes);
1347 AV_WL32(buf_p + bytes, v); bytes += 4;
/* 2-pass stats: dump aggregated rc tables every 32 frames */
1352 if((avctx->flags&CODEC_FLAG_PASS1) && (f->picture_number&31)==0){
1354 char *p= avctx->stats_out;
1355 char *end= p + STATS_OUT_SIZE;
1357 memset(f->rc_stat, 0, sizeof(f->rc_stat));
1358 for(i=0; i<f->quant_table_count; i++)
1359 memset(f->rc_stat2[i], 0, f->context_count[i]*sizeof(*f->rc_stat2[i]));
/* sum per-slice statistics into the global tables */
1361 for(j=0; j<f->slice_count; j++){
1362 FFV1Context *fs= f->slice_context[j];
1363 for(i=0; i<256; i++){
1364 f->rc_stat[i][0] += fs->rc_stat[i][0];
1365 f->rc_stat[i][1] += fs->rc_stat[i][1];
1367 for(i=0; i<f->quant_table_count; i++){
1368 for(k=0; k<f->context_count[i]; k++){
1369 for(m=0; m<32; m++){
1370 f->rc_stat2[i][k][m][0] += fs->rc_stat2[i][k][m][0];
1371 f->rc_stat2[i][k][m][1] += fs->rc_stat2[i][k][m][1];
/* NOTE(review): the `p += strlen(p);` advances after each snprintf are
 * presumably on the omitted lines -- confirm, otherwise the writes
 * would overwrite each other */
1377 for(j=0; j<256; j++){
1378 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat[j][0], f->rc_stat[j][1]);
1381 snprintf(p, end-p, "\n");
1383 for(i=0; i<f->quant_table_count; i++){
1384 for(j=0; j<f->context_count[i]; j++){
1385 for(m=0; m<32; m++){
1386 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat2[i][j][m][0], f->rc_stat2[i][j][m][1]);
1391 snprintf(p, end-p, "%d\n", f->gob_count);
1392 } else if(avctx->flags&CODEC_FLAG_PASS1)
1393 avctx->stats_out[0] = '\0';
1395 f->picture_number++;
1396 pkt->size = buf_p - pkt->data;
1397 pkt->flags |= AV_PKT_FLAG_KEY*p->key_frame;
1402 #endif /* CONFIG_FFV1_ENCODER */
/*
 * common_end(): shared encoder/decoder teardown (AVCodec.close).
 * Releases reference frames (decoder only), per-plane model state and
 * sample buffers of every slice context, the 2-pass stats buffer, the
 * per-table initial states / rc_stat2 arrays, and finally the slice
 * contexts themselves.
 */
1404 static av_cold int common_end(AVCodecContext *avctx){
1405 FFV1Context *s = avctx->priv_data;
/* only the decoder holds get_buffer()-allocated pictures */
1408 if (avctx->codec->decode && s->picture.data[0])
1409 avctx->release_buffer(avctx, &s->picture);
1410 if (avctx->codec->decode && s->last_picture.data[0])
1411 avctx->release_buffer(avctx, &s->last_picture);
1413 for(j=0; j<s->slice_count; j++){
1414 FFV1Context *fs= s->slice_context[j];
1415 for(i=0; i<s->plane_count; i++){
1416 PlaneContext *p= &fs->plane[i];
1418 av_freep(&p->state);
1419 av_freep(&p->vlc_state);
1421 av_freep(&fs->sample_buffer);
1424 av_freep(&avctx->stats_out);
1425 for(j=0; j<s->quant_table_count; j++){
1426 av_freep(&s->initial_states[j]);
1427 for(i=0; i<s->slice_count; i++){
1428 FFV1Context *sf= s->slice_context[i];
1429 av_freep(&sf->rc_stat2[j]);
1431 av_freep(&s->rc_stat2[j]);
1434 for(i=0; i<s->slice_count; i++){
1435 av_freep(&s->slice_context[i]);
/*
 * decode_line(): decode one line of `w` samples of a plane.
 * sample[0] is the previously decoded line, sample[1] the line being
 * filled; both carry padding so the predictor can read neighbours.
 * Uses the range coder when s->ac, otherwise Golomb-Rice VLC with
 * run-length coding (entered whenever the context quantizes to 0).
 * `bits` is the sample depth; output values are wrapped to that depth.
 * NOTE(review): this chunk is gapped -- the x loop framing, run_mode /
 * run_count handling between the visible branches, and the sign
 * extraction are not all visible here.
 */
1441 static av_always_inline void decode_line(FFV1Context *s, int w,
1443 int plane_index, int bits)
1445 PlaneContext * const p= &s->plane[plane_index];
1446 RangeCoder * const c= &s->c;
1450 int run_index= s->run_index;
1453 int diff, context, sign;
1455 context= get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
1462 av_assert2(context < p->context_count);
/* adaptive-coder path: residual from the context's model state */
1465 diff= get_symbol_inline(c, p->state[context], 1);
/* VLC path: a zero context switches into run mode */
1467 if(context == 0 && run_mode==0) run_mode=1;
1470 if(run_count==0 && run_mode==1){
1471 if(get_bits1(&s->gb)){
/* full run: length grows along ff_log2_run[] as runs keep succeeding */
1472 run_count = 1<<ff_log2_run[run_index];
1473 if(x + run_count <= w) run_index++;
/* broken run: read the explicit remaining length */
1475 if(ff_log2_run[run_index]) run_count = get_bits(&s->gb, ff_log2_run[run_index]);
1477 if(run_index) run_index--;
1485 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1490 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1492 // printf("count:%d index:%d, mode:%d, x:%d y:%d pos:%d\n", run_count, run_index, run_mode, x, y, get_bits_count(&s->gb));
1495 if(sign) diff= -diff;
/* predictor + residual, masked to the sample depth */
1497 sample[1][x]= (predict(sample[1] + x, sample[0] + x) + diff) & ((1<<bits)-1);
1499 s->run_index= run_index;
/*
 * decode_plane(): decode a whole plane into `src` (stride in bytes).
 * Maintains two padded int16 line buffers swapped each row; edge samples
 * are replicated so the predictor has valid neighbours at the borders.
 * 8-bit content is stored as bytes, deeper content as uint16 -- either
 * at the LSBs (packed_at_lsb) or shifted up to the MSBs.
 * NOTE(review): the per-row y loop and the x copy loops fall in gaps.
 */
1502 static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
/* two lines, each with 3 samples of padding on both sides */
1505 sample[0]=s->sample_buffer +3;
1506 sample[1]=s->sample_buffer+w+6+3;
1510 memset(s->sample_buffer, 0, 2*(w+6)*sizeof(*s->sample_buffer));
/* swap current/previous line buffers each row */
1513 int16_t *temp = sample[0]; //FIXME try a normal buffer
1515 sample[0]= sample[1];
/* replicate edge samples for the predictor */
1518 sample[1][-1]= sample[0][0 ];
1519 sample[0][ w]= sample[0][w-1];
1522 if(s->avctx->bits_per_raw_sample <= 8){
1523 decode_line(s, w, sample, plane_index, 8);
1525 src[x + stride*y]= sample[1][x];
1528 decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
1529 if(s->packed_at_lsb){
1531 ((uint16_t*)(src + stride*y))[x]= sample[1][x];
/* left-justify: place the significant bits at the top of the word */
1535 ((uint16_t*)(src + stride*y))[x]= sample[1][x] << (16 - s->avctx->bits_per_raw_sample);
1539 //STOP_TIMER("decode-line")}
/*
 * decode_rgb_frame(): decode an RGB(A) slice (colorspace 1).
 * Channels are decoded in G,B,R(,A) order; B and R are coded with one
 * extra bit (bits+1) to hold the color-transform residual range.
 * 8-bit output is repacked into one 32-bit BGRA word per pixel; deeper
 * output goes to separate uint16 planes.
 * NOTE(review): the inverse color transform (subtracting `offset` and
 * adding g back to b/r) falls in a gap between lines 1576 and 1588 --
 * the stores below presumably see already-transformed values; confirm.
 */
1543 static void decode_rgb_frame(FFV1Context *s, uint8_t *src[3], int w, int h, int stride[3]){
1545 int16_t *sample[4][2];
1546 int lbd= s->avctx->bits_per_raw_sample <= 8;
1547 int bits= s->avctx->bits_per_raw_sample > 0 ? s->avctx->bits_per_raw_sample : 8;
/* residual bias for the B/R channels */
1548 int offset= 1 << bits;
/* two padded lines (previous/current) per channel */
1550 sample[x][0] = s->sample_buffer + x*2 *(w+6) + 3;
1551 sample[x][1] = s->sample_buffer + (x*2+1)*(w+6) + 3;
1556 memset(s->sample_buffer, 0, 8*(w+6)*sizeof(*s->sample_buffer));
1559 for(p=0; p<3 + s->transparency; p++){
1560 int16_t *temp = sample[p][0]; //FIXME try a normal buffer
1562 sample[p][0]= sample[p][1];
1565 sample[p][1][-1]= sample[p][0][0 ];
1566 sample[p][0][ w]= sample[p][0][w-1];
/* 8-bit: 9-bit residuals; deeper: bits+1; context plane (p+1)/2 maps
 * G->0, B/R->1, A->2 */
1568 decode_line(s, w, sample[p], (p+1)/2, 9);
1570 decode_line(s, w, sample[p], (p+1)/2, bits+1);
1573 int g= sample[0][1][x];
1574 int b= sample[1][1][x];
1575 int r= sample[2][1][x];
1576 int a= sample[3][1][x];
1578 // assert(g>=0 && b>=0 && r>=0);
1579 // assert(g<256 && b<512 && r<512);
1588 *((uint32_t*)(src[0] + x*4 + stride[0]*y))= b + (g<<8) + (r<<16) + (a<<24);
1590 *((uint16_t*)(src[0] + x*2 + stride[0]*y)) = b;
1591 *((uint16_t*)(src[1] + x*2 + stride[1]*y)) = g;
1592 *((uint16_t*)(src[2] + x*2 + stride[2]*y)) = r;
/*
 * decode_slice_header(): parse a per-slice header (FFV1 version >= 3).
 * Reads the slice geometry (coded in slice-grid units, converted to
 * pixels below), each plane's quant table index, and picture-level
 * metadata (interlacing flags, sample aspect ratio).
 * Returns 0 on success, negative on invalid data.
 * NOTE(review): this chunk is gapped -- the error returns after the
 * geometry checks and the function epilogue are not visible here.
 */
1598 static int decode_slice_header(FFV1Context *f, FFV1Context *fs){
1599 RangeCoder *c = &fs->c;
1600 uint8_t state[CONTEXT_SIZE];
1601 unsigned ps, i, context_count;
1602 memset(state, 128, sizeof(state));
1604 av_assert0(f->version > 2);
1606 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1607 fs->slice_y = get_symbol(c, state, 0) *f->height;
1608 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1609 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
/* convert from slice-grid units to pixel coordinates */
1611 fs->slice_x /= f->num_h_slices;
1612 fs->slice_y /= f->num_v_slices;
1613 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1614 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
/* reject slices outside the frame; uint64_t avoids sum overflow */
1615 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1617 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1618 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1621 for(i=0; i<f->plane_count; i++){
1622 PlaneContext * const p= &fs->plane[i];
1623 int idx=get_symbol(c, state, 0);
/* FIX: was `idx > ...` -- idx == quant_table_count passed the check and
 * read one past the end of f->quant_tables[]/f->context_count[].
 * Valid indices are 0..quant_table_count-1; the unsigned promotion
 * still catches negative idx. */
1624 if(idx >= (unsigned)f->quant_table_count){
1625 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
1628 p->quant_table_index= idx;
1629 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
1630 context_count= f->context_count[idx];
/* grow the per-plane model lazily when the table needs more contexts */
1632 if(p->context_count < context_count){
1633 av_freep(&p->state);
1634 av_freep(&p->vlc_state);
1636 p->context_count= context_count;
/* picture structure: the branches for ps==1 (tff), ps==2 (bff) and the
 * progressive default are split across gaps */
1639 ps = get_symbol(c, state, 0);
1641 f->picture.interlaced_frame = 1;
1642 f->picture.top_field_first = 1;
1644 f->picture.interlaced_frame = 1;
1645 f->picture.top_field_first = 0;
1647 f->picture.interlaced_frame = 0;
1649 f->picture.sample_aspect_ratio.num = get_symbol(c, state, 0);
1650 f->picture.sample_aspect_ratio.den = get_symbol(c, state, 0);
/*
 * decode_slice(): per-slice decoder worker, invoked via avctx->execute().
 * Parses the slice header (version >= 3), resets the probability model
 * on keyframes, switches to the bitreader for Golomb-Rice slices, then
 * decodes the planes and finally checks the coder consumed exactly the
 * slice payload, marking the slice damaged on mismatch.
 * NOTE(review): gapped -- the x/y slice origin assignments, the ac/VLC
 * branch framing and the return are not all visible here.
 */
1655 static int decode_slice(AVCodecContext *c, void *arg){
1656 FFV1Context *fs= *(void**)arg;
1657 FFV1Context *f= fs->avctx->priv_data;
1658 int width, height, x, y;
/* bytes per sample: 2 for >8-bit content, 1 otherwise */
1659 const int ps= (c->bits_per_raw_sample>8)+1;
1660 AVFrame * const p= &f->picture;
1663 if(init_slice_state(f, fs) < 0)
1664 return AVERROR(ENOMEM);
1665 if(decode_slice_header(f, fs) < 0) {
1666 fs->slice_damaged = 1;
1667 return AVERROR_INVALIDDATA;
/* re-init: the header may have grown the per-plane context counts */
1670 if(init_slice_state(f, fs) < 0)
1671 return AVERROR(ENOMEM);
1672 if(f->picture.key_frame)
1673 clear_slice_state(f, fs);
1674 width = fs->slice_width;
1675 height= fs->slice_height;
/* 3.2+ streams code an extra terminator bit before the VLC part */
1680 if (f->version == 3 && f->minor_version > 1 || f->version > 3)
1681 get_rac(&fs->c, (int[]){129});
1682 fs->ac_byte_count = f->version > 2 || (!x&&!y) ? fs->c.bytestream - fs->c.bytestream_start - 1 : 0;
1683 init_get_bits(&fs->gb,
1684 fs->c.bytestream_start + fs->ac_byte_count,
1685 (fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count) * 8);
1688 av_assert1(width && height);
1689 if(f->colorspace==0){
/* ceiling division keeps the last chroma column/row for odd sizes */
1690 const int chroma_width = -((-width )>>f->chroma_h_shift);
1691 const int chroma_height= -((-height)>>f->chroma_v_shift);
1692 const int cx= x>>f->chroma_h_shift;
1693 const int cy= y>>f->chroma_v_shift;
1694 decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1696 if (f->chroma_planes){
1697 decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1698 decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1700 if (fs->transparency)
1701 decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
1703 uint8_t *planes[3] = {p->data[0] + ps*x + y*p->linesize[0],
1704 p->data[1] + ps*x + y*p->linesize[1],
1705 p->data[2] + ps*x + y*p->linesize[2]};
1706 decode_rgb_frame(fs, planes, width, height, p->linesize);
/* verify the range coder landed at the end of the slice payload
 * (minus the 3-byte size trailer and the optional 5-byte CRC area) */
1708 if(fs->ac && f->version > 2) {
1710 get_rac(&fs->c, (int[]){129});
1711 v = fs->c.bytestream_end - fs->c.bytestream - 2 - 5*f->ec;
1713 av_log(f->avctx, AV_LOG_ERROR, "bytestream end mismatching by %d\n", v);
1714 fs->slice_damaged = 1;
/*
 * read_quant_table(): read one run-length coded quantization table half
 * (entries 0..127) and mirror it to the negative side (entries 128..255)
 * so quantization is sign-symmetric.  `scale` spaces the levels so
 * multiple table dimensions can be packed into one context index.
 * Returns -1 when a run overruns the table; the success return value
 * (the level count) is on an omitted line.
 */
1723 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale){
1726 uint8_t state[CONTEXT_SIZE];
1728 memset(state, 128, sizeof(state));
/* v counts distinct levels; i advances by each run length (gap) */
1730 for(v=0; i<128 ; v++){
1731 unsigned len= get_symbol(c, state, 0) + 1;
/* reject runs that would write past entry 127 */
1733 if(len > 128 - i) return -1;
1736 quant_table[i] = scale*v;
1739 //if(i%16==0) printf("\n");
/* mirror: negative inputs map to negated levels */
1743 for(i=1; i<128; i++){
1744 quant_table[256-i]= -quant_table[i];
1746 quant_table[128]= -quant_table[127];
/*
 * read_quant_tables(): read the quant table for every context input and
 * multiply the per-dimension level counts into the total context count.
 * A -1 from read_quant_table makes context_count negative, which the
 * unsigned comparison against 32768U also rejects (error return is on an
 * omitted line).  Returns (context_count+1)/2: contexts are folded by
 * sign symmetry.
 */
1751 static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
1753 int context_count=1;
1756 context_count*= read_quant_table(c, quant_table[i], context_count);
1757 if(context_count > 32768U){
1761 return (context_count+1)/2;
/*
 * read_extra_header(): parse the global header stored in extradata
 * (FFV1 version >= 2): version, coder type and its state-transition
 * table, colorspace/depth/subsampling, slice grid, quant tables,
 * optional explicit initial model states, and the error-check flag.
 * Version > 2 extradata ends with a CRC over the whole blob, verified
 * at the bottom.  Returns 0 on success, negative on error.
 */
1764 static int read_extra_header(FFV1Context *f){
1765 RangeCoder * const c= &f->c;
1766 uint8_t state[CONTEXT_SIZE];
/* one model per CONTEXT_SIZE slot for the initial-state deltas below */
1768 uint8_t state2[32][CONTEXT_SIZE];
1770 memset(state2, 128, sizeof(state2));
1771 memset(state, 128, sizeof(state));
1773 ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size);
1774 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1776 f->version= get_symbol(c, state, 0);
1777 if(f->version > 2) {
/* the last 4 bytes are the CRC, not range-coded payload */
1778 c->bytestream_end -= 4;
1779 f->minor_version= get_symbol(c, state, 0);
1781 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
/* custom state-transition table, coded as deltas from the default */
1783 for(i=1; i<256; i++){
1784 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1787 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1788 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1789 f->chroma_planes= get_rac(c, state);
1790 f->chroma_h_shift= get_symbol(c, state, 0);
1791 f->chroma_v_shift= get_symbol(c, state, 0);
1792 f->transparency= get_rac(c, state);
1793 f->plane_count= 2 + f->transparency;
1794 f->num_h_slices= 1 + get_symbol(c, state, 0);
1795 f->num_v_slices= 1 + get_symbol(c, state, 0);
/* each slice must be at least one pixel wide/tall */
1796 if(f->num_h_slices > (unsigned)f->width || f->num_v_slices > (unsigned)f->height){
1797 av_log(f->avctx, AV_LOG_ERROR, "too many slices\n");
1801 f->quant_table_count= get_symbol(c, state, 0);
1802 if(f->quant_table_count > (unsigned)MAX_QUANT_TABLES)
1804 for(i=0; i<f->quant_table_count; i++){
1805 if((f->context_count[i]= read_quant_tables(c, f->quant_tables[i])) < 0){
1806 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1811 if(allocate_initial_states(f) < 0)
1812 return AVERROR(ENOMEM);
/* optional per-table explicit initial states, delta-coded against the
 * previous context (128 for the first) */
1814 for(i=0; i<f->quant_table_count; i++){
1815 if(get_rac(c, state)){
1816 for(j=0; j<f->context_count[i]; j++){
1817 for(k=0; k<CONTEXT_SIZE; k++){
1818 int pred= j ? f->initial_states[i][j-1][k] : 128;
1819 f->initial_states[i][j][k]= (pred+get_symbol(c, state2[k], 1))&0xFF;
1826 f->ec = get_symbol(c, state, 0);
/* CRC over the full extradata must come out to the expected residue */
1831 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, f->avctx->extradata, f->avctx->extradata_size);
1833 av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", v);
1834 return AVERROR_INVALIDDATA;
/*
 * read_header(): parse the keyframe header from the frame bitstream.
 * For version <= 1 the whole configuration is coded here; for newer
 * versions it comes from extradata and this only refreshes per-frame
 * state.  Maps colorspace/depth/subsampling onto an AVPixelFormat,
 * determines the slice count (explicitly for version < 3, otherwise by
 * walking the size trailers backwards from the packet end), then fills
 * in each slice's geometry and quant tables.
 * NOTE(review): this chunk is gapped -- several error returns, else
 * branches and closing braces are on omitted lines.
 */
1841 static int read_header(FFV1Context *f){
1842 uint8_t state[CONTEXT_SIZE];
1843 int i, j, context_count = -1; //-1 to avoid warning
1844 RangeCoder * const c= &f->slice_context[0]->c;
1846 memset(state, 128, sizeof(state));
/* version 0/1 path: configuration is carried in the frame itself */
1849 unsigned v= get_symbol(c, state, 0);
1851 av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
1852 return AVERROR_INVALIDDATA;
1855 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
1857 for(i=1; i<256; i++){
1858 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1861 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1863 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1864 f->chroma_planes= get_rac(c, state);
1865 f->chroma_h_shift= get_symbol(c, state, 0);
1866 f->chroma_v_shift= get_symbol(c, state, 0);
1867 f->transparency= get_rac(c, state);
1868 f->plane_count= 2 + f->transparency;
/* map (colorspace, depth, subsampling, alpha) to a pixel format */
1871 if(f->colorspace==0){
1872 if(!f->transparency && !f->chroma_planes){
1873 if (f->avctx->bits_per_raw_sample<=8)
1874 f->avctx->pix_fmt= PIX_FMT_GRAY8;
1876 f->avctx->pix_fmt= PIX_FMT_GRAY16;
1877 }else if(f->avctx->bits_per_raw_sample<=8 && !f->transparency){
1878 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1879 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P; break;
1880 case 0x01: f->avctx->pix_fmt= PIX_FMT_YUV440P; break;
1881 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P; break;
1882 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P; break;
1883 case 0x20: f->avctx->pix_fmt= PIX_FMT_YUV411P; break;
1884 case 0x22: f->avctx->pix_fmt= PIX_FMT_YUV410P; break;
1886 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1889 }else if(f->avctx->bits_per_raw_sample<=8 && f->transparency){
1890 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1891 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUVA444P; break;
1892 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUVA422P; break;
1893 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUVA420P; break;
1895 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1898 }else if(f->avctx->bits_per_raw_sample==9) {
1900 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1901 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P9; break;
1902 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P9; break;
1903 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P9; break;
1905 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1908 }else if(f->avctx->bits_per_raw_sample==10) {
1910 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1911 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P10; break;
1912 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P10; break;
1913 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P10; break;
1915 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1919 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1920 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
1921 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P16; break;
1922 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P16; break;
1924 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1928 }else if(f->colorspace==1){
1929 if(f->chroma_h_shift || f->chroma_v_shift){
1930 av_log(f->avctx, AV_LOG_ERROR, "chroma subsampling not supported in this colorspace\n");
1933 if(f->avctx->bits_per_raw_sample==9)
1934 f->avctx->pix_fmt= PIX_FMT_GBRP9;
1935 else if(f->avctx->bits_per_raw_sample==10)
1936 f->avctx->pix_fmt= PIX_FMT_GBRP10;
1937 else if(f->avctx->bits_per_raw_sample==12)
1938 f->avctx->pix_fmt= PIX_FMT_GBRP12;
1939 else if(f->avctx->bits_per_raw_sample==14)
1940 f->avctx->pix_fmt= PIX_FMT_GBRP14;
1942 if(f->transparency) f->avctx->pix_fmt= PIX_FMT_RGB32;
1943 else f->avctx->pix_fmt= PIX_FMT_0RGB32;
1945 av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
1949 //printf("%d %d %d\n", f->chroma_h_shift, f->chroma_v_shift,f->avctx->pix_fmt);
1951 context_count= read_quant_tables(c, f->quant_table);
1952 if(context_count < 0){
1953 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1956 }else if(f->version < 3){
1957 f->slice_count= get_symbol(c, state, 0);
/* version >= 3: count slices by walking the 24-bit size trailers
 * (plus optional 5-byte CRC area) backwards from the packet end */
1959 const uint8_t *p= c->bytestream_end;
1960 for(f->slice_count = 0; f->slice_count < MAX_SLICES && 3 < p - c->bytestream_start; f->slice_count++){
1961 int trailer = 3 + 5*!!f->ec;
1962 int size = AV_RB24(p-trailer);
1963 if(size + trailer > p - c->bytestream_start)
1965 p -= size + trailer;
1968 if(f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0){
1969 av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid\n", f->slice_count);
1973 for(j=0; j<f->slice_count; j++){
1974 FFV1Context *fs= f->slice_context[j];
1976 fs->packed_at_lsb= f->packed_at_lsb;
1978 fs->slice_damaged = 0;
/* version 2 codes each slice's geometry and tables in this header */
1980 if(f->version == 2){
1981 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1982 fs->slice_y = get_symbol(c, state, 0) *f->height;
1983 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1984 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
1986 fs->slice_x /= f->num_h_slices;
1987 fs->slice_y /= f->num_v_slices;
1988 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1989 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
1990 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1992 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1993 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1997 for(i=0; i<f->plane_count; i++){
1998 PlaneContext * const p= &fs->plane[i];
2000 if(f->version == 2){
2001 int idx=get_symbol(c, state, 0);
/* FIX: was `idx > ...` -- idx == quant_table_count passed the check and
 * read one past the end of f->quant_tables[]/f->context_count[].
 * Valid indices are 0..quant_table_count-1; the unsigned promotion
 * still catches negative idx. */
2002 if(idx >= (unsigned)f->quant_table_count){
2003 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
2006 p->quant_table_index= idx;
2007 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
2008 context_count= f->context_count[idx];
2010 memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
2013 if(f->version <= 2){
2014 av_assert0(context_count>=0);
/* grow the per-plane model lazily when more contexts are needed */
2015 if(p->context_count < context_count){
2016 av_freep(&p->state);
2017 av_freep(&p->vlc_state);
2019 p->context_count= context_count;
/*
 * decode_init(): decoder init (AVCodec.init).  Parses the global header
 * from extradata when present, then allocates the slice contexts.
 * Error returns and common init are on omitted lines.
 */
2026 static av_cold int decode_init(AVCodecContext *avctx)
2028 FFV1Context *f = avctx->priv_data;
2032 if(avctx->extradata && read_extra_header(f) < 0)
2035 if(init_slice_contexts(f) < 0)
/*
 * decode_frame(): top-level decoder entry (AVCodec.decode).
 * Reads the keyframe flag, (re)parses the header on keyframes, gets an
 * output buffer, splits the packet into slices by walking the size
 * trailers backwards, optionally verifies per-slice CRCs, runs the
 * slice workers, conceals damaged slices from the previous picture and
 * returns the decoded frame.
 * NOTE(review): gapped -- declarations of i/j, key_frame assignments and
 * several error returns are on omitted lines.
 */
2041 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
2042 const uint8_t *buf = avpkt->data;
2043 int buf_size = avpkt->size;
2044 FFV1Context *f = avctx->priv_data;
2045 RangeCoder * const c= &f->slice_context[0]->c;
2046 AVFrame * const p= &f->picture;
2048 uint8_t keystate= 128;
2049 const uint8_t *buf_p;
2051 AVFrame *picture = data;
2053 /* release previously stored data */
2055 avctx->release_buffer(avctx, p);
2057 ff_init_range_decoder(c, buf, buf_size);
2058 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
2061 p->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
2062 if(get_rac(c, &keystate)){
/* keyframe: clear until the new header parses successfully */
2064 f->key_frame_ok = 0;
2065 if(read_header(f) < 0)
2067 f->key_frame_ok = 1;
/* non-keyframes are undecodable without a previously valid keyframe */
2069 if (!f->key_frame_ok) {
2070 av_log(avctx, AV_LOG_ERROR, "Cant decode non keyframe without valid keyframe\n");
2071 return AVERROR_INVALIDDATA;
2076 p->reference= 3; //for error concealment
2077 if(avctx->get_buffer(avctx, p) < 0){
2078 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
2082 if(avctx->debug&FF_DEBUG_PICT_INFO)
2083 av_log(avctx, AV_LOG_DEBUG, "ver:%d keyframe:%d coder:%d ec:%d slices:%d\n",
2084 f->version, p->key_frame, f->ac, f->ec, f->slice_count);
/* walk slices from the end of the packet via their size trailers */
2086 buf_p= buf + buf_size;
2087 for(i=f->slice_count-1; i>=0; i--){
2088 FFV1Context *fs= f->slice_context[i];
2089 int trailer = 3 + 5*!!f->ec;
2092 if(i || f->version>2) v = AV_RB24(buf_p-trailer)+trailer;
2093 else v = buf_p - c->bytestream_start;
2094 if(buf_p - c->bytestream_start < v){
2095 av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
/* per-slice CRC: residue must match, otherwise mark damaged */
2101 unsigned crc = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, v);
2103 int64_t ts = avpkt->pts != AV_NOPTS_VALUE ? avpkt->pts : avpkt->dts;
2104 av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!", crc);
2105 if(ts != AV_NOPTS_VALUE && avctx->pkt_timebase.num) {
2106 av_log(f->avctx, AV_LOG_ERROR, "at %f seconds\n",ts*av_q2d(avctx->pkt_timebase));
2107 } else if(ts != AV_NOPTS_VALUE) {
2108 av_log(f->avctx, AV_LOG_ERROR, "at %"PRId64"\n", ts);
2110 av_log(f->avctx, AV_LOG_ERROR, "\n");
2112 fs->slice_damaged = 1;
2117 ff_init_range_decoder(&fs->c, buf_p, v);
2119 fs->c.bytestream_end = (uint8_t *)(buf_p + v);
2122 avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
/* conceal damaged slices by copying from the previous picture */
2124 for(i=f->slice_count-1; i>=0; i--){
2125 FFV1Context *fs= f->slice_context[i];
2127 if(fs->slice_damaged && f->last_picture.data[0]){
2128 uint8_t *dst[4], *src[4];
2130 int sh = (j==1 || j==2) ? f->chroma_h_shift : 0;
2131 int sv = (j==1 || j==2) ? f->chroma_v_shift : 0;
2132 dst[j] = f->picture .data[j] + f->picture .linesize[j]*
2133 (fs->slice_y>>sv) + (fs->slice_x>>sh);
2134 src[j] = f->last_picture.data[j] + f->last_picture.linesize[j]*
2135 (fs->slice_y>>sv) + (fs->slice_x>>sh);
2137 av_image_copy(dst, f->picture.linesize, (const uint8_t **)src, f->last_picture.linesize,
2138 avctx->pix_fmt, fs->slice_width, fs->slice_height);
2142 f->picture_number++;
2145 *data_size = sizeof(AVFrame);
/* keep this frame around as the concealment reference */
2147 FFSWAP(AVFrame, f->picture, f->last_picture);
/* FFV1 decoder registration: direct rendering plus slice threading
 * (one worker per slice via decode_slice). */
2152 AVCodec ff_ffv1_decoder = {
2154 .type = AVMEDIA_TYPE_VIDEO,
2155 .id = AV_CODEC_ID_FFV1,
2156 .priv_data_size = sizeof(FFV1Context),
2157 .init = decode_init,
2158 .close = common_end,
2159 .decode = decode_frame,
2160 .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ |
2161 CODEC_CAP_SLICE_THREADS,
2162 .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
2165 #if CONFIG_FFV1_ENCODER
2167 #define OFFSET(x) offsetof(FFV1Context, x)
2168 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Encoder private options; slicecrc: -1 = auto (on for version >= 3),
 * 0 = off, 1 = on.  Terminating NULL entry is on an omitted line. */
2169 static const AVOption options[] = {
2170 { "slicecrc", "Protect slices with CRCs", OFFSET(ec), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 1, VE},
/* AVClass tying the options table above to FFV1Context. */
2174 static const AVClass class = {
2175 .class_name = "ffv1 encoder",
2176 .item_name = av_default_item_name,
2178 .version = LIBAVUTIL_VERSION_INT,
/* Encoder option defaults; entries are on omitted lines. */
2181 static const AVCodecDefault ffv1_defaults[] = {
2186 AVCodec ff_ffv1_encoder = {
2188 .type = AVMEDIA_TYPE_VIDEO,
2189 .id = AV_CODEC_ID_FFV1,
2190 .priv_data_size = sizeof(FFV1Context),
2191 .init = encode_init,
2192 .encode2 = encode_frame,
2193 .close = common_end,
2194 .capabilities = CODEC_CAP_SLICE_THREADS,
2195 .defaults = ffv1_defaults,
2196 .pix_fmts = (const enum PixelFormat[]){
2197 PIX_FMT_YUV420P, PIX_FMT_YUVA420P, PIX_FMT_YUVA422P, PIX_FMT_YUV444P,
2198 PIX_FMT_YUVA444P, PIX_FMT_YUV440P, PIX_FMT_YUV422P, PIX_FMT_YUV411P,
2199 PIX_FMT_YUV410P, PIX_FMT_0RGB32, PIX_FMT_RGB32, PIX_FMT_YUV420P16,
2200 PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_YUV444P9, PIX_FMT_YUV422P9,
2201 PIX_FMT_YUV420P9, PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_YUV444P10,
2202 PIX_FMT_GRAY16, PIX_FMT_GRAY8, PIX_FMT_GBRP9, PIX_FMT_GBRP10,
2203 PIX_FMT_GBRP12, PIX_FMT_GBRP14,
2206 .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
2207 .priv_class = &class,