2 * FFV1 codec for libavcodec
4 * Copyright (c) 2003-2012 Michael Niedermayer <michaelni@gmx.at>
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * FF Video Codec 1 (a lossless codec)
33 #include "rangecoder.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/crc.h"
39 #include "libavutil/opt.h"
40 #include "libavutil/imgutils.h"
41 #include "libavutil/timer.h"
43 #ifdef __INTEL_COMPILER
/* Sizing constants: bytes of range-coder state kept per context, and upper
 * bounds on the number of quantization tables and quantizer inputs per
 * context. NOTE(review): listing is elided between numbered lines. */
49 #define CONTEXT_SIZE 32
51 #define MAX_QUANT_TABLES 8
52 #define MAX_CONTEXT_INPUTS 5
/* Run-length code table shared with the Golomb coder, defined elsewhere. */
54 extern const uint8_t ff_log2_run[41];
/* 5-level difference quantizer used for >8-bit content: maps a sample
 * difference (indexed mod 256, so indices >=128 are the negative half) to
 * one of {-2,-1,0,1,2}. NOTE(review): closing "};" falls on an elided line. */
56 static const int8_t quant5_10bit[256]={
57 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
58 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
59 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
60 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
61 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
62 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
63 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
64 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* negative half (differences -128..-1, mirrored) */
65 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
66 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
67 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
68 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
69 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,
70 -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
71 -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
72 -1,-1,-1,-1,-1,-1,-0,-0,-0,-0,-0,-0,-0,-0,-0,-0,
/* 5-level difference quantizer for 8-bit content: same {-2..2} output range
 * as quant5_10bit but with much narrower dead zones near zero.
 * NOTE(review): closing "};" falls on an elided line. */
75 static const int8_t quant5[256]={
76 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
77 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
78 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
79 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
80 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* negative half */
84 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
85 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
86 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
87 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
88 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
89 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
90 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
91 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1,-1,
/* 9-level difference quantizer for >8-bit content: output range {-4..4}.
 * NOTE(review): closing "};" falls on an elided line. */
94 static const int8_t quant9_10bit[256]={
95 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
97 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
98 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
99 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
100 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
101 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
102 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* negative half */
103 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
104 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
105 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
106 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
107 -4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,
108 -3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,
109 -3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
110 -2,-2,-2,-2,-1,-1,-1,-1,-1,-1,-1,-1,-0,-0,-0,-0,
/* 11-level difference quantizer for 8-bit content: output range {-5..5}.
 * The "11" factors in encode_init's context formulas come from this level
 * count. NOTE(review): closing "};" falls on an elided line. */
113 static const int8_t quant11[256]={
114 0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
115 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
116 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
117 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
118 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
119 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
120 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
121 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
/* negative half */
122 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
123 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
124 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
125 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
126 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
127 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-4,-4,
128 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
129 -4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-1,
/* Default range-coder state-transition table installed for version >= 2
 * streams (copied into FFV1Context.state_transition in encode_init and
 * later refined by sort_stt from two-pass statistics).
 * NOTE(review): closing "};" falls on an elided line. */
132 static const uint8_t ver2_state[256]= {
133 0, 10, 10, 10, 10, 16, 16, 16, 28, 16, 16, 29, 42, 49, 20, 49,
134 59, 25, 26, 26, 27, 31, 33, 33, 33, 34, 34, 37, 67, 38, 39, 39,
135 40, 40, 41, 79, 43, 44, 45, 45, 48, 48, 64, 50, 51, 52, 88, 52,
136 53, 74, 55, 57, 58, 58, 74, 60, 101, 61, 62, 84, 66, 66, 68, 69,
137 87, 82, 71, 97, 73, 73, 82, 75, 111, 77, 94, 78, 87, 81, 83, 97,
138 85, 83, 94, 86, 99, 89, 90, 99, 111, 92, 93, 134, 95, 98, 105, 98,
139 105, 110, 102, 108, 102, 118, 103, 106, 106, 113, 109, 112, 114, 112, 116, 125,
140 115, 116, 117, 117, 126, 119, 125, 121, 121, 123, 145, 124, 126, 131, 127, 129,
141 165, 130, 132, 138, 133, 135, 145, 136, 137, 139, 146, 141, 143, 142, 144, 148,
142 147, 155, 151, 149, 151, 150, 152, 157, 153, 154, 156, 168, 158, 162, 161, 160,
143 172, 163, 169, 164, 166, 184, 167, 170, 177, 174, 171, 173, 182, 176, 180, 178,
144 175, 189, 179, 181, 186, 183, 192, 185, 200, 187, 191, 188, 190, 197, 193, 196,
145 197, 194, 195, 196, 198, 202, 199, 201, 210, 203, 207, 204, 205, 206, 208, 214,
146 209, 211, 221, 212, 213, 215, 224, 216, 217, 218, 219, 220, 222, 228, 223, 225,
147 226, 224, 227, 229, 240, 230, 231, 232, 233, 234, 235, 236, 238, 239, 237, 242,
148 241, 243, 242, 244, 245, 246, 247, 248, 249, 250, 251, 252, 252, 253, 254, 255,
/* Per-context Golomb-Rice adaptation state (fields elided in this listing;
 * drift/error_sum/bias/count are referenced by update_vlc_state below). */
151 typedef struct VlcState{
/* Per-plane coding state: the quantization tables used to form contexts,
 * plus per-context range-coder state bytes. */
158 typedef struct PlaneContext{
159 int16_t quant_table[MAX_CONTEXT_INPUTS][256];
160 int quant_table_index;
/* context_count entries of CONTEXT_SIZE state bytes, lazily allocated. */
162 uint8_t (*state)[CONTEXT_SIZE];
164 uint8_t interlace_bit_state[2];
167 #define MAX_SLICES 256
/* Main codec context; also used per-slice (slice_context entries are
 * memcpy'd clones of the parent — see init_slice_contexts). Many fields
 * are elided from this listing. */
169 typedef struct FFV1Context{
171 AVCodecContext *avctx;
/* Two-pass statistics: rc_stat counts 0/1 outcomes per range-coder state,
 * rc_stat2 per quant table/context/state byte. */
175 uint64_t rc_stat[256][2];
176 uint64_t (*rc_stat2[MAX_QUANT_TABLES])[32][2];
180 int chroma_h_shift, chroma_v_shift;
186 AVFrame last_picture;
188 int ac; ///< 1=range coder <-> 0=golomb rice
189 int ac_byte_count; ///< number of bytes used for AC coding
190 PlaneContext plane[MAX_PLANES];
191 int16_t quant_table[MAX_CONTEXT_INPUTS][256];
192 int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][256];
193 int context_count[MAX_QUANT_TABLES];
194 uint8_t state_transition[256];
/* Optional per-table initial context states derived from pass-1 stats. */
195 uint8_t (*initial_states[MAX_QUANT_TABLES])[32];
/* Ring of sample rows used by encode_plane/encode_rgb_frame. */
198 int16_t *sample_buffer;
205 int quant_table_count;
209 struct FFV1Context *slice_context[MAX_SLICES];
217 int bits_per_raw_sample;
/* Folds a prediction residual into the signed range representable in
 * 'bits' bits (body elided in this listing — TODO confirm against full
 * source). Used by both the VLC and plane encoders below. */
220 static av_always_inline int fold(int diff, int bits){
/* Median predictor: predicts the current sample from its left (L), top (T)
 * and top-left (LT) neighbours as median(L, L+T-LT, T). 'src' points at the
 * current position in the current row, 'last' at the same column in the
 * previous row. */
232 static inline int predict(int16_t *src, int16_t *last)
234 const int LT= last[-1];
235 const int T= last[ 0];
236 const int L = src[-1];
238 return mid_pred(L, L + T - LT, T);
/* Computes the (signed) context index for the current sample from quantized
 * neighbour differences. Uses 3 differences (L-LT, LT-T, T-RT) for the small
 * context model, or 5 (adding LL-L and TT-T from two columns/rows back) when
 * quant_table[3] is non-trivial (large context model). */
241 static inline int get_context(PlaneContext *p, int16_t *src,
242 int16_t *last, int16_t *last2)
244 const int LT= last[-1];
245 const int T= last[ 0];
246 const int RT= last[ 1];
247 const int L = src[-1];
/* quant_table[3][127] != 0 marks the 5-input (context_model=1) tables. */
249 if(p->quant_table[3][127]){
250 const int TT= last2[0];
251 const int LL= src[-2];
252 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF]
253 +p->quant_table[3][(LL-L) & 0xFF] + p->quant_table[4][(TT-T) & 0xFF];
255 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF];
/* For every (probability, occupancy-spread) pair, searches for the initial
 * range-coder state byte that minimizes expected code length under the given
 * one_state transition table, by simulating state-occupancy evolution and
 * accumulating entropy costs. Fills best_state[prob][i].
 * NOTE(review): several loop headers/updates are elided in this listing. */
258 static void find_best_state(uint8_t best_state[256][256], const uint8_t one_state[256]){
/* l2tab[i] = log2(i/256): per-symbol cost of a probability of i/256. */
263 l2tab[i]= log2(i/256.0);
265 for(i=0; i<256; i++){
266 double best_len[256];
/* Only probe candidate start states near i — a local search. */
272 for(j=FFMAX(i-10,1); j<FFMIN(i+11,256); j++){
276 for(k=0; k<256; k++){
277 double newocc[256]={0};
278 for(m=0; m<256; m++){
/* Expected bits contributed by states currently occupied at m. */
280 len -=occ[m]*( p *l2tab[ m]
281 + (1-p)*l2tab[256-m]);
284 if(len < best_len[k]){
/* Advance the occupancy distribution one symbol through the
 * transition table (mirrored for the zero branch). */
288 for(m=0; m<256; m++){
290 newocc[ one_state[ m]] += occ[m]* p ;
291 newocc[256-one_state[256-m]] += occ[m]*(1-p);
294 memcpy(occ, newocc, sizeof(occ));
/* Range-codes one (optionally signed) integer using the shared FFV1 symbol
 * layout: state[0] = zero flag, state[1..10] = unary exponent, state[11..21]
 * = sign, state[22..31] = mantissa bits. When rc_stat/rc_stat2 are non-NULL
 * (two-pass), the local put_rac wrapper also tallies bit statistics. */
300 static av_always_inline av_flatten void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int is_signed, uint64_t rc_stat[256][2], uint64_t rc_stat2[32][2]){
/* Stats-collecting shadow of put_rac; indexes rc_stat2 by state offset. */
303 #define put_rac(C,S,B) \
307 rc_stat2[(S)-state][B]++;\
313 const int a= FFABS(v);
314 const int e= av_log2(a);
315 put_rac(c, state+0, 0);
/* Small-exponent path: one state byte per unary/mantissa position. */
318 put_rac(c, state+1+i, 1); //1..10
320 put_rac(c, state+1+i, 0);
322 for(i=e-1; i>=0; i--){
323 put_rac(c, state+22+i, (a>>i)&1); //22..31
327 put_rac(c, state+11 + e, v < 0); //11..21
/* Large-exponent path: clamp state indices with FFMIN so positions beyond
 * 9 share the last state byte. */
330 put_rac(c, state+1+FFMIN(i,9), 1); //1..10
332 put_rac(c, state+1+9, 0);
334 for(i=e-1; i>=0; i--){
335 put_rac(c, state+22+FFMIN(i,9), (a>>i)&1); //22..31
339 put_rac(c, state+11 + 10, v < 0); //11..21
/* v == 0 case: just the zero flag. */
342 put_rac(c, state+0, 1);
/* Out-of-line wrapper around put_symbol_inline without statistics
 * gathering; used for header/table fields where speed does not matter. */
347 static av_noinline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed){
348 put_symbol_inline(c, state, v, is_signed, NULL, NULL);
/* Decoder counterpart of put_symbol_inline: reads the zero flag, unary
 * exponent, mantissa bits and (if is_signed) the sign, using the same
 * state-byte layout with FFMIN clamping for large exponents. */
351 static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed){
352 if(get_rac(c, state+0))
357 while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
362 for(i=e-1; i>=0; i--){
363 a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31
/* e becomes the sign mask (-1 or 0) used by the elided return. */
366 e= -(is_signed && get_rac(c, state+11 + FFMIN(e, 10))); //11..21
/* Out-of-line wrapper around get_symbol_inline (header/table decoding). */
371 static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
372 return get_symbol_inline(c, state, is_signed);
/* Updates the adaptive Golomb-Rice context after coding residual v:
 * accumulates |v| into error_sum, periodically halves the accumulators
 * (count == 128), and nudges bias/drift to track the residual mean.
 * NOTE(review): the drift-threshold branches around lines 389/395 are
 * partially elided here. */
375 static inline void update_vlc_state(VlcState * const state, const int v){
376 int drift= state->drift;
377 int count= state->count;
378 state->error_sum += FFABS(v);
381 if(count == 128){ //FIXME variable
384 state->error_sum >>= 1;
/* Clamp bias to int8 range while re-centering drift. */
389 if(state->bias > -128) state->bias--;
395 if(state->bias < 127) state->bias++;
/* Golomb-Rice codes one residual: removes the adaptive bias, folds into the
 * bits-wide range, derives the Rice parameter k from error_sum, applies the
 * sign-flip mapping driven by drift, then writes a signed Rice code. */
406 static inline void put_vlc_symbol(PutBitContext *pb, VlcState * const state, int v, int bits){
408 v = fold(v - state->bias, bits);
/* k = position where 2^k * count exceeds error_sum (loop body elided). */
412 while(i < state->error_sum){ //FIXME optimize
/* Map v so that the expected-negative case codes compactly; k==0 uses a
 * stricter threshold than the generic sign of 2*drift+count. */
420 if(k==0 && 2*state->drift <= - state->count) code= v ^ (-1);
423 code= v ^ ((2*state->drift + state->count)>>31);
426 av_dlog(NULL, "v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code,
427 state->bias, state->error_sum, state->drift, state->count, k);
428 set_sr_golomb(pb, code, k, 12, bits);
430 update_vlc_state(state, v);
/* Decoder counterpart of put_vlc_symbol: recomputes k from the shared
 * adaptive state, reads the signed Rice code, undoes the drift-driven
 * sign-flip mapping and the bias fold, then updates the state identically
 * to the encoder. */
433 static inline int get_vlc_symbol(GetBitContext *gb, VlcState * const state, int bits){
438 while(i < state->error_sum){ //FIXME optimize
445 v= get_sr_golomb(gb, k, 12, bits);
446 av_dlog(NULL, "v:%d bias:%d error:%d drift:%d count:%d k:%d",
447 v, state->bias, state->error_sum, state->drift, state->count, k);
/* Must mirror the encoder's mapping exactly or the state diverges. */
450 if(k==0 && 2*state->drift <= - state->count) v ^= (-1);
452 v ^= ((2*state->drift + state->count)>>31);
455 ret= fold(v + state->bias, bits);
457 update_vlc_state(state, v);
462 #if CONFIG_FFV1_ENCODER
/* Encodes one line of residuals for one plane. sample[0] is the current
 * row, sample[1]/sample[2] the previous rows. Uses the range coder when
 * s->ac is set, otherwise Golomb-Rice with run-length coding of zero
 * residuals in context 0. Returns an error when the output buffer is too
 * small. NOTE(review): the run-mode control flow is partially elided. */
463 static av_always_inline int encode_line(FFV1Context *s, int w,
465 int plane_index, int bits)
467 PlaneContext * const p= &s->plane[plane_index];
468 RangeCoder * const c= &s->c;
470 int run_index= s->run_index;
/* Worst-case output-space checks for the AC and VLC paths respectively. */
475 if(c->bytestream_end - c->bytestream < w*20){
476 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
480 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < w*4){
481 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
489 context= get_context(p, sample[0]+x, sample[1]+x, sample[2]+x);
490 diff= sample[0][x] - predict(sample[0]+x, sample[1]+x);
497 diff= fold(diff, bits);
/* Range-coder path; PASS1 collects per-state statistics. */
500 if(s->flags & CODEC_FLAG_PASS1){
501 put_symbol_inline(c, p->state[context], diff, 1, s->rc_stat, s->rc_stat2[p->quant_table_index][context]);
503 put_symbol_inline(c, p->state[context], diff, 1, NULL, NULL);
/* VLC path: context 0 triggers run-length mode for zero residuals. */
506 if(context == 0) run_mode=1;
511 while(run_count >= 1<<ff_log2_run[run_index]){
512 run_count -= 1<<ff_log2_run[run_index];
514 put_bits(&s->pb, 1, 1);
/* Run interrupted: emit partial run length, shrink run step. */
517 put_bits(&s->pb, 1 + ff_log2_run[run_index], run_count);
518 if(run_index) run_index--;
527 av_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
528 run_count, run_index, run_mode, x,
529 (int)put_bits_count(&s->pb));
532 put_vlc_symbol(&s->pb, &p->vlc_state[context], diff, bits);
/* Flush any pending run at end of line. */
536 while(run_count >= 1<<ff_log2_run[run_index]){
537 run_count -= 1<<ff_log2_run[run_index];
539 put_bits(&s->pb, 1, 1);
543 put_bits(&s->pb, 1, 1);
545 s->run_index= run_index;
/* Encodes one full luma/chroma/alpha plane: maintains a 2- or 3-row ring of
 * int16 sample rows (3 rows for the large context model, which needs two
 * previous rows), loads each source row (8-bit, LSB-packed 16-bit, or
 * MSB-packed 16-bit), and feeds it to encode_line. */
550 static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
552 const int ring_size= s->avctx->context_model ? 3 : 2;
556 memset(s->sample_buffer, 0, ring_size*(w+6)*sizeof(*s->sample_buffer));
/* Rotate ring so sample[0] is the current row; +3 leaves guard columns
 * for the LL/LT/RT neighbour accesses in get_context. */
559 for(i=0; i<ring_size; i++)
560 sample[i]= s->sample_buffer + (w+6)*((h+i-y)%ring_size) + 3;
562 sample[0][-1]= sample[1][0 ];
563 sample[1][ w]= sample[1][w-1];
565 if(s->bits_per_raw_sample<=8){
567 sample[0][x]= src[x + stride*y];
569 encode_line(s, w, sample, plane_index, 8);
571 if(s->packed_at_lsb){
573 sample[0][x]= ((uint16_t*)(src + stride*y))[x];
/* MSB-aligned 16-bit input: shift down to the significant bits. */
577 sample[0][x]= ((uint16_t*)(src + stride*y))[x] >> (16 - s->bits_per_raw_sample);
580 encode_line(s, w, sample, plane_index, s->bits_per_raw_sample);
582 //STOP_TIMER("encode line")}
/* Encodes an RGB(A) frame: unpacks packed 32-bit or planar 16-bit input
 * (the RCT color transform applied between load and encode is elided in
 * this listing), then encodes each of the 3 (+alpha) planes line by line
 * with one extra bit of residual range. */
586 static void encode_rgb_frame(FFV1Context *s, uint8_t *src[3], int w, int h, int stride[3]){
588 const int ring_size= s->avctx->context_model ? 3 : 2;
589 int16_t *sample[4][3];
590 int lbd= s->avctx->bits_per_raw_sample <= 8;
591 int bits= s->avctx->bits_per_raw_sample > 0 ? s->avctx->bits_per_raw_sample : 8;
592 int offset= 1 << bits;
595 memset(s->sample_buffer, 0, ring_size*4*(w+6)*sizeof(*s->sample_buffer));
598 for(i=0; i<ring_size; i++)
600 sample[p][i]= s->sample_buffer + p*ring_size*(w+6) + ((h+i-y)%ring_size)*(w+6) + 3;
603 int b,g,r,av_uninit(a);
/* Packed RGB32 path (component extraction elided). */
605 unsigned v= *((uint32_t*)(src[0] + x*4 + stride[0]*y));
/* Planar >8-bit path: one uint16 plane per component. */
611 b= *((uint16_t*)(src[0] + x*2 + stride[0]*y));
612 g= *((uint16_t*)(src[1] + x*2 + stride[1]*y));
613 r= *((uint16_t*)(src[2] + x*2 + stride[2]*y));
627 for(p=0; p<3 + s->transparency; p++){
628 sample[p][0][-1]= sample[p][1][0 ];
629 sample[p][1][ w]= sample[p][1][w-1];
/* Chroma-difference planes share plane_index (p+1)/2; +1 bit of range. */
631 encode_line(s, w, sample[p], (p+1)/2, 9);
633 encode_line(s, w, sample[p], (p+1)/2, bits+1);
/* Writes one 256-entry quantization table as run lengths: the table is
 * symmetric, so only the distances between value changes over indices
 * 1..127 are coded (plus the final run). */
638 static void write_quant_table(RangeCoder *c, int16_t *quant_table){
641 uint8_t state[CONTEXT_SIZE];
642 memset(state, 128, sizeof(state));
644 for(i=1; i<128 ; i++){
645 if(quant_table[i] != quant_table[i-1]){
646 put_symbol(c, state, i-last-1, 0);
650 put_symbol(c, state, i-last-1, 0);
/* Writes all MAX_CONTEXT_INPUTS sub-tables of one quantization table set. */
653 static void write_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
656 write_quant_table(c, quant_table[i]);
/* Writes the per-frame header into the first slice's range coder: version,
 * coder type, optional custom state-transition deltas, colorspace and
 * sample format, then either the quant tables (version 0/1) or the slice
 * geometry table (version 2). Version >= 3 moves all of this into the
 * extradata written by write_extra_header. */
659 static void write_header(FFV1Context *f){
660 uint8_t state[CONTEXT_SIZE];
662 RangeCoder * const c= &f->slice_context[0]->c;
664 memset(state, 128, sizeof(state));
667 put_symbol(c, state, f->version, 0);
668 put_symbol(c, state, f->ac, 0);
/* Custom transition table coded as signed deltas vs the default. */
670 for(i=1; i<256; i++){
671 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
674 put_symbol(c, state, f->colorspace, 0); //YUV cs type
676 put_symbol(c, state, f->bits_per_raw_sample, 0);
677 put_rac(c, state, f->chroma_planes);
678 put_symbol(c, state, f->chroma_h_shift, 0);
679 put_symbol(c, state, f->chroma_v_shift, 0);
680 put_rac(c, state, f->transparency);
682 write_quant_tables(c, f->quant_table);
683 }else if(f->version < 3){
684 put_symbol(c, state, f->slice_count, 0);
685 for(i=0; i<f->slice_count; i++){
686 FFV1Context *fs= f->slice_context[i];
/* Slice geometry in units of the slice grid, not pixels. */
687 put_symbol(c, state, (fs->slice_x +1)*f->num_h_slices / f->width , 0);
688 put_symbol(c, state, (fs->slice_y +1)*f->num_v_slices / f->height , 0);
689 put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
690 put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
691 for(j=0; j<f->plane_count; j++){
692 put_symbol(c, state, f->plane[j].quant_table_index, 0);
693 av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
698 #endif /* CONFIG_FFV1_ENCODER */
/* Shared encoder/decoder initialization: validates dimensions, caches
 * codec flags and frame size, and sets up the DSP context. */
700 static av_cold int common_init(AVCodecContext *avctx){
701 FFV1Context *s = avctx->priv_data;
703 if(!avctx->width || !avctx->height)
704 return AVERROR_INVALIDDATA;
707 s->flags= avctx->flags;
709 avcodec_get_frame_defaults(&s->picture);
711 ff_dsputil_init(&s->dsp, avctx);
713 s->width = avctx->width;
714 s->height= avctx->height;
/* Prepares one slice context from the parent: copies plane layout, lazily
 * allocates per-context AC state and VLC state arrays, and installs the
 * parent's state-transition table into the slice's range coder.
 * Returns AVERROR(ENOMEM) on allocation failure. */
724 static int init_slice_state(FFV1Context *f, FFV1Context *fs){
727 fs->plane_count= f->plane_count;
728 fs->transparency= f->transparency;
729 for(j=0; j<f->plane_count; j++){
730 PlaneContext * const p= &fs->plane[j];
733 if(!p-> state) p-> state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t));
735 return AVERROR(ENOMEM);
737 if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(VlcState));
739 return AVERROR(ENOMEM);
744 //FIXME only redo if state_transition changed
745 for(j=1; j<256; j++){
746 fs->c.one_state [ j]= f->state_transition[j];
/* zero_state is the mirror of one_state by construction. */
747 fs->c.zero_state[256-j]= 256-fs->c.one_state [j];
/* Runs init_slice_state over every slice context; propagates failure. */
754 static int init_slices_state(FFV1Context *f){
756 for(i=0; i<f->slice_count; i++){
757 FFV1Context *fs= f->slice_context[i];
758 if(init_slice_state(f, fs) < 0)
/* Creates one FFV1Context clone per slice on the num_h_slices x
 * num_v_slices grid, computes each slice's pixel rectangle, clears the
 * cloned statistics pointers, and allocates the per-slice sample ring
 * buffer (3 ring rows x 4 planes). */
764 static av_cold int init_slice_contexts(FFV1Context *f){
767 f->slice_count= f->num_h_slices * f->num_v_slices;
769 for(i=0; i<f->slice_count; i++){
770 FFV1Context *fs= av_mallocz(sizeof(*fs));
771 int sx= i % f->num_h_slices;
772 int sy= i / f->num_h_slices;
/* Integer-divided slice edges so the grid exactly tiles the frame. */
773 int sxs= f->avctx->width * sx / f->num_h_slices;
774 int sxe= f->avctx->width *(sx+1) / f->num_h_slices;
775 int sys= f->avctx->height* sy / f->num_v_slices;
776 int sye= f->avctx->height*(sy+1) / f->num_v_slices;
777 f->slice_context[i]= fs;
/* Clone parent, then clear pointers the slice must own/reallocate. */
778 memcpy(fs, f, sizeof(*fs));
779 memset(fs->rc_stat2, 0, sizeof(fs->rc_stat2));
781 fs->slice_width = sxe - sxs;
782 fs->slice_height= sye - sys;
786 fs->sample_buffer = av_malloc(3*4 * (fs->width+6) * sizeof(*fs->sample_buffer));
787 if (!fs->sample_buffer)
788 return AVERROR(ENOMEM);
/* Allocates one initial-state array per quantization table and fills it
 * with the neutral range-coder state 128. Returns AVERROR(ENOMEM) on
 * failure. */
793 static int allocate_initial_states(FFV1Context *f){
796 for(i=0; i<f->quant_table_count; i++){
797 f->initial_states[i]= av_malloc(f->context_count[i]*sizeof(*f->initial_states[i]));
798 if(!f->initial_states[i])
799 return AVERROR(ENOMEM);
800 memset(f->initial_states[i], 128, f->context_count[i]*sizeof(*f->initial_states[i]));
805 #if CONFIG_FFV1_ENCODER
/* Serializes the version >= 2 global header into avctx->extradata: stream
 * parameters, all quantization tables, optional non-default initial context
 * states, the error-correction mode, and a trailing CRC32 over the whole
 * extradata. NOTE(review): the av_malloc result at line 816 is not
 * NULL-checked in this listing — verify against the full source. */
806 static int write_extra_header(FFV1Context *f){
807 RangeCoder * const c= &f->c;
808 uint8_t state[CONTEXT_SIZE];
810 uint8_t state2[32][CONTEXT_SIZE];
813 memset(state2, 128, sizeof(state2));
814 memset(state, 128, sizeof(state));
/* Worst-case extradata size for tables + initial states. */
816 f->avctx->extradata= av_malloc(f->avctx->extradata_size= 10000 + (11*11*5*5*5+11*11*11)*32);
817 ff_init_range_encoder(c, f->avctx->extradata, f->avctx->extradata_size);
818 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
820 put_symbol(c, state, f->version, 0);
823 f->minor_version = 2;
824 put_symbol(c, state, f->minor_version, 0);
826 put_symbol(c, state, f->ac, 0);
/* Custom state-transition table as signed deltas vs the default. */
828 for(i=1; i<256; i++){
829 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);
832 put_symbol(c, state, f->colorspace, 0); //YUV cs type
833 put_symbol(c, state, f->bits_per_raw_sample, 0);
834 put_rac(c, state, f->chroma_planes);
835 put_symbol(c, state, f->chroma_h_shift, 0);
836 put_symbol(c, state, f->chroma_v_shift, 0);
837 put_rac(c, state, f->transparency);
838 put_symbol(c, state, f->num_h_slices-1, 0);
839 put_symbol(c, state, f->num_v_slices-1, 0);
841 put_symbol(c, state, f->quant_table_count, 0);
842 for(i=0; i<f->quant_table_count; i++)
843 write_quant_tables(c, f->quant_tables[i]);
/* Only emit initial states for tables that differ from all-128. */
845 for(i=0; i<f->quant_table_count; i++){
846 for(j=0; j<f->context_count[i]*CONTEXT_SIZE; j++)
847 if(f->initial_states[i] && f->initial_states[i][0][j] != 128)
849 if(j<f->context_count[i]*CONTEXT_SIZE){
850 put_rac(c, state, 1);
851 for(j=0; j<f->context_count[i]; j++){
852 for(k=0; k<CONTEXT_SIZE; k++){
/* Predict each state byte from the previous context's byte. */
853 int pred= j ? f->initial_states[i][j-1][k] : 128;
854 put_symbol(c, state2[k], (int8_t)(f->initial_states[i][j][k]-pred), 1);
858 put_rac(c, state, 0);
863 put_symbol(c, state, f->ec, 0);
/* Finalize: trim to used size and append CRC32 of the extradata. */
866 f->avctx->extradata_size= ff_rac_terminate(c);
867 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, f->avctx->extradata, f->avctx->extradata_size);
868 AV_WL32(f->avctx->extradata + f->avctx->extradata_size, v);
869 f->avctx->extradata_size += 4;
/* Greedily reorders the state-transition table using pass-1 statistics:
 * for nearby state pairs (i, i2) it compares the entropy cost of keeping
 * vs. swapping them (COST2 accounts for both the state and its mirror) and
 * swaps when that shrinks the expected output, repeating until stable.
 * State 128 is pinned. */
874 static int sort_stt(FFV1Context *s, uint8_t stt[256]){
875 int i,i2,changed,print=0;
879 for(i=12; i<244; i++){
880 for(i2=i+1; i2<245 && i2<i+4; i2++){
/* Entropy cost of coding the observed 0/1 counts of state 'old' with
 * the probability implied by state 'new'. */
881 #define COST(old, new) \
882 s->rc_stat[old][0]*-log2((256-(new))/256.0)\
883 +s->rc_stat[old][1]*-log2( (new) /256.0)
885 #define COST2(old, new) \
887 +COST(256-(old), 256-(new))
889 double size0= COST2(i, i ) + COST2(i2, i2);
890 double sizeX= COST2(i, i2) + COST2(i2, i );
891 if(sizeX < size0 && i!=128 && i2!=128){
/* Swap the pair and its mirror, stats included... */
893 FFSWAP(int, stt[ i], stt[ i2]);
894 FFSWAP(int, s->rc_stat[i ][0],s->rc_stat[ i2][0]);
895 FFSWAP(int, s->rc_stat[i ][1],s->rc_stat[ i2][1]);
897 FFSWAP(int, stt[256-i], stt[256-i2]);
898 FFSWAP(int, s->rc_stat[256-i][0],s->rc_stat[256-i2][0]);
899 FFSWAP(int, s->rc_stat[256-i][1],s->rc_stat[256-i2][1]);
/* ...then rewrite every transition that targeted either state. */
901 for(j=1; j<256; j++){
902 if (stt[j] == i ) stt[j] = i2;
903 else if(stt[j] == i2) stt[j] = i ;
905 if (stt[256-j] == 256-i ) stt[256-j] = 256-i2;
906 else if(stt[256-j] == 256-i2) stt[256-j] = 256-i ;
/* Encoder initialization: selects the stream version from requested
 * features, validates the pixel format and bit depth, builds the
 * quantization tables and context counts, parses optional two-pass
 * statistics to derive a tuned state-transition table and initial context
 * states, chooses the slice grid, writes the extradata (version >= 2), and
 * allocates per-slice contexts and pass-1 statistic buffers.
 * NOTE(review): many closing braces / break statements between the numbered
 * lines are elided in this listing. */
917 static av_cold int encode_init(AVCodecContext *avctx)
919 FFV1Context *s = avctx->priv_data;
/* Two-pass encoding and multi-slice output both need version >= 2. */
926 if((avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) || avctx->slices>1)
927 s->version = FFMAX(s->version, 2);
929 if(avctx->level == 3){
934 s->ec = (s->version >= 3);
937 if(s->version >= 2 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
938 av_log(avctx, AV_LOG_ERROR, "Version 2 needed for requested features but version 2 is experimental and not enabled\n");
939 return AVERROR_INVALIDDATA;
/* coder_type > 0 selects the range coder (ac=2 = custom transitions). */
942 s->ac= avctx->coder_type > 0 ? 2 : 0;
/* Per-pixel-format setup; cases deliberately fall through so deeper bit
 * depths inherit the shallower depth's defaults. */
945 switch(avctx->pix_fmt){
946 case AV_PIX_FMT_YUV444P9:
947 case AV_PIX_FMT_YUV422P9:
948 case AV_PIX_FMT_YUV420P9:
949 if (!avctx->bits_per_raw_sample)
950 s->bits_per_raw_sample = 9;
951 case AV_PIX_FMT_YUV444P10:
952 case AV_PIX_FMT_YUV420P10:
953 case AV_PIX_FMT_YUV422P10:
954 s->packed_at_lsb = 1;
955 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
956 s->bits_per_raw_sample = 10;
957 case AV_PIX_FMT_GRAY16:
958 case AV_PIX_FMT_YUV444P16:
959 case AV_PIX_FMT_YUV422P16:
960 case AV_PIX_FMT_YUV420P16:
961 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) {
962 s->bits_per_raw_sample = 16;
963 } else if (!s->bits_per_raw_sample){
964 s->bits_per_raw_sample = avctx->bits_per_raw_sample;
966 if(s->bits_per_raw_sample <=8){
967 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n");
968 return AVERROR_INVALIDDATA;
/* Deep color currently requires the range coder. */
970 if(!s->ac && avctx->coder_type == -1) {
971 av_log(avctx, AV_LOG_INFO, "bits_per_raw_sample > 8, forcing coder 1\n");
975 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample of more than 8 needs -coder 1 currently\n");
976 return AVERROR_INVALIDDATA;
978 s->version= FFMAX(s->version, 1);
979 case AV_PIX_FMT_GRAY8:
980 case AV_PIX_FMT_YUV444P:
981 case AV_PIX_FMT_YUV440P:
982 case AV_PIX_FMT_YUV422P:
983 case AV_PIX_FMT_YUV420P:
984 case AV_PIX_FMT_YUV411P:
985 case AV_PIX_FMT_YUV410P:
986 s->chroma_planes= av_pix_fmt_descriptors[avctx->pix_fmt].nb_components < 3 ? 0 : 1;
989 case AV_PIX_FMT_YUVA444P:
990 case AV_PIX_FMT_YUVA422P:
991 case AV_PIX_FMT_YUVA420P:
996 case AV_PIX_FMT_RGB32:
1000 case AV_PIX_FMT_0RGB32:
1003 case AV_PIX_FMT_GBRP9:
1004 if (!avctx->bits_per_raw_sample)
1005 s->bits_per_raw_sample = 9;
1006 case AV_PIX_FMT_GBRP10:
1007 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
1008 s->bits_per_raw_sample = 10;
1009 case AV_PIX_FMT_GBRP12:
1010 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
1011 s->bits_per_raw_sample = 12;
1012 case AV_PIX_FMT_GBRP14:
1013 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
1014 s->bits_per_raw_sample = 14;
1015 else if (!s->bits_per_raw_sample)
1016 s->bits_per_raw_sample = avctx->bits_per_raw_sample;
1018 s->chroma_planes= 1;
1019 s->version= FFMAX(s->version, 1);
1022 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
1023 return AVERROR_INVALIDDATA;
1025 if (s->transparency) {
1026 av_log(avctx, AV_LOG_WARNING, "Storing alpha plane, this will require a recent FFV1 decoder to playback!\n");
1028 if (avctx->context_model > 1U) {
1029 av_log(avctx, AV_LOG_ERROR, "Invalid context model %d, valid values are 0 and 1\n", avctx->context_model);
1030 return AVERROR(EINVAL);
/* Start from the built-in version-2 transition table. */
1034 for(i=1; i<256; i++)
1035 s->state_transition[i]=ver2_state[i];
/* Build both quant table sets: [0] = small model (3 inputs, 11 levels),
 * [1] = large model (5 inputs, 11/11/11/5/5 levels). */
1037 for(i=0; i<256; i++){
1038 s->quant_table_count=2;
1039 if(s->bits_per_raw_sample <=8){
1040 s->quant_tables[0][0][i]= quant11[i];
1041 s->quant_tables[0][1][i]= 11*quant11[i];
1042 s->quant_tables[0][2][i]= 11*11*quant11[i];
1043 s->quant_tables[1][0][i]= quant11[i];
1044 s->quant_tables[1][1][i]= 11*quant11[i];
1045 s->quant_tables[1][2][i]= 11*11*quant5 [i];
1046 s->quant_tables[1][3][i]= 5*11*11*quant5 [i];
1047 s->quant_tables[1][4][i]= 5*5*11*11*quant5 [i];
1049 s->quant_tables[0][0][i]= quant9_10bit[i];
1050 s->quant_tables[0][1][i]= 11*quant9_10bit[i];
1051 s->quant_tables[0][2][i]= 11*11*quant9_10bit[i];
1052 s->quant_tables[1][0][i]= quant9_10bit[i];
1053 s->quant_tables[1][1][i]= 11*quant9_10bit[i];
1054 s->quant_tables[1][2][i]= 11*11*quant5_10bit[i];
1055 s->quant_tables[1][3][i]= 5*11*11*quant5_10bit[i];
1056 s->quant_tables[1][4][i]= 5*5*11*11*quant5_10bit[i];
/* Half the product of level counts: signed contexts are folded. */
1059 s->context_count[0]= (11*11*11+1)/2;
1060 s->context_count[1]= (11*11*5*5*5+1)/2;
1061 memcpy(s->quant_table, s->quant_tables[avctx->context_model], sizeof(s->quant_table));
1063 for(i=0; i<s->plane_count; i++){
1064 PlaneContext * const p= &s->plane[i];
1066 memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table));
1067 p->quant_table_index= avctx->context_model;
1068 p->context_count= s->context_count[p->quant_table_index];
1071 if(allocate_initial_states(s) < 0)
1072 return AVERROR(ENOMEM);
1074 avctx->coded_frame= &s->picture;
1075 if(!s->transparency)
1077 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
1079 s->picture_number=0;
/* Two-pass: statistic accumulators, one per quant table. */
1081 if(avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
1082 for(i=0; i<s->quant_table_count; i++){
1083 s->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*s->rc_stat2[i]));
1085 return AVERROR(ENOMEM);
/* Pass 2: parse the stats file back in. */
1088 if(avctx->stats_in){
1089 char *p= avctx->stats_in;
1090 uint8_t best_state[256][256];
1094 av_assert0(s->version>=2);
1097 for(j=0; j<256; j++){
1099 s->rc_stat[j][i]= strtol(p, &next, 0);
1101 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d [%s]\n", j,i,p);
1107 for(i=0; i<s->quant_table_count; i++){
1108 for(j=0; j<s->context_count[i]; j++){
1109 for(k=0; k<32; k++){
1111 s->rc_stat2[i][j][k][m]= strtol(p, &next, 0);
1113 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d %d %d [%s]\n", i,j,k,m,p);
1114 return AVERROR_INVALIDDATA;
1121 gob_count= strtol(p, &next, 0);
1122 if(next==p || gob_count <0){
1123 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid\n");
1124 return AVERROR_INVALIDDATA;
1127 while(*p=='\n' || *p==' ') p++;
/* Tune the transition table, then derive per-context initial states. */
1130 sort_stt(s, s->state_transition);
1132 find_best_state(best_state, s->state_transition);
1134 for(i=0; i<s->quant_table_count; i++){
1135 for(j=0; j<s->context_count[i]; j++){
1136 for(k=0; k<32; k++){
1138 if(s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]){
1139 p=256.0*s->rc_stat2[i][j][k][1] / (s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]);
1141 s->initial_states[i][j][k]= best_state[av_clip(round(p), 1, 255)][av_clip((s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1])/gob_count, 0, 255)];
/* Pick a near-square slice grid matching the requested slice count. */
1148 for(s->num_v_slices=2; s->num_v_slices<9; s->num_v_slices++){
1149 for(s->num_h_slices=s->num_v_slices; s->num_h_slices<2*s->num_v_slices; s->num_h_slices++){
1150 if(avctx->slices == s->num_h_slices * s->num_v_slices && avctx->slices <= 64 || !avctx->slices)
1154 av_log(avctx, AV_LOG_ERROR, "Unsupported number %d of slices requested, please specify a supported number with -slices (ex:4,6,9,12,16, ...)\n", avctx->slices);
1157 write_extra_header(s);
1160 if(init_slice_contexts(s) < 0)
1162 if(init_slices_state(s) < 0)
1165 #define STATS_OUT_SIZE 1024*1024*6
/* Pass 1: stats output buffer plus per-slice accumulators. */
1166 if(avctx->flags & CODEC_FLAG_PASS1){
1167 avctx->stats_out= av_mallocz(STATS_OUT_SIZE);
1168 for(i=0; i<s->quant_table_count; i++){
1169 for(j=0; j<s->slice_count; j++){
1170 FFV1Context *sf= s->slice_context[j];
1171 av_assert0(!sf->rc_stat2[i]);
1172 sf->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*sf->rc_stat2[i]));
1173 if(!sf->rc_stat2[i])
1174 return AVERROR(ENOMEM);
1181 #endif /* CONFIG_FFV1_ENCODER */
/* Resets one slice's per-plane coding state for a keyframe: interlace
 * flags to neutral (128), AC context states to the table's initial states
 * (or 128 if none), and VLC contexts to their adaptive defaults. */
1184 static void clear_slice_state(FFV1Context *f, FFV1Context *fs){
1187 for(i=0; i<f->plane_count; i++){
1188 PlaneContext *p= &fs->plane[i];
1190 p->interlace_bit_state[0]= 128;
1191 p->interlace_bit_state[1]= 128;
1194 if(f->initial_states[p->quant_table_index]){
1195 memcpy(p->state, f->initial_states[p->quant_table_index], CONTEXT_SIZE*p->context_count);
1197 memset(p->state, 128, CONTEXT_SIZE*p->context_count);
1199 for(j=0; j<p->context_count; j++){
1200 p->vlc_state[j].drift= 0;
1201 p->vlc_state[j].error_sum= 4; //FFMAX((RANGE + 32)/64, 2);
1202 p->vlc_state[j].bias= 0;
1203 p->vlc_state[j].count= 1;
1209 #if CONFIG_FFV1_ENCODER
/* Writes the version >= 3 per-slice header: slice position and size in
 * slice-grid units, per-plane quant table indices, the interlace/field
 * order code (3 = progressive), and the sample aspect ratio. */
1211 static void encode_slice_header(FFV1Context *f, FFV1Context *fs){
1212 RangeCoder *c = &fs->c;
1213 uint8_t state[CONTEXT_SIZE];
1215 memset(state, 128, sizeof(state));
1217 put_symbol(c, state, (fs->slice_x +1)*f->num_h_slices / f->width , 0);
1218 put_symbol(c, state, (fs->slice_y +1)*f->num_v_slices / f->height , 0);
1219 put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
1220 put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
1221 for(j=0; j<f->plane_count; j++){
1222 put_symbol(c, state, f->plane[j].quant_table_index, 0);
1223 av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
1225 if(!f->picture.interlaced_frame) put_symbol(c, state, 3, 0);
1226 else put_symbol(c, state, 1 + !f->picture.top_field_first, 0);
1227 put_symbol(c, state, f->picture.sample_aspect_ratio.num, 0);
1228 put_symbol(c, state, f->picture.sample_aspect_ratio.den, 0);
/* Thread-pool worker: encodes one slice. Resets state on keyframes, writes
 * the slice header (version >= 3), sets up the bitstream writer after the
 * range-coded prefix when Golomb coding is used, then encodes YCbCr(A)
 * planes or an RGB frame depending on colorspace. */
1231 static int encode_slice(AVCodecContext *c, void *arg){
1232 FFV1Context *fs= *(void**)arg;
1233 FFV1Context *f= fs->avctx->priv_data;
1234 int width = fs->slice_width;
1235 int height= fs->slice_height;
1238 AVFrame * const p= &f->picture;
/* Bytes per sample in the source: 2 for >8-bit input, else 1. */
1239 const int ps= (f->bits_per_raw_sample>8)+1;
1242 clear_slice_state(f, fs);
1244 encode_slice_header(f, fs);
/* VLC mode: terminate the range-coded prefix, then bit-write after it. */
1248 put_rac(&fs->c, (uint8_t[]){129}, 0);
1249 fs->ac_byte_count = f->version > 2 || (!x&&!y) ? ff_rac_terminate(&fs->c) : 0;
1250 init_put_bits(&fs->pb, fs->c.bytestream_start + fs->ac_byte_count, fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count);
1253 if(f->colorspace==0){
/* Ceiling division of the slice size by the chroma subsampling. */
1254 const int chroma_width = -((-width )>>f->chroma_h_shift);
1255 const int chroma_height= -((-height)>>f->chroma_v_shift);
1256 const int cx= x>>f->chroma_h_shift;
1257 const int cy= y>>f->chroma_v_shift;
1259 encode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1261 if (f->chroma_planes){
1262 encode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1263 encode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1265 if (fs->transparency)
1266 encode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
1268 uint8_t *planes[3] = {p->data[0] + ps*x + y*p->linesize[0],
1269 p->data[1] + ps*x + y*p->linesize[1],
1270 p->data[2] + ps*x + y*p->linesize[2]};
1271 encode_rgb_frame(fs, planes, width, height, p->linesize);
/* encode_frame(): top-level encoder entry point (AVCodec.encode2).
 * Writes the keyframe flag (and header on keyframes), encodes all slices in
 * parallel, then packs the slice bitstreams back-to-back into pkt, appending
 * per-slice 24-bit length trailers and optional CRCs.
 * NOTE(review): many original lines are elided in this excerpt; code below is
 * byte-identical, only comments were added. */
1278 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1279 const AVFrame *pict, int *got_packet)
1281 FFV1Context *f = avctx->priv_data;
1282 RangeCoder * const c= &f->slice_context[0]->c;
1283 AVFrame * const p= &f->picture;
1285 uint8_t keystate=128;
/* worst-case packet size: per-pixel upper bound plus the minimum buffer */
1289 if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width*avctx->height*((8*2+1+1)*4)/8
1290 + FF_MIN_BUFFER_SIZE)) < 0)
1293 ff_init_range_encoder(c, pkt->data, pkt->size);
1294 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1297 p->pict_type= AV_PICTURE_TYPE_I;
/* keyframe every gop_size frames (gop_size==0 -> all keyframes) */
1299 if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
1300 put_rac(c, &keystate, 1);
1305 put_rac(c, &keystate, 0);
/* install the custom state-transition table into the range coder; the
 * zero/one tables must stay mirror images of each other */
1311 for(i=1; i<256; i++){
1312 c->one_state[i]= f->state_transition[i];
1313 c->zero_state[256-i]= 256-c->one_state[i];
/* give each slice (beyond slice 0, which reuses the header coder) an equal
 * share of the remaining packet buffer */
1317 for(i=1; i<f->slice_count; i++){
1318 FFV1Context *fs= f->slice_context[i];
1319 uint8_t *start = pkt->data + (pkt->size-used_count)*(int64_t)i/f->slice_count;
1320 int len = pkt->size/f->slice_count;
1321 ff_init_range_encoder(&fs->c, start, len);
1323 avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
/* compact the slices into one contiguous bitstream */
1326 for(i=0; i<f->slice_count; i++){
1327 FFV1Context *fs= f->slice_context[i];
1332 put_rac(&fs->c, &state, 0);
1333 bytes= ff_rac_terminate(&fs->c);
1335 flush_put_bits(&fs->pb); //nicer padding FIXME
1336 bytes= fs->ac_byte_count + (put_bits_count(&fs->pb)+7)/8;
1338 if(i>0 || f->version>2){
1339 av_assert0(bytes < pkt->size/f->slice_count);
1340 memmove(buf_p, fs->c.bytestream_start, bytes);
/* 24-bit big-endian slice size trailer; hence the < (1<<24) assert */
1341 av_assert0(bytes < (1<<24));
1342 AV_WB24(buf_p+bytes, bytes);
/* optional error-check CRC over the slice, stored little-endian */
1348 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, bytes);
1349 AV_WL32(buf_p + bytes, v); bytes += 4;
/* 2-pass rate-control statistics: aggregated from all slices and dumped to
 * avctx->stats_out every 32nd frame */
1354 if((avctx->flags&CODEC_FLAG_PASS1) && (f->picture_number&31)==0){
1356 char *p= avctx->stats_out;
1357 char *end= p + STATS_OUT_SIZE;
1359 memset(f->rc_stat, 0, sizeof(f->rc_stat));
1360 for(i=0; i<f->quant_table_count; i++)
1361 memset(f->rc_stat2[i], 0, f->context_count[i]*sizeof(*f->rc_stat2[i]));
1363 for(j=0; j<f->slice_count; j++){
1364 FFV1Context *fs= f->slice_context[j];
1365 for(i=0; i<256; i++){
1366 f->rc_stat[i][0] += fs->rc_stat[i][0];
1367 f->rc_stat[i][1] += fs->rc_stat[i][1];
1369 for(i=0; i<f->quant_table_count; i++){
1370 for(k=0; k<f->context_count[i]; k++){
1371 for(m=0; m<32; m++){
1372 f->rc_stat2[i][k][m][0] += fs->rc_stat2[i][k][m][0];
1373 f->rc_stat2[i][k][m][1] += fs->rc_stat2[i][k][m][1];
1379 for(j=0; j<256; j++){
1380 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat[j][0], f->rc_stat[j][1]);
1383 snprintf(p, end-p, "\n");
1385 for(i=0; i<f->quant_table_count; i++){
1386 for(j=0; j<f->context_count[i]; j++){
1387 for(m=0; m<32; m++){
1388 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat2[i][j][m][0], f->rc_stat2[i][j][m][1]);
1393 snprintf(p, end-p, "%d\n", f->gob_count);
1394 } else if(avctx->flags&CODEC_FLAG_PASS1)
1395 avctx->stats_out[0] = '\0';
1397 f->picture_number++;
1398 pkt->size = buf_p - pkt->data;
1399 pkt->flags |= AV_PKT_FLAG_KEY*p->key_frame;
1404 #endif /* CONFIG_FFV1_ENCODER */
/* common_end(): shared close callback for encoder and decoder.
 * Releases frame buffers (decoder only), then frees all per-slice state,
 * per-plane context tables, rate-control stats and the slice contexts.
 * NOTE(review): some original lines are elided in this excerpt; code below is
 * byte-identical, only comments were added. */
1406 static av_cold int common_end(AVCodecContext *avctx){
1407 FFV1Context *s = avctx->priv_data;
/* ->decode is only set on the decoder; the encoder never owns these frames */
1410 if (avctx->codec->decode && s->picture.data[0])
1411 avctx->release_buffer(avctx, &s->picture);
1412 if (avctx->codec->decode && s->last_picture.data[0])
1413 avctx->release_buffer(avctx, &s->last_picture);
1415 for(j=0; j<s->slice_count; j++){
1416 FFV1Context *fs= s->slice_context[j];
1417 for(i=0; i<s->plane_count; i++){
1418 PlaneContext *p= &fs->plane[i];
1420 av_freep(&p->state);
1421 av_freep(&p->vlc_state);
1423 av_freep(&fs->sample_buffer);
1426 av_freep(&avctx->stats_out);
/* free per-quant-table data before freeing the slice contexts that hold
 * the per-slice rc_stat2 pointers */
1427 for(j=0; j<s->quant_table_count; j++){
1428 av_freep(&s->initial_states[j]);
1429 for(i=0; i<s->slice_count; i++){
1430 FFV1Context *sf= s->slice_context[i];
1431 av_freep(&sf->rc_stat2[j]);
1433 av_freep(&s->rc_stat2[j]);
1436 for(i=0; i<s->slice_count; i++){
1437 av_freep(&s->slice_context[i]);
/* decode_line(): decode one scan line of one plane into sample[1].
 * sample[0] is the previous line, sample[1] the current one (used by
 * get_context()/predict()). In range-coder mode symbols come from
 * get_symbol_inline(); in Golomb/VLC mode runs + get_vlc_symbol() are used.
 * 'bits' is the sample bit depth; output is masked to that depth.
 * NOTE(review): many original lines (loop headers, ac/vlc branch) are elided
 * in this excerpt; code below is byte-identical, only comments were added. */
1443 static av_always_inline void decode_line(FFV1Context *s, int w,
1445 int plane_index, int bits)
1447 PlaneContext * const p= &s->plane[plane_index];
1448 RangeCoder * const c= &s->c;
/* run_index persists across lines via s->run_index (restored at the end) */
1452 int run_index= s->run_index;
1455 int diff, context, sign;
1457 context= get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
1464 av_assert2(context < p->context_count);
1467 diff= get_symbol_inline(c, p->state[context], 1);
/* context 0 switches the VLC path into run-length mode */
1469 if(context == 0 && run_mode==0) run_mode=1;
1472 if(run_count==0 && run_mode==1){
1473 if(get_bits1(&s->gb)){
/* full run of 2^log2_run[run_index] zero diffs */
1474 run_count = 1<<ff_log2_run[run_index];
1475 if(x + run_count <= w) run_index++;
/* partial run: explicit remaining-length field */
1477 if(ff_log2_run[run_index]) run_count = get_bits(&s->gb, ff_log2_run[run_index]);
1479 if(run_index) run_index--;
1487 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1492 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1494 av_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
1495 run_count, run_index, run_mode, x, get_bits_count(&s->gb));
1498 if(sign) diff= -diff;
/* reconstructed sample = prediction + residual, wrapped to 'bits' depth */
1500 sample[1][x]= (predict(sample[1] + x, sample[0] + x) + diff) & ((1<<bits)-1);
1502 s->run_index= run_index;
/* decode_plane(): decode a whole luma/chroma/alpha plane into src.
 * Uses a two-line rolling int16 sample buffer (+3 margin on each side for the
 * context/prediction neighbourhood) and copies each decoded line out as 8-bit
 * or 16-bit samples depending on bits_per_raw_sample.
 * NOTE(review): loop headers are elided in this excerpt; code below is
 * byte-identical, only comments were added. */
1505 static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
1508 sample[0]=s->sample_buffer +3;
1509 sample[1]=s->sample_buffer+w+6+3;
1513 memset(s->sample_buffer, 0, 2*(w+6)*sizeof(*s->sample_buffer));
/* swap the two line buffers: previous line becomes sample[0] */
1516 int16_t *temp = sample[0]; //FIXME try a normal buffer
1518 sample[0]= sample[1];
/* replicate edge samples so context/prediction never reads garbage */
1521 sample[1][-1]= sample[0][0 ];
1522 sample[0][ w]= sample[0][w-1];
1525 if(s->avctx->bits_per_raw_sample <= 8){
1526 decode_line(s, w, sample, plane_index, 8);
1528 src[x + stride*y]= sample[1][x];
1531 decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
1532 if(s->packed_at_lsb){
1534 ((uint16_t*)(src + stride*y))[x]= sample[1][x];
/* MSB-packed output: shift samples up to fill 16 bits */
1538 ((uint16_t*)(src + stride*y))[x]= sample[1][x] << (16 - s->avctx->bits_per_raw_sample);
1542 //STOP_TIMER("decode-line")}
/* decode_rgb_frame(): decode an RGB(A) slice. Planes are coded in G,B,R(,A)
 * order as inter-channel differences ('offset' = 1<<bits bias, applied in
 * elided lines), then written out either packed 32-bit or as three 16-bit
 * planes.
 * NOTE(review): several original lines are elided in this excerpt; code below
 * is byte-identical, only comments were added. */
1546 static void decode_rgb_frame(FFV1Context *s, uint8_t *src[3], int w, int h, int stride[3]){
1548 int16_t *sample[4][2];
1549 int lbd= s->avctx->bits_per_raw_sample <= 8;
1550 int bits= s->avctx->bits_per_raw_sample > 0 ? s->avctx->bits_per_raw_sample : 8;
1551 int offset= 1 << bits;
/* per-channel two-line rolling buffers, +3 margin for prediction context */
1553 sample[x][0] = s->sample_buffer + x*2 *(w+6) + 3;
1554 sample[x][1] = s->sample_buffer + (x*2+1)*(w+6) + 3;
1559 memset(s->sample_buffer, 0, 8*(w+6)*sizeof(*s->sample_buffer));
1562 for(p=0; p<3 + s->transparency; p++){
1563 int16_t *temp = sample[p][0]; //FIXME try a normal buffer
1565 sample[p][0]= sample[p][1];
1568 sample[p][1][-1]= sample[p][0][0 ];
1569 sample[p][0][ w]= sample[p][0][w-1];
/* chroma-difference channels need one extra bit (9 / bits+1) */
1571 decode_line(s, w, sample[p], (p+1)/2, 9);
1573 decode_line(s, w, sample[p], (p+1)/2, bits+1);
1576 int g= sample[0][1][x];
1577 int b= sample[1][1][x];
1578 int r= sample[2][1][x];
1579 int a= sample[3][1][x];
/* low-bit-depth: pack BGRA into one 32-bit word */
1588 *((uint32_t*)(src[0] + x*4 + stride[0]*y))= b + (g<<8) + (r<<16) + (a<<24);
1590 *((uint16_t*)(src[0] + x*2 + stride[0]*y)) = b;
1591 *((uint16_t*)(src[1] + x*2 + stride[1]*y)) = g;
1592 *((uint16_t*)(src[2] + x*2 + stride[2]*y)) = r;
/* decode_slice_header(): parse the per-slice header (version > 2 only).
 * Reads slice geometry (in units of width/num_h_slices etc.), per-plane
 * quant table indices, interlacing flags and sample aspect ratio.
 * Returns < 0 on invalid data (error paths partly elided in this excerpt).
 *
 * FIX(review): the quant_table_index range check used '>' which let
 * idx == quant_table_count through — one past the last valid table
 * (valid indices are 0..quant_table_count-1), causing an out-of-bounds
 * read of f->quant_tables[]/f->context_count[]. Changed to '>='.
 * All other code is byte-identical, comments only added. */
1598 static int decode_slice_header(FFV1Context *f, FFV1Context *fs){
1599 RangeCoder *c = &fs->c;
1600 uint8_t state[CONTEXT_SIZE];
1601 unsigned ps, i, context_count;
1602 memset(state, 128, sizeof(state));
1604 av_assert0(f->version > 2);
/* geometry is coded scaled by frame size; divided down below */
1606 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1607 fs->slice_y = get_symbol(c, state, 0) *f->height;
1608 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1609 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
1611 fs->slice_x /= f->num_h_slices;
1612 fs->slice_y /= f->num_v_slices;
1613 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1614 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
/* reject slices that fall outside the frame (uint64_t avoids overflow) */
1615 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1617 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1618 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1621 for(i=0; i<f->plane_count; i++){
1622 PlaneContext * const p= &fs->plane[i];
1623 int idx=get_symbol(c, state, 0);
/* '>=': idx must be strictly below quant_table_count (was '>', off-by-one) */
1624 if(idx >= (unsigned)f->quant_table_count){
1625 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
1628 p->quant_table_index= idx;
1629 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
1630 context_count= f->context_count[idx];
/* context tables are reallocated (elided) if the count grew */
1632 if(p->context_count < context_count){
1633 av_freep(&p->state);
1634 av_freep(&p->vlc_state);
1636 p->context_count= context_count;
/* ps: picture structure — 1=TFF interlaced, 2=BFF interlaced, else progressive */
1639 ps = get_symbol(c, state, 0);
1641 f->picture.interlaced_frame = 1;
1642 f->picture.top_field_first = 1;
1644 f->picture.interlaced_frame = 1;
1645 f->picture.top_field_first = 0;
1647 f->picture.interlaced_frame = 0;
1649 f->picture.sample_aspect_ratio.num = get_symbol(c, state, 0);
1650 f->picture.sample_aspect_ratio.den = get_symbol(c, state, 0);
/* decode_slice(): per-slice decoder worker, run via avctx->execute().
 * Parses the slice header (version > 2), resets state on keyframes, then
 * decodes each plane (YUV path) or the RGB frame, and finally validates the
 * bitstream end position, marking the slice damaged on mismatch.
 * NOTE(review): several original lines are elided in this excerpt; code below
 * is byte-identical, only comments were added. */
1655 static int decode_slice(AVCodecContext *c, void *arg){
1656 FFV1Context *fs= *(void**)arg;
1657 FFV1Context *f= fs->avctx->priv_data;
1658 int width, height, x, y;
1659 const int ps= (c->bits_per_raw_sample>8)+1;
1660 AVFrame * const p= &f->picture;
1663 if(init_slice_state(f, fs) < 0)
1664 return AVERROR(ENOMEM);
/* a bad slice header damages only this slice, not the whole frame */
1665 if(decode_slice_header(f, fs) < 0) {
1666 fs->slice_damaged = 1;
1667 return AVERROR_INVALIDDATA;
/* re-init: the header may have changed context counts */
1670 if(init_slice_state(f, fs) < 0)
1671 return AVERROR(ENOMEM);
1672 if(f->picture.key_frame)
1673 clear_slice_state(f, fs);
1674 width = fs->slice_width;
1675 height= fs->slice_height;
/* v3.2+ streams carry an extra fixed-state bit before the VLC part */
1680 if (f->version == 3 && f->minor_version > 1 || f->version > 3)
1681 get_rac(&fs->c, (uint8_t[]){129});
1682 fs->ac_byte_count = f->version > 2 || (!x&&!y) ? fs->c.bytestream - fs->c.bytestream_start - 1 : 0;
1683 init_get_bits(&fs->gb,
1684 fs->c.bytestream_start + fs->ac_byte_count,
1685 (fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count) * 8);
1688 av_assert1(width && height);
/* see encode_slice: round-up chroma dims via the negate-shift-negate idiom */
1689 if(f->colorspace==0){
1690 const int chroma_width = -((-width )>>f->chroma_h_shift);
1691 const int chroma_height= -((-height)>>f->chroma_v_shift);
1692 const int cx= x>>f->chroma_h_shift;
1693 const int cy= y>>f->chroma_v_shift;
1694 decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1696 if (f->chroma_planes){
1697 decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1698 decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1700 if (fs->transparency)
1701 decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
1703 uint8_t *planes[3] = {p->data[0] + ps*x + y*p->linesize[0],
1704 p->data[1] + ps*x + y*p->linesize[1],
1705 p->data[2] + ps*x + y*p->linesize[2]};
1706 decode_rgb_frame(fs, planes, width, height, p->linesize);
/* sanity check: after decoding, the coder must sit exactly at the slice end
 * (minus the 3-byte size trailer + optional 5-byte CRC, accounted via f->ec) */
1708 if(fs->ac && f->version > 2) {
1710 get_rac(&fs->c, (uint8_t[]){129});
1711 v = fs->c.bytestream_end - fs->c.bytestream - 2 - 5*f->ec;
1713 av_log(f->avctx, AV_LOG_ERROR, "bytestream end mismatching by %d\n", v);
1714 fs->slice_damaged = 1;
/* read_quant_table(): read one run-length-coded quantization sub-table.
 * Each symbol gives a run length of entries sharing quantizer level v; the
 * second half of the 256-entry table is the negated mirror of the first.
 * Returns -1 on an over-long run; (the level count, in elided lines) on
 * success.
 * NOTE(review): the loop condition intentionally tests i (the table position,
 * advanced in an elided line) while v counts levels. Code below is
 * byte-identical, only comments were added. */
1723 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale){
1726 uint8_t state[CONTEXT_SIZE];
1728 memset(state, 128, sizeof(state));
1730 for(v=0; i<128 ; v++){
1731 unsigned len= get_symbol(c, state, 0) + 1;
/* run may not extend past entry 127 */
1733 if(len > 128 - i) return -1;
1736 quant_table[i] = scale*v;
/* mirror: entries 129..255 are the negatives of 127..1, 128 of 127 */
1741 for(i=1; i<128; i++){
1742 quant_table[256-i]= -quant_table[i];
1744 quant_table[128]= -quant_table[127];
/* read_quant_tables(): read all MAX_CONTEXT_INPUTS sub-tables; the total
 * context count is the product of the per-input level counts.
 * A read_quant_table() failure (-1) makes context_count negative, which the
 * unsigned '> 32768U' comparison also catches (negative promotes to huge).
 * Returns the folded context count (symmetric contexts share state).
 * NOTE(review): error-return lines are elided in this excerpt; code below is
 * byte-identical, only comments were added. */
1749 static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
1751 int context_count=1;
1754 context_count*= read_quant_table(c, quant_table[i], context_count);
1755 if(context_count > 32768U){
/* (count+1)/2: positive/negative context pairs are merged */
1759 return (context_count+1)/2;
/* read_extra_header(): parse the version>=2 global header stored in
 * avctx->extradata: version, coder type, state-transition deltas, colorspace,
 * bit depth, chroma layout, slice grid, quant tables, optional initial
 * context states, error-check flag, and (for v>2) a trailing CRC over the
 * whole extradata.
 * NOTE(review): several original lines are elided in this excerpt; code below
 * is byte-identical, only comments were added. */
1762 static int read_extra_header(FFV1Context *f){
1763 RangeCoder * const c= &f->c;
1764 uint8_t state[CONTEXT_SIZE];
/* state2: one context per CONTEXT_SIZE byte position for initial states */
1766 uint8_t state2[32][CONTEXT_SIZE];
1768 memset(state2, 128, sizeof(state2));
1769 memset(state, 128, sizeof(state));
1771 ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size);
1772 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1774 f->version= get_symbol(c, state, 0);
/* version > 2: last 4 extradata bytes are the CRC, not coder payload */
1775 if(f->version > 2) {
1776 c->bytestream_end -= 4;
1777 f->minor_version= get_symbol(c, state, 0);
1779 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
/* custom range-coder state transition table, coded as deltas */
1781 for(i=1; i<256; i++){
1782 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1785 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1786 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1787 f->chroma_planes= get_rac(c, state);
1788 f->chroma_h_shift= get_symbol(c, state, 0);
1789 f->chroma_v_shift= get_symbol(c, state, 0);
1790 f->transparency= get_rac(c, state);
1791 f->plane_count= 2 + f->transparency;
1792 f->num_h_slices= 1 + get_symbol(c, state, 0);
1793 f->num_v_slices= 1 + get_symbol(c, state, 0);
/* each slice must be at least one pixel wide/tall */
1794 if(f->num_h_slices > (unsigned)f->width || f->num_v_slices > (unsigned)f->height){
1795 av_log(f->avctx, AV_LOG_ERROR, "too many slices\n");
1799 f->quant_table_count= get_symbol(c, state, 0);
1800 if(f->quant_table_count > (unsigned)MAX_QUANT_TABLES)
1802 for(i=0; i<f->quant_table_count; i++){
1803 if((f->context_count[i]= read_quant_tables(c, f->quant_tables[i])) < 0){
1804 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1809 if(allocate_initial_states(f) < 0)
1810 return AVERROR(ENOMEM);
/* optional per-table initial context states, delta-coded against the
 * previous context (128 for the first) */
1812 for(i=0; i<f->quant_table_count; i++){
1813 if(get_rac(c, state)){
1814 for(j=0; j<f->context_count[i]; j++){
1815 for(k=0; k<CONTEXT_SIZE; k++){
1816 int pred= j ? f->initial_states[i][j-1][k] : 128;
1817 f->initial_states[i][j][k]= (pred+get_symbol(c, state2[k], 1))&0xFF;
1824 f->ec = get_symbol(c, state, 0);
/* CRC over the full extradata must come out as 0 (check elided) */
1829 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, f->avctx->extradata, f->avctx->extradata_size);
1831 av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", v);
1832 return AVERROR_INVALIDDATA;
/* read_header(): parse the per-keyframe header (versions 0/1 carry the full
 * parameter set here; version >= 2 read most of it from extradata via
 * read_extra_header). Maps colorspace/bit-depth/chroma-shift onto an
 * AVPixelFormat, determines the slice count, and (re)initializes per-slice
 * geometry and per-plane quant tables/contexts.
 *
 * FIX(review): the version-2 quant_table_index range check used '>' which
 * let idx == quant_table_count through — one past the last valid table
 * (valid indices are 0..quant_table_count-1), causing an out-of-bounds read
 * of f->quant_tables[]/f->context_count[]. Changed to '>=' (same defect as
 * in decode_slice_header). All other code is byte-identical, comments only
 * added. */
1839 static int read_header(FFV1Context *f){
1840 uint8_t state[CONTEXT_SIZE];
1841 int i, j, context_count = -1; //-1 to avoid warning
1842 RangeCoder * const c= &f->slice_context[0]->c;
1844 memset(state, 128, sizeof(state));
/* versions 0/1: whole parameter set lives in the frame header */
1847 unsigned v= get_symbol(c, state, 0);
1849 av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
1850 return AVERROR_INVALIDDATA;
1853 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
1855 for(i=1; i<256; i++){
1856 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1859 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1861 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1862 f->chroma_planes= get_rac(c, state);
1863 f->chroma_h_shift= get_symbol(c, state, 0);
1864 f->chroma_v_shift= get_symbol(c, state, 0);
1865 f->transparency= get_rac(c, state);
1866 f->plane_count= 2 + f->transparency;
/* map colorspace + depth + chroma shifts to a pixel format; the 0xHV switch
 * key packs h_shift into the high nibble and v_shift into the low one */
1869 if(f->colorspace==0){
1870 if(!f->transparency && !f->chroma_planes){
1871 if (f->avctx->bits_per_raw_sample<=8)
1872 f->avctx->pix_fmt= AV_PIX_FMT_GRAY8;
1874 f->avctx->pix_fmt= AV_PIX_FMT_GRAY16;
1875 }else if(f->avctx->bits_per_raw_sample<=8 && !f->transparency){
1876 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1877 case 0x00: f->avctx->pix_fmt= AV_PIX_FMT_YUV444P; break;
1878 case 0x01: f->avctx->pix_fmt= AV_PIX_FMT_YUV440P; break;
1879 case 0x10: f->avctx->pix_fmt= AV_PIX_FMT_YUV422P; break;
1880 case 0x11: f->avctx->pix_fmt= AV_PIX_FMT_YUV420P; break;
1881 case 0x20: f->avctx->pix_fmt= AV_PIX_FMT_YUV411P; break;
1882 case 0x22: f->avctx->pix_fmt= AV_PIX_FMT_YUV410P; break;
1884 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1887 }else if(f->avctx->bits_per_raw_sample<=8 && f->transparency){
1888 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1889 case 0x00: f->avctx->pix_fmt= AV_PIX_FMT_YUVA444P; break;
1890 case 0x10: f->avctx->pix_fmt= AV_PIX_FMT_YUVA422P; break;
1891 case 0x11: f->avctx->pix_fmt= AV_PIX_FMT_YUVA420P; break;
1893 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1896 }else if(f->avctx->bits_per_raw_sample==9) {
1898 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1899 case 0x00: f->avctx->pix_fmt= AV_PIX_FMT_YUV444P9; break;
1900 case 0x10: f->avctx->pix_fmt= AV_PIX_FMT_YUV422P9; break;
1901 case 0x11: f->avctx->pix_fmt= AV_PIX_FMT_YUV420P9; break;
1903 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1906 }else if(f->avctx->bits_per_raw_sample==10) {
1908 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1909 case 0x00: f->avctx->pix_fmt= AV_PIX_FMT_YUV444P10; break;
1910 case 0x10: f->avctx->pix_fmt= AV_PIX_FMT_YUV422P10; break;
1911 case 0x11: f->avctx->pix_fmt= AV_PIX_FMT_YUV420P10; break;
1913 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
/* fallback for other depths: 16-bit planar YUV */
1917 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1918 case 0x00: f->avctx->pix_fmt= AV_PIX_FMT_YUV444P16; break;
1919 case 0x10: f->avctx->pix_fmt= AV_PIX_FMT_YUV422P16; break;
1920 case 0x11: f->avctx->pix_fmt= AV_PIX_FMT_YUV420P16; break;
1922 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
/* colorspace 1: RGB — no chroma subsampling allowed */
1926 }else if(f->colorspace==1){
1927 if(f->chroma_h_shift || f->chroma_v_shift){
1928 av_log(f->avctx, AV_LOG_ERROR, "chroma subsampling not supported in this colorspace\n");
1931 if(f->avctx->bits_per_raw_sample==9)
1932 f->avctx->pix_fmt= AV_PIX_FMT_GBRP9;
1933 else if(f->avctx->bits_per_raw_sample==10)
1934 f->avctx->pix_fmt= AV_PIX_FMT_GBRP10;
1935 else if(f->avctx->bits_per_raw_sample==12)
1936 f->avctx->pix_fmt= AV_PIX_FMT_GBRP12;
1937 else if(f->avctx->bits_per_raw_sample==14)
1938 f->avctx->pix_fmt= AV_PIX_FMT_GBRP14;
1940 if(f->transparency) f->avctx->pix_fmt= AV_PIX_FMT_RGB32;
1941 else f->avctx->pix_fmt= AV_PIX_FMT_0RGB32;
1943 av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
1947 av_dlog(f->avctx, "%d %d %d\n",
1948 f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
/* versions 0/1 carry a single quant table set in the frame header */
1950 context_count= read_quant_tables(c, f->quant_table);
1951 if(context_count < 0){
1952 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1955 }else if(f->version < 3){
1956 f->slice_count= get_symbol(c, state, 0);
/* version >= 3: count slices by walking the size trailers backwards from
 * the end of the packet */
1958 const uint8_t *p= c->bytestream_end;
1959 for(f->slice_count = 0; f->slice_count < MAX_SLICES && 3 < p - c->bytestream_start; f->slice_count++){
1960 int trailer = 3 + 5*!!f->ec;
1961 int size = AV_RB24(p-trailer);
1962 if(size + trailer > p - c->bytestream_start)
1964 p -= size + trailer;
1967 if(f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0){
1968 av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid\n", f->slice_count);
1972 for(j=0; j<f->slice_count; j++){
1973 FFV1Context *fs= f->slice_context[j];
1975 fs->packed_at_lsb= f->packed_at_lsb;
1977 fs->slice_damaged = 0;
/* version 2: slice geometry is in the frame header (v3 reads it per slice
 * in decode_slice_header) */
1979 if(f->version == 2){
1980 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1981 fs->slice_y = get_symbol(c, state, 0) *f->height;
1982 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1983 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
1985 fs->slice_x /= f->num_h_slices;
1986 fs->slice_y /= f->num_v_slices;
1987 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1988 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
1989 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1991 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1992 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1996 for(i=0; i<f->plane_count; i++){
1997 PlaneContext * const p= &fs->plane[i];
1999 if(f->version == 2){
2000 int idx=get_symbol(c, state, 0);
/* '>=': idx must be strictly below quant_table_count (was '>', off-by-one) */
2001 if(idx >= (unsigned)f->quant_table_count){
2002 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
2005 p->quant_table_index= idx;
2006 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
2007 context_count= f->context_count[idx];
2009 memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
/* drop stale context tables if the count changed (realloc happens later) */
2012 if(f->version <= 2){
2013 av_assert0(context_count>=0);
2014 if(p->context_count < context_count){
2015 av_freep(&p->state);
2016 av_freep(&p->vlc_state);
2018 p->context_count= context_count;
/* decode_init(): decoder init — parse the global header from extradata (if
 * present) and set up the per-slice contexts.
 * NOTE(review): common init and return lines are elided in this excerpt;
 * code below is byte-identical, only comments were added. */
2025 static av_cold int decode_init(AVCodecContext *avctx)
2027 FFV1Context *f = avctx->priv_data;
2031 if(avctx->extradata && read_extra_header(f) < 0)
2034 if(init_slice_contexts(f) < 0)
/* decode_frame(): top-level decoder entry point. Reads the keyframe bit
 * (and header on keyframes), locates each slice by walking the 24-bit size
 * trailers backwards from the packet end, optionally verifies per-slice CRCs,
 * decodes all slices in parallel, and conceals damaged slices by copying the
 * co-located area from the previous picture.
 * NOTE(review): many original lines are elided in this excerpt; code below is
 * byte-identical, only comments were added. */
2040 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
2041 const uint8_t *buf = avpkt->data;
2042 int buf_size = avpkt->size;
2043 FFV1Context *f = avctx->priv_data;
2044 RangeCoder * const c= &f->slice_context[0]->c;
2045 AVFrame * const p= &f->picture;
2047 uint8_t keystate= 128;
2048 const uint8_t *buf_p;
2050 AVFrame *picture = data;
2052 /* release previously stored data */
2054 avctx->release_buffer(avctx, p);
2056 ff_init_range_decoder(c, buf, buf_size);
2057 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
2060 p->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
/* keyframe: (re)parse the header; remember whether it succeeded so later
 * inter frames can refuse to decode against a broken state */
2061 if(get_rac(c, &keystate)){
2063 f->key_frame_ok = 0;
2064 if(read_header(f) < 0)
2066 f->key_frame_ok = 1;
2068 if (!f->key_frame_ok) {
2069 av_log(avctx, AV_LOG_ERROR, "Cant decode non keyframe without valid keyframe\n");
2070 return AVERROR_INVALIDDATA;
2075 p->reference= 3; //for error concealment
2076 if(avctx->get_buffer(avctx, p) < 0){
2077 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
2081 if(avctx->debug&FF_DEBUG_PICT_INFO)
2082 av_log(avctx, AV_LOG_DEBUG, "ver:%d keyframe:%d coder:%d ec:%d slices:%d bps:%d\n",
2083 f->version, p->key_frame, f->ac, f->ec, f->slice_count, f->avctx->bits_per_raw_sample);
/* walk slices back-to-front using the trailing 24-bit sizes; slice 0 of
 * version<=2 streams has no trailer and takes whatever remains */
2085 buf_p= buf + buf_size;
2086 for(i=f->slice_count-1; i>=0; i--){
2087 FFV1Context *fs= f->slice_context[i];
2088 int trailer = 3 + 5*!!f->ec;
2091 if(i || f->version>2) v = AV_RB24(buf_p-trailer)+trailer;
2092 else v = buf_p - c->bytestream_start;
2093 if(buf_p - c->bytestream_start < v){
2094 av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
/* CRC covers slice payload + trailer and must fold to 0 (check elided);
 * on mismatch the slice is only flagged damaged, decode continues */
2100 unsigned crc = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, v);
2102 int64_t ts = avpkt->pts != AV_NOPTS_VALUE ? avpkt->pts : avpkt->dts;
2103 av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!", crc);
2104 if(ts != AV_NOPTS_VALUE && avctx->pkt_timebase.num) {
2105 av_log(f->avctx, AV_LOG_ERROR, "at %f seconds\n",ts*av_q2d(avctx->pkt_timebase));
2106 } else if(ts != AV_NOPTS_VALUE) {
2107 av_log(f->avctx, AV_LOG_ERROR, "at %"PRId64"\n", ts);
2109 av_log(f->avctx, AV_LOG_ERROR, "\n");
2111 fs->slice_damaged = 1;
2116 ff_init_range_decoder(&fs->c, buf_p, v);
2118 fs->c.bytestream_end = (uint8_t *)(buf_p + v);
2121 avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
/* error concealment: copy damaged slice regions from the last picture */
2123 for(i=f->slice_count-1; i>=0; i--){
2124 FFV1Context *fs= f->slice_context[i];
2126 if(fs->slice_damaged && f->last_picture.data[0]){
2127 uint8_t *dst[4], *src[4];
2129 int sh = (j==1 || j==2) ? f->chroma_h_shift : 0;
2130 int sv = (j==1 || j==2) ? f->chroma_v_shift : 0;
2131 dst[j] = f->picture .data[j] + f->picture .linesize[j]*
2132 (fs->slice_y>>sv) + (fs->slice_x>>sh);
2133 src[j] = f->last_picture.data[j] + f->last_picture.linesize[j]*
2134 (fs->slice_y>>sv) + (fs->slice_x>>sh);
2136 av_image_copy(dst, f->picture.linesize, (const uint8_t **)src, f->last_picture.linesize,
2137 avctx->pix_fmt, fs->slice_width, fs->slice_height);
2141 f->picture_number++;
2144 *data_size = sizeof(AVFrame);
/* keep the decoded picture as reference for next frame's concealment */
2146 FFSWAP(AVFrame, f->picture, f->last_picture);
/* FFV1 decoder registration: shares common_end() with the encoder and
 * supports direct rendering and slice threading. */
2151 AVCodec ff_ffv1_decoder = {
2153 .type = AVMEDIA_TYPE_VIDEO,
2154 .id = AV_CODEC_ID_FFV1,
2155 .priv_data_size = sizeof(FFV1Context),
2156 .init = decode_init,
2157 .close = common_end,
2158 .decode = decode_frame,
2159 .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ |
2160 CODEC_CAP_SLICE_THREADS,
2161 .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
2164 #if CONFIG_FFV1_ENCODER
/* Encoder private options, AVClass and defaults.
 * slicecrc: -1 = auto (decided at init), 0 = off, 1 = CRC every slice. */
2166 #define OFFSET(x) offsetof(FFV1Context, x)
2167 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2168 static const AVOption options[] = {
2169 { "slicecrc", "Protect slices with CRCs", OFFSET(ec), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 1, VE},
2173 static const AVClass class = {
2174 .class_name = "ffv1 encoder",
2175 .item_name = av_default_item_name,
2177 .version = LIBAVUTIL_VERSION_INT,
2180 static const AVCodecDefault ffv1_defaults[] = {
2185 AVCodec ff_ffv1_encoder = {
2187 .type = AVMEDIA_TYPE_VIDEO,
2188 .id = AV_CODEC_ID_FFV1,
2189 .priv_data_size = sizeof(FFV1Context),
2190 .init = encode_init,
2191 .encode2 = encode_frame,
2192 .close = common_end,
2193 .capabilities = CODEC_CAP_SLICE_THREADS,
2194 .defaults = ffv1_defaults,
2195 .pix_fmts = (const enum AVPixelFormat[]){
2196 AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV444P,
2197 AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P,
2198 AV_PIX_FMT_YUV410P, AV_PIX_FMT_0RGB32, AV_PIX_FMT_RGB32, AV_PIX_FMT_YUV420P16,
2199 AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16, AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV422P9,
2200 AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
2201 AV_PIX_FMT_GRAY16, AV_PIX_FMT_GRAY8, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
2202 AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14,
2205 .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
2206 .priv_class = &class,