2 * FFV1 codec for libavcodec
4 * Copyright (c) 2003-2012 Michael Niedermayer <michaelni@gmx.at>
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * FF Video Codec 1 (a lossless codec)
33 #include "rangecoder.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/crc.h"
39 #include "libavutil/opt.h"
40 #include "libavutil/imgutils.h"
41 #include "libavutil/timer.h"
43 #ifdef __INTEL_COMPILER
49 #define CONTEXT_SIZE 32
51 #define MAX_QUANT_TABLES 8
52 #define MAX_CONTEXT_INPUTS 5
54 extern const uint8_t ff_log2_run[41];
// 5-level difference quantizer (values -2..2), indexed with (diff & 0xFF) i.e.
// two's-complement wraparound (see get_context); antisymmetric around 0.
// Name suggests the variant used for >8-bit ("10bit") content — TODO confirm,
// selection happens in encode_init. Note: listing is elided; closing brace of
// the initializer is not visible in this excerpt.
56 static const int8_t quant5_10bit[256]={
57 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
58 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
59 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
60 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
61 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
62 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
63 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
64 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
65 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
66 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
67 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
68 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
69 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,
70 -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
71 -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
72 -1,-1,-1,-1,-1,-1,-0,-0,-0,-0,-0,-0,-0,-0,-0,-0,
// 5-level difference quantizer for 8-bit content (values -2..2), indexed with
// (diff & 0xFF); used as the finer inputs of the 5-context-input table in
// encode_init. Closing brace of the initializer is elided in this excerpt.
75 static const int8_t quant5[256]={
76 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
77 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
78 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
79 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
80 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
85 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
86 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
87 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
88 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
89 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
90 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
91 -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-1,-1,-1,
// 9-level difference quantizer (values -4..4), indexed with (diff & 0xFF);
// used for the 3-input context model on >8-bit content (see encode_init).
// Closing brace of the initializer is elided in this excerpt.
94 static const int8_t quant9_10bit[256]={
95 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3,
97 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
98 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
99 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
100 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
101 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
102 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
103 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
104 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
105 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
106 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
107 -4,-4,-4,-4,-4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,
108 -3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,
109 -3,-3,-3,-3,-3,-3,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
110 -2,-2,-2,-2,-1,-1,-1,-1,-1,-1,-1,-1,-0,-0,-0,-0,
// 11-level difference quantizer (values -5..5), indexed with (diff & 0xFF);
// the default quantizer for 8-bit content (context counts in encode_init are
// derived from 11 levels). Closing brace elided in this excerpt.
113 static const int8_t quant11[256]={
114 0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4,
115 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
116 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
117 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
118 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
119 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
120 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
121 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
122 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
123 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
124 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
125 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
126 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,
127 -5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-4,-4,
128 -4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,
129 -4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-3,-3,-2,-2,-2,-1,
// Default range-coder state transition table for version >= 2 bitstreams.
// Copied into s->state_transition in encode_init and transmitted in the
// header as deltas against c->one_state (see write_header/write_extra_header).
132 static const uint8_t ver2_state[256]= {
133 0, 10, 10, 10, 10, 16, 16, 16, 28, 16, 16, 29, 42, 49, 20, 49,
134 59, 25, 26, 26, 27, 31, 33, 33, 33, 34, 34, 37, 67, 38, 39, 39,
135 40, 40, 41, 79, 43, 44, 45, 45, 48, 48, 64, 50, 51, 52, 88, 52,
136 53, 74, 55, 57, 58, 58, 74, 60, 101, 61, 62, 84, 66, 66, 68, 69,
137 87, 82, 71, 97, 73, 73, 82, 75, 111, 77, 94, 78, 87, 81, 83, 97,
138 85, 83, 94, 86, 99, 89, 90, 99, 111, 92, 93, 134, 95, 98, 105, 98,
139 105, 110, 102, 108, 102, 118, 103, 106, 106, 113, 109, 112, 114, 112, 116, 125,
140 115, 116, 117, 117, 126, 119, 125, 121, 121, 123, 145, 124, 126, 131, 127, 129,
141 165, 130, 132, 138, 133, 135, 145, 136, 137, 139, 146, 141, 143, 142, 144, 148,
142 147, 155, 151, 149, 151, 150, 152, 157, 153, 154, 156, 168, 158, 162, 161, 160,
143 172, 163, 169, 164, 166, 184, 167, 170, 177, 174, 171, 173, 182, 176, 180, 178,
144 175, 189, 179, 181, 186, 183, 192, 185, 200, 187, 191, 188, 190, 197, 193, 196,
145 197, 194, 195, 196, 198, 202, 199, 201, 210, 203, 207, 204, 205, 206, 208, 214,
146 209, 211, 221, 212, 213, 215, 224, 216, 217, 218, 219, 220, 222, 228, 223, 225,
147 226, 224, 227, 229, 240, 230, 231, 232, 233, 234, 235, 236, 238, 239, 237, 242,
148 241, 243, 242, 244, 245, 246, 247, 248, 249, 250, 251, 252, 252, 253, 254, 255,
// Per-context adaptive Golomb-Rice coder state (drift/error_sum/bias/count
// fields are used in update_vlc_state; the member list is elided here).
151 typedef struct VlcState{

// Per-plane coding state. NOTE(review): struct body partially elided.
158 typedef struct PlaneContext{
159 int16_t quant_table[MAX_CONTEXT_INPUTS][256] // per-input difference quantizers (copied from FFV1Context in encode_init)
160 int quant_table_index; // which of FFV1Context.quant_tables this plane uses
162 uint8_t (*state)[CONTEXT_SIZE]; // range-coder states: one CONTEXT_SIZE group per context (allocated in init_slice_state)
164 uint8_t interlace_bit_state[2];
167 #define MAX_SLICES 256

// Main codec context; also used per slice (slice contexts are memcpy'd from
// the main one in init_slice_contexts). NOTE(review): member list is
// partially elided in this excerpt.
169 typedef struct FFV1Context{

171 AVCodecContext *avctx;

175 uint64_t rc_stat[256][2] // 2-pass stats: per range-coder state, 0/1 bit counts
176 uint64_t (*rc_stat2[MAX_QUANT_TABLES])[32][2]; // 2-pass stats per quant table: [context][symbol state 0..31][bit]

180 int chroma_h_shift, chroma_v_shift;

186 AVFrame last_picture;

188 int ac; ///< 1=range coder <-> 0=golomb rice
189 int ac_byte_count; ///< number of bytes used for AC coding
190 PlaneContext plane[MAX_PLANES];
191 int16_t quant_table[MAX_CONTEXT_INPUTS][256]; // active quantizer set (selected by avctx->context_model)
192 int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][256]; // all transmitted quantizer sets
193 int context_count[MAX_QUANT_TABLES]; // number of contexts produced by each quantizer set
194 uint8_t state_transition[256]; // range-coder adaptation table (ver2_state, possibly resorted by sort_stt)
195 uint8_t (*initial_states[MAX_QUANT_TABLES])[32]; // per-context initial range-coder states from 2-pass stats

198 int16_t *sample_buffer; // ring of decoded/encoded sample lines (see encode_plane)

205 int quant_table_count;

209 struct FFV1Context *slice_context[MAX_SLICES];

217 int bits_per_raw_sample;
// fold(): body elided in this excerpt. Presumably reduces a prediction
// residual into the signed bits-bit range (callers: encode_line applies it to
// diff, get_vlc_symbol to v + bias) — TODO confirm against the full source.
220 static av_always_inline int fold(int diff, int bits){
// Median predictor: predicts the current sample from the left (L), top (T)
// and top-left (LT) neighbours as the median of L, T and the planar guess
// L + T - LT. src points at the current sample, last at the sample above it.
232 static inline int predict(int16_t *src, int16_t *last)

234 const int LT= last[-1];
235 const int T= last[ 0];
236 const int L = src[-1];

238 return mid_pred(L, L + T - LT, T);
// Computes the modelling context for the current sample from quantized
// neighbour differences. Uses the 3-input model (L-LT, LT-T, T-RT) unless the
// plane's 4th quantizer is non-trivial (p->quant_table[3][127] != 0), in which
// case two extra inputs (LL-L and TT-T, from the second-previous column/row)
// are added for the 5-input model. Differences are masked with 0xFF so they
// index the 256-entry quantizer tables via two's-complement wraparound.
241 static inline int get_context(PlaneContext *p, int16_t *src,
242 int16_t *last, int16_t *last2)

244 const int LT= last[-1];
245 const int T= last[ 0];
246 const int RT= last[ 1];
247 const int L = src[-1];

249 if(p->quant_table[3][127]){
250 const int TT= last2[0];
251 const int LL= src[-2];
252 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF]
253 +p->quant_table[3][(LL-L) & 0xFF] + p->quant_table[4][(TT-T) & 0xFF];

255 return p->quant_table[0][(L-LT) & 0xFF] + p->quant_table[1][(LT-T) & 0xFF] + p->quant_table[2][(T-RT) & 0xFF];
258 static void find_best_state(uint8_t best_state[256][256], const uint8_t one_state[256]){
263 l2tab[i]= log2(i/256.0);
265 for(i=0; i<256; i++){
266 double best_len[256];
272 for(j=FFMAX(i-10,1); j<FFMIN(i+11,256); j++){
276 for(k=0; k<256; k++){
277 double newocc[256]={0};
278 for(m=0; m<256; m++){
280 len -=occ[m]*( p *l2tab[ m]
281 + (1-p)*l2tab[256-m]);
284 if(len < best_len[k]){
288 for(m=0; m<256; m++){
290 newocc[ one_state[ m]] += occ[m]* p ;
291 newocc[256-one_state[256-m]] += occ[m]*(1-p);
294 memcpy(occ, newocc, sizeof(occ));
// Writes an integer v with the range coder using an exp-golomb-like layout:
// state[0] flags v==0; states 1..10 code the exponent e = log2(|v|) in unary;
// states 22..31 code the mantissa bits; states 11..21 code the sign. When
// rc_stat/rc_stat2 are non-NULL (2-pass mode) the local put_rac wrapper also
// accumulates per-state bit statistics. Exponents beyond the 10 dedicated
// states share the last state via FFMIN(i,9). Lines elided in this excerpt.
300 static av_always_inline av_flatten void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int is_signed, uint64_t rc_stat[256][2], uint64_t rc_stat2[32][2]){

303 #define put_rac(C,S,B) \
307 rc_stat2[(S)-state][B]++;\

313 const int a= FFABS(v);
314 const int e= av_log2(a);
315 put_rac(c, state+0, 0);
318 put_rac(c, state+1+i, 1); //1..10
320 put_rac(c, state+1+i, 0);

322 for(i=e-1; i>=0; i--){
323 put_rac(c, state+22+i, (a>>i)&1); //22..31

327 put_rac(c, state+11 + e, v < 0); //11..21

330 put_rac(c, state+1+FFMIN(i,9), 1); //1..10
332 put_rac(c, state+1+9, 0);

334 for(i=e-1; i>=0; i--){
335 put_rac(c, state+22+FFMIN(i,9), (a>>i)&1); //22..31

339 put_rac(c, state+11 + 10, v < 0); //11..21

342 put_rac(c, state+0, 1);
// Out-of-line wrapper around put_symbol_inline without statistics gathering;
// used on cold paths (headers, quant tables) to avoid code bloat.
347 static av_noinline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed){
348 put_symbol_inline(c, state, v, is_signed, NULL, NULL);
// Decoder counterpart of put_symbol_inline: reads the zero flag (state 0),
// the unary exponent (states 1..10), mantissa bits (states 22..31) and, if
// is_signed, the sign (states 11..21). Lines elided in this excerpt.
351 static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed){
352 if(get_rac(c, state+0))

357 while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10

362 for(i=e-1; i>=0; i--){
363 a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31

366 e= -(is_signed && get_rac(c, state+11 + FFMIN(e, 10))); //11..21
// Out-of-line wrapper around get_symbol_inline for cold call sites.
371 static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
372 return get_symbol_inline(c, state, is_signed);
// Updates the adaptive Golomb-Rice context after coding residual v:
// accumulates |v| into error_sum and v into drift, halves the accumulators
// when count reaches 128 (a rescaling period, flagged FIXME as tunable), and
// nudges bias by +-1 within [-128,127] to track the drift. Lines elided here.
375 static inline void update_vlc_state(VlcState * const state, const int v){
376 int drift= state->drift;
377 int count= state->count;
378 state->error_sum += FFABS(v);

381 if(count == 128){ //FIXME variable
384 state->error_sum >>= 1;

389 if(state->bias > -128) state->bias--;

395 if(state->bias < 127) state->bias++;
// Codes residual v with an adaptive signed Golomb-Rice code: subtracts the
// context bias and folds into range, derives the Rice parameter k from
// error_sum (the while loop doubles i until it reaches error_sum), maps v to
// an interleaved sign code using the drift/count sign trick, then emits it
// with set_sr_golomb (escape limit 12) and updates the context state.
406 static inline void put_vlc_symbol(PutBitContext *pb, VlcState * const state, int v, int bits){

408 v = fold(v - state->bias, bits);

412 while(i < state->error_sum){ //FIXME optimize

420 if(k==0 && 2*state->drift <= - state->count) code= v ^ (-1);
423 code= v ^ ((2*state->drift + state->count)>>31);

426 av_dlog(NULL, "v:%d/%d bias:%d error:%d drift:%d count:%d k:%d\n", v, code,
427 state->bias, state->error_sum, state->drift, state->count, k);
428 set_sr_golomb(pb, code, k, 12, bits);

430 update_vlc_state(state, v);
// Decoder counterpart of put_vlc_symbol: recomputes k from error_sum, reads
// the signed Golomb-Rice code, undoes the drift-based sign mapping, adds the
// context bias back (folded into range) and updates the adaptive state.
433 static inline int get_vlc_symbol(GetBitContext *gb, VlcState * const state, int bits){

438 while(i < state->error_sum){ //FIXME optimize

445 v= get_sr_golomb(gb, k, 12, bits);
446 av_dlog(NULL, "v:%d bias:%d error:%d drift:%d count:%d k:%d",
447 v, state->bias, state->error_sum, state->drift, state->count, k);

450 if(k==0 && 2*state->drift <= - state->count) v ^= (-1);
452 v ^= ((2*state->drift + state->count)>>31);

455 ret= fold(v + state->bias, bits);

457 update_vlc_state(state, v);
462 #if CONFIG_FFV1_ENCODER
// Encodes one line of w samples for one plane. Per sample: derives the
// context (get_context), computes the prediction residual against the median
// predictor, folds it into range, and codes it either with the range coder
// (s->ac, optionally gathering 2-pass stats under CODEC_FLAG_PASS1) or with
// Golomb-Rice plus a run-length mode that is entered when the context is 0.
// Both coders check up-front that the output buffer has headroom and error
// out with "encoded frame too large" otherwise. Lines elided in this excerpt.
463 static av_always_inline int encode_line(FFV1Context *s, int w,
465 int plane_index, int bits)

467 PlaneContext * const p= &s->plane[plane_index];
468 RangeCoder * const c= &s->c;

470 int run_index= s->run_index;

475 if(c->bytestream_end - c->bytestream < w*20){
476 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");

480 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < w*4){
481 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");

489 context= get_context(p, sample[0]+x, sample[1]+x, sample[2]+x);
490 diff= sample[0][x] - predict(sample[0]+x, sample[1]+x);

497 diff= fold(diff, bits);

500 if(s->flags & CODEC_FLAG_PASS1){
501 put_symbol_inline(c, p->state[context], diff, 1, s->rc_stat, s->rc_stat2[p->quant_table_index][context]);

503 put_symbol_inline(c, p->state[context], diff, 1, NULL, NULL);

506 if(context == 0) run_mode=1;

511 while(run_count >= 1<<ff_log2_run[run_index]){
512 run_count -= 1<<ff_log2_run[run_index];

514 put_bits(&s->pb, 1, 1);

517 put_bits(&s->pb, 1 + ff_log2_run[run_index], run_count);
518 if(run_index) run_index--;

527 av_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
528 run_count, run_index, run_mode, x,
529 (int)put_bits_count(&s->pb));

532 put_vlc_symbol(&s->pb, &p->vlc_state[context], diff, bits);

536 while(run_count >= 1<<ff_log2_run[run_index]){
537 run_count -= 1<<ff_log2_run[run_index];

539 put_bits(&s->pb, 1, 1);

543 put_bits(&s->pb, 1, 1);

545 s->run_index= run_index;
// Encodes a full luma/chroma/alpha plane. Maintains a ring of 2 (or 3 when
// context_model needs two previous lines) sample lines in s->sample_buffer,
// each padded with 3 guard samples on each side; edge samples are replicated
// so the predictor's out-of-bounds neighbours are defined. 8-bit input is
// copied directly; 16-bit input is read as uint16_t, shifted down when the
// significant bits are packed at the MSB side (!packed_at_lsb). Each line is
// then handed to encode_line with the plane's bit depth.
550 static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){

552 const int ring_size= s->avctx->context_model ? 3 : 2;

556 memset(s->sample_buffer, 0, ring_size*(w+6)*sizeof(*s->sample_buffer));

559 for(i=0; i<ring_size; i++)
560 sample[i]= s->sample_buffer + (w+6)*((h+i-y)%ring_size) + 3;

562 sample[0][-1]= sample[1][0 ];
563 sample[1][ w]= sample[1][w-1];

565 if(s->bits_per_raw_sample<=8){
567 sample[0][x]= src[x + stride*y];
569 encode_line(s, w, sample, plane_index, 8);

571 if(s->packed_at_lsb){
573 sample[0][x]= ((uint16_t*)(src + stride*y))[x];

577 sample[0][x]= ((uint16_t*)(src + stride*y))[x] >> (16 - s->bits_per_raw_sample);

580 encode_line(s, w, sample, plane_index, s->bits_per_raw_sample);

582 //STOP_TIMER("encode line")}
// Encodes an RGB(A) frame. Reads either packed 32-bit BGRA (<=8 bit) or three
// planar 16-bit components, keeps per-component line rings like encode_plane,
// and codes each component with encode_line at bits+1 precision (9 for 8-bit
// input) — the extra bit accommodating the JPEG2000-RCT-style decorrelation
// whose transform lines are elided in this excerpt. Alpha is coded as a
// fourth component when s->transparency is set.
586 static void encode_rgb_frame(FFV1Context *s, uint8_t *src[3], int w, int h, int stride[3]){

588 const int ring_size= s->avctx->context_model ? 3 : 2;
589 int16_t *sample[4][3];
590 int lbd= s->avctx->bits_per_raw_sample <= 8;
591 int bits= s->avctx->bits_per_raw_sample > 0 ? s->avctx->bits_per_raw_sample : 8;
592 int offset= 1 << bits;

595 memset(s->sample_buffer, 0, ring_size*4*(w+6)*sizeof(*s->sample_buffer));

598 for(i=0; i<ring_size; i++)
600 sample[p][i]= s->sample_buffer + p*ring_size*(w+6) + ((h+i-y)%ring_size)*(w+6) + 3;

603 int b,g,r,av_uninit(a);
605 unsigned v= *((uint32_t*)(src[0] + x*4 + stride[0]*y));

611 b= *((uint16_t*)(src[0] + x*2 + stride[0]*y));
612 g= *((uint16_t*)(src[1] + x*2 + stride[1]*y));
613 r= *((uint16_t*)(src[2] + x*2 + stride[2]*y));

627 for(p=0; p<3 + s->transparency; p++){
628 sample[p][0][-1]= sample[p][1][0 ];
629 sample[p][1][ w]= sample[p][1][w-1];

631 encode_line(s, w, sample[p], (p+1)/2, 9);

633 encode_line(s, w, sample[p], (p+1)/2, bits+1);
// Writes one 256-entry quantizer table in compressed form: only the positions
// 1..127 where the value changes are coded, as gaps (i - last - 1) between
// consecutive change points, using a fresh range-coder state. The decoder can
// mirror the table's antisymmetric second half, so only half is transmitted.
638 static void write_quant_table(RangeCoder *c, int16_t *quant_table){

641 uint8_t state[CONTEXT_SIZE];
642 memset(state, 128, sizeof(state));

644 for(i=1; i<128 ; i++){
645 if(quant_table[i] != quant_table[i-1]){
646 put_symbol(c, state, i-last-1, 0);

650 put_symbol(c, state, i-last-1, 0);
// Writes all MAX_CONTEXT_INPUTS quantizer sub-tables of one quantizer set.
653 static void write_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
656 write_quant_table(c, quant_table[i]);
// Writes the per-frame header into the first slice's range coder. For version
// <2 (the visible branch) this carries the full configuration: version, coder
// type, custom state transition deltas, colorspace, bit depth, chroma
// subsampling, transparency and the quantizer tables. For version 2 it
// instead writes per-slice geometry (as fractions of the frame in slice
// units) and each plane's quant table index. Lines elided in this excerpt.
659 static void write_header(FFV1Context *f){
660 uint8_t state[CONTEXT_SIZE];
662 RangeCoder * const c= &f->slice_context[0]->c;

664 memset(state, 128, sizeof(state));

667 put_symbol(c, state, f->version, 0);
668 put_symbol(c, state, f->ac, 0);

670 for(i=1; i<256; i++){
671 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);

674 put_symbol(c, state, f->colorspace, 0); //YUV cs type

676 put_symbol(c, state, f->bits_per_raw_sample, 0);
677 put_rac(c, state, f->chroma_planes);
678 put_symbol(c, state, f->chroma_h_shift, 0);
679 put_symbol(c, state, f->chroma_v_shift, 0);
680 put_rac(c, state, f->transparency);

682 write_quant_tables(c, f->quant_table);
683 }else if(f->version < 3){
684 put_symbol(c, state, f->slice_count, 0);
685 for(i=0; i<f->slice_count; i++){
686 FFV1Context *fs= f->slice_context[i];
687 put_symbol(c, state, (fs->slice_x +1)*f->num_h_slices / f->width , 0);
688 put_symbol(c, state, (fs->slice_y +1)*f->num_v_slices / f->height , 0);
689 put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
690 put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
691 for(j=0; j<f->plane_count; j++){
692 put_symbol(c, state, f->plane[j].quant_table_index, 0);
693 av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
698 #endif /* CONFIG_FFV1_ENCODER */
// Common encoder/decoder initialization: caches flags, sets frame defaults,
// initializes the DSP context and copies the frame dimensions from avctx.
// Lines elided in this excerpt.
700 static av_cold int common_init(AVCodecContext *avctx){
701 FFV1Context *s = avctx->priv_data;

704 s->flags= avctx->flags;

706 avcodec_get_frame_defaults(&s->picture);

708 ff_dsputil_init(&s->dsp, avctx);

710 s->width = avctx->width;
711 s->height= avctx->height;

713 assert(s->width && s->height);
// Prepares one slice context for (de)coding: copies plane_count/transparency
// from the main context, lazily allocates each plane's range-coder state
// array (CONTEXT_SIZE bytes per context) and VLC state array, and rebuilds
// the slice's range-coder one_state/zero_state tables from the possibly
// customized state_transition table (flagged FIXME: could be skipped when
// state_transition is unchanged). Returns AVERROR(ENOMEM) on allocation
// failure. Lines elided in this excerpt.
722 static int init_slice_state(FFV1Context *f, FFV1Context *fs){

725 fs->plane_count= f->plane_count;
726 fs->transparency= f->transparency;
727 for(j=0; j<f->plane_count; j++){
728 PlaneContext * const p= &fs->plane[j];

731 if(!p-> state) p-> state= av_malloc(CONTEXT_SIZE*p->context_count*sizeof(uint8_t));
733 return AVERROR(ENOMEM);

735 if(!p->vlc_state) p->vlc_state= av_malloc(p->context_count*sizeof(VlcState));
737 return AVERROR(ENOMEM);

742 //FIXME only redo if state_transition changed
743 for(j=1; j<256; j++){
744 fs->c.one_state [ j]= f->state_transition[j];
745 fs->c.zero_state[256-j]= 256-fs->c.one_state [j];
// Runs init_slice_state over every slice context; propagates the first error.
752 static int init_slices_state(FFV1Context *f){
754 for(i=0; i<f->slice_count; i++){
755 FFV1Context *fs= f->slice_context[i];
756 if(init_slice_state(f, fs) < 0)
// Creates one FFV1Context per slice as a memcpy of the main context (with
// rc_stat2 pointers cleared so each slice gathers its own stats), computes
// each slice's rectangle from its grid position (sx, sy) in the
// num_h_slices x num_v_slices grid, and allocates the slice's sample ring
// buffer (3 rings x 4 components, width padded by 6 guard samples).
// Returns AVERROR(ENOMEM) on allocation failure. Lines elided here.
762 static av_cold int init_slice_contexts(FFV1Context *f){

765 f->slice_count= f->num_h_slices * f->num_v_slices;

767 for(i=0; i<f->slice_count; i++){
768 FFV1Context *fs= av_mallocz(sizeof(*fs));
769 int sx= i % f->num_h_slices;
770 int sy= i / f->num_h_slices;
771 int sxs= f->avctx->width * sx / f->num_h_slices;
772 int sxe= f->avctx->width *(sx+1) / f->num_h_slices;
773 int sys= f->avctx->height* sy / f->num_v_slices;
774 int sye= f->avctx->height*(sy+1) / f->num_v_slices;
775 f->slice_context[i]= fs;
776 memcpy(fs, f, sizeof(*fs));
777 memset(fs->rc_stat2, 0, sizeof(fs->rc_stat2));

779 fs->slice_width = sxe - sxs;
780 fs->slice_height= sye - sys;

784 fs->sample_buffer = av_malloc(3*4 * (fs->width+6) * sizeof(*fs->sample_buffer));
785 if (!fs->sample_buffer)
786 return AVERROR(ENOMEM);
// Allocates one initial-state array per quantizer set (one 32-byte group per
// context) and fills it with the neutral range-coder state 128.
// Returns AVERROR(ENOMEM) on allocation failure.
791 static int allocate_initial_states(FFV1Context *f){

794 for(i=0; i<f->quant_table_count; i++){
795 f->initial_states[i]= av_malloc(f->context_count[i]*sizeof(*f->initial_states[i]));
796 if(!f->initial_states[i])
797 return AVERROR(ENOMEM);
798 memset(f->initial_states[i], 128, f->context_count[i]*sizeof(*f->initial_states[i]));
803 #if CONFIG_FFV1_ENCODER
// Builds the version >= 2 global header into avctx->extradata with a
// dedicated range coder: version (and minor_version >= 2), coder type,
// state-transition deltas, colorspace/bit-depth/chroma/transparency config,
// slice grid dimensions, all quantizer sets, and — when any initial state
// differs from the neutral 128 — the per-context initial states coded as
// deltas against the previous context. For version >= 3 an error-check field
// (f->ec) and a trailing CRC32 over the extradata are appended.
// NOTE(review): the extradata allocation result is not visibly checked in
// this excerpt — confirm against the full source. Lines elided here.
804 static int write_extra_header(FFV1Context *f){
805 RangeCoder * const c= &f->c;
806 uint8_t state[CONTEXT_SIZE];
808 uint8_t state2[32][CONTEXT_SIZE];

811 memset(state2, 128, sizeof(state2));
812 memset(state, 128, sizeof(state));

814 f->avctx->extradata= av_malloc(f->avctx->extradata_size= 10000 + (11*11*5*5*5+11*11*11)*32);
815 ff_init_range_encoder(c, f->avctx->extradata, f->avctx->extradata_size);
816 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);

818 put_symbol(c, state, f->version, 0);

821 f->minor_version = 2;
822 put_symbol(c, state, f->minor_version, 0);

824 put_symbol(c, state, f->ac, 0);

826 for(i=1; i<256; i++){
827 put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1);

830 put_symbol(c, state, f->colorspace, 0); //YUV cs type
831 put_symbol(c, state, f->bits_per_raw_sample, 0);
832 put_rac(c, state, f->chroma_planes);
833 put_symbol(c, state, f->chroma_h_shift, 0);
834 put_symbol(c, state, f->chroma_v_shift, 0);
835 put_rac(c, state, f->transparency);
836 put_symbol(c, state, f->num_h_slices-1, 0);
837 put_symbol(c, state, f->num_v_slices-1, 0);

839 put_symbol(c, state, f->quant_table_count, 0);
840 for(i=0; i<f->quant_table_count; i++)
841 write_quant_tables(c, f->quant_tables[i]);

843 for(i=0; i<f->quant_table_count; i++){
844 for(j=0; j<f->context_count[i]*CONTEXT_SIZE; j++)
845 if(f->initial_states[i] && f->initial_states[i][0][j] != 128)
847 if(j<f->context_count[i]*CONTEXT_SIZE){
848 put_rac(c, state, 1);
849 for(j=0; j<f->context_count[i]; j++){
850 for(k=0; k<CONTEXT_SIZE; k++){
851 int pred= j ? f->initial_states[i][j-1][k] : 128;
852 put_symbol(c, state2[k], (int8_t)(f->initial_states[i][j][k]-pred), 1);

856 put_rac(c, state, 0);

861 put_symbol(c, state, f->ec, 0);

864 f->avctx->extradata_size= ff_rac_terminate(c);
865 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, f->avctx->extradata, f->avctx->extradata_size);
866 AV_WL32(f->avctx->extradata + f->avctx->extradata_size, v);
867 f->avctx->extradata_size += 4;
// Greedy optimization of the state transition table from 2-pass statistics:
// for nearby state pairs (i, i2) in 12..244 it compares the estimated code
// length (COST2 sums the cost of a state and its mirror 256-x) of keeping vs.
// swapping them; a profitable swap exchanges the stt entries, the gathered
// rc_stat counts, their mirrored counterparts, and renames all references to
// i/i2 throughout the table. State 128 is pinned (never swapped).
// Lines elided in this excerpt.
872 static int sort_stt(FFV1Context *s, uint8_t stt[256]){
873 int i,i2,changed,print=0;

877 for(i=12; i<244; i++){
878 for(i2=i+1; i2<245 && i2<i+4; i2++){
879 #define COST(old, new) \
880 s->rc_stat[old][0]*-log2((256-(new))/256.0)\
881 +s->rc_stat[old][1]*-log2( (new) /256.0)

883 #define COST2(old, new) \
885 +COST(256-(old), 256-(new))

887 double size0= COST2(i, i ) + COST2(i2, i2);
888 double sizeX= COST2(i, i2) + COST2(i2, i );
889 if(sizeX < size0 && i!=128 && i2!=128){
891 FFSWAP(int, stt[ i], stt[ i2]);
892 FFSWAP(int, s->rc_stat[i ][0],s->rc_stat[ i2][0]);
893 FFSWAP(int, s->rc_stat[i ][1],s->rc_stat[ i2][1]);
895 FFSWAP(int, stt[256-i], stt[256-i2]);
896 FFSWAP(int, s->rc_stat[256-i][0],s->rc_stat[256-i2][0]);
897 FFSWAP(int, s->rc_stat[256-i][1],s->rc_stat[256-i2][1]);

899 for(j=1; j<256; j++){
900 if (stt[j] == i ) stt[j] = i2;
901 else if(stt[j] == i2) stt[j] = i ;

903 if (stt[256-j] == 256-i ) stt[256-j] = 256-i2;
904 else if(stt[256-j] == 256-i2) stt[256-j] = 256-i ;
// Encoder initialization. Chooses the bitstream version from the requested
// features (2-pass / multi-slice force version >= 2, -level 3 selects
// version 3 with error checking), validates experimental-version gating,
// configures coder type, bit depth, chroma layout and colorspace from the
// pixel format, builds the quantizer sets and context counts, optionally
// parses 2-pass statistics to derive optimized state transitions and initial
// states, picks a slice grid matching -slices, writes the global header and
// sets up slice contexts. NOTE(review): many lines (breaks, closing braces,
// error paths) are elided in this excerpt; fall-through between pix_fmt cases
// below is intentional in the original (shared 10/16-bit handling).
915 static av_cold int encode_init(AVCodecContext *avctx)

917 FFV1Context *s = avctx->priv_data;

924 if((avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) || avctx->slices>1)
925 s->version = FFMAX(s->version, 2);

927 if(avctx->level == 3){

932 s->ec = (s->version >= 3);

935 if(s->version >= 2 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
936 av_log(avctx, AV_LOG_ERROR, "Version 2 needed for requested features but version 2 is experimental and not enabled\n");
937 return AVERROR_INVALIDDATA;

940 s->ac= avctx->coder_type > 0 ? 2 : 0;

943 switch(avctx->pix_fmt){
944 case PIX_FMT_YUV444P9:
945 case PIX_FMT_YUV422P9:
946 case PIX_FMT_YUV420P9:
947 if (!avctx->bits_per_raw_sample)
948 s->bits_per_raw_sample = 9;
949 case PIX_FMT_YUV444P10:
950 case PIX_FMT_YUV420P10:
951 case PIX_FMT_YUV422P10:
952 s->packed_at_lsb = 1;
953 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
954 s->bits_per_raw_sample = 10;
956 case PIX_FMT_YUV444P16:
957 case PIX_FMT_YUV422P16:
958 case PIX_FMT_YUV420P16:
959 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample) {
960 s->bits_per_raw_sample = 16;
961 } else if (!s->bits_per_raw_sample){
962 s->bits_per_raw_sample = avctx->bits_per_raw_sample;
964 if(s->bits_per_raw_sample <=8){
965 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n");
966 return AVERROR_INVALIDDATA;
968 if(!s->ac && avctx->coder_type == -1) {
969 av_log(avctx, AV_LOG_INFO, "bits_per_raw_sample > 8, forcing coder 1\n");
973 av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample of more than 8 needs -coder 1 currently\n");
974 return AVERROR_INVALIDDATA;
976 s->version= FFMAX(s->version, 1);
978 case PIX_FMT_YUV444P:
979 case PIX_FMT_YUV440P:
980 case PIX_FMT_YUV422P:
981 case PIX_FMT_YUV420P:
982 case PIX_FMT_YUV411P:
983 case PIX_FMT_YUV410P:
984 s->chroma_planes= av_pix_fmt_descriptors[avctx->pix_fmt].nb_components < 3 ? 0 : 1;
987 case PIX_FMT_YUVA444P:
988 case PIX_FMT_YUVA422P:
989 case PIX_FMT_YUVA420P:
1002 if (!avctx->bits_per_raw_sample)
1003 s->bits_per_raw_sample = 9;
1004 case PIX_FMT_GBRP10:
1005 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
1006 s->bits_per_raw_sample = 10;
1007 case PIX_FMT_GBRP12:
1008 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
1009 s->bits_per_raw_sample = 12;
1010 case PIX_FMT_GBRP14:
1011 if (!avctx->bits_per_raw_sample && !s->bits_per_raw_sample)
1012 s->bits_per_raw_sample = 14;
1013 else if (!s->bits_per_raw_sample)
1014 s->bits_per_raw_sample = avctx->bits_per_raw_sample;
1016 s->chroma_planes= 1;
1017 s->version= FFMAX(s->version, 1);
1020 av_log(avctx, AV_LOG_ERROR, "format not supported\n");
1021 return AVERROR_INVALIDDATA;
1023 if (s->transparency) {
1024 av_log(avctx, AV_LOG_WARNING, "Storing alpha plane, this will require a recent FFV1 decoder to playback!\n");
1026 if (avctx->context_model > 1U) {
1027 av_log(avctx, AV_LOG_ERROR, "Invalid context model %d, valid values are 0 and 1\n", avctx->context_model);
1028 return AVERROR(EINVAL);

1032 for(i=1; i<256; i++)
1033 s->state_transition[i]=ver2_state[i];

// Build both quantizer sets: set 0 uses 3 context inputs (11 levels each),
// set 1 uses 5 inputs (11,11,5,5,5 levels); >8-bit depths use the *_10bit
// variants. Context counts are half of the product (+1) since contexts are
// sign-folded.
1035 for(i=0; i<256; i++){
1036 s->quant_table_count=2;
1037 if(s->bits_per_raw_sample <=8){
1038 s->quant_tables[0][0][i]= quant11[i];
1039 s->quant_tables[0][1][i]= 11*quant11[i];
1040 s->quant_tables[0][2][i]= 11*11*quant11[i];
1041 s->quant_tables[1][0][i]= quant11[i];
1042 s->quant_tables[1][1][i]= 11*quant11[i];
1043 s->quant_tables[1][2][i]= 11*11*quant5 [i];
1044 s->quant_tables[1][3][i]= 5*11*11*quant5 [i];
1045 s->quant_tables[1][4][i]= 5*5*11*11*quant5 [i];
1047 s->quant_tables[0][0][i]= quant9_10bit[i];
1048 s->quant_tables[0][1][i]= 11*quant9_10bit[i];
1049 s->quant_tables[0][2][i]= 11*11*quant9_10bit[i];
1050 s->quant_tables[1][0][i]= quant9_10bit[i];
1051 s->quant_tables[1][1][i]= 11*quant9_10bit[i];
1052 s->quant_tables[1][2][i]= 11*11*quant5_10bit[i];
1053 s->quant_tables[1][3][i]= 5*11*11*quant5_10bit[i];
1054 s->quant_tables[1][4][i]= 5*5*11*11*quant5_10bit[i];
1057 s->context_count[0]= (11*11*11+1)/2;
1058 s->context_count[1]= (11*11*5*5*5+1)/2;
1059 memcpy(s->quant_table, s->quant_tables[avctx->context_model], sizeof(s->quant_table));

1061 for(i=0; i<s->plane_count; i++){
1062 PlaneContext * const p= &s->plane[i];

1064 memcpy(p->quant_table, s->quant_table, sizeof(p->quant_table));
1065 p->quant_table_index= avctx->context_model;
1066 p->context_count= s->context_count[p->quant_table_index];

1069 if(allocate_initial_states(s) < 0)
1070 return AVERROR(ENOMEM);

1072 avctx->coded_frame= &s->picture;
1073 if(!s->transparency)
1075 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);

1077 s->picture_number=0;

1079 if(avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
1080 for(i=0; i<s->quant_table_count; i++){
1081 s->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*s->rc_stat2[i]));
1083 return AVERROR(ENOMEM);

// Pass 2: parse the statistics produced by pass 1 (rc_stat, rc_stat2 and a
// trailing GOB count), then derive an optimized state-transition table
// (sort_stt) and per-context initial states (find_best_state).
1086 if(avctx->stats_in){
1087 char *p= avctx->stats_in;
1088 uint8_t best_state[256][256];

1092 av_assert0(s->version>=2);

1095 for(j=0; j<256; j++){
1097 s->rc_stat[j][i]= strtol(p, &next, 0);
1099 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d [%s]\n", j,i,p);

1105 for(i=0; i<s->quant_table_count; i++){
1106 for(j=0; j<s->context_count[i]; j++){
1107 for(k=0; k<32; k++){
1109 s->rc_stat2[i][j][k][m]= strtol(p, &next, 0);
1111 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid at %d %d %d %d [%s]\n", i,j,k,m,p);
1112 return AVERROR_INVALIDDATA;

1119 gob_count= strtol(p, &next, 0);
1120 if(next==p || gob_count <0){
1121 av_log(avctx, AV_LOG_ERROR, "2Pass file invalid\n");
1122 return AVERROR_INVALIDDATA;

1125 while(*p=='\n' || *p==' ') p++;

1128 sort_stt(s, s->state_transition);

1130 find_best_state(best_state, s->state_transition);

1132 for(i=0; i<s->quant_table_count; i++){
1133 for(j=0; j<s->context_count[i]; j++){
1134 for(k=0; k<32; k++){
1136 if(s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]){
1137 p=256.0*s->rc_stat2[i][j][k][1] / (s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1]);
1139 s->initial_states[i][j][k]= best_state[av_clip(round(p), 1, 255)][av_clip((s->rc_stat2[i][j][k][0]+s->rc_stat2[i][j][k][1])/gob_count, 0, 255)];

// Find an h x v slice grid whose product matches the requested -slices
// (h in [v, 2v), product <= 64); error out if none exists.
1146 for(s->num_v_slices=2; s->num_v_slices<9; s->num_v_slices++){
1147 for(s->num_h_slices=s->num_v_slices; s->num_h_slices<2*s->num_v_slices; s->num_h_slices++){
1148 if(avctx->slices == s->num_h_slices * s->num_v_slices && avctx->slices <= 64 || !avctx->slices)

1152 av_log(avctx, AV_LOG_ERROR, "Unsupported number %d of slices requested, please specify a supported number with -slices (ex:4,6,9,12,16, ...)\n", avctx->slices);

1155 write_extra_header(s);

1158 if(init_slice_contexts(s) < 0)
1160 if(init_slices_state(s) < 0)

1163 #define STATS_OUT_SIZE 1024*1024*6
1164 if(avctx->flags & CODEC_FLAG_PASS1){
1165 avctx->stats_out= av_mallocz(STATS_OUT_SIZE);
1166 for(i=0; i<s->quant_table_count; i++){
1167 for(j=0; j<s->slice_count; j++){
1168 FFV1Context *sf= s->slice_context[j];
1169 av_assert0(!sf->rc_stat2[i]);
1170 sf->rc_stat2[i]= av_mallocz(s->context_count[i]*sizeof(*sf->rc_stat2[i]));
1171 if(!sf->rc_stat2[i])
1172 return AVERROR(ENOMEM);
1179 #endif /* CONFIG_FFV1_ENCODER */
// Resets a slice's per-plane adaptive state before (re)coding: interlace-bit
// states to neutral 128, range-coder contexts to the transmitted initial
// states (or 128 when none), and each VLC context to its documented start
// values (drift 0, error_sum 4, bias 0, count 1). Lines elided here.
1182 static void clear_slice_state(FFV1Context *f, FFV1Context *fs){

1185 for(i=0; i<f->plane_count; i++){
1186 PlaneContext *p= &fs->plane[i];

1188 p->interlace_bit_state[0]= 128;
1189 p->interlace_bit_state[1]= 128;

1192 if(f->initial_states[p->quant_table_index]){
1193 memcpy(p->state, f->initial_states[p->quant_table_index], CONTEXT_SIZE*p->context_count);
1195 memset(p->state, 128, CONTEXT_SIZE*p->context_count);

1197 for(j=0; j<p->context_count; j++){
1198 p->vlc_state[j].drift= 0;
1199 p->vlc_state[j].error_sum= 4; //FFMAX((RANGE + 32)/64, 2);
1200 p->vlc_state[j].bias= 0;
1201 p->vlc_state[j].count= 1;
1207 #if CONFIG_FFV1_ENCODER
// Writes the version >= 2 per-slice header: slice position and size in slice
// grid units, each plane's quant table index, the picture structure code
// (3 = progressive, 1/2 = field order from top_field_first) and the sample
// aspect ratio. Uses a fresh range-coder state block.
1209 static void encode_slice_header(FFV1Context *f, FFV1Context *fs){
1210 RangeCoder *c = &fs->c;
1211 uint8_t state[CONTEXT_SIZE];

1213 memset(state, 128, sizeof(state));

1215 put_symbol(c, state, (fs->slice_x +1)*f->num_h_slices / f->width , 0);
1216 put_symbol(c, state, (fs->slice_y +1)*f->num_v_slices / f->height , 0);
1217 put_symbol(c, state, (fs->slice_width +1)*f->num_h_slices / f->width -1, 0);
1218 put_symbol(c, state, (fs->slice_height+1)*f->num_v_slices / f->height-1, 0);
1219 for(j=0; j<f->plane_count; j++){
1220 put_symbol(c, state, f->plane[j].quant_table_index, 0);
1221 av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);

1223 if(!f->picture.interlaced_frame) put_symbol(c, state, 3, 0);
1224 else put_symbol(c, state, 1 + !f->picture.top_field_first, 0);
1225 put_symbol(c, state, f->picture.sample_aspect_ratio.num, 0);
1226 put_symbol(c, state, f->picture.sample_aspect_ratio.den, 0);
// Thread-pool worker: encodes one slice. Clears the adaptive state, writes
// the slice header (version >= 2), and when using the Golomb coder terminates
// the range coder (put_rac with the fixed 129 state, then ff_rac_terminate)
// so bit-exact Golomb output can follow via init_put_bits at the recorded
// ac_byte_count offset. Then encodes either YUV planes (luma, chroma at
// chroma_h/v_shift-reduced size via ceiling division -((-w)>>shift), optional
// alpha) or an RGB frame. ps is the bytes-per-sample step (2 for >8 bit).
// Lines elided in this excerpt.
1229 static int encode_slice(AVCodecContext *c, void *arg){
1230 FFV1Context *fs= *(void**)arg;
1231 FFV1Context *f= fs->avctx->priv_data;
1232 int width = fs->slice_width;
1233 int height= fs->slice_height;

1236 AVFrame * const p= &f->picture;
1237 const int ps= (f->bits_per_raw_sample>8)+1;

1240 clear_slice_state(f, fs);

1242 encode_slice_header(f, fs);

1246 put_rac(&fs->c, (int[]){129}, 0);
1247 fs->ac_byte_count = f->version > 2 || (!x&&!y) ? ff_rac_terminate(&fs->c) : 0;
1248 init_put_bits(&fs->pb, fs->c.bytestream_start + fs->ac_byte_count, fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count);

1251 if(f->colorspace==0){
1252 const int chroma_width = -((-width )>>f->chroma_h_shift);
1253 const int chroma_height= -((-height)>>f->chroma_v_shift);
1254 const int cx= x>>f->chroma_h_shift;
1255 const int cy= y>>f->chroma_v_shift;

1257 encode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);

1259 if (f->chroma_planes){
1260 encode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1261 encode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);

1263 if (fs->transparency)
1264 encode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);

1266 uint8_t *planes[3] = {p->data[0] + ps*x + y*p->linesize[0],
1267 p->data[1] + ps*x + y*p->linesize[1],
1268 p->data[2] + ps*x + y*p->linesize[2]};
1269 encode_rgb_frame(fs, planes, width, height, p->linesize);
1276 static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
1277 const AVFrame *pict, int *got_packet)
1279 FFV1Context *f = avctx->priv_data;
1280 RangeCoder * const c= &f->slice_context[0]->c;
1281 AVFrame * const p= &f->picture;
1283 uint8_t keystate=128;
1287 if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width*avctx->height*((8*2+1+1)*4)/8
1288 + FF_MIN_BUFFER_SIZE)) < 0)
1291 ff_init_range_encoder(c, pkt->data, pkt->size);
1292 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1295 p->pict_type= AV_PICTURE_TYPE_I;
1297 if(avctx->gop_size==0 || f->picture_number % avctx->gop_size == 0){
1298 put_rac(c, &keystate, 1);
1303 put_rac(c, &keystate, 0);
1309 for(i=1; i<256; i++){
1310 c->one_state[i]= f->state_transition[i];
1311 c->zero_state[256-i]= 256-c->one_state[i];
1315 for(i=1; i<f->slice_count; i++){
1316 FFV1Context *fs= f->slice_context[i];
1317 uint8_t *start = pkt->data + (pkt->size-used_count)*(int64_t)i/f->slice_count;
1318 int len = pkt->size/f->slice_count;
1319 ff_init_range_encoder(&fs->c, start, len);
1321 avctx->execute(avctx, encode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
1324 for(i=0; i<f->slice_count; i++){
1325 FFV1Context *fs= f->slice_context[i];
1330 put_rac(&fs->c, &state, 0);
1331 bytes= ff_rac_terminate(&fs->c);
1333 flush_put_bits(&fs->pb); //nicer padding FIXME
1334 bytes= fs->ac_byte_count + (put_bits_count(&fs->pb)+7)/8;
1336 if(i>0 || f->version>2){
1337 av_assert0(bytes < pkt->size/f->slice_count);
1338 memmove(buf_p, fs->c.bytestream_start, bytes);
1339 av_assert0(bytes < (1<<24));
1340 AV_WB24(buf_p+bytes, bytes);
1346 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, bytes);
1347 AV_WL32(buf_p + bytes, v); bytes += 4;
1352 if((avctx->flags&CODEC_FLAG_PASS1) && (f->picture_number&31)==0){
1354 char *p= avctx->stats_out;
1355 char *end= p + STATS_OUT_SIZE;
1357 memset(f->rc_stat, 0, sizeof(f->rc_stat));
1358 for(i=0; i<f->quant_table_count; i++)
1359 memset(f->rc_stat2[i], 0, f->context_count[i]*sizeof(*f->rc_stat2[i]));
1361 for(j=0; j<f->slice_count; j++){
1362 FFV1Context *fs= f->slice_context[j];
1363 for(i=0; i<256; i++){
1364 f->rc_stat[i][0] += fs->rc_stat[i][0];
1365 f->rc_stat[i][1] += fs->rc_stat[i][1];
1367 for(i=0; i<f->quant_table_count; i++){
1368 for(k=0; k<f->context_count[i]; k++){
1369 for(m=0; m<32; m++){
1370 f->rc_stat2[i][k][m][0] += fs->rc_stat2[i][k][m][0];
1371 f->rc_stat2[i][k][m][1] += fs->rc_stat2[i][k][m][1];
1377 for(j=0; j<256; j++){
1378 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat[j][0], f->rc_stat[j][1]);
1381 snprintf(p, end-p, "\n");
1383 for(i=0; i<f->quant_table_count; i++){
1384 for(j=0; j<f->context_count[i]; j++){
1385 for(m=0; m<32; m++){
1386 snprintf(p, end-p, "%"PRIu64" %"PRIu64" ", f->rc_stat2[i][j][m][0], f->rc_stat2[i][j][m][1]);
1391 snprintf(p, end-p, "%d\n", f->gob_count);
1392 } else if(avctx->flags&CODEC_FLAG_PASS1)
1393 avctx->stats_out[0] = '\0';
1395 f->picture_number++;
1396 pkt->size = buf_p - pkt->data;
1397 pkt->flags |= AV_PKT_FLAG_KEY*p->key_frame;
1402 #endif /* CONFIG_FFV1_ENCODER */
1404 static av_cold int common_end(AVCodecContext *avctx){
1405 FFV1Context *s = avctx->priv_data;
1408 if (avctx->codec->decode && s->picture.data[0])
1409 avctx->release_buffer(avctx, &s->picture);
1410 if (avctx->codec->decode && s->last_picture.data[0])
1411 avctx->release_buffer(avctx, &s->last_picture);
1413 for(j=0; j<s->slice_count; j++){
1414 FFV1Context *fs= s->slice_context[j];
1415 for(i=0; i<s->plane_count; i++){
1416 PlaneContext *p= &fs->plane[i];
1418 av_freep(&p->state);
1419 av_freep(&p->vlc_state);
1421 av_freep(&fs->sample_buffer);
1424 av_freep(&avctx->stats_out);
1425 for(j=0; j<s->quant_table_count; j++){
1426 av_freep(&s->initial_states[j]);
1427 for(i=0; i<s->slice_count; i++){
1428 FFV1Context *sf= s->slice_context[i];
1429 av_freep(&sf->rc_stat2[j]);
1431 av_freep(&s->rc_stat2[j]);
1434 for(i=0; i<s->slice_count; i++){
1435 av_freep(&s->slice_context[i]);
1441 static av_always_inline void decode_line(FFV1Context *s, int w,
1443 int plane_index, int bits)
1445 PlaneContext * const p= &s->plane[plane_index];
1446 RangeCoder * const c= &s->c;
1450 int run_index= s->run_index;
1453 int diff, context, sign;
1455 context= get_context(p, sample[1] + x, sample[0] + x, sample[1] + x);
1462 av_assert2(context < p->context_count);
1465 diff= get_symbol_inline(c, p->state[context], 1);
1467 if(context == 0 && run_mode==0) run_mode=1;
1470 if(run_count==0 && run_mode==1){
1471 if(get_bits1(&s->gb)){
1472 run_count = 1<<ff_log2_run[run_index];
1473 if(x + run_count <= w) run_index++;
1475 if(ff_log2_run[run_index]) run_count = get_bits(&s->gb, ff_log2_run[run_index]);
1477 if(run_index) run_index--;
1485 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1490 diff= get_vlc_symbol(&s->gb, &p->vlc_state[context], bits);
1492 av_dlog(s->avctx, "count:%d index:%d, mode:%d, x:%d pos:%d\n",
1493 run_count, run_index, run_mode, x, get_bits_count(&s->gb));
1496 if(sign) diff= -diff;
1498 sample[1][x]= (predict(sample[1] + x, sample[0] + x) + diff) & ((1<<bits)-1);
1500 s->run_index= run_index;
1503 static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index){
1506 sample[0]=s->sample_buffer +3;
1507 sample[1]=s->sample_buffer+w+6+3;
1511 memset(s->sample_buffer, 0, 2*(w+6)*sizeof(*s->sample_buffer));
1514 int16_t *temp = sample[0]; //FIXME try a normal buffer
1516 sample[0]= sample[1];
1519 sample[1][-1]= sample[0][0 ];
1520 sample[0][ w]= sample[0][w-1];
1523 if(s->avctx->bits_per_raw_sample <= 8){
1524 decode_line(s, w, sample, plane_index, 8);
1526 src[x + stride*y]= sample[1][x];
1529 decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
1530 if(s->packed_at_lsb){
1532 ((uint16_t*)(src + stride*y))[x]= sample[1][x];
1536 ((uint16_t*)(src + stride*y))[x]= sample[1][x] << (16 - s->avctx->bits_per_raw_sample);
1540 //STOP_TIMER("decode-line")}
1544 static void decode_rgb_frame(FFV1Context *s, uint8_t *src[3], int w, int h, int stride[3]){
1546 int16_t *sample[4][2];
1547 int lbd= s->avctx->bits_per_raw_sample <= 8;
1548 int bits= s->avctx->bits_per_raw_sample > 0 ? s->avctx->bits_per_raw_sample : 8;
1549 int offset= 1 << bits;
1551 sample[x][0] = s->sample_buffer + x*2 *(w+6) + 3;
1552 sample[x][1] = s->sample_buffer + (x*2+1)*(w+6) + 3;
1557 memset(s->sample_buffer, 0, 8*(w+6)*sizeof(*s->sample_buffer));
1560 for(p=0; p<3 + s->transparency; p++){
1561 int16_t *temp = sample[p][0]; //FIXME try a normal buffer
1563 sample[p][0]= sample[p][1];
1566 sample[p][1][-1]= sample[p][0][0 ];
1567 sample[p][0][ w]= sample[p][0][w-1];
1569 decode_line(s, w, sample[p], (p+1)/2, 9);
1571 decode_line(s, w, sample[p], (p+1)/2, bits+1);
1574 int g= sample[0][1][x];
1575 int b= sample[1][1][x];
1576 int r= sample[2][1][x];
1577 int a= sample[3][1][x];
1586 *((uint32_t*)(src[0] + x*4 + stride[0]*y))= b + (g<<8) + (r<<16) + (a<<24);
1588 *((uint16_t*)(src[0] + x*2 + stride[0]*y)) = b;
1589 *((uint16_t*)(src[1] + x*2 + stride[1]*y)) = g;
1590 *((uint16_t*)(src[2] + x*2 + stride[2]*y)) = r;
1596 static int decode_slice_header(FFV1Context *f, FFV1Context *fs){
1597 RangeCoder *c = &fs->c;
1598 uint8_t state[CONTEXT_SIZE];
1599 unsigned ps, i, context_count;
1600 memset(state, 128, sizeof(state));
1602 av_assert0(f->version > 2);
1604 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1605 fs->slice_y = get_symbol(c, state, 0) *f->height;
1606 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1607 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
1609 fs->slice_x /= f->num_h_slices;
1610 fs->slice_y /= f->num_v_slices;
1611 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1612 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
1613 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1615 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1616 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1619 for(i=0; i<f->plane_count; i++){
1620 PlaneContext * const p= &fs->plane[i];
1621 int idx=get_symbol(c, state, 0);
1622 if(idx > (unsigned)f->quant_table_count){
1623 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
1626 p->quant_table_index= idx;
1627 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
1628 context_count= f->context_count[idx];
1630 if(p->context_count < context_count){
1631 av_freep(&p->state);
1632 av_freep(&p->vlc_state);
1634 p->context_count= context_count;
1637 ps = get_symbol(c, state, 0);
1639 f->picture.interlaced_frame = 1;
1640 f->picture.top_field_first = 1;
1642 f->picture.interlaced_frame = 1;
1643 f->picture.top_field_first = 0;
1645 f->picture.interlaced_frame = 0;
1647 f->picture.sample_aspect_ratio.num = get_symbol(c, state, 0);
1648 f->picture.sample_aspect_ratio.den = get_symbol(c, state, 0);
1653 static int decode_slice(AVCodecContext *c, void *arg){
1654 FFV1Context *fs= *(void**)arg;
1655 FFV1Context *f= fs->avctx->priv_data;
1656 int width, height, x, y;
1657 const int ps= (c->bits_per_raw_sample>8)+1;
1658 AVFrame * const p= &f->picture;
1661 if(init_slice_state(f, fs) < 0)
1662 return AVERROR(ENOMEM);
1663 if(decode_slice_header(f, fs) < 0) {
1664 fs->slice_damaged = 1;
1665 return AVERROR_INVALIDDATA;
1668 if(init_slice_state(f, fs) < 0)
1669 return AVERROR(ENOMEM);
1670 if(f->picture.key_frame)
1671 clear_slice_state(f, fs);
1672 width = fs->slice_width;
1673 height= fs->slice_height;
1678 if (f->version == 3 && f->minor_version > 1 || f->version > 3)
1679 get_rac(&fs->c, (int[]){129});
1680 fs->ac_byte_count = f->version > 2 || (!x&&!y) ? fs->c.bytestream - fs->c.bytestream_start - 1 : 0;
1681 init_get_bits(&fs->gb,
1682 fs->c.bytestream_start + fs->ac_byte_count,
1683 (fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count) * 8);
1686 av_assert1(width && height);
1687 if(f->colorspace==0){
1688 const int chroma_width = -((-width )>>f->chroma_h_shift);
1689 const int chroma_height= -((-height)>>f->chroma_v_shift);
1690 const int cx= x>>f->chroma_h_shift;
1691 const int cy= y>>f->chroma_v_shift;
1692 decode_plane(fs, p->data[0] + ps*x + y*p->linesize[0], width, height, p->linesize[0], 0);
1694 if (f->chroma_planes){
1695 decode_plane(fs, p->data[1] + ps*cx+cy*p->linesize[1], chroma_width, chroma_height, p->linesize[1], 1);
1696 decode_plane(fs, p->data[2] + ps*cx+cy*p->linesize[2], chroma_width, chroma_height, p->linesize[2], 1);
1698 if (fs->transparency)
1699 decode_plane(fs, p->data[3] + ps*x + y*p->linesize[3], width, height, p->linesize[3], 2);
1701 uint8_t *planes[3] = {p->data[0] + ps*x + y*p->linesize[0],
1702 p->data[1] + ps*x + y*p->linesize[1],
1703 p->data[2] + ps*x + y*p->linesize[2]};
1704 decode_rgb_frame(fs, planes, width, height, p->linesize);
1706 if(fs->ac && f->version > 2) {
1708 get_rac(&fs->c, (int[]){129});
1709 v = fs->c.bytestream_end - fs->c.bytestream - 2 - 5*f->ec;
1711 av_log(f->avctx, AV_LOG_ERROR, "bytestream end mismatching by %d\n", v);
1712 fs->slice_damaged = 1;
1721 static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale){
1724 uint8_t state[CONTEXT_SIZE];
1726 memset(state, 128, sizeof(state));
1728 for(v=0; i<128 ; v++){
1729 unsigned len= get_symbol(c, state, 0) + 1;
1731 if(len > 128 - i) return -1;
1734 quant_table[i] = scale*v;
1739 for(i=1; i<128; i++){
1740 quant_table[256-i]= -quant_table[i];
1742 quant_table[128]= -quant_table[127];
1747 static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256]){
1749 int context_count=1;
1752 context_count*= read_quant_table(c, quant_table[i], context_count);
1753 if(context_count > 32768U){
1757 return (context_count+1)/2;
1760 static int read_extra_header(FFV1Context *f){
1761 RangeCoder * const c= &f->c;
1762 uint8_t state[CONTEXT_SIZE];
1764 uint8_t state2[32][CONTEXT_SIZE];
1766 memset(state2, 128, sizeof(state2));
1767 memset(state, 128, sizeof(state));
1769 ff_init_range_decoder(c, f->avctx->extradata, f->avctx->extradata_size);
1770 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
1772 f->version= get_symbol(c, state, 0);
1773 if(f->version > 2) {
1774 c->bytestream_end -= 4;
1775 f->minor_version= get_symbol(c, state, 0);
1777 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
1779 for(i=1; i<256; i++){
1780 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1783 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1784 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1785 f->chroma_planes= get_rac(c, state);
1786 f->chroma_h_shift= get_symbol(c, state, 0);
1787 f->chroma_v_shift= get_symbol(c, state, 0);
1788 f->transparency= get_rac(c, state);
1789 f->plane_count= 2 + f->transparency;
1790 f->num_h_slices= 1 + get_symbol(c, state, 0);
1791 f->num_v_slices= 1 + get_symbol(c, state, 0);
1792 if(f->num_h_slices > (unsigned)f->width || f->num_v_slices > (unsigned)f->height){
1793 av_log(f->avctx, AV_LOG_ERROR, "too many slices\n");
1797 f->quant_table_count= get_symbol(c, state, 0);
1798 if(f->quant_table_count > (unsigned)MAX_QUANT_TABLES)
1800 for(i=0; i<f->quant_table_count; i++){
1801 if((f->context_count[i]= read_quant_tables(c, f->quant_tables[i])) < 0){
1802 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1807 if(allocate_initial_states(f) < 0)
1808 return AVERROR(ENOMEM);
1810 for(i=0; i<f->quant_table_count; i++){
1811 if(get_rac(c, state)){
1812 for(j=0; j<f->context_count[i]; j++){
1813 for(k=0; k<CONTEXT_SIZE; k++){
1814 int pred= j ? f->initial_states[i][j-1][k] : 128;
1815 f->initial_states[i][j][k]= (pred+get_symbol(c, state2[k], 1))&0xFF;
1822 f->ec = get_symbol(c, state, 0);
1827 v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, f->avctx->extradata, f->avctx->extradata_size);
1829 av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", v);
1830 return AVERROR_INVALIDDATA;
1837 static int read_header(FFV1Context *f){
1838 uint8_t state[CONTEXT_SIZE];
1839 int i, j, context_count = -1; //-1 to avoid warning
1840 RangeCoder * const c= &f->slice_context[0]->c;
1842 memset(state, 128, sizeof(state));
1845 unsigned v= get_symbol(c, state, 0);
1847 av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
1848 return AVERROR_INVALIDDATA;
1851 f->ac= f->avctx->coder_type= get_symbol(c, state, 0);
1853 for(i=1; i<256; i++){
1854 f->state_transition[i]= get_symbol(c, state, 1) + c->one_state[i];
1857 f->colorspace= get_symbol(c, state, 0); //YUV cs type
1859 f->avctx->bits_per_raw_sample= get_symbol(c, state, 0);
1860 f->chroma_planes= get_rac(c, state);
1861 f->chroma_h_shift= get_symbol(c, state, 0);
1862 f->chroma_v_shift= get_symbol(c, state, 0);
1863 f->transparency= get_rac(c, state);
1864 f->plane_count= 2 + f->transparency;
1867 if(f->colorspace==0){
1868 if(!f->transparency && !f->chroma_planes){
1869 if (f->avctx->bits_per_raw_sample<=8)
1870 f->avctx->pix_fmt= PIX_FMT_GRAY8;
1872 f->avctx->pix_fmt= PIX_FMT_GRAY16;
1873 }else if(f->avctx->bits_per_raw_sample<=8 && !f->transparency){
1874 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1875 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P; break;
1876 case 0x01: f->avctx->pix_fmt= PIX_FMT_YUV440P; break;
1877 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P; break;
1878 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P; break;
1879 case 0x20: f->avctx->pix_fmt= PIX_FMT_YUV411P; break;
1880 case 0x22: f->avctx->pix_fmt= PIX_FMT_YUV410P; break;
1882 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1885 }else if(f->avctx->bits_per_raw_sample<=8 && f->transparency){
1886 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1887 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUVA444P; break;
1888 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUVA422P; break;
1889 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUVA420P; break;
1891 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1894 }else if(f->avctx->bits_per_raw_sample==9) {
1896 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1897 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P9; break;
1898 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P9; break;
1899 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P9; break;
1901 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1904 }else if(f->avctx->bits_per_raw_sample==10) {
1906 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1907 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P10; break;
1908 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P10; break;
1909 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P10; break;
1911 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1915 switch(16*f->chroma_h_shift + f->chroma_v_shift){
1916 case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
1917 case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P16; break;
1918 case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P16; break;
1920 av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
1924 }else if(f->colorspace==1){
1925 if(f->chroma_h_shift || f->chroma_v_shift){
1926 av_log(f->avctx, AV_LOG_ERROR, "chroma subsampling not supported in this colorspace\n");
1929 if(f->avctx->bits_per_raw_sample==9)
1930 f->avctx->pix_fmt= PIX_FMT_GBRP9;
1931 else if(f->avctx->bits_per_raw_sample==10)
1932 f->avctx->pix_fmt= PIX_FMT_GBRP10;
1933 else if(f->avctx->bits_per_raw_sample==12)
1934 f->avctx->pix_fmt= PIX_FMT_GBRP12;
1935 else if(f->avctx->bits_per_raw_sample==14)
1936 f->avctx->pix_fmt= PIX_FMT_GBRP14;
1938 if(f->transparency) f->avctx->pix_fmt= PIX_FMT_RGB32;
1939 else f->avctx->pix_fmt= PIX_FMT_0RGB32;
1941 av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
1945 av_dlog(f->avctx, "%d %d %d\n",
1946 f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
1948 context_count= read_quant_tables(c, f->quant_table);
1949 if(context_count < 0){
1950 av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
1953 }else if(f->version < 3){
1954 f->slice_count= get_symbol(c, state, 0);
1956 const uint8_t *p= c->bytestream_end;
1957 for(f->slice_count = 0; f->slice_count < MAX_SLICES && 3 < p - c->bytestream_start; f->slice_count++){
1958 int trailer = 3 + 5*!!f->ec;
1959 int size = AV_RB24(p-trailer);
1960 if(size + trailer > p - c->bytestream_start)
1962 p -= size + trailer;
1965 if(f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0){
1966 av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid\n", f->slice_count);
1970 for(j=0; j<f->slice_count; j++){
1971 FFV1Context *fs= f->slice_context[j];
1973 fs->packed_at_lsb= f->packed_at_lsb;
1975 fs->slice_damaged = 0;
1977 if(f->version == 2){
1978 fs->slice_x = get_symbol(c, state, 0) *f->width ;
1979 fs->slice_y = get_symbol(c, state, 0) *f->height;
1980 fs->slice_width =(get_symbol(c, state, 0)+1)*f->width + fs->slice_x;
1981 fs->slice_height=(get_symbol(c, state, 0)+1)*f->height + fs->slice_y;
1983 fs->slice_x /= f->num_h_slices;
1984 fs->slice_y /= f->num_v_slices;
1985 fs->slice_width = fs->slice_width /f->num_h_slices - fs->slice_x;
1986 fs->slice_height = fs->slice_height/f->num_v_slices - fs->slice_y;
1987 if((unsigned)fs->slice_width > f->width || (unsigned)fs->slice_height > f->height)
1989 if( (unsigned)fs->slice_x + (uint64_t)fs->slice_width > f->width
1990 || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
1994 for(i=0; i<f->plane_count; i++){
1995 PlaneContext * const p= &fs->plane[i];
1997 if(f->version == 2){
1998 int idx=get_symbol(c, state, 0);
1999 if(idx > (unsigned)f->quant_table_count){
2000 av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
2003 p->quant_table_index= idx;
2004 memcpy(p->quant_table, f->quant_tables[idx], sizeof(p->quant_table));
2005 context_count= f->context_count[idx];
2007 memcpy(p->quant_table, f->quant_table, sizeof(p->quant_table));
2010 if(f->version <= 2){
2011 av_assert0(context_count>=0);
2012 if(p->context_count < context_count){
2013 av_freep(&p->state);
2014 av_freep(&p->vlc_state);
2016 p->context_count= context_count;
2023 static av_cold int decode_init(AVCodecContext *avctx)
2025 FFV1Context *f = avctx->priv_data;
2029 if(avctx->extradata && read_extra_header(f) < 0)
2032 if(init_slice_contexts(f) < 0)
2038 static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
2039 const uint8_t *buf = avpkt->data;
2040 int buf_size = avpkt->size;
2041 FFV1Context *f = avctx->priv_data;
2042 RangeCoder * const c= &f->slice_context[0]->c;
2043 AVFrame * const p= &f->picture;
2045 uint8_t keystate= 128;
2046 const uint8_t *buf_p;
2048 AVFrame *picture = data;
2050 /* release previously stored data */
2052 avctx->release_buffer(avctx, p);
2054 ff_init_range_decoder(c, buf, buf_size);
2055 ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
2058 p->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
2059 if(get_rac(c, &keystate)){
2061 f->key_frame_ok = 0;
2062 if(read_header(f) < 0)
2064 f->key_frame_ok = 1;
2066 if (!f->key_frame_ok) {
2067 av_log(avctx, AV_LOG_ERROR, "Cant decode non keyframe without valid keyframe\n");
2068 return AVERROR_INVALIDDATA;
2073 p->reference= 3; //for error concealment
2074 if(avctx->get_buffer(avctx, p) < 0){
2075 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
2079 if(avctx->debug&FF_DEBUG_PICT_INFO)
2080 av_log(avctx, AV_LOG_DEBUG, "ver:%d keyframe:%d coder:%d ec:%d slices:%d bps:%d\n",
2081 f->version, p->key_frame, f->ac, f->ec, f->slice_count, f->avctx->bits_per_raw_sample);
2083 buf_p= buf + buf_size;
2084 for(i=f->slice_count-1; i>=0; i--){
2085 FFV1Context *fs= f->slice_context[i];
2086 int trailer = 3 + 5*!!f->ec;
2089 if(i || f->version>2) v = AV_RB24(buf_p-trailer)+trailer;
2090 else v = buf_p - c->bytestream_start;
2091 if(buf_p - c->bytestream_start < v){
2092 av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
2098 unsigned crc = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, buf_p, v);
2100 int64_t ts = avpkt->pts != AV_NOPTS_VALUE ? avpkt->pts : avpkt->dts;
2101 av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!", crc);
2102 if(ts != AV_NOPTS_VALUE && avctx->pkt_timebase.num) {
2103 av_log(f->avctx, AV_LOG_ERROR, "at %f seconds\n",ts*av_q2d(avctx->pkt_timebase));
2104 } else if(ts != AV_NOPTS_VALUE) {
2105 av_log(f->avctx, AV_LOG_ERROR, "at %"PRId64"\n", ts);
2107 av_log(f->avctx, AV_LOG_ERROR, "\n");
2109 fs->slice_damaged = 1;
2114 ff_init_range_decoder(&fs->c, buf_p, v);
2116 fs->c.bytestream_end = (uint8_t *)(buf_p + v);
2119 avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL, f->slice_count, sizeof(void*));
2121 for(i=f->slice_count-1; i>=0; i--){
2122 FFV1Context *fs= f->slice_context[i];
2124 if(fs->slice_damaged && f->last_picture.data[0]){
2125 uint8_t *dst[4], *src[4];
2127 int sh = (j==1 || j==2) ? f->chroma_h_shift : 0;
2128 int sv = (j==1 || j==2) ? f->chroma_v_shift : 0;
2129 dst[j] = f->picture .data[j] + f->picture .linesize[j]*
2130 (fs->slice_y>>sv) + (fs->slice_x>>sh);
2131 src[j] = f->last_picture.data[j] + f->last_picture.linesize[j]*
2132 (fs->slice_y>>sv) + (fs->slice_x>>sh);
2134 av_image_copy(dst, f->picture.linesize, (const uint8_t **)src, f->last_picture.linesize,
2135 avctx->pix_fmt, fs->slice_width, fs->slice_height);
2139 f->picture_number++;
2142 *data_size = sizeof(AVFrame);
2144 FFSWAP(AVFrame, f->picture, f->last_picture);
2149 AVCodec ff_ffv1_decoder = {
2151 .type = AVMEDIA_TYPE_VIDEO,
2152 .id = AV_CODEC_ID_FFV1,
2153 .priv_data_size = sizeof(FFV1Context),
2154 .init = decode_init,
2155 .close = common_end,
2156 .decode = decode_frame,
2157 .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ |
2158 CODEC_CAP_SLICE_THREADS,
2159 .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
2162 #if CONFIG_FFV1_ENCODER
2164 #define OFFSET(x) offsetof(FFV1Context, x)
2165 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2166 static const AVOption options[] = {
2167 { "slicecrc", "Protect slices with CRCs", OFFSET(ec), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 1, VE},
2171 static const AVClass class = {
2172 .class_name = "ffv1 encoder",
2173 .item_name = av_default_item_name,
2175 .version = LIBAVUTIL_VERSION_INT,
2178 static const AVCodecDefault ffv1_defaults[] = {
2183 AVCodec ff_ffv1_encoder = {
2185 .type = AVMEDIA_TYPE_VIDEO,
2186 .id = AV_CODEC_ID_FFV1,
2187 .priv_data_size = sizeof(FFV1Context),
2188 .init = encode_init,
2189 .encode2 = encode_frame,
2190 .close = common_end,
2191 .capabilities = CODEC_CAP_SLICE_THREADS,
2192 .defaults = ffv1_defaults,
2193 .pix_fmts = (const enum PixelFormat[]){
2194 PIX_FMT_YUV420P, PIX_FMT_YUVA420P, PIX_FMT_YUVA422P, PIX_FMT_YUV444P,
2195 PIX_FMT_YUVA444P, PIX_FMT_YUV440P, PIX_FMT_YUV422P, PIX_FMT_YUV411P,
2196 PIX_FMT_YUV410P, PIX_FMT_0RGB32, PIX_FMT_RGB32, PIX_FMT_YUV420P16,
2197 PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_YUV444P9, PIX_FMT_YUV422P9,
2198 PIX_FMT_YUV420P9, PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_YUV444P10,
2199 PIX_FMT_GRAY16, PIX_FMT_GRAY8, PIX_FMT_GBRP9, PIX_FMT_GBRP10,
2200 PIX_FMT_GBRP12, PIX_FMT_GBRP14,
2203 .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
2204 .priv_class = &class,