2 * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
3 * Copyright (C) 2006 Robert Edele <yartrebo@earthlink.net>
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #ifndef AVCODEC_SNOW_H
23 #define AVCODEC_SNOW_H
25 #include "libavutil/motion_vector.h"
32 #include "rangecoder.h"
35 #include "mpegvideo.h"
44 #define QROOT (1<<QSHIFT)
45 #define LOSSLESS_QLOG -128
47 #define MAX_REF_FRAMES 8
49 #define LOG2_OBMC_MAX 8
50 #define OBMC_MAX (1<<(LOG2_OBMC_MAX))
/* One node of the block structure used for motion compensation; 'level' is
 * presumably the node's depth in the block tree (see block_max_depth). */
51 typedef struct BlockNode{
52 int16_t mx; ///< Motion vector component X, see mv_scale
53 int16_t my; ///< Motion vector component Y, see mv_scale
54 uint8_t ref; ///< Reference frame index
55 uint8_t color[3]; ///< Color for intra
56 uint8_t type; ///< Bitfield of BLOCK_*
57 //#define TYPE_SPLIT 1
58 #define BLOCK_INTRA 1 ///< Intra block, inter otherwise
59 #define BLOCK_OPT 2 ///< Block needs no checks in this round of iterative motion estimation
60 //#define TYPE_NOCOLOR 4
61 uint8_t level; //FIXME merge into type?
/* Neutral block value (mid-grey intra color); presumably used as a stand-in
 * where no real neighbour block exists -- TODO confirm at the use sites. */
64 static const BlockNode null_block= { //FIXME add border maybe
65 .color= {128,128,128},
73 #define LOG2_MB_SIZE 4
74 #define MB_SIZE (1<<LOG2_MB_SIZE)
75 #define ENCODER_EXTRA_BITS 4
78 typedef struct x_and_coeff{
/* One wavelet subband of a plane. */
83 typedef struct SubBand{
88 int qlog; ///< log(qscale)/log[2^(1/6)]
93 int stride_line; ///< Stride measured in lines, not pixels.
94 x_and_coeff * x_coeff; ///< sparse (x, coeff) list; each line is terminated by an entry with x == width+1 (see unpack_coeffs)
95 struct SubBand *parent; ///< corresponding subband at half resolution, used as coding context, or NULL (see unpack_coeffs)
96 uint8_t state[/*7*2*/ 7 + 512][32]; ///< range-coder context states for this band's symbols
102 SubBand band[MAX_DECOMPOSITIONS][4]; ///< 4 subbands per decomposition level
105 int8_t hcoeff[HTAPS_MAX/2]; ///< interpolation filter taps -- presumably subpel filter coefficients, confirm
110 int8_t last_hcoeff[HTAPS_MAX/2]; ///< hcoeff values of the previous frame -- TODO confirm
/* Main codec context, shared by the Snow encoder and decoder. */
114 typedef struct SnowContext{
116 AVCodecContext *avctx;
121 VideoDSPContext vdsp;
122 H264QpelContext h264qpel;
123 MpegvideoEncDSPContext mpvencdsp;
125 AVFrame *input_picture; ///< new_picture with the internal linesizes
126 AVFrame *current_picture;
127 AVFrame *last_picture[MAX_REF_FRAMES] ///< reference frames
 ;
128 uint8_t *halfpel_plane[MAX_REF_FRAMES][4][4]; ///< presumably half-pel interpolated planes per reference frame -- TODO confirm layout
129 AVFrame *mconly_picture;
130 // uint8_t q_context[16];
131 uint8_t header_state[32]; ///< presumably range-coder context states for header symbols
132 uint8_t block_state[128 + 32*128]; ///< presumably range-coder context states for block info
136 int spatial_decomposition_type;
137 int last_spatial_decomposition_type; ///< value used for the previous frame
138 int temporal_decomposition_type;
139 int spatial_decomposition_count;
140 int last_spatial_decomposition_count; ///< value used for the previous frame
141 int temporal_decomposition_count;
144 int16_t (*ref_mvs[MAX_REF_FRAMES])[2]; ///< candidate motion vectors per reference frame -- TODO confirm
145 uint32_t *ref_scores[MAX_REF_FRAMES]; ///< scores matching ref_mvs -- TODO confirm
146 DWTELEM *spatial_dwt_buffer; ///< forward-DWT work buffer
147 DWTELEM *temp_dwt_buffer;
148 IDWTELEM *spatial_idwt_buffer; ///< inverse-DWT work buffer
149 IDWTELEM *temp_idwt_buffer;
154 int spatial_scalability;
164 #define QBIAS_SHIFT 3
168 int last_block_max_depth;
170 Plane plane[MAX_PLANES];
172 #define ME_CACHE_SIZE 1024
173 unsigned me_cache[ME_CACHE_SIZE]; ///< motion-estimation score cache
174 unsigned me_cache_generation; ///< presumably bumped to cheaply invalidate me_cache -- confirm
180 int iterative_dia_size;
181 int scenechange_threshold;
183 MpegEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to eventually make the motion estimation independent of MpegEncContext, so this will be removed then (FIXME/XXX)
186 uint8_t *emu_edge_buffer;
188 AVMotionVector *avmv; ///< exported motion vectors -- TODO confirm consumer
190 uint64_t encoding_error[AV_NUM_DATA_POINTERS];
196 extern const uint8_t * const ff_obmc_tab[4];
197 extern uint8_t ff_qexp[QROOT];
198 extern int ff_scale_mv_ref[MAX_REF_FRAMES][MAX_REF_FRAMES];
200 /* C bits used by mmx/sse2/altivec */
/* Head of the in-place low/high interleave loop; pairs with
 * snow_interleave_line_footer() below. */
202 static av_always_inline void snow_interleave_line_header(int * i, int width, IDWTELEM * low, IDWTELEM * high){
206 low[(*i)+1] = low[((*i)+1)>>1];
/* In-place interleave of two half-length subbands into 'low':
 * low[2k] = low[k], low[2k+1] = high[k], walking *i downwards. */
211 static av_always_inline void snow_interleave_line_footer(int * i, IDWTELEM * low, IDWTELEM * high){
212 for (; (*i)>=0; (*i)-=2){
213 low[(*i)+1] = high[(*i)>>1];
214 low[*i] = low[(*i)>>1];
/* Tail (lead-out) of a horizontal lifting step:
 * dst[i] = src[i] - ((mul*(ref[i]+ref[i+1]) + add) >> shift), and when the
 * width/lift_high parity requires it the last sample counts ref[w] twice
 * (edge mirroring). */
218 static av_always_inline void snow_horizontal_compose_lift_lead_out(int i, IDWTELEM * dst, IDWTELEM * src, IDWTELEM * ref, int width, int w, int lift_high, int mul, int add, int shift){
220 dst[i] = src[i] - ((mul * (ref[i] + ref[i + 1]) + add) >> shift);
223 if((width^lift_high)&1){
224 dst[w] = src[w] - ((mul * 2 * ref[w] + add) >> shift);
/* Tail of the scaled lifting step (liftS); adds instead of subtracting and
 * uses the W_BO bias / W_BS shift constants plus a 4*src self term. */
228 static av_always_inline void snow_horizontal_compose_liftS_lead_out(int i, IDWTELEM * dst, IDWTELEM * src, IDWTELEM * ref, int width, int w){
230 dst[i] = src[i] + ((ref[i] + ref[(i+1)]+W_BO + 4 * src[i]) >> W_BS);
234 dst[w] = src[w] + ((2 * ref[w] + W_BO + 4 * src[w]) >> W_BS);
240 int ff_snow_common_init(AVCodecContext *avctx);
241 int ff_snow_common_init_after_header(AVCodecContext *avctx);
242 void ff_snow_common_end(SnowContext *s);
243 void ff_snow_release_buffer(AVCodecContext *avctx);
244 void ff_snow_reset_contexts(SnowContext *s);
245 int ff_snow_alloc_blocks(SnowContext *s);
246 int ff_snow_frame_start(SnowContext *s);
247 void ff_snow_pred_block(SnowContext *s, uint8_t *dst, uint8_t *tmp, ptrdiff_t stride,
248 int sx, int sy, int b_w, int b_h, const BlockNode *block,
249 int plane_index, int w, int h);
250 int ff_snow_get_buffer(SnowContext *s, AVFrame *frame);
251 /* common inline functions */
252 //XXX doublecheck all of them should stay inlined
/* Predict a motion vector as the median of the left/top/top-right neighbour
 * MVs. With more than one reference frame, each neighbour MV is first rescaled
 * to the target reference 'ref' via ff_scale_mv_ref (8-bit fixed point,
 * rounded with +128). */
254 static inline void pred_mv(SnowContext *s, int *mx, int *my, int ref,
255 const BlockNode *left, const BlockNode *top, const BlockNode *tr){
256 if(s->ref_frames == 1){
257 *mx = mid_pred(left->mx, top->mx, tr->mx);
258 *my = mid_pred(left->my, top->my, tr->my);
260 const int *scale = ff_scale_mv_ref[ref];
261 *mx = mid_pred((left->mx * scale[left->ref] + 128) >>8,
262 (top ->mx * scale[top ->ref] + 128) >>8,
263 (tr ->mx * scale[tr ->ref] + 128) >>8);
264 *my = mid_pred((left->my * scale[left->ref] + 128) >>8,
265 (top ->my * scale[top ->ref] + 128) >>8,
266 (tr ->my * scale[tr ->ref] + 128) >>8);
/* Return nonzero iff two blocks yield the same prediction: both intra with
 * identical color, or identical mv/ref with matching intra flag. The |-of-
 * differences form avoids branches. */
270 static av_always_inline int same_block(BlockNode *a, BlockNode *b){
271 if((a->type&BLOCK_INTRA) && (b->type&BLOCK_INTRA)){
272 return !((a->color[0] - b->color[0]) | (a->color[1] - b->color[1]) | (a->color[2] - b->color[2]));
274 return !((a->mx - b->mx) | (a->my - b->my) | (a->ref - b->ref) | ((a->type ^ b->type)&BLOCK_INTRA));
278 //FIXME name cleanup (b_w, block_w, b_width stuff)
279 //XXX should we really inline it?
/* OBMC block blending: predict the four overlapping neighbour blocks
 * (lt/rt/lb/rb), weight them with the 'obmc' window and accumulate the result
 * into dst (IDWTELEM, FRAC_BITS fixed point) and/or dst8 (8-bit pixels).
 * 'sliced' selects the slice_buffer DSP path (s->dwt.inner_add_yblock). */
280 static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer *sb, IDWTELEM *dst, uint8_t *dst8, const uint8_t *obmc, int src_x, int src_y, int b_w, int b_h, int w, int h, int dst_stride, int src_stride, int obmc_stride, int b_x, int b_y, int add, int offset_dst, int plane_index){
281 const int b_width = s->b_width << s->block_max_depth;
282 const int b_height= s->b_height << s->block_max_depth;
283 const int b_stride= b_width;
284 BlockNode *lt= &s->block[b_x + b_y*b_stride];
286 BlockNode *lb= lt+b_stride;
289 // When src_stride is large enough, it is possible to interleave the blocks.
290 // Otherwise the blocks are written sequentially in the tmp buffer.
291 int tmp_step= src_stride >= 7*MB_SIZE ? MB_SIZE : MB_SIZE*src_stride;
292 uint8_t *tmp = s->scratchbuf;
// border handling: presumably substitutes/clamps neighbour blocks at the
// picture edges and clips negative source offsets -- TODO confirm
299 }else if(b_x + 1 >= b_width){
306 }else if(b_y + 1 >= b_height){
311 if(src_x<0){ //FIXME merge with prev & always round internal width up to *16
314 if(!sliced && !offset_dst)
322 obmc -= src_y*obmc_stride;
324 if(!sliced && !offset_dst)
325 dst -= src_y*dst_stride;
332 if(b_w<=0 || b_h<=0) return;
334 if(!sliced && offset_dst)
335 dst += src_x + src_y*dst_stride;
336 dst8+= src_x + src_y*src_stride;
337 // src += src_x + src_y*src_stride;
339 ptmp= tmp + 3*tmp_step;
// predict each of the four neighbour blocks, reusing an already predicted
// buffer when two neighbours are identical (same_block)
342 ff_snow_pred_block(s, block[0], tmp, src_stride, src_x, src_y, b_w, b_h, lt, plane_index, w, h);
344 if(same_block(lt, rt)){
349 ff_snow_pred_block(s, block[1], tmp, src_stride, src_x, src_y, b_w, b_h, rt, plane_index, w, h);
352 if(same_block(lt, lb)){
354 }else if(same_block(rt, lb)){
359 ff_snow_pred_block(s, block[2], tmp, src_stride, src_x, src_y, b_w, b_h, lb, plane_index, w, h);
362 if(same_block(lt, rb) ){
364 }else if(same_block(rt, rb)){
366 }else if(same_block(lb, rb)){
370 ff_snow_pred_block(s, block[3], tmp, src_stride, src_x, src_y, b_w, b_h, rb, plane_index, w, h);
373 s->dwt.inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
375 for(y=0; y<b_h; y++){
376 //FIXME ugly misuse of obmc_stride
377 const uint8_t *obmc1= obmc + y*obmc_stride;
378 const uint8_t *obmc2= obmc1+ (obmc_stride>>1);
379 const uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
380 const uint8_t *obmc4= obmc3+ (obmc_stride>>1);
381 for(x=0; x<b_w; x++){
382 int v= obmc1[x] * block[3][x + y*src_stride]
383 +obmc2[x] * block[2][x + y*src_stride]
384 +obmc3[x] * block[1][x + y*src_stride]
385 +obmc4[x] * block[0][x + y*src_stride];
387 v <<= 8 - LOG2_OBMC_MAX; // compensate for the LOG2_OBMC_MAX-bit weights
392 v += dst[x + y*dst_stride];
393 v = (v + (1<<(FRAC_BITS-1))) >> FRAC_BITS; // round away the fixed-point fraction
394 if(v&(~255)) v= ~(v>>31); // clip: negative -> 0, >255 -> 0xFF.. (255 after the 8-bit store)
395 dst8[x + y*src_stride] = v;
397 dst[x + y*dst_stride] -= v;
/* Run OBMC prediction for one row of macroblocks (mb_y) of a plane.
 * 'buf' holds the plane in FRAC_BITS fixed point. On keyframes (or with
 * debug flag 512) motion compensation is skipped and only the 128 grey bias
 * is applied (buf -> dst8 with clipping) or removed from buf; otherwise
 * add_yblock() is called for every block of the row. */
404 static av_always_inline void predict_slice(SnowContext *s, IDWTELEM *buf, int plane_index, int add, int mb_y){
405 Plane *p= &s->plane[plane_index];
406 const int mb_w= s->b_width << s->block_max_depth;
407 const int mb_h= s->b_height << s->block_max_depth;
409 int block_size = MB_SIZE >> s->block_max_depth;
410 int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
411 int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
412 const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
413 const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
414 int ref_stride= s->current_picture->linesize[plane_index];
415 uint8_t *dst8= s->current_picture->data[plane_index];
418 av_assert2(s->chroma_h_shift == s->chroma_v_shift); // obmc params assume squares
419 if(s->keyframe || (s->avctx->debug&512)){
424 for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
426 int v= buf[x + y*w] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
428 if(v&(~255)) v= ~(v>>31); // clip: negative -> 0, >255 -> 255 after the 8-bit store
429 dst8[x + y*ref_stride]= v;
433 for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
435 buf[x + y*w]-= 128<<FRAC_BITS; // remove the grey bias in fixed point
// blocks overlap by half a block in each direction, hence the -block_w/2
// offsets and the <= bound on mb_x
443 for(mb_x=0; mb_x<=mb_w; mb_x++){
444 add_yblock(s, 0, NULL, buf, dst8, obmc,
445 block_w*mb_x - block_w/2,
446 block_h*mb_y - block_h/2,
449 w, ref_stride, obmc_stride,
451 add, 1, plane_index);
/* Apply predict_slice() to every macroblock row of the plane (inclusive upper
 * bound because blocks overlap by half a block). */
455 static av_always_inline void predict_plane(SnowContext *s, IDWTELEM *buf, int plane_index, int add){
456 const int mb_h= s->b_height << s->block_max_depth;
458 for(mb_y=0; mb_y<=mb_h; mb_y++)
459 predict_slice(s, buf, plane_index, add, mb_y);
/* Fill the whole 2^rem_depth x 2^rem_depth region of the block array covered
 * by a node at 'level' with the given parameters (luma l, chroma cb/cr, motion
 * vector mx/my, reference ref, type bits). */
462 static inline void set_blocks(SnowContext *s, int level, int x, int y, int l, int cb, int cr, int mx, int my, int ref, int type){
463 const int w= s->b_width << s->block_max_depth;
464 const int rem_depth= s->block_max_depth - level;
465 const int index= (x + y*w) << rem_depth;
466 const int block_w= 1<<rem_depth;
467 const int block_h= 1<<rem_depth; //FIXME "w!=h"
480 for(j=0; j<block_h; j++){
481 for(i=0; i<block_w; i++){
482 s->block[index + i + j*w]= block;
/* Set up the MotionEstContext source/reference plane pointers for estimation
 * at position (x,y); chroma plane offsets are derived with chroma_h_shift.
 * Only ref_index 0 is supported (asserted below). */
487 static inline void init_ref(MotionEstContext *c, uint8_t *src[3], uint8_t *ref[3], uint8_t *ref2[3], int x, int y, int ref_index){
488 SnowContext *s = c->avctx->priv_data;
489 const int offset[3]= {
491 ((y*c->uvstride + x)>>s->chroma_h_shift),
492 ((y*c->uvstride + x)>>s->chroma_h_shift),
496 c->src[0][i]= src [i];
497 c->ref[0][i]= ref [i] + offset[i];
499 av_assert2(!ref_index);
503 /* bitstream functions */
505 extern const int8_t ff_quant3bA[256];
507 #define QEXPSHIFT (7-FRAC_BITS+8) //FIXME try to change this to 0
/* Write an integer with an adaptive exponent/mantissa range code.
 * Context layout (indices into 'state'): 0 = zero flag, 1..10 = unary
 * exponent, 11..21 = sign, 22..31 = mantissa bits; indices beyond the range
 * are clamped so large values share the last context. */
509 static inline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed){
513 const int a= FFABS(v);
514 const int e= av_log2(a);
515 const int el= FFMIN(e, 10);
516 put_rac(c, state+0, 0);
519 put_rac(c, state+1+i, 1); //1..10
522 put_rac(c, state+1+9, 1); //1..10
524 put_rac(c, state+1+FFMIN(i,9), 0);
526 for(i=e-1; i>=el; i--){
527 put_rac(c, state+22+9, (a>>i)&1); //22..31
530 put_rac(c, state+22+i, (a>>i)&1); //22..31
534 put_rac(c, state+11 + el, v < 0); //11..21
536 put_rac(c, state+0, 1);
/* Read back a value written by put_symbol(); the same context layout applies.
 * Returns AVERROR_INVALIDDATA when the decoded exponent is damaged. */
540 static inline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
541 if(get_rac(c, state+0))
547 while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
550 return AVERROR_INVALIDDATA;
554 for(i=e-1; i>=0; i--){
555 a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31
558 e= -(is_signed && get_rac(c, state+11 + FFMIN(e,10))); //11..21
/* Write a non-negative integer with a variable-length range code whose initial
 * exponent estimate is 'log2' (allowed down to -4); mantissa bits use
 * contexts 31-i. */
563 static inline void put_symbol2(RangeCoder *c, uint8_t *state, int v, int log2){
565 int r= log2>=0 ? 1<<log2 : 1;
568 av_assert2(log2>=-4);
571 put_rac(c, state+4+log2, 1);
576 put_rac(c, state+4+log2, 0);
578 for(i=log2-1; i>=0; i--){
579 put_rac(c, state+31-i, (v>>i)&1);
/* Read back a value written by put_symbol2() with the same initial 'log2';
 * the exponent loop is capped at log2 < 28 to bound the decoded magnitude. */
583 static inline int get_symbol2(RangeCoder *c, uint8_t *state, int log2){
585 int r= log2>=0 ? 1<<log2 : 1;
588 av_assert2(log2>=-4);
590 while(log2<28 && get_rac(c, state+4+log2)){
596 for(i=log2-1; i>=0; i--){
597 v+= get_rac(c, state+31-i)<<i;
/* Decode one subband's coefficients into the sparse x_and_coeff run lists.
 * Neighbour coefficients (l/lt/t/rt) and the co-located parent-band
 * coefficient select the range-coder context; runs of zero coefficients are
 * coded separately. Each line's list is terminated by an entry with
 * x == w+1. */
603 static inline void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, int orientation){
604 const int w= b->width;
605 const int h= b->height;
609 x_and_coeff *xc= b->x_coeff;
610 x_and_coeff *prev_xc= NULL;
611 x_and_coeff *prev2_xc= xc;
612 x_and_coeff *parent_xc= parent ? parent->x_coeff : NULL;
613 x_and_coeff *prev_parent_xc= parent_xc;
615 runs= get_symbol2(&s->c, b->state[30], 0);
616 if(runs-- > 0) run= get_symbol2(&s->c, b->state[1], 3);
623 if(y && prev_xc->x == 0){
635 if(prev_xc->x == x + 1)
// the parent band is at half resolution, hence the x>>1 comparisons
641 if(x>>1 > parent_xc->x){
644 if(x>>1 == parent_xc->x){
648 if(/*ll|*/l|lt|t|rt|p){
649 int context= av_log2(/*FFABS(ll) + */3*(l>>1) + (lt>>1) + (t&~1) + (rt>>1) + (p>>1));
651 v=get_rac(&s->c, &b->state[0][context]);
653 v= 2*(get_symbol2(&s->c, b->state[context + 2], context-4) + 1);
654 v+=get_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l&0xFF] + 3*ff_quant3bA[t&0xFF]]);
655 if ((uint16_t)v != v) { // value must fit the 16-bit coefficient storage
656 av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
664 if(runs-- > 0) run= get_symbol2(&s->c, b->state[1], 3);
666 v= 2*(get_symbol2(&s->c, b->state[0 + 2], 0-4) + 1);
667 v+=get_rac(&s->c, &b->state[0][16 + 1 + 3]);
668 if ((uint16_t)v != v) { // value must fit the 16-bit coefficient storage
669 av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
679 av_assert2(run >= 0);
680 if(y) max_run= FFMIN(run, prev_xc->x - x - 2);
681 else max_run= FFMIN(run, w-x-1);
683 max_run= FFMIN(max_run, 2*parent_xc->x - x - 1);
684 av_assert2(max_run >= 0 && max_run <= run);
691 (xc++)->x= w+1; //end marker
697 while(parent_xc->x != parent->width+1)
700 prev_parent_xc= parent_xc;
702 parent_xc= prev_parent_xc;
707 (xc++)->x= w+1; //end marker
710 #endif /* AVCODEC_SNOW_H */