2 * Error resilience / concealment
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * Error resilience / concealment.
32 #include "mpegvideo.h"
34 #include "rectangle.h"
38 * H264 redefines mb_intra so it is not mistakenly used (it is uninitialized in h264)
39 * but error concealment must support both h264 and h263, thus we must undo this
/**
 * Reconstruct one concealed macroblock at (s->mb_x, s->mb_y) into the
 * current picture, using the MV stored in s->mv[0][0].
 * H.264 goes through ff_h264_hl_decode_mb() with freshly filled caches;
 * every other codec uses the generic MPV_decode_mb() path.
 * @param ref reference index to use (H.264 only; checked against ref_count[0])
 */
43 static void decode_mb(MpegEncContext *s, int ref){
/* point dest[] at this MB's luma/chroma position in the current frame */
44 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
45 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
46 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
48 if(CONFIG_H264_DECODER && s->codec_id == CODEC_ID_H264){
49 H264Context *h= (void*)s;
50 h->mb_xy= s->mb_x + s->mb_y*s->mb_stride;
/* clear residual caches so the MB is rendered as pure motion compensation */
51 memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
53 if(ref >= h->ref_count[0]) //FIXME it is possible albeit uncommon that slice references differ between slices, we take the easy approach and ignore it for now. If this turns out to have any relevance in practice then correct remapping should be added
/* fill ref_index/ref_cache/mv_cache with the single 16x16 vector, then
   run the normal H.264 high-level MB reconstruction */
55 fill_rectangle(&s->current_picture.f.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1);
56 fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
57 fill_rectangle(h->mv_cache[0][ scan8[0] ], 4, 4, 8, pack16to32(s->mv[0][0][0],s->mv[0][0][1]), 4);
59 ff_h264_hl_decode_mb(h);
/* non-H.264: generic MPEG reconstruction of s->block */
62 MPV_decode_mb(s, s->block);
67 * @param stride the number of MVs to get to the next row
68 * @param mv_step the number of MVs per row or column in a macroblock
/* Return the MV-grid step (MVs per MB edge) and row stride for the current
   codec; H.264 takes its own branch (quarter-sample MVs are required). */
70 static void set_mv_strides(MpegEncContext *s, int *mv_step, int *stride){
71 if(s->codec_id == CODEC_ID_H264){
72 H264Context *h= (void*)s;
/* H.264 error concealment only works on quarter-sample MV grids */
73 assert(s->quarter_sample);
/* non-H.264 path: one MV per 8x8 block row -> stride is b8_stride */
78 *stride= s->b8_stride;
83 * Replace the current MB with a flat dc-only version.
85 static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int mb_x, int mb_y)
87 int dc, dcu, dcv, y, i;
/* luma: fetch the stored DC of each of the four 8x8 blocks of the MB
   (i&1 selects the column, i>>1 the row within the MB) */
89 dc= s->dc_val[0][mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*s->b8_stride];
/* clamp to 2040 = 255*8 so that dc/8 below stays inside 0..255 */
91 else if(dc>2040) dc=2040;
95 dest_y[x + (i&1)*8 + (y + (i>>1)*8)*s->linesize]= dc/8;
/* chroma: a single DC value per macroblock for each of Cb and Cr */
99 dcu = s->dc_val[1][mb_x + mb_y*s->mb_stride];
100 dcv = s->dc_val[2][mb_x + mb_y*s->mb_stride];
102 else if(dcu>2040) dcu=2040;
104 else if(dcv>2040) dcv=2040;
/* paint the 8x8 chroma blocks flat with the clamped DC */
108 dest_cb[x + y*(s->uvlinesize)]= dcu/8;
109 dest_cr[x + y*(s->uvlinesize)]= dcv/8;
/**
 * In-place 3-tap smoothing of a DC plane, first along rows, then columns.
 * Border samples (first/last row and column) are left untouched.
 */
114 static void filter181(int16_t *data, int width, int height, int stride){
117 /* horizontal filter */
118 for(y=1; y<height-1; y++){
119 int prev_dc= data[0 + y*stride];
121 for(x=1; x<width-1; x++){
125 + data[x + y*stride]*8
126 - data[x + 1 + y*stride];
/* (dc*10923 + 32768) >> 16 is a rounded division by 6 (10923/65536 ~= 1/6) */
127 dc= (dc*10923 + 32768)>>16;
/* remember the *unfiltered* value so the next tap still sees original data */
128 prev_dc= data[x + y*stride];
129 data[x + y*stride]= dc;
133 /* vertical filter */
134 for(x=1; x<width-1; x++){
135 int prev_dc= data[x];
137 for(y=1; y<height-1; y++){
141 + data[x + y *stride]*8
142 - data[x + (y+1)*stride];
143 dc= (dc*10923 + 32768)>>16;
144 prev_dc= data[x + y*stride];
145 data[x + y*stride]= dc;
151 * guess the dc of blocks which do not have an undamaged dc
152 * @param w width in 8 pixel blocks
153 * @param h height in 8 pixel blocks
155 static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, int is_luma){
/* col[i][dir]/dist[i][dir] hold, for each block, the nearest usable DC value
   and its distance in each of the four scan directions (0..3) */
157 int16_t (*col )[4] = av_malloc(stride*h*sizeof( int16_t)*4);
158 uint16_t (*dist)[4] = av_malloc(stride*h*sizeof(uint16_t)*4);
/* pass 1: left -> right; record last known-good DC seen so far in slot [1] */
160 for(b_y=0; b_y<h; b_y++){
163 for(b_x=0; b_x<w; b_x++){
164 int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
165 int error_j= s->error_status_table[mb_index_j];
166 int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
/* a DC is trustworthy if the MB is inter, or intra without a DC error */
167 if(intra_j==0 || !(error_j&ER_DC_ERROR)){
168 color= dc[b_x + b_y*stride];
171 col [b_x + b_y*stride][1]= color;
172 dist[b_x + b_y*stride][1]= distance >= 0 ? b_x-distance : 9999;
/* pass 2: right -> left into slot [0] */
176 for(b_x=w-1; b_x>=0; b_x--){
177 int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
178 int error_j= s->error_status_table[mb_index_j];
179 int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
180 if(intra_j==0 || !(error_j&ER_DC_ERROR)){
181 color= dc[b_x + b_y*stride];
184 col [b_x + b_y*stride][0]= color;
185 dist[b_x + b_y*stride][0]= distance >= 0 ? distance-b_x : 9999;
/* pass 3: top -> bottom into slot [3] */
188 for(b_x=0; b_x<w; b_x++){
191 for(b_y=0; b_y<h; b_y++){
192 int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
193 int error_j= s->error_status_table[mb_index_j];
194 int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
195 if(intra_j==0 || !(error_j&ER_DC_ERROR)){
196 color= dc[b_x + b_y*stride];
199 col [b_x + b_y*stride][3]= color;
200 dist[b_x + b_y*stride][3]= distance >= 0 ? b_y-distance : 9999;
/* pass 4: bottom -> top into slot [2] */
204 for(b_y=h-1; b_y>=0; b_y--){
205 int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
206 int error_j= s->error_status_table[mb_index_j];
207 int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
208 if(intra_j==0 || !(error_j&ER_DC_ERROR)){
209 color= dc[b_x + b_y*stride];
212 col [b_x + b_y*stride][2]= color;
213 dist[b_x + b_y*stride][2]= distance >= 0 ? distance-b_y : 9999;
/* final pass: replace each damaged intra DC by a distance-weighted average
   of the four directional candidates collected above */
217 for(b_y=0; b_y<h; b_y++){
218 for(b_x=0; b_x<w; b_x++){
219 int mb_index, error, j;
220 int64_t guess, weight_sum;
222 mb_index= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
224 error= s->error_status_table[mb_index];
226 if(IS_INTER(s->current_picture.f.mb_type[mb_index])) continue; //inter
227 if(!(error&ER_DC_ERROR)) continue; //dc-ok
/* NOTE(review): 256*256*256*16 == 2^32 does not fit in int; the constant
   should be written as (int64_t)256*256*256*16 to avoid signed overflow.
   Confirm against upstream before changing. */
233 int64_t weight= 256*256*256*16/dist[b_x + b_y*stride][j];
234 guess+= weight*(int64_t)col[b_x + b_y*stride][j];
237 guess= (guess + weight_sum/2) / weight_sum;
239 dc[b_x + b_y*stride]= guess;
247 * simple horizontal deblocking filter used for error resilience
248 * @param w width in 8 pixel blocks
249 * @param h height in 8 pixel blocks
251 static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int stride, int is_luma){
252 int b_x, b_y, mvx_stride, mvy_stride;
/* clipping table: cm[v] clamps v to the valid 0..255 pixel range */
253 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
254 set_mv_strides(s, &mvx_stride, &mvy_stride);
255 mvx_stride >>= is_luma;
256 mvy_stride *= mvx_stride;
/* walk every vertical edge between horizontally adjacent 8x8 blocks */
258 for(b_y=0; b_y<h; b_y++){
259 for(b_x=0; b_x<w-1; b_x++){
261 int left_status = s->error_status_table[( b_x >>is_luma) + (b_y>>is_luma)*s->mb_stride];
262 int right_status= s->error_status_table[((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride];
263 int left_intra = IS_INTRA(s->current_picture.f.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
264 int right_intra = IS_INTRA(s->current_picture.f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
265 int left_damage = left_status&ER_MB_ERROR;
266 int right_damage= right_status&ER_MB_ERROR;
267 int offset= b_x*8 + b_y*stride*8;
268 int16_t *left_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride* b_x ];
269 int16_t *right_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride*(b_x+1)];
271 if(!(left_damage||right_damage)) continue; // both undamaged
/* skip if both sides are inter with (nearly) the same MV — no visible seam.
   NOTE(review): the '+' in FFABS(left_mv[1]+right_mv[1]) looks like it was
   meant to be '-'; confirm against upstream before changing. */
273 if( (!left_intra) && (!right_intra)
274 && FFABS(left_mv[0]-right_mv[0]) + FFABS(left_mv[1]+right_mv[1]) < 2) continue;
/* a/c: gradients inside each block, b: the step across the edge */
279 a= dst[offset + 7 + y*stride] - dst[offset + 6 + y*stride];
280 b= dst[offset + 8 + y*stride] - dst[offset + 7 + y*stride];
281 c= dst[offset + 9 + y*stride] - dst[offset + 8 + y*stride];
/* only correct the part of the edge step that exceeds the local gradient */
283 d= FFABS(b) - ((FFABS(a) + FFABS(c) + 1)>>1);
/* halve the correction when only one side is damaged */
289 if(!(left_damage && right_damage))
/* taper the correction over 4 pixels on each side of the edge */
293 dst[offset + 7 + y*stride] = cm[dst[offset + 7 + y*stride] + ((d*7)>>4)];
294 dst[offset + 6 + y*stride] = cm[dst[offset + 6 + y*stride] + ((d*5)>>4)];
295 dst[offset + 5 + y*stride] = cm[dst[offset + 5 + y*stride] + ((d*3)>>4)];
296 dst[offset + 4 + y*stride] = cm[dst[offset + 4 + y*stride] + ((d*1)>>4)];
299 dst[offset + 8 + y*stride] = cm[dst[offset + 8 + y*stride] - ((d*7)>>4)];
300 dst[offset + 9 + y*stride] = cm[dst[offset + 9 + y*stride] - ((d*5)>>4)];
301 dst[offset + 10+ y*stride] = cm[dst[offset +10 + y*stride] - ((d*3)>>4)];
302 dst[offset + 11+ y*stride] = cm[dst[offset +11 + y*stride] - ((d*1)>>4)];
310 * simple vertical deblocking filter used for error resilience
311 * @param w width in 8 pixel blocks
312 * @param h height in 8 pixel blocks
314 static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int stride, int is_luma){
315 int b_x, b_y, mvx_stride, mvy_stride;
/* clipping table: cm[v] clamps v to the valid 0..255 pixel range */
316 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
317 set_mv_strides(s, &mvx_stride, &mvy_stride);
318 mvx_stride >>= is_luma;
319 mvy_stride *= mvx_stride;
/* walk every horizontal edge between vertically adjacent 8x8 blocks */
321 for(b_y=0; b_y<h-1; b_y++){
322 for(b_x=0; b_x<w; b_x++){
324 int top_status = s->error_status_table[(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_stride];
325 int bottom_status= s->error_status_table[(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride];
326 int top_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
327 int bottom_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
328 int top_damage = top_status&ER_MB_ERROR;
329 int bottom_damage= bottom_status&ER_MB_ERROR;
330 int offset= b_x*8 + b_y*stride*8;
331 int16_t *top_mv = s->current_picture.f.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
332 int16_t *bottom_mv = s->current_picture.f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
334 if(!(top_damage||bottom_damage)) continue; // both undamaged
/* skip if both sides are inter with (nearly) the same MV — no visible seam.
   NOTE(review): the '+' in FFABS(top_mv[1]+bottom_mv[1]) looks like it was
   meant to be '-'; confirm against upstream before changing. */
336 if( (!top_intra) && (!bottom_intra)
337 && FFABS(top_mv[0]-bottom_mv[0]) + FFABS(top_mv[1]+bottom_mv[1]) < 2) continue;
/* a/c: gradients inside each block, b: the step across the edge */
342 a= dst[offset + x + 7*stride] - dst[offset + x + 6*stride];
343 b= dst[offset + x + 8*stride] - dst[offset + x + 7*stride];
344 c= dst[offset + x + 9*stride] - dst[offset + x + 8*stride];
/* only correct the part of the edge step that exceeds the local gradient */
346 d= FFABS(b) - ((FFABS(a) + FFABS(c)+1)>>1);
/* halve the correction when only one side is damaged */
352 if(!(top_damage && bottom_damage))
/* taper the correction over 4 pixels above and below the edge */
356 dst[offset + x + 7*stride] = cm[dst[offset + x + 7*stride] + ((d*7)>>4)];
357 dst[offset + x + 6*stride] = cm[dst[offset + x + 6*stride] + ((d*5)>>4)];
358 dst[offset + x + 5*stride] = cm[dst[offset + x + 5*stride] + ((d*3)>>4)];
359 dst[offset + x + 4*stride] = cm[dst[offset + x + 4*stride] + ((d*1)>>4)];
362 dst[offset + x + 8*stride] = cm[dst[offset + x + 8*stride] - ((d*7)>>4)];
363 dst[offset + x + 9*stride] = cm[dst[offset + x + 9*stride] - ((d*5)>>4)];
364 dst[offset + x + 10*stride] = cm[dst[offset + x + 10*stride] - ((d*3)>>4)];
365 dst[offset + x + 11*stride] = cm[dst[offset + x + 11*stride] - ((d*1)>>4)];
/**
 * Conceal damaged motion vectors.
 * Each MB is FROZEN (trusted), or gets its MV guessed from fixed neighbours,
 * iterating in checkerboard passes until nothing changes; candidates are
 * scored by the boundary SAD against already-reconstructed neighbours.
 */
372 static void guess_mv(MpegEncContext *s){
/* per-MB state map: MV_FROZEN / MV_CHANGED / MV_UNCHANGED */
373 uint8_t *fixed = av_malloc(s->mb_stride * s->mb_height);
376 #define MV_UNCHANGED 1
377 const int mb_stride = s->mb_stride;
378 const int mb_width = s->mb_width;
379 const int mb_height= s->mb_height;
380 int i, depth, num_avail;
381 int mb_x, mb_y, mot_step, mot_stride;
383 set_mv_strides(s, &mot_step, &mot_stride);
/* classify every MB: intra or undamaged-MV MBs are frozen */
386 for(i=0; i<s->mb_num; i++){
387 const int mb_xy= s->mb_index2xy[ i ];
389 int error= s->error_status_table[mb_xy];
391 if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check
392 if(!(error&ER_MV_ERROR)) f=MV_FROZEN; //inter with undamaged MV
/* otherwise seed the damaged MB with the co-located MV of the last picture */
397 else if(s->last_picture.f.data[0] && s->last_picture.f.motion_val[0]){
398 const int mb_y= mb_xy / s->mb_stride;
399 const int mb_x= mb_xy % s->mb_stride;
400 const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
401 s->current_picture.f.motion_val[0][mot_index][0]= s->last_picture.f.motion_val[0][mot_index][0];
402 s->current_picture.f.motion_val[0][mot_index][1]= s->last_picture.f.motion_val[0][mot_index][1];
403 s->current_picture.f.ref_index[0][4*mb_xy] = s->last_picture.f.ref_index[0][4*mb_xy];
/* cheap fallback: MV-guessing disabled, or too few trusted MVs to guess from */
407 if((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) || num_avail <= mb_width/2){
408 for(mb_y=0; mb_y<s->mb_height; mb_y++){
409 for(mb_x=0; mb_x<s->mb_width; mb_x++){
410 const int mb_xy= mb_x + mb_y*s->mb_stride;
412 if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) continue;
413 if(!(s->error_status_table[mb_xy]&ER_MV_ERROR)) continue;
415 s->mv_dir = s->last_picture.f.data[0] ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
417 s->mv_type = MV_TYPE_16X16;
420 s->dsp.clear_blocks(s->block[0]);
/* main loop: repeat passes until a full sweep changes nothing */
432 for(depth=0;; depth++){
433 int changed, pass, none_left;
437 for(pass=0; (changed || pass<2) && pass<10; pass++){
442 for(mb_y=0; mb_y<s->mb_height; mb_y++){
443 for(mb_x=0; mb_x<s->mb_width; mb_x++){
444 const int mb_xy= mb_x + mb_y*s->mb_stride;
445 int mv_predictor[8][2]={{0}};
/* NOTE(review): 256*256*256*64 == 2^38 does not fit in int; should be an
   int64_t constant. Confirm against upstream before changing. */
449 int best_score=256*256*256*64;
451 const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
452 int prev_x, prev_y, prev_ref;
/* checkerboard ordering: alternate MB parity per pass */
454 if((mb_x^mb_y^pass)&1) continue;
456 if(fixed[mb_xy]==MV_FROZEN) continue;
457 assert(!IS_INTRA(s->current_picture.f.mb_type[mb_xy]));
458 assert(s->last_picture_ptr && s->last_picture_ptr->f.data[0]);
/* only work on MBs that have at least one frozen neighbour */
461 if(mb_x>0 && fixed[mb_xy-1 ]==MV_FROZEN) j=1;
462 if(mb_x+1<mb_width && fixed[mb_xy+1 ]==MV_FROZEN) j=1;
463 if(mb_y>0 && fixed[mb_xy-mb_stride]==MV_FROZEN) j=1;
464 if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]==MV_FROZEN) j=1;
/* in later passes, skip unless some neighbour changed last time */
468 if(mb_x>0 && fixed[mb_xy-1 ]==MV_CHANGED) j=1;
469 if(mb_x+1<mb_width && fixed[mb_xy+1 ]==MV_CHANGED) j=1;
470 if(mb_y>0 && fixed[mb_xy-mb_stride]==MV_CHANGED) j=1;
471 if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]==MV_CHANGED) j=1;
472 if(j==0 && pass>1) continue;
/* collect MV/ref candidates from the four fixed neighbours */
476 if(mb_x>0 && fixed[mb_xy-1]){
477 mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_step][0];
478 mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_step][1];
479 ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy-1)];
482 if(mb_x+1<mb_width && fixed[mb_xy+1]){
483 mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_step][0];
484 mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_step][1];
485 ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy+1)];
488 if(mb_y>0 && fixed[mb_xy-mb_stride]){
489 mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][0];
490 mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][1];
491 ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy-s->mb_stride)];
494 if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
495 mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][0];
496 mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][1];
497 ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy+s->mb_stride)];
500 if(pred_count==0) continue;
/* add mean and median of the neighbour candidates as extra predictors,
   but only when all candidates use the same reference */
503 int sum_x=0, sum_y=0, sum_r=0;
504 int max_x, max_y, min_x, min_y, max_r, min_r;
506 for(j=0; j<pred_count; j++){
507 sum_x+= mv_predictor[j][0];
508 sum_y+= mv_predictor[j][1];
510 if(j && ref[j] != ref[j-1])
511 goto skip_mean_and_median;
/* mean predictor */
515 mv_predictor[pred_count][0] = sum_x/j;
516 mv_predictor[pred_count][1] = sum_y/j;
517 ref [pred_count] = sum_r/j;
/* median predictor: drop min and max of each component */
521 min_y= min_x= min_r= 99999;
522 max_y= max_x= max_r=-99999;
524 min_x=min_y=max_x=max_y=min_r=max_r=0;
526 for(j=0; j<pred_count; j++){
527 max_x= FFMAX(max_x, mv_predictor[j][0]);
528 max_y= FFMAX(max_y, mv_predictor[j][1]);
529 max_r= FFMAX(max_r, ref[j]);
530 min_x= FFMIN(min_x, mv_predictor[j][0]);
531 min_y= FFMIN(min_y, mv_predictor[j][1]);
532 min_r= FFMIN(min_r, ref[j]);
534 mv_predictor[pred_count+1][0] = sum_x - max_x - min_x;
535 mv_predictor[pred_count+1][1] = sum_y - max_y - min_y;
536 ref [pred_count+1] = sum_r - max_r - min_r;
539 mv_predictor[pred_count+1][0] /= 2;
540 mv_predictor[pred_count+1][1] /= 2;
541 ref [pred_count+1] /= 2;
545 skip_mean_and_median:
/* last-MV predictor (note the '&& 0': this branch is currently disabled) */
550 if (!fixed[mb_xy] && 0) {
551 if (s->avctx->codec_id == CODEC_ID_H264) {
/* with frame-threading, wait until the reference row is decoded */
554 ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
557 if (!s->last_picture.f.motion_val[0] ||
558 !s->last_picture.f.ref_index[0])
560 prev_x = s->last_picture.f.motion_val[0][mot_index][0];
561 prev_y = s->last_picture.f.motion_val[0][mot_index][1];
562 prev_ref = s->last_picture.f.ref_index[0][4*mb_xy];
564 prev_x = s->current_picture.f.motion_val[0][mot_index][0];
565 prev_y = s->current_picture.f.motion_val[0][mot_index][1];
566 prev_ref = s->current_picture.f.ref_index[0][4*mb_xy];
570 mv_predictor[pred_count][0]= prev_x;
571 mv_predictor[pred_count][1]= prev_y;
572 ref [pred_count] = prev_ref;
576 s->mv_dir = MV_DIR_FORWARD;
578 s->mv_type = MV_TYPE_16X16;
581 s->dsp.clear_blocks(s->block[0]);
/* try each candidate: reconstruct the MB and score the boundary SAD
   against the already-fixed neighbours */
586 for(j=0; j<pred_count; j++){
588 uint8_t *src = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
590 s->current_picture.f.motion_val[0][mot_index][0] = s->mv[0][0][0] = mv_predictor[j][0];
591 s->current_picture.f.motion_val[0][mot_index][1] = s->mv[0][0][1] = mv_predictor[j][1];
593 if(ref[j]<0) //predictor intra or otherwise not available
596 decode_mb(s, ref[j]);
598 if(mb_x>0 && fixed[mb_xy-1]){
601 score += FFABS(src[k*s->linesize-1 ]-src[k*s->linesize ]);
603 if(mb_x+1<mb_width && fixed[mb_xy+1]){
606 score += FFABS(src[k*s->linesize+15]-src[k*s->linesize+16]);
608 if(mb_y>0 && fixed[mb_xy-mb_stride]){
611 score += FFABS(src[k-s->linesize ]-src[k ]);
613 if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
616 score += FFABS(src[k+s->linesize*15]-src[k+s->linesize*16]);
619 if(score <= best_score){ // <= will favor the last MV
/* commit the best candidate: store MV over the whole MB and re-render */
624 score_sum+= best_score;
625 s->mv[0][0][0]= mv_predictor[best_pred][0];
626 s->mv[0][0][1]= mv_predictor[best_pred][1];
628 for(i=0; i<mot_step; i++)
629 for(j=0; j<mot_step; j++){
630 s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
631 s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
634 decode_mb(s, ref[best_pred]);
637 if(s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y){
638 fixed[mb_xy]=MV_CHANGED;
641 fixed[mb_xy]=MV_UNCHANGED;
645 // printf(".%d/%d", changed, score_sum); fflush(stdout);
/* freeze everything that was settled in this depth iteration */
651 for(i=0; i<s->mb_num; i++){
652 int mb_xy= s->mb_index2xy[i];
654 fixed[mb_xy]=MV_FROZEN;
656 // printf(":"); fflush(stdout);
/**
 * Decide whether damaged MBs should be concealed spatially (intra) or
 * temporally (inter), by comparing a sampled SAD of the current picture
 * against the last picture.
 * @return 1 if intra/spatial concealment is the better guess, 0 otherwise
 */
662 static int is_intra_more_likely(MpegEncContext *s){
663 int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;
665 if (!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) return 1; //no previous frame available -> use spatial prediction
/* count MBs whose DC and MV are not both damaged */
668 for(i=0; i<s->mb_num; i++){
669 const int mb_xy= s->mb_index2xy[i];
670 const int error= s->error_status_table[mb_xy];
671 if(!((error&ER_DC_ERROR) && (error&ER_MV_ERROR)))
/* H.264: temporal concealment needs a usable reference list */
675 if(s->codec_id == CODEC_ID_H264){
676 H264Context *h= (void*)s;
677 if (h->list_count <= 0 || h->ref_count[0] <= 0 || !h->ref_list[0][0].f.data[0])
681 if(undamaged_count < 5) return 0; //almost all MBs damaged -> use temporal prediction
683 //prevent dsp.sad() check, that requires access to the image
684 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration && s->pict_type == AV_PICTURE_TYPE_I)
687 skip_amount= FFMAX(undamaged_count/50, 1); //check only up to 50 MBs
/* sample undamaged MBs and vote: positive favours intra, negative inter */
691 for(mb_y= 0; mb_y<s->mb_height-1; mb_y++){
692 for(mb_x= 0; mb_x<s->mb_width; mb_x++){
694 const int mb_xy= mb_x + mb_y*s->mb_stride;
696 error= s->error_status_table[mb_xy];
697 if((error&ER_DC_ERROR) && (error&ER_MV_ERROR))
698 continue; //skip damaged
701 if((j%skip_amount) != 0) continue; //skip a few to speed things up
703 if(s->pict_type==AV_PICTURE_TYPE_I){
704 uint8_t *mb_ptr = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
705 uint8_t *last_mb_ptr= s->last_picture.f.data [0] + mb_x*16 + mb_y*16*s->linesize;
707 if (s->avctx->codec_id == CODEC_ID_H264) {
/* with frame-threading, wait for the reference rows to be decoded */
710 ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
/* temporal distance: current MB vs co-located MB in the last picture */
713 is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr , s->linesize, 16);
714 // FIXME need await_progress() here
/* spatial distance: co-located MB vs the MB one row below it */
715 is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16);
717 if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
724 //printf("is_intra_likely: %d type:%d\n", is_intra_likely, s->pict_type);
725 return is_intra_likely > 0;
/* Initialize the error tracking state for a new frame: mark every MB as
   fully damaged until slices report otherwise via ff_er_add_slice(). */
728 void ff_er_frame_start(MpegEncContext *s){
729 if(!s->err_recognition) return;
731 memset(s->error_status_table, ER_MB_ERROR|VP_START|ER_MB_END, s->mb_stride*s->mb_height*sizeof(uint8_t));
/* 3 partitions (AC/DC/MV) per MB can each be in error */
732 s->error_count= 3*s->mb_num;
733 s->error_occurred = 0;
738 * @param endx x component of the last macroblock, can be -1 for the last of the previous line
739 * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is assumed that no earlier end or
740 * error of the same type occurred
742 void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status){
/* clip the raster MB indices into valid range (endx may be -1, see doc) */
743 const int start_i= av_clip(startx + starty * s->mb_width , 0, s->mb_num-1);
744 const int end_i = av_clip(endx + endy * s->mb_width , 0, s->mb_num);
745 const int start_xy= s->mb_index2xy[start_i];
746 const int end_xy = s->mb_index2xy[end_i];
/* hardware acceleration does its own concealment */
749 if(s->avctx->hwaccel)
752 if(start_i > end_i || start_xy > end_xy){
753 av_log(s->avctx, AV_LOG_ERROR, "internal error, slice end before start\n");
757 if(!s->err_recognition) return;
/* for each partition type reported, clear its error bits from the mask and
   credit the slice's MBs against the outstanding error count */
760 if(status & (ER_AC_ERROR|ER_AC_END)){
761 mask &= ~(ER_AC_ERROR|ER_AC_END);
762 s->error_count -= end_i - start_i + 1;
764 if(status & (ER_DC_ERROR|ER_DC_END)){
765 mask &= ~(ER_DC_ERROR|ER_DC_END);
766 s->error_count -= end_i - start_i + 1;
768 if(status & (ER_MV_ERROR|ER_MV_END)){
769 mask &= ~(ER_MV_ERROR|ER_MV_END);
770 s->error_count -= end_i - start_i + 1;
773 if(status & ER_MB_ERROR) {
774 s->error_occurred = 1;
/* force concealment of the whole frame */
775 s->error_count= INT_MAX;
779 memset(&s->error_status_table[start_xy], 0, (end_xy - start_xy) * sizeof(uint8_t));
782 for(i=start_xy; i<end_xy; i++){
783 s->error_status_table[ i ] &= mask;
787 if(end_i == s->mb_num)
788 s->error_count= INT_MAX;
/* the end MB keeps the reported status bits */
790 s->error_status_table[end_xy] &= mask;
791 s->error_status_table[end_xy] |= status;
794 s->error_status_table[start_xy] |= VP_START;
/* if the MB just before this slice did not end cleanly, assume damage */
796 if(start_xy > 0 && s->avctx->thread_count <= 1 && s->avctx->skip_top*s->mb_width < start_i){
797 int prev_status= s->error_status_table[ s->mb_index2xy[start_i - 1] ];
799 prev_status &= ~ VP_START;
800 if(prev_status != (ER_MV_END|ER_DC_END|ER_AC_END)) s->error_count= INT_MAX;
804 void ff_er_frame_end(MpegEncContext *s){
805 int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
807 int threshold_part[4]= {100,100,100};
810 int size = s->b8_stride * 2 * s->mb_height;
811 Picture *pic= s->current_picture_ptr;
813 if(!s->err_recognition || s->error_count==0 || s->avctx->lowres ||
815 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
816 s->picture_structure != PICT_FRAME || // we do not support ER of field pictures yet, though it should not crash if enabled
817 s->error_count==3*s->mb_width*(s->avctx->skip_top + s->avctx->skip_bottom)) return;
819 if (s->current_picture.f.motion_val[0] == NULL) {
820 av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
823 pic->f.ref_index[i] = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
824 pic->motion_val_base[i]= av_mallocz((size+4) * 2 * sizeof(uint16_t));
825 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
827 pic->f.motion_subsample_log2 = 3;
828 s->current_picture= *s->current_picture_ptr;
831 if(s->avctx->debug&FF_DEBUG_ER){
832 for(mb_y=0; mb_y<s->mb_height; mb_y++){
833 for(mb_x=0; mb_x<s->mb_width; mb_x++){
834 int status= s->error_status_table[mb_x + mb_y*s->mb_stride];
836 av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status);
838 av_log(s->avctx, AV_LOG_DEBUG, "\n");
843 /* handle overlapping slices */
844 for(error_type=1; error_type<=3; error_type++){
847 for(i=s->mb_num-1; i>=0; i--){
848 const int mb_xy= s->mb_index2xy[i];
849 int error= s->error_status_table[mb_xy];
851 if(error&(1<<error_type))
853 if(error&(8<<error_type))
857 s->error_status_table[mb_xy]|= 1<<error_type;
865 /* handle slices with partitions of different length */
866 if(s->partitioned_frame){
869 for(i=s->mb_num-1; i>=0; i--){
870 const int mb_xy= s->mb_index2xy[i];
871 int error= s->error_status_table[mb_xy];
875 if((error&ER_MV_END) || (error&ER_DC_END) || (error&ER_AC_ERROR))
879 s->error_status_table[mb_xy]|= ER_AC_ERROR;
886 /* handle missing slices */
887 if(s->err_recognition&AV_EF_EXPLODE){
890 for(i=s->mb_num-2; i>=s->mb_width+100; i--){ //FIXME +100 hack
891 const int mb_xy= s->mb_index2xy[i];
892 int error1= s->error_status_table[mb_xy ];
893 int error2= s->error_status_table[s->mb_index2xy[i+1]];
898 if( error2==(VP_START|ER_MB_ERROR|ER_MB_END)
899 && error1!=(VP_START|ER_MB_ERROR|ER_MB_END)
900 && ((error1&ER_AC_END) || (error1&ER_DC_END) || (error1&ER_MV_END))){ //end & uninit
905 s->error_status_table[mb_xy]|= ER_MB_ERROR;
910 /* backward mark errors */
912 for(error_type=1; error_type<=3; error_type++){
913 for(i=s->mb_num-1; i>=0; i--){
914 const int mb_xy= s->mb_index2xy[i];
915 int error= s->error_status_table[mb_xy];
917 if(!s->mbskip_table[mb_xy]) //FIXME partition specific
919 if(error&(1<<error_type))
922 if(s->partitioned_frame){
923 if(distance < threshold_part[error_type-1])
924 s->error_status_table[mb_xy]|= 1<<error_type;
926 if(distance < threshold)
927 s->error_status_table[mb_xy]|= 1<<error_type;
936 /* forward mark errors */
938 for(i=0; i<s->mb_num; i++){
939 const int mb_xy= s->mb_index2xy[i];
940 int old_error= s->error_status_table[mb_xy];
942 if(old_error&VP_START)
943 error= old_error& ER_MB_ERROR;
945 error|= old_error& ER_MB_ERROR;
946 s->error_status_table[mb_xy]|= error;
950 /* handle not partitioned case */
951 if(!s->partitioned_frame){
952 for(i=0; i<s->mb_num; i++){
953 const int mb_xy= s->mb_index2xy[i];
954 error= s->error_status_table[mb_xy];
955 if(error&ER_MB_ERROR)
957 s->error_status_table[mb_xy]= error;
962 dc_error= ac_error= mv_error=0;
963 for(i=0; i<s->mb_num; i++){
964 const int mb_xy= s->mb_index2xy[i];
965 error= s->error_status_table[mb_xy];
966 if(error&ER_DC_ERROR) dc_error ++;
967 if(error&ER_AC_ERROR) ac_error ++;
968 if(error&ER_MV_ERROR) mv_error ++;
970 av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors\n", dc_error, ac_error, mv_error);
972 is_intra_likely= is_intra_more_likely(s);
974 /* set unknown mb-type to most likely */
975 for(i=0; i<s->mb_num; i++){
976 const int mb_xy= s->mb_index2xy[i];
977 error= s->error_status_table[mb_xy];
978 if(!((error&ER_DC_ERROR) && (error&ER_MV_ERROR)))
982 s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
984 s->current_picture.f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
987 // change inter to intra blocks if no reference frames are available
988 if (!s->last_picture.f.data[0] && !s->next_picture.f.data[0])
989 for(i=0; i<s->mb_num; i++){
990 const int mb_xy= s->mb_index2xy[i];
991 if (!IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
992 s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
995 /* handle inter blocks with damaged AC */
996 for(mb_y=0; mb_y<s->mb_height; mb_y++){
997 for(mb_x=0; mb_x<s->mb_width; mb_x++){
998 const int mb_xy= mb_x + mb_y * s->mb_stride;
999 const int mb_type= s->current_picture.f.mb_type[mb_xy];
1000 int dir = !s->last_picture.f.data[0];
1001 error= s->error_status_table[mb_xy];
1003 if(IS_INTRA(mb_type)) continue; //intra
1004 if(error&ER_MV_ERROR) continue; //inter with damaged MV
1005 if(!(error&ER_AC_ERROR)) continue; //undamaged inter
1007 s->mv_dir = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
1010 if(IS_8X8(mb_type)){
1011 int mb_index= mb_x*2 + mb_y*2*s->b8_stride;
1013 s->mv_type = MV_TYPE_8X8;
1015 s->mv[0][j][0] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
1016 s->mv[0][j][1] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
1019 s->mv_type = MV_TYPE_16X16;
1020 s->mv[0][0][0] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0];
1021 s->mv[0][0][1] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1];
1024 s->dsp.clear_blocks(s->block[0]);
1028 decode_mb(s, 0/*FIXME h264 partitioned slices need this set*/);
1033 if(s->pict_type==AV_PICTURE_TYPE_B){
1034 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1035 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1036 int xy= mb_x*2 + mb_y*2*s->b8_stride;
1037 const int mb_xy= mb_x + mb_y * s->mb_stride;
1038 const int mb_type= s->current_picture.f.mb_type[mb_xy];
1039 error= s->error_status_table[mb_xy];
1041 if(IS_INTRA(mb_type)) continue;
1042 if(!(error&ER_MV_ERROR)) continue; //inter with undamaged MV
1043 if(!(error&ER_AC_ERROR)) continue; //undamaged inter
1045 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD;
1046 if(!s->last_picture.f.data[0]) s->mv_dir &= ~MV_DIR_FORWARD;
1047 if(!s->next_picture.f.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD;
1049 s->mv_type = MV_TYPE_16X16;
1053 int time_pp= s->pp_time;
1054 int time_pb= s->pb_time;
1056 if (s->avctx->codec_id == CODEC_ID_H264) {
1059 ff_thread_await_progress((AVFrame *) s->next_picture_ptr,
1062 s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] * time_pb / time_pp;
1063 s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] * time_pb / time_pp;
1064 s->mv[1][0][0] = s->next_picture.f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
1065 s->mv[1][0][1] = s->next_picture.f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
1073 s->dsp.clear_blocks(s->block[0]);
1082 /* the filters below are not XvMC compatible, skip them */
1083 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1085 /* fill DC for inter blocks */
1086 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1087 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1088 int dc, dcu, dcv, y, n;
1090 uint8_t *dest_y, *dest_cb, *dest_cr;
1091 const int mb_xy= mb_x + mb_y * s->mb_stride;
1092 const int mb_type = s->current_picture.f.mb_type[mb_xy];
1094 error= s->error_status_table[mb_xy];
1096 if(IS_INTRA(mb_type) && s->partitioned_frame) continue;
1097 // if(error&ER_MV_ERROR) continue; //inter data damaged FIXME is this good?
1099 dest_y = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
1100 dest_cb = s->current_picture.f.data[1] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
1101 dest_cr = s->current_picture.f.data[2] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
1103 dc_ptr= &s->dc_val[0][mb_x*2 + mb_y*2*s->b8_stride];
1109 dc+= dest_y[x + (n&1)*8 + (y + (n>>1)*8)*s->linesize];
1112 dc_ptr[(n&1) + (n>>1)*s->b8_stride]= (dc+4)>>3;
1119 dcu+=dest_cb[x + y*(s->uvlinesize)];
1120 dcv+=dest_cr[x + y*(s->uvlinesize)];
1123 s->dc_val[1][mb_x + mb_y*s->mb_stride]= (dcu+4)>>3;
1124 s->dc_val[2][mb_x + mb_y*s->mb_stride]= (dcv+4)>>3;
1128 /* guess DC for damaged blocks */
1129 guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1);
1130 guess_dc(s, s->dc_val[1], s->mb_width , s->mb_height , s->mb_stride, 0);
1131 guess_dc(s, s->dc_val[2], s->mb_width , s->mb_height , s->mb_stride, 0);
1133 /* filter luma DC */
1134 filter181(s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride);
1137 /* render DC only intra */
1138 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1139 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1140 uint8_t *dest_y, *dest_cb, *dest_cr;
1141 const int mb_xy= mb_x + mb_y * s->mb_stride;
1142 const int mb_type = s->current_picture.f.mb_type[mb_xy];
1144 error= s->error_status_table[mb_xy];
1146 if(IS_INTER(mb_type)) continue;
1147 if(!(error&ER_AC_ERROR)) continue; //undamaged
1149 dest_y = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
1150 dest_cb = s->current_picture.f.data[1] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
1151 dest_cr = s->current_picture.f.data[2] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
1153 put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
1158 if(s->avctx->error_concealment&FF_EC_DEBLOCK){
1159 /* filter horizontal block boundaries */
1160 h_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
1161 h_block_filter(s, s->current_picture.f.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
1162 h_block_filter(s, s->current_picture.f.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
1164 /* filter vertical block boundaries */
1165 v_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
1166 v_block_filter(s, s->current_picture.f.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
1167 v_block_filter(s, s->current_picture.f.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
1171 /* clean a few tables */
1172 for(i=0; i<s->mb_num; i++){
1173 const int mb_xy= s->mb_index2xy[i];
1174 int error= s->error_status_table[mb_xy];
1176 if(s->pict_type!=AV_PICTURE_TYPE_B && (error&(ER_DC_ERROR|ER_MV_ERROR|ER_AC_ERROR))){
1177 s->mbskip_table[mb_xy]=0;
1179 s->mbintra_table[mb_xy]=1;