2 * Error resilience / concealment
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * Error resilience / concealment.
32 #include "mpegvideo.h"
34 #include "rectangle.h"
38 * H264 redefines mb_intra so it is not mistakenly used (it is uninitialized in h264)
39 * but error concealment must support both h264 and h263 thus we must undo this
/**
 * Reconstruct one macroblock of the current picture at (s->mb_x, s->mb_y).
 * Computes the luma/chroma destination pointers, then dispatches either to
 * the H.264 high-level MB decoder (after priming the caches it reads) or to
 * the generic MPEG MB decoder.
 * @param ref reference index to use for the concealment MV (H.264 path only)
 * NOTE(review): several statements of this function are elided in this excerpt.
 */
43 static void decode_mb(MpegEncContext *s, int ref){
44 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
/* chroma destinations account for the chroma subsampling shifts */
45 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
46 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
48 if(CONFIG_H264_DECODER && s->codec_id == CODEC_ID_H264){
49 H264Context *h= (void*)s;
50 h->mb_xy= s->mb_x + s->mb_y*s->mb_stride;
/* concealment reconstructs from prediction only, so clear the residual caches */
51 memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
53 if(ref >= h->ref_count[0]) //FIXME it is possible albeit uncommon that slice references differ between slices, we take the easy approach and ignore it for now. If this turns out to have any relevance in practice then correct remapping should be added
/* propagate the chosen reference and MV into the per-MB caches the decoder reads */
55 fill_rectangle(&s->current_picture.f.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1);
56 fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
57 fill_rectangle(h->mv_cache[0][ scan8[0] ], 4, 4, 8, pack16to32(s->mv[0][0][0],s->mv[0][0][1]), 4);
59 ff_h264_hl_decode_mb(h);
/* non-H.264 codecs take the generic MPEG path (else branch elided here) */
62 MPV_decode_mb(s, s->block);
67 * @param stride the number of MVs to get to the next row
68 * @param mv_step the number of MVs per row or column in a macroblock
/* NOTE(review): fills in the motion-vector array geometry for the current
 * codec; the non-H.264 branch and the *mv_step assignments are elided in
 * this excerpt. */
70 static void set_mv_strides(MpegEncContext *s, int *mv_step, int *stride){
71 if(s->codec_id == CODEC_ID_H264){
72 H264Context *h= (void*)s;
/* this code path relies on quarter-sample MV resolution for H.264 */
73 assert(s->quarter_sample);
78 *stride= s->b8_stride;
83 * replaces the current MB with a flat dc only version.
85 static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int mb_x, int mb_y)
87 int dc, dcu, dcv, y, i;
/* luma: one stored DC per 8x8 block, four blocks per macroblock
 * (the surrounding i/y/x loop headers are elided in this excerpt) */
89 dc= s->dc_val[0][mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*s->b8_stride];
/* clamp so dc/8 stays inside the 8-bit pixel range (255*8 == 2040) */
91 else if(dc>2040) dc=2040;
95 dest_y[x + (i&1)*8 + (y + (i>>1)*8)*s->linesize]= dc/8;
/* chroma: a single DC per 8x8 chroma block */
99 dcu = s->dc_val[1][mb_x + mb_y*s->mb_stride];
100 dcv = s->dc_val[2][mb_x + mb_y*s->mb_stride];
102 else if(dcu>2040) dcu=2040;
104 else if(dcv>2040) dcv=2040;
108 dest_cb[x + y*(s->uvlinesize)]= dcu/8;
109 dest_cr[x + y*(s->uvlinesize)]= dcv/8;
/**
 * Smooth a DC plane in place with a separable 3-tap filter.
 * Visible taps are +8*center and -1*next, scaled by 10923/65536 ~= 1/6,
 * i.e. a (-1, 8, -1)/6 kernel (the -prev_dc tap is on an elided line —
 * verify); border samples are left untouched.
 */
114 static void filter181(int16_t *data, int width, int height, int stride){
117 /* horizontal filter */
118 for(y=1; y<height-1; y++){
119 int prev_dc= data[0 + y*stride];
121 for(x=1; x<width-1; x++){
125 + data[x + y*stride]*8
126 - data[x + 1 + y*stride];
/* 10923/65536 ~= 1/6, with rounding */
127 dc= (dc*10923 + 32768)>>16;
/* save the unfiltered value so the next sample filters against original data */
128 prev_dc= data[x + y*stride];
129 data[x + y*stride]= dc;
133 /* vertical filter */
134 for(x=1; x<width-1; x++){
135 int prev_dc= data[x];
137 for(y=1; y<height-1; y++){
141 + data[x + y *stride]*8
142 - data[x + (y+1)*stride];
143 dc= (dc*10923 + 32768)>>16;
144 prev_dc= data[x + y*stride];
145 data[x + y*stride]= dc;
151 * guess the dc of blocks which do not have an undamaged dc
152 * @param w width in 8 pixel blocks
153 * @param h height in 8 pixel blocks
/* For every DC belonging to a damaged intra block, scan right/left/down/up
 * for the nearest block whose DC is trustworthy (non-intra, or without
 * DC_ERROR), then replace the DC by the average of the four candidates,
 * weighted by the inverse of their distance. */
155 static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, int is_luma){
158 for(b_y=0; b_y<h; b_y++){
159 for(b_x=0; b_x<w; b_x++){
/* color[] = candidate DC per direction, distance[] = how far it was found */
160 int color[4]={1024,1024,1024,1024};
161 int distance[4]={9999,9999,9999,9999};
162 int mb_index, error, j;
163 int64_t guess, weight_sum;
/* when is_luma there are 2x2 DC blocks per MB, so halve coords to get the MB */
165 mb_index= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
167 error= s->error_status_table[mb_index];
169 if(IS_INTER(s->current_picture.f.mb_type[mb_index])) continue; //inter
170 if(!(error&DC_ERROR)) continue; //dc-ok
/* scan right */
173 for(j=b_x+1; j<w; j++){
174 int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
175 int error_j= s->error_status_table[mb_index_j];
176 int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
177 if(intra_j==0 || !(error_j&DC_ERROR)){
178 color[0]= dc[j + b_y*stride];
/* scan left */
185 for(j=b_x-1; j>=0; j--){
186 int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
187 int error_j= s->error_status_table[mb_index_j];
188 int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
189 if(intra_j==0 || !(error_j&DC_ERROR)){
190 color[1]= dc[j + b_y*stride];
/* scan down */
197 for(j=b_y+1; j<h; j++){
198 int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
199 int error_j= s->error_status_table[mb_index_j];
200 int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
201 if(intra_j==0 || !(error_j&DC_ERROR)){
202 color[2]= dc[b_x + j*stride];
/* scan up */
209 for(j=b_y-1; j>=0; j--){
210 int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
211 int error_j= s->error_status_table[mb_index_j];
212 int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
213 if(intra_j==0 || !(error_j&DC_ERROR)){
214 color[3]= dc[b_x + j*stride];
/* fixed-point weight proportional to 1/distance (loop header elided) */
223 int64_t weight= 256*256*256*16/distance[j];
224 guess+= weight*(int64_t)color[j];
/* rounded weighted mean */
227 guess= (guess + weight_sum/2) / weight_sum;
229 dc[b_x + b_y*stride]= guess;
235 * simple horizontal deblocking filter used for error resilience
236 * @param w width in 8 pixel blocks
237 * @param h height in 8 pixel blocks
239 static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int stride, int is_luma){
240 int b_x, b_y, mvx_stride, mvy_stride;
/* cm clips arbitrary ints back into the 0..255 pixel range */
241 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
242 set_mv_strides(s, &mvx_stride, &mvy_stride);
/* convert MV strides from luma-block units to this plane's block units */
243 mvx_stride >>= is_luma;
244 mvy_stride *= mvx_stride;
246 for(b_y=0; b_y<h; b_y++){
247 for(b_x=0; b_x<w-1; b_x++){
249 int left_status = s->error_status_table[( b_x >>is_luma) + (b_y>>is_luma)*s->mb_stride];
250 int right_status= s->error_status_table[((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride];
251 int left_intra = IS_INTRA(s->current_picture.f.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
252 int right_intra = IS_INTRA(s->current_picture.f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
253 int left_damage = left_status&(DC_ERROR|AC_ERROR|MV_ERROR);
254 int right_damage= right_status&(DC_ERROR|AC_ERROR|MV_ERROR);
255 int offset= b_x*8 + b_y*stride*8;
256 int16_t *left_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride* b_x ];
257 int16_t *right_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride*(b_x+1)];
259 if(!(left_damage||right_damage)) continue; // both undamaged
/* skip edges where both sides are inter with near-identical MVs.
 * NOTE(review): the '+' in the vertical-component term looks like it should
 * be '-' for a similarity test — same pattern exists upstream; verify. */
261 if( (!left_intra) && (!right_intra)
262 && FFABS(left_mv[0]-right_mv[0]) + FFABS(left_mv[1]+right_mv[1]) < 2) continue;
/* a/c are gradients just inside each block, b spans the block edge */
267 a= dst[offset + 7 + y*stride] - dst[offset + 6 + y*stride];
268 b= dst[offset + 8 + y*stride] - dst[offset + 7 + y*stride];
269 c= dst[offset + 9 + y*stride] - dst[offset + 8 + y*stride];
/* correction strength: edge step minus the average of its neighbours */
271 d= FFABS(b) - ((FFABS(a) + FFABS(c) + 1)>>1);
277 if(!(left_damage && right_damage))
/* taper the correction into the damaged side(s): 7/16, 5/16, 3/16, 1/16 */
281 dst[offset + 7 + y*stride] = cm[dst[offset + 7 + y*stride] + ((d*7)>>4)];
282 dst[offset + 6 + y*stride] = cm[dst[offset + 6 + y*stride] + ((d*5)>>4)];
283 dst[offset + 5 + y*stride] = cm[dst[offset + 5 + y*stride] + ((d*3)>>4)];
284 dst[offset + 4 + y*stride] = cm[dst[offset + 4 + y*stride] + ((d*1)>>4)];
287 dst[offset + 8 + y*stride] = cm[dst[offset + 8 + y*stride] - ((d*7)>>4)];
288 dst[offset + 9 + y*stride] = cm[dst[offset + 9 + y*stride] - ((d*5)>>4)];
289 dst[offset + 10+ y*stride] = cm[dst[offset +10 + y*stride] - ((d*3)>>4)];
290 dst[offset + 11+ y*stride] = cm[dst[offset +11 + y*stride] - ((d*1)>>4)];
298 * simple vertical deblocking filter used for error resilience
299 * @param w width in 8 pixel blocks
300 * @param h height in 8 pixel blocks
302 static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int stride, int is_luma){
303 int b_x, b_y, mvx_stride, mvy_stride;
/* cm clips arbitrary ints back into the 0..255 pixel range */
304 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
305 set_mv_strides(s, &mvx_stride, &mvy_stride);
/* convert MV strides from luma-block units to this plane's block units */
306 mvx_stride >>= is_luma;
307 mvy_stride *= mvx_stride;
309 for(b_y=0; b_y<h-1; b_y++){
310 for(b_x=0; b_x<w; b_x++){
312 int top_status = s->error_status_table[(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_stride];
313 int bottom_status= s->error_status_table[(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride];
314 int top_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
315 int bottom_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
316 int top_damage = top_status&(DC_ERROR|AC_ERROR|MV_ERROR);
317 int bottom_damage= bottom_status&(DC_ERROR|AC_ERROR|MV_ERROR);
318 int offset= b_x*8 + b_y*stride*8;
319 int16_t *top_mv = s->current_picture.f.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
320 int16_t *bottom_mv = s->current_picture.f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
322 if(!(top_damage||bottom_damage)) continue; // both undamaged
/* skip edges where both sides are inter with near-identical MVs.
 * NOTE(review): '+' in the vertical term mirrors h_block_filter — looks like
 * it should be '-'; verify. */
324 if( (!top_intra) && (!bottom_intra)
325 && FFABS(top_mv[0]-bottom_mv[0]) + FFABS(top_mv[1]+bottom_mv[1]) < 2) continue;
/* a/c are gradients just inside each block, b spans the block edge */
330 a= dst[offset + x + 7*stride] - dst[offset + x + 6*stride];
331 b= dst[offset + x + 8*stride] - dst[offset + x + 7*stride];
332 c= dst[offset + x + 9*stride] - dst[offset + x + 8*stride];
/* correction strength: edge step minus the average of its neighbours */
334 d= FFABS(b) - ((FFABS(a) + FFABS(c)+1)>>1);
340 if(!(top_damage && bottom_damage))
/* taper the correction into the damaged side(s): 7/16, 5/16, 3/16, 1/16 */
344 dst[offset + x + 7*stride] = cm[dst[offset + x + 7*stride] + ((d*7)>>4)];
345 dst[offset + x + 6*stride] = cm[dst[offset + x + 6*stride] + ((d*5)>>4)];
346 dst[offset + x + 5*stride] = cm[dst[offset + x + 5*stride] + ((d*3)>>4)];
347 dst[offset + x + 4*stride] = cm[dst[offset + x + 4*stride] + ((d*1)>>4)];
350 dst[offset + x + 8*stride] = cm[dst[offset + x + 8*stride] - ((d*7)>>4)];
351 dst[offset + x + 9*stride] = cm[dst[offset + x + 9*stride] - ((d*5)>>4)];
352 dst[offset + x + 10*stride] = cm[dst[offset + x + 10*stride] - ((d*3)>>4)];
353 dst[offset + x + 11*stride] = cm[dst[offset + x + 11*stride] - ((d*1)>>4)];
/**
 * Conceal damaged inter MBs by guessing their motion vectors.
 * First, MBs with trustworthy MVs (intra, or without MV_ERROR) are marked
 * frozen and damaged MBs are seeded with the co-located MV of the previous
 * picture. If MV guessing is disabled or too few MBs survived, everything
 * damaged is concealed with a zero MV; otherwise an iterative pass grows
 * outward from the frozen MBs, trying neighbour/mean/median candidates per
 * MB and keeping the one with the lowest boundary-matching error.
 * NOTE(review): many statements are elided in this excerpt.
 */
360 static void guess_mv(MpegEncContext *s){
/* NOTE(review): stack VLA — can get large for high resolutions; consider an
 * allocated buffer (upstream later did this). */
361 uint8_t fixed[s->mb_stride * s->mb_height];
364 #define MV_UNCHANGED 1
365 const int mb_stride = s->mb_stride;
366 const int mb_width = s->mb_width;
367 const int mb_height= s->mb_height;
368 int i, depth, num_avail;
369 int mb_x, mb_y, mot_step, mot_stride;
371 set_mv_strides(s, &mot_step, &mot_stride);
/* classify every MB and seed damaged ones from the last picture */
374 for(i=0; i<s->mb_num; i++){
375 const int mb_xy= s->mb_index2xy[ i ];
377 int error= s->error_status_table[mb_xy];
379 if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check
380 if(!(error&MV_ERROR)) f=MV_FROZEN; //inter with undamaged MV
385 else if(s->last_picture.f.data[0] && s->last_picture.f.motion_val[0]){
386 const int mb_y= mb_xy / s->mb_stride;
387 const int mb_x= mb_xy % s->mb_stride;
388 const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
/* temporal fallback: copy the co-located MV/ref of the previous picture */
389 s->current_picture.f.motion_val[0][mot_index][0]= s->last_picture.f.motion_val[0][mot_index][0];
390 s->current_picture.f.motion_val[0][mot_index][1]= s->last_picture.f.motion_val[0][mot_index][1];
391 s->current_picture.f.ref_index[0][4*mb_xy] = s->last_picture.f.ref_index[0][4*mb_xy];
/* guessing disabled, or not enough intact MBs: conceal with zero MVs */
395 if((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) || num_avail <= mb_width/2){
396 for(mb_y=0; mb_y<s->mb_height; mb_y++){
397 for(mb_x=0; mb_x<s->mb_width; mb_x++){
398 const int mb_xy= mb_x + mb_y*s->mb_stride;
400 if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) continue;
401 if(!(s->error_status_table[mb_xy]&MV_ERROR)) continue;
403 s->mv_dir = s->last_picture.f.data[0] ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
405 s->mv_type = MV_TYPE_16X16;
408 s->dsp.clear_blocks(s->block[0]);
/* iterative refinement: repeat until no damaged MB borders a fixed one */
420 for(depth=0;; depth++){
421 int changed, pass, none_left;
425 for(pass=0; (changed || pass<2) && pass<10; pass++){
430 for(mb_y=0; mb_y<s->mb_height; mb_y++){
431 for(mb_x=0; mb_x<s->mb_width; mb_x++){
432 const int mb_xy= mb_x + mb_y*s->mb_stride;
433 int mv_predictor[8][2]={{0}};
437 int best_score=256*256*256*64;
439 const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
440 int prev_x, prev_y, prev_ref;
/* visit MBs in a checkerboard pattern that alternates per pass */
442 if((mb_x^mb_y^pass)&1) continue;
444 if(fixed[mb_xy]==MV_FROZEN) continue;
445 assert(!IS_INTRA(s->current_picture.f.mb_type[mb_xy]));
446 assert(s->last_picture_ptr && s->last_picture_ptr->f.data[0]);
/* only process MBs with at least one frozen neighbour ... */
449 if(mb_x>0 && fixed[mb_xy-1 ]==MV_FROZEN) j=1;
450 if(mb_x+1<mb_width && fixed[mb_xy+1 ]==MV_FROZEN) j=1;
451 if(mb_y>0 && fixed[mb_xy-mb_stride]==MV_FROZEN) j=1;
452 if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]==MV_FROZEN) j=1;
/* ... or, on later passes, a neighbour that changed last pass */
456 if(mb_x>0 && fixed[mb_xy-1 ]==MV_CHANGED) j=1;
457 if(mb_x+1<mb_width && fixed[mb_xy+1 ]==MV_CHANGED) j=1;
458 if(mb_y>0 && fixed[mb_xy-mb_stride]==MV_CHANGED) j=1;
459 if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]==MV_CHANGED) j=1;
460 if(j==0 && pass>1) continue;
/* gather MV/ref candidates from the four fixed neighbours */
464 if(mb_x>0 && fixed[mb_xy-1]){
465 mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_step][0];
466 mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_step][1];
467 ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy-1)];
470 if(mb_x+1<mb_width && fixed[mb_xy+1]){
471 mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_step][0];
472 mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_step][1];
473 ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy+1)];
476 if(mb_y>0 && fixed[mb_xy-mb_stride]){
477 mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][0];
478 mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][1];
479 ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy-s->mb_stride)];
482 if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
483 mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][0];
484 mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][1];
485 ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy+s->mb_stride)];
488 if(pred_count==0) continue;
/* add mean and pseudo-median candidates, but only if all refs agree */
491 int sum_x=0, sum_y=0, sum_r=0;
492 int max_x, max_y, min_x, min_y, max_r, min_r;
494 for(j=0; j<pred_count; j++){
495 sum_x+= mv_predictor[j][0];
496 sum_y+= mv_predictor[j][1];
498 if(j && ref[j] != ref[j-1])
499 goto skip_mean_and_median;
/* mean candidate */
503 mv_predictor[pred_count][0] = sum_x/j;
504 mv_predictor[pred_count][1] = sum_y/j;
505 ref [pred_count] = sum_r/j;
509 min_y= min_x= min_r= 99999;
510 max_y= max_x= max_r=-99999;
512 min_x=min_y=max_x=max_y=min_r=max_r=0;
514 for(j=0; j<pred_count; j++){
515 max_x= FFMAX(max_x, mv_predictor[j][0]);
516 max_y= FFMAX(max_y, mv_predictor[j][1]);
517 max_r= FFMAX(max_r, ref[j]);
518 min_x= FFMIN(min_x, mv_predictor[j][0]);
519 min_y= FFMIN(min_y, mv_predictor[j][1]);
520 min_r= FFMIN(min_r, ref[j]);
/* pseudo-median: drop the extremes from the sum */
522 mv_predictor[pred_count+1][0] = sum_x - max_x - min_x;
523 mv_predictor[pred_count+1][1] = sum_y - max_y - min_y;
524 ref [pred_count+1] = sum_r - max_r - min_r;
527 mv_predictor[pred_count+1][0] /= 2;
528 mv_predictor[pred_count+1][1] /= 2;
529 ref [pred_count+1] /= 2;
533 skip_mean_and_median:
/* final candidate: the MB's previous MV. NOTE(review): the '&& 0' below
 * permanently disables the last-picture branch — verify this is intended. */
538 if (!fixed[mb_xy] && 0) {
539 if (s->avctx->codec_id == CODEC_ID_H264) {
542 ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
545 if (!s->last_picture.f.motion_val[0] ||
546 !s->last_picture.f.ref_index[0])
548 prev_x = s->last_picture.f.motion_val[0][mot_index][0];
549 prev_y = s->last_picture.f.motion_val[0][mot_index][1];
550 prev_ref = s->last_picture.f.ref_index[0][4*mb_xy];
552 prev_x = s->current_picture.f.motion_val[0][mot_index][0];
553 prev_y = s->current_picture.f.motion_val[0][mot_index][1];
554 prev_ref = s->current_picture.f.ref_index[0][4*mb_xy];
558 mv_predictor[pred_count][0]= prev_x;
559 mv_predictor[pred_count][1]= prev_y;
560 ref [pred_count] = prev_ref;
564 s->mv_dir = MV_DIR_FORWARD;
566 s->mv_type = MV_TYPE_16X16;
569 s->dsp.clear_blocks(s->block[0]);
/* try each candidate: decode the MB with it, score boundary continuity */
574 for(j=0; j<pred_count; j++){
576 uint8_t *src = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
578 s->current_picture.f.motion_val[0][mot_index][0] = s->mv[0][0][0] = mv_predictor[j][0];
579 s->current_picture.f.motion_val[0][mot_index][1] = s->mv[0][0][1] = mv_predictor[j][1];
581 if(ref[j]<0) //predictor intra or otherwise not available
584 decode_mb(s, ref[j]);
/* sum absolute differences along each border shared with a fixed MB */
586 if(mb_x>0 && fixed[mb_xy-1]){
589 score += FFABS(src[k*s->linesize-1 ]-src[k*s->linesize ]);
591 if(mb_x+1<mb_width && fixed[mb_xy+1]){
594 score += FFABS(src[k*s->linesize+15]-src[k*s->linesize+16]);
596 if(mb_y>0 && fixed[mb_xy-mb_stride]){
599 score += FFABS(src[k-s->linesize ]-src[k ]);
601 if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
604 score += FFABS(src[k+s->linesize*15]-src[k+s->linesize*16]);
607 if(score <= best_score){ // <= will favor the last MV
/* re-decode with the winner and store its MV over the whole MB */
612 score_sum+= best_score;
613 s->mv[0][0][0]= mv_predictor[best_pred][0];
614 s->mv[0][0][1]= mv_predictor[best_pred][1];
616 for(i=0; i<mot_step; i++)
617 for(j=0; j<mot_step; j++){
618 s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
619 s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
622 decode_mb(s, ref[best_pred]);
625 if(s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y){
626 fixed[mb_xy]=MV_CHANGED;
629 fixed[mb_xy]=MV_UNCHANGED;
633 // printf(".%d/%d", changed, score_sum); fflush(stdout);
/* freeze everything concealed in this round before going deeper */
639 for(i=0; i<s->mb_num; i++){
640 int mb_xy= s->mb_index2xy[i];
642 fixed[mb_xy]=MV_FROZEN;
644 // printf(":"); fflush(stdout);
/**
 * Decide whether the damaged MBs are more likely intra or inter coded.
 * Counts undamaged MBs, then for a sample of them accumulates an
 * intra-likeliness score from SADs against the previous picture; returns
 * nonzero when spatial (intra) concealment looks preferable.
 * NOTE(review): several statements are elided in this excerpt.
 */
648 static int is_intra_more_likely(MpegEncContext *s){
649 int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;
651 if (!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) return 1; //no previous frame available -> use spatial prediction
654 for(i=0; i<s->mb_num; i++){
655 const int mb_xy= s->mb_index2xy[i];
656 const int error= s->error_status_table[mb_xy];
657 if(!((error&DC_ERROR) && (error&MV_ERROR)))
/* H.264 temporal concealment needs a usable reference list */
661 if(s->codec_id == CODEC_ID_H264){
662 H264Context *h= (void*)s;
663 if (h->ref_count[0] <= 0 || !h->ref_list[0][0].f.data[0])
667 if(undamaged_count < 5) return 0; //almost all MBs damaged -> use temporal prediction
669 //prevent dsp.sad() check, that requires access to the image
670 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration && s->pict_type == AV_PICTURE_TYPE_I)
673 skip_amount= FFMAX(undamaged_count/50, 1); //check only upto 50 MBs
677 for(mb_y= 0; mb_y<s->mb_height-1; mb_y++){
678 for(mb_x= 0; mb_x<s->mb_width; mb_x++){
680 const int mb_xy= mb_x + mb_y*s->mb_stride;
682 error= s->error_status_table[mb_xy];
683 if((error&DC_ERROR) && (error&MV_ERROR))
684 continue; //skip damaged
687 if((j%skip_amount) != 0) continue; //skip a few to speed things up
689 if(s->pict_type==AV_PICTURE_TYPE_I){
690 uint8_t *mb_ptr = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
691 uint8_t *last_mb_ptr= s->last_picture.f.data [0] + mb_x*16 + mb_y*16*s->linesize;
693 if (s->avctx->codec_id == CODEC_ID_H264) {
696 ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
/* temporal similarity minus spatial (vertical) similarity within last picture */
699 is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr , s->linesize, 16);
700 // FIXME need await_progress() here
701 is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16);
703 if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
710 //printf("is_intra_likely: %d type:%d\n", is_intra_likely, s->pict_type);
711 return is_intra_likely > 0;
/**
 * Initialize the error-tracking state at the start of a frame:
 * every MB starts flagged as fully damaged; successfully decoded
 * slices later clear these flags via ff_er_add_slice().
 */
714 void ff_er_frame_start(MpegEncContext *s){
715 if(!s->error_recognition) return;
717 memset(s->error_status_table, MV_ERROR|AC_ERROR|DC_ERROR|VP_START|AC_END|DC_END|MV_END, s->mb_stride*s->mb_height*sizeof(uint8_t));
/* 3 partitions (AC/DC/MV) per MB are still considered in error */
718 s->error_count= 3*s->mb_num;
719 s->error_occurred = 0;
724 * @param endx x component of the last macroblock, can be -1 for the last of the previous line
725 * @param status the status at the end (MV_END, AC_ERROR, ...), it is assumed that no earlier end or
726 * error of the same type occurred
/* Records the outcome of one decoded slice [start, end) into the per-MB
 * error_status_table and updates the pending error count.
 * NOTE(review): several statements are elided in this excerpt. */
728 void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status){
729 const int start_i= av_clip(startx + starty * s->mb_width , 0, s->mb_num-1);
730 const int end_i = av_clip(endx + endy * s->mb_width , 0, s->mb_num);
731 const int start_xy= s->mb_index2xy[start_i];
732 const int end_xy = s->mb_index2xy[end_i];
/* hardware accelerators handle errors on their own */
735 if(s->avctx->hwaccel)
738 if(start_i > end_i || start_xy > end_xy){
739 av_log(s->avctx, AV_LOG_ERROR, "internal error, slice end before start\n");
743 if(!s->error_recognition) return;
/* each partition type reported (ok or in error) is no longer "pending" */
746 if(status & (AC_ERROR|AC_END)){
747 mask &= ~(AC_ERROR|AC_END);
748 s->error_count -= end_i - start_i + 1;
750 if(status & (DC_ERROR|DC_END)){
751 mask &= ~(DC_ERROR|DC_END);
752 s->error_count -= end_i - start_i + 1;
754 if(status & (MV_ERROR|MV_END)){
755 mask &= ~(MV_ERROR|MV_END);
756 s->error_count -= end_i - start_i + 1;
759 if(status & (AC_ERROR|DC_ERROR|MV_ERROR)) {
760 s->error_occurred = 1;
/* force the concealment pass to run for this frame */
761 s->error_count= INT_MAX;
765 memset(&s->error_status_table[start_xy], 0, (end_xy - start_xy) * sizeof(uint8_t));
768 for(i=start_xy; i<end_xy; i++){
769 s->error_status_table[ i ] &= mask;
773 if(end_i == s->mb_num)
774 s->error_count= INT_MAX;
776 s->error_status_table[end_xy] &= mask;
777 s->error_status_table[end_xy] |= status;
780 s->error_status_table[start_xy] |= VP_START;
/* if the previous slice did not end cleanly, force concealment too */
782 if(start_xy > 0 && s->avctx->thread_count <= 1 && s->avctx->skip_top*s->mb_width < start_i){
783 int prev_status= s->error_status_table[ s->mb_index2xy[start_i - 1] ];
785 prev_status &= ~ VP_START;
786 if(prev_status != (MV_END|DC_END|AC_END)) s->error_count= INT_MAX;
/**
 * Main error-concealment pass, run once per frame after decoding.
 * Propagates error flags across slices/partitions, decides between
 * intra (spatial) and inter (temporal) concealment, fills in missing
 * MVs and DC values, and optionally deblocks the result.
 * NOTE(review): this function continues beyond the end of this excerpt and
 * many statements are elided; comments below cover only what is visible.
 */
790 void ff_er_frame_end(MpegEncContext *s){
791 int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
793 int threshold_part[4]= {100,100,100};
796 int size = s->b8_stride * 2 * s->mb_height;
797 Picture *pic= s->current_picture_ptr;
/* bail out when concealment is disabled, impossible, or pointless */
799 if(!s->error_recognition || s->error_count==0 || s->avctx->lowres ||
801 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
802 s->picture_structure != PICT_FRAME || // we don't support ER of field pictures yet, though it should not crash if enabled
803 s->error_count==3*s->mb_width*(s->avctx->skip_top + s->avctx->skip_bottom)) return;
/* allocate MV/ref tables on demand when the codec did not provide them */
805 if (s->current_picture.f.motion_val[0] == NULL) {
806 av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
809 pic->f.ref_index[i] = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
810 pic->motion_val_base[i]= av_mallocz((size+4) * 2 * sizeof(uint16_t));
811 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
813 pic->f.motion_subsample_log2 = 3;
814 s->current_picture= *s->current_picture_ptr;
/* optional dump of the per-MB error status */
817 if(s->avctx->debug&FF_DEBUG_ER){
818 for(mb_y=0; mb_y<s->mb_height; mb_y++){
819 for(mb_x=0; mb_x<s->mb_width; mb_x++){
820 int status= s->error_status_table[mb_x + mb_y*s->mb_stride];
822 av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status);
824 av_log(s->avctx, AV_LOG_DEBUG, "\n");
829 /* handle overlapping slices */
830 for(error_type=1; error_type<=3; error_type++){
833 for(i=s->mb_num-1; i>=0; i--){
834 const int mb_xy= s->mb_index2xy[i];
835 int error= s->error_status_table[mb_xy];
837 if(error&(1<<error_type))
839 if(error&(8<<error_type))
843 s->error_status_table[mb_xy]|= 1<<error_type;
851 /* handle slices with partitions of different length */
852 if(s->partitioned_frame){
855 for(i=s->mb_num-1; i>=0; i--){
856 const int mb_xy= s->mb_index2xy[i];
857 int error= s->error_status_table[mb_xy];
861 if((error&MV_END) || (error&DC_END) || (error&AC_ERROR))
865 s->error_status_table[mb_xy]|= AC_ERROR;
872 /* handle missing slices */
873 if(s->error_recognition>=4){
876 for(i=s->mb_num-2; i>=s->mb_width+100; i--){ //FIXME +100 hack
877 const int mb_xy= s->mb_index2xy[i];
878 int error1= s->error_status_table[mb_xy ];
879 int error2= s->error_status_table[s->mb_index2xy[i+1]];
884 if( error2==(VP_START|DC_ERROR|AC_ERROR|MV_ERROR|AC_END|DC_END|MV_END)
885 && error1!=(VP_START|DC_ERROR|AC_ERROR|MV_ERROR|AC_END|DC_END|MV_END)
886 && ((error1&AC_END) || (error1&DC_END) || (error1&MV_END))){ //end & uninit
891 s->error_status_table[mb_xy]|= DC_ERROR|AC_ERROR|MV_ERROR;
896 /* backward mark errors */
898 for(error_type=1; error_type<=3; error_type++){
899 for(i=s->mb_num-1; i>=0; i--){
900 const int mb_xy= s->mb_index2xy[i];
901 int error= s->error_status_table[mb_xy];
903 if(!s->mbskip_table[mb_xy]) //FIXME partition specific
905 if(error&(1<<error_type))
908 if(s->partitioned_frame){
909 if(distance < threshold_part[error_type-1])
910 s->error_status_table[mb_xy]|= 1<<error_type;
912 if(distance < threshold)
913 s->error_status_table[mb_xy]|= 1<<error_type;
922 /* forward mark errors */
924 for(i=0; i<s->mb_num; i++){
925 const int mb_xy= s->mb_index2xy[i];
926 int old_error= s->error_status_table[mb_xy];
928 if(old_error&VP_START)
929 error= old_error& (DC_ERROR|AC_ERROR|MV_ERROR);
931 error|= old_error& (DC_ERROR|AC_ERROR|MV_ERROR);
932 s->error_status_table[mb_xy]|= error;
936 /* handle not partitioned case */
937 if(!s->partitioned_frame){
938 for(i=0; i<s->mb_num; i++){
939 const int mb_xy= s->mb_index2xy[i];
940 error= s->error_status_table[mb_xy];
941 if(error&(AC_ERROR|DC_ERROR|MV_ERROR))
942 error|= AC_ERROR|DC_ERROR|MV_ERROR;
943 s->error_status_table[mb_xy]= error;
/* count the damage for the log message below */
948 dc_error= ac_error= mv_error=0;
949 for(i=0; i<s->mb_num; i++){
950 const int mb_xy= s->mb_index2xy[i];
951 error= s->error_status_table[mb_xy];
952 if(error&DC_ERROR) dc_error ++;
953 if(error&AC_ERROR) ac_error ++;
954 if(error&MV_ERROR) mv_error ++;
956 av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors\n", dc_error, ac_error, mv_error);
958 is_intra_likely= is_intra_more_likely(s);
960 /* set unknown mb-type to most likely */
961 for(i=0; i<s->mb_num; i++){
962 const int mb_xy= s->mb_index2xy[i];
963 error= s->error_status_table[mb_xy];
964 if(!((error&DC_ERROR) && (error&MV_ERROR)))
968 s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
970 s->current_picture.f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
973 // change inter to intra blocks if no reference frames are available
974 if (!s->last_picture.f.data[0] && !s->next_picture.f.data[0])
975 for(i=0; i<s->mb_num; i++){
976 const int mb_xy= s->mb_index2xy[i];
977 if (!IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
978 s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
981 /* handle inter blocks with damaged AC */
982 for(mb_y=0; mb_y<s->mb_height; mb_y++){
983 for(mb_x=0; mb_x<s->mb_width; mb_x++){
984 const int mb_xy= mb_x + mb_y * s->mb_stride;
985 const int mb_type= s->current_picture.f.mb_type[mb_xy];
/* predict backward when no forward reference picture exists */
986 int dir = !s->last_picture.f.data[0];
987 error= s->error_status_table[mb_xy];
989 if(IS_INTRA(mb_type)) continue; //intra
990 if(error&MV_ERROR) continue; //inter with damaged MV
991 if(!(error&AC_ERROR)) continue; //undamaged inter
993 s->mv_dir = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
997 int mb_index= mb_x*2 + mb_y*2*s->b8_stride;
999 s->mv_type = MV_TYPE_8X8;
1001 s->mv[0][j][0] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
1002 s->mv[0][j][1] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
1005 s->mv_type = MV_TYPE_16X16;
1006 s->mv[0][0][0] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0];
1007 s->mv[0][0][1] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1];
1010 s->dsp.clear_blocks(s->block[0]);
1014 decode_mb(s, 0/*FIXME h264 partitioned slices need this set*/);
/* B-frames: rebuild MVs by temporally scaling the co-located next-picture MV */
1019 if(s->pict_type==AV_PICTURE_TYPE_B){
1020 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1021 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1022 int xy= mb_x*2 + mb_y*2*s->b8_stride;
1023 const int mb_xy= mb_x + mb_y * s->mb_stride;
1024 const int mb_type= s->current_picture.f.mb_type[mb_xy];
1025 error= s->error_status_table[mb_xy];
1027 if(IS_INTRA(mb_type)) continue;
1028 if(!(error&MV_ERROR)) continue; //inter with undamaged MV
1029 if(!(error&AC_ERROR)) continue; //undamaged inter
1031 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD;
1032 if(!s->last_picture.f.data[0]) s->mv_dir &= ~MV_DIR_FORWARD;
1033 if(!s->next_picture.f.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD;
1035 s->mv_type = MV_TYPE_16X16;
1039 int time_pp= s->pp_time;
1040 int time_pb= s->pb_time;
1042 if (s->avctx->codec_id == CODEC_ID_H264) {
1045 ff_thread_await_progress((AVFrame *) s->next_picture_ptr,
/* direct-mode style scaling of the co-located MV by pb/pp timing */
1048 s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] * time_pb / time_pp;
1049 s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] * time_pb / time_pp;
1050 s->mv[1][0][0] = s->next_picture.f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
1051 s->mv[1][0][1] = s->next_picture.f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
1059 s->dsp.clear_blocks(s->block[0]);
1068 /* the filters below are not XvMC compatible, skip them */
1069 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1071 /* fill DC for inter blocks */
1072 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1073 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1074 int dc, dcu, dcv, y, n;
1076 uint8_t *dest_y, *dest_cb, *dest_cr;
1077 const int mb_xy= mb_x + mb_y * s->mb_stride;
1078 const int mb_type = s->current_picture.f.mb_type[mb_xy];
1080 error= s->error_status_table[mb_xy];
1082 if(IS_INTRA(mb_type) && s->partitioned_frame) continue;
1083 // if(error&MV_ERROR) continue; //inter data damaged FIXME is this good?
1085 dest_y = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
1086 dest_cb = s->current_picture.f.data[1] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
1087 dest_cr = s->current_picture.f.data[2] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
/* average the reconstructed pixels to (re)derive the stored per-block DCs */
1089 dc_ptr= &s->dc_val[0][mb_x*2 + mb_y*2*s->b8_stride];
1095 dc+= dest_y[x + (n&1)*8 + (y + (n>>1)*8)*s->linesize];
1098 dc_ptr[(n&1) + (n>>1)*s->b8_stride]= (dc+4)>>3;
1105 dcu+=dest_cb[x + y*(s->uvlinesize)];
1106 dcv+=dest_cr[x + y*(s->uvlinesize)];
1109 s->dc_val[1][mb_x + mb_y*s->mb_stride]= (dcu+4)>>3;
1110 s->dc_val[2][mb_x + mb_y*s->mb_stride]= (dcv+4)>>3;
1114 /* guess DC for damaged blocks */
1115 guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1);
1116 guess_dc(s, s->dc_val[1], s->mb_width , s->mb_height , s->mb_stride, 0);
1117 guess_dc(s, s->dc_val[2], s->mb_width , s->mb_height , s->mb_stride, 0);
1119 /* filter luma DC */
1120 filter181(s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride);
1123 /* render DC only intra */
1124 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1125 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1126 uint8_t *dest_y, *dest_cb, *dest_cr;
1127 const int mb_xy= mb_x + mb_y * s->mb_stride;
1128 const int mb_type = s->current_picture.f.mb_type[mb_xy];
1130 error= s->error_status_table[mb_xy];
1132 if(IS_INTER(mb_type)) continue;
1133 if(!(error&AC_ERROR)) continue; //undamaged
1135 dest_y = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
1136 dest_cb = s->current_picture.f.data[1] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
1137 dest_cr = s->current_picture.f.data[2] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
1139 put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
1144 if(s->avctx->error_concealment&FF_EC_DEBLOCK){
1145 /* filter horizontal block boundaries */
1146 h_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
1147 h_block_filter(s, s->current_picture.f.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
1148 h_block_filter(s, s->current_picture.f.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
1150 /* filter vertical block boundaries */
1151 v_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
1152 v_block_filter(s, s->current_picture.f.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
1153 v_block_filter(s, s->current_picture.f.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
1157 /* clean a few tables */
1158 for(i=0; i<s->mb_num; i++){
1159 const int mb_xy= s->mb_index2xy[i];
1160 int error= s->error_status_table[mb_xy];
1162 if(s->pict_type!=AV_PICTURE_TYPE_B && (error&(DC_ERROR|MV_ERROR|AC_ERROR))){
1163 s->mbskip_table[mb_xy]=0;
1165 s->mbintra_table[mb_xy]=1;