2 * Error resilience / concealment
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * Error resilience / concealment.
32 #include "mpegvideo.h"
34 #include "rectangle.h"
38 * H264 redefines mb_intra so it is not mistakenly used (it is uninitialized in h264)
39 * but error concealment must support both h264 and h263 thus we must undo this
/* Re-render one macroblock after concealment has chosen its motion data.
 * For H.264 the decoder's own reconstruction (ff_h264_hl_decode_mb) is used,
 * otherwise the generic MPV_decode_mb() path.
 * NOTE(review): this listing is truncated (embedded line numbers jump), so
 * some statements/braces are not visible; only comments were added. */
43 static void decode_mb(MpegEncContext *s, int ref){
/* Destination pointers for Y, Cb, Cr of MB (mb_x, mb_y); chroma offsets honour subsampling. */
44 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
45 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
46 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
/* H.264: set up the minimal per-MB decoder state needed for reconstruction. */
48 if(CONFIG_H264_DECODER && s->codec_id == CODEC_ID_H264){
49 H264Context *h= (void*)s;
50 h->mb_xy= s->mb_x + s->mb_y*s->mb_stride;
51 memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
/* Guard against a reference index that is out of range for this slice. */
53 if(ref >= h->ref_count[0]) //FIXME it is possible albeit uncommon that slice references differ between slices, we take the easy approach and ignore it for now. If this turns out to have any relevance in practice then correct remapping should be added
/* Propagate the chosen reference and MV into the caches the h264 code reads. */
55 fill_rectangle(&s->current_picture.f.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1);
56 fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
57 fill_rectangle(h->mv_cache[0][ scan8[0] ], 4, 4, 8, pack16to32(s->mv[0][0][0],s->mv[0][0][1]), 4);
59 ff_h264_hl_decode_mb(h);
/* Non-H.264 codecs: regular mpegvideo reconstruction of s->block. */
62 MPV_decode_mb(s, s->block);
67 * @param stride the number of MVs to get to the next row
68 * @param mv_step the number of MVs per row or column in a macroblock
/* Determine the motion-vector array geometry for the current codec.
 * @param mv_step the number of MVs per row or column in a macroblock
 * @param stride  the number of MVs to get to the next row
 * NOTE(review): truncated listing — the H.264 branch body and the non-H.264
 * *mv_step assignment are not visible here; only comments were added. */
70 static void set_mv_strides(MpegEncContext *s, int *mv_step, int *stride){
71 if(s->codec_id == CODEC_ID_H264){
72 H264Context *h= (void*)s;
/* This code path assumes quarter-pel motion vectors for h264. */
73 assert(s->quarter_sample);
78 *stride= s->b8_stride;
83 * Replace the current MB with a flat dc-only version.
/* Replace the current MB with a flat DC-only version: each 8x8 luma quadrant
 * and each chroma block is filled with a single value taken from the dc_val
 * tables.  Stored DC is 8x the pixel value, hence the /8 and the 2040 clamp
 * (2040/8 == 255).
 * NOTE(review): truncated listing — the i/x/y loop headers and the low-side
 * clamps are not visible; only comments were added. */
85 static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int mb_x, int mb_y)
87 int dc, dcu, dcv, y, i;
/* Luma: pick the stored DC for 8x8 quadrant i of this MB. */
89 dc= s->dc_val[0][mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*s->b8_stride];
/* Clamp so dc/8 stays within the 8-bit pixel range. */
91 else if(dc>2040) dc=2040;
/* Fill the corresponding 8x8 luma quadrant with the flat value. */
95 dest_y[x + (i&1)*8 + (y + (i>>1)*8)*s->linesize]= dc/8;
/* Chroma: one stored DC per MB for each of Cb and Cr. */
99 dcu = s->dc_val[1][mb_x + mb_y*s->mb_stride];
100 dcv = s->dc_val[2][mb_x + mb_y*s->mb_stride];
102 else if(dcu>2040) dcu=2040;
104 else if(dcv>2040) dcv=2040;
/* Fill the 8x8 chroma blocks with their flat values. */
108 dest_cb[x + y*(s->uvlinesize)]= dcu/8;
109 dest_cr[x + y*(s->uvlinesize)]= dcv/8;
/* Smooth a DC plane with a simple separable low-pass filter, run first
 * horizontally then vertically over the interior samples.
 * Visible taps are *8 for the centre and -1 for the following neighbour;
 * 10923 ~= 65536/6, so the normalisation divides by 6 — presumably the
 * (not visible) previous-neighbour tap is also -1 (TODO confirm: the missing
 * lines should contain the start of the dc expression).
 * NOTE(review): truncated listing; only comments were added. */
114 static void filter181(int16_t *data, int width, int height, int stride){
117 /* horizontal filter */
118 for(y=1; y<height-1; y++){
119 int prev_dc= data[0 + y*stride];
121 for(x=1; x<width-1; x++){
125 + data[x + y*stride]*8
126 - data[x + 1 + y*stride];
/* Normalise: (dc*10923 + 32768) >> 16 approximates dc/6 with rounding. */
127 dc= (dc*10923 + 32768)>>16;
/* Save the unfiltered value before overwriting, for the next iteration. */
128 prev_dc= data[x + y*stride];
129 data[x + y*stride]= dc;
133 /* vertical filter */
134 for(x=1; x<width-1; x++){
135 int prev_dc= data[x];
137 for(y=1; y<height-1; y++){
141 + data[x + y *stride]*8
142 - data[x + (y+1)*stride];
143 dc= (dc*10923 + 32768)>>16;
144 prev_dc= data[x + y*stride];
145 data[x + y*stride]= dc;
151 * guess the dc of blocks which do not have an undamaged dc
152 * @param w width in 8 pixel blocks
153 * @param h height in 8 pixel blocks
/* Guess the DC of blocks which do not have an undamaged DC.
 * For every intra block whose DC is marked damaged, scan right/left/down/up
 * for the nearest block with a usable DC, then blend the four candidates with
 * weights inversely proportional to their distance.
 * @param w width in 8 pixel blocks
 * @param h height in 8 pixel blocks
 * @param is_luma 1 => block coords are at 2x MB resolution (>>is_luma maps to MB)
 * NOTE(review): truncated listing — the distance[] updates and loop breaks are
 * not visible; only comments were added. */
155 static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, int is_luma){
158 for(b_y=0; b_y<h; b_y++){
159 for(b_x=0; b_x<w; b_x++){
/* color[j]/distance[j]: best candidate DC and its distance per direction. */
160 int color[4]={1024,1024,1024,1024};
161 int distance[4]={9999,9999,9999,9999};
162 int mb_index, error, j;
163 int64_t guess, weight_sum;
/* Map block coordinates to the owning macroblock. */
165 mb_index= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
167 error= s->error_status_table[mb_index];
/* Only intra blocks with a damaged DC need guessing. */
169 if(IS_INTER(s->current_picture.f.mb_type[mb_index])) continue; //inter
170 if(!(error&ER_DC_ERROR)) continue; //dc-ok
/* Right search: nearest block whose DC is trustworthy (inter or DC-ok). */
173 for(j=b_x+1; j<w; j++){
174 int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
175 int error_j= s->error_status_table[mb_index_j];
176 int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
177 if(intra_j==0 || !(error_j&ER_DC_ERROR)){
178 color[0]= dc[j + b_y*stride];
/* Left search. */
185 for(j=b_x-1; j>=0; j--){
186 int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
187 int error_j= s->error_status_table[mb_index_j];
188 int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
189 if(intra_j==0 || !(error_j&ER_DC_ERROR)){
190 color[1]= dc[j + b_y*stride];
/* Down search. */
197 for(j=b_y+1; j<h; j++){
198 int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
199 int error_j= s->error_status_table[mb_index_j];
200 int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
201 if(intra_j==0 || !(error_j&ER_DC_ERROR)){
202 color[2]= dc[b_x + j*stride];
/* Up search. */
209 for(j=b_y-1; j>=0; j--){
210 int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
211 int error_j= s->error_status_table[mb_index_j];
212 int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
213 if(intra_j==0 || !(error_j&ER_DC_ERROR)){
214 color[3]= dc[b_x + j*stride];
/* Distance-weighted blend of the four directional candidates (64-bit to
 * avoid overflow: weight can be up to 256*256*256*16). */
223 int64_t weight= 256*256*256*16/distance[j];
224 guess+= weight*(int64_t)color[j];
/* Rounded weighted mean becomes the concealed DC. */
227 guess= (guess + weight_sum/2) / weight_sum;
229 dc[b_x + b_y*stride]= guess;
235 * simple horizontal deblocking filter used for error resilience
236 * @param w width in 8 pixel blocks
237 * @param h height in 8 pixel blocks
/* Simple horizontal deblocking filter used for error resilience: smooths the
 * vertical 8-pixel block boundaries where at least one side is damaged.
 * @param w width in 8 pixel blocks
 * @param h height in 8 pixel blocks
 * NOTE(review): truncated listing — the y loop header, the computation of the
 * correction d and its sign/strength adjustment are not fully visible; only
 * comments were added. */
239 static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int stride, int is_luma){
240 int b_x, b_y, mvx_stride, mvy_stride;
/* Clipping table: cm[v] saturates v into the valid pixel range. */
241 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
242 set_mv_strides(s, &mvx_stride, &mvy_stride);
/* Convert MV strides from luma-block units to this plane's block units. */
243 mvx_stride >>= is_luma;
244 mvy_stride *= mvx_stride;
246 for(b_y=0; b_y<h; b_y++){
247 for(b_x=0; b_x<w-1; b_x++){
/* Status/type of the blocks on both sides of the vertical boundary. */
249 int left_status = s->error_status_table[( b_x >>is_luma) + (b_y>>is_luma)*s->mb_stride];
250 int right_status= s->error_status_table[((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride];
251 int left_intra = IS_INTRA(s->current_picture.f.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
252 int right_intra = IS_INTRA(s->current_picture.f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
253 int left_damage = left_status&ER_MB_ERROR;
254 int right_damage= right_status&ER_MB_ERROR;
255 int offset= b_x*8 + b_y*stride*8;
256 int16_t *left_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride* b_x ];
257 int16_t *right_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride*(b_x+1)];
/* Skip boundaries where nothing is damaged, or where two undamaged-looking
 * inter blocks have (nearly) identical motion. */
259 if(!(left_damage||right_damage)) continue; // both undamaged
261 if( (!left_intra) && (!right_intra)
262 && FFABS(left_mv[0]-right_mv[0]) + FFABS(left_mv[1]+right_mv[1]) < 2) continue;
/* Gradients just inside (a), across (b), and just outside (c) the edge. */
267 a= dst[offset + 7 + y*stride] - dst[offset + 6 + y*stride];
268 b= dst[offset + 8 + y*stride] - dst[offset + 7 + y*stride];
269 c= dst[offset + 9 + y*stride] - dst[offset + 8 + y*stride];
/* Edge strength: boundary step minus the mean of the neighbouring gradients. */
271 d= FFABS(b) - ((FFABS(a) + FFABS(c) + 1)>>1);
/* If only one side is damaged, presumably only that side gets corrected
 * (the branch bodies are not visible) — TODO confirm. */
277 if(!(left_damage && right_damage))
/* Blend the correction with decreasing strength away from the edge. */
281 dst[offset + 7 + y*stride] = cm[dst[offset + 7 + y*stride] + ((d*7)>>4)];
282 dst[offset + 6 + y*stride] = cm[dst[offset + 6 + y*stride] + ((d*5)>>4)];
283 dst[offset + 5 + y*stride] = cm[dst[offset + 5 + y*stride] + ((d*3)>>4)];
284 dst[offset + 4 + y*stride] = cm[dst[offset + 4 + y*stride] + ((d*1)>>4)];
287 dst[offset + 8 + y*stride] = cm[dst[offset + 8 + y*stride] - ((d*7)>>4)];
288 dst[offset + 9 + y*stride] = cm[dst[offset + 9 + y*stride] - ((d*5)>>4)];
289 dst[offset + 10+ y*stride] = cm[dst[offset +10 + y*stride] - ((d*3)>>4)];
290 dst[offset + 11+ y*stride] = cm[dst[offset +11 + y*stride] - ((d*1)>>4)];
298 * simple vertical deblocking filter used for error resilience
299 * @param w width in 8 pixel blocks
300 * @param h height in 8 pixel blocks
/* Simple vertical deblocking filter used for error resilience: smooths the
 * horizontal 8-pixel block boundaries where at least one side is damaged.
 * Mirror of h_block_filter() with rows and columns swapped.
 * @param w width in 8 pixel blocks
 * @param h height in 8 pixel blocks
 * NOTE(review): truncated listing — the x loop header and parts of the d
 * computation are not visible; only comments were added. */
302 static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int stride, int is_luma){
303 int b_x, b_y, mvx_stride, mvy_stride;
/* Clipping table: cm[v] saturates v into the valid pixel range. */
304 uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
305 set_mv_strides(s, &mvx_stride, &mvy_stride);
306 mvx_stride >>= is_luma;
307 mvy_stride *= mvx_stride;
309 for(b_y=0; b_y<h-1; b_y++){
310 for(b_x=0; b_x<w; b_x++){
/* Status/type of the blocks above and below the horizontal boundary. */
312 int top_status = s->error_status_table[(b_x>>is_luma) + ( b_y >>is_luma)*s->mb_stride];
313 int bottom_status= s->error_status_table[(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride];
314 int top_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
315 int bottom_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
316 int top_damage = top_status&ER_MB_ERROR;
317 int bottom_damage= bottom_status&ER_MB_ERROR;
318 int offset= b_x*8 + b_y*stride*8;
319 int16_t *top_mv = s->current_picture.f.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
320 int16_t *bottom_mv = s->current_picture.f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
/* Skip boundaries with no damage or with matching motion on both sides. */
322 if(!(top_damage||bottom_damage)) continue; // both undamaged
324 if( (!top_intra) && (!bottom_intra)
325 && FFABS(top_mv[0]-bottom_mv[0]) + FFABS(top_mv[1]+bottom_mv[1]) < 2) continue;
/* Gradients just inside (a), across (b), and just outside (c) the edge. */
330 a= dst[offset + x + 7*stride] - dst[offset + x + 6*stride];
331 b= dst[offset + x + 8*stride] - dst[offset + x + 7*stride];
332 c= dst[offset + x + 9*stride] - dst[offset + x + 8*stride];
334 d= FFABS(b) - ((FFABS(a) + FFABS(c)+1)>>1);
/* If only one side is damaged, presumably only that side gets corrected
 * (the branch bodies are not visible) — TODO confirm. */
340 if(!(top_damage && bottom_damage))
/* Blend the correction with decreasing strength away from the edge. */
344 dst[offset + x + 7*stride] = cm[dst[offset + x + 7*stride] + ((d*7)>>4)];
345 dst[offset + x + 6*stride] = cm[dst[offset + x + 6*stride] + ((d*5)>>4)];
346 dst[offset + x + 5*stride] = cm[dst[offset + x + 5*stride] + ((d*3)>>4)];
347 dst[offset + x + 4*stride] = cm[dst[offset + x + 4*stride] + ((d*1)>>4)];
350 dst[offset + x + 8*stride] = cm[dst[offset + x + 8*stride] - ((d*7)>>4)];
351 dst[offset + x + 9*stride] = cm[dst[offset + x + 9*stride] - ((d*5)>>4)];
352 dst[offset + x + 10*stride] = cm[dst[offset + x + 10*stride] - ((d*3)>>4)];
353 dst[offset + x + 11*stride] = cm[dst[offset + x + 11*stride] - ((d*1)>>4)];
/* Conceal damaged motion vectors.
 * Each MB is tagged in fixed[] as frozen (trusted), changed or unchanged.
 * If MV guessing is disabled or too few MVs survived, damaged MBs simply get
 * the zero/temporal fallback; otherwise an iterative procedure repeatedly
 * predicts each damaged MB's MV from its fixed neighbours (mean, median and
 * per-neighbour candidates), decodes the MB with each candidate and keeps the
 * one with the lowest boundary-matching score.
 * NOTE(review): heavily truncated listing — many loop bodies, the MV_FROZEN
 * define, score computation pieces and closing braces are missing; only
 * comments were added, no code was altered. */
360 static void guess_mv(MpegEncContext *s){
361 uint8_t *fixed = av_malloc(s->mb_stride * s->mb_height);
364 #define MV_UNCHANGED 1
365 const int mb_stride = s->mb_stride;
366 const int mb_width = s->mb_width;
367 const int mb_height= s->mb_height;
368 int i, depth, num_avail;
369 int mb_x, mb_y, mot_step, mot_stride;
371 set_mv_strides(s, &mot_step, &mot_stride);
/* Initial classification: mark MBs with trustworthy MVs as frozen. */
374 for(i=0; i<s->mb_num; i++){
375 const int mb_xy= s->mb_index2xy[ i ];
377 int error= s->error_status_table[mb_xy];
379 if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check
380 if(!(error&ER_MV_ERROR)) f=MV_FROZEN; //inter with undamaged MV
/* Damaged MV but a previous frame exists: copy its MV/ref temporally. */
385 else if(s->last_picture.f.data[0] && s->last_picture.f.motion_val[0]){
386 const int mb_y= mb_xy / s->mb_stride;
387 const int mb_x= mb_xy % s->mb_stride;
388 const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
389 s->current_picture.f.motion_val[0][mot_index][0]= s->last_picture.f.motion_val[0][mot_index][0];
390 s->current_picture.f.motion_val[0][mot_index][1]= s->last_picture.f.motion_val[0][mot_index][1];
391 s->current_picture.f.ref_index[0][4*mb_xy] = s->last_picture.f.ref_index[0][4*mb_xy];
/* Fallback: no guessing requested, or too few undamaged MVs to guess from —
 * just re-decode every damaged inter MB with whatever MV is in place. */
395 if((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) || num_avail <= mb_width/2){
396 for(mb_y=0; mb_y<s->mb_height; mb_y++){
397 for(mb_x=0; mb_x<s->mb_width; mb_x++){
398 const int mb_xy= mb_x + mb_y*s->mb_stride;
400 if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) continue;
401 if(!(s->error_status_table[mb_xy]&ER_MV_ERROR)) continue;
403 s->mv_dir = s->last_picture.f.data[0] ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
405 s->mv_type = MV_TYPE_16X16;
408 s->dsp.clear_blocks(s->block[0]);
/* Main iterative guessing: several passes per depth, alternating a
 * checkerboard pattern ((mb_x^mb_y^pass)&1) until nothing changes. */
420 for(depth=0;; depth++){
421 int changed, pass, none_left;
425 for(pass=0; (changed || pass<2) && pass<10; pass++){
430 for(mb_y=0; mb_y<s->mb_height; mb_y++){
431 for(mb_x=0; mb_x<s->mb_width; mb_x++){
432 const int mb_xy= mb_x + mb_y*s->mb_stride;
433 int mv_predictor[8][2]={{0}};
437 int best_score=256*256*256*64;
439 const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
440 int prev_x, prev_y, prev_ref;
442 if((mb_x^mb_y^pass)&1) continue;
444 if(fixed[mb_xy]==MV_FROZEN) continue;
445 assert(!IS_INTRA(s->current_picture.f.mb_type[mb_xy]));
446 assert(s->last_picture_ptr && s->last_picture_ptr->f.data[0]);
/* Only work on MBs that have at least one frozen neighbour... */
449 if(mb_x>0 && fixed[mb_xy-1 ]==MV_FROZEN) j=1;
450 if(mb_x+1<mb_width && fixed[mb_xy+1 ]==MV_FROZEN) j=1;
451 if(mb_y>0 && fixed[mb_xy-mb_stride]==MV_FROZEN) j=1;
452 if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]==MV_FROZEN) j=1;
/* ...and, after the first passes, at least one recently changed one. */
456 if(mb_x>0 && fixed[mb_xy-1 ]==MV_CHANGED) j=1;
457 if(mb_x+1<mb_width && fixed[mb_xy+1 ]==MV_CHANGED) j=1;
458 if(mb_y>0 && fixed[mb_xy-mb_stride]==MV_CHANGED) j=1;
459 if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]==MV_CHANGED) j=1;
460 if(j==0 && pass>1) continue;
/* Collect candidate MV/ref pairs from the four fixed neighbours. */
464 if(mb_x>0 && fixed[mb_xy-1]){
465 mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_step][0];
466 mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_step][1];
467 ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy-1)];
470 if(mb_x+1<mb_width && fixed[mb_xy+1]){
471 mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_step][0];
472 mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_step][1];
473 ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy+1)];
476 if(mb_y>0 && fixed[mb_xy-mb_stride]){
477 mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][0];
478 mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][1];
479 ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy-s->mb_stride)];
482 if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
483 mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][0];
484 mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][1];
485 ref [pred_count] = s->current_picture.f.ref_index[0][4*(mb_xy+s->mb_stride)];
488 if(pred_count==0) continue;
/* Add mean and median candidates; only valid if all refs agree. */
491 int sum_x=0, sum_y=0, sum_r=0;
492 int max_x, max_y, min_x, min_y, max_r, min_r;
494 for(j=0; j<pred_count; j++){
495 sum_x+= mv_predictor[j][0];
496 sum_y+= mv_predictor[j][1];
498 if(j && ref[j] != ref[j-1])
499 goto skip_mean_and_median;
/* Mean candidate. */
503 mv_predictor[pred_count][0] = sum_x/j;
504 mv_predictor[pred_count][1] = sum_y/j;
505 ref [pred_count] = sum_r/j;
509 min_y= min_x= min_r= 99999;
510 max_y= max_x= max_r=-99999;
512 min_x=min_y=max_x=max_y=min_r=max_r=0;
514 for(j=0; j<pred_count; j++){
515 max_x= FFMAX(max_x, mv_predictor[j][0]);
516 max_y= FFMAX(max_y, mv_predictor[j][1]);
517 max_r= FFMAX(max_r, ref[j]);
518 min_x= FFMIN(min_x, mv_predictor[j][0]);
519 min_y= FFMIN(min_y, mv_predictor[j][1]);
520 min_r= FFMIN(min_r, ref[j]);
/* Median candidate: drop extremes, keep the middle (sum - max - min). */
522 mv_predictor[pred_count+1][0] = sum_x - max_x - min_x;
523 mv_predictor[pred_count+1][1] = sum_y - max_y - min_y;
524 ref [pred_count+1] = sum_r - max_r - min_r;
527 mv_predictor[pred_count+1][0] /= 2;
528 mv_predictor[pred_count+1][1] /= 2;
529 ref [pred_count+1] /= 2;
533 skip_mean_and_median:
/* Add a temporal (last picture) candidate; wait for the reference frame in
 * frame-threaded h264 before reading its MVs. */
538 if (!fixed[mb_xy] && 0) {
539 if (s->avctx->codec_id == CODEC_ID_H264) {
542 ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
545 if (!s->last_picture.f.motion_val[0] ||
546 !s->last_picture.f.ref_index[0])
548 prev_x = s->last_picture.f.motion_val[0][mot_index][0];
549 prev_y = s->last_picture.f.motion_val[0][mot_index][1];
550 prev_ref = s->last_picture.f.ref_index[0][4*mb_xy];
552 prev_x = s->current_picture.f.motion_val[0][mot_index][0];
553 prev_y = s->current_picture.f.motion_val[0][mot_index][1];
554 prev_ref = s->current_picture.f.ref_index[0][4*mb_xy];
558 mv_predictor[pred_count][0]= prev_x;
559 mv_predictor[pred_count][1]= prev_y;
560 ref [pred_count] = prev_ref;
/* Try every candidate: decode the MB with it and score the result by the
 * absolute pixel differences along borders shared with fixed neighbours. */
564 s->mv_dir = MV_DIR_FORWARD;
566 s->mv_type = MV_TYPE_16X16;
569 s->dsp.clear_blocks(s->block[0]);
574 for(j=0; j<pred_count; j++){
576 uint8_t *src = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
578 s->current_picture.f.motion_val[0][mot_index][0] = s->mv[0][0][0] = mv_predictor[j][0];
579 s->current_picture.f.motion_val[0][mot_index][1] = s->mv[0][0][1] = mv_predictor[j][1];
581 if(ref[j]<0) //predictor intra or otherwise not available
584 decode_mb(s, ref[j]);
586 if(mb_x>0 && fixed[mb_xy-1]){
589 score += FFABS(src[k*s->linesize-1 ]-src[k*s->linesize ]);
591 if(mb_x+1<mb_width && fixed[mb_xy+1]){
594 score += FFABS(src[k*s->linesize+15]-src[k*s->linesize+16]);
596 if(mb_y>0 && fixed[mb_xy-mb_stride]){
599 score += FFABS(src[k-s->linesize ]-src[k ]);
601 if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
604 score += FFABS(src[k+s->linesize*15]-src[k+s->linesize*16]);
607 if(score <= best_score){ // <= will favor the last MV
/* Commit the winning candidate and re-decode the MB with it. */
612 score_sum+= best_score;
613 s->mv[0][0][0]= mv_predictor[best_pred][0];
614 s->mv[0][0][1]= mv_predictor[best_pred][1];
616 for(i=0; i<mot_step; i++)
617 for(j=0; j<mot_step; j++){
618 s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
619 s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
622 decode_mb(s, ref[best_pred]);
/* Track whether this MB's MV changed so later passes can react. */
625 if(s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y){
626 fixed[mb_xy]=MV_CHANGED;
629 fixed[mb_xy]=MV_UNCHANGED;
633 // printf(".%d/%d", changed, score_sum); fflush(stdout);
/* After convergence, freeze everything for the next depth iteration. */
639 for(i=0; i<s->mb_num; i++){
640 int mb_xy= s->mb_index2xy[i];
642 fixed[mb_xy]=MV_FROZEN;
644 // printf(":"); fflush(stdout);
/* Decide whether damaged MBs are better concealed spatially (intra) or
 * temporally (inter): compares, over a subsample of undamaged MBs, the SAD
 * between the current and last picture against a same-picture SAD, or simply
 * votes by the MB's coded type in non-I pictures.
 * @return 1 if intra (spatial) concealment looks more likely to help
 * NOTE(review): truncated listing — several counters' updates and the vote
 * accumulation for the non-I path are not visible; only comments were added. */
650 static int is_intra_more_likely(MpegEncContext *s){
651 int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;
653 if (!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) return 1; //no previous frame available -> use spatial prediction
/* Count MBs that are not fully damaged (DC and MV both broken). */
656 for(i=0; i<s->mb_num; i++){
657 const int mb_xy= s->mb_index2xy[i];
658 const int error= s->error_status_table[mb_xy];
659 if(!((error&ER_DC_ERROR) && (error&ER_MV_ERROR)))
/* h264: give up on temporal prediction if no usable reference list exists. */
663 if(s->codec_id == CODEC_ID_H264){
664 H264Context *h= (void*)s;
665 if (h->list_count <= 0 || h->ref_count[0] <= 0 || !h->ref_list[0][0].f.data[0])
669 if(undamaged_count < 5) return 0; //almost all MBs damaged -> use temporal prediction
671 //prevent dsp.sad() check, that requires access to the image
672 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration && s->pict_type == AV_PICTURE_TYPE_I)
675 skip_amount= FFMAX(undamaged_count/50, 1); //check only up to 50 MBs
/* Vote over a subsample of undamaged MBs. */
679 for(mb_y= 0; mb_y<s->mb_height-1; mb_y++){
680 for(mb_x= 0; mb_x<s->mb_width; mb_x++){
682 const int mb_xy= mb_x + mb_y*s->mb_stride;
684 error= s->error_status_table[mb_xy];
685 if((error&ER_DC_ERROR) && (error&ER_MV_ERROR))
686 continue; //skip damaged
689 if((j%skip_amount) != 0) continue; //skip a few to speed things up
/* I pictures: compare temporal SAD vs a spatial (same picture) SAD. */
691 if(s->pict_type==AV_PICTURE_TYPE_I){
692 uint8_t *mb_ptr = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
693 uint8_t *last_mb_ptr= s->last_picture.f.data [0] + mb_x*16 + mb_y*16*s->linesize;
695 if (s->avctx->codec_id == CODEC_ID_H264) {
/* Frame threading: make sure the reference rows are decoded first. */
698 ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
701 is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr , s->linesize, 16);
702 // FIXME need await_progress() here
703 is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16);
/* Non-I pictures: vote by the coded MB type instead. */
705 if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
712 //printf("is_intra_likely: %d type:%d\n", is_intra_likely, s->pict_type);
713 return is_intra_likely > 0;
/* Initialize per-frame error-resilience state: mark every MB as fully
 * damaged so that ff_er_add_slice() can clear the parts that decode OK. */
716 void ff_er_frame_start(MpegEncContext *s){
717 if(!s->err_recognition) return;
719 memset(s->error_status_table, ER_MB_ERROR|VP_START|ER_MB_END, s->mb_stride*s->mb_height*sizeof(uint8_t));
/* 3 partitions (AC/DC/MV) per MB can each be in error. */
720 s->error_count= 3*s->mb_num;
721 s->error_occurred = 0;
726 * @param endx x component of the last macroblock, can be -1 for the last of the previous line
727 * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is assumed that no earlier end or
728 * error of the same type occurred
/* Record the status of a decoded slice in the error_status_table.
 * @param endx x component of the last macroblock, can be -1 for the last of the previous line
 * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is assumed that
 *               no earlier end or error of the same type occurred
 * NOTE(review): truncated listing — the mask initialisation, the VP_START
 * handling branch and some closing braces are not visible; only comments
 * were added. */
730 void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status){
731 const int start_i= av_clip(startx + starty * s->mb_width , 0, s->mb_num-1);
732 const int end_i = av_clip(endx + endy * s->mb_width , 0, s->mb_num);
733 const int start_xy= s->mb_index2xy[start_i];
734 const int end_xy = s->mb_index2xy[end_i];
/* Hardware decoders handle their own error concealment. */
737 if(s->avctx->hwaccel)
740 if(start_i > end_i || start_xy > end_xy){
741 av_log(s->avctx, AV_LOG_ERROR, "internal error, slice end before start\n");
745 if(!s->err_recognition) return;
/* For each partition type covered by status, clear its error bits from the
 * mask and reduce the outstanding error count. */
748 if(status & (ER_AC_ERROR|ER_AC_END)){
749 mask &= ~(ER_AC_ERROR|ER_AC_END);
750 s->error_count -= end_i - start_i + 1;
752 if(status & (ER_DC_ERROR|ER_DC_END)){
753 mask &= ~(ER_DC_ERROR|ER_DC_END);
754 s->error_count -= end_i - start_i + 1;
756 if(status & (ER_MV_ERROR|ER_MV_END)){
757 mask &= ~(ER_MV_ERROR|ER_MV_END);
758 s->error_count -= end_i - start_i + 1;
/* Any hard MB error forces full concealment of the frame. */
761 if(status & ER_MB_ERROR) {
762 s->error_occurred = 1;
763 s->error_count= INT_MAX;
/* Clear the slice interior, then apply the remaining mask per MB. */
767 memset(&s->error_status_table[start_xy], 0, (end_xy - start_xy) * sizeof(uint8_t));
770 for(i=start_xy; i<end_xy; i++){
771 s->error_status_table[ i ] &= mask;
/* A slice ending on the last MB means no further slices can fix anything. */
775 if(end_i == s->mb_num)
776 s->error_count= INT_MAX;
778 s->error_status_table[end_xy] &= mask;
779 s->error_status_table[end_xy] |= status;
782 s->error_status_table[start_xy] |= VP_START;
/* Detect a gap before this slice: the previous MB must have ended cleanly. */
784 if(start_xy > 0 && s->avctx->thread_count <= 1 && s->avctx->skip_top*s->mb_width < start_i){
785 int prev_status= s->error_status_table[ s->mb_index2xy[start_i - 1] ];
787 prev_status &= ~ VP_START;
788 if(prev_status != (ER_MV_END|ER_DC_END|ER_AC_END)) s->error_count= INT_MAX;
/* Main error-concealment driver, run once at the end of each frame:
 * propagates/normalises error flags, decides intra vs inter concealment,
 * guesses MVs and DCs, renders concealed MBs and optionally deblocks.
 * NOTE(review): heavily truncated listing — many loop headers, branch bodies
 * and closing braces are missing from this view; only comments were added,
 * no code was altered. */
792 void ff_er_frame_end(MpegEncContext *s){
793 int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
/* NOTE(review): only 3 initializers for a [4] array — element [3] stays 0;
 * looks intentional since error_type runs 1..3, but worth confirming. */
795 int threshold_part[4]= {100,100,100};
798 int size = s->b8_stride * 2 * s->mb_height;
799 Picture *pic= s->current_picture_ptr;
/* Bail out when ER is off, nothing is damaged, or the mode is unsupported. */
801 if(!s->err_recognition || s->error_count==0 || s->avctx->lowres ||
803 s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
804 s->picture_structure != PICT_FRAME || // we do not support ER of field pictures yet, though it should not crash if enabled
805 s->error_count==3*s->mb_width*(s->avctx->skip_top + s->avctx->skip_bottom)) return;
/* Allocate MV/ref tables if the codec did not provide them. */
807 if (s->current_picture.f.motion_val[0] == NULL) {
808 av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
811 pic->f.ref_index[i] = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
812 pic->motion_val_base[i]= av_mallocz((size+4) * 2 * sizeof(uint16_t));
813 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
815 pic->f.motion_subsample_log2 = 3;
816 s->current_picture= *s->current_picture_ptr;
/* Optional debug dump of the per-MB error status map. */
819 if(s->avctx->debug&FF_DEBUG_ER){
820 for(mb_y=0; mb_y<s->mb_height; mb_y++){
821 for(mb_x=0; mb_x<s->mb_width; mb_x++){
822 int status= s->error_status_table[mb_x + mb_y*s->mb_stride];
824 av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status);
826 av_log(s->avctx, AV_LOG_DEBUG, "\n");
831 /* handle overlapping slices */
832 for(error_type=1; error_type<=3; error_type++){
/* Walk backwards so an END marker propagates the error onto earlier MBs. */
835 for(i=s->mb_num-1; i>=0; i--){
836 const int mb_xy= s->mb_index2xy[i];
837 int error= s->error_status_table[mb_xy];
839 if(error&(1<<error_type))
841 if(error&(8<<error_type))
845 s->error_status_table[mb_xy]|= 1<<error_type;
853 /* handle slices with partitions of different length */
854 if(s->partitioned_frame){
857 for(i=s->mb_num-1; i>=0; i--){
858 const int mb_xy= s->mb_index2xy[i];
859 int error= s->error_status_table[mb_xy];
863 if((error&ER_MV_END) || (error&ER_DC_END) || (error&ER_AC_ERROR))
867 s->error_status_table[mb_xy]|= ER_AC_ERROR;
874 /* handle missing slices */
875 if(s->err_recognition&AV_EF_EXPLODE){
878 for(i=s->mb_num-2; i>=s->mb_width+100; i--){ //FIXME +100 hack
879 const int mb_xy= s->mb_index2xy[i];
880 int error1= s->error_status_table[mb_xy ];
881 int error2= s->error_status_table[s->mb_index2xy[i+1]];
/* An MB that ended cleanly followed by an untouched (fully damaged) MB
 * indicates a slice was lost between them. */
886 if( error2==(VP_START|ER_MB_ERROR|ER_MB_END)
887 && error1!=(VP_START|ER_MB_ERROR|ER_MB_END)
888 && ((error1&ER_AC_END) || (error1&ER_DC_END) || (error1&ER_MV_END))){ //end & uninit
893 s->error_status_table[mb_xy]|= ER_MB_ERROR;
898 /* backward mark errors */
900 for(error_type=1; error_type<=3; error_type++){
901 for(i=s->mb_num-1; i>=0; i--){
902 const int mb_xy= s->mb_index2xy[i];
903 int error= s->error_status_table[mb_xy];
905 if(!s->mbskip_table[mb_xy]) //FIXME partition specific
907 if(error&(1<<error_type))
/* Spread an error backwards within a threshold distance. */
910 if(s->partitioned_frame){
911 if(distance < threshold_part[error_type-1])
912 s->error_status_table[mb_xy]|= 1<<error_type;
914 if(distance < threshold)
915 s->error_status_table[mb_xy]|= 1<<error_type;
924 /* forward mark errors */
926 for(i=0; i<s->mb_num; i++){
927 const int mb_xy= s->mb_index2xy[i];
928 int old_error= s->error_status_table[mb_xy];
/* Reset the carried error at each slice start, otherwise accumulate. */
930 if(old_error&VP_START)
931 error= old_error& ER_MB_ERROR;
933 error|= old_error& ER_MB_ERROR;
934 s->error_status_table[mb_xy]|= error;
938 /* handle not partitioned case */
939 if(!s->partitioned_frame){
940 for(i=0; i<s->mb_num; i++){
941 const int mb_xy= s->mb_index2xy[i];
942 error= s->error_status_table[mb_xy];
943 if(error&ER_MB_ERROR)
945 s->error_status_table[mb_xy]= error;
/* Count and report how much of each partition type needs concealing. */
950 dc_error= ac_error= mv_error=0;
951 for(i=0; i<s->mb_num; i++){
952 const int mb_xy= s->mb_index2xy[i];
953 error= s->error_status_table[mb_xy];
954 if(error&ER_DC_ERROR) dc_error ++;
955 if(error&ER_AC_ERROR) ac_error ++;
956 if(error&ER_MV_ERROR) mv_error ++;
958 av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors\n", dc_error, ac_error, mv_error);
960 is_intra_likely= is_intra_more_likely(s);
962 /* set unknown mb-type to most likely */
963 for(i=0; i<s->mb_num; i++){
964 const int mb_xy= s->mb_index2xy[i];
965 error= s->error_status_table[mb_xy];
966 if(!((error&ER_DC_ERROR) && (error&ER_MV_ERROR)))
970 s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
972 s->current_picture.f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
975 // change inter to intra blocks if no reference frames are available
976 if (!s->last_picture.f.data[0] && !s->next_picture.f.data[0])
977 for(i=0; i<s->mb_num; i++){
978 const int mb_xy= s->mb_index2xy[i];
979 if (!IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
980 s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
983 /* handle inter blocks with damaged AC */
984 for(mb_y=0; mb_y<s->mb_height; mb_y++){
985 for(mb_x=0; mb_x<s->mb_width; mb_x++){
986 const int mb_xy= mb_x + mb_y * s->mb_stride;
987 const int mb_type= s->current_picture.f.mb_type[mb_xy];
/* dir selects the backward list when no forward reference exists. */
988 int dir = !s->last_picture.f.data[0];
989 error= s->error_status_table[mb_xy];
991 if(IS_INTRA(mb_type)) continue; //intra
992 if(error&ER_MV_ERROR) continue; //inter with damaged MV
993 if(!(error&ER_AC_ERROR)) continue; //undamaged inter
995 s->mv_dir = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
/* 8x8 motion: copy all four block MVs; otherwise one 16x16 MV. */
999 int mb_index= mb_x*2 + mb_y*2*s->b8_stride;
1001 s->mv_type = MV_TYPE_8X8;
1003 s->mv[0][j][0] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
1004 s->mv[0][j][1] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
1007 s->mv_type = MV_TYPE_16X16;
1008 s->mv[0][0][0] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0];
1009 s->mv[0][0][1] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1];
1012 s->dsp.clear_blocks(s->block[0]);
1016 decode_mb(s, 0/*FIXME h264 partitioned slices need this set*/);
/* B-frames: conceal damaged MVs by scaling the co-located next-picture MV
 * (direct-mode style temporal interpolation). */
1021 if(s->pict_type==AV_PICTURE_TYPE_B){
1022 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1023 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1024 int xy= mb_x*2 + mb_y*2*s->b8_stride;
1025 const int mb_xy= mb_x + mb_y * s->mb_stride;
1026 const int mb_type= s->current_picture.f.mb_type[mb_xy];
1027 error= s->error_status_table[mb_xy];
1029 if(IS_INTRA(mb_type)) continue;
1030 if(!(error&ER_MV_ERROR)) continue; //inter with undamaged MV
1031 if(!(error&ER_AC_ERROR)) continue; //undamaged inter
1033 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD;
1034 if(!s->last_picture.f.data[0]) s->mv_dir &= ~MV_DIR_FORWARD;
1035 if(!s->next_picture.f.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD;
1037 s->mv_type = MV_TYPE_16X16;
1041 int time_pp= s->pp_time;
1042 int time_pb= s->pb_time;
1044 if (s->avctx->codec_id == CODEC_ID_H264) {
/* Frame threading: wait for the next picture before reading its MVs. */
1047 ff_thread_await_progress((AVFrame *) s->next_picture_ptr,
/* Scale the co-located MV by the relative temporal distances. */
1050 s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] * time_pb / time_pp;
1051 s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] * time_pb / time_pp;
1052 s->mv[1][0][0] = s->next_picture.f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
1053 s->mv[1][0][1] = s->next_picture.f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
1061 s->dsp.clear_blocks(s->block[0]);
1070 /* the filters below are not XvMC compatible, skip them */
1071 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1073 /* fill DC for inter blocks */
1074 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1075 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1076 int dc, dcu, dcv, y, n;
1078 uint8_t *dest_y, *dest_cb, *dest_cr;
1079 const int mb_xy= mb_x + mb_y * s->mb_stride;
1080 const int mb_type = s->current_picture.f.mb_type[mb_xy];
1082 error= s->error_status_table[mb_xy];
1084 if(IS_INTRA(mb_type) && s->partitioned_frame) continue;
1085 // if(error&ER_MV_ERROR) continue; //inter data damaged FIXME is this good?
1087 dest_y = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
1088 dest_cb = s->current_picture.f.data[1] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
1089 dest_cr = s->current_picture.f.data[2] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
/* Average the reconstructed pixels back into the dc_val tables (stored DC
 * is 8x the mean, rounded: (dc+4)>>3 of a 64-pixel sum). */
1091 dc_ptr= &s->dc_val[0][mb_x*2 + mb_y*2*s->b8_stride];
1097 dc+= dest_y[x + (n&1)*8 + (y + (n>>1)*8)*s->linesize];
1100 dc_ptr[(n&1) + (n>>1)*s->b8_stride]= (dc+4)>>3;
1107 dcu+=dest_cb[x + y*(s->uvlinesize)];
1108 dcv+=dest_cr[x + y*(s->uvlinesize)];
1111 s->dc_val[1][mb_x + mb_y*s->mb_stride]= (dcu+4)>>3;
1112 s->dc_val[2][mb_x + mb_y*s->mb_stride]= (dcv+4)>>3;
1116 /* guess DC for damaged blocks */
1117 guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1);
1118 guess_dc(s, s->dc_val[1], s->mb_width , s->mb_height , s->mb_stride, 0);
1119 guess_dc(s, s->dc_val[2], s->mb_width , s->mb_height , s->mb_stride, 0);
1121 /* filter luma DC */
1122 filter181(s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride);
1125 /* render DC only intra */
1126 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1127 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1128 uint8_t *dest_y, *dest_cb, *dest_cr;
1129 const int mb_xy= mb_x + mb_y * s->mb_stride;
1130 const int mb_type = s->current_picture.f.mb_type[mb_xy];
1132 error= s->error_status_table[mb_xy];
1134 if(IS_INTER(mb_type)) continue;
1135 if(!(error&ER_AC_ERROR)) continue; //undamaged
1137 dest_y = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
1138 dest_cb = s->current_picture.f.data[1] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
1139 dest_cr = s->current_picture.f.data[2] + mb_x * 8 + mb_y * 8 * s->uvlinesize;
1141 put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
/* Optional deblocking pass across all 8x8 block boundaries. */
1146 if(s->avctx->error_concealment&FF_EC_DEBLOCK){
1147 /* filter horizontal block boundaries */
1148 h_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
1149 h_block_filter(s, s->current_picture.f.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
1150 h_block_filter(s, s->current_picture.f.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
1152 /* filter vertical block boundaries */
1153 v_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize , 1);
1154 v_block_filter(s, s->current_picture.f.data[1], s->mb_width , s->mb_height , s->uvlinesize, 0);
1155 v_block_filter(s, s->current_picture.f.data[2], s->mb_width , s->mb_height , s->uvlinesize, 0);
1159 /* clean a few tables */
1160 for(i=0; i<s->mb_num; i++){
1161 const int mb_xy= s->mb_index2xy[i];
1162 int error= s->error_status_table[mb_xy];
/* Concealed MBs must not be treated as skipped, and count as intra coded. */
1164 if(s->pict_type!=AV_PICTURE_TYPE_B && (error&(ER_DC_ERROR|ER_MV_ERROR|ER_AC_ERROR))){
1165 s->mbskip_table[mb_xy]=0;
1167 s->mbintra_table[mb_xy]=1;