diff --git a/libavcodec/error_resilience.c b/libavcodec/error_resilience.c
index 54f7b1065c46e29d582c40610034ca438262d0fe..23ac904c6ccdbe43ca1e32cd632c3db383fea349 100644
--- a/libavcodec/error_resilience.c
+++ b/libavcodec/error_resilience.c
@@ -3,25 +3,25 @@
  *
  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  *
- * This file is part of FFmpeg.
+ * This file is part of Libav.
  *
- * FFmpeg is free software; you can redistribute it and/or
+ * Libav is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
- * FFmpeg is distributed in the hope that it will be useful,
+ * Libav is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
+ * License along with Libav; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
 /**
- * @file libavcodec/error_resilience.c
+ * @file
  * Error resilience / concealment.
  */
 
@@ -32,6 +32,7 @@
 #include "mpegvideo.h"
 #include "h264.h"
 #include "rectangle.h"
+#include "thread.h"
 
 /*
  * H264 redefines mb_intra so it is not mistakely used (its uninitialized in h264)
@@ -40,9 +41,9 @@
 #undef mb_intra
 
 static void decode_mb(MpegEncContext *s, int ref){
-    s->dest[0] = s->current_picture.data[0] + (s->mb_y * 16* s->linesize  ) + s->mb_x * 16;
-    s->dest[1] = s->current_picture.data[1] + (s->mb_y * (16>>s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16>>s->chroma_x_shift);
-    s->dest[2] = s->current_picture.data[2] + (s->mb_y * (16>>s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16>>s->chroma_x_shift);
+    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y *  16 * s->linesize)                         + s->mb_x *  16;
+    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
+    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
 
     if(CONFIG_H264_DECODER && s->codec_id == CODEC_ID_H264){
         H264Context *h= (void*)s;
@@ -51,7 +52,7 @@ static void decode_mb(MpegEncContext *s, int ref){
         assert(ref>=0);
         if(ref >= h->ref_count[0]) //FIXME it is posible albeit uncommon that slice references differ between slices, we take the easy approuch and ignore it for now. If this turns out to have any relevance in practice then correct remapping should be added
             ref=0;
-        fill_rectangle(&s->current_picture.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1);
+        fill_rectangle(&s->current_picture.f.ref_index[0][4*h->mb_xy], 2, 2, 2, ref, 1);
         fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
         fill_rectangle(h->mv_cache[0][ scan8[0] ], 4, 4, 8, pack16to32(s->mv[0][0][0],s->mv[0][0][1]), 4);
         assert(!FRAME_MBAFF);
@@ -165,14 +166,14 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
 
             error= s->error_status_table[mb_index];
 
-            if(IS_INTER(s->current_picture.mb_type[mb_index])) continue; //inter
+            if(IS_INTER(s->current_picture.f.mb_type[mb_index])) continue; //inter
             if(!(error&DC_ERROR)) continue;           //dc-ok
 
             /* right block */
             for(j=b_x+1; j<w; j++){
                 int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
                 int error_j= s->error_status_table[mb_index_j];
-                int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+                int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
                 if(intra_j==0 || !(error_j&DC_ERROR)){
                     color[0]= dc[j + b_y*stride];
                     distance[0]= j-b_x;
@@ -184,7 +185,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
             for(j=b_x-1; j>=0; j--){
                 int mb_index_j= (j>>is_luma) + (b_y>>is_luma)*s->mb_stride;
                 int error_j= s->error_status_table[mb_index_j];
-                int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+                int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
                 if(intra_j==0 || !(error_j&DC_ERROR)){
                     color[1]= dc[j + b_y*stride];
                     distance[1]= b_x-j;
@@ -196,7 +197,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
             for(j=b_y+1; j<h; j++){
                 int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
                 int error_j= s->error_status_table[mb_index_j];
-                int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+                int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
                 if(intra_j==0 || !(error_j&DC_ERROR)){
                     color[2]= dc[b_x + j*stride];
                     distance[2]= j-b_y;
@@ -208,7 +209,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
             for(j=b_y-1; j>=0; j--){
                 int mb_index_j= (b_x>>is_luma) + (j>>is_luma)*s->mb_stride;
                 int error_j= s->error_status_table[mb_index_j];
-                int intra_j= IS_INTRA(s->current_picture.mb_type[mb_index_j]);
+                int intra_j = IS_INTRA(s->current_picture.f.mb_type[mb_index_j]);
                 if(intra_j==0 || !(error_j&DC_ERROR)){
                     color[3]= dc[b_x + j*stride];
                     distance[3]= b_y-j;
@@ -247,13 +248,13 @@ static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st
             int y;
             int left_status = s->error_status_table[( b_x   >>is_luma) + (b_y>>is_luma)*s->mb_stride];
             int right_status= s->error_status_table[((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride];
-            int left_intra=   IS_INTRA(s->current_picture.mb_type      [( b_x   >>is_luma) + (b_y>>is_luma)*s->mb_stride]);
-            int right_intra=  IS_INTRA(s->current_picture.mb_type      [((b_x+1)>>is_luma) + (b_y>>is_luma)*s->mb_stride]);
+            int left_intra  = IS_INTRA(s->current_picture.f.mb_type[( b_x      >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
+            int right_intra = IS_INTRA(s->current_picture.f.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
             int left_damage =  left_status&(DC_ERROR|AC_ERROR|MV_ERROR);
             int right_damage= right_status&(DC_ERROR|AC_ERROR|MV_ERROR);
             int offset= b_x*8 + b_y*stride*8;
-            int16_t *left_mv=  s->current_picture.motion_val[0][mvy_stride*b_y + mvx_stride* b_x   ];
-            int16_t *right_mv= s->current_picture.motion_val[0][mvy_stride*b_y + mvx_stride*(b_x+1)];
+            int16_t *left_mv=  s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride* b_x   ];
+            int16_t *right_mv= s->current_picture.f.motion_val[0][mvy_stride*b_y + mvx_stride*(b_x+1)];
 
             if(!(left_damage||right_damage)) continue; // both undamaged
 
@@ -310,13 +311,13 @@ static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st
             int x;
             int top_status   = s->error_status_table[(b_x>>is_luma) + ( b_y   >>is_luma)*s->mb_stride];
             int bottom_status= s->error_status_table[(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride];
-            int top_intra=     IS_INTRA(s->current_picture.mb_type      [(b_x>>is_luma) + ( b_y   >>is_luma)*s->mb_stride]);
-            int bottom_intra=  IS_INTRA(s->current_picture.mb_type      [(b_x>>is_luma) + ((b_y+1)>>is_luma)*s->mb_stride]);
+            int top_intra    = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ( b_y      >> is_luma) * s->mb_stride]);
+            int bottom_intra = IS_INTRA(s->current_picture.f.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
             int top_damage =      top_status&(DC_ERROR|AC_ERROR|MV_ERROR);
             int bottom_damage= bottom_status&(DC_ERROR|AC_ERROR|MV_ERROR);
             int offset= b_x*8 + b_y*stride*8;
-            int16_t *top_mv=    s->current_picture.motion_val[0][mvy_stride* b_y    + mvx_stride*b_x];
-            int16_t *bottom_mv= s->current_picture.motion_val[0][mvy_stride*(b_y+1) + mvx_stride*b_x];
+            int16_t *top_mv    = s->current_picture.f.motion_val[0][mvy_stride *  b_y      + mvx_stride * b_x];
+            int16_t *bottom_mv = s->current_picture.f.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
 
             if(!(top_damage||bottom_damage)) continue; // both undamaged
 
@@ -375,7 +376,7 @@ static void guess_mv(MpegEncContext *s){
         int f=0;
         int error= s->error_status_table[mb_xy];
 
-        if(IS_INTRA(s->current_picture.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check
+        if(IS_INTRA(s->current_picture.f.mb_type[mb_xy])) f=MV_FROZEN; //intra //FIXME check
         if(!(error&MV_ERROR)) f=MV_FROZEN;           //inter with undamaged MV
 
         fixed[mb_xy]= f;
@@ -388,10 +389,10 @@ static void guess_mv(MpegEncContext *s){
             for(mb_x=0; mb_x<s->mb_width; mb_x++){
                 const int mb_xy= mb_x + mb_y*s->mb_stride;
 
-                if(IS_INTRA(s->current_picture.mb_type[mb_xy]))  continue;
+                if(IS_INTRA(s->current_picture.f.mb_type[mb_xy]))  continue;
                 if(!(s->error_status_table[mb_xy]&MV_ERROR)) continue;
 
-                s->mv_dir = s->last_picture.data[0] ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
+                s->mv_dir = s->last_picture.f.data[0] ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
                 s->mb_intra=0;
                 s->mv_type = MV_TYPE_16X16;
                 s->mb_skipped=0;
@@ -428,14 +429,13 @@ int score_sum=0;
                     int best_score=256*256*256*64;
                     int best_pred=0;
                     const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
-                    int prev_x= s->current_picture.motion_val[0][mot_index][0];
-                    int prev_y= s->current_picture.motion_val[0][mot_index][1];
+                    int prev_x, prev_y, prev_ref;
 
                     if((mb_x^mb_y^pass)&1) continue;
 
                     if(fixed[mb_xy]==MV_FROZEN) continue;
-                    assert(!IS_INTRA(s->current_picture.mb_type[mb_xy]));
-                    assert(s->last_picture_ptr && s->last_picture_ptr->data[0]);
+                    assert(!IS_INTRA(s->current_picture.f.mb_type[mb_xy]));
+                    assert(s->last_picture_ptr && s->last_picture_ptr->f.data[0]);
 
                     j=0;
                     if(mb_x>0           && fixed[mb_xy-1        ]==MV_FROZEN) j=1;
@@ -454,27 +454,27 @@ int score_sum=0;
                     none_left=0;
 
                     if(mb_x>0 && fixed[mb_xy-1]){
-                        mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index - mot_step][0];
-                        mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index - mot_step][1];
-                        ref         [pred_count]   = s->current_picture.ref_index[0][4*(mb_xy-1)];
+                        mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_step][0];
+                        mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_step][1];
+                        ref         [pred_count]   = s->current_picture.f.ref_index[0][4*(mb_xy-1)];
                         pred_count++;
                     }
                     if(mb_x+1<mb_width && fixed[mb_xy+1]){
-                        mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index + mot_step][0];
-                        mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index + mot_step][1];
-                        ref         [pred_count]   = s->current_picture.ref_index[0][4*(mb_xy+1)];
+                        mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_step][0];
+                        mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_step][1];
+                        ref         [pred_count]   = s->current_picture.f.ref_index[0][4*(mb_xy+1)];
                         pred_count++;
                     }
                     if(mb_y>0 && fixed[mb_xy-mb_stride]){
-                        mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index - mot_stride*mot_step][0];
-                        mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index - mot_stride*mot_step][1];
-                        ref         [pred_count]   = s->current_picture.ref_index[0][4*(mb_xy-s->mb_stride)];
+                        mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][0];
+                        mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index - mot_stride*mot_step][1];
+                        ref         [pred_count]   = s->current_picture.f.ref_index[0][4*(mb_xy-s->mb_stride)];
                         pred_count++;
                     }
                     if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
-                        mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index + mot_stride*mot_step][0];
-                        mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index + mot_stride*mot_step][1];
-                        ref         [pred_count]   = s->current_picture.ref_index[0][4*(mb_xy+s->mb_stride)];
+                        mv_predictor[pred_count][0]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][0];
+                        mv_predictor[pred_count][1]= s->current_picture.f.motion_val[0][mot_index + mot_stride*mot_step][1];
+                        ref         [pred_count]   = s->current_picture.f.ref_index[0][4*(mb_xy+s->mb_stride)];
                         pred_count++;
                     }
                     if(pred_count==0) continue;
@@ -527,11 +527,31 @@ skip_mean_and_median:
                     /* zero MV */
                     pred_count++;
 
+                    if (!fixed[mb_xy]) {
+                        if (s->avctx->codec_id == CODEC_ID_H264) {
+                            // FIXME
+                        } else {
+                            ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
+                                                     mb_y, 0);
+                        }
+                        if (!s->last_picture.f.motion_val[0] ||
+                            !s->last_picture.f.ref_index[0])
+                            goto skip_last_mv;
+                        prev_x = s->last_picture.f.motion_val[0][mot_index][0];
+                        prev_y = s->last_picture.f.motion_val[0][mot_index][1];
+                        prev_ref = s->last_picture.f.ref_index[0][4*mb_xy];
+                    } else {
+                        prev_x = s->current_picture.f.motion_val[0][mot_index][0];
+                        prev_y = s->current_picture.f.motion_val[0][mot_index][1];
+                        prev_ref = s->current_picture.f.ref_index[0][4*mb_xy];
+                    }
+
                     /* last MV */
-                    mv_predictor[pred_count][0]= s->current_picture.motion_val[0][mot_index][0];
-                    mv_predictor[pred_count][1]= s->current_picture.motion_val[0][mot_index][1];
-                    ref         [pred_count]   = s->current_picture.ref_index[0][4*mb_xy];
+                    mv_predictor[pred_count][0]= prev_x;
+                    mv_predictor[pred_count][1]= prev_y;
+                    ref         [pred_count]   = prev_ref;
                     pred_count++;
+                skip_last_mv:
 
                     s->mv_dir = MV_DIR_FORWARD;
                     s->mb_intra=0;
@@ -545,10 +565,10 @@ skip_mean_and_median:
 
                     for(j=0; j<pred_count; j++){
                         int score=0;
-                        uint8_t *src= s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
+                        uint8_t *src = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
 
-                        s->current_picture.motion_val[0][mot_index][0]= s->mv[0][0][0]= mv_predictor[j][0];
-                        s->current_picture.motion_val[0][mot_index][1]= s->mv[0][0][1]= mv_predictor[j][1];
+                        s->current_picture.f.motion_val[0][mot_index][0] = s->mv[0][0][0] = mv_predictor[j][0];
+                        s->current_picture.f.motion_val[0][mot_index][1] = s->mv[0][0][1] = mv_predictor[j][1];
 
                         if(ref[j]<0) //predictor intra or otherwise not available
                             continue;
@@ -587,8 +607,8 @@ score_sum+= best_score;
 
                     for(i=0; i<mot_step; i++)
                         for(j=0; j<mot_step; j++){
-                            s->current_picture.motion_val[0][mot_index+i+j*mot_stride][0]= s->mv[0][0][0];
-                            s->current_picture.motion_val[0][mot_index+i+j*mot_stride][1]= s->mv[0][0][1];
+                            s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
+                            s->current_picture.f.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
                         }
 
                     decode_mb(s, ref[best_pred]);
@@ -620,7 +640,7 @@ score_sum+= best_score;
 static int is_intra_more_likely(MpegEncContext *s){
     int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;
 
-    if(!s->last_picture_ptr || !s->last_picture_ptr->data[0]) return 1; //no previous frame available -> use spatial prediction
+    if (!s->last_picture_ptr || !s->last_picture_ptr->f.data[0]) return 1; //no previous frame available -> use spatial prediction
 
     undamaged_count=0;
     for(i=0; i<s->mb_num; i++){
@@ -632,14 +652,14 @@ static int is_intra_more_likely(MpegEncContext *s){
 
     if(s->codec_id == CODEC_ID_H264){
         H264Context *h= (void*)s;
-        if(h->ref_count[0] <= 0 || !h->ref_list[0][0].data[0])
+        if (h->list_count <= 0 || h->ref_count[0] <= 0 || !h->ref_list[0][0].f.data[0])
             return 1;
     }
 
     if(undamaged_count < 5) return 0; //almost all MBs damaged -> use temporal prediction
 
     //prevent dsp.sad() check, that requires access to the image
-    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration && s->pict_type == FF_I_TYPE)
+    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration && s->pict_type == AV_PICTURE_TYPE_I)
         return 1;
 
     skip_amount= FFMAX(undamaged_count/50, 1); //check only upto 50 MBs
@@ -658,14 +678,20 @@ static int is_intra_more_likely(MpegEncContext *s){
             j++;
             if((j%skip_amount) != 0) continue; //skip a few to speed things up
 
-            if(s->pict_type==FF_I_TYPE){
-                uint8_t *mb_ptr     = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
-                uint8_t *last_mb_ptr= s->last_picture.data   [0] + mb_x*16 + mb_y*16*s->linesize;
+            if(s->pict_type==AV_PICTURE_TYPE_I){
+                uint8_t *mb_ptr     = s->current_picture.f.data[0] + mb_x*16 + mb_y*16*s->linesize;
+                uint8_t *last_mb_ptr= s->last_picture.f.data   [0] + mb_x*16 + mb_y*16*s->linesize;
 
+                if (s->avctx->codec_id == CODEC_ID_H264) {
+                    // FIXME
+                } else {
+                    ff_thread_await_progress((AVFrame *) s->last_picture_ptr,
+                                             mb_y, 0);
+                }
                 is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr                    , s->linesize, 16);
                 is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16);
             }else{
-                if(IS_INTRA(s->current_picture.mb_type[mb_xy]))
+                if (IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
                    is_intra_likely++;
                 else
                    is_intra_likely--;
@@ -681,6 +707,7 @@ void ff_er_frame_start(MpegEncContext *s){
 
     memset(s->error_status_table, MV_ERROR|AC_ERROR|DC_ERROR|VP_START|AC_END|DC_END|MV_END, s->mb_stride*s->mb_height*sizeof(uint8_t));
     s->error_count= 3*s->mb_num;
+    s->error_occurred = 0;
 }
 
 /**
@@ -720,7 +747,10 @@ void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int en
         s->error_count -= end_i - start_i + 1;
     }
 
-    if(status & (AC_ERROR|DC_ERROR|MV_ERROR)) s->error_count= INT_MAX;
+    if(status & (AC_ERROR|DC_ERROR|MV_ERROR)) {
+        s->error_occurred = 1;
+        s->error_count= INT_MAX;
+    }
 
     if(mask == ~0x7F){
         memset(&s->error_status_table[start_xy], 0, (end_xy - start_xy) * sizeof(uint8_t));
@@ -763,15 +793,15 @@ void ff_er_frame_end(MpegEncContext *s){
        s->picture_structure != PICT_FRAME || // we dont support ER of field pictures yet, though it should not crash if enabled
        s->error_count==3*s->mb_width*(s->avctx->skip_top + s->avctx->skip_bottom)) return;
 
-    if(s->current_picture.motion_val[0] == NULL){
+    if (s->current_picture.f.motion_val[0] == NULL) {
         av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
 
         for(i=0; i<2; i++){
-            pic->ref_index[i]= av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
+            pic->f.ref_index[i] = av_mallocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
             pic->motion_val_base[i]= av_mallocz((size+4) * 2 * sizeof(uint16_t));
-            pic->motion_val[i]= pic->motion_val_base[i]+4;
+            pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
         }
-        pic->motion_subsample_log2= 3;
+        pic->f.motion_subsample_log2 = 3;
         s->current_picture= *s->current_picture_ptr;
     }
 
@@ -786,7 +816,6 @@ void ff_er_frame_end(MpegEncContext *s){
         }
     }
 
-#if 1
     /* handle overlapping slices */
     for(error_type=1; error_type<=3; error_type++){
         int end_ok=0;
@@ -807,8 +836,7 @@ void ff_er_frame_end(MpegEncContext *s){
                 end_ok=0;
         }
     }
-#endif
-#if 1
+
     /* handle slices with partitions of different length */
     if(s->partitioned_frame){
         int end_ok=0;
@@ -829,7 +857,7 @@ void ff_er_frame_end(MpegEncContext *s){
                 end_ok=0;
         }
     }
-#endif
+
     /* handle missing slices */
     if(s->error_recognition>=4){
         int end_ok=1;
@@ -853,7 +881,6 @@ void ff_er_frame_end(MpegEncContext *s){
         }
     }
 
-#if 1
     /* backward mark errors */
     distance=9999999;
     for(error_type=1; error_type<=3; error_type++){
@@ -878,7 +905,6 @@ void ff_er_frame_end(MpegEncContext *s){
                 distance= 9999999;
         }
     }
-#endif
 
     /* forward mark errors */
     error=0;
@@ -893,7 +919,7 @@ void ff_er_frame_end(MpegEncContext *s){
             s->error_status_table[mb_xy]|= error;
         }
     }
-#if 1
+
     /* handle not partitioned case */
     if(!s->partitioned_frame){
         for(i=0; i<s->mb_num; i++){
@@ -904,7 +930,6 @@ void ff_er_frame_end(MpegEncContext *s){
             s->error_status_table[mb_xy]= error;
         }
     }
-#endif
 
     dc_error= ac_error= mv_error=0;
     for(i=0; i<s->mb_num; i++){
@@ -926,25 +951,25 @@ void ff_er_frame_end(MpegEncContext *s){
             continue;
 
         if(is_intra_likely)
-            s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA4x4;
+            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
         else
-            s->current_picture.mb_type[mb_xy]= MB_TYPE_16x16 | MB_TYPE_L0;
+            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
     }
 
     // change inter to intra blocks if no reference frames are available
-    if (!s->last_picture.data[0] && !s->next_picture.data[0])
+    if (!s->last_picture.f.data[0] && !s->next_picture.f.data[0])
         for(i=0; i<s->mb_num; i++){
             const int mb_xy= s->mb_index2xy[i];
-            if(!IS_INTRA(s->current_picture.mb_type[mb_xy]))
-                s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA4x4;
+            if (!IS_INTRA(s->current_picture.f.mb_type[mb_xy]))
+                s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
         }
 
     /* handle inter blocks with damaged AC */
     for(mb_y=0; mb_y<s->mb_height; mb_y++){
         for(mb_x=0; mb_x<s->mb_width; mb_x++){
             const int mb_xy= mb_x + mb_y * s->mb_stride;
-            const int mb_type= s->current_picture.mb_type[mb_xy];
-            int dir = !s->last_picture.data[0];
+            const int mb_type= s->current_picture.f.mb_type[mb_xy];
+            int dir = !s->last_picture.f.data[0];
             error= s->error_status_table[mb_xy];
 
             if(IS_INTRA(mb_type)) continue; //intra
@@ -959,13 +984,13 @@ void ff_er_frame_end(MpegEncContext *s){
                 int j;
                 s->mv_type = MV_TYPE_8X8;
                 for(j=0; j<4; j++){
-                    s->mv[0][j][0] = s->current_picture.motion_val[dir][ mb_index + (j&1) + (j>>1)*s->b8_stride ][0];
-                    s->mv[0][j][1] = s->current_picture.motion_val[dir][ mb_index + (j&1) + (j>>1)*s->b8_stride ][1];
+                    s->mv[0][j][0] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
+                    s->mv[0][j][1] = s->current_picture.f.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
                 }
             }else{
                 s->mv_type = MV_TYPE_16X16;
-                s->mv[0][0][0] = s->current_picture.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0];
-                s->mv[0][0][1] = s->current_picture.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1];
+                s->mv[0][0][0] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][0];
+                s->mv[0][0][1] = s->current_picture.f.motion_val[dir][ mb_x*2 + mb_y*2*s->b8_stride ][1];
             }
 
             s->dsp.clear_blocks(s->block[0]);
@@ -977,12 +1002,12 @@ void ff_er_frame_end(MpegEncContext *s){
     }
 
     /* guess MVs */
-    if(s->pict_type==FF_B_TYPE){
+    if(s->pict_type==AV_PICTURE_TYPE_B){
         for(mb_y=0; mb_y<s->mb_height; mb_y++){
             for(mb_x=0; mb_x<s->mb_width; mb_x++){
                 int xy= mb_x*2 + mb_y*2*s->b8_stride;
                 const int mb_xy= mb_x + mb_y * s->mb_stride;
-                const int mb_type= s->current_picture.mb_type[mb_xy];
+                const int mb_type= s->current_picture.f.mb_type[mb_xy];
                 error= s->error_status_table[mb_xy];
 
                 if(IS_INTRA(mb_type)) continue;
@@ -990,8 +1015,8 @@ void ff_er_frame_end(MpegEncContext *s){
                 if(!(error&AC_ERROR)) continue;           //undamaged inter
 
                 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD;
-                if(!s->last_picture.data[0]) s->mv_dir &= ~MV_DIR_FORWARD;
-                if(!s->next_picture.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD;
+                if(!s->last_picture.f.data[0]) s->mv_dir &= ~MV_DIR_FORWARD;
+                if(!s->next_picture.f.data[0]) s->mv_dir &= ~MV_DIR_BACKWARD;
                 s->mb_intra=0;
                 s->mv_type = MV_TYPE_16X16;
                 s->mb_skipped=0;
@@ -1000,10 +1025,16 @@ void ff_er_frame_end(MpegEncContext *s){
                     int time_pp= s->pp_time;
                     int time_pb= s->pb_time;
 
-                    s->mv[0][0][0] = s->next_picture.motion_val[0][xy][0]*time_pb/time_pp;
-                    s->mv[0][0][1] = s->next_picture.motion_val[0][xy][1]*time_pb/time_pp;
-                    s->mv[1][0][0] = s->next_picture.motion_val[0][xy][0]*(time_pb - time_pp)/time_pp;
-                    s->mv[1][0][1] = s->next_picture.motion_val[0][xy][1]*(time_pb - time_pp)/time_pp;
+                    if (s->avctx->codec_id == CODEC_ID_H264) {
+                        //FIXME
+                    } else {
+                        ff_thread_await_progress((AVFrame *) s->next_picture_ptr,
+                                                 mb_y, 0);
+                    }
+                    s->mv[0][0][0] = s->next_picture.f.motion_val[0][xy][0] *            time_pb  / time_pp;
+                    s->mv[0][0][1] = s->next_picture.f.motion_val[0][xy][1] *            time_pb  / time_pp;
+                    s->mv[1][0][0] = s->next_picture.f.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
+                    s->mv[1][0][1] = s->next_picture.f.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
                 }else{
                     s->mv[0][0][0]= 0;
                     s->mv[0][0][1]= 0;
@@ -1030,16 +1061,16 @@ void ff_er_frame_end(MpegEncContext *s){
             int16_t *dc_ptr;
             uint8_t *dest_y, *dest_cb, *dest_cr;
             const int mb_xy= mb_x + mb_y * s->mb_stride;
-            const int mb_type= s->current_picture.mb_type[mb_xy];
+            const int mb_type = s->current_picture.f.mb_type[mb_xy];
 
             error= s->error_status_table[mb_xy];
 
             if(IS_INTRA(mb_type) && s->partitioned_frame) continue;
 //            if(error&MV_ERROR) continue; //inter data damaged FIXME is this good?
 
-            dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
-            dest_cb= s->current_picture.data[1] + mb_x*8  + mb_y*8 *s->uvlinesize;
-            dest_cr= s->current_picture.data[2] + mb_x*8  + mb_y*8 *s->uvlinesize;
+            dest_y  = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
+            dest_cb = s->current_picture.f.data[1] + mb_x *  8 + mb_y *  8 * s->uvlinesize;
+            dest_cr = s->current_picture.f.data[2] + mb_x *  8 + mb_y *  8 * s->uvlinesize;
 
             dc_ptr= &s->dc_val[0][mb_x*2 + mb_y*2*s->b8_stride];
             for(n=0; n<4; n++){
@@ -1065,47 +1096,45 @@ void ff_er_frame_end(MpegEncContext *s){
             s->dc_val[2][mb_x + mb_y*s->mb_stride]= (dcv+4)>>3;
         }
     }
-#if 1
+
     /* guess DC for damaged blocks */
     guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1);
     guess_dc(s, s->dc_val[1], s->mb_width  , s->mb_height  , s->mb_stride, 0);
     guess_dc(s, s->dc_val[2], s->mb_width  , s->mb_height  , s->mb_stride, 0);
-#endif
+
     /* filter luma DC */
     filter181(s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride);
 
-#if 1
     /* render DC only intra */
     for(mb_y=0; mb_y<s->mb_height; mb_y++){
         for(mb_x=0; mb_x<s->mb_width; mb_x++){
             uint8_t *dest_y, *dest_cb, *dest_cr;
             const int mb_xy= mb_x + mb_y * s->mb_stride;
-            const int mb_type= s->current_picture.mb_type[mb_xy];
+            const int mb_type = s->current_picture.f.mb_type[mb_xy];
 
             error= s->error_status_table[mb_xy];
 
             if(IS_INTER(mb_type)) continue;
             if(!(error&AC_ERROR)) continue;              //undamaged
 
-            dest_y = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
-            dest_cb= s->current_picture.data[1] + mb_x*8  + mb_y*8 *s->uvlinesize;
-            dest_cr= s->current_picture.data[2] + mb_x*8  + mb_y*8 *s->uvlinesize;
+            dest_y  = s->current_picture.f.data[0] + mb_x * 16 + mb_y * 16 * s->linesize;
+            dest_cb = s->current_picture.f.data[1] + mb_x *  8 + mb_y *  8 * s->uvlinesize;
+            dest_cr = s->current_picture.f.data[2] + mb_x *  8 + mb_y *  8 * s->uvlinesize;
 
             put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
         }
     }
-#endif
 
     if(s->avctx->error_concealment&FF_EC_DEBLOCK){
         /* filter horizontal block boundaries */
-        h_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize  , 1);
-        h_block_filter(s, s->current_picture.data[1], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
-        h_block_filter(s, s->current_picture.data[2], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
+        h_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize  , 1);
+        h_block_filter(s, s->current_picture.f.data[1], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
+        h_block_filter(s, s->current_picture.f.data[2], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
 
         /* filter vertical block boundaries */
-        v_block_filter(s, s->current_picture.data[0], s->mb_width*2, s->mb_height*2, s->linesize  , 1);
-        v_block_filter(s, s->current_picture.data[1], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
-        v_block_filter(s, s->current_picture.data[2], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
+        v_block_filter(s, s->current_picture.f.data[0], s->mb_width*2, s->mb_height*2, s->linesize  , 1);
+        v_block_filter(s, s->current_picture.f.data[1], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
+        v_block_filter(s, s->current_picture.f.data[2], s->mb_width  , s->mb_height  , s->uvlinesize, 0);
     }
 
 ec_clean:
@@ -1114,7 +1143,7 @@ ec_clean:
         const int mb_xy= s->mb_index2xy[i];
         int error= s->error_status_table[mb_xy];
 
-        if(s->pict_type!=FF_B_TYPE && (error&(DC_ERROR|MV_ERROR|AC_ERROR))){
+        if(s->pict_type!=AV_PICTURE_TYPE_B && (error&(DC_ERROR|MV_ERROR|AC_ERROR))){
             s->mbskip_table[mb_xy]=0;
         }
         s->mbintra_table[mb_xy]=1;