vc1dec: Update destination pointers in context for fields
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index 73c221e29c1b985b7bc1ab2b3ef30a08f0a28e2c..1238da2859aa7a6122d47168c1df1e0dc9a61c71 100644
@@ -27,8 +27,8 @@
  */
 
 #include "internal.h"
-#include "dsputil.h"
 #include "avcodec.h"
+#include "error_resilience.h"
 #include "mpegvideo.h"
 #include "h263.h"
 #include "h264chroma.h"
@@ -73,6 +73,16 @@ enum Imode {
 };
 /** @} */ //imode defines
 
+static void init_block_index(VC1Context *v)
+{
+    MpegEncContext *s = &v->s;
+    ff_init_block_index(s);
+    if (v->field_mode && v->second_field) {
+        s->dest[0] += s->current_picture_ptr->f.linesize[0];
+        s->dest[1] += s->current_picture_ptr->f.linesize[1];
+        s->dest[2] += s->current_picture_ptr->f.linesize[2];
+    }
+}
 
 /** @} */ //Bitplane group
 
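The new init_block_index() wrapper is the heart of this change: the second-field destination offset is now applied once per macroblock row, so the motion-compensation and put helpers further down no longer add it per call. A minimal sketch of the row-loop pattern the later hunks switch to (names follow the patch; the per-macroblock work is elided):

static void decode_rows_sketch(VC1Context *v)
{
    MpegEncContext *s = &v->s;

    for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x = 0;
        init_block_index(v);          /* ff_init_block_index() plus a one-line
                                       * offset into the current frame when
                                       * decoding the second field */
        for (; s->mb_x < s->mb_width; s->mb_x++) {
            ff_update_block_index(s); /* advance s->dest[0..2] by one MB */
            /* ... macroblock decoding writes through s->dest[0..2], so the
             * per-call off/off_uv adjustments removed below can stay 0 ... */
        }
    }
}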
@@ -80,7 +90,7 @@ static void vc1_put_signed_blocks_clamped(VC1Context *v)
 {
     MpegEncContext *s = &v->s;
     int topleft_mb_pos, top_mb_pos;
-    int stride_y, fieldtx;
+    int stride_y, fieldtx = 0;
     int v_dist;
 
     /* The put pixels loop is always one MB row behind the decoding loop,
@@ -93,7 +103,8 @@ static void vc1_put_signed_blocks_clamped(VC1Context *v)
     if (!s->first_slice_line) {
         if (s->mb_x) {
             topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
-            fieldtx        = v->fieldtx_plane[topleft_mb_pos];
+            if (v->fcm == ILACE_FRAME)
+                fieldtx = v->fieldtx_plane[topleft_mb_pos];
             stride_y       = s->linesize << fieldtx;
             v_dist         = (16 - fieldtx) >> (fieldtx == 0);
             s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
@@ -117,7 +128,8 @@ static void vc1_put_signed_blocks_clamped(VC1Context *v)
         }
         if (s->mb_x == s->mb_width - 1) {
             top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
-            fieldtx    = v->fieldtx_plane[top_mb_pos];
+            if (v->fcm == ILACE_FRAME)
+                fieldtx = v->fieldtx_plane[top_mb_pos];
             stride_y   = s->linesize << fieldtx;
             v_dist     = fieldtx ? 15 : 8;
             s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
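Both hunks also gate the fieldtx_plane read on v->fcm == ILACE_FRAME, with fieldtx defaulting to 0 via the new initializer at the top of the function. For reference, the closed form used in the first hunk, v_dist = (16 - fieldtx) >> (fieldtx == 0), yields the same values as the explicit fieldtx ? 15 : 8 used here; a trivial check (illustrative only):

static int v_dist_for(int fieldtx)
{
    return (16 - fieldtx) >> (fieldtx == 0);  /* fieldtx 0 -> 8, 1 -> 15 */
}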
@@ -331,7 +343,6 @@ static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
 static void vc1_mc_1mv(VC1Context *v, int dir)
 {
     MpegEncContext *s = &v->s;
-    DSPContext *dsp   = &v->s.dsp;
     H264ChromaContext *h264chroma = &v->h264chroma;
     uint8_t *srcY, *srcU, *srcV;
     int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
@@ -348,8 +359,8 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
 
     // store motion vectors for further use in B frames
     if (s->pict_type == AV_PICTURE_TYPE_P) {
-        s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
-        s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
+        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
+        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
     }
 
     uvmx = (mx + ((mx & 3) == 3)) >> 1;
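The last context line above derives the half-pel chroma vector from the quarter-pel luma one: 3/4-pel positions are bumped to the next half-pel before halving (the uvmy line in a later hunk does the same for the vertical component). A small self-contained illustration with made-up values:

#include <stdio.h>

static int luma_to_chroma_mv(int m)
{
    return (m + ((m & 3) == 3)) >> 1;  /* bump x.75 positions, then halve */
}

int main(void)
{
    /* quarter-pel 2, 3, 4  ->  half-pel 1, 2, 2 */
    printf("%d %d %d\n", luma_to_chroma_mv(2), luma_to_chroma_mv(3),
           luma_to_chroma_mv(4));
    return 0;
}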
@@ -494,13 +505,8 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
         srcY += s->mspel * (1 + s->linesize);
     }
 
-    if (v->field_mode && v->cur_field_type) {
-        off    = s->current_picture_ptr->f.linesize[0];
-        off_uv = s->current_picture_ptr->f.linesize[1];
-    } else {
-        off    = 0;
-        off_uv = 0;
-    }
+    off    = 0;
+    off_uv = 0;
     if (s->mspel) {
         dxy = ((my & 3) << 2) | (mx & 3);
         v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off    , srcY    , s->linesize, v->rnd);
@@ -511,9 +517,9 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
     } else { // hpel mc - always used for luma
         dxy = (my & 2) | ((mx & 2) >> 1);
         if (!v->rnd)
-            dsp->put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
+            s->hdsp.put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
         else
-            dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
+            s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
     }
 
     if (s->flags & CODEC_FLAG_GRAY) return;
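This hunk (and the matching ones below) moves the half-pel copies from DSPContext to the hpel table that now lives in MpegEncContext (s->hdsp). The index computed above, dxy = (my & 2) | ((mx & 2) >> 1), picks one of the four half-pel variants; a tiny illustration, assuming the usual hpel table layout (0 full-pel, 1 horizontal, 2 vertical, 3 diagonal):

static int hpel_dxy(int mx, int my)
{
    /* bit 0: horizontal half-pel, bit 1: vertical half-pel */
    return (my & 2) | ((mx & 2) >> 1);
}
/* hpel_dxy(0, 0) == 0, hpel_dxy(2, 0) == 1, hpel_dxy(0, 2) == 2, hpel_dxy(2, 2) == 3 */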
@@ -545,7 +551,6 @@ static inline int median4(int a, int b, int c, int d)
 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
 {
     MpegEncContext *s = &v->s;
-    DSPContext *dsp = &v->s.dsp;
     uint8_t *srcY;
     int dxy, mx, my, src_x, src_y;
     int off;
@@ -604,8 +609,8 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
             ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
             break;
         }
-        s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
-        s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
+        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
+        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
         for (k = 0; k < 4; k++)
             v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
     }
@@ -631,8 +636,6 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
         off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
     else
         off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
-    if (v->field_mode && v->cur_field_type)
-        off += s->current_picture_ptr->f.linesize[0];
 
     src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
     if (!fieldmv)
@@ -707,9 +710,9 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
     } else { // hpel mc - always used for luma
         dxy = (my & 2) | ((mx & 2) >> 1);
         if (!v->rnd)
-            dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
+            s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
         else
-            dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
+            s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
     }
 }
 
@@ -798,8 +801,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
         valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
         chroma_ref_type = v->reffield;
         if (!valid_count) {
-            s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
-            s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
+            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
+            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
             v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
             return; //no need to do MC for intra blocks
         }
@@ -813,8 +816,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
     }
     if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
         return;
-    s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
-    s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
+    s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
+    s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
     uvmx = (tx + ((tx & 3) == 3)) >> 1;
     uvmy = (ty + ((ty & 3) == 3)) >> 1;
 
@@ -863,7 +866,7 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
             srcU += s->current_picture_ptr->f.linesize[1];
             srcV += s->current_picture_ptr->f.linesize[2];
         }
-        off = v->cur_field_type ? s->current_picture_ptr->f.linesize[1] : 0;
+        off = 0;
     }
 
     if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
@@ -1376,30 +1379,30 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
     xy   = s->block_index[n];
 
     if (s->mb_intra) {
-        s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
-        s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
-        s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
-        s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
+        s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
+        s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
+        s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
+        s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
         if (mv1) { /* duplicate motion data for 1-MV block */
-            s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0]        = 0;
-            s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1]        = 0;
-            s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0]     = 0;
-            s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1]     = 0;
-            s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
-            s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
+            s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0]        = 0;
+            s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1]        = 0;
+            s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0]     = 0;
+            s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1]     = 0;
+            s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
+            s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
             v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
-            s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0]        = 0;
-            s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1]        = 0;
-            s->current_picture.f.motion_val[1][xy + wrap][0]                     = 0;
-            s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1]     = 0;
-            s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
-            s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
+            s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0]        = 0;
+            s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1]        = 0;
+            s->current_picture.motion_val[1][xy + wrap][0]                     = 0;
+            s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1]     = 0;
+            s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
+            s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
         }
         return;
     }
 
-    C = s->current_picture.f.motion_val[dir][xy -    1 + v->blocks_off];
-    A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
+    C = s->current_picture.motion_val[dir][xy -    1 + v->blocks_off];
+    A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
     if (mv1) {
         if (v->field_mode && mixedmv_pic)
             off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
@@ -1421,7 +1424,7 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
             off = -1;
         }
     }
-    B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];
+    B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
 
     a_valid = !s->first_slice_line || (n == 2 || n == 3);
     b_valid = a_valid && (s->mb_width > 1);
@@ -1584,15 +1587,15 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
     if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
         y_bias = 1;
     /* store MV using signed modulus of MV range defined in 4.11 */
-    s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
-    s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
+    s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
+    s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
     if (mv1) { /* duplicate motion data for 1-MV block */
-        s->current_picture.f.motion_val[dir][xy +    1 +     v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
-        s->current_picture.f.motion_val[dir][xy +    1 +     v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
-        s->current_picture.f.motion_val[dir][xy + wrap +     v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
-        s->current_picture.f.motion_val[dir][xy + wrap +     v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
-        s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
-        s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
+        s->current_picture.motion_val[dir][xy +    1 +     v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
+        s->current_picture.motion_val[dir][xy +    1 +     v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
+        s->current_picture.motion_val[dir][xy + wrap +     v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
+        s->current_picture.motion_val[dir][xy + wrap +     v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
+        s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
+        s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
         v->mv_f[dir][xy +    1 + v->blocks_off] = v->mv_f[dir][xy +            v->blocks_off];
         v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
     }
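The "signed modulus of MV range" stores above confine each component to [-r, r-1]; the mask trick works because the VC-1 ranges r_x/r_y are powers of two. A standalone check with a hypothetical r = 64:

#include <stdio.h>

static int wrap_mv(int pred, int dmv, int r)
{
    return ((pred + dmv + r) & ((r << 1) - 1)) - r;  /* r must be a power of two */
}

int main(void)
{
    printf("%d\n", wrap_mv(60, 10, 64));  /* 70 wraps to -58 */
    printf("%d\n", wrap_mv(-3,  5, 64));  /* 2: already in range, unchanged */
    return 0;
}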
@@ -1616,24 +1619,24 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
     xy = s->block_index[n];
 
     if (s->mb_intra) {
-        s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
-        s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
-        s->current_picture.f.motion_val[1][xy][0] = 0;
-        s->current_picture.f.motion_val[1][xy][1] = 0;
+        s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
+        s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
+        s->current_picture.motion_val[1][xy][0] = 0;
+        s->current_picture.motion_val[1][xy][1] = 0;
         if (mvn == 1) { /* duplicate motion data for 1-MV block */
-            s->current_picture.f.motion_val[0][xy + 1][0]        = 0;
-            s->current_picture.f.motion_val[0][xy + 1][1]        = 0;
-            s->current_picture.f.motion_val[0][xy + wrap][0]     = 0;
-            s->current_picture.f.motion_val[0][xy + wrap][1]     = 0;
-            s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
-            s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
+            s->current_picture.motion_val[0][xy + 1][0]        = 0;
+            s->current_picture.motion_val[0][xy + 1][1]        = 0;
+            s->current_picture.motion_val[0][xy + wrap][0]     = 0;
+            s->current_picture.motion_val[0][xy + wrap][1]     = 0;
+            s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
+            s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
             v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
-            s->current_picture.f.motion_val[1][xy + 1][0]        = 0;
-            s->current_picture.f.motion_val[1][xy + 1][1]        = 0;
-            s->current_picture.f.motion_val[1][xy + wrap][0]     = 0;
-            s->current_picture.f.motion_val[1][xy + wrap][1]     = 0;
-            s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
-            s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
+            s->current_picture.motion_val[1][xy + 1][0]        = 0;
+            s->current_picture.motion_val[1][xy + 1][1]        = 0;
+            s->current_picture.motion_val[1][xy + wrap][0]     = 0;
+            s->current_picture.motion_val[1][xy + wrap][1]     = 0;
+            s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
+            s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
         }
         return;
     }
@@ -1643,14 +1646,14 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
     if (s->mb_x || (n == 1) || (n == 3)) {
         if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
             || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
-            A[0] = s->current_picture.f.motion_val[0][xy - 1][0];
-            A[1] = s->current_picture.f.motion_val[0][xy - 1][1];
+            A[0] = s->current_picture.motion_val[0][xy - 1][0];
+            A[1] = s->current_picture.motion_val[0][xy - 1][1];
             a_valid = 1;
         } else { // current block has frame mv and cand. has field MV (so average)
-            A[0] = (s->current_picture.f.motion_val[0][xy - 1][0]
-                    + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
-            A[1] = (s->current_picture.f.motion_val[0][xy - 1][1]
-                    + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
+            A[0] = (s->current_picture.motion_val[0][xy - 1][0]
+                    + s->current_picture.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
+            A[1] = (s->current_picture.motion_val[0][xy - 1][1]
+                    + s->current_picture.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
             a_valid = 1;
         }
         if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
@@ -1670,11 +1673,11 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                 if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
                     n_adj = (n & 2) | (n & 1);
                 }
-                B[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
-                B[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
+                B[0] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
+                B[1] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
                 if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
-                    B[0] = (B[0] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
-                    B[1] = (B[1] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
+                    B[0] = (B[0] + s->current_picture.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
+                    B[1] = (B[1] + s->current_picture.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
                 }
             }
             if (s->mb_width > 1) {
@@ -1685,11 +1688,11 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                     if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                         n_adj = n & 2;
                     }
-                    C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
-                    C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
+                    C[0] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
+                    C[1] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
                     if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
-                        C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
-                        C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
+                        C[0] = (1 + C[0] + (s->current_picture.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
+                        C[1] = (1 + C[1] + (s->current_picture.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
                     }
                     if (s->mb_x == s->mb_width - 1) {
                         if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
@@ -1699,11 +1702,11 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                             if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                                 n_adj = n | 1;
                             }
-                            C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
-                            C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
+                            C[0] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
+                            C[1] = s->current_picture.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
                             if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
-                                C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
-                                C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
+                                C[0] = (1 + C[0] + s->current_picture.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
+                                C[1] = (1 + C[1] + s->current_picture.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
                             }
                         } else
                             c_valid = 0;
@@ -1714,12 +1717,12 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
     } else {
         pos_b   = s->block_index[1];
         b_valid = 1;
-        B[0]    = s->current_picture.f.motion_val[0][pos_b][0];
-        B[1]    = s->current_picture.f.motion_val[0][pos_b][1];
+        B[0]    = s->current_picture.motion_val[0][pos_b][0];
+        B[1]    = s->current_picture.motion_val[0][pos_b][1];
         pos_c   = s->block_index[0];
         c_valid = 1;
-        C[0]    = s->current_picture.f.motion_val[0][pos_c][0];
-        C[1]    = s->current_picture.f.motion_val[0][pos_c][1];
+        C[0]    = s->current_picture.motion_val[0][pos_c][0];
+        C[1]    = s->current_picture.motion_val[0][pos_c][1];
     }
 
     total_valid = a_valid + b_valid + c_valid;
@@ -1807,18 +1810,18 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
     }
 
     /* store MV using signed modulus of MV range defined in 4.11 */
-    s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
-    s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
+    s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
+    s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
     if (mvn == 1) { /* duplicate motion data for 1-MV block */
-        s->current_picture.f.motion_val[0][xy +    1    ][0] = s->current_picture.f.motion_val[0][xy][0];
-        s->current_picture.f.motion_val[0][xy +    1    ][1] = s->current_picture.f.motion_val[0][xy][1];
-        s->current_picture.f.motion_val[0][xy + wrap    ][0] = s->current_picture.f.motion_val[0][xy][0];
-        s->current_picture.f.motion_val[0][xy + wrap    ][1] = s->current_picture.f.motion_val[0][xy][1];
-        s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
-        s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
+        s->current_picture.motion_val[0][xy +    1    ][0] = s->current_picture.motion_val[0][xy][0];
+        s->current_picture.motion_val[0][xy +    1    ][1] = s->current_picture.motion_val[0][xy][1];
+        s->current_picture.motion_val[0][xy + wrap    ][0] = s->current_picture.motion_val[0][xy][0];
+        s->current_picture.motion_val[0][xy + wrap    ][1] = s->current_picture.motion_val[0][xy][1];
+        s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
+        s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
     } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
-        s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
-        s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
+        s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
+        s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
         s->mv[0][n + 1][0] = s->mv[0][n][0];
         s->mv[0][n + 1][1] = s->mv[0][n][1];
     }
@@ -1829,7 +1832,6 @@ static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
 static void vc1_interp_mc(VC1Context *v)
 {
     MpegEncContext *s = &v->s;
-    DSPContext *dsp = &v->s.dsp;
     H264ChromaContext *h264chroma = &v->h264chroma;
     uint8_t *srcY, *srcU, *srcV;
     int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
@@ -1931,13 +1933,8 @@ static void vc1_interp_mc(VC1Context *v)
         srcY += s->mspel * (1 + s->linesize);
     }
 
-    if (v->field_mode && v->cur_field_type) {
-        off    = s->current_picture_ptr->f.linesize[0];
-        off_uv = s->current_picture_ptr->f.linesize[1];
-    } else {
-        off    = 0;
-        off_uv = 0;
-    }
+    off    = 0;
+    off_uv = 0;
 
     if (s->mspel) {
         dxy = ((my & 3) << 2) | (mx & 3);
@@ -1950,9 +1947,9 @@ static void vc1_interp_mc(VC1Context *v)
         dxy = (my & 2) | ((mx & 2) >> 1);
 
         if (!v->rnd)
-            dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
+            s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
         else
-            dsp->avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
+            s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
     }
 
     if (s->flags & CODEC_FLAG_GRAY) return;
@@ -2041,17 +2038,17 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
     xy = s->block_index[0];
 
     if (s->mb_intra) {
-        s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
-        s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
-        s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
-        s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
+        s->current_picture.motion_val[0][xy + v->blocks_off][0] =
+        s->current_picture.motion_val[0][xy + v->blocks_off][1] =
+        s->current_picture.motion_val[1][xy + v->blocks_off][0] =
+        s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
         return;
     }
     if (!v->field_mode) {
-        s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
-        s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
-        s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
-        s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
+        s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
+        s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
+        s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
+        s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
 
         /* Pullback predicted motion vectors as specified in 8.4.5.4 */
         s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width  << 6) - 4 - (s->mb_x << 6));
@@ -2060,18 +2057,18 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
         s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
     }
     if (direct) {
-        s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
-        s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
-        s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
-        s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
+        s->current_picture.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
+        s->current_picture.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
+        s->current_picture.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
+        s->current_picture.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
         return;
     }
 
     if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
-        C   = s->current_picture.f.motion_val[0][xy - 2];
-        A   = s->current_picture.f.motion_val[0][xy - wrap * 2];
+        C   = s->current_picture.motion_val[0][xy - 2];
+        A   = s->current_picture.motion_val[0][xy - wrap * 2];
         off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
-        B   = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];
+        B   = s->current_picture.motion_val[0][xy - wrap * 2 + off];
 
         if (!s->mb_x) C[0] = C[1] = 0;
         if (!s->first_slice_line) { // predictor A is not out of bounds
@@ -2146,10 +2143,10 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
         s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
     }
     if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
-        C   = s->current_picture.f.motion_val[1][xy - 2];
-        A   = s->current_picture.f.motion_val[1][xy - wrap * 2];
+        C   = s->current_picture.motion_val[1][xy - 2];
+        A   = s->current_picture.motion_val[1][xy - wrap * 2];
         off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
-        B   = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];
+        B   = s->current_picture.motion_val[1][xy - wrap * 2 + off];
 
         if (!s->mb_x)
             C[0] = C[1] = 0;
@@ -2225,10 +2222,10 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
         s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
         s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
     }
-    s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
-    s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
-    s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
-    s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
+    s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
+    s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
+    s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
+    s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
 }
 
 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
@@ -2239,14 +2236,14 @@ static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dm
 
     if (v->bmvtype == BMV_TYPE_DIRECT) {
         int total_opp, k, f;
-        if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
-            s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
+        if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
+            s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                       v->bfraction, 0, s->quarter_sample);
-            s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
+            s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                       v->bfraction, 0, s->quarter_sample);
-            s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
+            s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                       v->bfraction, 1, s->quarter_sample);
-            s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
+            s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                       v->bfraction, 1, s->quarter_sample);
 
             total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
@@ -2261,10 +2258,10 @@ static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dm
         }
         v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
         for (k = 0; k < 4; k++) {
-            s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
-            s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
-            s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
-            s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
+            s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
+            s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
+            s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
+            s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
             v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
             v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
         }
@@ -2382,17 +2379,17 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
     b = dc_val[ - 1 - wrap];
     a = dc_val[ - wrap];
     /* scale predictors if needed */
-    q1 = s->current_picture.f.qscale_table[mb_pos];
+    q1 = s->current_picture.qscale_table[mb_pos];
     dqscale_index = s->y_dc_scale_table[q1] - 1;
     if (dqscale_index < 0)
         return 0;
     if (c_avail && (n != 1 && n != 3)) {
-        q2 = s->current_picture.f.qscale_table[mb_pos - 1];
+        q2 = s->current_picture.qscale_table[mb_pos - 1];
         if (q2 && q2 != q1)
             c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
     }
     if (a_avail && (n != 2 && n != 3)) {
-        q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
+        q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
         if (q2 && q2 != q1)
             a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
     }
@@ -2402,7 +2399,7 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
             off--;
         if (n != 2)
             off -= s->mb_stride;
-        q2 = s->current_picture.f.qscale_table[off];
+        q2 = s->current_picture.qscale_table[off];
         if (q2 && q2 != q1)
             b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
     }
@@ -2773,11 +2770,11 @@ static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
     else // top
         ac_val -= 16 * s->block_wrap[n];
 
-    q1 = s->current_picture.f.qscale_table[mb_pos];
+    q1 = s->current_picture.qscale_table[mb_pos];
     if ( dc_pred_dir && c_avail && mb_pos)
-        q2 = s->current_picture.f.qscale_table[mb_pos - 1];
+        q2 = s->current_picture.qscale_table[mb_pos - 1];
     if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
-        q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
+        q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
     if ( dc_pred_dir && n == 1)
         q2 = q1;
     if (!dc_pred_dir && n == 2)
@@ -2996,11 +2993,11 @@ static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
     else //top
         ac_val -= 16 * s->block_wrap[n];
 
-    q1 = s->current_picture.f.qscale_table[mb_pos];
+    q1 = s->current_picture.qscale_table[mb_pos];
     if (dc_pred_dir && c_avail && mb_pos)
-        q2 = s->current_picture.f.qscale_table[mb_pos - 1];
+        q2 = s->current_picture.qscale_table[mb_pos - 1];
     if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
-        q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
+        q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
     if ( dc_pred_dir && n == 1)
         q2 = q1;
     if (!dc_pred_dir && n == 2)
@@ -3318,7 +3315,7 @@ static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_
             bottom_is_intra = (block_num < 2) ? (mb_is_intra          >> ((block_num + 2) * 4))
                                               : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
             mv_stride       = s->b8_stride;
-            mv              = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
+            mv              = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
         }
 
         if (bottom_is_intra & 1 || block_is_intra & 1 ||
@@ -3380,7 +3377,7 @@ static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_
                                              : (mb_cbp                              >> ((block_num + 1) * 4));
             right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
                                              : (mb_is_intra                         >> ((block_num + 1) * 4));
-            mv             = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
+            mv             = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
         }
         if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
             v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
@@ -3474,10 +3471,10 @@ static int vc1_decode_p_mb(VC1Context *v)
             GET_MVDATA(dmv_x, dmv_y);
 
             if (s->mb_intra) {
-                s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
-                s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
+                s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+                s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
             }
-            s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
+            s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
             vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
 
             /* FIXME Set DC val for inter block ? */
@@ -3494,7 +3491,7 @@ static int vc1_decode_p_mb(VC1Context *v)
                 mquant = v->pq;
                 cbp    = 0;
             }
-            s->current_picture.f.qscale_table[mb_pos] = mquant;
+            s->current_picture.qscale_table[mb_pos] = mquant;
 
             if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
                 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
@@ -3548,8 +3545,8 @@ static int vc1_decode_p_mb(VC1Context *v)
                 v->mb_type[0][s->block_index[i]] = 0;
                 s->dc_val[0][s->block_index[i]]  = 0;
             }
-            s->current_picture.f.mb_type[mb_pos]      = MB_TYPE_SKIP;
-            s->current_picture.f.qscale_table[mb_pos] = 0;
+            s->current_picture.mb_type[mb_pos]      = MB_TYPE_SKIP;
+            s->current_picture.qscale_table[mb_pos] = 0;
             vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
             vc1_mc_1mv(v, 0);
         }
@@ -3592,7 +3589,7 @@ static int vc1_decode_p_mb(VC1Context *v)
             if (!intra_count && !coded_inter)
                 goto end;
             GET_MQUANT();
-            s->current_picture.f.qscale_table[mb_pos] = mquant;
+            s->current_picture.qscale_table[mb_pos] = mquant;
             /* test if block is intra and has pred */
             {
                 int intrapred = 0;
@@ -3655,7 +3652,7 @@ static int vc1_decode_p_mb(VC1Context *v)
             }
         } else { // skipped MB
             s->mb_intra                               = 0;
-            s->current_picture.f.qscale_table[mb_pos] = 0;
+            s->current_picture.qscale_table[mb_pos] = 0;
             for (i = 0; i < 6; i++) {
                 v->mb_type[0][s->block_index[i]] = 0;
                 s->dc_val[0][s->block_index[i]]  = 0;
@@ -3665,7 +3662,7 @@ static int vc1_decode_p_mb(VC1Context *v)
                 vc1_mc_4mv_luma(v, i, 0);
             }
             vc1_mc_4mv_chroma(v, 0);
-            s->current_picture.f.qscale_table[mb_pos] = 0;
+            s->current_picture.qscale_table[mb_pos] = 0;
         }
     }
 end:
@@ -3740,9 +3737,9 @@ static int vc1_decode_p_mb_intfr(VC1Context *v)
             break;
         }
         if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
-            s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
-            s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
-            s->current_picture.f.mb_type[mb_pos]                     = MB_TYPE_INTRA;
+            s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+            s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+            s->current_picture.mb_type[mb_pos]                     = MB_TYPE_INTRA;
             s->mb_intra = v->is_intra[s->mb_x] = 1;
             for (i = 0; i < 6; i++)
                 v->mb_type[0][s->block_index[i]] = 1;
@@ -3752,7 +3749,7 @@ static int vc1_decode_p_mb_intfr(VC1Context *v)
                 cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
             v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
             GET_MQUANT();
-            s->current_picture.f.qscale_table[mb_pos] = mquant;
+            s->current_picture.qscale_table[mb_pos] = mquant;
             /* Set DC scale - y and c use the same (not sure if necessary here) */
             s->y_dc_scale = s->y_dc_scale_table[mquant];
             s->c_dc_scale = s->c_dc_scale_table[mquant];
@@ -3844,7 +3841,7 @@ static int vc1_decode_p_mb_intfr(VC1Context *v)
             }
             if (cbp)
                 GET_MQUANT();  // p. 227
-            s->current_picture.f.qscale_table[mb_pos] = mquant;
+            s->current_picture.qscale_table[mb_pos] = mquant;
             if (!v->ttmbf && cbp)
                 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
             for (i = 0; i < 6; i++) {
@@ -3873,8 +3870,8 @@ static int vc1_decode_p_mb_intfr(VC1Context *v)
             v->mb_type[0][s->block_index[i]] = 0;
             s->dc_val[0][s->block_index[i]] = 0;
         }
-        s->current_picture.f.mb_type[mb_pos]      = MB_TYPE_SKIP;
-        s->current_picture.f.qscale_table[mb_pos] = 0;
+        s->current_picture.mb_type[mb_pos]      = MB_TYPE_SKIP;
+        s->current_picture.qscale_table[mb_pos] = 0;
         v->blk_mv_type[s->block_index[0]] = 0;
         v->blk_mv_type[s->block_index[1]] = 0;
         v->blk_mv_type[s->block_index[2]] = 0;
@@ -3911,11 +3908,11 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
     idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
     if (idx_mbmode <= 1) { // intra MB
         s->mb_intra = v->is_intra[s->mb_x] = 1;
-        s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
-        s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
-        s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
+        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
+        s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
+        s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
         GET_MQUANT();
-        s->current_picture.f.qscale_table[mb_pos] = mquant;
+        s->current_picture.qscale_table[mb_pos] = mquant;
         /* Set DC scale - y and c use the same (not sure if necessary here) */
         s->y_dc_scale = s->y_dc_scale_table[mquant];
         s->c_dc_scale = s->c_dc_scale_table[mquant];
@@ -3941,13 +3938,12 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
                 continue;
             v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
             off  = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
-            off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
             s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
             // TODO: loop filter
         }
     } else {
         s->mb_intra = v->is_intra[s->mb_x] = 0;
-        s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
+        s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
         for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
         if (idx_mbmode <= 5) { // 1-MV
             dmv_x = dmv_y = pred_flag = 0;
@@ -3978,7 +3974,7 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
         if (cbp) {
             GET_MQUANT();
         }
-        s->current_picture.f.qscale_table[mb_pos] = mquant;
+        s->current_picture.qscale_table[mb_pos] = mquant;
         if (!v->ttmbf && cbp) {
             ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
         }
@@ -3988,8 +3984,6 @@ static int vc1_decode_p_mb_intfi(VC1Context *v)
             dst_idx += i >> 2;
             val = ((cbp >> (5 - i)) & 1);
             off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
-            if (v->cur_field_type)
-                off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
             if (val) {
                 pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                                          first_block, s->dest[dst_idx] + off,
@@ -4044,7 +4038,7 @@ static void vc1_decode_b_mb(VC1Context *v)
         v->mb_type[0][s->block_index[i]] = 0;
         s->dc_val[0][s->block_index[i]]  = 0;
     }
-    s->current_picture.f.qscale_table[mb_pos] = 0;
+    s->current_picture.qscale_table[mb_pos] = 0;
 
     if (!direct) {
         if (!skipped) {
@@ -4081,7 +4075,7 @@ static void vc1_decode_b_mb(VC1Context *v)
         cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
         GET_MQUANT();
         s->mb_intra = 0;
-        s->current_picture.f.qscale_table[mb_pos] = mquant;
+        s->current_picture.qscale_table[mb_pos] = mquant;
         if (!v->ttmbf)
             ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
         dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
@@ -4096,7 +4090,7 @@ static void vc1_decode_b_mb(VC1Context *v)
         }
         if (s->mb_intra && !mb_has_coeffs) {
             GET_MQUANT();
-            s->current_picture.f.qscale_table[mb_pos] = mquant;
+            s->current_picture.qscale_table[mb_pos] = mquant;
             s->ac_pred = get_bits1(gb);
             cbp = 0;
             vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
@@ -4118,7 +4112,7 @@ static void vc1_decode_b_mb(VC1Context *v)
                 s->ac_pred = get_bits1(gb);
             cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
             GET_MQUANT();
-            s->current_picture.f.qscale_table[mb_pos] = mquant;
+            s->current_picture.qscale_table[mb_pos] = mquant;
             if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
                 ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
         }
@@ -4185,11 +4179,11 @@ static void vc1_decode_b_mb_intfi(VC1Context *v)
     idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
     if (idx_mbmode <= 1) { // intra MB
         s->mb_intra = v->is_intra[s->mb_x] = 1;
-        s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
-        s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
-        s->current_picture.f.mb_type[mb_pos + v->mb_off]         = MB_TYPE_INTRA;
+        s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+        s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+        s->current_picture.mb_type[mb_pos + v->mb_off]         = MB_TYPE_INTRA;
         GET_MQUANT();
-        s->current_picture.f.qscale_table[mb_pos] = mquant;
+        s->current_picture.qscale_table[mb_pos] = mquant;
         /* Set DC scale - y and c use the same (not sure if necessary here) */
         s->y_dc_scale = s->y_dc_scale_table[mquant];
         s->c_dc_scale = s->c_dc_scale_table[mquant];
@@ -4218,13 +4212,12 @@ static void vc1_decode_b_mb_intfi(VC1Context *v)
                 for (j = 0; j < 64; j++)
                     s->block[i][j] <<= 1;
             off  = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
-            off += v->cur_field_type ? ((i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0]) : 0;
             s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
             // TODO: yet to perform loop filter
         }
     } else {
         s->mb_intra = v->is_intra[s->mb_x] = 0;
-        s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
+        s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
         for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
         if (v->fmb_is_raw)
             fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
@@ -4290,7 +4283,7 @@ static void vc1_decode_b_mb_intfi(VC1Context *v)
         if (cbp) {
             GET_MQUANT();
         }
-        s->current_picture.f.qscale_table[mb_pos] = mquant;
+        s->current_picture.qscale_table[mb_pos] = mquant;
         if (!v->ttmbf && cbp) {
             ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
         }
@@ -4300,8 +4293,6 @@ static void vc1_decode_b_mb_intfi(VC1Context *v)
             dst_idx += i >> 2;
             val = ((cbp >> (5 - i)) & 1);
             off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
-            if (v->cur_field_type)
-                off += (i & 4) ? s->current_picture_ptr->f.linesize[1] : s->current_picture_ptr->f.linesize[0];
             if (val) {
                 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
                                    first_block, s->dest[dst_idx] + off,
@@ -4360,7 +4351,7 @@ static void vc1_decode_i_blocks(VC1Context *v)
     s->first_slice_line = 1;
     for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
         s->mb_x = 0;
-        ff_init_block_index(s);
+        init_block_index(v);
         for (; s->mb_x < v->end_mb_x; s->mb_x++) {
             uint8_t *dst[6];
             ff_update_block_index(s);
@@ -4372,10 +4363,10 @@ static void vc1_decode_i_blocks(VC1Context *v)
             dst[5] = s->dest[2];
             s->dsp.clear_blocks(s->block[0]);
             mb_pos = s->mb_x + s->mb_y * s->mb_width;
-            s->current_picture.f.mb_type[mb_pos]                     = MB_TYPE_INTRA;
-            s->current_picture.f.qscale_table[mb_pos]                = v->pq;
-            s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
-            s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
+            s->current_picture.mb_type[mb_pos]                     = MB_TYPE_INTRA;
+            s->current_picture.qscale_table[mb_pos]                = v->pq;
+            s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+            s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
 
             // do actual MB decoding and displaying
             cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
@@ -4441,14 +4432,14 @@ static void vc1_decode_i_blocks(VC1Context *v)
             }
         }
         if (!v->s.loop_filter)
-            ff_draw_horiz_band(s, s->mb_y * 16, 16);
+            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
         else if (s->mb_y)
-            ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
+            ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
 
         s->first_slice_line = 0;
     }
     if (v->s.loop_filter)
-        ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
+        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
 
     /* This is intentionally mb_height and not end_mb_y - unlike in advanced
      * profile, these only differ when decoding MSS2 rectangles. */
@@ -4500,21 +4491,21 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)
     s->mb_y             = s->start_mb_y;
     if (s->start_mb_y) {
         s->mb_x = 0;
-        ff_init_block_index(s);
+        init_block_index(v);
         memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
                (1 + s->b8_stride) * sizeof(*s->coded_block));
     }
     for (; s->mb_y < s->end_mb_y; s->mb_y++) {
         s->mb_x = 0;
-        ff_init_block_index(s);
+        init_block_index(v);
         for (;s->mb_x < s->mb_width; s->mb_x++) {
             int16_t (*block)[64] = v->block[v->cur_blk_idx];
             ff_update_block_index(s);
             s->dsp.clear_blocks(block[0]);
             mb_pos = s->mb_x + s->mb_y * s->mb_stride;
-            s->current_picture.f.mb_type[mb_pos + v->mb_off]                         = MB_TYPE_INTRA;
-            s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
-            s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
+            s->current_picture.mb_type[mb_pos + v->mb_off]                         = MB_TYPE_INTRA;
+            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
+            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
 
             // do actual MB decoding and displaying
             if (v->fieldtx_is_raw)
@@ -4530,7 +4521,7 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)
 
             GET_MQUANT();
 
-            s->current_picture.f.qscale_table[mb_pos] = mquant;
+            s->current_picture.qscale_table[mb_pos] = mquant;
             /* Set DC scale - y and c use the same */
             s->y_dc_scale = s->y_dc_scale_table[mquant];
             s->c_dc_scale = s->c_dc_scale_table[mquant];
@@ -4569,15 +4560,16 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)
             }
         }
         if (!v->s.loop_filter)
-            ff_draw_horiz_band(s, s->mb_y * 16, 16);
+            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
         else if (s->mb_y)
-            ff_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
+            ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
         s->first_slice_line = 0;
     }
 
     /* raw bottom MB row */
     s->mb_x = 0;
-    ff_init_block_index(s);
+    init_block_index(v);
+
     for (;s->mb_x < s->mb_width; s->mb_x++) {
         ff_update_block_index(s);
         vc1_put_signed_blocks_clamped(v);
@@ -4585,7 +4577,7 @@ static void vc1_decode_i_blocks_adv(VC1Context *v)
             vc1_loop_filter_iblk_delayed(v, v->pq);
     }
     if (v->s.loop_filter)
-        ff_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
+        ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
     ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                     (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
 }
@@ -4620,12 +4612,13 @@ static void vc1_decode_p_blocks(VC1Context *v)
         break;
     }
 
-    apply_loop_filter   = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
+    apply_loop_filter   = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
+                          v->fcm == PROGRESSIVE;
     s->first_slice_line = 1;
     memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
     for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
         s->mb_x = 0;
-        ff_init_block_index(s);
+        init_block_index(v);
         for (; s->mb_x < s->mb_width; s->mb_x++) {
             ff_update_block_index(s);
 
@@ -4634,7 +4627,7 @@ static void vc1_decode_p_blocks(VC1Context *v)
             else if (v->fcm == ILACE_FRAME)
                 vc1_decode_p_mb_intfr(v);
             else vc1_decode_p_mb(v);
-            if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == PROGRESSIVE)
+            if (s->mb_y != s->start_mb_y && apply_loop_filter)
                 vc1_apply_p_loop_filter(v);
             if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
                 // TODO: may need modification to handle slice coding
@@ -4648,19 +4641,19 @@ static void vc1_decode_p_blocks(VC1Context *v)
         memmove(v->ttblk_base,    v->ttblk,    sizeof(v->ttblk_base[0])    * s->mb_stride);
         memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
         memmove(v->luma_mv_base,  v->luma_mv,  sizeof(v->luma_mv_base[0])  * s->mb_stride);
-        if (s->mb_y != s->start_mb_y) ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
+        if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
         s->first_slice_line = 0;
     }
     if (apply_loop_filter) {
         s->mb_x = 0;
-        ff_init_block_index(s);
+        init_block_index(v);
         for (; s->mb_x < s->mb_width; s->mb_x++) {
             ff_update_block_index(s);
             vc1_apply_p_loop_filter(v);
         }
     }
     if (s->end_mb_y >= s->start_mb_y)
-        ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
+        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
     ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                     (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
 }
@@ -4697,7 +4690,7 @@ static void vc1_decode_b_blocks(VC1Context *v)
     s->first_slice_line = 1;
     for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
         s->mb_x = 0;
-        ff_init_block_index(s);
+        init_block_index(v);
         for (; s->mb_x < s->mb_width; s->mb_x++) {
             ff_update_block_index(s);
 
@@ -4715,13 +4708,13 @@ static void vc1_decode_b_blocks(VC1Context *v)
             if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
         }
         if (!v->s.loop_filter)
-            ff_draw_horiz_band(s, s->mb_y * 16, 16);
+            ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
         else if (s->mb_y)
-            ff_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
+            ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
         s->first_slice_line = 0;
     }
     if (v->s.loop_filter)
-        ff_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
+        ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
     ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                     (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
 }
@@ -4734,12 +4727,12 @@ static void vc1_decode_skip_blocks(VC1Context *v)
     s->first_slice_line = 1;
     for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
         s->mb_x = 0;
-        ff_init_block_index(s);
+        init_block_index(v);
         ff_update_block_index(s);
         memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize,   s->linesize   * 16);
         memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y *  8 * s->uvlinesize, s->uvlinesize *  8);
         memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y *  8 * s->uvlinesize, s->uvlinesize *  8);
-        ff_draw_horiz_band(s, s->mb_y * 16, 16);
+        ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
         s->first_slice_line = 0;
     }
     s->pict_type = AV_PICTURE_TYPE_P;
@@ -4850,7 +4843,7 @@ static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
     for (sprite = 0; sprite <= v->two_sprites; sprite++) {
         vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
         if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
-            av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
+            avpriv_request_sample(avctx, "Non-zero rotation coefficients");
         av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
         for (i = 0; i < 7; i++)
             av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
@@ -5024,12 +5017,8 @@ static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
         v->two_sprites = 0;
     }
 
-    if (v->sprite_output_frame.data[0])
-        avctx->release_buffer(avctx, &v->sprite_output_frame);
-
-    v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
-    v->sprite_output_frame.reference = 0;
-    if (ff_get_buffer(avctx, &v->sprite_output_frame) < 0) {
+    av_frame_unref(&v->sprite_output_frame);
+    if (ff_get_buffer(avctx, &v->sprite_output_frame, 0) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return -1;
     }
@@ -5279,9 +5268,8 @@ av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
     VC1Context *v = avctx->priv_data;
     int i;
 
-    if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
-        && v->sprite_output_frame.data[0])
-        avctx->release_buffer(avctx, &v->sprite_output_frame);
+    av_frame_unref(&v->sprite_output_frame);
+
     for (i = 0; i < 4; i++)
         av_freep(&v->sr_rows[i >> 1][i & 1]);
     av_freep(&v->hrd_rate);
@@ -5315,7 +5303,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
 {
     const uint8_t *buf = avpkt->data;
-    int buf_size = avpkt->size, n_slices = 0, i;
+    int buf_size = avpkt->size, n_slices = 0, i, ret;
     VC1Context *v = avctx->priv_data;
     MpegEncContext *s = &v->s;
     AVFrame *pict = data;
@@ -5332,7 +5320,8 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
         /* special case for last picture */
         if (s->low_delay == 0 && s->next_picture_ptr) {
-            *pict = s->next_picture_ptr->f;
+            if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
+                return ret;
             s->next_picture_ptr = NULL;
 
             *got_frame = 1;
@@ -5635,7 +5624,8 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                 get_bits_count(&s->gb), s->gb.size_in_bits);
 //  if (get_bits_count(&s->gb) > buf_size * 8)
 //      return -1;
-        ff_er_frame_end(&s->er);
+        if (!v->field_mode)
+            ff_er_frame_end(&s->er);
     }
 
     ff_MPV_frame_end(s);
@@ -5650,17 +5640,21 @@ image:
         if (vc1_decode_sprites(v, &s->gb))
             goto err;
 #endif
-        *pict      = v->sprite_output_frame;
+        if ((ret = av_frame_ref(pict, &v->sprite_output_frame)) < 0)
+            goto err;
         *got_frame = 1;
     } else {
         if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
-            *pict = s->current_picture_ptr->f;
+            if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
+                goto err;
+            ff_print_debug_info(s, s->current_picture_ptr);
         } else if (s->last_picture_ptr != NULL) {
-            *pict = s->last_picture_ptr->f;
+            if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
+                goto err;
+            ff_print_debug_info(s, s->last_picture_ptr);
         }
         if (s->last_picture_ptr || s->low_delay) {
             *got_frame = 1;
-            ff_print_debug_info(s, pict);
         }
     }
 
@@ -5688,6 +5682,20 @@ static const AVProfile profiles[] = {
     { FF_PROFILE_UNKNOWN },
 };
 
+static const enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[] = {
+#if CONFIG_DXVA2
+    AV_PIX_FMT_DXVA2_VLD,
+#endif
+#if CONFIG_VAAPI
+    AV_PIX_FMT_VAAPI_VLD,
+#endif
+#if CONFIG_VDPAU
+    AV_PIX_FMT_VDPAU,
+#endif
+    AV_PIX_FMT_YUV420P,
+    AV_PIX_FMT_NONE
+};
+
 AVCodec ff_vc1_decoder = {
     .name           = "vc1",
     .type           = AVMEDIA_TYPE_VIDEO,
@@ -5699,7 +5707,7 @@ AVCodec ff_vc1_decoder = {
     .flush          = ff_mpeg_flush,
     .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
     .long_name      = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
-    .pix_fmts       = ff_hwaccel_pixfmt_list_420,
+    .pix_fmts       = vc1_hwaccel_pixfmt_list_420,
     .profiles       = NULL_IF_CONFIG_SMALL(profiles)
 };
 
@@ -5715,7 +5723,7 @@ AVCodec ff_wmv3_decoder = {
     .flush          = ff_mpeg_flush,
     .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
     .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
-    .pix_fmts       = ff_hwaccel_pixfmt_list_420,
+    .pix_fmts       = vc1_hwaccel_pixfmt_list_420,
     .profiles       = NULL_IF_CONFIG_SMALL(profiles)
 };
 #endif