diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 7ffb2615df99eca9aea134b6d8de533703b7d8bd..b88dd58a1d0d97c1c47e38564067ed17cb3c6154 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
 #include "libavutil/avassert.h"
 #include "libavutil/imgutils.h"
 #include "libavutil/internal.h"
+#include "libavutil/timer.h"
 #include "avcodec.h"
-#include "dsputil.h"
+#include "blockdsp.h"
+#include "idctdsp.h"
 #include "internal.h"
 #include "mathops.h"
+#include "mpegutils.h"
 #include "mpegvideo.h"
 #include "mjpegenc.h"
 #include "msmpeg4.h"
+#include "qpeldsp.h"
 #include "xvmc_internal.h"
 #include "thread.h"
 #include <limits.h>
 
-static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale);
-static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale);
-static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale);
-static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale);
-static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale);
-static void dct_unquantize_h263_intra_c(MpegEncContext *s,
-                                  int16_t *block, int n, int qscale);
-static void dct_unquantize_h263_inter_c(MpegEncContext *s,
-                                  int16_t *block, int n, int qscale);
-
 static const uint8_t ff_default_chroma_qscale_table[32] = {
 //   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
      0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
@@ -118,11 +107,246 @@ const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
     mpeg2_dc_scale_table3,
 };
 
-const enum AVPixelFormat ff_pixfmt_list_420[] = {
-    AV_PIX_FMT_YUV420P,
-    AV_PIX_FMT_NONE
+const uint8_t ff_alternate_horizontal_scan[64] = {
+     0,  1,  2,  3,  8,  9, 16, 17,
+    10, 11,  4,  5,  6,  7, 15, 14,
+    13, 12, 19, 18, 24, 25, 32, 33,
+    26, 27, 20, 21, 22, 23, 28, 29,
+    30, 31, 34, 35, 40, 41, 48, 49,
+    42, 43, 36, 37, 38, 39, 44, 45,
+    46, 47, 50, 51, 56, 57, 58, 59,
+    52, 53, 54, 55, 60, 61, 62, 63,
 };
 
+const uint8_t ff_alternate_vertical_scan[64] = {
+     0,  8, 16, 24,  1,  9,  2, 10,
+    17, 25, 32, 40, 48, 56, 57, 49,
+    41, 33, 26, 18,  3, 11,  4, 12,
+    19, 27, 34, 42, 50, 58, 35, 43,
+    51, 59, 20, 28,  5, 13,  6, 14,
+    21, 29, 36, 44, 52, 60, 37, 45,
+    53, 61, 22, 30,  7, 15, 23, 31,
+    38, 46, 54, 62, 39, 47, 55, 63,
+};
+
+static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
+                                   int16_t *block, int n, int qscale)
+{
+    int i, level, nCoeffs;
+    const uint16_t *quant_matrix;
+
+    nCoeffs= s->block_last_index[n];
+
+    if (n < 4)
+        block[0] = block[0] * s->y_dc_scale;
+    else
+        block[0] = block[0] * s->c_dc_scale;
+    /* XXX: only mpeg1 */
+    quant_matrix = s->intra_matrix;
+    for(i=1;i<=nCoeffs;i++) {
+        int j= s->intra_scantable.permutated[i];
+        level = block[j];
+        if (level) {
+            if (level < 0) {
+                level = -level;
+                level = (int)(level * qscale * quant_matrix[j]) >> 3;
+                level = (level - 1) | 1;
+                level = -level;
+            } else {
+                level = (int)(level * qscale * quant_matrix[j]) >> 3;
+                level = (level - 1) | 1;
+            }
+            block[j] = level;
+        }
+    }
+}
+
+static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
+                                   int16_t *block, int n, int qscale)
+{
+    int i, level, nCoeffs;
+    const uint16_t *quant_matrix;
+
+    nCoeffs= s->block_last_index[n];
+
+    quant_matrix = s->inter_matrix;
+    for(i=0; i<=nCoeffs; i++) {
+        int j= s->intra_scantable.permutated[i];
+        level = block[j];
+        if (level) {
+            if (level < 0) {
+                level = -level;
+                level = (((level << 1) + 1) * qscale *
+                         ((int) (quant_matrix[j]))) >> 4;
+                level = (level - 1) | 1;
+                level = -level;
+            } else {
+                level = (((level << 1) + 1) * qscale *
+                         ((int) (quant_matrix[j]))) >> 4;
+                level = (level - 1) | 1;
+            }
+            block[j] = level;
+        }
+    }
+}
+
+static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
+                                   int16_t *block, int n, int qscale)
+{
+    int i, level, nCoeffs;
+    const uint16_t *quant_matrix;
+
+    if(s->alternate_scan) nCoeffs= 63;
+    else nCoeffs= s->block_last_index[n];
+
+    if (n < 4)
+        block[0] = block[0] * s->y_dc_scale;
+    else
+        block[0] = block[0] * s->c_dc_scale;
+    quant_matrix = s->intra_matrix;
+    for(i=1;i<=nCoeffs;i++) {
+        int j= s->intra_scantable.permutated[i];
+        level = block[j];
+        if (level) {
+            if (level < 0) {
+                level = -level;
+                level = (int)(level * qscale * quant_matrix[j]) >> 3;
+                level = -level;
+            } else {
+                level = (int)(level * qscale * quant_matrix[j]) >> 3;
+            }
+            block[j] = level;
+        }
+    }
+}
+
+static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
+                                   int16_t *block, int n, int qscale)
+{
+    int i, level, nCoeffs;
+    const uint16_t *quant_matrix;
+    int sum=-1;
+
+    if(s->alternate_scan) nCoeffs= 63;
+    else nCoeffs= s->block_last_index[n];
+
+    if (n < 4)
+        block[0] = block[0] * s->y_dc_scale;
+    else
+        block[0] = block[0] * s->c_dc_scale;
+    quant_matrix = s->intra_matrix;
+    for(i=1;i<=nCoeffs;i++) {
+        int j= s->intra_scantable.permutated[i];
+        level = block[j];
+        if (level) {
+            if (level < 0) {
+                level = -level;
+                level = (int)(level * qscale * quant_matrix[j]) >> 3;
+                level = -level;
+            } else {
+                level = (int)(level * qscale * quant_matrix[j]) >> 3;
+            }
+            block[j] = level;
+            sum+=level;
+        }
+    }
+    block[63]^=sum&1;
+}
+
+static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
+                                   int16_t *block, int n, int qscale)
+{
+    int i, level, nCoeffs;
+    const uint16_t *quant_matrix;
+    int sum=-1;
+
+    if(s->alternate_scan) nCoeffs= 63;
+    else nCoeffs= s->block_last_index[n];
+
+    quant_matrix = s->inter_matrix;
+    for(i=0; i<=nCoeffs; i++) {
+        int j= s->intra_scantable.permutated[i];
+        level = block[j];
+        if (level) {
+            if (level < 0) {
+                level = -level;
+                level = (((level << 1) + 1) * qscale *
+                         ((int) (quant_matrix[j]))) >> 4;
+                level = -level;
+            } else {
+                level = (((level << 1) + 1) * qscale *
+                         ((int) (quant_matrix[j]))) >> 4;
+            }
+            block[j] = level;
+            sum+=level;
+        }
+    }
+    block[63]^=sum&1;
+}
+
+static void dct_unquantize_h263_intra_c(MpegEncContext *s,
+                                  int16_t *block, int n, int qscale)
+{
+    int i, level, qmul, qadd;
+    int nCoeffs;
+
+    assert(s->block_last_index[n]>=0);
+
+    qmul = qscale << 1;
+
+    if (!s->h263_aic) {
+        if (n < 4)
+            block[0] = block[0] * s->y_dc_scale;
+        else
+            block[0] = block[0] * s->c_dc_scale;
+        qadd = (qscale - 1) | 1;
+    }else{
+        qadd = 0;
+    }
+    if(s->ac_pred)
+        nCoeffs=63;
+    else
+        nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
+
+    for(i=1; i<=nCoeffs; i++) {
+        level = block[i];
+        if (level) {
+            if (level < 0) {
+                level = level * qmul - qadd;
+            } else {
+                level = level * qmul + qadd;
+            }
+            block[i] = level;
+        }
+    }
+}
+
+static void dct_unquantize_h263_inter_c(MpegEncContext *s,
+                                  int16_t *block, int n, int qscale)
+{
+    int i, level, qmul, qadd;
+    int nCoeffs;
+
+    assert(s->block_last_index[n]>=0);
+
+    qadd = (qscale - 1) | 1;
+    qmul = qscale << 1;
+
+    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
+
+    for(i=0; i<=nCoeffs; i++) {
+        level = block[i];
+        if (level) {
+            if (level < 0) {
+                level = level * qmul - qadd;
+            } else {
+                level = level * qmul + qadd;
+            }
+            block[i] = level;
+        }
+    }
+}
+
 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                               int (*mv)[2][4][2],
                               int mb_x, int mb_y, int mb_intra, int mb_skipped)
@@ -140,21 +364,24 @@ static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
     ff_init_block_index(s);
     ff_update_block_index(s);
 
-    s->dsp.clear_blocks(s->block[0]);
+    s->bdsp.clear_blocks(s->block[0]);
 
-    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y *  16                       * s->linesize)   + s->mb_x *  16;
-    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
-    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
+    s->dest[0] = s->current_picture.f->data[0] + (s->mb_y *  16                       * s->linesize)   + s->mb_x *  16;
+    s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
+    s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
 
-    assert(ref == 0);
-    ff_MPV_decode_mb(s, s->block);
+    if (ref)
+        av_log(s->avctx, AV_LOG_DEBUG,
+               "Interlaced error concealment is not fully implemented\n");
+    ff_mpv_decode_mb(s, s->block);
 }
 
 /* init common dct for both encoder and decoder */
-av_cold int ff_dct_common_init(MpegEncContext *s)
+static av_cold int dct_init(MpegEncContext *s)
 {
-    ff_dsputil_init(&s->dsp, s->avctx);
+    ff_blockdsp_init(&s->bdsp, s->avctx);
     ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
+    ff_mpegvideodsp_init(&s->mdsp);
     ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
 
     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
@@ -166,32 +393,38 @@ av_cold int ff_dct_common_init(MpegEncContext *s)
         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
 
+    if (HAVE_INTRINSICS_NEON)
+        ff_mpv_common_init_neon(s);
+
     if (ARCH_ARM)
-        ff_MPV_common_init_arm(s);
-    if (ARCH_BFIN)
-        ff_MPV_common_init_bfin(s);
+        ff_mpv_common_init_arm(s);
     if (ARCH_PPC)
-        ff_MPV_common_init_ppc(s);
+        ff_mpv_common_init_ppc(s);
     if (ARCH_X86)
-        ff_MPV_common_init_x86(s);
+        ff_mpv_common_init_x86(s);
+
+    return 0;
+}
+
+av_cold void ff_mpv_idct_init(MpegEncContext *s)
+{
+    ff_idctdsp_init(&s->idsp, s->avctx);
 
     /* load & permutate scantables
      * note: only wmv uses different ones
      */
     if (s->alternate_scan) {
-        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
-        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
+        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
+        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
     } else {
-        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
-        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
+        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
+        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
     }
-    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
-    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
-
-    return 0;
+    ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
+    ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
 }
 
-int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
+static int frame_size_alloc(MpegEncContext *s, int linesize)
 {
     int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
 
@@ -221,31 +454,49 @@ fail:
  */
 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
 {
+    int edges_needed = av_codec_is_encoder(s->avctx->codec);
     int r, ret;
 
-    pic->tf.f = &pic->f;
+    pic->tf.f = pic->f;
     if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
         s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
-        s->codec_id != AV_CODEC_ID_MSS2)
+        s->codec_id != AV_CODEC_ID_MSS2) {
+        if (edges_needed) {
+            pic->f->width  = s->avctx->width  + 2 * EDGE_WIDTH;
+            pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
+        }
+
         r = ff_thread_get_buffer(s->avctx, &pic->tf,
                                  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
-    else {
-        pic->f.width  = s->avctx->width;
-        pic->f.height = s->avctx->height;
-        pic->f.format = s->avctx->pix_fmt;
-        r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
+    else {
+        pic->f->width  = s->avctx->width;
+        pic->f->height = s->avctx->height;
+        pic->f->format = s->avctx->pix_fmt;
+        r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
     }
 
-    if (r < 0 || !pic->f.buf[0]) {
+    if (r < 0 || !pic->f->buf[0]) {
         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
-               r, pic->f.data[0]);
+               r, pic->f->data[0]);
         return -1;
     }
 
+    if (edges_needed) {
+        int i;
+        for (i = 0; pic->f->data[i]; i++) {
+            int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
+                         pic->f->linesize[i] +
+                         (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
+            pic->f->data[i] += offset;
+        }
+        pic->f->width  = s->avctx->width;
+        pic->f->height = s->avctx->height;
+    }
+
     if (s->avctx->hwaccel) {
         assert(!pic->hwaccel_picture_private);
-        if (s->avctx->hwaccel->priv_data_size) {
-            pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
+        if (s->avctx->hwaccel->frame_priv_data_size) {
+            pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
             if (!pic->hwaccel_priv_buf) {
                 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                 return -1;
@@ -254,15 +505,15 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
         }
     }
 
-    if (s->linesize && (s->linesize   != pic->f.linesize[0] ||
-                        s->uvlinesize != pic->f.linesize[1])) {
+    if (s->linesize && (s->linesize   != pic->f->linesize[0] ||
+                        s->uvlinesize != pic->f->linesize[1])) {
         av_log(s->avctx, AV_LOG_ERROR,
                "get_buffer() failed (stride changed)\n");
         ff_mpeg_unref_picture(s, pic);
         return -1;
     }
 
-    if (pic->f.linesize[1] != pic->f.linesize[2]) {
+    if (pic->f->linesize[1] != pic->f->linesize[2]) {
         av_log(s->avctx, AV_LOG_ERROR,
                "get_buffer() failed (uv stride mismatch)\n");
         ff_mpeg_unref_picture(s, pic);
@@ -270,7 +521,7 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
     }
 
     if (!s->edge_emu_buffer &&
-        (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
+        (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
         av_log(s->avctx, AV_LOG_ERROR,
                "get_buffer() failed to allocate context scratch buffers.\n");
         ff_mpeg_unref_picture(s, pic);
@@ -280,7 +531,7 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
     return 0;
 }
 
-static void free_picture_tables(Picture *pic)
+void ff_free_picture_tables(Picture *pic)
 {
     int i;
 
@@ -369,16 +620,16 @@ int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
     int i, ret;
 
     if (shared) {
-        assert(pic->f.data[0]);
+        assert(pic->f->data[0]);
         pic->shared = 1;
     } else {
-        assert(!pic->f.buf[0]);
+        assert(!pic->f->buf[0]);
 
         if (alloc_frame_buffer(s, pic) < 0)
             return -1;
 
-        s->linesize   = pic->f.linesize[0];
-        s->uvlinesize = pic->f.linesize[1];
+        s->linesize   = pic->f->linesize[0];
+        s->uvlinesize = pic->f->linesize[1];
     }
 
     if (!pic->qscale_table_buf)
@@ -409,7 +660,7 @@ int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
 fail:
     av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
     ff_mpeg_unref_picture(s, pic);
-    free_picture_tables(pic);
+    ff_free_picture_tables(pic);
     return AVERROR(ENOMEM);
 }
 
@@ -420,20 +671,20 @@ void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
 {
     int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
 
-    pic->tf.f = &pic->f;
+    pic->tf.f = pic->f;
     /* WM Image / Screen codecs allocate internal buffers with different
      * dimensions / colorspaces; ignore user-defined callbacks for these. */
     if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
         s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
         s->codec_id != AV_CODEC_ID_MSS2)
         ff_thread_release_buffer(s->avctx, &pic->tf);
-    else
-        av_frame_unref(&pic->f);
+    else if (pic->f)
+        av_frame_unref(pic->f);
 
     av_buffer_unref(&pic->hwaccel_priv_buf);
 
     if (pic->needs_realloc)
-        free_picture_tables(pic);
+        ff_free_picture_tables(pic);
 
     memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
 }
@@ -449,7 +700,7 @@ do {\
         av_buffer_unref(&dst->table);\
         dst->table = av_buffer_ref(src->table);\
         if (!dst->table) {\
-            free_picture_tables(dst);\
+            ff_free_picture_tables(dst);\
             return AVERROR(ENOMEM);\
         }\
     }\
@@ -484,11 +735,11 @@ int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
 {
     int ret;
 
-    av_assert0(!dst->f.buf[0]);
-    av_assert0(src->f.buf[0]);
+    av_assert0(!dst->f->buf[0]);
+    av_assert0(src->f->buf[0]);
 
-    src->tf.f = &src->f;
-    dst->tf.f = &dst->f;
+    src->tf.f = src->f;
+    dst->tf.f = dst->f;
     ret = ff_thread_ref_frame(&dst->tf, &src->tf);
     if (ret < 0)
         goto fail;
@@ -518,6 +769,15 @@ fail:
     return ret;
 }
 
+static void exchange_uv(MpegEncContext *s)
+{
+    int16_t (*tmp)[64];
+
+    tmp           = s->pblocks[4];
+    s->pblocks[4] = s->pblocks[5];
+    s->pblocks[5] = tmp;
+}
+
 static int init_duplicate_context(MpegEncContext *s)
 {
     int y_size = s->b8_stride * (2 * s->mb_height + 1);
@@ -548,6 +808,8 @@ static int init_duplicate_context(MpegEncContext *s)
     for (i = 0; i < 12; i++) {
         s->pblocks[i] = &s->block[i];
     }
+    if (s->avctx->codec_tag == AV_RL32("VCR2"))
+        exchange_uv(s);
 
     if (s->out_format == FMT_H263) {
         /* ac values */
@@ -560,12 +822,12 @@ static int init_duplicate_context(MpegEncContext *s)
 
     return 0;
 fail:
-    return -1; // free() through ff_MPV_common_end()
+    return -1; // free() through ff_mpv_common_end()
 }
 
 static void free_duplicate_context(MpegEncContext *s)
 {
-    if (s == NULL)
+    if (!s)
         return;
 
     av_freep(&s->edge_emu_buffer);
@@ -622,8 +884,10 @@ int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
     for (i = 0; i < 12; i++) {
         dst->pblocks[i] = &dst->block[i];
     }
+    if (dst->avctx->codec_tag == AV_RL32("VCR2"))
+        exchange_uv(dst);
     if (!dst->edge_emu_buffer &&
-        (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
+        (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
         av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
                "scratch buffers.\n");
         return ret;
@@ -645,13 +909,16 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
     // FIXME can parameters change on I-frames?
     // in that case dst may need a reinit
     if (!s->context_initialized) {
+        int err;
         memcpy(s, s1, sizeof(MpegEncContext));
 
         s->avctx                 = dst;
         s->bitstream_buffer      = NULL;
         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
 
-        ff_MPV_common_init(s);
+        ff_mpv_idct_init(s);
+        if ((err = ff_mpv_common_init(s)) < 0)
+            return err;
     }
 
     if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
@@ -659,7 +926,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
         s->context_reinit = 0;
         s->height = s1->height;
         s->width  = s1->width;
-        if ((err = ff_MPV_common_frame_size_change(s)) < 0)
+        if ((err = ff_mpv_common_frame_size_change(s)) < 0)
             return err;
     }
 
@@ -670,11 +937,10 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
 
     s->coded_picture_number = s1->coded_picture_number;
     s->picture_number       = s1->picture_number;
-    s->input_picture_number = s1->input_picture_number;
 
     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
         ff_mpeg_unref_picture(s, &s->picture[i]);
-        if (s1->picture[i].f.buf[0] &&
+        if (s1->picture[i].f->buf[0] &&
             (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
             return ret;
     }
@@ -682,7 +948,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
 #define UPDATE_PICTURE(pic)\
 do {\
     ff_mpeg_unref_picture(s, &s->pic);\
-    if (s1->pic.f.buf[0])\
+    if (s1->pic.f->buf[0])\
         ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
     else\
         ret = update_picture_tables(&s->pic, &s1->pic);\
@@ -694,6 +960,11 @@ do {\
     UPDATE_PICTURE(last_picture);
     UPDATE_PICTURE(next_picture);
 
+#define REBASE_PICTURE(pic, new_ctx, old_ctx)                                 \
+    ((pic && pic >= old_ctx->picture &&                                       \
+      pic < old_ctx->picture + MAX_PICTURE_COUNT) ?                           \
+        &new_ctx->picture[pic - old_ctx->picture] : NULL)
+
     s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
     s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
     s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
@@ -703,8 +974,9 @@ do {\
     s->workaround_bugs      = s1->workaround_bugs;
 
     // MPEG4 timing info
-    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
-           (char *) &s1->shape - (char *) &s1->time_increment_bits);
+    memcpy(&s->last_time_base, &s1->last_time_base,
+           (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
+           (char *) &s1->last_time_base);
 
     // B-frame info
     s->max_b_frames = s1->max_b_frames;
@@ -730,7 +1002,7 @@ do {\
     // linesize dependend scratch buffer allocation
     if (!s->edge_emu_buffer)
         if (s1->linesize) {
-            if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
+            if (frame_size_alloc(s, s1->linesize) < 0) {
                 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                        "scratch buffers.\n");
                 return AVERROR(ENOMEM);
@@ -748,11 +1020,7 @@ do {\
     if (!s1->first_field) {
         s->last_pict_type = s1->pict_type;
         if (s1->current_picture_ptr)
-            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
-
-        if (s1->pict_type != AV_PICTURE_TYPE_B) {
-            s->last_non_b_pict_type = s1->pict_type;
-        }
+            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
     }
 
     return 0;
@@ -764,7 +1032,7 @@ do {\
  * The changed fields will not depend upon the
  * prior state of the MpegEncContext.
  */
-void ff_MPV_common_defaults(MpegEncContext *s)
+void ff_mpv_common_defaults(MpegEncContext *s)
 {
     s->y_dc_scale_table      =
     s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
@@ -775,9 +1043,6 @@ void ff_MPV_common_defaults(MpegEncContext *s)
 
     s->coded_picture_number  = 0;
     s->picture_number        = 0;
-    s->input_picture_number  = 0;
-
-    s->picture_in_gop_number = 0;
 
     s->f_code                = 1;
     s->b_code                = 1;
@@ -790,9 +1055,9 @@ void ff_MPV_common_defaults(MpegEncContext *s)
  * the changed fields will not depend upon
  * the prior state of the MpegEncContext.
  */
-void ff_MPV_decode_defaults(MpegEncContext *s)
+void ff_mpv_decode_defaults(MpegEncContext *s)
 {
-    ff_MPV_common_defaults(s);
+    ff_mpv_common_defaults(s);
 }
 
 static int init_er(MpegEncContext *s)
@@ -802,7 +1067,6 @@ static int init_er(MpegEncContext *s)
     int i;
 
     er->avctx       = s->avctx;
-    er->dsp         = &s->dsp;
 
     er->mb_index2xy = s->mb_index2xy;
     er->mb_num      = s->mb_num;
@@ -842,7 +1106,6 @@ static int init_context_frame(MpegEncContext *s)
     s->mb_width   = (s->width + 15) / 16;
     s->mb_stride  = s->mb_width + 1;
     s->b8_stride  = s->mb_width * 2 + 1;
-    s->b4_stride  = s->mb_width * 4 + 1;
     mb_array_size = s->mb_height * s->mb_stride;
     mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
 
@@ -976,7 +1239,7 @@ fail:
  * init common structure for both encoder and decoder.
  * this assumes that some variables like width/height are already set
  */
-av_cold int ff_MPV_common_init(MpegEncContext *s)
+av_cold int ff_mpv_common_init(MpegEncContext *s)
 {
     int i;
     int nb_slices = (HAVE_THREADS &&
@@ -1012,7 +1275,7 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
         av_image_check_size(s->width, s->height, 0, s->avctx))
         return -1;
 
-    ff_dct_common_init(s);
+    dct_init(s);
 
     s->flags  = s->avctx->flags;
     s->flags2 = s->avctx->flags2;
@@ -1025,50 +1288,29 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
     /* convert fourcc to upper case */
     s->codec_tag          = avpriv_toupper4(s->avctx->codec_tag);
 
-    s->stream_codec_tag   = avpriv_toupper4(s->avctx->stream_codec_tag);
-
-    if (s->width && s->height) {
-        s->avctx->coded_frame = &s->current_picture.f;
-
-        if (s->encoding) {
-            if (s->msmpeg4_version) {
-                FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
-                                  2 * 2 * (MAX_LEVEL + 1) *
-                                  (MAX_RUN + 1) * 2 * sizeof(int), fail);
-            }
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
-
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
-                              64 * 32   * sizeof(int), fail);
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
-                              64 * 32   * sizeof(int), fail);
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
-                              64 * 32 * 2 * sizeof(uint16_t), fail);
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
-                              64 * 32 * 2 * sizeof(uint16_t), fail);
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
-                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
-                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);
-
-            if (s->avctx->noise_reduction) {
-                FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
-                                  2 * 64 * sizeof(uint16_t), fail);
-            }
-        }
-    }
-
     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                       MAX_PICTURE_COUNT * sizeof(Picture), fail);
     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-        avcodec_get_frame_defaults(&s->picture[i].f);
+        s->picture[i].f = av_frame_alloc();
+        if (!s->picture[i].f)
+            goto fail;
     }
     memset(&s->next_picture, 0, sizeof(s->next_picture));
     memset(&s->last_picture, 0, sizeof(s->last_picture));
     memset(&s->current_picture, 0, sizeof(s->current_picture));
-    avcodec_get_frame_defaults(&s->next_picture.f);
-    avcodec_get_frame_defaults(&s->last_picture.f);
-    avcodec_get_frame_defaults(&s->current_picture.f);
+    memset(&s->new_picture, 0, sizeof(s->new_picture));
+    s->next_picture.f = av_frame_alloc();
+    if (!s->next_picture.f)
+        goto fail;
+    s->last_picture.f = av_frame_alloc();
+    if (!s->last_picture.f)
+        goto fail;
+    s->current_picture.f = av_frame_alloc();
+    if (!s->current_picture.f)
+        goto fail;
+    s->new_picture.f = av_frame_alloc();
+    if (!s->new_picture.f)
+        goto fail;
 
     if (s->width && s->height) {
         if (init_context_frame(s))
@@ -1106,7 +1348,7 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
 
     return 0;
  fail:
-    ff_MPV_common_end(s);
+    ff_mpv_common_end(s);
     return -1;
 }
 
@@ -1115,7 +1357,7 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
  * Is used during resolution changes to avoid a full reinitialization of the
  * codec.
  */
-static int free_context_frame(MpegEncContext *s)
+static void free_context_frame(MpegEncContext *s)
 {
     int i, j, k;
 
@@ -1161,11 +1403,9 @@ static int free_context_frame(MpegEncContext *s)
     av_freep(&s->bits_tab);
 
     s->linesize = s->uvlinesize = 0;
-
-    return 0;
 }
 
-int ff_MPV_common_frame_size_change(MpegEncContext *s)
+int ff_mpv_common_frame_size_change(MpegEncContext *s)
 {
     int i, err = 0;
 
@@ -1179,8 +1419,7 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
     } else
         free_duplicate_context(s);
 
-    if ((err = free_context_frame(s)) < 0)
-        return err;
+    free_context_frame(s);
 
     if (s->picture)
         for (i = 0; i < MAX_PICTURE_COUNT; i++) {
@@ -1198,8 +1437,8 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
         s->mb_height = (s->height + 15) / 16;
 
     if ((s->width || s->height) &&
-        av_image_check_size(s->width, s->height, 0, s->avctx))
-        return AVERROR_INVALIDDATA;
+        (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
+        goto fail;
 
     if ((err = init_context_frame(s)))
         goto fail;
@@ -1215,7 +1454,7 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
             }
 
             for (i = 0; i < nb_slices; i++) {
-                if (init_duplicate_context(s->thread_context[i]) < 0)
+                if ((err = init_duplicate_context(s->thread_context[i])) < 0)
                     goto fail;
                     s->thread_context[i]->start_mb_y =
                         (s->mb_height * (i) + nb_slices / 2) / nb_slices;
@@ -1233,12 +1472,12 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
 
     return 0;
  fail:
-    ff_MPV_common_end(s);
+    ff_mpv_common_end(s);
     return err;
 }
 
 /* init common structure for both encoder and decoder */
-void ff_MPV_common_end(MpegEncContext *s)
+void ff_mpv_common_end(MpegEncContext *s)
 {
     int i;
 
@@ -1258,32 +1497,26 @@ void ff_MPV_common_end(MpegEncContext *s)
     av_freep(&s->bitstream_buffer);
     s->allocated_bitstream_buffer_size = 0;
 
-    av_freep(&s->avctx->stats_out);
-    av_freep(&s->ac_stats);
-
-    av_freep(&s->q_intra_matrix);
-    av_freep(&s->q_inter_matrix);
-    av_freep(&s->q_intra_matrix16);
-    av_freep(&s->q_inter_matrix16);
-    av_freep(&s->input_picture);
-    av_freep(&s->reordered_input_picture);
-    av_freep(&s->dct_offset);
-
     if (s->picture) {
         for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-            free_picture_tables(&s->picture[i]);
+            ff_free_picture_tables(&s->picture[i]);
             ff_mpeg_unref_picture(s, &s->picture[i]);
+            av_frame_free(&s->picture[i].f);
         }
     }
     av_freep(&s->picture);
-    free_picture_tables(&s->last_picture);
+    ff_free_picture_tables(&s->last_picture);
     ff_mpeg_unref_picture(s, &s->last_picture);
-    free_picture_tables(&s->current_picture);
+    av_frame_free(&s->last_picture.f);
+    ff_free_picture_tables(&s->current_picture);
     ff_mpeg_unref_picture(s, &s->current_picture);
-    free_picture_tables(&s->next_picture);
+    av_frame_free(&s->current_picture.f);
+    ff_free_picture_tables(&s->next_picture);
     ff_mpeg_unref_picture(s, &s->next_picture);
-    free_picture_tables(&s->new_picture);
+    av_frame_free(&s->next_picture.f);
+    ff_free_picture_tables(&s->new_picture);
     ff_mpeg_unref_picture(s, &s->new_picture);
+    av_frame_free(&s->new_picture.f);
 
     free_context_frame(s);
 
@@ -1386,22 +1619,20 @@ av_cold void ff_init_vlc_rl(RLTable *rl)
     }
 }
 
-void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
+static void release_unused_pictures(MpegEncContext *s)
 {
     int i;
 
     /* release non reference frames */
     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-        if (!s->picture[i].reference &&
-            (remove_current || &s->picture[i] !=  s->current_picture_ptr)) {
+        if (!s->picture[i].reference)
             ff_mpeg_unref_picture(s, &s->picture[i]);
-        }
     }
 }
 
 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
 {
-    if (pic->f.buf[0] == NULL)
+    if (!pic->f->buf[0])
         return 1;
     if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
         return 1;
@@ -1414,7 +1645,7 @@ static int find_unused_picture(MpegEncContext *s, int shared)
 
     if (shared) {
         for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-            if (s->picture[i].f.buf[0] == NULL)
+            if (!s->picture[i].f->buf[0])
                 return i;
         }
     } else {
@@ -1434,40 +1665,18 @@ int ff_find_unused_picture(MpegEncContext *s, int shared)
     if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
         if (s->picture[ret].needs_realloc) {
             s->picture[ret].needs_realloc = 0;
-            free_picture_tables(&s->picture[ret]);
+            ff_free_picture_tables(&s->picture[ret]);
             ff_mpeg_unref_picture(s, &s->picture[ret]);
-            avcodec_get_frame_defaults(&s->picture[ret].f);
         }
     }
     return ret;
 }
 
-static void update_noise_reduction(MpegEncContext *s)
-{
-    int intra, i;
-
-    for (intra = 0; intra < 2; intra++) {
-        if (s->dct_count[intra] > (1 << 16)) {
-            for (i = 0; i < 64; i++) {
-                s->dct_error_sum[intra][i] >>= 1;
-            }
-            s->dct_count[intra] >>= 1;
-        }
-
-        for (i = 0; i < 64; i++) {
-            s->dct_offset[intra][i] = (s->avctx->noise_reduction *
-                                       s->dct_count[intra] +
-                                       s->dct_error_sum[intra][i] / 2) /
-                                      (s->dct_error_sum[intra][i] + 1);
-        }
-    }
-}
-
 /**
- * generic function for encode/decode called after coding/decoding
- * the header and before a frame is coded/decoded.
+ * generic function called after decoding
+ * the header and before a frame is decoded.
  */
-int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
+int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 {
     int i, ret;
     Picture *pic;
@@ -1476,73 +1685,68 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
     /* mark & release old frames */
     if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
         s->last_picture_ptr != s->next_picture_ptr &&
-        s->last_picture_ptr->f.buf[0]) {
+        s->last_picture_ptr->f->buf[0]) {
         ff_mpeg_unref_picture(s, s->last_picture_ptr);
     }
 
     /* release forgotten pictures */
     /* if (mpeg124/h263) */
-    if (!s->encoding) {
-        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-            if (&s->picture[i] != s->last_picture_ptr &&
-                &s->picture[i] != s->next_picture_ptr &&
-                s->picture[i].reference && !s->picture[i].needs_realloc) {
-                if (!(avctx->active_thread_type & FF_THREAD_FRAME))
-                    av_log(avctx, AV_LOG_ERROR,
-                           "releasing zombie picture\n");
-                ff_mpeg_unref_picture(s, &s->picture[i]);
-            }
+    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+        if (&s->picture[i] != s->last_picture_ptr &&
+            &s->picture[i] != s->next_picture_ptr &&
+            s->picture[i].reference && !s->picture[i].needs_realloc) {
+            if (!(avctx->active_thread_type & FF_THREAD_FRAME))
+                av_log(avctx, AV_LOG_ERROR,
+                       "releasing zombie picture\n");
+            ff_mpeg_unref_picture(s, &s->picture[i]);
         }
     }
 
     ff_mpeg_unref_picture(s, &s->current_picture);
 
-    if (!s->encoding) {
-        ff_release_unused_pictures(s, 1);
+    release_unused_pictures(s);
 
-        if (s->current_picture_ptr &&
-            s->current_picture_ptr->f.buf[0] == NULL) {
-            // we already have a unused image
-            // (maybe it was set before reading the header)
-            pic = s->current_picture_ptr;
-        } else {
-            i   = ff_find_unused_picture(s, 0);
-            if (i < 0) {
-                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
-                return i;
-            }
-            pic = &s->picture[i];
+    if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
+        // we already have a unused image
+        // (maybe it was set before reading the header)
+        pic = s->current_picture_ptr;
+    } else {
+        i   = ff_find_unused_picture(s, 0);
+        if (i < 0) {
+            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
+            return i;
         }
+        pic = &s->picture[i];
+    }
 
-        pic->reference = 0;
-        if (!s->droppable) {
-            if (s->pict_type != AV_PICTURE_TYPE_B)
-                pic->reference = 3;
-        }
+    pic->reference = 0;
+    if (!s->droppable) {
+        if (s->pict_type != AV_PICTURE_TYPE_B)
+            pic->reference = 3;
+    }
 
-        pic->f.coded_picture_number = s->coded_picture_number++;
+    pic->f->coded_picture_number = s->coded_picture_number++;
 
-        if (ff_alloc_picture(s, pic, 0) < 0)
-            return -1;
+    if (ff_alloc_picture(s, pic, 0) < 0)
+        return -1;
 
-        s->current_picture_ptr = pic;
-        // FIXME use only the vars from current_pic
-        s->current_picture_ptr->f.top_field_first = s->top_field_first;
-        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
-            s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
-            if (s->picture_structure != PICT_FRAME)
-                s->current_picture_ptr->f.top_field_first =
-                    (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
-        }
-        s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
-                                                     !s->progressive_sequence;
-        s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
+    s->current_picture_ptr = pic;
+    // FIXME use only the vars from current_pic
+    s->current_picture_ptr->f->top_field_first = s->top_field_first;
+    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
+        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
+        if (s->picture_structure != PICT_FRAME)
+            s->current_picture_ptr->f->top_field_first =
+                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
     }
+    s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
+                                                 !s->progressive_sequence;
+    s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
 
-    s->current_picture_ptr->f.pict_type = s->pict_type;
+    s->current_picture_ptr->f->pict_type = s->pict_type;
     // if (s->flags && CODEC_FLAG_QSCALE)
     //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
-    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
+    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
 
     if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
                                    s->current_picture_ptr)) < 0)
@@ -1555,13 +1759,12 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
     }
     av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
             s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
-            s->last_picture_ptr    ? s->last_picture_ptr->f.data[0]    : NULL,
-            s->next_picture_ptr    ? s->next_picture_ptr->f.data[0]    : NULL,
-            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
+            s->last_picture_ptr    ? s->last_picture_ptr->f->data[0]    : NULL,
+            s->next_picture_ptr    ? s->next_picture_ptr->f->data[0]    : NULL,
+            s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
             s->pict_type, s->droppable);
 
-    if ((s->last_picture_ptr == NULL ||
-         s->last_picture_ptr->f.buf[0] == NULL) &&
+    if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
         (s->pict_type != AV_PICTURE_TYPE_I ||
          s->picture_structure != PICT_FRAME)) {
         int h_chroma_shift, v_chroma_shift;
@@ -1581,25 +1784,28 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
             return i;
         }
         s->last_picture_ptr = &s->picture[i];
+
+        s->last_picture_ptr->reference   = 3;
+        s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
+
         if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
             s->last_picture_ptr = NULL;
             return -1;
         }
 
-        memset(s->last_picture_ptr->f.data[0], 0,
-               avctx->height * s->last_picture_ptr->f.linesize[0]);
-        memset(s->last_picture_ptr->f.data[1], 0x80,
+        memset(s->last_picture_ptr->f->data[0], 0,
+               avctx->height * s->last_picture_ptr->f->linesize[0]);
+        memset(s->last_picture_ptr->f->data[1], 0x80,
                (avctx->height >> v_chroma_shift) *
-               s->last_picture_ptr->f.linesize[1]);
-        memset(s->last_picture_ptr->f.data[2], 0x80,
+               s->last_picture_ptr->f->linesize[1]);
+        memset(s->last_picture_ptr->f->data[2], 0x80,
                (avctx->height >> v_chroma_shift) *
-               s->last_picture_ptr->f.linesize[2]);
+               s->last_picture_ptr->f->linesize[2]);
 
         ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
         ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
     }
-    if ((s->next_picture_ptr == NULL ||
-         s->next_picture_ptr->f.buf[0] == NULL) &&
+    if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
         s->pict_type == AV_PICTURE_TYPE_B) {
         /* Allocate a dummy frame */
         i = ff_find_unused_picture(s, 0);
@@ -1608,6 +1814,10 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
             return i;
         }
         s->next_picture_ptr = &s->picture[i];
+
+        s->next_picture_ptr->reference   = 3;
+        s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
+
         if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
             s->next_picture_ptr = NULL;
             return -1;
@@ -1618,21 +1828,21 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 
     if (s->last_picture_ptr) {
         ff_mpeg_unref_picture(s, &s->last_picture);
-        if (s->last_picture_ptr->f.buf[0] &&
+        if (s->last_picture_ptr->f->buf[0] &&
             (ret = ff_mpeg_ref_picture(s, &s->last_picture,
                                        s->last_picture_ptr)) < 0)
             return ret;
     }
     if (s->next_picture_ptr) {
         ff_mpeg_unref_picture(s, &s->next_picture);
-        if (s->next_picture_ptr->f.buf[0] &&
+        if (s->next_picture_ptr->f->buf[0] &&
             (ret = ff_mpeg_ref_picture(s, &s->next_picture,
                                        s->next_picture_ptr)) < 0)
             return ret;
     }
 
     if (s->pict_type != AV_PICTURE_TYPE_I &&
-        !(s->last_picture_ptr && s->last_picture_ptr->f.buf[0])) {
+        !(s->last_picture_ptr && s->last_picture_ptr->f->buf[0])) {
         av_log(s, AV_LOG_ERROR,
                "Non-reference picture received and no reference available\n");
         return AVERROR_INVALIDDATA;
@@ -1642,12 +1852,12 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
         int i;
         for (i = 0; i < 4; i++) {
             if (s->picture_structure == PICT_BOTTOM_FIELD) {
-                s->current_picture.f.data[i] +=
-                    s->current_picture.f.linesize[i];
+                s->current_picture.f->data[i] +=
+                    s->current_picture.f->linesize[i];
             }
-            s->current_picture.f.linesize[i] *= 2;
-            s->last_picture.f.linesize[i]    *= 2;
-            s->next_picture.f.linesize[i]    *= 2;
+            s->current_picture.f->linesize[i] *= 2;
+            s->last_picture.f->linesize[i]    *= 2;
+            s->next_picture.f->linesize[i]    *= 2;
         }
     }
 
@@ -1667,11 +1877,6 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
     }
 
-    if (s->dct_error_sum) {
-        assert(s->avctx->noise_reduction && s->encoding);
-        update_noise_reduction(s);
-    }
-
 #if FF_API_XVMC
 FF_DISABLE_DEPRECATION_WARNINGS
     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
@@ -1682,12 +1887,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
     return 0;
 }
 
-/* generic function for encode/decode called after a
- * frame has been coded/decoded. */
-void ff_MPV_frame_end(MpegEncContext *s)
+/* called after a frame has been decoded. */
+void ff_mpv_frame_end(MpegEncContext *s)
 {
-    int i;
-
 #if FF_API_XVMC
 FF_DISABLE_DEPRECATION_WARNINGS
     /* redraw edges for the frame if decoding didn't complete */
@@ -1697,62 +1899,9 @@ FF_DISABLE_DEPRECATION_WARNINGS
     } else
 FF_ENABLE_DEPRECATION_WARNINGS
 #endif /* FF_API_XVMC */
-    if ((s->er.error_count || s->encoding) &&
-        !s->avctx->hwaccel &&
-        s->unrestricted_mv &&
-        s->current_picture.reference &&
-        !s->intra_only &&
-        !(s->flags & CODEC_FLAG_EMU_EDGE)) {
-        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
-        int hshift = desc->log2_chroma_w;
-        int vshift = desc->log2_chroma_h;
-        s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
-                          s->h_edge_pos, s->v_edge_pos,
-                          EDGE_WIDTH, EDGE_WIDTH,
-                          EDGE_TOP | EDGE_BOTTOM);
-        s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
-                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
-                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
-                          EDGE_TOP | EDGE_BOTTOM);
-        s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
-                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
-                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
-                          EDGE_TOP | EDGE_BOTTOM);
-    }
 
     emms_c();
 
-    s->last_pict_type                 = s->pict_type;
-    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
-    if (s->pict_type!= AV_PICTURE_TYPE_B) {
-        s->last_non_b_pict_type = s->pict_type;
-    }
-#if 0
-    /* copy back current_picture variables */
-    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
-            s->picture[i] = s->current_picture;
-            break;
-        }
-    }
-    assert(i < MAX_PICTURE_COUNT);
-#endif
-
-    if (s->encoding) {
-        /* release non-reference frames */
-        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-            if (!s->picture[i].reference)
-                ff_mpeg_unref_picture(s, &s->picture[i]);
-        }
-    }
-    // clear copies, to avoid confusion
-#if 0
-    memset(&s->last_picture,    0, sizeof(Picture));
-    memset(&s->next_picture,    0, sizeof(Picture));
-    memset(&s->current_picture, 0, sizeof(Picture));
-#endif
-    s->avctx->coded_frame = &s->current_picture_ptr->f;
-
     if (s->current_picture.reference)
         ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
 }
@@ -1765,7 +1914,7 @@ void ff_print_debug_info(MpegEncContext *s, Picture *p)
     AVFrame *pict;
     if (s->avctx->hwaccel || !p || !p->mb_type)
         return;
-    pict = &p->f;
+    pict = p->f;
 
     if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
         int x,y;
@@ -1860,7 +2009,7 @@ void ff_print_debug_info(MpegEncContext *s, Picture *p)
 /**
  * find the lowest MB row referenced in the MVs
  */
-int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
+int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
 {
     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
     int my, off, i, mvs;
@@ -1900,7 +2049,7 @@ static inline void put_dct(MpegEncContext *s,
                            int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
 {
     s->dct_unquantize_intra(s, block, i, qscale);
-    s->dsp.idct_put (dest, line_size, block);
+    s->idsp.idct_put(dest, line_size, block);
 }
 
 /* add block[] to dest[] */
@@ -1908,7 +2057,7 @@ static inline void add_dct(MpegEncContext *s,
                            int16_t *block, int i, uint8_t *dest, int line_size)
 {
     if (s->block_last_index[i] >= 0) {
-        s->dsp.idct_add (dest, line_size, block);
+        s->idsp.idct_add(dest, line_size, block);
     }
 }
 
@@ -1918,7 +2067,7 @@ static inline void add_dequant_dct(MpegEncContext *s,
     if (s->block_last_index[i] >= 0) {
         s->dct_unquantize_inter(s, block, i, qscale);
 
-        s->dsp.idct_add (dest, line_size, block);
+        s->idsp.idct_add(dest, line_size, block);
     }
 }
 
@@ -1966,7 +2115,7 @@ void ff_clean_intra_table_entries(MpegEncContext *s)
    s->interlaced_dct : true if interlaced dct used (mpeg2)
  */
 static av_always_inline
-void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
+void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
                             int is_mpeg12)
 {
     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
@@ -1986,7 +2135,8 @@ FF_ENABLE_DEPRECATION_WARNINGS
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for(i=0; i<6; i++){
            for(j=0; j<64; j++){
-               av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
+               av_log(s->avctx, AV_LOG_DEBUG, "%5d",
+                      block[i][s->idsp.idct_permutation[j]]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
@@ -2013,8 +2163,8 @@ FF_ENABLE_DEPRECATION_WARNINGS
         int dct_linesize, dct_offset;
         op_pixels_func (*op_pix)[4];
         qpel_mc_func (*op_qpix)[16];
-        const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
-        const int uvlinesize = s->current_picture.f.linesize[1];
+        const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
+        const int uvlinesize = s->current_picture.f->linesize[1];
         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
         const int block_size = 8;
 
@@ -2055,12 +2205,12 @@ FF_ENABLE_DEPRECATION_WARNINGS
                 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                     if (s->mv_dir & MV_DIR_FORWARD) {
                         ff_thread_await_progress(&s->last_picture_ptr->tf,
-                                                 ff_MPV_lowest_referenced_row(s, 0),
+                                                 ff_mpv_lowest_referenced_row(s, 0),
                                                  0);
                     }
                     if (s->mv_dir & MV_DIR_BACKWARD) {
                         ff_thread_await_progress(&s->next_picture_ptr->tf,
-                                                 ff_MPV_lowest_referenced_row(s, 1),
+                                                 ff_mpv_lowest_referenced_row(s, 1),
                                                  0);
                     }
                 }
@@ -2072,12 +2222,12 @@ FF_ENABLE_DEPRECATION_WARNINGS
                     op_pix = s->hdsp.put_no_rnd_pixels_tab;
                 }
                 if (s->mv_dir & MV_DIR_FORWARD) {
-                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
+                    ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
                     op_pix = s->hdsp.avg_pixels_tab;
                     op_qpix= s->me.qpel_avg;
                 }
                 if (s->mv_dir & MV_DIR_BACKWARD) {
-                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
+                    ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
                 }
             }
 
@@ -2163,29 +2313,29 @@ FF_ENABLE_DEPRECATION_WARNINGS
                     }
                 }
             }else{
-                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
-                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
-                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
-                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
+                s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
+                s->idsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
+                s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, block[2]);
+                s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
 
                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                     if(s->chroma_y_shift){
-                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
-                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
+                        s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
+                        s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
                     }else{
 
                         dct_linesize = uvlinesize << s->interlaced_dct;
                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
 
-                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
-                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
-                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
-                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
+                        s->idsp.idct_put(dest_cb,              dct_linesize, block[4]);
+                        s->idsp.idct_put(dest_cr,              dct_linesize, block[5]);
+                        s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
+                        s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                         if(!s->chroma_x_shift){//Chroma444
-                            s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
-                            s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
-                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
-                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
+                            s->idsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
+                            s->idsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
+                            s->idsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
+                            s->idsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                         }
                     }
                 }//gray
@@ -2200,104 +2350,26 @@ skip_idct:
     }
 }
 
-void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
+void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
+{
 #if !CONFIG_SMALL
     if(s->out_format == FMT_MPEG1) {
-        MPV_decode_mb_internal(s, block, 1);
+        mpv_decode_mb_internal(s, block, 1);
     } else
 #endif
-        MPV_decode_mb_internal(s, block, 0);
-}
-
-/**
- * @param h is the normal height, this will be reduced automatically if needed for the last row
- */
-void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
-                        Picture *last, int y, int h, int picture_structure,
-                        int first_field, int draw_edges, int low_delay,
-                        int v_edge_pos, int h_edge_pos)
-{
-    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
-    int hshift = desc->log2_chroma_w;
-    int vshift = desc->log2_chroma_h;
-    const int field_pic = picture_structure != PICT_FRAME;
-    if(field_pic){
-        h <<= 1;
-        y <<= 1;
-    }
-
-    if (!avctx->hwaccel &&
-        draw_edges &&
-        cur->reference &&
-        !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
-        int *linesize = cur->f.linesize;
-        int sides = 0, edge_h;
-        if (y==0) sides |= EDGE_TOP;
-        if (y + h >= v_edge_pos)
-            sides |= EDGE_BOTTOM;
-
-        edge_h= FFMIN(h, v_edge_pos - y);
-
-        dsp->draw_edges(cur->f.data[0] + y * linesize[0],
-                        linesize[0], h_edge_pos, edge_h,
-                        EDGE_WIDTH, EDGE_WIDTH, sides);
-        dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
-                        linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
-                        EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
-        dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
-                        linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
-                        EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
-    }
-
-    h = FFMIN(h, avctx->height - y);
-
-    if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
-
-    if (avctx->draw_horiz_band) {
-        AVFrame *src;
-        int offset[AV_NUM_DATA_POINTERS];
-        int i;
-
-        if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
-           (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
-            src = &cur->f;
-        else if (last)
-            src = &last->f;
-        else
-            return;
-
-        if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
-            picture_structure == PICT_FRAME &&
-            avctx->codec_id != AV_CODEC_ID_SVQ3) {
-            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
-                offset[i] = 0;
-        }else{
-            offset[0]= y * src->linesize[0];
-            offset[1]=
-            offset[2]= (y >> vshift) * src->linesize[1];
-            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
-                offset[i] = 0;
-        }
-
-        emms_c();
-
-        avctx->draw_horiz_band(avctx, src, offset,
-                               y, picture_structure, h);
-    }
+        mpv_decode_mb_internal(s, block, 0);
 }
 
 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
 {
-    int draw_edges = s->unrestricted_mv && !s->intra_only;
-    ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
-                       &s->last_picture, y, h, s->picture_structure,
-                       s->first_field, draw_edges, s->low_delay,
-                       s->v_edge_pos, s->h_edge_pos);
+    ff_draw_horiz_band(s->avctx, s->current_picture.f,
+                       s->last_picture.f, y, h, s->picture_structure,
+                       s->first_field, s->low_delay);
 }
 
 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
-    const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
-    const int uvlinesize = s->current_picture.f.linesize[1];
+    const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
+    const int uvlinesize = s->current_picture.f->linesize[1];
     const int mb_size= 4;
 
     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
@@ -2308,9 +2380,9 @@ void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
     //block_index is not used by mpeg2, so it is not affected by chroma_format
 
-    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
-    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
-    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
+    s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) <<  mb_size);
+    s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
+    s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
 
     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
     {
@@ -2360,7 +2432,7 @@ void ff_mpeg_flush(AVCodecContext *avctx){
     int i;
     MpegEncContext *s = avctx->priv_data;
 
-    if(s==NULL || s->picture==NULL)
+    if (!s || !s->picture)
         return;
 
     for (i = 0; i < MAX_PICTURE_COUNT; i++)
@@ -2383,224 +2455,6 @@ void ff_mpeg_flush(AVCodecContext *avctx){
     s->pp_time=0;
 }
 
-static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale)
-{
-    int i, level, nCoeffs;
-    const uint16_t *quant_matrix;
-
-    nCoeffs= s->block_last_index[n];
-
-    if (n < 4)
-        block[0] = block[0] * s->y_dc_scale;
-    else
-        block[0] = block[0] * s->c_dc_scale;
-    /* XXX: only mpeg1 */
-    quant_matrix = s->intra_matrix;
-    for(i=1;i<=nCoeffs;i++) {
-        int j= s->intra_scantable.permutated[i];
-        level = block[j];
-        if (level) {
-            if (level < 0) {
-                level = -level;
-                level = (int)(level * qscale * quant_matrix[j]) >> 3;
-                level = (level - 1) | 1;
-                level = -level;
-            } else {
-                level = (int)(level * qscale * quant_matrix[j]) >> 3;
-                level = (level - 1) | 1;
-            }
-            block[j] = level;
-        }
-    }
-}
-
-static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale)
-{
-    int i, level, nCoeffs;
-    const uint16_t *quant_matrix;
-
-    nCoeffs= s->block_last_index[n];
-
-    quant_matrix = s->inter_matrix;
-    for(i=0; i<=nCoeffs; i++) {
-        int j= s->intra_scantable.permutated[i];
-        level = block[j];
-        if (level) {
-            if (level < 0) {
-                level = -level;
-                level = (((level << 1) + 1) * qscale *
-                         ((int) (quant_matrix[j]))) >> 4;
-                level = (level - 1) | 1;
-                level = -level;
-            } else {
-                level = (((level << 1) + 1) * qscale *
-                         ((int) (quant_matrix[j]))) >> 4;
-                level = (level - 1) | 1;
-            }
-            block[j] = level;
-        }
-    }
-}
-
-static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale)
-{
-    int i, level, nCoeffs;
-    const uint16_t *quant_matrix;
-
-    if(s->alternate_scan) nCoeffs= 63;
-    else nCoeffs= s->block_last_index[n];
-
-    if (n < 4)
-        block[0] = block[0] * s->y_dc_scale;
-    else
-        block[0] = block[0] * s->c_dc_scale;
-    quant_matrix = s->intra_matrix;
-    for(i=1;i<=nCoeffs;i++) {
-        int j= s->intra_scantable.permutated[i];
-        level = block[j];
-        if (level) {
-            if (level < 0) {
-                level = -level;
-                level = (int)(level * qscale * quant_matrix[j]) >> 3;
-                level = -level;
-            } else {
-                level = (int)(level * qscale * quant_matrix[j]) >> 3;
-            }
-            block[j] = level;
-        }
-    }
-}
-
-static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale)
-{
-    int i, level, nCoeffs;
-    const uint16_t *quant_matrix;
-    int sum=-1;
-
-    if(s->alternate_scan) nCoeffs= 63;
-    else nCoeffs= s->block_last_index[n];
-
-    if (n < 4)
-        block[0] = block[0] * s->y_dc_scale;
-    else
-        block[0] = block[0] * s->c_dc_scale;
-    quant_matrix = s->intra_matrix;
-    for(i=1;i<=nCoeffs;i++) {
-        int j= s->intra_scantable.permutated[i];
-        level = block[j];
-        if (level) {
-            if (level < 0) {
-                level = -level;
-                level = (int)(level * qscale * quant_matrix[j]) >> 3;
-                level = -level;
-            } else {
-                level = (int)(level * qscale * quant_matrix[j]) >> 3;
-            }
-            block[j] = level;
-            sum+=level;
-        }
-    }
-    block[63]^=sum&1;
-}
-
-static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale)
-{
-    int i, level, nCoeffs;
-    const uint16_t *quant_matrix;
-    int sum=-1;
-
-    if(s->alternate_scan) nCoeffs= 63;
-    else nCoeffs= s->block_last_index[n];
-
-    quant_matrix = s->inter_matrix;
-    for(i=0; i<=nCoeffs; i++) {
-        int j= s->intra_scantable.permutated[i];
-        level = block[j];
-        if (level) {
-            if (level < 0) {
-                level = -level;
-                level = (((level << 1) + 1) * qscale *
-                         ((int) (quant_matrix[j]))) >> 4;
-                level = -level;
-            } else {
-                level = (((level << 1) + 1) * qscale *
-                         ((int) (quant_matrix[j]))) >> 4;
-            }
-            block[j] = level;
-            sum+=level;
-        }
-    }
-    block[63]^=sum&1;
-}
-
-static void dct_unquantize_h263_intra_c(MpegEncContext *s,
-                                  int16_t *block, int n, int qscale)
-{
-    int i, level, qmul, qadd;
-    int nCoeffs;
-
-    assert(s->block_last_index[n]>=0);
-
-    qmul = qscale << 1;
-
-    if (!s->h263_aic) {
-        if (n < 4)
-            block[0] = block[0] * s->y_dc_scale;
-        else
-            block[0] = block[0] * s->c_dc_scale;
-        qadd = (qscale - 1) | 1;
-    }else{
-        qadd = 0;
-    }
-    if(s->ac_pred)
-        nCoeffs=63;
-    else
-        nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
-
-    for(i=1; i<=nCoeffs; i++) {
-        level = block[i];
-        if (level) {
-            if (level < 0) {
-                level = level * qmul - qadd;
-            } else {
-                level = level * qmul + qadd;
-            }
-            block[i] = level;
-        }
-    }
-}
-
-static void dct_unquantize_h263_inter_c(MpegEncContext *s,
-                                  int16_t *block, int n, int qscale)
-{
-    int i, level, qmul, qadd;
-    int nCoeffs;
-
-    assert(s->block_last_index[n]>=0);
-
-    qadd = (qscale - 1) | 1;
-    qmul = qscale << 1;
-
-    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
-
-    for(i=0; i<=nCoeffs; i++) {
-        level = block[i];
-        if (level) {
-            if (level < 0) {
-                level = level * qmul - qadd;
-            } else {
-                level = level * qmul + qadd;
-            }
-            block[i] = level;
-        }
-    }
-}
-
 /**
  * set qscale and update qscale dependent variables.
  */
@@ -2618,26 +2472,8 @@ void ff_set_qscale(MpegEncContext * s, int qscale)
     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
 }
 
-void ff_MPV_report_decode_progress(MpegEncContext *s)
+void ff_mpv_report_decode_progress(MpegEncContext *s)
 {
     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
         ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
 }
-
-#if CONFIG_ERROR_RESILIENCE
-void ff_mpeg_er_frame_start(MpegEncContext *s)
-{
-    ERContext *er = &s->er;
-
-    er->cur_pic  = s->current_picture_ptr;
-    er->last_pic = s->last_picture_ptr;
-    er->next_pic = s->next_picture_ptr;
-
-    er->pp_time           = s->pp_time;
-    er->pb_time           = s->pb_time;
-    er->quarter_sample    = s->quarter_sample;
-    er->partitioned_frame = s->partitioned_frame;
-
-    ff_er_frame_start(er);
-}
-#endif /* CONFIG_ERROR_RESILIENCE */