x86: dsputil: Simplify xvmc deprecation conditional
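The hunks below wrap the remaining XvMC calls in FF_API_XVMC / FF_DISABLE_DEPRECATION_WARNINGS guards, which is why the new libavutil/internal.h include appears at the top of the file. As a rough, hypothetical sketch only (the real macro definitions live in libavutil/internal.h and libavcodec/version.h and are not shown in this diff), such guards are typically built on compiler diagnostic pragmas:

/* Hypothetical sketch of how FF_DISABLE/ENABLE_DEPRECATION_WARNINGS-style
 * guards are commonly implemented; the actual FFmpeg definitions may differ.
 * Local MY_* names are used to avoid any claim about the real macros. */
#include <stdio.h>

#if defined(__GNUC__) || defined(__clang__)
#   define MY_DEPRECATED __attribute__((deprecated))
#   define MY_DISABLE_DEPRECATION_WARNINGS \
        _Pragma("GCC diagnostic push") \
        _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
#   define MY_ENABLE_DEPRECATION_WARNINGS \
        _Pragma("GCC diagnostic pop")
#else
#   define MY_DEPRECATED
#   define MY_DISABLE_DEPRECATION_WARNINGS
#   define MY_ENABLE_DEPRECATION_WARNINGS
#endif

/* Stand-in for a deprecated XvMC entry point (illustrative only). */
MY_DEPRECATED static int old_xvmc_call(void) { return 0; }

int main(void)
{
MY_DISABLE_DEPRECATION_WARNINGS
    /* Compiles cleanly even with -Werror=deprecated-declarations. */
    int ret = old_xvmc_call();
MY_ENABLE_DEPRECATION_WARNINGS
    printf("%d\n", ret);
    return 0;
}

In the diff itself the pragma guards additionally sit behind #if FF_API_XVMC, so the whole XvMC code path can be dropped once that API is finally removed.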
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 718f946302ad02106b7105f26369aa2aa4ea1f7b..6ecfd727c13bf861b8f0f6da67d2527f6fe428d9 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -30,6 +30,7 @@
 #include "libavutil/attributes.h"
 #include "libavutil/avassert.h"
 #include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
 #include "avcodec.h"
 #include "dsputil.h"
 #include "internal.h"
 #include "thread.h"
 #include <limits.h>
 
-static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale);
-static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale);
-static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale);
-static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale);
-static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale);
-static void dct_unquantize_h263_intra_c(MpegEncContext *s,
-                                  int16_t *block, int n, int qscale);
-static void dct_unquantize_h263_inter_c(MpegEncContext *s,
-                                  int16_t *block, int n, int qscale);
-
 static const uint8_t ff_default_chroma_qscale_table[32] = {
 //   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
      0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
@@ -122,6 +108,224 @@ const enum AVPixelFormat ff_pixfmt_list_420[] = {
     AV_PIX_FMT_NONE
 };
 
+static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
+                                   int16_t *block, int n, int qscale)
+{
+    int i, level, nCoeffs;
+    const uint16_t *quant_matrix;
+
+    nCoeffs= s->block_last_index[n];
+
+    if (n < 4)
+        block[0] = block[0] * s->y_dc_scale;
+    else
+        block[0] = block[0] * s->c_dc_scale;
+    /* XXX: only mpeg1 */
+    quant_matrix = s->intra_matrix;
+    for(i=1;i<=nCoeffs;i++) {
+        int j= s->intra_scantable.permutated[i];
+        level = block[j];
+        if (level) {
+            if (level < 0) {
+                level = -level;
+                level = (int)(level * qscale * quant_matrix[j]) >> 3;
+                level = (level - 1) | 1;
+                level = -level;
+            } else {
+                level = (int)(level * qscale * quant_matrix[j]) >> 3;
+                level = (level - 1) | 1;
+            }
+            block[j] = level;
+        }
+    }
+}
+
+static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
+                                   int16_t *block, int n, int qscale)
+{
+    int i, level, nCoeffs;
+    const uint16_t *quant_matrix;
+
+    nCoeffs= s->block_last_index[n];
+
+    quant_matrix = s->inter_matrix;
+    for(i=0; i<=nCoeffs; i++) {
+        int j= s->intra_scantable.permutated[i];
+        level = block[j];
+        if (level) {
+            if (level < 0) {
+                level = -level;
+                level = (((level << 1) + 1) * qscale *
+                         ((int) (quant_matrix[j]))) >> 4;
+                level = (level - 1) | 1;
+                level = -level;
+            } else {
+                level = (((level << 1) + 1) * qscale *
+                         ((int) (quant_matrix[j]))) >> 4;
+                level = (level - 1) | 1;
+            }
+            block[j] = level;
+        }
+    }
+}
+
+static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
+                                   int16_t *block, int n, int qscale)
+{
+    int i, level, nCoeffs;
+    const uint16_t *quant_matrix;
+
+    if(s->alternate_scan) nCoeffs= 63;
+    else nCoeffs= s->block_last_index[n];
+
+    if (n < 4)
+        block[0] = block[0] * s->y_dc_scale;
+    else
+        block[0] = block[0] * s->c_dc_scale;
+    quant_matrix = s->intra_matrix;
+    for(i=1;i<=nCoeffs;i++) {
+        int j= s->intra_scantable.permutated[i];
+        level = block[j];
+        if (level) {
+            if (level < 0) {
+                level = -level;
+                level = (int)(level * qscale * quant_matrix[j]) >> 3;
+                level = -level;
+            } else {
+                level = (int)(level * qscale * quant_matrix[j]) >> 3;
+            }
+            block[j] = level;
+        }
+    }
+}
+
+static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
+                                   int16_t *block, int n, int qscale)
+{
+    int i, level, nCoeffs;
+    const uint16_t *quant_matrix;
+    int sum=-1;
+
+    if(s->alternate_scan) nCoeffs= 63;
+    else nCoeffs= s->block_last_index[n];
+
+    if (n < 4)
+        block[0] = block[0] * s->y_dc_scale;
+    else
+        block[0] = block[0] * s->c_dc_scale;
+    quant_matrix = s->intra_matrix;
+    for(i=1;i<=nCoeffs;i++) {
+        int j= s->intra_scantable.permutated[i];
+        level = block[j];
+        if (level) {
+            if (level < 0) {
+                level = -level;
+                level = (int)(level * qscale * quant_matrix[j]) >> 3;
+                level = -level;
+            } else {
+                level = (int)(level * qscale * quant_matrix[j]) >> 3;
+            }
+            block[j] = level;
+            sum+=level;
+        }
+    }
+    block[63]^=sum&1;
+}
+
+static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
+                                   int16_t *block, int n, int qscale)
+{
+    int i, level, nCoeffs;
+    const uint16_t *quant_matrix;
+    int sum=-1;
+
+    if(s->alternate_scan) nCoeffs= 63;
+    else nCoeffs= s->block_last_index[n];
+
+    quant_matrix = s->inter_matrix;
+    for(i=0; i<=nCoeffs; i++) {
+        int j= s->intra_scantable.permutated[i];
+        level = block[j];
+        if (level) {
+            if (level < 0) {
+                level = -level;
+                level = (((level << 1) + 1) * qscale *
+                         ((int) (quant_matrix[j]))) >> 4;
+                level = -level;
+            } else {
+                level = (((level << 1) + 1) * qscale *
+                         ((int) (quant_matrix[j]))) >> 4;
+            }
+            block[j] = level;
+            sum+=level;
+        }
+    }
+    block[63]^=sum&1;
+}
+
+static void dct_unquantize_h263_intra_c(MpegEncContext *s,
+                                  int16_t *block, int n, int qscale)
+{
+    int i, level, qmul, qadd;
+    int nCoeffs;
+
+    assert(s->block_last_index[n]>=0);
+
+    qmul = qscale << 1;
+
+    if (!s->h263_aic) {
+        if (n < 4)
+            block[0] = block[0] * s->y_dc_scale;
+        else
+            block[0] = block[0] * s->c_dc_scale;
+        qadd = (qscale - 1) | 1;
+    }else{
+        qadd = 0;
+    }
+    if(s->ac_pred)
+        nCoeffs=63;
+    else
+        nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
+
+    for(i=1; i<=nCoeffs; i++) {
+        level = block[i];
+        if (level) {
+            if (level < 0) {
+                level = level * qmul - qadd;
+            } else {
+                level = level * qmul + qadd;
+            }
+            block[i] = level;
+        }
+    }
+}
+
+static void dct_unquantize_h263_inter_c(MpegEncContext *s,
+                                  int16_t *block, int n, int qscale)
+{
+    int i, level, qmul, qadd;
+    int nCoeffs;
+
+    assert(s->block_last_index[n]>=0);
+
+    qadd = (qscale - 1) | 1;
+    qmul = qscale << 1;
+
+    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
+
+    for(i=0; i<=nCoeffs; i++) {
+        level = block[i];
+        if (level) {
+            if (level < 0) {
+                level = level * qmul - qadd;
+            } else {
+                level = level * qmul + qadd;
+            }
+            block[i] = level;
+        }
+    }
+}
+
 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                               int (*mv)[2][4][2],
                               int mb_x, int mb_y, int mb_intra, int mb_skipped)
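
For reference, the MPEG-1 intra path moved above scales each coefficient by qscale and the intra matrix and then rounds the magnitude down to an odd value. A minimal, self-contained illustration of that arithmetic, with invented input values:

/* Standalone illustration of the scaling in dct_unquantize_mpeg1_intra_c
 * above: (|level| * qscale * quant_matrix[j]) >> 3, then (level - 1) | 1
 * to force an odd magnitude, with the sign restored afterwards. */
#include <stdio.h>
#include <stdlib.h>

static int unquantize_one(int level, int qscale, int matrix_entry)
{
    int sign = level < 0 ? -1 : 1;
    int mag  = abs(level);

    mag = (mag * qscale * matrix_entry) >> 3;
    mag = (mag - 1) | 1;    /* even -> next lower odd, odd left unchanged */
    return sign * mag;
}

int main(void)
{
    /* e.g. level 3, qscale 8, matrix entry 16: 3*8*16 = 384, >>3 = 48, odd -> 47 */
    printf("%d %d\n", unquantize_one(3, 8, 16), unquantize_one(-3, 8, 16));
    return 0;
}

The MPEG-2 bitexact variants above skip this oddification and instead accumulate the coefficient sum and XOR its parity into block[63].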
@@ -165,12 +369,8 @@ av_cold int ff_dct_common_init(MpegEncContext *s)
         s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
 
-    if (ARCH_ALPHA)
-        ff_MPV_common_init_axp(s);
     if (ARCH_ARM)
         ff_MPV_common_init_arm(s);
-    if (ARCH_BFIN)
-        ff_MPV_common_init_bfin(s);
     if (ARCH_PPC)
         ff_MPV_common_init_ppc(s);
     if (ARCH_X86)
@@ -192,7 +392,7 @@ av_cold int ff_dct_common_init(MpegEncContext *s)
     return 0;
 }
 
-int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
+static int frame_size_alloc(MpegEncContext *s, int linesize)
 {
     int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
 
@@ -237,7 +437,7 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
         r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
     }
 
-    if (r < 0 || !pic->f.data[0]) {
+    if (r < 0 || !pic->f.buf[0]) {
         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
                r, pic->f.data[0]);
         return -1;
@@ -271,7 +471,7 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
     }
 
     if (!s->edge_emu_buffer &&
-        (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
+        (ret = frame_size_alloc(s, pic->f.linesize[0])) < 0) {
         av_log(s->avctx, AV_LOG_ERROR,
                "get_buffer() failed to allocate context scratch buffers.\n");
         ff_mpeg_unref_picture(s, pic);
@@ -281,7 +481,7 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
     return 0;
 }
 
-static void free_picture_tables(Picture *pic)
+void ff_free_picture_tables(Picture *pic)
 {
     int i;
 
@@ -321,8 +521,7 @@ static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
             return AVERROR(ENOMEM);
     }
 
-    if (s->out_format == FMT_H263 || s->encoding ||
-               (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
+    if (s->out_format == FMT_H263 || s->encoding) {
         int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
         int ref_index_size = 4 * mb_array_size;
 
@@ -374,7 +573,7 @@ int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
         assert(pic->f.data[0]);
         pic->shared = 1;
     } else {
-        assert(!pic->f.data[0]);
+        assert(!pic->f.buf[0]);
 
         if (alloc_frame_buffer(s, pic) < 0)
             return -1;
@@ -411,7 +610,7 @@ int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
 fail:
     av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
     ff_mpeg_unref_picture(s, pic);
-    free_picture_tables(pic);
+    ff_free_picture_tables(pic);
     return AVERROR(ENOMEM);
 }
 
@@ -435,7 +634,7 @@ void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
     av_buffer_unref(&pic->hwaccel_priv_buf);
 
     if (pic->needs_realloc)
-        free_picture_tables(pic);
+        ff_free_picture_tables(pic);
 
     memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
 }
@@ -451,7 +650,7 @@ do {\
         av_buffer_unref(&dst->table);\
         dst->table = av_buffer_ref(src->table);\
         if (!dst->table) {\
-            free_picture_tables(dst);\
+            ff_free_picture_tables(dst);\
             return AVERROR(ENOMEM);\
         }\
     }\
@@ -520,6 +719,15 @@ fail:
     return ret;
 }
 
+static void exchange_uv(MpegEncContext *s)
+{
+    int16_t (*tmp)[64];
+
+    tmp           = s->pblocks[4];
+    s->pblocks[4] = s->pblocks[5];
+    s->pblocks[5] = tmp;
+}
+
 static int init_duplicate_context(MpegEncContext *s)
 {
     int y_size = s->b8_stride * (2 * s->mb_height + 1);
@@ -550,6 +758,8 @@ static int init_duplicate_context(MpegEncContext *s)
     for (i = 0; i < 12; i++) {
         s->pblocks[i] = &s->block[i];
     }
+    if (s->avctx->codec_tag == AV_RL32("VCR2"))
+        exchange_uv(s);
 
     if (s->out_format == FMT_H263) {
         /* ac values */
@@ -624,8 +834,10 @@ int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
     for (i = 0; i < 12; i++) {
         dst->pblocks[i] = &dst->block[i];
     }
+    if (dst->avctx->codec_tag == AV_RL32("VCR2"))
+        exchange_uv(dst);
     if (!dst->edge_emu_buffer &&
-        (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
+        (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
         av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
                "scratch buffers.\n");
         return ret;
@@ -672,11 +884,10 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
 
     s->coded_picture_number = s1->coded_picture_number;
     s->picture_number       = s1->picture_number;
-    s->input_picture_number = s1->input_picture_number;
 
     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
         ff_mpeg_unref_picture(s, &s->picture[i]);
-        if (s1->picture[i].f.data[0] &&
+        if (s1->picture[i].f.buf[0] &&
             (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
             return ret;
     }
@@ -684,7 +895,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
 #define UPDATE_PICTURE(pic)\
 do {\
     ff_mpeg_unref_picture(s, &s->pic);\
-    if (s1->pic.f.data[0])\
+    if (s1->pic.f.buf[0])\
         ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
     else\
         ret = update_picture_tables(&s->pic, &s1->pic);\
@@ -705,8 +916,9 @@ do {\
     s->workaround_bugs      = s1->workaround_bugs;
 
     // MPEG4 timing info
-    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
-           (char *) &s1->shape - (char *) &s1->time_increment_bits);
+    memcpy(&s->last_time_base, &s1->last_time_base,
+           (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
+           (char *) &s1->last_time_base);
 
     // B-frame info
     s->max_b_frames = s1->max_b_frames;
@@ -732,7 +944,7 @@ do {\
     // linesize dependend scratch buffer allocation
     if (!s->edge_emu_buffer)
         if (s1->linesize) {
-            if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
+            if (frame_size_alloc(s, s1->linesize) < 0) {
                 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                        "scratch buffers.\n");
                 return AVERROR(ENOMEM);
@@ -751,10 +963,6 @@ do {\
         s->last_pict_type = s1->pict_type;
         if (s1->current_picture_ptr)
             s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
-
-        if (s1->pict_type != AV_PICTURE_TYPE_B) {
-            s->last_non_b_pict_type = s1->pict_type;
-        }
     }
 
     return 0;
@@ -777,9 +985,6 @@ void ff_MPV_common_defaults(MpegEncContext *s)
 
     s->coded_picture_number  = 0;
     s->picture_number        = 0;
-    s->input_picture_number  = 0;
-
-    s->picture_in_gop_number = 0;
 
     s->f_code                = 1;
     s->b_code                = 1;
@@ -1029,48 +1234,17 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
 
     s->stream_codec_tag   = avpriv_toupper4(s->avctx->stream_codec_tag);
 
-    if (s->width && s->height) {
-        s->avctx->coded_frame = &s->current_picture.f;
-
-        if (s->encoding) {
-            if (s->msmpeg4_version) {
-                FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
-                                  2 * 2 * (MAX_LEVEL + 1) *
-                                  (MAX_RUN + 1) * 2 * sizeof(int), fail);
-            }
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
-
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
-                              64 * 32   * sizeof(int), fail);
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
-                              64 * 32   * sizeof(int), fail);
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
-                              64 * 32 * 2 * sizeof(uint16_t), fail);
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
-                              64 * 32 * 2 * sizeof(uint16_t), fail);
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
-                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);
-            FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
-                              MAX_PICTURE_COUNT * sizeof(Picture *), fail);
-
-            if (s->avctx->noise_reduction) {
-                FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
-                                  2 * 64 * sizeof(uint16_t), fail);
-            }
-        }
-    }
-
     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                       MAX_PICTURE_COUNT * sizeof(Picture), fail);
     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-        avcodec_get_frame_defaults(&s->picture[i].f);
+        av_frame_unref(&s->picture[i].f);
     }
     memset(&s->next_picture, 0, sizeof(s->next_picture));
     memset(&s->last_picture, 0, sizeof(s->last_picture));
     memset(&s->current_picture, 0, sizeof(s->current_picture));
-    avcodec_get_frame_defaults(&s->next_picture.f);
-    avcodec_get_frame_defaults(&s->last_picture.f);
-    avcodec_get_frame_defaults(&s->current_picture.f);
+    av_frame_unref(&s->next_picture.f);
+    av_frame_unref(&s->last_picture.f);
+    av_frame_unref(&s->current_picture.f);
 
     if (s->width && s->height) {
         if (init_context_frame(s))
@@ -1260,32 +1434,19 @@ void ff_MPV_common_end(MpegEncContext *s)
     av_freep(&s->bitstream_buffer);
     s->allocated_bitstream_buffer_size = 0;
 
-    av_freep(&s->avctx->stats_out);
-    av_freep(&s->ac_stats);
-
-    av_freep(&s->q_intra_matrix);
-    av_freep(&s->q_inter_matrix);
-    av_freep(&s->q_intra_matrix16);
-    av_freep(&s->q_inter_matrix16);
-    av_freep(&s->input_picture);
-    av_freep(&s->reordered_input_picture);
-    av_freep(&s->dct_offset);
-
     if (s->picture) {
         for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-            free_picture_tables(&s->picture[i]);
+            ff_free_picture_tables(&s->picture[i]);
             ff_mpeg_unref_picture(s, &s->picture[i]);
         }
     }
     av_freep(&s->picture);
-    free_picture_tables(&s->last_picture);
+    ff_free_picture_tables(&s->last_picture);
     ff_mpeg_unref_picture(s, &s->last_picture);
-    free_picture_tables(&s->current_picture);
+    ff_free_picture_tables(&s->current_picture);
     ff_mpeg_unref_picture(s, &s->current_picture);
-    free_picture_tables(&s->next_picture);
+    ff_free_picture_tables(&s->next_picture);
     ff_mpeg_unref_picture(s, &s->next_picture);
-    free_picture_tables(&s->new_picture);
-    ff_mpeg_unref_picture(s, &s->new_picture);
 
     free_context_frame(s);
 
@@ -1388,22 +1549,20 @@ av_cold void ff_init_vlc_rl(RLTable *rl)
     }
 }
 
-void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
+static void release_unused_pictures(MpegEncContext *s)
 {
     int i;
 
     /* release non reference frames */
     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-        if (!s->picture[i].reference &&
-            (remove_current || &s->picture[i] !=  s->current_picture_ptr)) {
+        if (!s->picture[i].reference)
             ff_mpeg_unref_picture(s, &s->picture[i]);
-        }
     }
 }
 
 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
 {
-    if (pic->f.data[0] == NULL)
+    if (pic->f.buf[0] == NULL)
         return 1;
     if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
         return 1;
@@ -1416,7 +1575,7 @@ static int find_unused_picture(MpegEncContext *s, int shared)
 
     if (shared) {
         for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-            if (s->picture[i].f.data[0] == NULL)
+            if (s->picture[i].f.buf[0] == NULL)
                 return i;
         }
     } else {
@@ -1436,38 +1595,16 @@ int ff_find_unused_picture(MpegEncContext *s, int shared)
     if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
         if (s->picture[ret].needs_realloc) {
             s->picture[ret].needs_realloc = 0;
-            free_picture_tables(&s->picture[ret]);
+            ff_free_picture_tables(&s->picture[ret]);
             ff_mpeg_unref_picture(s, &s->picture[ret]);
-            avcodec_get_frame_defaults(&s->picture[ret].f);
         }
     }
     return ret;
 }
 
-static void update_noise_reduction(MpegEncContext *s)
-{
-    int intra, i;
-
-    for (intra = 0; intra < 2; intra++) {
-        if (s->dct_count[intra] > (1 << 16)) {
-            for (i = 0; i < 64; i++) {
-                s->dct_error_sum[intra][i] >>= 1;
-            }
-            s->dct_count[intra] >>= 1;
-        }
-
-        for (i = 0; i < 64; i++) {
-            s->dct_offset[intra][i] = (s->avctx->noise_reduction *
-                                       s->dct_count[intra] +
-                                       s->dct_error_sum[intra][i] / 2) /
-                                      (s->dct_error_sum[intra][i] + 1);
-        }
-    }
-}
-
 /**
- * generic function for encode/decode called after coding/decoding
- * the header and before a frame is coded/decoded.
+ * generic function called after decoding
+ * the header and before a frame is decoded.
  */
 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 {
@@ -1478,68 +1615,64 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
     /* mark & release old frames */
     if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
         s->last_picture_ptr != s->next_picture_ptr &&
-        s->last_picture_ptr->f.data[0]) {
+        s->last_picture_ptr->f.buf[0]) {
         ff_mpeg_unref_picture(s, s->last_picture_ptr);
     }
 
     /* release forgotten pictures */
     /* if (mpeg124/h263) */
-    if (!s->encoding) {
-        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-            if (&s->picture[i] != s->last_picture_ptr &&
-                &s->picture[i] != s->next_picture_ptr &&
-                s->picture[i].reference && !s->picture[i].needs_realloc) {
-                if (!(avctx->active_thread_type & FF_THREAD_FRAME))
-                    av_log(avctx, AV_LOG_ERROR,
-                           "releasing zombie picture\n");
-                ff_mpeg_unref_picture(s, &s->picture[i]);
-            }
+    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+        if (&s->picture[i] != s->last_picture_ptr &&
+            &s->picture[i] != s->next_picture_ptr &&
+            s->picture[i].reference && !s->picture[i].needs_realloc) {
+            if (!(avctx->active_thread_type & FF_THREAD_FRAME))
+                av_log(avctx, AV_LOG_ERROR,
+                       "releasing zombie picture\n");
+            ff_mpeg_unref_picture(s, &s->picture[i]);
         }
     }
 
     ff_mpeg_unref_picture(s, &s->current_picture);
 
-    if (!s->encoding) {
-        ff_release_unused_pictures(s, 1);
+    release_unused_pictures(s);
 
-        if (s->current_picture_ptr &&
-            s->current_picture_ptr->f.data[0] == NULL) {
-            // we already have a unused image
-            // (maybe it was set before reading the header)
-            pic = s->current_picture_ptr;
-        } else {
-            i   = ff_find_unused_picture(s, 0);
-            if (i < 0) {
-                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
-                return i;
-            }
-            pic = &s->picture[i];
+    if (s->current_picture_ptr &&
+        s->current_picture_ptr->f.buf[0] == NULL) {
+        // we already have a unused image
+        // (maybe it was set before reading the header)
+        pic = s->current_picture_ptr;
+    } else {
+        i   = ff_find_unused_picture(s, 0);
+        if (i < 0) {
+            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
+            return i;
         }
+        pic = &s->picture[i];
+    }
 
-        pic->reference = 0;
-        if (!s->droppable) {
-            if (s->pict_type != AV_PICTURE_TYPE_B)
-                pic->reference = 3;
-        }
+    pic->reference = 0;
+    if (!s->droppable) {
+        if (s->pict_type != AV_PICTURE_TYPE_B)
+            pic->reference = 3;
+    }
 
-        pic->f.coded_picture_number = s->coded_picture_number++;
+    pic->f.coded_picture_number = s->coded_picture_number++;
 
-        if (ff_alloc_picture(s, pic, 0) < 0)
-            return -1;
+    if (ff_alloc_picture(s, pic, 0) < 0)
+        return -1;
 
-        s->current_picture_ptr = pic;
-        // FIXME use only the vars from current_pic
-        s->current_picture_ptr->f.top_field_first = s->top_field_first;
-        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
-            s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
-            if (s->picture_structure != PICT_FRAME)
-                s->current_picture_ptr->f.top_field_first =
-                    (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
-        }
-        s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
-                                                     !s->progressive_sequence;
-        s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
+    s->current_picture_ptr = pic;
+    // FIXME use only the vars from current_pic
+    s->current_picture_ptr->f.top_field_first = s->top_field_first;
+    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
+        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
+        if (s->picture_structure != PICT_FRAME)
+            s->current_picture_ptr->f.top_field_first =
+                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
     }
+    s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
+                                                 !s->progressive_sequence;
+    s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
 
     s->current_picture_ptr->f.pict_type = s->pict_type;
     // if (s->flags && CODEC_FLAG_QSCALE)
@@ -1563,7 +1696,7 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
             s->pict_type, s->droppable);
 
     if ((s->last_picture_ptr == NULL ||
-         s->last_picture_ptr->f.data[0] == NULL) &&
+         s->last_picture_ptr->f.buf[0] == NULL) &&
         (s->pict_type != AV_PICTURE_TYPE_I ||
          s->picture_structure != PICT_FRAME)) {
         int h_chroma_shift, v_chroma_shift;
@@ -1583,6 +1716,10 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
             return i;
         }
         s->last_picture_ptr = &s->picture[i];
+
+        s->last_picture_ptr->reference   = 3;
+        s->last_picture_ptr->f.pict_type = AV_PICTURE_TYPE_I;
+
         if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
             s->last_picture_ptr = NULL;
             return -1;
@@ -1601,7 +1738,7 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
         ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
     }
     if ((s->next_picture_ptr == NULL ||
-         s->next_picture_ptr->f.data[0] == NULL) &&
+         s->next_picture_ptr->f.buf[0] == NULL) &&
         s->pict_type == AV_PICTURE_TYPE_B) {
         /* Allocate a dummy frame */
         i = ff_find_unused_picture(s, 0);
@@ -1610,6 +1747,10 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
             return i;
         }
         s->next_picture_ptr = &s->picture[i];
+
+        s->next_picture_ptr->reference   = 3;
+        s->next_picture_ptr->f.pict_type = AV_PICTURE_TYPE_I;
+
         if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
             s->next_picture_ptr = NULL;
             return -1;
@@ -1620,21 +1761,21 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 
     if (s->last_picture_ptr) {
         ff_mpeg_unref_picture(s, &s->last_picture);
-        if (s->last_picture_ptr->f.data[0] &&
+        if (s->last_picture_ptr->f.buf[0] &&
             (ret = ff_mpeg_ref_picture(s, &s->last_picture,
                                        s->last_picture_ptr)) < 0)
             return ret;
     }
     if (s->next_picture_ptr) {
         ff_mpeg_unref_picture(s, &s->next_picture);
-        if (s->next_picture_ptr->f.data[0] &&
+        if (s->next_picture_ptr->f.buf[0] &&
             (ret = ff_mpeg_ref_picture(s, &s->next_picture,
                                        s->next_picture_ptr)) < 0)
             return ret;
     }
 
     if (s->pict_type != AV_PICTURE_TYPE_I &&
-        !(s->last_picture_ptr && s->last_picture_ptr->f.data[0])) {
+        !(s->last_picture_ptr && s->last_picture_ptr->f.buf[0])) {
         av_log(s, AV_LOG_ERROR,
                "Non-reference picture received and no reference available\n");
         return AVERROR_INVALIDDATA;
@@ -1669,82 +1810,31 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
     }
 
-    if (s->dct_error_sum) {
-        assert(s->avctx->noise_reduction && s->encoding);
-        update_noise_reduction(s);
-    }
-
+#if FF_API_XVMC
+FF_DISABLE_DEPRECATION_WARNINGS
     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
         return ff_xvmc_field_start(s, avctx);
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif /* FF_API_XVMC */
 
     return 0;
 }
 
-/* generic function for encode/decode called after a
- * frame has been coded/decoded. */
+/* called after a frame has been decoded. */
 void ff_MPV_frame_end(MpegEncContext *s)
 {
-    int i;
+#if FF_API_XVMC
+FF_DISABLE_DEPRECATION_WARNINGS
     /* redraw edges for the frame if decoding didn't complete */
     // just to make sure that all data is rendered.
     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
         ff_xvmc_field_end(s);
-   } else if ((s->er.error_count || s->encoding) &&
-              !s->avctx->hwaccel &&
-              s->unrestricted_mv &&
-              s->current_picture.reference &&
-              !s->intra_only &&
-              !(s->flags & CODEC_FLAG_EMU_EDGE)) {
-       const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
-       int hshift = desc->log2_chroma_w;
-       int vshift = desc->log2_chroma_h;
-       s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
-                         s->h_edge_pos, s->v_edge_pos,
-                         EDGE_WIDTH, EDGE_WIDTH,
-                         EDGE_TOP | EDGE_BOTTOM);
-       s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
-                         s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
-                         EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
-                         EDGE_TOP | EDGE_BOTTOM);
-       s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
-                         s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
-                         EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
-                         EDGE_TOP | EDGE_BOTTOM);
-    }
+    } else
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif /* FF_API_XVMC */
 
     emms_c();
 
-    s->last_pict_type                 = s->pict_type;
-    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
-    if (s->pict_type!= AV_PICTURE_TYPE_B) {
-        s->last_non_b_pict_type = s->pict_type;
-    }
-#if 0
-    /* copy back current_picture variables */
-    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
-            s->picture[i] = s->current_picture;
-            break;
-        }
-    }
-    assert(i < MAX_PICTURE_COUNT);
-#endif
-
-    if (s->encoding) {
-        /* release non-reference frames */
-        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-            if (!s->picture[i].reference)
-                ff_mpeg_unref_picture(s, &s->picture[i]);
-        }
-    }
-    // clear copies, to avoid confusion
-#if 0
-    memset(&s->last_picture,    0, sizeof(Picture));
-    memset(&s->next_picture,    0, sizeof(Picture));
-    memset(&s->current_picture, 0, sizeof(Picture));
-#endif
-    s->avctx->coded_frame = &s->current_picture_ptr->f;
-
     if (s->current_picture.reference)
         ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
 }
@@ -1962,10 +2052,15 @@ void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
                             int is_mpeg12)
 {
     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
+
+#if FF_API_XVMC
+FF_DISABLE_DEPRECATION_WARNINGS
     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
         ff_xvmc_decode_mb(s);//xvmc uses pblocks
         return;
     }
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif /* FF_API_XVMC */
 
     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
@@ -2201,11 +2296,10 @@ void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
  */
 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
                         Picture *last, int y, int h, int picture_structure,
-                        int first_field, int draw_edges, int low_delay,
+                        int first_field, int low_delay,
                         int v_edge_pos, int h_edge_pos)
 {
     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
-    int hshift = desc->log2_chroma_w;
     int vshift = desc->log2_chroma_h;
     const int field_pic = picture_structure != PICT_FRAME;
     if(field_pic){
@@ -2213,29 +2307,6 @@ void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
         y <<= 1;
     }
 
-    if (!avctx->hwaccel &&
-        draw_edges &&
-        cur->reference &&
-        !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
-        int *linesize = cur->f.linesize;
-        int sides = 0, edge_h;
-        if (y==0) sides |= EDGE_TOP;
-        if (y + h >= v_edge_pos)
-            sides |= EDGE_BOTTOM;
-
-        edge_h= FFMIN(h, v_edge_pos - y);
-
-        dsp->draw_edges(cur->f.data[0] + y * linesize[0],
-                        linesize[0], h_edge_pos, edge_h,
-                        EDGE_WIDTH, EDGE_WIDTH, sides);
-        dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
-                        linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
-                        EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
-        dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
-                        linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
-                        EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
-    }
-
     h = FFMIN(h, avctx->height - y);
 
     if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
@@ -2275,10 +2346,9 @@ void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
 
 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
 {
-    int draw_edges = s->unrestricted_mv && !s->intra_only;
     ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
                        &s->last_picture, y, h, s->picture_structure,
-                       s->first_field, draw_edges, s->low_delay,
+                       s->first_field, s->low_delay,
                        s->v_edge_pos, s->h_edge_pos);
 }
 
@@ -2370,224 +2440,6 @@ void ff_mpeg_flush(AVCodecContext *avctx){
     s->pp_time=0;
 }
 
-static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale)
-{
-    int i, level, nCoeffs;
-    const uint16_t *quant_matrix;
-
-    nCoeffs= s->block_last_index[n];
-
-    if (n < 4)
-        block[0] = block[0] * s->y_dc_scale;
-    else
-        block[0] = block[0] * s->c_dc_scale;
-    /* XXX: only mpeg1 */
-    quant_matrix = s->intra_matrix;
-    for(i=1;i<=nCoeffs;i++) {
-        int j= s->intra_scantable.permutated[i];
-        level = block[j];
-        if (level) {
-            if (level < 0) {
-                level = -level;
-                level = (int)(level * qscale * quant_matrix[j]) >> 3;
-                level = (level - 1) | 1;
-                level = -level;
-            } else {
-                level = (int)(level * qscale * quant_matrix[j]) >> 3;
-                level = (level - 1) | 1;
-            }
-            block[j] = level;
-        }
-    }
-}
-
-static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale)
-{
-    int i, level, nCoeffs;
-    const uint16_t *quant_matrix;
-
-    nCoeffs= s->block_last_index[n];
-
-    quant_matrix = s->inter_matrix;
-    for(i=0; i<=nCoeffs; i++) {
-        int j= s->intra_scantable.permutated[i];
-        level = block[j];
-        if (level) {
-            if (level < 0) {
-                level = -level;
-                level = (((level << 1) + 1) * qscale *
-                         ((int) (quant_matrix[j]))) >> 4;
-                level = (level - 1) | 1;
-                level = -level;
-            } else {
-                level = (((level << 1) + 1) * qscale *
-                         ((int) (quant_matrix[j]))) >> 4;
-                level = (level - 1) | 1;
-            }
-            block[j] = level;
-        }
-    }
-}
-
-static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale)
-{
-    int i, level, nCoeffs;
-    const uint16_t *quant_matrix;
-
-    if(s->alternate_scan) nCoeffs= 63;
-    else nCoeffs= s->block_last_index[n];
-
-    if (n < 4)
-        block[0] = block[0] * s->y_dc_scale;
-    else
-        block[0] = block[0] * s->c_dc_scale;
-    quant_matrix = s->intra_matrix;
-    for(i=1;i<=nCoeffs;i++) {
-        int j= s->intra_scantable.permutated[i];
-        level = block[j];
-        if (level) {
-            if (level < 0) {
-                level = -level;
-                level = (int)(level * qscale * quant_matrix[j]) >> 3;
-                level = -level;
-            } else {
-                level = (int)(level * qscale * quant_matrix[j]) >> 3;
-            }
-            block[j] = level;
-        }
-    }
-}
-
-static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale)
-{
-    int i, level, nCoeffs;
-    const uint16_t *quant_matrix;
-    int sum=-1;
-
-    if(s->alternate_scan) nCoeffs= 63;
-    else nCoeffs= s->block_last_index[n];
-
-    if (n < 4)
-        block[0] = block[0] * s->y_dc_scale;
-    else
-        block[0] = block[0] * s->c_dc_scale;
-    quant_matrix = s->intra_matrix;
-    for(i=1;i<=nCoeffs;i++) {
-        int j= s->intra_scantable.permutated[i];
-        level = block[j];
-        if (level) {
-            if (level < 0) {
-                level = -level;
-                level = (int)(level * qscale * quant_matrix[j]) >> 3;
-                level = -level;
-            } else {
-                level = (int)(level * qscale * quant_matrix[j]) >> 3;
-            }
-            block[j] = level;
-            sum+=level;
-        }
-    }
-    block[63]^=sum&1;
-}
-
-static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
-                                   int16_t *block, int n, int qscale)
-{
-    int i, level, nCoeffs;
-    const uint16_t *quant_matrix;
-    int sum=-1;
-
-    if(s->alternate_scan) nCoeffs= 63;
-    else nCoeffs= s->block_last_index[n];
-
-    quant_matrix = s->inter_matrix;
-    for(i=0; i<=nCoeffs; i++) {
-        int j= s->intra_scantable.permutated[i];
-        level = block[j];
-        if (level) {
-            if (level < 0) {
-                level = -level;
-                level = (((level << 1) + 1) * qscale *
-                         ((int) (quant_matrix[j]))) >> 4;
-                level = -level;
-            } else {
-                level = (((level << 1) + 1) * qscale *
-                         ((int) (quant_matrix[j]))) >> 4;
-            }
-            block[j] = level;
-            sum+=level;
-        }
-    }
-    block[63]^=sum&1;
-}
-
-static void dct_unquantize_h263_intra_c(MpegEncContext *s,
-                                  int16_t *block, int n, int qscale)
-{
-    int i, level, qmul, qadd;
-    int nCoeffs;
-
-    assert(s->block_last_index[n]>=0);
-
-    qmul = qscale << 1;
-
-    if (!s->h263_aic) {
-        if (n < 4)
-            block[0] = block[0] * s->y_dc_scale;
-        else
-            block[0] = block[0] * s->c_dc_scale;
-        qadd = (qscale - 1) | 1;
-    }else{
-        qadd = 0;
-    }
-    if(s->ac_pred)
-        nCoeffs=63;
-    else
-        nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
-
-    for(i=1; i<=nCoeffs; i++) {
-        level = block[i];
-        if (level) {
-            if (level < 0) {
-                level = level * qmul - qadd;
-            } else {
-                level = level * qmul + qadd;
-            }
-            block[i] = level;
-        }
-    }
-}
-
-static void dct_unquantize_h263_inter_c(MpegEncContext *s,
-                                  int16_t *block, int n, int qscale)
-{
-    int i, level, qmul, qadd;
-    int nCoeffs;
-
-    assert(s->block_last_index[n]>=0);
-
-    qadd = (qscale - 1) | 1;
-    qmul = qscale << 1;
-
-    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
-
-    for(i=0; i<=nCoeffs; i++) {
-        level = block[i];
-        if (level) {
-            if (level < 0) {
-                level = level * qmul - qadd;
-            } else {
-                level = level * qmul + qadd;
-            }
-            block[i] = level;
-        }
-    }
-}
-
 /**
  * set qscale and update qscale dependent variables.
  */