diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c
index ffe6b94c05a8cfba104fe38fb93f31d76f4a80f3..4b20610cab516c4778331f305c9be37e893769e9 100644
--- a/libavcodec/mpeg12.c
+++ b/libavcodec/mpeg12.c
@@ -37,6 +37,7 @@
 #include "bytestream.h"
 #include "vdpau_internal.h"
 #include "xvmc_internal.h"
+#include "thread.h"
 
 //#undef NDEBUG
 //#include <assert.h>
@@ -220,20 +221,20 @@ static int mpeg_decode_mb(MpegEncContext *s,
     if (s->mb_skip_run-- != 0) {
         if (s->pict_type == AV_PICTURE_TYPE_P) {
             s->mb_skipped = 1;
-            s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
+            s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
         } else {
             int mb_type;
 
             if(s->mb_x)
-                mb_type= s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1];
+                mb_type = s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
             else
-                mb_type= s->current_picture.mb_type[ s->mb_width + (s->mb_y-1)*s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
+                mb_type = s->current_picture.f.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
             if(IS_INTRA(mb_type))
                 return -1;
 
-            s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]=
+            s->current_picture.f.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
                 mb_type | MB_TYPE_SKIP;
-//            assert(s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));
+//            assert(s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));
 
             if((s->mv[0][0][0]|s->mv[0][0][1]|s->mv[1][0][0]|s->mv[1][0][1])==0)
                 s->mb_skipped = 1;
@@ -576,7 +577,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
         }
     }
 
-    s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= mb_type;
+    s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
 
     return 0;
 }
@@ -1179,6 +1180,27 @@ static av_cold int mpeg_decode_init(AVCodecContext *avctx)
     return 0;
 }
 
+static int mpeg_decode_update_thread_context(AVCodecContext *avctx, const AVCodecContext *avctx_from)
+{
+    Mpeg1Context *ctx = avctx->priv_data, *ctx_from = avctx_from->priv_data;
+    MpegEncContext *s = &ctx->mpeg_enc_ctx, *s1 = &ctx_from->mpeg_enc_ctx;
+    int err;
+
+    if(avctx == avctx_from || !ctx_from->mpeg_enc_ctx_allocated || !s1->context_initialized)
+        return 0;
+
+    err = ff_mpeg_update_thread_context(avctx, avctx_from);
+    if(err) return err;
+
+    if(!ctx->mpeg_enc_ctx_allocated)
+        memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));
+
+    if(!(s->pict_type == FF_B_TYPE || s->low_delay))
+        s->picture_number++;
+
+    return 0;
+}
+
 static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm,
                                      const uint8_t *new_perm){
     uint16_t temp_matrix[64];
@@ -1271,9 +1293,17 @@ static int mpeg_decode_postinit(AVCodecContext *avctx){
             avctx->ticks_per_frame=2;
         //MPEG-2 aspect
             if(s->aspect_ratio_info > 1){
-                //we ignore the spec here as reality does not match the spec, see for example
+                AVRational dar =
+                    av_mul_q(
+                        av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info],
+                                 (AVRational){s1->pan_scan.width, s1->pan_scan.height}),
+                        (AVRational){s->width, s->height});
+
+                // we ignore the spec here and guess a bit as reality does not match the spec, see for example
                 // res_change_ffmpeg_aspect.ts and sequence-display-aspect.mpg
-                if( (s1->pan_scan.width == 0 )||(s1->pan_scan.height == 0) || 1){
+                // issue1613, 621, 562
+                if((s1->pan_scan.width == 0 ) || (s1->pan_scan.height == 0) ||
+                   (av_cmp_q(dar,(AVRational){4,3}) && av_cmp_q(dar,(AVRational){16,9}))) {
                     s->avctx->sample_aspect_ratio=
                         av_div_q(
                          ff_mpeg2_aspect[s->aspect_ratio_info],
@@ -1285,6 +1315,12 @@ static int mpeg_decode_postinit(AVCodecContext *avctx){
                          ff_mpeg2_aspect[s->aspect_ratio_info],
                          (AVRational){s1->pan_scan.width, s1->pan_scan.height}
                         );
+//issue1613 4/3 16/9 -> 16/9
+//res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3
+//widescreen-issue562.mpg 4/3 16/9 -> 16/9
+//                    s->avctx->sample_aspect_ratio= av_mul_q(s->avctx->sample_aspect_ratio, (AVRational){s->width, s->height});
+//av_log(NULL, AV_LOG_ERROR, "A %d/%d\n",ff_mpeg2_aspect[s->aspect_ratio_info].num, ff_mpeg2_aspect[s->aspect_ratio_info].den);
+//av_log(NULL, AV_LOG_ERROR, "B %d/%d\n",s->avctx->sample_aspect_ratio.num, s->avctx->sample_aspect_ratio.den);
                 }
             }else{
                 s->avctx->sample_aspect_ratio=
@@ -1349,8 +1385,8 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
         s->mpeg_f_code[1][0] = f_code;
         s->mpeg_f_code[1][1] = f_code;
     }
-    s->current_picture.pict_type= s->pict_type;
-    s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
+    s->current_picture.f.pict_type = s->pict_type;
+    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
 
     if(avctx->debug & FF_DEBUG_PICT_INFO)
         av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
@@ -1503,8 +1539,8 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
                 s->pict_type= AV_PICTURE_TYPE_P;
         }else
             s->pict_type= AV_PICTURE_TYPE_B;
-        s->current_picture.pict_type= s->pict_type;
-        s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
+        s->current_picture.f.pict_type = s->pict_type;
+        s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
     }
     s->intra_dc_precision = get_bits(&s->gb, 2);
     s->picture_structure = get_bits(&s->gb, 2);
@@ -1582,19 +1618,22 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
         ff_er_frame_start(s);
 
         /* first check if we must repeat the frame */
-        s->current_picture_ptr->repeat_pict = 0;
+        s->current_picture_ptr->f.repeat_pict = 0;
         if (s->repeat_first_field) {
             if (s->progressive_sequence) {
                 if (s->top_field_first)
-                    s->current_picture_ptr->repeat_pict = 4;
+                    s->current_picture_ptr->f.repeat_pict = 4;
                 else
-                    s->current_picture_ptr->repeat_pict = 2;
+                    s->current_picture_ptr->f.repeat_pict = 2;
             } else if (s->progressive_frame) {
-                s->current_picture_ptr->repeat_pict = 1;
+                s->current_picture_ptr->f.repeat_pict = 1;
             }
         }
 
-        *s->current_picture_ptr->pan_scan= s1->pan_scan;
+        *s->current_picture_ptr->f.pan_scan = s1->pan_scan;
+
+        if (HAVE_PTHREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
+            ff_thread_finish_setup(avctx);
     }else{ //second field
             int i;
 
@@ -1604,9 +1643,9 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
             }
 
             for(i=0; i<4; i++){
-                s->current_picture.data[i] = s->current_picture_ptr->data[i];
+                s->current_picture.f.data[i] = s->current_picture_ptr->f.data[i];
                 if(s->picture_structure == PICT_BOTTOM_FIELD){
-                    s->current_picture.data[i] += s->current_picture_ptr->linesize[i];
+                    s->current_picture.f.data[i] += s->current_picture_ptr->f.linesize[i];
                 }
             }
     }
@@ -1728,7 +1767,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
         if(mpeg_decode_mb(s, s->block) < 0)
             return -1;
 
-        if(s->current_picture.motion_val[0] && !s->encoding){ //note motion_val is normally NULL unless we want to extract the MVs
+        if (s->current_picture.f.motion_val[0] && !s->encoding) { //note motion_val is normally NULL unless we want to extract the MVs
             const int wrap = s->b8_stride;
             int xy = s->mb_x*2 + s->mb_y*2*wrap;
             int b8_xy= 4*(s->mb_x + s->mb_y*s->mb_stride);
@@ -1746,12 +1785,12 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
                         motion_y = s->mv[dir][i][1];
                     }
 
-                    s->current_picture.motion_val[dir][xy    ][0] = motion_x;
-                    s->current_picture.motion_val[dir][xy    ][1] = motion_y;
-                    s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
-                    s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
-                    s->current_picture.ref_index [dir][b8_xy    ]=
-                    s->current_picture.ref_index [dir][b8_xy + 1]= s->field_select[dir][i];
+                    s->current_picture.f.motion_val[dir][xy    ][0] = motion_x;
+                    s->current_picture.f.motion_val[dir][xy    ][1] = motion_y;
+                    s->current_picture.f.motion_val[dir][xy + 1][0] = motion_x;
+                    s->current_picture.f.motion_val[dir][xy + 1][1] = motion_y;
+                    s->current_picture.f.ref_index [dir][b8_xy    ] =
+                    s->current_picture.f.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
                     assert(s->field_select[dir][i]==0 || s->field_select[dir][i]==1);
                 }
                 xy += wrap;
@@ -1769,6 +1808,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
             const int mb_size= 16>>s->avctx->lowres;
 
             ff_draw_horiz_band(s, mb_size*(s->mb_y>>field_pic), mb_size);
+            MPV_report_decode_progress(s);
 
             s->mb_x = 0;
             s->mb_y += 1<<field_pic;
@@ -1871,6 +1911,8 @@ static int slice_decode_thread(AVCodecContext *c, void *arg){
 //av_log(c, AV_LOG_DEBUG, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
 //ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, s->start_mb_y, s->end_mb_y, s->error_count);
         if(ret < 0){
+            if (c->error_recognition >= FF_ER_EXPLODE)
+                return AVERROR_INVALIDDATA;
             if(s->resync_mb_x>=0 && s->resync_mb_y>=0)
                 ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, AC_ERROR|DC_ERROR|MV_ERROR);
         }else{
@@ -1888,8 +1930,6 @@ static int slice_decode_thread(AVCodecContext *c, void *arg){
         if(mb_y < 0 || mb_y >= s->end_mb_y)
             return -1;
     }
-
-    return 0; //not reached
 }
 
 /**
@@ -1916,7 +1956,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
     if (/*s->mb_y<<field_pic == s->mb_height &&*/ !s->first_field) {
         /* end of image */
 
-        s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_MPEG2;
+        s->current_picture_ptr->f.qscale_type = FF_QSCALE_TYPE_MPEG2;
 
         ff_er_frame_end(s);
 
@@ -1926,7 +1966,8 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
             *pict= *(AVFrame*)s->current_picture_ptr;
             ff_print_debug_info(s, pict);
         } else {
-            s->picture_number++;
+            if (avctx->active_thread_type & FF_THREAD_FRAME)
+                s->picture_number++;
             /* latency of 1 frame for I- and P-frames */
             /* XXX: use another variable than picture_number */
             if (s->last_picture_ptr != NULL) {
@@ -2101,14 +2142,13 @@ static void mpeg_decode_gop(AVCodecContext *avctx,
     Mpeg1Context *s1 = avctx->priv_data;
     MpegEncContext *s = &s1->mpeg_enc_ctx;
 
-    int drop_frame_flag;
     int time_code_hours, time_code_minutes;
     int time_code_seconds, time_code_pictures;
     int broken_link;
 
     init_get_bits(&s->gb, buf, buf_size*8);
 
-    drop_frame_flag = get_bits1(&s->gb);
+    skip_bits1(&s->gb); /* drop_frame_flag */
 
     time_code_hours=get_bits(&s->gb,5);
     time_code_minutes = get_bits(&s->gb,6);
@@ -2222,25 +2262,15 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
             return buf_size;
     }
 
-#if 0
-    if (s->repeat_field % 2 == 1) {
-        s->repeat_field++;
-        //fprintf(stderr,"\nRepeating last frame: %d -> %d! pict: %d %d", avctx->frame_number-1, avctx->frame_number,
-        //        s2->picture_number, s->repeat_field);
-        if (avctx->flags & CODEC_FLAG_REPEAT_FIELD) {
-            *data_size = sizeof(AVPicture);
-            goto the_end;
-        }
-    }
-#endif
-
     if(s->mpeg_enc_ctx_allocated==0 && avctx->codec_tag == AV_RL32("VCR2"))
         vcr2_init_sequence(avctx);
 
     s->slice_count= 0;
 
-    if(avctx->extradata && !avctx->frame_number)
-        decode_chunks(avctx, picture, data_size, avctx->extradata, avctx->extradata_size);
+    if(avctx->extradata && !avctx->frame_number &&
+       decode_chunks(avctx, picture, data_size, avctx->extradata, avctx->extradata_size) < 0 &&
+       avctx->error_recognition >= FF_ER_EXPLODE)
+      return AVERROR_INVALIDDATA;
 
     return decode_chunks(avctx, picture, data_size, buf, buf_size);
 }
@@ -2262,7 +2292,7 @@ static int decode_chunks(AVCodecContext *avctx,
         buf_ptr = ff_find_start_code(buf_ptr,buf_end, &start_code);
         if (start_code > 0x1ff){
             if(s2->pict_type != AV_PICTURE_TYPE_B || avctx->skip_frame <= AVDISCARD_DEFAULT){
-                if(avctx->thread_count > 1){
+                if(HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE)){
                     int i;
 
                     avctx->execute(avctx, slice_decode_thread,  &s2->thread_context[0], NULL, s->slice_count, sizeof(void*));
@@ -2292,11 +2322,13 @@ static int decode_chunks(AVCodecContext *avctx,
         switch(start_code) {
         case SEQ_START_CODE:
             if(last_code == 0){
-            mpeg1_decode_sequence(avctx, buf_ptr,
-                                    input_size);
+                mpeg1_decode_sequence(avctx, buf_ptr,
+                                      input_size);
                 s->sync=1;
             }else{
                 av_log(avctx, AV_LOG_ERROR, "ignoring SEQ_START_CODE after %X\n", last_code);
+                if (avctx->error_recognition >= FF_ER_EXPLODE)
+                    return AVERROR_INVALIDDATA;
             }
             break;
 
@@ -2325,6 +2357,8 @@ static int decode_chunks(AVCodecContext *avctx,
             last_code= PICTURE_START_CODE;
             }else{
                 av_log(avctx, AV_LOG_ERROR, "ignoring pic after %X\n", last_code);
+                if (avctx->error_recognition >= FF_ER_EXPLODE)
+                    return AVERROR_INVALIDDATA;
             }
             break;
         case EXT_START_CODE:
@@ -2336,6 +2370,8 @@ static int decode_chunks(AVCodecContext *avctx,
                 mpeg_decode_sequence_extension(s);
                 }else{
                     av_log(avctx, AV_LOG_ERROR, "ignoring seq ext after %X\n", last_code);
+                    if (avctx->error_recognition >= FF_ER_EXPLODE)
+                        return AVERROR_INVALIDDATA;
                 }
                 break;
             case 0x2:
@@ -2352,6 +2388,8 @@ static int decode_chunks(AVCodecContext *avctx,
                 mpeg_decode_picture_coding_extension(s);
                 }else{
                     av_log(avctx, AV_LOG_ERROR, "ignoring pic cod ext after %X\n", last_code);
+                    if (avctx->error_recognition >= FF_ER_EXPLODE)
+                        return AVERROR_INVALIDDATA;
                 }
                 break;
             }
@@ -2368,6 +2406,8 @@ static int decode_chunks(AVCodecContext *avctx,
                 s->sync=1;
             }else{
                 av_log(avctx, AV_LOG_ERROR, "ignoring GOP_START_CODE after %X\n", last_code);
+                if (avctx->error_recognition >= FF_ER_EXPLODE)
+                    return AVERROR_INVALIDDATA;
             }
             break;
         default:
@@ -2412,6 +2452,8 @@ static int decode_chunks(AVCodecContext *avctx,
 
                 if(!s2->pict_type){
                     av_log(avctx, AV_LOG_ERROR, "Missing picture start code\n");
+                    if (avctx->error_recognition >= FF_ER_EXPLODE)
+                        return AVERROR_INVALIDDATA;
                     break;
                 }
 
@@ -2422,6 +2464,8 @@ static int decode_chunks(AVCodecContext *avctx,
                 }
                 if(!s2->current_picture_ptr){
                     av_log(avctx, AV_LOG_ERROR, "current_picture not initialized\n");
+                    if (avctx->error_recognition >= FF_ER_EXPLODE)
+                        return AVERROR_INVALIDDATA;
                     return -1;
                 }
 
@@ -2430,7 +2474,7 @@ static int decode_chunks(AVCodecContext *avctx,
                     break;
                 }
 
-                if(avctx->thread_count > 1){
+                if(HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE)){
                     int threshold= (s2->mb_height*s->slice_count + avctx->thread_count/2) / avctx->thread_count;
                     if(threshold <= mb_y){
                         MpegEncContext *thread_context= s2->thread_context[s->slice_count];
@@ -2450,6 +2494,8 @@ static int decode_chunks(AVCodecContext *avctx,
                     emms_c();
 
                     if(ret < 0){
+                        if (avctx->error_recognition >= FF_ER_EXPLODE)
+                            return AVERROR_INVALIDDATA;
                         if(s2->resync_mb_x>=0 && s2->resync_mb_y>=0)
                             ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x, s2->mb_y, AC_ERROR|DC_ERROR|MV_ERROR);
                     }else{
@@ -2493,30 +2539,29 @@ static const AVProfile mpeg2_video_profiles[] = {
 
 
 AVCodec ff_mpeg1video_decoder = {
-    "mpeg1video",
-    AVMEDIA_TYPE_VIDEO,
-    CODEC_ID_MPEG1VIDEO,
-    sizeof(Mpeg1Context),
-    mpeg_decode_init,
-    NULL,
-    mpeg_decode_end,
-    mpeg_decode_frame,
-    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
+    .name           = "mpeg1video",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = CODEC_ID_MPEG1VIDEO,
+    .priv_data_size = sizeof(Mpeg1Context),
+    .init           = mpeg_decode_init,
+    .close          = mpeg_decode_end,
+    .decode         = mpeg_decode_frame,
+    .capabilities   = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
     .flush= flush,
     .max_lowres= 3,
     .long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"),
+    .update_thread_context= ONLY_IF_THREADS_ENABLED(mpeg_decode_update_thread_context)
 };
 
 AVCodec ff_mpeg2video_decoder = {
-    "mpeg2video",
-    AVMEDIA_TYPE_VIDEO,
-    CODEC_ID_MPEG2VIDEO,
-    sizeof(Mpeg1Context),
-    mpeg_decode_init,
-    NULL,
-    mpeg_decode_end,
-    mpeg_decode_frame,
-    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
+    .name           = "mpeg2video",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = CODEC_ID_MPEG2VIDEO,
+    .priv_data_size = sizeof(Mpeg1Context),
+    .init           = mpeg_decode_init,
+    .close          = mpeg_decode_end,
+    .decode         = mpeg_decode_frame,
+    .capabilities   = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
     .flush= flush,
     .max_lowres= 3,
     .long_name= NULL_IF_CONFIG_SMALL("MPEG-2 video"),
@@ -2525,15 +2570,14 @@ AVCodec ff_mpeg2video_decoder = {
 
 //legacy decoder
 AVCodec ff_mpegvideo_decoder = {
-    "mpegvideo",
-    AVMEDIA_TYPE_VIDEO,
-    CODEC_ID_MPEG2VIDEO,
-    sizeof(Mpeg1Context),
-    mpeg_decode_init,
-    NULL,
-    mpeg_decode_end,
-    mpeg_decode_frame,
-    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
+    .name           = "mpegvideo",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = CODEC_ID_MPEG2VIDEO,
+    .priv_data_size = sizeof(Mpeg1Context),
+    .init           = mpeg_decode_init,
+    .close          = mpeg_decode_end,
+    .decode         = mpeg_decode_frame,
+    .capabilities   = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
     .flush= flush,
     .max_lowres= 3,
     .long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"),
@@ -2541,7 +2585,7 @@ AVCodec ff_mpegvideo_decoder = {
 
 #if CONFIG_MPEG_XVMC_DECODER
 static av_cold int mpeg_mc_decode_init(AVCodecContext *avctx){
-    if( avctx->thread_count > 1)
+    if( avctx->active_thread_type & FF_THREAD_SLICE )
         return -1;
     if( !(avctx->slice_flags & SLICE_FLAG_CODED_ORDER) )
         return -1;
@@ -2557,15 +2601,14 @@ static av_cold int mpeg_mc_decode_init(AVCodecContext *avctx){
 }
 
 AVCodec ff_mpeg_xvmc_decoder = {
-    "mpegvideo_xvmc",
-    AVMEDIA_TYPE_VIDEO,
-    CODEC_ID_MPEG2VIDEO_XVMC,
-    sizeof(Mpeg1Context),
-    mpeg_mc_decode_init,
-    NULL,
-    mpeg_decode_end,
-    mpeg_decode_frame,
-    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED| CODEC_CAP_HWACCEL | CODEC_CAP_DELAY,
+    .name           = "mpegvideo_xvmc",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = CODEC_ID_MPEG2VIDEO_XVMC,
+    .priv_data_size = sizeof(Mpeg1Context),
+    .init           = mpeg_mc_decode_init,
+    .close          = mpeg_decode_end,
+    .decode         = mpeg_decode_frame,
+    .capabilities   = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED| CODEC_CAP_HWACCEL | CODEC_CAP_DELAY,
     .flush= flush,
     .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video XvMC (X-Video Motion Compensation)"),
 };
@@ -2574,15 +2617,14 @@ AVCodec ff_mpeg_xvmc_decoder = {
 
 #if CONFIG_MPEG_VDPAU_DECODER
 AVCodec ff_mpeg_vdpau_decoder = {
-    "mpegvideo_vdpau",
-    AVMEDIA_TYPE_VIDEO,
-    CODEC_ID_MPEG2VIDEO,
-    sizeof(Mpeg1Context),
-    mpeg_decode_init,
-    NULL,
-    mpeg_decode_end,
-    mpeg_decode_frame,
-    CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
+    .name           = "mpegvideo_vdpau",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = CODEC_ID_MPEG2VIDEO,
+    .priv_data_size = sizeof(Mpeg1Context),
+    .init           = mpeg_decode_init,
+    .close          = mpeg_decode_end,
+    .decode         = mpeg_decode_frame,
+    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
     .flush= flush,
     .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video (VDPAU acceleration)"),
 };
@@ -2590,15 +2632,14 @@ AVCodec ff_mpeg_vdpau_decoder = {
 
 #if CONFIG_MPEG1_VDPAU_DECODER
 AVCodec ff_mpeg1_vdpau_decoder = {
-    "mpeg1video_vdpau",
-    AVMEDIA_TYPE_VIDEO,
-    CODEC_ID_MPEG1VIDEO,
-    sizeof(Mpeg1Context),
-    mpeg_decode_init,
-    NULL,
-    mpeg_decode_end,
-    mpeg_decode_frame,
-    CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
+    .name           = "mpeg1video_vdpau",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = CODEC_ID_MPEG1VIDEO,
+    .priv_data_size = sizeof(Mpeg1Context),
+    .init           = mpeg_decode_init,
+    .close          = mpeg_decode_end,
+    .decode         = mpeg_decode_frame,
+    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
     .flush= flush,
     .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video (VDPAU acceleration)"),
 };