Merge remote-tracking branch 'qatar/master'
diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c
index 34958a7ee153cf1e6fab1723f69b9d811780593f..20155a0219f924cfe302dbbbc7e706130ffae6d4 100644
--- a/libavcodec/mpeg12.c
+++ b/libavcodec/mpeg12.c
@@ -30,6 +30,7 @@
 #include "avcodec.h"
 #include "dsputil.h"
 #include "mpegvideo.h"
+#include "libavutil/avassert.h"
 
 #include "mpeg12.h"
 #include "mpeg12data.h"
@@ -214,22 +215,22 @@ static int mpeg_decode_mb(MpegEncContext *s,
     assert(s->mb_skipped==0);
 
     if (s->mb_skip_run-- != 0) {
-        if (s->pict_type == FF_P_TYPE) {
+        if (s->pict_type == AV_PICTURE_TYPE_P) {
             s->mb_skipped = 1;
-            s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
+            s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
         } else {
             int mb_type;
 
             if(s->mb_x)
-                mb_type= s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1];
+                mb_type = s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
             else
-                mb_type= s->current_picture.mb_type[ s->mb_width + (s->mb_y-1)*s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
+                mb_type = s->current_picture.f.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
             if(IS_INTRA(mb_type))
                 return -1;
 
-            s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]=
+            s->current_picture.f.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
                 mb_type | MB_TYPE_SKIP;
-//            assert(s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));
+//            assert(s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));
 
             if((s->mv[0][0][0]|s->mv[0][0][1]|s->mv[1][0][0]|s->mv[1][0][1])==0)
                 s->mb_skipped = 1;
@@ -240,7 +241,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
 
     switch(s->pict_type) {
     default:
-    case FF_I_TYPE:
+    case AV_PICTURE_TYPE_I:
         if (get_bits1(&s->gb) == 0) {
             if (get_bits1(&s->gb) == 0){
                 av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in I Frame at %d %d\n", s->mb_x, s->mb_y);
@@ -251,7 +252,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
             mb_type = MB_TYPE_INTRA;
         }
         break;
-    case FF_P_TYPE:
+    case AV_PICTURE_TYPE_P:
         mb_type = get_vlc2(&s->gb, mb_ptype_vlc.table, MB_PTYPE_VLC_BITS, 1);
         if (mb_type < 0){
             av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in P Frame at %d %d\n", s->mb_x, s->mb_y);
@@ -259,7 +260,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
         }
         mb_type = ptype2mb_type[ mb_type ];
         break;
-    case FF_B_TYPE:
+    case AV_PICTURE_TYPE_B:
         mb_type = get_vlc2(&s->gb, mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1);
         if (mb_type < 0){
             av_log(s->avctx, AV_LOG_ERROR, "invalid mb type in B Frame at %d %d\n", s->mb_x, s->mb_y);
@@ -580,7 +581,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
         }
     }
 
-    s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= mb_type;
+    s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
 
     return 0;
 }
@@ -1198,7 +1199,7 @@ static int mpeg_decode_update_thread_context(AVCodecContext *avctx, const AVCode
     if(!ctx->mpeg_enc_ctx_allocated)
         memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));
 
-    if(!(s->pict_type == FF_B_TYPE || s->low_delay))
+    if(!(s->pict_type == AV_PICTURE_TYPE_B || s->low_delay))
         s->picture_number++;
 
     return 0;
@@ -1331,19 +1332,17 @@ static int mpeg_decode_postinit(AVCodecContext *avctx){
             avctx->ticks_per_frame=2;
         //MPEG-2 aspect
             if(s->aspect_ratio_info > 1){
-                AVRational dar=
+                AVRational dar =
                     av_mul_q(
-                        av_div_q(
-                            ff_mpeg2_aspect[s->aspect_ratio_info],
-                            (AVRational){s1->pan_scan.width, s1->pan_scan.height}
-                        ),
+                        av_div_q(ff_mpeg2_aspect[s->aspect_ratio_info],
+                                 (AVRational){s1->pan_scan.width, s1->pan_scan.height}),
                         (AVRational){s->width, s->height});
 
-                //we ignore the spec here and guess a bit as reality does not match the spec, see for example
+                // we ignore the spec here and guess a bit as reality does not match the spec, see for example
                 // res_change_ffmpeg_aspect.ts and sequence-display-aspect.mpg
-                //issue1613, 621, 562
-                if( (s1->pan_scan.width == 0 )||(s1->pan_scan.height == 0)
-                   || (av_cmp_q(dar,(AVRational){4,3})&&av_cmp_q(dar,(AVRational){16,9}))){
+                // issue1613, 621, 562
+                if((s1->pan_scan.width == 0 ) || (s1->pan_scan.height == 0) ||
+                   (av_cmp_q(dar,(AVRational){4,3}) && av_cmp_q(dar,(AVRational){16,9}))) {
                     s->avctx->sample_aspect_ratio=
                         av_div_q(
                          ff_mpeg2_aspect[s->aspect_ratio_info],
@@ -1408,7 +1407,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
         return -1;
 
     vbv_delay= get_bits(&s->gb, 16);
-    if (s->pict_type == FF_P_TYPE || s->pict_type == FF_B_TYPE) {
+    if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_B) {
         s->full_pel[0] = get_bits1(&s->gb);
         f_code = get_bits(&s->gb, 3);
         if (f_code == 0 && avctx->error_recognition >= FF_ER_COMPLIANT)
@@ -1416,7 +1415,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
         s->mpeg_f_code[0][0] = f_code;
         s->mpeg_f_code[0][1] = f_code;
     }
-    if (s->pict_type == FF_B_TYPE) {
+    if (s->pict_type == AV_PICTURE_TYPE_B) {
         s->full_pel[1] = get_bits1(&s->gb);
         f_code = get_bits(&s->gb, 3);
         if (f_code == 0 && avctx->error_recognition >= FF_ER_COMPLIANT)
@@ -1424,8 +1423,8 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
         s->mpeg_f_code[1][0] = f_code;
         s->mpeg_f_code[1][1] = f_code;
     }
-    s->current_picture.pict_type= s->pict_type;
-    s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
+    s->current_picture.f.pict_type = s->pict_type;
+    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
 
     if(avctx->debug & FF_DEBUG_PICT_INFO)
         av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
@@ -1573,13 +1572,13 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
         av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code, guessing missing values\n");
         if(s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1]==15){
             if(s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
-                s->pict_type= FF_I_TYPE;
+                s->pict_type= AV_PICTURE_TYPE_I;
             else
-                s->pict_type= FF_P_TYPE;
+                s->pict_type= AV_PICTURE_TYPE_P;
         }else
-            s->pict_type= FF_B_TYPE;
-        s->current_picture.pict_type= s->pict_type;
-        s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
+            s->pict_type= AV_PICTURE_TYPE_B;
+        s->current_picture.f.pict_type = s->pict_type;
+        s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
     }
     s->intra_dc_precision = get_bits(&s->gb, 2);
     s->picture_structure = get_bits(&s->gb, 2);
@@ -1656,21 +1655,21 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
         ff_er_frame_start(s);
 
         /* first check if we must repeat the frame */
-        s->current_picture_ptr->repeat_pict = 0;
+        s->current_picture_ptr->f.repeat_pict = 0;
         if (s->repeat_first_field) {
             if (s->progressive_sequence) {
                 if (s->top_field_first)
-                    s->current_picture_ptr->repeat_pict = 4;
+                    s->current_picture_ptr->f.repeat_pict = 4;
                 else
-                    s->current_picture_ptr->repeat_pict = 2;
+                    s->current_picture_ptr->f.repeat_pict = 2;
             } else if (s->progressive_frame) {
-                s->current_picture_ptr->repeat_pict = 1;
+                s->current_picture_ptr->f.repeat_pict = 1;
             }
         }
 
-        *s->current_picture_ptr->pan_scan= s1->pan_scan;
+        *s->current_picture_ptr->f.pan_scan = s1->pan_scan;
 
-        if (HAVE_PTHREADS && avctx->active_thread_type&FF_THREAD_FRAME)
+        if (HAVE_PTHREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
             ff_thread_finish_setup(avctx);
     }else{ //second field
             int i;
@@ -1681,9 +1680,9 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
             }
 
             for(i=0; i<4; i++){
-                s->current_picture.data[i] = s->current_picture_ptr->data[i];
+                s->current_picture.f.data[i] = s->current_picture_ptr->f.data[i];
                 if(s->picture_structure == PICT_BOTTOM_FIELD){
-                    s->current_picture.data[i] += s->current_picture_ptr->linesize[i];
+                    s->current_picture.f.data[i] += s->current_picture_ptr->f.linesize[i];
                 }
             }
     }
@@ -1790,7 +1789,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
         if(s->avctx->debug&FF_DEBUG_PICT_INFO){
              av_log(s->avctx, AV_LOG_DEBUG, "qp:%d fc:%2d%2d%2d%2d %s %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
                  s->qscale, s->mpeg_f_code[0][0],s->mpeg_f_code[0][1],s->mpeg_f_code[1][0],s->mpeg_f_code[1][1],
-                 s->pict_type == FF_I_TYPE ? "I" : (s->pict_type == FF_P_TYPE ? "P" : (s->pict_type == FF_B_TYPE ? "B" : "S")),
+                 s->pict_type == AV_PICTURE_TYPE_I ? "I" : (s->pict_type == AV_PICTURE_TYPE_P ? "P" : (s->pict_type == AV_PICTURE_TYPE_B ? "B" : "S")),
                  s->progressive_sequence ? "ps" :"", s->progressive_frame ? "pf" : "", s->alternate_scan ? "alt" :"", s->top_field_first ? "top" :"",
                  s->intra_dc_precision, s->picture_structure, s->frame_pred_frame_dct, s->concealment_motion_vectors,
                  s->q_scale_type, s->intra_vlc_format, s->repeat_first_field, s->chroma_420_type ? "420" :"");
@@ -1805,7 +1804,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
         if(mpeg_decode_mb(s, s->block) < 0)
             return -1;
 
-        if(s->current_picture.motion_val[0] && !s->encoding){ //note motion_val is normally NULL unless we want to extract the MVs
+        if (s->current_picture.f.motion_val[0] && !s->encoding) { //note motion_val is normally NULL unless we want to extract the MVs
             const int wrap = s->b8_stride;
             int xy = s->mb_x*2 + s->mb_y*2*wrap;
             int b8_xy= 4*(s->mb_x + s->mb_y*s->mb_stride);
@@ -1813,7 +1812,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
 
             for(i=0; i<2; i++){
                 for(dir=0; dir<2; dir++){
-                    if (s->mb_intra || (dir==1 && s->pict_type != FF_B_TYPE)) {
+                    if (s->mb_intra || (dir==1 && s->pict_type != AV_PICTURE_TYPE_B)) {
                         motion_x = motion_y = 0;
                     }else if (s->mv_type == MV_TYPE_16X16 || (s->mv_type == MV_TYPE_FIELD && field_pic)){
                         motion_x = s->mv[dir][0][0];
@@ -1823,12 +1822,12 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
                         motion_y = s->mv[dir][i][1];
                     }
 
-                    s->current_picture.motion_val[dir][xy    ][0] = motion_x;
-                    s->current_picture.motion_val[dir][xy    ][1] = motion_y;
-                    s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
-                    s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
-                    s->current_picture.ref_index [dir][b8_xy    ]=
-                    s->current_picture.ref_index [dir][b8_xy + 1]= s->field_select[dir][i];
+                    s->current_picture.f.motion_val[dir][xy    ][0] = motion_x;
+                    s->current_picture.f.motion_val[dir][xy    ][1] = motion_y;
+                    s->current_picture.f.motion_val[dir][xy + 1][0] = motion_x;
+                    s->current_picture.f.motion_val[dir][xy + 1][1] = motion_y;
+                    s->current_picture.f.ref_index [dir][b8_xy    ] =
+                    s->current_picture.f.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
                     assert(s->field_select[dir][i]==0 || s->field_select[dir][i]==1);
                 }
                 xy += wrap;
@@ -1853,7 +1852,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
 
             if(s->mb_y >= s->mb_height){
                 int left= get_bits_left(&s->gb);
-                int is_d10= s->chroma_format==2 && s->pict_type==FF_I_TYPE && avctx->profile==0 && avctx->level==5
+                int is_d10= s->chroma_format==2 && s->pict_type==AV_PICTURE_TYPE_I && avctx->profile==0 && avctx->level==5
                             && s->intra_dc_precision == 2 && s->q_scale_type == 1 && s->alternate_scan == 0
                             && s->progressive_frame == 0 /* vbv_delay == 0xBBB || 0xE10*/;
 
@@ -1896,7 +1895,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
             }
             if(s->mb_skip_run){
                 int i;
-                if(s->pict_type == FF_I_TYPE){
+                if(s->pict_type == AV_PICTURE_TYPE_I){
                     av_log(s->avctx, AV_LOG_ERROR, "skipped MB in I frame at %d %d\n", s->mb_x, s->mb_y);
                     return -1;
                 }
@@ -1909,7 +1908,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
                     s->mv_type = MV_TYPE_16X16;
                 else
                     s->mv_type = MV_TYPE_FIELD;
-                if (s->pict_type == FF_P_TYPE) {
+                if (s->pict_type == AV_PICTURE_TYPE_P) {
                     /* if P type, zero motion vector is implied */
                     s->mv_dir = MV_DIR_FORWARD;
                     s->mv[0][0][0] = s->mv[0][0][1] = 0;
@@ -1966,8 +1965,6 @@ static int slice_decode_thread(AVCodecContext *c, void *arg){
         if(mb_y < 0 || mb_y >= s->end_mb_y)
             return -1;
     }
-
-    return 0; //not reached
 }
 
 /**
@@ -1994,17 +1991,17 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
     if (/*s->mb_y<<field_pic == s->mb_height &&*/ !s->first_field) {
         /* end of image */
 
-        s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_MPEG2;
+        s->current_picture_ptr->f.qscale_type = FF_QSCALE_TYPE_MPEG2;
 
         ff_er_frame_end(s);
 
         MPV_frame_end(s);
 
-        if (s->pict_type == FF_B_TYPE || s->low_delay) {
+        if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
             *pict= *(AVFrame*)s->current_picture_ptr;
             ff_print_debug_info(s, pict);
         } else {
-            if (avctx->active_thread_type&FF_THREAD_FRAME)
+            if (avctx->active_thread_type & FF_THREAD_FRAME)
                 s->picture_number++;
             /* latency of 1 frame for I- and P-frames */
             /* XXX: use another variable than picture_number */
@@ -2179,14 +2176,13 @@ static void mpeg_decode_gop(AVCodecContext *avctx,
     Mpeg1Context *s1 = avctx->priv_data;
     MpegEncContext *s = &s1->mpeg_enc_ctx;
 
-    int drop_frame_flag;
     int time_code_hours, time_code_minutes;
     int time_code_seconds, time_code_pictures;
     int broken_link;
 
     init_get_bits(&s->gb, buf, buf_size*8);
 
-    drop_frame_flag = get_bits1(&s->gb);
+    skip_bits1(&s->gb); /* drop_frame_flag */
 
     time_code_hours=get_bits(&s->gb,5);
     time_code_minutes = get_bits(&s->gb,6);
@@ -2339,9 +2335,10 @@ static int decode_chunks(AVCodecContext *avctx,
         uint32_t start_code = -1;
         buf_ptr = ff_find_start_code(buf_ptr,buf_end, &start_code);
         if (start_code > 0x1ff){
-            if(s2->pict_type != FF_B_TYPE || avctx->skip_frame <= AVDISCARD_DEFAULT){
-                if(HAVE_THREADS && avctx->active_thread_type&FF_THREAD_SLICE){
+            if(s2->pict_type != AV_PICTURE_TYPE_B || avctx->skip_frame <= AVDISCARD_DEFAULT){
+                if(HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE)){
                     int i;
+                    av_assert0(avctx->thread_count > 1);
 
                     avctx->execute(avctx, slice_decode_thread,  &s2->thread_context[0], NULL, s->slice_count, sizeof(void*));
                     for(i=0; i<s->slice_count; i++)
@@ -2465,19 +2462,19 @@ static int decode_chunks(AVCodecContext *avctx,
 
                 if(s2->last_picture_ptr==NULL){
                 /* Skip B-frames if we do not have reference frames and gop is not closed */
-                    if(s2->pict_type==FF_B_TYPE){
+                    if(s2->pict_type==AV_PICTURE_TYPE_B){
                         if(!s2->closed_gop)
                             break;
                     }
                 }
-                if(s2->pict_type==FF_I_TYPE)
+                if(s2->pict_type==AV_PICTURE_TYPE_I)
                     s->sync=1;
                 if(s2->next_picture_ptr==NULL){
                 /* Skip P-frames if we do not have a reference frame or we have an invalid header. */
-                    if(s2->pict_type==FF_P_TYPE && !s->sync) break;
+                    if(s2->pict_type==AV_PICTURE_TYPE_P && !s->sync) break;
                 }
-                if(  (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==FF_B_TYPE)
-                    ||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=FF_I_TYPE)
+                if(  (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==AV_PICTURE_TYPE_B)
+                    ||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=AV_PICTURE_TYPE_I)
                     || avctx->skip_frame >= AVDISCARD_ALL)
                     break;
 
@@ -2508,8 +2505,9 @@ static int decode_chunks(AVCodecContext *avctx,
                     break;
                 }
 
-                if(HAVE_THREADS && avctx->active_thread_type&FF_THREAD_SLICE){
+                if(HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE)){
                     int threshold= (s2->mb_height*s->slice_count + avctx->thread_count/2) / avctx->thread_count;
+                    av_assert0(avctx->thread_count > 1);
                     if(threshold <= mb_y){
                         MpegEncContext *thread_context= s2->thread_context[s->slice_count];
 
@@ -2579,7 +2577,7 @@ AVCodec ff_mpeg1video_decoder = {
     NULL,
     mpeg_decode_end,
     mpeg_decode_frame,
-    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
+    CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
     .flush= flush,
     .max_lowres= 3,
     .long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"),