avcodec: Drop long-deprecated imgconvert.h header
diff --git a/libavcodec/h263.c b/libavcodec/h263.c
index 5c25df22721e833210ab673b108fc5caa73a24e2..7f1966f8bf19beac6ef6ce86fcbf289b3fcbed58 100644
--- a/libavcodec/h263.c
+++ b/libavcodec/h263.c
@@ -1,5 +1,5 @@
 /*
- * H263/MPEG4 backend for ffmpeg encoder and decoder
+ * H263/MPEG4 backend for encoder and decoder
  * Copyright (c) 2000,2001 Fabrice Bellard
  * H263+ support.
  * Copyright (c) 2001 Juan J. Sierralta P
@@ -52,7 +52,7 @@ void ff_h263_update_motion_val(MpegEncContext * s){
     const int wrap = s->b8_stride;
     const int xy = s->block_index[0];
 
-    s->current_picture.mbskip_table[mb_xy]= s->mb_skipped;
+    s->current_picture.f.mbskip_table[mb_xy] = s->mb_skipped;
 
     if(s->mv_type != MV_TYPE_8X8){
         int motion_x, motion_y;
@@ -71,34 +71,34 @@ void ff_h263_update_motion_val(MpegEncContext * s){
                 s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
                 s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
             }
-            s->current_picture.ref_index[0][4*mb_xy    ]=
-            s->current_picture.ref_index[0][4*mb_xy + 1]= s->field_select[0][0];
-            s->current_picture.ref_index[0][4*mb_xy + 2]=
-            s->current_picture.ref_index[0][4*mb_xy + 3]= s->field_select[0][1];
+            s->current_picture.f.ref_index[0][4*mb_xy    ] =
+            s->current_picture.f.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
+            s->current_picture.f.ref_index[0][4*mb_xy + 2] =
+            s->current_picture.f.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
         }
 
         /* no update if 8X8 because it has been done during parsing */
-        s->current_picture.motion_val[0][xy][0] = motion_x;
-        s->current_picture.motion_val[0][xy][1] = motion_y;
-        s->current_picture.motion_val[0][xy + 1][0] = motion_x;
-        s->current_picture.motion_val[0][xy + 1][1] = motion_y;
-        s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
-        s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
-        s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
-        s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
+        s->current_picture.f.motion_val[0][xy][0]            = motion_x;
+        s->current_picture.f.motion_val[0][xy][1]            = motion_y;
+        s->current_picture.f.motion_val[0][xy + 1][0]        = motion_x;
+        s->current_picture.f.motion_val[0][xy + 1][1]        = motion_y;
+        s->current_picture.f.motion_val[0][xy + wrap][0]     = motion_x;
+        s->current_picture.f.motion_val[0][xy + wrap][1]     = motion_y;
+        s->current_picture.f.motion_val[0][xy + 1 + wrap][0] = motion_x;
+        s->current_picture.f.motion_val[0][xy + 1 + wrap][1] = motion_y;
     }
 
     if(s->encoding){ //FIXME encoding MUST be cleaned up
         if (s->mv_type == MV_TYPE_8X8)
-            s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
+            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
         else if(s->mb_intra)
-            s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA;
+            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA;
         else
-            s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
+            s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
     }
 }
 
-int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
+int ff_h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
 {
     int x, y, wrap, a, c, pred_dc;
     int16_t *dc_val;
@@ -154,7 +154,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
        Diag Top
        Left Center
     */
-    if(!IS_SKIP(s->current_picture.mb_type[xy])){
+    if (!IS_SKIP(s->current_picture.f.mb_type[xy])) {
         qp_c= s->qscale;
         s->dsp.h263_v_loop_filter(dest_y+8*linesize  , linesize, qp_c);
         s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
@@ -164,10 +164,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
     if(s->mb_y){
         int qp_dt, qp_tt, qp_tc;
 
-        if(IS_SKIP(s->current_picture.mb_type[xy-s->mb_stride]))
+        if (IS_SKIP(s->current_picture.f.mb_type[xy - s->mb_stride]))
             qp_tt=0;
         else
-            qp_tt= s->current_picture.qscale_table[xy-s->mb_stride];
+            qp_tt = s->current_picture.f.qscale_table[xy - s->mb_stride];
 
         if(qp_c)
             qp_tc= qp_c;
@@ -187,10 +187,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
             s->dsp.h263_h_loop_filter(dest_y-8*linesize+8  ,   linesize, qp_tt);
 
         if(s->mb_x){
-            if(qp_tt || IS_SKIP(s->current_picture.mb_type[xy-1-s->mb_stride]))
+            if (qp_tt || IS_SKIP(s->current_picture.f.mb_type[xy - 1 - s->mb_stride]))
                 qp_dt= qp_tt;
             else
-                qp_dt= s->current_picture.qscale_table[xy-1-s->mb_stride];
+                qp_dt = s->current_picture.f.qscale_table[xy - 1 - s->mb_stride];
 
             if(qp_dt){
                 const int chroma_qp= s->chroma_qscale_table[qp_dt];
@@ -209,10 +209,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
 
     if(s->mb_x){
         int qp_lc;
-        if(qp_c || IS_SKIP(s->current_picture.mb_type[xy-1]))
+        if (qp_c || IS_SKIP(s->current_picture.f.mb_type[xy - 1]))
             qp_lc= qp_c;
         else
-            qp_lc= s->current_picture.qscale_table[xy-1];
+            qp_lc = s->current_picture.f.qscale_table[xy - 1];
 
         if(qp_lc){
             s->dsp.h263_h_loop_filter(dest_y,   linesize, qp_lc);
@@ -226,7 +226,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
     }
 }
 
-void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
+void ff_h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
 {
     int x, y, wrap, a, c, pred_dc, scale, i;
     int16_t *dc_val, *ac_val, *ac_val1;
@@ -313,15 +313,15 @@ void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
         ac_val1[8 + i] = block[s->dsp.idct_permutation[i   ]];
 }
 
-int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
-                        int *px, int *py)
+int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
+                             int *px, int *py)
 {
     int wrap;
     int16_t *A, *B, *C, (*mot_val)[2];
     static const int off[4]= {2, 1, 1, -1};
 
     wrap = s->b8_stride;
-    mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
+    mot_val = s->current_picture.f.motion_val[dir] + s->block_index[block];
 
     A = mot_val[ - 1];
     /* special case for first (slice) line */