diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index e4c45886dcb9d770afa3f4feb9915df09f447422..8be596b39e1873c66df61ac85a40be38eb12b243 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -176,7 +176,7 @@ const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
 /* init common dct for both encoder and decoder */
 av_cold int ff_dct_common_init(MpegEncContext *s)
 {
-    dsputil_init(&s->dsp, s->avctx);
+    ff_dsputil_init(&s->dsp, s->avctx);
 
     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
@@ -188,19 +188,17 @@ av_cold int ff_dct_common_init(MpegEncContext *s)
     s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
 
 #if HAVE_MMX
-    MPV_common_init_mmx(s);
+    ff_MPV_common_init_mmx(s);
 #elif ARCH_ALPHA
-    MPV_common_init_axp(s);
-#elif CONFIG_MLIB
-    MPV_common_init_mlib(s);
+    ff_MPV_common_init_axp(s);
 #elif HAVE_MMI
-    MPV_common_init_mmi(s);
+    ff_MPV_common_init_mmi(s);
 #elif ARCH_ARM
-    MPV_common_init_arm(s);
+    ff_MPV_common_init_arm(s);
 #elif HAVE_ALTIVEC
-    MPV_common_init_altivec(s);
+    ff_MPV_common_init_altivec(s);
 #elif ARCH_BFIN
-    MPV_common_init_bfin(s);
+    ff_MPV_common_init_bfin(s);
 #endif
 
     /* load & permutate scantables
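
Most of this commit is mechanical renaming: MPV_common_init(), init_rl() and the
other non-static mpegvideo entry points gain the ff_ prefix that libavcodec uses
for symbols which are internal to the library but still visible across object
files, so they cannot collide with names in applications or other libraries that
link against it. A minimal sketch of the convention, with hypothetical names
rather than the real FFmpeg declarations:

    /* sketch only -- illustrates the ff_ prefix rule, not actual FFmpeg code */
    #include <stdio.h>

    /* would live in an internal header, shared between libavcodec files:
     * non-static, therefore ff_-prefixed */
    void ff_mpv_sketch_defaults(int *qscale);

    /* purely file-local helper: static, so no prefix is needed */
    static int clip_qscale(int q)
    {
        return q < 1 ? 1 : q > 31 ? 31 : q;
    }

    void ff_mpv_sketch_defaults(int *qscale)
    {
        *qscale = clip_qscale(*qscale);
    }

    int main(void)
    {
        int q = 40;
        ff_mpv_sketch_defaults(&q);
        printf("qscale clamped to %d\n", q);
        return 0;
    }
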
@@ -234,9 +232,9 @@ static void free_frame_buffer(MpegEncContext *s, Picture *pic)
      * dimensions; ignore user defined callbacks for these
      */
     if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
-        ff_thread_release_buffer(s->avctx, (AVFrame *) pic);
+        ff_thread_release_buffer(s->avctx, &pic->f);
     else
-        avcodec_default_release_buffer(s->avctx, (AVFrame *) pic);
+        avcodec_default_release_buffer(s->avctx, &pic->f);
     av_freep(&pic->f.hwaccel_picture_private);
 }
 
@@ -259,9 +257,9 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
     }
 
     if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
-        r = ff_thread_get_buffer(s->avctx, (AVFrame *) pic);
+        r = ff_thread_get_buffer(s->avctx, &pic->f);
     else
-        r = avcodec_default_get_buffer(s->avctx, (AVFrame *) pic);
+        r = avcodec_default_get_buffer(s->avctx, &pic->f);
 
     if (r < 0 || !pic->f.type || !pic->f.data[0]) {
         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
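
The two hunks above (and the later coded_frame and draw_horiz_band changes) stop
casting a Picture pointer to AVFrame * and instead pass the address of the
embedded AVFrame member f. The cast only worked because f happened to be the
first member of Picture; &pic->f states the intent explicitly and keeps working
if the struct layout ever changes. A stand-alone sketch of the pattern, using
stub types instead of the real Picture/AVFrame definitions:

    /* sketch only -- stub types, not the real libavcodec structures */
    #include <stdio.h>

    typedef struct FrameStub { int width, height; } FrameStub;

    typedef struct PictureStub {
        FrameStub f;       /* embedded frame; currently the first member... */
        int       mb_hint; /* ...but other fields may be added around it */
    } PictureStub;

    static void release_buffer(FrameStub *frame)
    {
        printf("releasing %dx%d frame\n", frame->width, frame->height);
    }

    int main(void)
    {
        PictureStub pic = { { 640, 480 }, 0 };
        release_buffer(&pic.f);   /* explicit: no struct-layout assumption */
        return 0;
    }
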
@@ -460,7 +458,7 @@ static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
 
     return 0;
 fail:
-    return -1; // free() through MPV_common_end()
+    return -1; // free() through ff_MPV_common_end()
 }
 
 static void free_duplicate_context(MpegEncContext *s)
@@ -545,7 +543,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
         s->bitstream_buffer      = NULL;
         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
 
-        MPV_common_init(s);
+        ff_MPV_common_init(s);
     }
 
     s->avctx->coded_height  = s1->avctx->coded_height;
@@ -617,7 +615,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
  * The changed fields will not depend upon the
  * prior state of the MpegEncContext.
  */
-void MPV_common_defaults(MpegEncContext *s)
+void ff_MPV_common_defaults(MpegEncContext *s)
 {
     s->y_dc_scale_table      =
     s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
@@ -646,16 +644,16 @@ void MPV_common_defaults(MpegEncContext *s)
  * the changed fields will not depend upon
  * the prior state of the MpegEncContext.
  */
-void MPV_decode_defaults(MpegEncContext *s)
+void ff_MPV_decode_defaults(MpegEncContext *s)
 {
-    MPV_common_defaults(s);
+    ff_MPV_common_defaults(s);
 }
 
 /**
  * init common structure for both encoder and decoder.
  * this assumes that some variables like width/height are already set
  */
-av_cold int MPV_common_init(MpegEncContext *s)
+av_cold int ff_MPV_common_init(MpegEncContext *s)
 {
     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
     int nb_slices = (HAVE_THREADS &&
@@ -731,7 +729,7 @@ av_cold int MPV_common_init(MpegEncContext *s)
 
         s->stream_codec_tag   = avpriv_toupper4(s->avctx->stream_codec_tag);
 
-        s->avctx->coded_frame = (AVFrame *)&s->current_picture;
+        s->avctx->coded_frame = &s->current_picture.f;
 
         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                           fail); // error ressilience code looks cleaner with this
@@ -807,7 +805,7 @@ av_cold int MPV_common_init(MpegEncContext *s)
     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                       s->picture_count * sizeof(Picture), fail);
     for (i = 0; i < s->picture_count; i++) {
-        avcodec_get_frame_defaults((AVFrame *) &s->picture[i]);
+        avcodec_get_frame_defaults(&s->picture[i].f);
     }
 
     if (s->width && s->height) {
@@ -915,12 +913,12 @@ av_cold int MPV_common_init(MpegEncContext *s)
 
     return 0;
  fail:
-    MPV_common_end(s);
+    ff_MPV_common_end(s);
     return -1;
 }
 
 /* init common structure for both encoder and decoder */
-void MPV_common_end(MpegEncContext *s)
+void ff_MPV_common_end(MpegEncContext *s)
 {
     int i, j, k;
 
@@ -1005,8 +1003,8 @@ void MPV_common_end(MpegEncContext *s)
         avcodec_default_free_buffers(s->avctx);
 }
 
-void init_rl(RLTable *rl,
-             uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
+void ff_init_rl(RLTable *rl,
+                uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
 {
     int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
     uint8_t index_run[MAX_RUN + 1];
@@ -1057,7 +1055,7 @@ void init_rl(RLTable *rl,
     }
 }
 
-void init_vlc_rl(RLTable *rl)
+void ff_init_vlc_rl(RLTable *rl)
 {
     int i, q;
 
@@ -1160,7 +1158,7 @@ static void update_noise_reduction(MpegEncContext *s)
  * generic function for encode/decode called after coding/decoding
  * the header and before a frame is coded/decoded.
  */
-int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
+int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 {
     int i;
     Picture *pic;
@@ -1170,25 +1168,26 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
            s->codec_id == CODEC_ID_SVQ3);
 
     /* mark & release old frames */
-    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
-        s->last_picture_ptr != s->next_picture_ptr &&
-        s->last_picture_ptr->f.data[0]) {
-        if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
+    if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
+        if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
+            s->last_picture_ptr != s->next_picture_ptr &&
+            s->last_picture_ptr->f.data[0]) {
             if (s->last_picture_ptr->owner2 == s)
                 free_frame_buffer(s, s->last_picture_ptr);
+        }
 
-            /* release forgotten pictures */
-            /* if (mpeg124/h263) */
-            if (!s->encoding) {
-                for (i = 0; i < s->picture_count; i++) {
-                    if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
-                        &s->picture[i] != s->next_picture_ptr &&
-                        s->picture[i].f.reference) {
-                        if (!(avctx->active_thread_type & FF_THREAD_FRAME))
-                            av_log(avctx, AV_LOG_ERROR,
-                                   "releasing zombie picture\n");
-                        free_frame_buffer(s, &s->picture[i]);
-                    }
+        /* release forgotten pictures */
+        /* if (mpeg124/h263) */
+        if (!s->encoding) {
+            for (i = 0; i < s->picture_count; i++) {
+                if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
+                    &s->picture[i] != s->last_picture_ptr &&
+                    &s->picture[i] != s->next_picture_ptr &&
+                    s->picture[i].f.reference) {
+                    if (!(avctx->active_thread_type & FF_THREAD_FRAME))
+                        av_log(avctx, AV_LOG_ERROR,
+                               "releasing zombie picture\n");
+                    free_frame_buffer(s, &s->picture[i]);
                 }
             }
         }
@@ -1270,10 +1269,8 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
             s->last_picture_ptr = &s->picture[i];
             if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
                 return -1;
-            ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
-                                      INT_MAX, 0);
-            ff_thread_report_progress((AVFrame *) s->last_picture_ptr,
-                                      INT_MAX, 1);
+            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
+            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
         }
         if ((s->next_picture_ptr == NULL ||
              s->next_picture_ptr->f.data[0] == NULL) &&
@@ -1283,10 +1280,8 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
             s->next_picture_ptr = &s->picture[i];
             if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
                 return -1;
-            ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
-                                      INT_MAX, 0);
-            ff_thread_report_progress((AVFrame *) s->next_picture_ptr,
-                                      INT_MAX, 1);
+            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
+            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
         }
     }
 
@@ -1295,6 +1290,14 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
     if (s->next_picture_ptr)
         ff_copy_picture(&s->next_picture, s->next_picture_ptr);
 
+    if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
+        (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
+        if (s->next_picture_ptr)
+            s->next_picture_ptr->owner2 = s;
+        if (s->last_picture_ptr)
+            s->last_picture_ptr->owner2 = s;
+    }
+
     assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                  s->last_picture_ptr->f.data[0]));
 
@@ -1340,7 +1343,7 @@ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 
 /* generic function for encode/decode called after a
  * frame has been coded/decoded. */
-void MPV_frame_end(MpegEncContext *s)
+void ff_MPV_frame_end(MpegEncContext *s)
 {
     int i;
     /* redraw edges for the frame if decoding didn't complete */
@@ -1403,10 +1406,10 @@ void MPV_frame_end(MpegEncContext *s)
     memset(&s->next_picture,    0, sizeof(Picture));
     memset(&s->current_picture, 0, sizeof(Picture));
 #endif
-    s->avctx->coded_frame = (AVFrame *) s->current_picture_ptr;
+    s->avctx->coded_frame = &s->current_picture_ptr->f;
 
     if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
-        ff_thread_report_progress((AVFrame *) s->current_picture_ptr,
+        ff_thread_report_progress(&s->current_picture_ptr->f,
                                   s->mb_height - 1, 0);
     }
 }
@@ -1834,8 +1837,8 @@ static inline int hpel_motion_lowres(MpegEncContext *s,
 
     src   += src_y * stride + src_x;
 
-    if ((unsigned)src_x >  h_edge_pos - (!!sx) - w ||
-        (unsigned)src_y > (v_edge_pos >> field_based) - (!!sy) - h) {
+    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
+        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
         s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
                                 (h + 1) << field_based, src_x,
                                 src_y   << field_based,
@@ -1919,8 +1922,8 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
     ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
     ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
 
-    if ((unsigned) src_x >  h_edge_pos - (!!sx) - 2 * block_s ||
-        (unsigned) src_y > (v_edge_pos >> field_based) - (!!sy) - h) {
+    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s,       0) ||
+        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
         s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 s->linesize, 17, 17 + field_based,
                                 src_x, src_y << field_based, h_edge_pos,
@@ -2002,8 +2005,8 @@ static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
     offset = src_y * s->uvlinesize + src_x;
     ptr = ref_picture[1] + offset;
     if (s->flags & CODEC_FLAG_EMU_EDGE) {
-        if ((unsigned) src_x > h_edge_pos - (!!sx) - block_s ||
-            (unsigned) src_y > v_edge_pos - (!!sy) - block_s) {
+        if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
+            (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
             s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
                                     9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
             ptr = s->edge_emu_buffer;
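
The three lowres motion-compensation hunks above clamp the edge test with
FFMAX(..., 0). The right-hand side of the comparison can go negative for very
small frames, and since the left-hand side is unsigned, the negative value is
converted to a huge unsigned number and the edge-emulation branch is skipped
exactly when it is needed. A self-contained illustration with made-up
coordinates (not FFmpeg code):

    /* sketch only -- shows the signed/unsigned pitfall the FFMAX clamp fixes */
    #include <stdio.h>

    int main(void)
    {
        unsigned src_x      = 3;                  /* left edge of the block  */
        int      h_edge_pos = 8, sx = 1, w = 16;  /* made-up values; edge is */
                                                  /* smaller than the block  */

        int limit = h_edge_pos - (!!sx) - w;      /* 8 - 1 - 16 = -9         */

        /* old test: -9 converts to a huge unsigned value, so the branch is
         * (wrongly) not taken although the 16-wide block at x=3 crosses
         * h_edge_pos=8 and needs edge emulation */
        printf("old test triggers: %d\n", src_x > (unsigned)limit);

        /* new test: clamping to 0 (what FFMAX(limit, 0) does) keeps the
         * comparison meaningful, so edge emulation is requested */
        int clamped = limit > 0 ? limit : 0;
        printf("new test triggers: %d\n", src_x > (unsigned)clamped);
        return 0;
    }
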
@@ -2168,7 +2171,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
 /**
  * find the lowest MB row referenced in the MVs
  */
-int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
+int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
 {
     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
     int my, off, i, mvs;
@@ -2358,10 +2361,14 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
 
                 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                     if (s->mv_dir & MV_DIR_FORWARD) {
-                        ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
+                        ff_thread_await_progress(&s->last_picture_ptr->f,
+                                                 ff_MPV_lowest_referenced_row(s, 0),
+                                                 0);
                     }
                     if (s->mv_dir & MV_DIR_BACKWARD) {
-                        ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
+                        ff_thread_await_progress(&s->next_picture_ptr->f,
+                                                 ff_MPV_lowest_referenced_row(s, 1),
+                                                 0);
                     }
                 }
 
@@ -2512,7 +2519,7 @@ skip_idct:
     }
 }
 
-void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
+void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
 #if !CONFIG_SMALL
     if(s->out_format == FMT_MPEG1) {
         if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
@@ -2568,9 +2575,9 @@ void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
         int i;
 
         if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
-            src= (AVFrame*)s->current_picture_ptr;
+            src = &s->current_picture_ptr->f;
         else if(s->last_picture_ptr)
-            src= (AVFrame*)s->last_picture_ptr;
+            src = &s->last_picture_ptr->f;
         else
             return;
 
@@ -2886,8 +2893,8 @@ void ff_set_qscale(MpegEncContext * s, int qscale)
     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
 }
 
-void MPV_report_decode_progress(MpegEncContext *s)
+void ff_MPV_report_decode_progress(MpegEncContext *s)
 {
     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
-        ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
+        ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);
 }
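
The ff_thread_report_progress() / ff_thread_await_progress() calls touched in
this file (now taking the embedded AVFrame directly) implement the
frame-threading handshake: the thread decoding a reference picture publishes
the last macroblock row it has finished, and a thread doing motion compensation
from that picture waits until the rows it references are available. A minimal
pthread sketch of that handshake, with invented names and none of the real
ff_thread_* plumbing (compile with -pthread):

    /* sketch only -- a toy version of report/await progress, not FFmpeg code */
    #include <pthread.h>
    #include <stdio.h>

    typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             progress;   /* last completed row, -1 = none yet */
    } ProgressFrame;

    static ProgressFrame ref = { PTHREAD_MUTEX_INITIALIZER,
                                 PTHREAD_COND_INITIALIZER, -1 };

    static void report_progress(ProgressFrame *f, int row)
    {
        pthread_mutex_lock(&f->lock);
        f->progress = row;
        pthread_cond_broadcast(&f->cond);
        pthread_mutex_unlock(&f->lock);
    }

    static void await_progress(ProgressFrame *f, int row)
    {
        pthread_mutex_lock(&f->lock);
        while (f->progress < row)
            pthread_cond_wait(&f->cond, &f->lock);
        pthread_mutex_unlock(&f->lock);
    }

    static void *decode_reference(void *arg)
    {
        (void)arg;
        for (int row = 0; row < 16; row++)   /* pretend to decode 16 MB rows */
            report_progress(&ref, row);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, decode_reference, NULL);
        await_progress(&ref, 7);             /* need rows 0..7 before MC */
        printf("rows 0..7 of the reference are ready\n");
        pthread_join(&t, NULL);
        return 0;
    }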