avcodec/ituh263enc: Make static initializations thread-safe
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index 34dcf8c313c05eba9fd160cb4b5b03f1a6d6eca4..0d86c0a2cdf7df5a7acd8379264415f3b0431fd9 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -67,7 +67,6 @@
 #include "wmv2.h"
 #include "rv10.h"
 #include "packet_internal.h"
-#include "libxvid.h"
 #include <limits.h>
 #include "sp5x.h"
 
@@ -87,6 +86,12 @@ static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
 
 const AVOption ff_mpv_generic_options[] = {
     FF_MPV_COMMON_OPTS
+#if FF_API_MPEGVIDEO_OPTS
+    FF_MPV_DEPRECATED_MPEG_QUANT_OPT
+    FF_MPV_DEPRECATED_A53_CC_OPT
+    FF_MPV_DEPRECATED_MATRIX_OPT
+    FF_MPV_DEPRECATED_BFRAME_OPTS
+#endif
     { NULL },
 };
 
@@ -298,59 +303,10 @@ av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
 {
     MpegEncContext *s = avctx->priv_data;
     AVCPBProperties *cpb_props;
-    int i, ret, format_supported;
+    int i, ret;
 
     mpv_encode_defaults(s);
 
-    switch (avctx->codec_id) {
-    case AV_CODEC_ID_MPEG2VIDEO:
-        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
-            avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
-            av_log(avctx, AV_LOG_ERROR,
-                   "only YUV420 and YUV422 are supported\n");
-            return AVERROR(EINVAL);
-        }
-        break;
-    case AV_CODEC_ID_MJPEG:
-    case AV_CODEC_ID_AMV:
-        format_supported = 0;
-        /* JPEG color space */
-        if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
-            avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
-            avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
-            (avctx->color_range == AVCOL_RANGE_JPEG &&
-             (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
-              avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
-              avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
-            format_supported = 1;
-        /* MPEG color space */
-        else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
-                 (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
-                  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
-                  avctx->pix_fmt == AV_PIX_FMT_YUV444P))
-            format_supported = 1;
-
-        if (!format_supported) {
-            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
-            return AVERROR(EINVAL);
-        }
-        break;
-    case AV_CODEC_ID_SPEEDHQ:
-        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
-            avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
-            avctx->pix_fmt != AV_PIX_FMT_YUV444P) {
-            av_log(avctx, AV_LOG_ERROR,
-                   "only YUV420/YUV422/YUV444 are supported (no alpha support yet)\n");
-            return AVERROR(EINVAL);
-        }
-        break;
-    default:
-        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
-            av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
-            return AVERROR(EINVAL);
-        }
-    }
-
     switch (avctx->pix_fmt) {
     case AV_PIX_FMT_YUVJ444P:
     case AV_PIX_FMT_YUV444P:
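
Reviewer note: the per-codec pix_fmt switch removed above appears redundant with the pixel-format validation the generic encode layer already performs against each AVCodec's .pix_fmts array (the MJPEG/AMV color-range special cases would then be handled inside the MJPEG encoder itself). A minimal sketch of the declaration carrying that information, mirroring the ff_h263_encoder/ff_h263p_encoder entries at the end of this diff; the entry name is hypothetical:

    /* Illustration only: a YUV420P-only encoder in this family declares its
     * supported formats here, and the generic layer rejects any other
     * avctx->pix_fmt before ff_mpv_encode_init() runs. */
    AVCodec sketch_mpv_encoder = {
        .name     = "sketch",
        .type     = AVMEDIA_TYPE_VIDEO,
        .id       = AV_CODEC_ID_H263,
        .init     = ff_mpv_encode_init,
        .encode2  = ff_mpv_encode_picture,
        .close    = ff_mpv_encode_end,
        .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    };
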
@@ -577,24 +533,11 @@ FF_ENABLE_DEPRECATION_WARNINGS
         return AVERROR(EINVAL);
     }
     if ((s->codec_id == AV_CODEC_ID_H263  ||
-         s->codec_id == AV_CODEC_ID_H263P) &&
+         s->codec_id == AV_CODEC_ID_H263P ||
+         s->codec_id == AV_CODEC_ID_RV20) &&
         ((avctx->width &3) ||
          (avctx->height&3) )) {
-        av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
-        return AVERROR(EINVAL);
-    }
-
-    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
-        (avctx->width  > 4095 ||
-         avctx->height > 4095 )) {
-        av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
-        return AVERROR(EINVAL);
-    }
-
-    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
-        (avctx->width  > 16383 ||
-         avctx->height > 16383 )) {
-        av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
+        av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
         return AVERROR(EINVAL);
     }
 
@@ -605,13 +548,6 @@ FF_ENABLE_DEPRECATION_WARNINGS
         return AVERROR(EINVAL);
     }
 
-    if (s->codec_id == AV_CODEC_ID_RV20 &&
-        (avctx->width &3 ||
-         avctx->height&3 )) {
-        av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
-        return AVERROR(EINVAL);
-    }
-
     if ((s->codec_id == AV_CODEC_ID_WMV1 ||
          s->codec_id == AV_CODEC_ID_WMV2) &&
          avctx->width & 1) {
@@ -628,17 +564,16 @@ FF_ENABLE_DEPRECATION_WARNINGS
 #if FF_API_PRIVATE_OPT
     FF_DISABLE_DEPRECATION_WARNINGS
     if (avctx->mpeg_quant)
-        s->mpeg_quant = avctx->mpeg_quant;
+        s->mpeg_quant = 1;
     FF_ENABLE_DEPRECATION_WARNINGS
-#endif
 
-    // FIXME mpeg2 uses that too
     if (s->mpeg_quant && (   s->codec_id != AV_CODEC_ID_MPEG4
                           && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
         av_log(avctx, AV_LOG_ERROR,
                "mpeg2 style quantization not supported by codec\n");
         return AVERROR(EINVAL);
     }
+#endif
 
     if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
@@ -722,11 +657,6 @@ FF_ENABLE_DEPRECATION_WARNINGS
         return AVERROR_PATCHWELCOME;
     }
 
-    if (!avctx->time_base.den || !avctx->time_base.num) {
-        av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
-        return AVERROR(EINVAL);
-    }
-
 #if FF_API_PRIVATE_OPT
 FF_DISABLE_DEPRECATION_WARNINGS
     if (avctx->b_frame_strategy)
@@ -790,17 +720,17 @@ FF_ENABLE_DEPRECATION_WARNINGS
         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
         s->rtp_mode   = 1;
         break;
+#if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
     case AV_CODEC_ID_MJPEG:
     case AV_CODEC_ID_AMV:
         s->out_format = FMT_MJPEG;
         s->intra_only = 1; /* force intra only for jpeg */
-        if (!CONFIG_MJPEG_ENCODER)
-            return AVERROR_ENCODER_NOT_FOUND;
         if ((ret = ff_mjpeg_encode_init(s)) < 0)
             return ret;
         avctx->delay = 0;
         s->low_delay = 1;
         break;
+#endif
     case AV_CODEC_ID_SPEEDHQ:
         s->out_format = FMT_SPEEDHQ;
         s->intra_only = 1; /* force intra only for SHQ */
@@ -1009,8 +939,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
         ff_h263_encode_init(s);
     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
-        if ((ret = ff_msmpeg4_encode_init(s)) < 0)
-            return ret;
+        ff_msmpeg4_encode_init(s);
     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
         && s->out_format == FMT_MPEG1)
         ff_mpeg1_encode_init(s);
@@ -1099,7 +1028,7 @@ av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
     ff_rate_control_uninit(s);
 
     ff_mpv_common_end(s);
-    if (CONFIG_MJPEG_ENCODER &&
+    if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
         s->out_format == FMT_MJPEG)
         ff_mjpeg_encode_close(s);
 
@@ -1366,23 +1295,20 @@ static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
     return 0;
 }
 
-static int encode_frame(AVCodecContext *c, AVFrame *frame)
+static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
 {
-    AVPacket pkt = { 0 };
     int ret;
     int size = 0;
 
-    av_init_packet(&pkt);
-
     ret = avcodec_send_frame(c, frame);
     if (ret < 0)
         return ret;
 
     do {
-        ret = avcodec_receive_packet(c, &pkt);
+        ret = avcodec_receive_packet(c, pkt);
         if (ret >= 0) {
-            size += pkt.size;
-            av_packet_unref(&pkt);
+            size += pkt->size;
+            av_packet_unref(pkt);
         } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
             return ret;
     } while (ret >= 0);
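
Reviewer note: encode_frame() now borrows a caller-supplied AVPacket instead of a stack packet plus av_init_packet() (which is being phased out), and keeps draining avcodec_receive_packet() until EAGAIN/EOF while summing pkt->size. The following hunks in estimate_best_b_count() allocate that packet once and free it once; roughly the intended lifetime, as a sketch (trial_count is a hypothetical stand-in):

    /* One packet is reused for every trial encode and freed exactly once. */
    AVPacket *pkt = av_packet_alloc();
    if (!pkt)
        return AVERROR(ENOMEM);

    for (int j = 0; j < trial_count; j++) {
        int out_size = encode_frame(c, s->tmp_frames[j], pkt); /* unrefs pkt after summing */
        if (out_size < 0)
            break;                 /* pkt stays valid and is freed exactly once below */
    }
    av_packet_free(&pkt);
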
@@ -1393,6 +1319,7 @@ static int encode_frame(AVCodecContext *c, AVFrame *frame)
 static int estimate_best_b_count(MpegEncContext *s)
 {
     const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
+    AVPacket *pkt;
     const int scale = s->brd_scale;
     int width  = s->width  >> scale;
     int height = s->height >> scale;
@@ -1403,6 +1330,10 @@ static int estimate_best_b_count(MpegEncContext *s)
 
     av_assert0(scale >= 0 && scale <= 3);
 
+    pkt = av_packet_alloc();
+    if (!pkt)
+        return AVERROR(ENOMEM);
+
     //emms_c();
     //s->next_picture_ptr->quality;
     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
@@ -1454,8 +1385,10 @@ static int estimate_best_b_count(MpegEncContext *s)
             break;
 
         c = avcodec_alloc_context3(NULL);
-        if (!c)
-            return AVERROR(ENOMEM);
+        if (!c) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
 
         c->width        = width;
         c->height       = height;
@@ -1473,10 +1406,11 @@ static int estimate_best_b_count(MpegEncContext *s)
         if (ret < 0)
             goto fail;
 
+
         s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
         s->tmp_frames[0]->quality   = 1 * FF_QP2LAMBDA;
 
-        out_size = encode_frame(c, s->tmp_frames[0]);
+        out_size = encode_frame(c, s->tmp_frames[0], pkt);
         if (out_size < 0) {
             ret = out_size;
             goto fail;
@@ -1491,7 +1425,7 @@ static int estimate_best_b_count(MpegEncContext *s)
                                      AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
             s->tmp_frames[i + 1]->quality   = is_p ? p_lambda : b_lambda;
 
-            out_size = encode_frame(c, s->tmp_frames[i + 1]);
+            out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
             if (out_size < 0) {
                 ret = out_size;
                 goto fail;
@@ -1501,7 +1435,7 @@ static int estimate_best_b_count(MpegEncContext *s)
         }
 
         /* get the delayed frames */
-        out_size = encode_frame(c, NULL);
+        out_size = encode_frame(c, NULL, pkt);
         if (out_size < 0) {
             ret = out_size;
             goto fail;
@@ -1517,10 +1451,15 @@ static int estimate_best_b_count(MpegEncContext *s)
 
 fail:
         avcodec_free_context(&c);
-        if (ret < 0)
-            return ret;
+        av_packet_unref(pkt);
+        if (ret < 0) {
+            best_b_count = ret;
+            break;
+        }
     }
 
+    av_packet_free(&pkt);
+
     return best_b_count;
 }
 
@@ -1687,7 +1626,8 @@ no_output_pic:
             // input is not a shared pix -> reuse buffer for current_pix
             s->current_picture_ptr = s->reordered_input_picture[0];
             for (i = 0; i < 4; i++) {
-                s->new_picture.f->data[i] += INPLACE_OFFSET;
+                if (s->new_picture.f->data[i])
+                    s->new_picture.f->data[i] += INPLACE_OFFSET;
             }
         }
         ff_mpeg_unref_picture(s->avctx, &s->current_picture);
@@ -1917,7 +1857,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
 
         frame_end(s);
 
-        if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
+        if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
             ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
 
         if (avctx->rc_buffer_size) {
@@ -1984,8 +1924,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
         stuffing_count = ff_vbv_update(s, s->frame_bits);
         s->stuffing_bits = 8*stuffing_count;
         if (stuffing_count) {
-            if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
-                    stuffing_count + 50) {
+            if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
                 av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
                 return -1;
             }
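
Reviewer note: here and in the remaining hunks the open-coded byte arithmetic on the PutBitContext is replaced by the put_bytes_count()/put_bytes_left() helpers; the round_up argument decides whether a partially filled byte is counted. The equivalences, exactly as used in this patch:

    /* What the helpers compute, expressed with the code they replace: */
    int bytes_written      = put_bytes_count(&s->pb, 0); /* was put_bits_count(&s->pb) / 8 */
    int bytes_written_ceil = put_bytes_count(&s->pb, 1); /* was (put_bits_count(&s->pb) + 7) >> 3 */
    int bytes_left         = put_bytes_left(&s->pb, 0);  /* was s->pb.buf_end - s->pb.buf -
                                                            (put_bits_count(&s->pb) >> 3) */
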
@@ -2588,11 +2527,12 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
         if (CONFIG_H263_ENCODER)
             ff_h263_encode_mb(s, s->block, motion_x, motion_y);
         break;
+#if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
     case AV_CODEC_ID_MJPEG:
     case AV_CODEC_ID_AMV:
-        if (CONFIG_MJPEG_ENCODER)
-            ff_mjpeg_encode_mb(s, s->block);
+        ff_mjpeg_encode_mb(s, s->block);
         break;
+#endif
     case AV_CODEC_ID_SPEEDHQ:
         if (CONFIG_SPEEDHQ_ENCODER)
             ff_speedhq_encode_mb(s, s->block);
@@ -2845,7 +2785,8 @@ static void write_slice_end(MpegEncContext *s){
         }
 
         ff_mpeg4_stuffing(&s->pb);
-    }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
+    } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
+               s->out_format == FMT_MJPEG) {
         ff_mjpeg_encode_stuffing(s);
     } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
         ff_speedhq_end_slice(s);
@@ -2881,12 +2822,12 @@ static void update_mb_info(MpegEncContext *s, int startcode)
 {
     if (!s->mb_info)
         return;
-    if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
+    if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
         s->mb_info_size += 12;
         s->prev_mb_info = s->last_mb_info;
     }
     if (startcode) {
-        s->prev_mb_info = put_bits_count(&s->pb)/8;
+        s->prev_mb_info = put_bytes_count(&s->pb, 0);
         /* This might have incremented mb_info_size above, and we return without
          * actually writing any info into that slot yet. But in that case,
          * this will be called again at the start of the after writing the
@@ -2894,7 +2835,7 @@ static void update_mb_info(MpegEncContext *s, int startcode)
         return;
     }
 
-    s->last_mb_info = put_bits_count(&s->pb)/8;
+    s->last_mb_info = put_bytes_count(&s->pb, 0);
     if (!s->mb_info_size)
         s->mb_info_size += 12;
     write_mb_info(s);
@@ -2902,7 +2843,7 @@ static void update_mb_info(MpegEncContext *s, int startcode)
 
 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
 {
-    if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
+    if (put_bytes_left(&s->pb, 0) < threshold
         && s->slice_context_count == 1
         && s->pb.buf == s->avctx->internal->byte_buffer) {
         int lastgob_pos = s->ptr_lastgob - s->pb.buf;
@@ -2931,7 +2872,7 @@ int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t s
         s->ptr_lastgob   = s->pb.buf + lastgob_pos;
         s->vbv_delay_ptr = s->pb.buf + vbv_pos;
     }
-    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
+    if (put_bytes_left(&s->pb, 0) < threshold)
         return AVERROR(EINVAL);
     return 0;
 }
@@ -3023,13 +2964,13 @@ static int encode_thread(AVCodecContext *c, void *arg){
                                + s->mb_width*MAX_MB_BYTES;
 
             ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
-            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
+            if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
                 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                 return -1;
             }
             if(s->data_partitioning){
-                if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
-                   || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
+                if (put_bytes_left(&s->pb2,    0) < MAX_MB_BYTES ||
+                    put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
                     av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
                     return -1;
                 }
@@ -3049,7 +2990,8 @@ static int encode_thread(AVCodecContext *c, void *arg){
             if(s->rtp_mode){
                 int current_packet_size, is_gob_start;
 
-                current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
+                current_packet_size = put_bytes_count(&s->pb, 1)
+                                      - (s->ptr_lastgob - s->pb.buf);
 
                 is_gob_start = s->rtp_payload_size &&
                                current_packet_size >= s->rtp_payload_size &&
@@ -3086,7 +3028,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
                     current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
 
                     if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
-                        int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
+                        int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
                         int d = 100 / s->error_rate;
                         if(r % d == 0){
                             current_packet_size=0;
@@ -3912,11 +3854,14 @@ static int encode_picture(MpegEncContext *s, int picture_number)
     s->mb_x = s->mb_y = 0;
     s->last_bits= put_bits_count(&s->pb);
     switch(s->out_format) {
+#if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
     case FMT_MJPEG:
-        if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
+        /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */
+        if (!CONFIG_MJPEG_ENCODER || s->huffman != HUFFMAN_TABLE_OPTIMAL)
             ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
                                            s->pred, s->intra_matrix, s->chroma_intra_matrix);
         break;
+#endif
     case FMT_SPEEDHQ:
         if (CONFIG_SPEEDHQ_ENCODER)
             ff_speedhq_encode_picture_header(s);
@@ -4768,6 +4713,12 @@ static const AVOption h263_options[] = {
     { "obmc",         "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
     { "mb_info",      "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
     FF_MPV_COMMON_OPTS
+#if FF_API_MPEGVIDEO_OPTS
+    FF_MPV_DEPRECATED_MPEG_QUANT_OPT
+    FF_MPV_DEPRECATED_A53_CC_OPT
+    FF_MPV_DEPRECATED_MATRIX_OPT
+    FF_MPV_DEPRECATED_BFRAME_OPTS
+#endif
     { NULL },
 };
 
@@ -4787,7 +4738,7 @@ AVCodec ff_h263_encoder = {
     .init           = ff_mpv_encode_init,
     .encode2        = ff_mpv_encode_picture,
     .close          = ff_mpv_encode_end,
-    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
+    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
     .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
     .priv_class     = &h263_class,
 };
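
Reviewer note: FF_CODEC_CAP_INIT_THREADSAFE can only be set here because the static table setup triggered via ff_h263_encode_init() (the subject of this commit, in ituh263enc.c) is now guarded against concurrent encoder inits. A sketch of the usual guard, with hypothetical helper names; the real code lives in ituh263enc.c:

    #include "libavutil/thread.h"

    /* Hypothetical helpers; the pattern is a static AVOnce plus ff_thread_once(). */
    static void h263_encode_init_static(void)
    {
        /* build the shared static coefficient/length tables exactly once */
    }

    void sketch_h263_encode_init(void)
    {
        static AVOnce init_static_once = AV_ONCE_INIT;

        /* per-context setup may still run concurrently;
         * only the shared-table initialization is serialized */
        ff_thread_once(&init_static_once, h263_encode_init_static);
    }
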
@@ -4798,6 +4749,12 @@ static const AVOption h263p_options[] = {
     { "obmc",       "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
     { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
     FF_MPV_COMMON_OPTS
+#if FF_API_MPEGVIDEO_OPTS
+    FF_MPV_DEPRECATED_MPEG_QUANT_OPT
+    FF_MPV_DEPRECATED_A53_CC_OPT
+    FF_MPV_DEPRECATED_MATRIX_OPT
+    FF_MPV_DEPRECATED_BFRAME_OPTS
+#endif
     { NULL },
 };
 static const AVClass h263p_class = {
@@ -4817,7 +4774,7 @@ AVCodec ff_h263p_encoder = {
     .encode2        = ff_mpv_encode_picture,
     .close          = ff_mpv_encode_end,
     .capabilities   = AV_CODEC_CAP_SLICE_THREADS,
-    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
+    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
     .priv_class     = &h263p_class,
 };