X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fmpegvideo.c;h=988dd18a330acccc9ebf7cf139bab5740c35aed1;hb=cad3a5d715a8da2d449b41d3801480fb50d638c2;hp=d4d3bea6498f91d1a7063049498b3b23e4d14267;hpb=26148e923613e718787c6fc4bf3f64e8909f597c;p=ffmpeg diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c index d4d3bea6498..988dd18a330 100644 --- a/libavcodec/mpegvideo.c +++ b/libavcodec/mpegvideo.c @@ -32,7 +32,8 @@ #include "libavutil/imgutils.h" #include "libavutil/internal.h" #include "libavutil/motion_vector.h" -#include "libavutil/timer.h" +#include "libavutil/video_enc_params.h" + #include "avcodec.h" #include "blockdsp.h" #include "h264chroma.h" @@ -347,9 +348,9 @@ av_cold void ff_mpv_idct_init(MpegEncContext *s) ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } -static int alloc_picture(MpegEncContext *s, Picture *pic, int shared) +static int alloc_picture(MpegEncContext *s, Picture *pic) { - return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 0, + return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 0, 0, s->chroma_x_shift, s->chroma_y_shift, s->out_format, s->mb_stride, s->mb_width, s->mb_height, s->b8_stride, &s->linesize, &s->uvlinesize); @@ -373,25 +374,27 @@ static int init_duplicate_context(MpegEncContext *s) s->sc.obmc_scratchpad = NULL; if (s->encoding) { - FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map, - ME_MAP_SIZE * sizeof(uint32_t), fail) - FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, - ME_MAP_SIZE * sizeof(uint32_t), fail) + if (!FF_ALLOCZ_TYPED_ARRAY(s->me.map, ME_MAP_SIZE) || + !FF_ALLOCZ_TYPED_ARRAY(s->me.score_map, ME_MAP_SIZE)) + return AVERROR(ENOMEM); + if (s->noise_reduction) { - FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, - 2 * 64 * sizeof(int), fail) + if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_error_sum, 2)) + return AVERROR(ENOMEM); } } - FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail) + if (!FF_ALLOCZ_TYPED_ARRAY(s->blocks, 2)) + return AVERROR(ENOMEM); s->block = s->blocks[0]; for (i = 0; i < 12; i++) { s->pblocks[i] = &s->block[i]; } - FF_ALLOCZ_OR_GOTO(s->avctx, s->block32, sizeof(*s->block32), fail) + if (!(s->block32 = av_mallocz(sizeof(*s->block32))) || + !(s->dpcm_macroblock = av_mallocz(sizeof(*s->dpcm_macroblock)))) + return AVERROR(ENOMEM); s->dpcm_direction = 0; - FF_ALLOCZ_OR_GOTO(s->avctx, s->dpcm_macroblock, sizeof(*s->dpcm_macroblock), fail) if (s->avctx->codec_tag == AV_RL32("VCR2")) { // exchange uv @@ -400,16 +403,14 @@ static int init_duplicate_context(MpegEncContext *s) if (s->out_format == FMT_H263) { /* ac values */ - FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, - yc_size * sizeof(int16_t) * 16, fail); + if (!FF_ALLOCZ_TYPED_ARRAY(s->ac_val_base, yc_size)) + return AVERROR(ENOMEM); s->ac_val[0] = s->ac_val_base + s->b8_stride + 1; s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1; s->ac_val[2] = s->ac_val[1] + c_size; } return 0; -fail: - return -1; // free() through ff_mpv_common_end() } static void free_duplicate_context(MpegEncContext *s) @@ -469,7 +470,6 @@ int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src) MpegEncContext bak; int i, ret; // FIXME copy only needed parts - // START_TIMER backup_duplicate_context(&bak, dst); memcpy(dst, src, sizeof(MpegEncContext)); backup_duplicate_context(dst, &bak); @@ -487,8 +487,6 @@ int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src) "scratch buffers.\n"); return ret; } - // STOP_TIMER("update_duplicate_context") - // about 10k cycles / 0.01 sec for 
1000frames on 1ghz with 2 threads return 0; } @@ -538,6 +536,8 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst, s->avctx->width = s1->avctx->width; s->avctx->height = s1->avctx->height; + s->quarter_sample = s1->quarter_sample; + s->coded_picture_number = s1->coded_picture_number; s->picture_number = s1->picture_number; @@ -662,17 +662,14 @@ void ff_mpv_common_defaults(MpegEncContext *s) } /** - * Set the given MpegEncContext to defaults for decoding. + * Initialize the given MpegEncContext for decoding. * the changed fields will not depend upon * the prior state of the MpegEncContext. */ -void ff_mpv_decode_defaults(MpegEncContext *s) +void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx) { ff_mpv_common_defaults(s); -} -void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx) -{ s->avctx = avctx; s->width = avctx->coded_width; s->height = avctx->coded_height; @@ -717,8 +714,8 @@ static int init_context_frame(MpegEncContext *s) if (s->mb_height & 1) yc_size += 2*s->b8_stride + 2*s->mb_stride; - FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), - fail); // error resilience code looks cleaner with this + if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_index2xy, s->mb_num + 1)) + return AVERROR(ENOMEM); for (y = 0; y < s->mb_height; y++) for (x = 0; x < s->mb_width; x++) s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride; @@ -727,12 +724,13 @@ static int init_context_frame(MpegEncContext *s) if (s->encoding) { /* Allocate MV tables */ - FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail) - FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail) - FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail) - FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail) - FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail) - FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail) + if (!FF_ALLOCZ_TYPED_ARRAY(s->p_mv_table_base, mv_table_size) || + !FF_ALLOCZ_TYPED_ARRAY(s->b_forw_mv_table_base, mv_table_size) || + !FF_ALLOCZ_TYPED_ARRAY(s->b_back_mv_table_base, mv_table_size) || + !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_forw_mv_table_base, mv_table_size) || + !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_back_mv_table_base, mv_table_size) || + !FF_ALLOCZ_TYPED_ARRAY(s->b_direct_mv_table_base, mv_table_size)) + return AVERROR(ENOMEM); s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1; s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1; s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1; @@ -741,15 +739,11 @@ static int init_context_frame(MpegEncContext *s) s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1; /* Allocate MB type table */ - FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding - - FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail) - - FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab, - mb_array_size * sizeof(float), fail); - FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab, - mb_array_size * sizeof(float), fail); - + if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_type, mb_array_size) || + !FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size) || + !FF_ALLOC_TYPED_ARRAY (s->cplx_tab, mb_array_size) || + !FF_ALLOC_TYPED_ARRAY (s->bits_tab, mb_array_size)) + return AVERROR(ENOMEM); } if (s->codec_id == AV_CODEC_ID_MPEG4 || 
@@ -759,34 +753,34 @@ static int init_context_frame(MpegEncContext *s) int j, k; for (j = 0; j < 2; j++) { for (k = 0; k < 2; k++) { - FF_ALLOCZ_OR_GOTO(s->avctx, - s->b_field_mv_table_base[i][j][k], - mv_table_size * 2 * sizeof(int16_t), - fail); + if (!FF_ALLOCZ_TYPED_ARRAY(s->b_field_mv_table_base[i][j][k], mv_table_size)) + return AVERROR(ENOMEM); s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1; } - FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail) - FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail) + if (!FF_ALLOCZ_TYPED_ARRAY(s->b_field_select_table [i][j], mv_table_size * 2) || + !FF_ALLOCZ_TYPED_ARRAY(s->p_field_mv_table_base[i][j], mv_table_size)) + return AVERROR(ENOMEM); s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1; } - FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail) + if (!FF_ALLOCZ_TYPED_ARRAY(s->p_field_select_table[i], mv_table_size * 2)) + return AVERROR(ENOMEM); } } if (s->out_format == FMT_H263) { - /* cbp values */ - FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail); + /* cbp values, cbp, ac_pred, pred_dir */ + if (!FF_ALLOCZ_TYPED_ARRAY(s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride) || + !FF_ALLOCZ_TYPED_ARRAY(s->cbp_table, mb_array_size) || + !FF_ALLOCZ_TYPED_ARRAY(s->pred_dir_table, mb_array_size)) + return AVERROR(ENOMEM); s->coded_block = s->coded_block_base + s->b8_stride + 1; - - /* cbp, ac_pred, pred_dir */ - FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail); - FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail); } if (s->h263_pred || s->h263_plus || !s->encoding) { /* dc values */ // MN: we need these for error resilience of intra-frames - FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail); + if (!FF_ALLOCZ_TYPED_ARRAY(s->dc_val_base, yc_size)) + return AVERROR(ENOMEM); s->dc_val[0] = s->dc_val_base + s->b8_stride + 1; s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1; s->dc_val[2] = s->dc_val[1] + c_size; @@ -794,17 +788,14 @@ static int init_context_frame(MpegEncContext *s) s->dc_val_base[i] = 1024; } - /* which mb is an intra block */ - FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail); + /* which mb is an intra block, init macroblock skip table */ + if (!FF_ALLOC_TYPED_ARRAY(s->mbintra_table, mb_array_size) || + // Note the + 1 is for a quicker MPEG-4 slice_end detection + !FF_ALLOCZ_TYPED_ARRAY(s->mbskip_table, mb_array_size + 2)) + return AVERROR(ENOMEM); memset(s->mbintra_table, 1, mb_array_size); - /* init macroblock skip table */ - FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail); - // Note the + 1 is for a quicker MPEG-4 slice_end detection - return ff_mpeg_er_init(s); -fail: - return AVERROR(ENOMEM); } static void clear_context(MpegEncContext *s) @@ -909,7 +900,7 @@ av_cold int ff_mpv_common_init(MpegEncContext *s) if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) { av_log(s->avctx, AV_LOG_ERROR, "decoding to AV_PIX_FMT_NONE is not supported.\n"); - return -1; + return AVERROR(EINVAL); } if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) { @@ -925,7 +916,7 @@ av_cold int ff_mpv_common_init(MpegEncContext *s) if ((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx)) - return -1; + return 
AVERROR(EINVAL); dct_init(s); @@ -936,28 +927,22 @@ av_cold int ff_mpv_common_init(MpegEncContext *s) if (ret) return ret; - FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, - MAX_PICTURE_COUNT * sizeof(Picture), fail); + if (!FF_ALLOCZ_TYPED_ARRAY(s->picture, MAX_PICTURE_COUNT)) + return AVERROR(ENOMEM); for (i = 0; i < MAX_PICTURE_COUNT; i++) { s->picture[i].f = av_frame_alloc(); if (!s->picture[i].f) - goto fail; - } - s->next_picture.f = av_frame_alloc(); - if (!s->next_picture.f) - goto fail; - s->last_picture.f = av_frame_alloc(); - if (!s->last_picture.f) - goto fail; - s->current_picture.f = av_frame_alloc(); - if (!s->current_picture.f) - goto fail; - s->new_picture.f = av_frame_alloc(); - if (!s->new_picture.f) - goto fail; - - if (init_context_frame(s)) - goto fail; + return AVERROR(ENOMEM); + } + + if (!(s->next_picture.f = av_frame_alloc()) || + !(s->last_picture.f = av_frame_alloc()) || + !(s->current_picture.f = av_frame_alloc()) || + !(s->new_picture.f = av_frame_alloc())) + return AVERROR(ENOMEM); + + if ((ret = init_context_frame(s))) + return AVERROR(ENOMEM); s->parse_context.state = -1; @@ -971,18 +956,18 @@ av_cold int ff_mpv_common_init(MpegEncContext *s) if (i) { s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext)); if (!s->thread_context[i]) - goto fail; + return AVERROR(ENOMEM); } - if (init_duplicate_context(s->thread_context[i]) < 0) - goto fail; + if ((ret = init_duplicate_context(s->thread_context[i])) < 0) + return ret; s->thread_context[i]->start_mb_y = (s->mb_height * (i) + nb_slices / 2) / nb_slices; s->thread_context[i]->end_mb_y = (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices; } } else { - if (init_duplicate_context(s) < 0) - goto fail; + if ((ret = init_duplicate_context(s)) < 0) + return ret; s->start_mb_y = 0; s->end_mb_y = s->mb_height; } @@ -990,9 +975,6 @@ av_cold int ff_mpv_common_init(MpegEncContext *s) // } return 0; - fail: - ff_mpv_common_end(s); - return -1; } /** @@ -1085,10 +1067,10 @@ int ff_mpv_common_frame_size_change(MpegEncContext *s) if ((s->width || s->height) && (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0) - goto fail; + return err; if ((err = init_context_frame(s))) - goto fail; + return err; memset(s->thread_context, 0, sizeof(s->thread_context)); s->thread_context[0] = s; @@ -1100,12 +1082,11 @@ int ff_mpv_common_frame_size_change(MpegEncContext *s) if (i) { s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext)); if (!s->thread_context[i]) { - err = AVERROR(ENOMEM); - goto fail; + return AVERROR(ENOMEM); } } if ((err = init_duplicate_context(s->thread_context[i])) < 0) - goto fail; + return err; s->thread_context[i]->start_mb_y = (s->mb_height * (i) + nb_slices / 2) / nb_slices; s->thread_context[i]->end_mb_y = @@ -1114,7 +1095,7 @@ int ff_mpv_common_frame_size_change(MpegEncContext *s) } else { err = init_duplicate_context(s); if (err < 0) - goto fail; + return err; s->start_mb_y = 0; s->end_mb_y = s->mb_height; } @@ -1122,9 +1103,6 @@ int ff_mpv_common_frame_size_change(MpegEncContext *s) } return 0; - fail: - ff_mpv_common_end(s); - return err; } /* init common structure for both encoder and decoder */ @@ -1133,7 +1111,7 @@ void ff_mpv_common_end(MpegEncContext *s) int i; if (!s) - return ; + return; if (s->slice_context_count > 1) { for (i = 0; i < s->slice_context_count; i++) { @@ -1151,6 +1129,9 @@ void ff_mpv_common_end(MpegEncContext *s) av_freep(&s->bitstream_buffer); s->allocated_bitstream_buffer_size = 0; + if (!s->avctx) + return; + if (s->picture) { for (i = 0; i < MAX_PICTURE_COUNT; 
i++) {
             ff_free_picture_tables(&s->picture[i]);
@@ -1261,7 +1242,7 @@ int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
         pic->f->coded_picture_number = s->coded_picture_number++;
 
-        if (alloc_picture(s, pic, 0) < 0)
+        if (alloc_picture(s, pic) < 0)
             return -1;
 
         s->current_picture_ptr = pic;
@@ -1322,7 +1303,7 @@ int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
             s->last_picture_ptr->f->key_frame = 0;
             s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
 
-            if (alloc_picture(s, s->last_picture_ptr, 0) < 0) {
+            if (alloc_picture(s, s->last_picture_ptr) < 0) {
                 s->last_picture_ptr = NULL;
                 return -1;
             }
@@ -1363,7 +1344,7 @@ int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
             s->next_picture_ptr->f->key_frame = 0;
             s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
 
-            if (alloc_picture(s, s->next_picture_ptr, 0) < 0) {
+            if (alloc_picture(s, s->next_picture_ptr) < 0) {
                 s->next_picture_ptr = NULL;
                 return -1;
             }
@@ -1443,14 +1424,33 @@ void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
 
 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
 {
-    AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
-    int offset = 2*s->mb_stride + 1;
-    if(!ref)
+    AVVideoEncParams *par;
+    int mult = (qp_type == FF_QSCALE_TYPE_MPEG1) ? 2 : 1;
+    unsigned int nb_mb = p->alloc_mb_height * p->alloc_mb_width;
+    unsigned int x, y;
+
+    if (!(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS))
+        return 0;
+
+    par = av_video_enc_params_create_side_data(f, AV_VIDEO_ENC_PARAMS_MPEG2, nb_mb);
+    if (!par)
         return AVERROR(ENOMEM);
-    av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
-    ref->size -= offset;
-    ref->data += offset;
-    return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
+
+    for (y = 0; y < p->alloc_mb_height; y++)
+        for (x = 0; x < p->alloc_mb_width; x++) {
+            const unsigned int block_idx = y * p->alloc_mb_width + x;
+            const unsigned int mb_xy = y * p->alloc_mb_stride + x;
+            AVVideoBlockParams *b = av_video_enc_params_block(par, block_idx);
+
+            b->src_x = x * 16;
+            b->src_y = y * 16;
+            b->w = 16;
+            b->h = 16;
+
+            b->delta_qp = p->qscale_table[mb_xy] * mult;
+        }
+
+    return 0;
 }
 
 static inline int hpel_motion_lowres(MpegEncContext *s,
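
The rewritten ff_mpv_export_qp_table() above publishes per-macroblock quantizers as AVVideoEncParams frame side data of type AV_VIDEO_ENC_PARAMS_MPEG2, and only when the caller opted in through AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS, replacing the old qscale_table buffer attachment. A minimal consumer sketch, assuming a decoder opened with that export_side_data flag; the helper name dump_mb_qp is illustrative and not part of the patch:

#include <stdio.h>
#include <libavutil/frame.h>
#include <libavutil/video_enc_params.h>

static void dump_mb_qp(const AVFrame *frame)
{
    /* Present only if the decoder was opened with
     * avctx->export_side_data |= AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS; */
    AVFrameSideData *sd =
        av_frame_get_side_data(frame, AV_FRAME_DATA_VIDEO_ENC_PARAMS);
    if (!sd)
        return;

    AVVideoEncParams *par = (AVVideoEncParams *)sd->data;
    for (unsigned int i = 0; i < par->nb_blocks; i++) {
        AVVideoBlockParams *b = av_video_enc_params_block(par, i);
        /* Each block is one 16x16 macroblock; the exporter leaves the
         * frame-level qp at 0 and stores the value in delta_qp. */
        printf("mb at (%d,%d): qp %d\n",
               b->src_x, b->src_y, (int)(par->qp + b->delta_qp));
    }
}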
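
More generally, the allocation changes throughout the diff follow one pattern: each FF_ALLOCZ_OR_GOTO(..., fail) becomes a NULL check on FF_ALLOCZ_TYPED_ARRAY(), the local fail: labels disappear, and the helpers return AVERROR(ENOMEM) directly, leaving the partially initialized context for the caller to tear down with ff_mpv_common_end(), which is also why that function gains the early "if (!s->avctx) return;" guard. A sketch of the converted pattern, assuming FF_ALLOCZ_TYPED_ARRAY(p, n) behaves like "p = av_mallocz_array(n, sizeof(*p))" (it is an internal helper in libavutil/internal.h, so the exact definition may differ); the ToyTables names are illustrative only:

#include <stdint.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

typedef struct ToyTables {
    int16_t *mv_table;
    uint8_t *select_table;
} ToyTables;

static int toy_alloc_tables(ToyTables *t, size_t nb_mb)
{
    /* Allocate zeroed arrays; on failure return at once and let the
     * caller free whatever was already allocated. */
    if (!(t->mv_table     = av_mallocz_array(nb_mb, sizeof(*t->mv_table))) ||
        !(t->select_table = av_mallocz_array(nb_mb, sizeof(*t->select_table))))
        return AVERROR(ENOMEM);
    return 0;
}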