X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fmpegvideo.c;h=a3ff7460e9085af351d78c18931b673f973ac53a;hb=dde6b2a3554553434c9e3435de3c7e94c3affe03;hp=d4a84d43e38595b9d53028b3d968a516583a3b29;hpb=10531d48a0f91aea57eac7e1c6cf5857e601826e;p=ffmpeg diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c index d4a84d43e38..a3ff7460e90 100644 --- a/libavcodec/mpegvideo.c +++ b/libavcodec/mpegvideo.c @@ -39,6 +39,7 @@ #include "idctdsp.h" #include "internal.h" #include "mathops.h" +#include "mpeg_er.h" #include "mpegutils.h" #include "mpegvideo.h" #include "mpegvideodata.h" @@ -256,34 +257,6 @@ static void dct_unquantize_h263_inter_c(MpegEncContext *s, } } -static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, - int (*mv)[2][4][2], - int mb_x, int mb_y, int mb_intra, int mb_skipped) -{ - MpegEncContext *s = opaque; - - s->mv_dir = mv_dir; - s->mv_type = mv_type; - s->mb_intra = mb_intra; - s->mb_skipped = mb_skipped; - s->mb_x = mb_x; - s->mb_y = mb_y; - memcpy(s->mv, mv, sizeof(*mv)); - - ff_init_block_index(s); - ff_update_block_index(s); - - s->bdsp.clear_blocks(s->block[0]); - - s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16; - s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift); - s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift); - - if (ref) - av_log(s->avctx, AV_LOG_DEBUG, - "Interlaced error concealment is not fully implemented\n"); - ff_mpv_decode_mb(s, s->block); -} static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h) { @@ -361,373 +334,12 @@ av_cold void ff_mpv_idct_init(MpegEncContext *s) ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan); } -static int frame_size_alloc(MpegEncContext *s, int linesize) -{ - int alloc_size = FFALIGN(FFABS(linesize) + 64, 32); - - if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) - return 0; - - if (linesize < 24) { - av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n"); - return AVERROR_PATCHWELCOME; - } - - // edge emu needs blocksize + filter length - 1 - // (= 17x17 for halfpel / 21x21 for h264) - // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9 - // at uvlinesize. 
It supports only YUV420 so 24x24 is enough - // linesize * interlaced * MBsize - // we also use this buffer for encoding in encode_mb_internal() needig an additional 32 lines - FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size, 4 * 68, - fail); - - FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size, 4 * 16 * 2, - fail) - s->me.temp = s->me.scratchpad; - s->rd_scratchpad = s->me.scratchpad; - s->b_scratchpad = s->me.scratchpad; - s->obmc_scratchpad = s->me.scratchpad + 16; - - return 0; -fail: - av_freep(&s->edge_emu_buffer); - return AVERROR(ENOMEM); -} - -/** - * Allocate a frame buffer - */ -static int alloc_frame_buffer(MpegEncContext *s, Picture *pic) +static int alloc_picture(MpegEncContext *s, Picture *pic, int shared) { - int edges_needed = av_codec_is_encoder(s->avctx->codec); - int r, ret; - - pic->tf.f = pic->f; - if (s->codec_id != AV_CODEC_ID_WMV3IMAGE && - s->codec_id != AV_CODEC_ID_VC1IMAGE && - s->codec_id != AV_CODEC_ID_MSS2) { - if (edges_needed) { - pic->f->width = s->avctx->width + 2 * EDGE_WIDTH; - pic->f->height = s->avctx->height + 2 * EDGE_WIDTH; - } - - r = ff_thread_get_buffer(s->avctx, &pic->tf, - pic->reference ? AV_GET_BUFFER_FLAG_REF : 0); - } else { - pic->f->width = s->avctx->width; - pic->f->height = s->avctx->height; - pic->f->format = s->avctx->pix_fmt; - r = avcodec_default_get_buffer2(s->avctx, pic->f, 0); - } - - if (r < 0 || !pic->f->buf[0]) { - av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n", - r, pic->f->data[0]); - return -1; - } - - if (edges_needed) { - int i; - for (i = 0; pic->f->data[i]; i++) { - int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) * - pic->f->linesize[i] + - (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0)); - pic->f->data[i] += offset; - } - pic->f->width = s->avctx->width; - pic->f->height = s->avctx->height; - } - - if (s->avctx->hwaccel) { - assert(!pic->hwaccel_picture_private); - if (s->avctx->hwaccel->frame_priv_data_size) { - pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size); - if (!pic->hwaccel_priv_buf) { - av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n"); - return -1; - } - pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data; - } - } - - if (s->linesize && (s->linesize != pic->f->linesize[0] || - s->uvlinesize != pic->f->linesize[1])) { - av_log(s->avctx, AV_LOG_ERROR, - "get_buffer() failed (stride changed)\n"); - ff_mpeg_unref_picture(s->avctx, pic); - return -1; - } - - if (pic->f->linesize[1] != pic->f->linesize[2]) { - av_log(s->avctx, AV_LOG_ERROR, - "get_buffer() failed (uv stride mismatch)\n"); - ff_mpeg_unref_picture(s->avctx, pic); - return -1; - } - - if (!s->edge_emu_buffer && - (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) { - av_log(s->avctx, AV_LOG_ERROR, - "get_buffer() failed to allocate context scratch buffers.\n"); - ff_mpeg_unref_picture(s->avctx, pic); - return ret; - } - - return 0; -} - -void ff_free_picture_tables(Picture *pic) -{ - int i; - - pic->alloc_mb_width = - pic->alloc_mb_height = 0; - - av_buffer_unref(&pic->mb_var_buf); - av_buffer_unref(&pic->mc_mb_var_buf); - av_buffer_unref(&pic->mb_mean_buf); - av_buffer_unref(&pic->mbskip_table_buf); - av_buffer_unref(&pic->qscale_table_buf); - av_buffer_unref(&pic->mb_type_buf); - - for (i = 0; i < 2; i++) { - av_buffer_unref(&pic->motion_val_buf[i]); - av_buffer_unref(&pic->ref_index_buf[i]); - } -} - -static int alloc_picture_tables(MpegEncContext *s, Picture *pic) -{ - const int big_mb_num = s->mb_stride * 
(s->mb_height + 1) + 1; - const int mb_array_size = s->mb_stride * s->mb_height; - const int b8_array_size = s->b8_stride * s->mb_height * 2; - int i; - - - pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2); - pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride); - pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) * - sizeof(uint32_t)); - if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf) - return AVERROR(ENOMEM); - - if (s->encoding) { - pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t)); - pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t)); - pic->mb_mean_buf = av_buffer_allocz(mb_array_size); - if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf) - return AVERROR(ENOMEM); - } - - if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv || - (s->avctx->flags2 & CODEC_FLAG2_EXPORT_MVS)) { - int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t); - int ref_index_size = 4 * mb_array_size; - - for (i = 0; mv_size && i < 2; i++) { - pic->motion_val_buf[i] = av_buffer_allocz(mv_size); - pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size); - if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) - return AVERROR(ENOMEM); - } - } - - pic->alloc_mb_width = s->mb_width; - pic->alloc_mb_height = s->mb_height; - - return 0; -} - -static int make_tables_writable(Picture *pic) -{ - int ret, i; -#define MAKE_WRITABLE(table) \ -do {\ - if (pic->table &&\ - (ret = av_buffer_make_writable(&pic->table)) < 0)\ - return ret;\ -} while (0) - - MAKE_WRITABLE(mb_var_buf); - MAKE_WRITABLE(mc_mb_var_buf); - MAKE_WRITABLE(mb_mean_buf); - MAKE_WRITABLE(mbskip_table_buf); - MAKE_WRITABLE(qscale_table_buf); - MAKE_WRITABLE(mb_type_buf); - - for (i = 0; i < 2; i++) { - MAKE_WRITABLE(motion_val_buf[i]); - MAKE_WRITABLE(ref_index_buf[i]); - } - - return 0; -} - -/** - * Allocate a Picture. - * The pixels are allocated/set by calling get_buffer() if shared = 0 - */ -int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared) -{ - int i, ret; - - if (pic->qscale_table_buf) - if ( pic->alloc_mb_width != s->mb_width - || pic->alloc_mb_height != s->mb_height) - ff_free_picture_tables(pic); - - if (shared) { - av_assert0(pic->f->data[0]); - pic->shared = 1; - } else { - av_assert0(!pic->f->buf[0]); - - if (alloc_frame_buffer(s, pic) < 0) - return -1; - - s->linesize = pic->f->linesize[0]; - s->uvlinesize = pic->f->linesize[1]; - } - - if (!pic->qscale_table_buf) - ret = alloc_picture_tables(s, pic); - else - ret = make_tables_writable(pic); - if (ret < 0) - goto fail; - - if (s->encoding) { - pic->mb_var = (uint16_t*)pic->mb_var_buf->data; - pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data; - pic->mb_mean = pic->mb_mean_buf->data; - } - - pic->mbskip_table = pic->mbskip_table_buf->data; - pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1; - pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1; - - if (pic->motion_val_buf[0]) { - for (i = 0; i < 2; i++) { - pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4; - pic->ref_index[i] = pic->ref_index_buf[i]->data; - } - } - - return 0; -fail: - av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n"); - ff_mpeg_unref_picture(s->avctx, pic); - ff_free_picture_tables(pic); - return AVERROR(ENOMEM); -} - -/** - * Deallocate a picture. 
- */ -void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic) -{ - int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean); - - pic->tf.f = pic->f; - /* WM Image / Screen codecs allocate internal buffers with different - * dimensions / colorspaces; ignore user-defined callbacks for these. */ - if (avctx->codec->id != AV_CODEC_ID_WMV3IMAGE && - avctx->codec->id != AV_CODEC_ID_VC1IMAGE && - avctx->codec->id != AV_CODEC_ID_MSS2) - ff_thread_release_buffer(avctx, &pic->tf); - else if (pic->f) - av_frame_unref(pic->f); - - av_buffer_unref(&pic->hwaccel_priv_buf); - - if (pic->needs_realloc) - ff_free_picture_tables(pic); - - memset((uint8_t*)pic + off, 0, sizeof(*pic) - off); -} - -static int update_picture_tables(Picture *dst, Picture *src) -{ - int i; - -#define UPDATE_TABLE(table)\ -do {\ - if (src->table &&\ - (!dst->table || dst->table->buffer != src->table->buffer)) {\ - av_buffer_unref(&dst->table);\ - dst->table = av_buffer_ref(src->table);\ - if (!dst->table) {\ - ff_free_picture_tables(dst);\ - return AVERROR(ENOMEM);\ - }\ - }\ -} while (0) - - UPDATE_TABLE(mb_var_buf); - UPDATE_TABLE(mc_mb_var_buf); - UPDATE_TABLE(mb_mean_buf); - UPDATE_TABLE(mbskip_table_buf); - UPDATE_TABLE(qscale_table_buf); - UPDATE_TABLE(mb_type_buf); - for (i = 0; i < 2; i++) { - UPDATE_TABLE(motion_val_buf[i]); - UPDATE_TABLE(ref_index_buf[i]); - } - - dst->mb_var = src->mb_var; - dst->mc_mb_var = src->mc_mb_var; - dst->mb_mean = src->mb_mean; - dst->mbskip_table = src->mbskip_table; - dst->qscale_table = src->qscale_table; - dst->mb_type = src->mb_type; - for (i = 0; i < 2; i++) { - dst->motion_val[i] = src->motion_val[i]; - dst->ref_index[i] = src->ref_index[i]; - } - - dst->alloc_mb_width = src->alloc_mb_width; - dst->alloc_mb_height = src->alloc_mb_height; - - return 0; -} - -int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src) -{ - int ret; - - av_assert0(!dst->f->buf[0]); - av_assert0(src->f->buf[0]); - - src->tf.f = src->f; - dst->tf.f = dst->f; - ret = ff_thread_ref_frame(&dst->tf, &src->tf); - if (ret < 0) - goto fail; - - ret = update_picture_tables(dst, src); - if (ret < 0) - goto fail; - - if (src->hwaccel_picture_private) { - dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf); - if (!dst->hwaccel_priv_buf) - goto fail; - dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data; - } - - dst->field_picture = src->field_picture; - dst->mb_var_sum = src->mb_var_sum; - dst->mc_mb_var_sum = src->mc_mb_var_sum; - dst->b_frame_score = src->b_frame_score; - dst->needs_realloc = src->needs_realloc; - dst->reference = src->reference; - dst->shared = src->shared; - - return 0; -fail: - ff_mpeg_unref_picture(avctx, dst); - return ret; + return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 0, + s->chroma_x_shift, s->chroma_y_shift, s->out_format, + s->mb_stride, s->mb_width, s->mb_height, s->b8_stride, + &s->linesize, &s->uvlinesize); } static int init_duplicate_context(MpegEncContext *s) @@ -740,12 +352,12 @@ static int init_duplicate_context(MpegEncContext *s) if (s->mb_height & 1) yc_size += 2*s->b8_stride + 2*s->mb_stride; - s->edge_emu_buffer = + s->sc.edge_emu_buffer = s->me.scratchpad = s->me.temp = - s->rd_scratchpad = - s->b_scratchpad = - s->obmc_scratchpad = NULL; + s->sc.rd_scratchpad = + s->sc.b_scratchpad = + s->sc.obmc_scratchpad = NULL; if (s->encoding) { FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map, @@ -787,12 +399,12 @@ static void free_duplicate_context(MpegEncContext *s) if (!s) return; - av_freep(&s->edge_emu_buffer); + 
av_freep(&s->sc.edge_emu_buffer); av_freep(&s->me.scratchpad); s->me.temp = - s->rd_scratchpad = - s->b_scratchpad = - s->obmc_scratchpad = NULL; + s->sc.rd_scratchpad = + s->sc.b_scratchpad = + s->sc.obmc_scratchpad = NULL; av_freep(&s->dct_error_sum); av_freep(&s->me.map); @@ -805,12 +417,12 @@ static void free_duplicate_context(MpegEncContext *s) static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src) { #define COPY(a) bak->a = src->a - COPY(edge_emu_buffer); + COPY(sc.edge_emu_buffer); COPY(me.scratchpad); COPY(me.temp); - COPY(rd_scratchpad); - COPY(b_scratchpad); - COPY(obmc_scratchpad); + COPY(sc.rd_scratchpad); + COPY(sc.b_scratchpad); + COPY(sc.obmc_scratchpad); COPY(me.map); COPY(me.score_map); COPY(blocks); @@ -845,8 +457,9 @@ int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src) // exchange uv FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]); } - if (!dst->edge_emu_buffer && - (ret = frame_size_alloc(dst, dst->linesize)) < 0) { + if (!dst->sc.edge_emu_buffer && + (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me, + &dst->sc, dst->linesize)) < 0) { av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context " "scratch buffers.\n"); return ret; @@ -920,7 +533,7 @@ do {\ if (s1->pic.f && s1->pic.f->buf[0])\ ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\ else\ - ret = update_picture_tables(&s->pic, &s1->pic);\ + ret = ff_update_picture_tables(&s->pic, &s1->pic);\ if (ret < 0)\ return ret;\ } while (0) @@ -975,9 +588,10 @@ do {\ } // linesize dependend scratch buffer allocation - if (!s->edge_emu_buffer) + if (!s->sc.edge_emu_buffer) if (s1->linesize) { - if (frame_size_alloc(s, s1->linesize) < 0) { + if (ff_mpeg_framesize_alloc(s->avctx, &s->me, + &s->sc, s1->linesize) < 0) { av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context " "scratch buffers.\n"); return AVERROR(ENOMEM); @@ -1046,42 +660,6 @@ void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx) s->codec_tag = avpriv_toupper4(avctx->codec_tag); } -static int init_er(MpegEncContext *s) -{ - ERContext *er = &s->er; - int mb_array_size = s->mb_height * s->mb_stride; - int i; - - er->avctx = s->avctx; - - er->mb_index2xy = s->mb_index2xy; - er->mb_num = s->mb_num; - er->mb_width = s->mb_width; - er->mb_height = s->mb_height; - er->mb_stride = s->mb_stride; - er->b8_stride = s->b8_stride; - - er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride); - er->error_status_table = av_mallocz(mb_array_size); - if (!er->er_temp_buffer || !er->error_status_table) - goto fail; - - er->mbskip_table = s->mbskip_table; - er->mbintra_table = s->mbintra_table; - - for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++) - er->dc_val[i] = s->dc_val[i]; - - er->decode_mb = mpeg_er_decode_mb; - er->opaque = s; - - return 0; -fail: - av_freep(&er->er_temp_buffer); - av_freep(&er->error_status_table); - return AVERROR(ENOMEM); -} - /** * Initialize and allocates MpegEncContext fields dependent on the resolution. 
*/ @@ -1200,7 +778,7 @@ static int init_context_frame(MpegEncContext *s) FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail); // Note the + 1 is for a quicker mpeg4 slice_end detection - return init_er(s); + return ff_mpeg_er_init(s); fail: return AVERROR(ENOMEM); } @@ -1283,16 +861,17 @@ av_cold int ff_mpv_common_init(MpegEncContext *s) s->parse_context.state = -1; s->context_initialized = 1; + memset(s->thread_context, 0, sizeof(s->thread_context)); s->thread_context[0] = s; // if (s->width && s->height) { if (nb_slices > 1) { - for (i = 1; i < nb_slices; i++) { - s->thread_context[i] = av_malloc(sizeof(MpegEncContext)); - memcpy(s->thread_context[i], s, sizeof(MpegEncContext)); - } - for (i = 0; i < nb_slices; i++) { + if (i) { + s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext)); + if (!s->thread_context[i]) + goto fail; + } if (init_duplicate_context(s->thread_context[i]) < 0) goto fail; s->thread_context[i]->start_mb_y = @@ -1410,17 +989,20 @@ int ff_mpv_common_frame_size_change(MpegEncContext *s) if ((err = init_context_frame(s))) goto fail; + memset(s->thread_context, 0, sizeof(s->thread_context)); s->thread_context[0] = s; if (s->width && s->height) { int nb_slices = s->slice_context_count; if (nb_slices > 1) { - for (i = 1; i < nb_slices; i++) { - s->thread_context[i] = av_malloc(sizeof(MpegEncContext)); - memcpy(s->thread_context[i], s, sizeof(MpegEncContext)); - } - for (i = 0; i < nb_slices; i++) { + if (i) { + s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext)); + if (!s->thread_context[i]) { + err = AVERROR(ENOMEM); + goto fail; + } + } if ((err = init_duplicate_context(s->thread_context[i])) < 0) goto fail; s->thread_context[i]->start_mb_y = @@ -1495,72 +1077,6 @@ void ff_mpv_common_end(MpegEncContext *s) s->linesize = s->uvlinesize = 0; } -static void release_unused_pictures(AVCodecContext *avctx, Picture *picture) -{ - int i; - - /* release non reference frames */ - for (i = 0; i < MAX_PICTURE_COUNT; i++) { - if (!picture[i].reference) - ff_mpeg_unref_picture(avctx, &picture[i]); - } -} - -static inline int pic_is_unused(Picture *pic) -{ - if (!pic->f->buf[0]) - return 1; - if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF)) - return 1; - return 0; -} - -static int find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared) -{ - int i; - - if (shared) { - for (i = 0; i < MAX_PICTURE_COUNT; i++) { - if (!picture[i].f->buf[0]) - return i; - } - } else { - for (i = 0; i < MAX_PICTURE_COUNT; i++) { - if (pic_is_unused(&picture[i])) - return i; - } - } - - av_log(avctx, AV_LOG_FATAL, - "Internal error, picture buffer overflow\n"); - /* We could return -1, but the codec would crash trying to draw into a - * non-existing frame anyway. This is safer than waiting for a random crash. - * Also the return of this is never useful, an encoder must only allocate - * as much as allowed in the specification. This has no relationship to how - * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large - * enough for such valid streams). - * Plus, a decoder has to check stream validity and remove frames if too - * many reference frames are around. Waiting for "OOM" is not correct at - * all. Similarly, missing reference frames have to be replaced by - * interpolated/MC frames, anything else is a bug in the codec ... 
- */ - abort(); - return -1; -} - -int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared) -{ - int ret = find_unused_picture(avctx, picture, shared); - - if (ret >= 0 && ret < MAX_PICTURE_COUNT) { - if (picture[ret].needs_realloc) { - picture[ret].needs_realloc = 0; - ff_free_picture_tables(&picture[ret]); - ff_mpeg_unref_picture(avctx, &picture[ret]); - } - } - return ret; -} static void gray_frame(AVFrame *frame) { @@ -1615,7 +1131,11 @@ int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx) ff_mpeg_unref_picture(s->avctx, &s->current_picture); - release_unused_pictures(s->avctx, s->picture); + /* release non reference frames */ + for (i = 0; i < MAX_PICTURE_COUNT; i++) { + if (!s->picture[i].reference) + ff_mpeg_unref_picture(s->avctx, &s->picture[i]); + } if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) { // we already have a unused image @@ -1638,7 +1158,7 @@ int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx) pic->f->coded_picture_number = s->coded_picture_number++; - if (ff_alloc_picture(s, pic, 0) < 0) + if (alloc_picture(s, pic, 0) < 0) return -1; s->current_picture_ptr = pic; @@ -1703,7 +1223,7 @@ int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx) s->last_picture_ptr->f->key_frame = 0; s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P; - if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) { + if (alloc_picture(s, s->last_picture_ptr, 0) < 0) { s->last_picture_ptr = NULL; return -1; } @@ -1744,7 +1264,7 @@ int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx) s->next_picture_ptr->f->key_frame = 0; s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P; - if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) { + if (alloc_picture(s, s->next_picture_ptr, 0) < 0) { s->next_picture_ptr = NULL; return -1; } @@ -2389,12 +1909,12 @@ static inline int hpel_motion_lowres(MpegEncContext *s, if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) || (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) { - s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, + s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src, s->linesize, s->linesize, w + 1, (h + 1) << field_based, src_x, src_y << field_based, h_edge_pos, v_edge_pos); - src = s->edge_emu_buffer; + src = s->sc.edge_emu_buffer; emu = 1; } @@ -2491,14 +2011,14 @@ static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 || (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) { - s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, + s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y, linesize >> field_based, linesize >> field_based, 17, 17 + field_based, src_x, src_y << field_based, h_edge_pos, v_edge_pos); - ptr_y = s->edge_emu_buffer; + ptr_y = s->sc.edge_emu_buffer; if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) { - uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize; + uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize; uint8_t *vbuf =ubuf + 9 * s->uvlinesize; s->vdsp.emulated_edge_mc(ubuf, ptr_cb, uvlinesize >> field_based, uvlinesize >> field_based, @@ -2579,11 +2099,11 @@ static inline void chroma_4mv_motion_lowres(MpegEncContext *s, ptr = ref_picture[1] + offset; if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) || (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) { - s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, + s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr, 
s->uvlinesize, s->uvlinesize, 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
-        ptr = s->edge_emu_buffer;
+        ptr = s->sc.edge_emu_buffer;
         emu = 1;
     }
     sx = (sx << 2) >> lowres;
@@ -2592,11 +2112,11 @@ static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
 
     ptr = ref_picture[2] + offset;
     if (emu) {
-        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                  s->uvlinesize, s->uvlinesize,
                                  9, 9,
                                  src_x, src_y, h_edge_pos, v_edge_pos);
-        ptr = s->edge_emu_buffer;
+        ptr = s->sc.edge_emu_buffer;
     }
     pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
 }
@@ -2746,7 +2266,7 @@ static inline void MPV_motion_lowres(MpegEncContext *s,
 /**
  * find the lowest MB row referenced in the MVs
  */
-int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
+static int lowest_referenced_row(MpegEncContext *s, int dir)
 {
     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
     int my, off, i, mvs;
@@ -2776,7 +2296,7 @@ int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
 
     off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
 
-    return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
+    return av_clip(s->mb_y + off, 0, s->mb_height - 1);
 unhandled:
     return s->mb_height-1;
 }
@@ -2928,9 +2448,9 @@ void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
         dest_cb= s->dest[1];
         dest_cr= s->dest[2];
     }else{
-        dest_y = s->b_scratchpad;
-        dest_cb= s->b_scratchpad+16*linesize;
-        dest_cr= s->b_scratchpad+32*linesize;
+        dest_y = s->sc.b_scratchpad;
+        dest_cb= s->sc.b_scratchpad+16*linesize;
+        dest_cr= s->sc.b_scratchpad+32*linesize;
     }
 
     if (!s->mb_intra) {
@@ -2941,12 +2461,12 @@ void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
         if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
             if (s->mv_dir & MV_DIR_FORWARD) {
                 ff_thread_await_progress(&s->last_picture_ptr->tf,
-                                         ff_mpv_lowest_referenced_row(s, 0),
+                                         lowest_referenced_row(s, 0),
                                          0);
             }
             if (s->mv_dir & MV_DIR_BACKWARD) {
                 ff_thread_await_progress(&s->next_picture_ptr->tf,
-                                         ff_mpv_lowest_referenced_row(s, 1),
+                                         lowest_referenced_row(s, 1),
                                          0);
             }
         }
@@ -3151,35 +2671,6 @@ void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
     }
 }
 
-/**
- * Permute an 8x8 block.
- * @param block the block which will be permuted according to the given permutation vector
- * @param permutation the permutation vector
- * @param last the last non zero coefficient in scantable order, used to speed the permutation up
- * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
- *               (inverse) permutated to scantable order!
- */
-void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
-{
-    int i;
-    int16_t temp[64];
-
-    if(last<=0) return;
-    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
-
-    for(i=0; i<=last; i++){
-        const int j= scantable[i];
-        temp[j]= block[j];
-        block[j]=0;
-    }
-
-    for(i=0; i<=last; i++){
-        const int j= scantable[i];
-        const int perm_j= permutation[j];
-        block[perm_j]= temp[j];
-    }
-}
-
 void ff_mpeg_flush(AVCodecContext *avctx){
     int i;
     MpegEncContext *s = avctx->priv_data;
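
The patch above retires the file-local scratch-buffer handling: the buffers move into a new scratchpad sub-context (s->sc), the local frame_size_alloc() is removed, and callers allocate through ff_mpeg_framesize_alloc(avctx, &me, &sc, linesize) as seen in ff_update_duplicate_context() and the thread-context update path. A minimal sketch of the resulting lazy-allocation pattern, pieced together only from the call sites visible in this diff; the wrapper name ensure_scratch_buffers() is illustrative and is not added by the patch, and the helper prototypes are assumed to come in via the headers this file already includes (which are not shown here):

#include "mpegvideo.h"   /* assumed to declare MpegEncContext and ff_mpeg_framesize_alloc() */

/* Hypothetical helper mirroring the call sites in this diff; not part of the patch. */
static int ensure_scratch_buffers(MpegEncContext *s, int linesize)
{
    int ret;

    /* The scratch buffers are sized from the line stride, so they are
     * allocated lazily, once a frame buffer (and thus a linesize) exists. */
    if (s->sc.edge_emu_buffer)
        return 0;

    ret = ff_mpeg_framesize_alloc(s->avctx, &s->me, &s->sc, linesize);
    if (ret < 0)
        av_log(s->avctx, AV_LOG_ERROR,
               "Failed to allocate context scratch buffers.\n");
    return ret;
}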