2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/avassert.h"
31 #include "libavutil/imgutils.h"
36 #include "mpegvideo.h"
39 #include "xvmc_internal.h"
/* Forward declarations of the C reference DCT coefficient dequantizers,
 * one per codec family (MPEG-1, MPEG-2, H.263) and block type (intra/inter).
 * All share the signature: (context, 64-coefficient block, block index n,
 * quantizer scale). Definitions are later in this file. */
46 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
47 int16_t *block, int n, int qscale);
48 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
49 int16_t *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
51 int16_t *block, int n, int qscale);
/* Bit-exact MPEG-2 intra variant, selected under CODEC_FLAG_BITEXACT
 * (see ff_dct_common_init below). */
52 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
53 int16_t *block, int n, int qscale);
54 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
55 int16_t *block, int n, int qscale);
56 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
57 int16_t *block, int n, int qscale);
58 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
59 int16_t *block, int n, int qscale);
62 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default luma-qscale -> chroma-qscale mapping: the identity for all
 * 32 possible qscale values. Installed in ff_MPV_common_defaults. */
68 static const uint8_t ff_default_chroma_qscale_table[32] = {
69 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
70 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
71 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: a constant 8 for every qscale value
 * (128 entries). Also reused as entry 0 of ff_mpeg2_dc_scale_table. */
74 const uint8_t ff_mpeg1_dc_scale_table[128] = {
75 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table, constant 4 for all 128 qscale values
 * (entry 1 of ff_mpeg2_dc_scale_table below). */
86 static const uint8_t mpeg2_dc_scale_table1[128] = {
87 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table, constant 2 for all 128 qscale values
 * (entry 2 of ff_mpeg2_dc_scale_table below). */
98 static const uint8_t mpeg2_dc_scale_table2[128] = {
99 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table, constant 1 for all 128 qscale values
 * (entry 3 of ff_mpeg2_dc_scale_table below). */
110 static const uint8_t mpeg2_dc_scale_table3[128] = {
111 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Per-precision MPEG-2 DC scale tables (scale 8/4/2/1).
 * NOTE(review): presumably indexed by the stream's intra_dc_precision
 * field — confirm against the users of this table. */
122 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
123 ff_mpeg1_dc_scale_table,
124 mpeg2_dc_scale_table1,
125 mpeg2_dc_scale_table2,
126 mpeg2_dc_scale_table3,
/* Pixel-format list exported for codecs that only support 4:2:0
 * (initializer entries not visible in this excerpt). */
129 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback (installed as er->decode_mb in init_er):
 * reconstructs a single macroblock at (mb_x, mb_y) using the motion
 * parameters supplied by the error concealment code, then hands it to
 * ff_MPV_decode_mb. `opaque` is the MpegEncContext. */
134 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
136 int mb_x, int mb_y, int mb_intra, int mb_skipped)
138 MpegEncContext *s = opaque;
141 s->mv_type = mv_type;
142 s->mb_intra = mb_intra;
143 s->mb_skipped = mb_skipped;
/* Copy the concealment motion vectors into the context. */
146 memcpy(s->mv, mv, sizeof(*mv));
148 ff_init_block_index(s);
149 ff_update_block_index(s);
151 s->dsp.clear_blocks(s->block[0]);
/* Point dest[] at this macroblock's position in the current frame;
 * chroma planes are offset by the chroma subsampling shifts. */
153 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
154 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
155 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
158 ff_MPV_decode_mb(s, s->block);
161 /* init common dct for both encoder and decoder */
/* Installs the C reference dequantizer function pointers, lets each
 * architecture override them with optimized versions, and initializes
 * the permutated scan tables used for coefficient (de)coding. */
162 av_cold int ff_dct_common_init(MpegEncContext *s)
164 ff_dsputil_init(&s->dsp, s->avctx);
165 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
167 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
168 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
169 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
170 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
171 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* Bit-exact output requested: use the exact-spec MPEG-2 intra path. */
172 if (s->flags & CODEC_FLAG_BITEXACT)
173 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
174 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture overrides (each guarded by build-time conditionals
 * not visible in this excerpt). */
177 ff_MPV_common_init_x86(s);
179 ff_MPV_common_init_axp(s);
181 ff_MPV_common_init_arm(s);
183 ff_MPV_common_init_altivec(s);
185 ff_MPV_common_init_bfin(s);
188 /* load & permutate scantables
189 * note: only wmv uses different ones
/* alternate_scan selects the vertical scan for both intra and inter;
 * otherwise the standard zigzag order is used. */
191 if (s->alternate_scan) {
192 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
195 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
196 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
198 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
199 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocates the linesize-dependent scratch buffers (edge emulation,
 * motion-estimation scratchpads). Returns 0 on success or
 * AVERROR(ENOMEM); on failure the edge_emu_buffer is freed again. */
204 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
/* Round the (possibly negative) stride up to a 32-byte-aligned size. */
206 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
208 // edge emu needs blocksize + filter length - 1
209 // (= 17x17 for halfpel / 21x21 for h264)
210 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
211 // at uvlinesize. It supports only YUV420 so 24x24 is enough
212 // linesize * interlaced * MBsize
213 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
216 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 2,
/* The remaining scratch pointers all alias me.scratchpad; they are not
 * separate allocations and must not be freed individually. */
218 s->me.temp = s->me.scratchpad;
219 s->rd_scratchpad = s->me.scratchpad;
220 s->b_scratchpad = s->me.scratchpad;
221 s->obmc_scratchpad = s->me.scratchpad + 16;
225 av_freep(&s->edge_emu_buffer);
226 return AVERROR(ENOMEM);
230 * Allocate a frame buffer
/* Obtains pixel buffers for `pic`, validates the resulting strides
 * against the context, allocates hwaccel private data if needed, and
 * (re)allocates the stride-dependent scratch buffers. */
232 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* WM image / screen codecs manage their own internal buffers, so they
 * bypass the (possibly user-supplied) threaded get_buffer path. */
237 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
238 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
239 s->codec_id != AV_CODEC_ID_MSS2)
240 r = ff_thread_get_buffer(s->avctx, &pic->tf,
241 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
243 pic->f.width = s->avctx->width;
244 pic->f.height = s->avctx->height;
245 pic->f.format = s->avctx->pix_fmt;
246 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
249 if (r < 0 || !pic->f.data[0]) {
250 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Hardware acceleration: allocate the per-picture private data the
 * hwaccel declared via priv_data_size. */
255 if (s->avctx->hwaccel) {
256 assert(!pic->hwaccel_picture_private);
257 if (s->avctx->hwaccel->priv_data_size) {
258 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
259 if (!pic->hwaccel_priv_buf) {
260 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
263 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* The context caches linesize/uvlinesize: a stride change between
 * frames is not supported, so reject the buffer. */
267 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
268 s->uvlinesize != pic->f.linesize[1])) {
269 av_log(s->avctx, AV_LOG_ERROR,
270 "get_buffer() failed (stride changed)\n");
271 ff_mpeg_unref_picture(s, pic);
/* U and V planes must share one stride. */
275 if (pic->f.linesize[1] != pic->f.linesize[2]) {
276 av_log(s->avctx, AV_LOG_ERROR,
277 "get_buffer() failed (uv stride mismatch)\n");
278 ff_mpeg_unref_picture(s, pic);
/* First frame (or scratch buffers freed earlier): size them now that
 * the real stride is known. */
282 if (!s->edge_emu_buffer &&
283 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
284 av_log(s->avctx, AV_LOG_ERROR,
285 "get_buffer() failed to allocate context scratch buffers.\n");
286 ff_mpeg_unref_picture(s, pic);
/* Releases all AVBuffer-backed side-data tables of a Picture
 * (variance, skip, qscale, mb_type, and per-direction motion/ref
 * tables). av_buffer_unref also NULLs the pointers. */
293 static void free_picture_tables(Picture *pic)
297 av_buffer_unref(&pic->mb_var_buf);
298 av_buffer_unref(&pic->mc_mb_var_buf);
299 av_buffer_unref(&pic->mb_mean_buf);
300 av_buffer_unref(&pic->mbskip_table_buf);
301 av_buffer_unref(&pic->qscale_table_buf);
302 av_buffer_unref(&pic->mb_type_buf);
304 for (i = 0; i < 2; i++) {
305 av_buffer_unref(&pic->motion_val_buf[i]);
306 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocates the zero-initialized side-data tables for a Picture:
 * always skip/qscale/mb_type; variance/mean tables for encoding; and
 * motion-vector + reference-index tables when the codec or debug
 * options require them. Returns 0 or AVERROR(ENOMEM); partially
 * allocated tables are cleaned up by the caller via
 * free_picture_tables. */
310 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
/* big_mb_num pads by one MB row plus one; the stride padding gives
 * room for the -1 borders used by prediction. */
312 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
313 const int mb_array_size = s->mb_stride * s->mb_height;
314 const int b8_array_size = s->b8_stride * s->mb_height * 2;
318 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
319 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
320 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
322 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
323 return AVERROR(ENOMEM);
/* Encoder-only statistics tables (guard condition elided in this
 * excerpt). */
326 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
327 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
328 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
329 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
330 return AVERROR(ENOMEM);
/* Motion tables: needed by H.263-family output, all encoders, and
 * motion-vector debugging/visualization. */
333 if (s->out_format == FMT_H263 || s->encoding ||
334 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
335 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
336 int ref_index_size = 4 * mb_array_size;
338 for (i = 0; mv_size && i < 2; i++) {
339 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
340 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
341 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
342 return AVERROR(ENOMEM);
/* Ensures every side-data table of `pic` is privately owned (writable):
 * shared AVBuffer references are copied via av_buffer_make_writable.
 * Returns 0 or a negative AVERROR. */
349 static int make_tables_writable(Picture *pic)
/* Helper macro: make one table writable, bail out on failure (full
 * expansion partially elided in this excerpt). */
352 #define MAKE_WRITABLE(table) \
355 (ret = av_buffer_make_writable(&pic->table)) < 0)\
359 MAKE_WRITABLE(mb_var_buf);
360 MAKE_WRITABLE(mc_mb_var_buf);
361 MAKE_WRITABLE(mb_mean_buf);
362 MAKE_WRITABLE(mbskip_table_buf);
363 MAKE_WRITABLE(qscale_table_buf);
364 MAKE_WRITABLE(mb_type_buf);
366 for (i = 0; i < 2; i++) {
367 MAKE_WRITABLE(motion_val_buf[i]);
368 MAKE_WRITABLE(ref_index_buf[i]);
375 * Allocate a Picture.
376 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Allocates/attaches pixel buffers and side-data tables for `pic` and
 * publishes the convenience pointers (mb_var, qscale_table, ...) into
 * the underlying AVBuffers. On any failure everything is unreferenced
 * and AVERROR(ENOMEM) is returned. */
378 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* shared: caller supplied the pixels already... */
383 assert(pic->f.data[0]);
/* ...otherwise the picture must not yet own pixels. */
386 assert(!pic->f.data[0]);
388 if (alloc_frame_buffer(s, pic) < 0)
/* Cache the strides; alloc_frame_buffer rejects later changes. */
391 s->linesize = pic->f.linesize[0];
392 s->uvlinesize = pic->f.linesize[1];
/* Tables are allocated once, then made writable on reuse. */
395 if (!pic->qscale_table_buf)
396 ret = alloc_picture_tables(s, pic);
398 ret = make_tables_writable(pic);
403 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
404 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
405 pic->mb_mean = pic->mb_mean_buf->data;
408 pic->mbskip_table = pic->mbskip_table_buf->data;
/* +2*mb_stride+1 skips the top/left border rows of the padded table. */
409 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
410 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
412 if (pic->motion_val_buf[0]) {
413 for (i = 0; i < 2; i++) {
414 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
415 pic->ref_index[i] = pic->ref_index_buf[i]->data;
421 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
422 ff_mpeg_unref_picture(s, pic);
423 free_picture_tables(pic);
424 return AVERROR(ENOMEM);
428 * Deallocate a picture.
/* Releases the pixel buffers and hwaccel data of `pic`, then zeroes
 * every Picture field located after mb_mean, preserving the table
 * buffers/pointers that precede it in the struct layout. */
430 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
/* Offset of the first field to clear; relies on Picture's member
 * ordering (tables before mb_mean, per-frame state after it). */
432 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
435 /* WM Image / Screen codecs allocate internal buffers with different
436 * dimensions / colorspaces; ignore user-defined callbacks for these. */
437 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
438 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
439 s->codec_id != AV_CODEC_ID_MSS2)
440 ff_thread_release_buffer(s->avctx, &pic->tf);
442 av_frame_unref(&pic->f);
444 av_buffer_unref(&pic->hwaccel_priv_buf);
446 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Makes dst share src's side-data tables: re-references each AVBuffer
 * that differs and copies the derived data pointers. On a failed
 * av_buffer_ref all of dst's tables are freed and AVERROR(ENOMEM)
 * returned. */
449 static int update_picture_tables(Picture *dst, Picture *src)
/* Helper macro: re-reference one table only when dst does not already
 * share src's buffer (expansion partially elided in this excerpt). */
453 #define UPDATE_TABLE(table)\
456 (!dst->table || dst->table->buffer != src->table->buffer)) {\
457 av_buffer_unref(&dst->table);\
458 dst->table = av_buffer_ref(src->table);\
460 free_picture_tables(dst);\
461 return AVERROR(ENOMEM);\
466 UPDATE_TABLE(mb_var_buf);
467 UPDATE_TABLE(mc_mb_var_buf);
468 UPDATE_TABLE(mb_mean_buf);
469 UPDATE_TABLE(mbskip_table_buf);
470 UPDATE_TABLE(qscale_table_buf);
471 UPDATE_TABLE(mb_type_buf);
472 for (i = 0; i < 2; i++) {
473 UPDATE_TABLE(motion_val_buf[i]);
474 UPDATE_TABLE(ref_index_buf[i]);
/* Copy the derived pointers; they point into the shared buffers. */
477 dst->mb_var = src->mb_var;
478 dst->mc_mb_var = src->mc_mb_var;
479 dst->mb_mean = src->mb_mean;
480 dst->mbskip_table = src->mbskip_table;
481 dst->qscale_table = src->qscale_table;
482 dst->mb_type = src->mb_type;
483 for (i = 0; i < 2; i++) {
484 dst->motion_val[i] = src->motion_val[i];
485 dst->ref_index[i] = src->ref_index[i];
/* Creates dst as a new reference to src: frame buffers (via
 * ff_thread_ref_frame), side-data tables, hwaccel private data, and
 * the scalar per-picture state. dst must be empty on entry. On error
 * dst is fully unreferenced again. */
491 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
495 av_assert0(!dst->f.buf[0]);
496 av_assert0(src->f.buf[0]);
500 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
504 ret = update_picture_tables(dst, src);
508 if (src->hwaccel_picture_private) {
509 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
510 if (!dst->hwaccel_priv_buf)
512 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
/* Plain value fields copied directly. */
515 dst->field_picture = src->field_picture;
516 dst->mb_var_sum = src->mb_var_sum;
517 dst->mc_mb_var_sum = src->mc_mb_var_sum;
518 dst->b_frame_score = src->b_frame_score;
519 dst->needs_realloc = src->needs_realloc;
520 dst->reference = src->reference;
521 dst->shared = src->shared;
/* Error path: undo all partial referencing. */
525 ff_mpeg_unref_picture(s, dst);
/* Allocates the per-slice-thread state of one MpegEncContext: motion
 * estimation maps, optional noise-reduction error sums, coefficient
 * blocks, and (for H.263-family) the AC prediction values. Returns 0
 * or -1; partial allocations are released by ff_MPV_common_end. */
529 static int init_duplicate_context(MpegEncContext *s)
531 int y_size = s->b8_stride * (2 * s->mb_height + 1);
532 int c_size = s->mb_stride * (s->mb_height + 1);
533 int yc_size = y_size + 2 * c_size;
/* Stride-dependent scratch is allocated later (ff_mpv_frame_size_alloc)
 * once the real linesize is known. */
541 s->obmc_scratchpad = NULL;
544 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
545 ME_MAP_SIZE * sizeof(uint32_t), fail)
546 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
547 ME_MAP_SIZE * sizeof(uint32_t), fail)
548 if (s->avctx->noise_reduction) {
549 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
550 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered. */
553 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
554 s->block = s->blocks[0];
556 for (i = 0; i < 12; i++) {
557 s->pblocks[i] = &s->block[i];
560 if (s->out_format == FMT_H263) {
/* AC prediction values: one luma plane followed by two chroma planes;
 * the +stride+1 offsets skip the prediction border. */
562 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
563 yc_size * sizeof(int16_t) * 16, fail);
564 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
565 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
566 s->ac_val[2] = s->ac_val[1] + c_size;
571 return -1; // free() through ff_MPV_common_end()
/* Frees everything init_duplicate_context and ff_mpv_frame_size_alloc
 * allocated for one slice context. obmc_scratchpad aliases
 * me.scratchpad, so it is only NULLed, never freed. */
574 static void free_duplicate_context(MpegEncContext *s)
579 av_freep(&s->edge_emu_buffer);
580 av_freep(&s->me.scratchpad);
584 s->obmc_scratchpad = NULL;
586 av_freep(&s->dct_error_sum);
587 av_freep(&s->me.map);
588 av_freep(&s->me.score_map);
589 av_freep(&s->blocks);
590 av_freep(&s->ac_val_base);
/* Copies the per-thread-owned pointer fields from src into bak, so a
 * wholesale memcpy of the context can be undone for those fields
 * (used by ff_update_duplicate_context below). Only a subset of the
 * COPY() lines is visible in this excerpt. */
594 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
596 #define COPY(a) bak->a = src->a
597 COPY(edge_emu_buffer);
602 COPY(obmc_scratchpad);
609 COPY(me.map_generation);
/* Refreshes a slice-thread context from the master: copies the whole
 * struct but preserves dst's own scratch pointers via
 * backup_duplicate_context, re-derives pblocks, and (re)allocates the
 * stride-dependent scratch buffers if missing. */
621 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
625 // FIXME copy only needed parts
/* Save dst's thread-local pointers, overwrite everything from src,
 * then restore the saved pointers. */
627 backup_duplicate_context(&bak, dst);
628 memcpy(dst, src, sizeof(MpegEncContext));
629 backup_duplicate_context(dst, &bak);
/* pblocks must point into dst's own block array, not src's. */
630 for (i = 0; i < 12; i++) {
631 dst->pblocks[i] = &dst->block[i];
633 if (!dst->edge_emu_buffer &&
634 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
635 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
636 "scratch buffers.\n");
639 // STOP_TIMER("update_duplicate_context")
640 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading update callback: synchronizes the destination
 * decoder context `s` with the source thread's context `s1` —
 * dimensions, picture lists, bitstream buffer, and MPEG-2/interlacing
 * state — initializing or resizing `s` first when needed. */
644 int ff_mpeg_update_thread_context(AVCodecContext *dst,
645 const AVCodecContext *src)
648 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
/* Nothing to do when updating from ourselves or from an
 * uninitialized source. */
650 if (dst == src || !s1->context_initialized)
653 // FIXME can parameters change on I-frames?
654 // in that case dst may need a reinit
655 if (!s->context_initialized) {
/* First update: clone the whole source context, then reset the
 * fields that must be thread-private before common init. */
656 memcpy(s, s1, sizeof(MpegEncContext));
659 s->bitstream_buffer = NULL;
660 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
662 ff_MPV_common_init(s);
/* Resolution changed (or an explicit reinit was requested). */
665 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
667 s->context_reinit = 0;
668 s->height = s1->height;
669 s->width = s1->width;
670 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
674 s->avctx->coded_height = s1->avctx->coded_height;
675 s->avctx->coded_width = s1->avctx->coded_width;
676 s->avctx->width = s1->avctx->width;
677 s->avctx->height = s1->avctx->height;
679 s->coded_picture_number = s1->coded_picture_number;
680 s->picture_number = s1->picture_number;
681 s->input_picture_number = s1->input_picture_number;
/* Re-reference every picture the source thread holds. */
683 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
684 ff_mpeg_unref_picture(s, &s->picture[i]);
685 if (s1->picture[i].f.data[0] &&
686 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
690 #define UPDATE_PICTURE(pic)\
692 ff_mpeg_unref_picture(s, &s->pic);\
693 if (s1->pic.f.data[0])\
694 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
696 ret = update_picture_tables(&s->pic, &s1->pic);\
701 UPDATE_PICTURE(current_picture);
702 UPDATE_PICTURE(last_picture);
703 UPDATE_PICTURE(next_picture);
/* The *_ptr members point into s1's picture array; REBASE_PICTURE
 * translates them into s's array. */
705 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
706 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
707 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
709 // Error/bug resilience
710 s->next_p_frame_damaged = s1->next_p_frame_damaged;
711 s->workaround_bugs = s1->workaround_bugs;
/* Bulk-copy the MPEG-4 field range [time_increment_bits, shape);
 * depends on member ordering in MpegEncContext. */
714 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
715 (char *) &s1->shape - (char *) &s1->time_increment_bits);
718 s->max_b_frames = s1->max_b_frames;
719 s->low_delay = s1->low_delay;
720 s->droppable = s1->droppable;
722 // DivX handling (doesn't work)
723 s->divx_packed = s1->divx_packed;
725 if (s1->bitstream_buffer) {
726 if (s1->bitstream_buffer_size +
727 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
/* NOTE(review): av_fast_malloc's result is not checked here before the
 * memcpy below — on allocation failure s->bitstream_buffer is NULL.
 * Verify against the elided lines / upstream fix. */
728 av_fast_malloc(&s->bitstream_buffer,
729 &s->allocated_bitstream_buffer_size,
730 s1->allocated_bitstream_buffer_size);
731 s->bitstream_buffer_size = s1->bitstream_buffer_size;
732 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
733 s1->bitstream_buffer_size);
734 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
735 FF_INPUT_BUFFER_PADDING_SIZE);
738 // linesize dependend scratch buffer allocation
739 if (!s->edge_emu_buffer)
741 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
742 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
743 "scratch buffers.\n");
744 return AVERROR(ENOMEM);
747 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
748 "be allocated due to unknown size.\n");
752 // MPEG2/interlacing info
/* Bulk-copy [progressive_sequence, rtp_mode); member-order dependent. */
753 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
754 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
756 if (!s1->first_field) {
757 s->last_pict_type = s1->pict_type;
758 if (s1->current_picture_ptr)
759 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
761 if (s1->pict_type != AV_PICTURE_TYPE_B) {
762 s->last_non_b_pict_type = s1->pict_type;
770 * Set the given MpegEncContext to common defaults
771 * (same for encoding and decoding).
772 * The changed fields will not depend upon the
773 * prior state of the MpegEncContext.
775 void ff_MPV_common_defaults(MpegEncContext *s)
/* Default scale/qscale tables: MPEG-1 DC scale, identity chroma map. */
777 s->y_dc_scale_table =
778 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
779 s->chroma_qscale_table = ff_default_chroma_qscale_table;
/* Assume progressive full-frame content until headers say otherwise. */
780 s->progressive_frame = 1;
781 s->progressive_sequence = 1;
782 s->picture_structure = PICT_FRAME;
784 s->coded_picture_number = 0;
785 s->picture_number = 0;
786 s->input_picture_number = 0;
788 s->picture_in_gop_number = 0;
793 s->slice_context_count = 1;
797 * Set the given MpegEncContext to defaults for decoding.
798 * the changed fields will not depend upon
799 * the prior state of the MpegEncContext.
801 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Decoding currently needs only the shared defaults. */
803 ff_MPV_common_defaults(s);
/* Initializes the error-resilience context: mirrors the macroblock
 * geometry from `s`, allocates the status/temp tables, shares the
 * skip/intra/DC tables, and installs mpeg_er_decode_mb as the
 * concealment callback. Returns 0 or AVERROR(ENOMEM). */
806 static int init_er(MpegEncContext *s)
808 ERContext *er = &s->er;
809 int mb_array_size = s->mb_height * s->mb_stride;
812 er->avctx = s->avctx;
815 er->mb_index2xy = s->mb_index2xy;
816 er->mb_num = s->mb_num;
817 er->mb_width = s->mb_width;
818 er->mb_height = s->mb_height;
819 er->mb_stride = s->mb_stride;
820 er->b8_stride = s->b8_stride;
822 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
823 er->error_status_table = av_mallocz(mb_array_size);
824 if (!er->er_temp_buffer || !er->error_status_table)
/* These tables are owned by `s`; ER only borrows them. */
827 er->mbskip_table = s->mbskip_table;
828 er->mbintra_table = s->mbintra_table;
830 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
831 er->dc_val[i] = s->dc_val[i];
833 er->decode_mb = mpeg_er_decode_mb;
/* Error path: release whatever was allocated above. */
838 av_freep(&er->er_temp_buffer);
839 av_freep(&er->error_status_table);
840 return AVERROR(ENOMEM);
844 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Derives the macroblock geometry from width/height, then allocates
 * every resolution-dependent table: index mapping, MV tables (frame
 * and, for MPEG-4/interlaced-ME, field variants), MB type, encoder
 * statistics, H.263 coded-block/cbp tables, DC prediction values, and
 * the intra/skip maps. Returns 0 or AVERROR(ENOMEM); partial
 * allocations are released via free_context_frame by the caller. */
846 static int init_context_frame(MpegEncContext *s)
848 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Strides include +1 padding for prediction borders. */
850 s->mb_width = (s->width + 15) / 16;
851 s->mb_stride = s->mb_width + 1;
852 s->b8_stride = s->mb_width * 2 + 1;
853 s->b4_stride = s->mb_width * 4 + 1;
854 mb_array_size = s->mb_height * s->mb_stride;
855 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
857 /* set default edge pos, will be overriden
858 * in decode_header if needed */
859 s->h_edge_pos = s->mb_width * 16;
860 s->v_edge_pos = s->mb_height * 16;
862 s->mb_num = s->mb_width * s->mb_height;
867 s->block_wrap[3] = s->b8_stride;
869 s->block_wrap[5] = s->mb_stride;
871 y_size = s->b8_stride * (2 * s->mb_height + 1);
872 c_size = s->mb_stride * (s->mb_height + 1);
873 yc_size = y_size + 2 * c_size;
/* Map linear MB index -> padded x+y*stride position. */
875 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
876 fail); // error ressilience code looks cleaner with this
877 for (y = 0; y < s->mb_height; y++)
878 for (x = 0; x < s->mb_width; x++)
879 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
881 s->mb_index2xy[s->mb_height * s->mb_width] =
882 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
885 /* Allocate MV tables */
886 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
887 mv_table_size * 2 * sizeof(int16_t), fail);
888 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
889 mv_table_size * 2 * sizeof(int16_t), fail);
890 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
891 mv_table_size * 2 * sizeof(int16_t), fail);
892 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
893 mv_table_size * 2 * sizeof(int16_t), fail);
894 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
895 mv_table_size * 2 * sizeof(int16_t), fail);
896 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
897 mv_table_size * 2 * sizeof(int16_t), fail);
/* Working pointers skip the first padded row + column. */
898 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
899 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
900 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
901 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
903 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
905 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
907 /* Allocate MB type table */
908 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
909 sizeof(uint16_t), fail); // needed for encoding
911 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
/* Complexity/bit statistics used by rate control. */
914 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
915 mb_array_size * sizeof(float), fail);
916 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
917 mb_array_size * sizeof(float), fail);
/* Field MV tables: only needed for MPEG-4 or interlaced ME. */
921 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
922 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
923 /* interlaced direct mode decoding tables */
924 for (i = 0; i < 2; i++) {
926 for (j = 0; j < 2; j++) {
927 for (k = 0; k < 2; k++) {
928 FF_ALLOCZ_OR_GOTO(s->avctx,
929 s->b_field_mv_table_base[i][j][k],
930 mv_table_size * 2 * sizeof(int16_t),
932 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
935 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
936 mb_array_size * 2 * sizeof(uint8_t), fail);
937 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
938 mv_table_size * 2 * sizeof(int16_t), fail);
939 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
942 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
943 mb_array_size * 2 * sizeof(uint8_t), fail);
946 if (s->out_format == FMT_H263) {
/* Coded-block pattern bookkeeping for the H.263 family. */
948 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
949 s->coded_block = s->coded_block_base + s->b8_stride + 1;
951 /* cbp, ac_pred, pred_dir */
952 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
953 mb_array_size * sizeof(uint8_t), fail);
954 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
955 mb_array_size * sizeof(uint8_t), fail);
958 if (s->h263_pred || s->h263_plus || !s->encoding) {
960 // MN: we need these for error resilience of intra-frames
961 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
962 yc_size * sizeof(int16_t), fail);
963 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
964 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
965 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor (128 << 3). */
966 for (i = 0; i < yc_size; i++)
967 s->dc_val_base[i] = 1024;
970 /* which mb is a intra block */
971 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
972 memset(s->mbintra_table, 1, mb_array_size);
974 /* init macroblock skip table */
975 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
976 // Note the + 1 is for a quicker mpeg4 slice_end detection
980 return AVERROR(ENOMEM);
984 * init common structure for both encoder and decoder.
985 * this assumes that some variables like width/height are already set
/* Top-level context setup shared by encoder and decoder: determines
 * slice-thread count and MB height, validates the pixel format and
 * image size, runs DCT/scan init, allocates encoder tables and the
 * Picture pool, sizes the frame tables (init_context_frame), and
 * creates/initializes one duplicate context per slice thread. On any
 * failure everything is torn down via ff_MPV_common_end. */
987 av_cold int ff_MPV_common_init(MpegEncContext *s)
990 int nb_slices = (HAVE_THREADS &&
991 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
992 s->avctx->thread_count : 1;
994 if (s->encoding && s->avctx->slices)
995 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 rounds MB height to an even number of MB rows. */
997 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
998 s->mb_height = (s->height + 31) / 32 * 2;
1000 s->mb_height = (s->height + 15) / 16;
1002 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1003 av_log(s->avctx, AV_LOG_ERROR,
1004 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp the slice count to both MAX_THREADS and one slice per MB row. */
1008 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1011 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1013 max_slices = MAX_THREADS;
1014 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1015 " reducing to %d\n", nb_slices, max_slices);
1016 nb_slices = max_slices;
1019 if ((s->width || s->height) &&
1020 av_image_check_size(s->width, s->height, 0, s->avctx))
1023 ff_dct_common_init(s);
1025 s->flags = s->avctx->flags;
1026 s->flags2 = s->avctx->flags2;
1028 if (s->width && s->height) {
1029 /* set chroma shifts */
1030 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1032 &s->chroma_y_shift);
1034 /* convert fourcc to upper case */
1035 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1037 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1039 s->avctx->coded_frame = &s->current_picture.f;
/* Encoder-only allocations (guard partially elided in this excerpt). */
1042 if (s->msmpeg4_version) {
1043 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1044 2 * 2 * (MAX_LEVEL + 1) *
1045 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1047 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
/* Quantization matrices: 32 qscales x 64 coefficients each. */
1049 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
1050 64 * 32 * sizeof(int), fail);
1051 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
1052 64 * 32 * sizeof(int), fail);
1053 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
1054 64 * 32 * 2 * sizeof(uint16_t), fail);
1055 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
1056 64 * 32 * 2 * sizeof(uint16_t), fail);
1057 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
1058 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1059 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
1060 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1062 if (s->avctx->noise_reduction) {
1063 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
1064 2 * 64 * sizeof(uint16_t), fail);
/* Picture pool, shared by encoder and decoder. */
1069 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1070 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1071 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1072 avcodec_get_frame_defaults(&s->picture[i].f);
1074 memset(&s->next_picture, 0, sizeof(s->next_picture));
1075 memset(&s->last_picture, 0, sizeof(s->last_picture));
1076 memset(&s->current_picture, 0, sizeof(s->current_picture));
1077 avcodec_get_frame_defaults(&s->next_picture.f);
1078 avcodec_get_frame_defaults(&s->last_picture.f);
1079 avcodec_get_frame_defaults(&s->current_picture.f);
1081 if (s->width && s->height) {
1082 if (init_context_frame(s))
1085 s->parse_context.state = -1;
1088 s->context_initialized = 1;
1089 s->thread_context[0] = s;
/* Create one duplicate context per extra slice thread and split the
 * MB rows evenly (rounded) among all slices. */
1091 if (s->width && s->height) {
1092 if (nb_slices > 1) {
1093 for (i = 1; i < nb_slices; i++) {
1094 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1095 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1098 for (i = 0; i < nb_slices; i++) {
1099 if (init_duplicate_context(s->thread_context[i]) < 0)
1101 s->thread_context[i]->start_mb_y =
1102 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1103 s->thread_context[i]->end_mb_y =
1104 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1107 if (init_duplicate_context(s) < 0)
1110 s->end_mb_y = s->mb_height;
1112 s->slice_context_count = nb_slices;
/* Error path: full teardown. */
1117 ff_MPV_common_end(s);
1122 * Frees and resets MpegEncContext fields depending on the resolution.
1123 * Is used during resolution changes to avoid a full reinitialization of the
/* Counterpart of init_context_frame: frees every resolution-dependent
 * table and NULLs the derived working pointers; finally resets the
 * cached strides so the next frame re-detects them. */
1126 static int free_context_frame(MpegEncContext *s)
1130 av_freep(&s->mb_type);
1131 av_freep(&s->p_mv_table_base);
1132 av_freep(&s->b_forw_mv_table_base);
1133 av_freep(&s->b_back_mv_table_base);
1134 av_freep(&s->b_bidir_forw_mv_table_base);
1135 av_freep(&s->b_bidir_back_mv_table_base);
1136 av_freep(&s->b_direct_mv_table_base);
/* The non-_base pointers are offsets into the freed bases. */
1137 s->p_mv_table = NULL;
1138 s->b_forw_mv_table = NULL;
1139 s->b_back_mv_table = NULL;
1140 s->b_bidir_forw_mv_table = NULL;
1141 s->b_bidir_back_mv_table = NULL;
1142 s->b_direct_mv_table = NULL;
1143 for (i = 0; i < 2; i++) {
1144 for (j = 0; j < 2; j++) {
1145 for (k = 0; k < 2; k++) {
1146 av_freep(&s->b_field_mv_table_base[i][j][k]);
1147 s->b_field_mv_table[i][j][k] = NULL;
1149 av_freep(&s->b_field_select_table[i][j]);
1150 av_freep(&s->p_field_mv_table_base[i][j]);
1151 s->p_field_mv_table[i][j] = NULL;
1153 av_freep(&s->p_field_select_table[i]);
1156 av_freep(&s->dc_val_base);
1157 av_freep(&s->coded_block_base);
1158 av_freep(&s->mbintra_table);
1159 av_freep(&s->cbp_table);
1160 av_freep(&s->pred_dir_table);
1162 av_freep(&s->mbskip_table);
1164 av_freep(&s->er.error_status_table);
1165 av_freep(&s->er.er_temp_buffer);
1166 av_freep(&s->mb_index2xy);
1167 av_freep(&s->lambda_table);
1168 av_freep(&s->cplx_tab);
1169 av_freep(&s->bits_tab);
/* Force stride re-detection on the next alloc_frame_buffer. */
1171 s->linesize = s->uvlinesize = 0;
1176 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Reinitialize all frame-size dependent state of an already initialized
 * context (used on mid-stream resolution changes instead of a full
 * ff_MPV_common_end()/init cycle).  Tears down slice thread contexts and
 * per-frame buffers, recomputes the MB geometry, then rebuilds both.
 * Returns 0 on success; on failure the visible error path calls
 * ff_MPV_common_end(). */
1180 if (s->slice_context_count > 1) {
1181 for (i = 0; i < s->slice_context_count; i++) {
1182 free_duplicate_context(s->thread_context[i]);
/* thread_context[0] is s itself, so only indices >= 1 are freed. */
1184 for (i = 1; i < s->slice_context_count; i++) {
1185 av_freep(&s->thread_context[i]);
1188 free_duplicate_context(s);
1190 if ((err = free_context_frame(s)) < 0)
/* Force reallocation of every picture buffer at the new size on next use. */
1194 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1195 s->picture[i].needs_realloc = 1;
1198 s->last_picture_ptr =
1199 s->next_picture_ptr =
1200 s->current_picture_ptr = NULL;
/* Interlaced MPEG-2 stores MBs in 16-line field units, so the MB height
 * is rounded to 32-line macroblock pairs; everything else uses 16. */
1203 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1204 s->mb_height = (s->height + 31) / 32 * 2;
1206 s->mb_height = (s->height + 15) / 16;
1208 if ((s->width || s->height) &&
1209 av_image_check_size(s->width, s->height, 0, s->avctx))
1210 return AVERROR_INVALIDDATA;
1212 if ((err = init_context_frame(s)))
1215 s->thread_context[0] = s;
1217 if (s->width && s->height) {
1218 int nb_slices = s->slice_context_count;
1219 if (nb_slices > 1) {
/* Clone the main context into each slice thread context, then let
 * init_duplicate_context() allocate the per-thread scratch buffers. */
1220 for (i = 1; i < nb_slices; i++) {
1221 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1222 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1225 for (i = 0; i < nb_slices; i++) {
1226 if (init_duplicate_context(s->thread_context[i]) < 0)
/* Split the MB rows evenly (with rounding) across the slice threads. */
1228 s->thread_context[i]->start_mb_y =
1229 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1230 s->thread_context[i]->end_mb_y =
1231 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1234 if (init_duplicate_context(s) < 0)
1237 s->end_mb_y = s->mb_height;
1239 s->slice_context_count = nb_slices;
/* Error path: a failure above falls through to a full teardown. */
1244 ff_MPV_common_end(s);
1248 /* init common structure for both encoder and decoder */
1249 void ff_MPV_common_end(MpegEncContext *s)
/* Full teardown of an MpegEncContext (encoder and decoder): releases
 * slice thread contexts, bitstream/parse buffers, quantizer matrices,
 * every Picture and its side tables, and finally the per-frame buffers
 * via free_context_frame().  Leaves the context marked uninitialized. */
1253 if (s->slice_context_count > 1) {
1254 for (i = 0; i < s->slice_context_count; i++) {
1255 free_duplicate_context(s->thread_context[i]);
/* Index 0 aliases s itself and must not be av_freep()'d. */
1257 for (i = 1; i < s->slice_context_count; i++) {
1258 av_freep(&s->thread_context[i]);
1260 s->slice_context_count = 1;
1261 } else free_duplicate_context(s);
1263 av_freep(&s->parse_context.buffer);
1264 s->parse_context.buffer_size = 0;
1266 av_freep(&s->bitstream_buffer);
1267 s->allocated_bitstream_buffer_size = 0;
1269 av_freep(&s->avctx->stats_out);
1270 av_freep(&s->ac_stats);
1272 av_freep(&s->q_intra_matrix);
1273 av_freep(&s->q_inter_matrix);
1274 av_freep(&s->q_intra_matrix16);
1275 av_freep(&s->q_inter_matrix16);
1276 av_freep(&s->input_picture);
1277 av_freep(&s->reordered_input_picture);
1278 av_freep(&s->dct_offset);
/* Release every slot in the picture pool, then the pool itself. */
1281 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1282 free_picture_tables(&s->picture[i]);
1283 ff_mpeg_unref_picture(s, &s->picture[i]);
1286 av_freep(&s->picture);
/* The four standalone Picture copies hold their own refs/tables. */
1287 free_picture_tables(&s->last_picture);
1288 ff_mpeg_unref_picture(s, &s->last_picture);
1289 free_picture_tables(&s->current_picture);
1290 ff_mpeg_unref_picture(s, &s->current_picture);
1291 free_picture_tables(&s->next_picture);
1292 ff_mpeg_unref_picture(s, &s->next_picture);
1293 free_picture_tables(&s->new_picture);
1294 ff_mpeg_unref_picture(s, &s->new_picture);
1296 free_context_frame(s);
1298 s->context_initialized = 0;
1299 s->last_picture_ptr =
1300 s->next_picture_ptr =
1301 s->current_picture_ptr = NULL;
1302 s->linesize = s->uvlinesize = 0;
1305 void ff_init_rl(RLTable *rl,
1306 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
/* Build the derived lookup tables (max_level[], max_run[], index_run[])
 * of an RLTable from its table_run[]/table_level[] arrays, once for the
 * "not last" and once for the "last" coefficient class.  When
 * static_store is non-NULL the three tables are packed into that caller
 * provided buffer; otherwise each is av_malloc()'d. */
1308 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1309 uint8_t index_run[MAX_RUN + 1];
1310 int last, run, level, start, end, i;
1312 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1313 if (static_store && rl->max_level[0])
1316 /* compute max_level[], max_run[] and index_run[] */
1317 for (last = 0; last < 2; last++) {
/* rl->n is used as the "unset" sentinel for index_run[]. */
1326 memset(max_level, 0, MAX_RUN + 1);
1327 memset(max_run, 0, MAX_LEVEL + 1);
1328 memset(index_run, rl->n, MAX_RUN + 1);
1329 for (i = start; i < end; i++) {
1330 run = rl->table_run[i];
1331 level = rl->table_level[i];
/* Record only the FIRST table index for each run value. */
1332 if (index_run[run] == rl->n)
1334 if (level > max_level[run])
1335 max_level[run] = level;
1336 if (run > max_run[level])
1337 max_run[level] = run;
/* Static layout: [max_level | max_run | index_run] back to back. */
1340 rl->max_level[last] = static_store[last];
1342 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1343 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1345 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1347 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1348 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1350 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1352 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1353 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1357 void ff_init_vlc_rl(RLTable *rl)
/* Precompute, for each of the 32 qscale values, an RL_VLC table that maps
 * a VLC table entry directly to (len, level, run), with the dequantization
 * (level * qmul + qadd) already folded in so the decoder's inner loop can
 * skip it. */
1361 for (q = 0; q < 32; q++) {
/* qadd = (q - 1) | 1 makes the addend odd, per the H.263-style
 * dequantizer; assumes qmul is set from q in elided code — TODO confirm. */
1363 int qadd = (q - 1) | 1;
1369 for (i = 0; i < rl->vlc.table_size; i++) {
1370 int code = rl->vlc.table[i][0];
1371 int len = rl->vlc.table[i][1];
1374 if (len == 0) { // illegal code
1377 } else if (len < 0) { // more bits needed
/* code == rl->n marks the escape symbol. */
1381 if (code == rl->n) { // esc
1385 run = rl->table_run[code] + 1;
1386 level = rl->table_level[code] * qmul + qadd;
/* "last" codes are flagged by adding 192 to the run. */
1387 if (code >= rl->last) run += 192;
1390 rl->rl_vlc[q][i].len = len;
1391 rl->rl_vlc[q][i].level = level;
1392 rl->rl_vlc[q][i].run = run;
1397 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
/* Unreference every non-reference picture in the pool; when
 * remove_current is 0 the picture pointed to by current_picture_ptr is
 * kept even if it is not a reference frame. */
1401 /* release non reference frames */
1402 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1403 if (!s->picture[i].reference &&
1404 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1405 ff_mpeg_unref_picture(s, &s->picture[i]);
1410 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
/* A picture slot is reusable when it has no data, or when it is flagged
 * for reallocation and is not held as a delayed reference. */
1412 if (pic->f.data[0] == NULL)
1414 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1419 static int find_unused_picture(MpegEncContext *s, int shared)
/* Return the index of a free slot in s->picture[], preferring slots with
 * no allocated data before falling back to pic_is_unused() candidates;
 * AVERROR_INVALIDDATA if the pool is exhausted. */
1424 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1425 if (s->picture[i].f.data[0] == NULL)
1429 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1430 if (pic_is_unused(s, &s->picture[i]))
1435 return AVERROR_INVALIDDATA;
1438 int ff_find_unused_picture(MpegEncContext *s, int shared)
/* Public wrapper around find_unused_picture(): if the chosen slot was
 * marked needs_realloc, drop its old buffers/tables and reset the frame
 * so the caller gets a clean slot.  Returns the slot index or a negative
 * error code. */
1440 int ret = find_unused_picture(s, shared);
1442 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1443 if (s->picture[ret].needs_realloc) {
1444 s->picture[ret].needs_realloc = 0;
1445 free_picture_tables(&s->picture[ret]);
1446 ff_mpeg_unref_picture(s, &s->picture[ret]);
1447 avcodec_get_frame_defaults(&s->picture[ret].f);
1453 static void update_noise_reduction(MpegEncContext *s)
/* Refresh the encoder's per-coefficient DCT noise-reduction offsets from
 * the accumulated error statistics, separately for intra and inter
 * blocks.  Counters are halved once they pass 2^16 so the statistics
 * form a decaying average rather than growing without bound. */
1457 for (intra = 0; intra < 2; intra++) {
1458 if (s->dct_count[intra] > (1 << 16)) {
1459 for (i = 0; i < 64; i++) {
1460 s->dct_error_sum[intra][i] >>= 1;
1462 s->dct_count[intra] >>= 1;
/* offset = strength * count / mean_error, computed with rounding;
 * the +1 in the divisor guards against division by zero. */
1465 for (i = 0; i < 64; i++) {
1466 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1467 s->dct_count[intra] +
1468 s->dct_error_sum[intra][i] / 2) /
1469 (s->dct_error_sum[intra][i] + 1);
1475 * generic function for encode/decode called after coding/decoding
1476 * the header and before a frame is coded/decoded.
1478 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* Per-frame setup shared by encoder and decoder, run after the header is
 * parsed and before coding/decoding starts: recycles old pictures,
 * allocates the current picture, rotates last/next reference pointers,
 * fabricates dummy reference frames for streams that start on a non-key
 * frame, adjusts data pointers for field pictures, and selects the
 * dequantizer functions.  Returns 0 on success (negative on the elided
 * error paths). */
1484 /* mark & release old frames */
1485 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1486 s->last_picture_ptr != s->next_picture_ptr &&
1487 s->last_picture_ptr->f.data[0]) {
1488 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1491 /* release forgotten pictures */
1492 /* if (mpeg124/h263) */
1494 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1495 if (&s->picture[i] != s->last_picture_ptr &&
1496 &s->picture[i] != s->next_picture_ptr &&
1497 s->picture[i].reference && !s->picture[i].needs_realloc) {
/* With frame threading a stale reference can be legitimate, so the
 * "zombie" warning is suppressed in that case. */
1498 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1499 av_log(avctx, AV_LOG_ERROR,
1500 "releasing zombie picture\n");
1501 ff_mpeg_unref_picture(s, &s->picture[i]);
1507 ff_release_unused_pictures(s, 1);
1509 if (s->current_picture_ptr &&
1510 s->current_picture_ptr->f.data[0] == NULL) {
1511 // we already have a unused image
1512 // (maybe it was set before reading the header)
1513 pic = s->current_picture_ptr;
1515 i = ff_find_unused_picture(s, 0);
1517 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1520 pic = &s->picture[i];
/* Droppable frames are never referenced by later frames. */
1524 if (!s->droppable) {
1525 if (s->pict_type != AV_PICTURE_TYPE_B)
1529 pic->f.coded_picture_number = s->coded_picture_number++;
1531 if (ff_alloc_picture(s, pic, 0) < 0)
1534 s->current_picture_ptr = pic;
1535 // FIXME use only the vars from current_pic
1536 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1537 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1538 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* For MPEG-1/2 field pictures, field order is derived from which field
 * arrived first rather than from the header flag. */
1539 if (s->picture_structure != PICT_FRAME)
1540 s->current_picture_ptr->f.top_field_first =
1541 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1543 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1544 !s->progressive_sequence;
1545 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1548 s->current_picture_ptr->f.pict_type = s->pict_type;
1549 // if (s->flags && CODEC_FLAG_QSCALE)
1550 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1551 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1553 ff_mpeg_unref_picture(s, &s->current_picture)&#59;
1554 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1555 s->current_picture_ptr)) < 0)
/* Reference rotation: non-B frames push next -> last, current -> next. */
1558 if (s->pict_type != AV_PICTURE_TYPE_B) {
1559 s->last_picture_ptr = s->next_picture_ptr;
1561 s->next_picture_ptr = s->current_picture_ptr;
1563 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1564 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1565 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1566 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1567 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1568 s->pict_type, s->droppable);
/* Missing last reference (stream starts on P/B, or field-based keyframe):
 * allocate a gray dummy frame so motion compensation has valid data. */
1570 if ((s->last_picture_ptr == NULL ||
1571 s->last_picture_ptr->f.data[0] == NULL) &&
1572 (s->pict_type != AV_PICTURE_TYPE_I ||
1573 s->picture_structure != PICT_FRAME)) {
1574 int h_chroma_shift, v_chroma_shift;
1575 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1576 &h_chroma_shift, &v_chroma_shift);
1577 if (s->pict_type != AV_PICTURE_TYPE_I)
1578 av_log(avctx, AV_LOG_ERROR,
1579 "warning: first frame is no keyframe\n");
1580 else if (s->picture_structure != PICT_FRAME)
1581 av_log(avctx, AV_LOG_INFO,
1582 "allocate dummy last picture for field based first keyframe\n");
1584 /* Allocate a dummy frame */
1585 i = ff_find_unused_picture(s, 0);
1587 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1590 s->last_picture_ptr = &s->picture[i];
1591 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1592 s->last_picture_ptr = NULL;
/* Luma black (0), chroma neutral (0x80) => mid gray picture. */
1596 memset(s->last_picture_ptr->f.data[0], 0,
1597 avctx->height * s->last_picture_ptr->f.linesize[0]);
1598 memset(s->last_picture_ptr->f.data[1], 0x80,
1599 (avctx->height >> v_chroma_shift) *
1600 s->last_picture_ptr->f.linesize[1]);
1601 memset(s->last_picture_ptr->f.data[2], 0x80,
1602 (avctx->height >> v_chroma_shift) *
1603 s->last_picture_ptr->f.linesize[2]);
/* Mark both fields fully decoded so frame threads never wait on it. */
1605 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1606 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same trick for a missing forward reference of a B frame. */
1608 if ((s->next_picture_ptr == NULL ||
1609 s->next_picture_ptr->f.data[0] == NULL) &&
1610 s->pict_type == AV_PICTURE_TYPE_B) {
1611 /* Allocate a dummy frame */
1612 i = ff_find_unused_picture(s, 0);
1614 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1617 s->next_picture_ptr = &s->picture[i];
1618 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1619 s->next_picture_ptr = NULL;
1622 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1623 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
/* Refresh the local last/next Picture copies from the pool pointers. */
1626 if (s->last_picture_ptr) {
1627 ff_mpeg_unref_picture(s, &s->last_picture);
1628 if (s->last_picture_ptr->f.data[0] &&
1629 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1630 s->last_picture_ptr)) < 0)
1633 if (s->next_picture_ptr) {
1634 ff_mpeg_unref_picture(s, &s->next_picture);
1635 if (s->next_picture_ptr->f.data[0] &&
1636 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1637 s->next_picture_ptr)) < 0)
1641 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1642 s->last_picture_ptr->f.data[0]));
/* Field pictures address every other line: bottom field starts one line
 * down, and all linesizes are doubled. */
1644 if (s->picture_structure!= PICT_FRAME) {
1646 for (i = 0; i < 4; i++) {
1647 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1648 s->current_picture.f.data[i] +=
1649 s->current_picture.f.linesize[i];
1651 s->current_picture.f.linesize[i] *= 2;
1652 s->last_picture.f.linesize[i] *= 2;
1653 s->next_picture.f.linesize[i] *= 2;
1657 s->err_recognition = avctx->err_recognition;
1659 /* set dequantizer, we can't do it during init as
1660 * it might change for mpeg4 and we can't do it in the header
1661 * decode as init is not called for mpeg4 there yet */
1662 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1663 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1664 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1665 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1666 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1667 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1669 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1670 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1673 if (s->dct_error_sum) {
1674 assert(s->avctx->noise_reduction && s->encoding);
1675 update_noise_reduction(s);
1678 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1679 return ff_xvmc_field_start(s, avctx);
1684 /* generic function for encode/decode called after a
1685 * frame has been coded/decoded. */
1686 void ff_MPV_frame_end(MpegEncContext *s)
/* Per-frame teardown after a frame is fully coded/decoded: draws edge
 * padding for motion compensation over picture borders (software path
 * only), records last picture type / lambda, syncs the current_picture
 * copy back into the pool, releases non-reference frames, and reports
 * decode completion to waiting frame threads. */
1689 /* redraw edges for the frame if decoding didn't complete */
1690 // just to make sure that all data is rendered.
1691 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1692 ff_xvmc_field_end(s);
/* Edge drawing only applies when we own the pixels (no hwaccel/VDPAU)
 * and the frame can be referenced with unrestricted MVs. */
1693 } else if ((s->er.error_count || s->encoding) &&
1694 !s->avctx->hwaccel &&
1695 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1696 s->unrestricted_mv &&
1697 s->current_picture.reference &&
1699 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1700 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1701 int hshift = desc->log2_chroma_w;
1702 int vshift = desc->log2_chroma_h;
1703 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1704 s->h_edge_pos, s->v_edge_pos,
1705 EDGE_WIDTH, EDGE_WIDTH,
1706 EDGE_TOP | EDGE_BOTTOM);
1707 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1708 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1709 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1710 EDGE_TOP | EDGE_BOTTOM);
1711 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1712 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1713 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1714 EDGE_TOP | EDGE_BOTTOM);
1719 s->last_pict_type = s->pict_type;
1720 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1721 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1722 s->last_non_b_pict_type = s->pict_type;
1725 /* copy back current_picture variables */
1726 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
/* Match pool slot by data pointer; struct assignment copies the updated
 * fields back into the pool. */
1727 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1728 s->picture[i] = s->current_picture;
1732 assert(i < MAX_PICTURE_COUNT);
1736 /* release non-reference frames */
1737 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1738 if (!s->picture[i].reference)
1739 ff_mpeg_unref_picture(s, &s->picture[i]);
1742 // clear copies, to avoid confusion
1744 memset(&s->last_picture, 0, sizeof(Picture));
1745 memset(&s->next_picture, 0, sizeof(Picture));
1746 memset(&s->current_picture, 0, sizeof(Picture));
1748 s->avctx->coded_frame = &s->current_picture_ptr->f;
/* NOTE(review): s->current_picture.reference is read after the memset
 * above zeroed it — looks like this branch can never trigger from here;
 * confirm against the elided lines. */
1750 if (s->current_picture.reference)
1751 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1755 * Print debugging info for the given picture.
1757 void ff_print_debug_info(MpegEncContext *s, Picture *p)
/* Dump per-frame debug info for picture p depending on the avctx debug
 * flags: frame type, then for each macroblock the skip count
 * (FF_DEBUG_SKIP), qscale (FF_DEBUG_QP) and an ASCII-art MB-type map
 * (FF_DEBUG_MB_TYPE).  No-op for hwaccel frames or when p has no
 * mb_type array. */
1760 if (s->avctx->hwaccel || !p || !p->mb_type)
1764 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1767 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
/* NOTE(review): switch reads pict->pict_type but the parameter is named
 * p — presumably pict aliases p in an elided line; confirm. */
1768 switch (pict->pict_type) {
1769 case AV_PICTURE_TYPE_I:
1770 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1772 case AV_PICTURE_TYPE_P:
1773 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1775 case AV_PICTURE_TYPE_B:
1776 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1778 case AV_PICTURE_TYPE_S:
1779 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1781 case AV_PICTURE_TYPE_SI:
1782 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1784 case AV_PICTURE_TYPE_SP:
1785 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1788 for (y = 0; y < s->mb_height; y++) {
1789 for (x = 0; x < s->mb_width; x++) {
1790 if (s->avctx->debug & FF_DEBUG_SKIP) {
1791 int count = s->mbskip_table[x + y * s->mb_stride];
1794 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1796 if (s->avctx->debug & FF_DEBUG_QP) {
1797 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1798 p->qscale_table[x + y * s->mb_stride]);
1800 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1801 int mb_type = p->mb_type[x + y * s->mb_stride];
1802 // Type & MV direction
1803 if (IS_PCM(mb_type))
1804 av_log(s->avctx, AV_LOG_DEBUG, "P");
1805 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1806 av_log(s->avctx, AV_LOG_DEBUG, "A");
1807 else if (IS_INTRA4x4(mb_type))
1808 av_log(s->avctx, AV_LOG_DEBUG, "i");
1809 else if (IS_INTRA16x16(mb_type))
1810 av_log(s->avctx, AV_LOG_DEBUG, "I");
1811 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1812 av_log(s->avctx, AV_LOG_DEBUG, "d");
1813 else if (IS_DIRECT(mb_type))
1814 av_log(s->avctx, AV_LOG_DEBUG, "D");
1815 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1816 av_log(s->avctx, AV_LOG_DEBUG, "g");
1817 else if (IS_GMC(mb_type))
1818 av_log(s->avctx, AV_LOG_DEBUG, "G");
1819 else if (IS_SKIP(mb_type))
1820 av_log(s->avctx, AV_LOG_DEBUG, "S");
1821 else if (!USES_LIST(mb_type, 1))
1822 av_log(s->avctx, AV_LOG_DEBUG, ">");
1823 else if (!USES_LIST(mb_type, 0))
1824 av_log(s->avctx, AV_LOG_DEBUG, "<");
1826 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1827 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second character: partition shape. */
1831 if (IS_8X8(mb_type))
1832 av_log(s->avctx, AV_LOG_DEBUG, "+");
1833 else if (IS_16X8(mb_type))
1834 av_log(s->avctx, AV_LOG_DEBUG, "-");
1835 else if (IS_8X16(mb_type))
1836 av_log(s->avctx, AV_LOG_DEBUG, "|");
1837 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1838 av_log(s->avctx, AV_LOG_DEBUG, " ");
1840 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* Third character: interlaced flag. */
1843 if (IS_INTERLACED(mb_type))
1844 av_log(s->avctx, AV_LOG_DEBUG, "=");
1846 av_log(s->avctx, AV_LOG_DEBUG, " ");
1849 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1855 * find the lowest MB row referenced in the MVs
1857 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* Return the lowest MB row of the reference picture (dir 0 = forward,
 * 1 = backward) that the current MB's motion vectors can touch; used by
 * frame threading to decide how far the reference must be decoded. */
1859 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1860 int my, off, i, mvs;
/* Field pictures / GMC fall through to the conservative answer below. */
1862 if (s->picture_structure != PICT_FRAME || s->mcsel)
1865 switch (s->mv_type) {
1879 for (i = 0; i < mvs; i++) {
/* Normalize half-pel MVs to quarter-pel units before taking extremes. */
1880 my = s->mv[dir][i][1]<<qpel_shift;
1881 my_max = FFMAX(my_max, my);
1882 my_min = FFMIN(my_min, my);
/* >> 6: quarter-pel units -> 16-pixel MB rows, rounded up. */
1885 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1887 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* Conservative fallback: the whole reference frame. */
1889 return s->mb_height-1;
1892 /* put block[] to dest[] */
1893 static inline void put_dct(MpegEncContext *s,
1894 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
/* Dequantize an intra block and write (overwrite) its IDCT into dest. */
1896 s->dct_unquantize_intra(s, block, i, qscale);
1897 s->dsp.idct_put (dest, line_size, block);
1900 /* add block[] to dest[] */
1901 static inline void add_dct(MpegEncContext *s,
1902 int16_t *block, int i, uint8_t *dest, int line_size)
/* Add the IDCT of an already-dequantized block onto dest; skipped when
 * block i has no coded coefficients (block_last_index < 0). */
1904 if (s->block_last_index[i] >= 0) {
1905 s->dsp.idct_add (dest, line_size, block);
1909 static inline void add_dequant_dct(MpegEncContext *s,
1910 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
/* Inter-block variant of add_dct(): dequantize first, then add the IDCT
 * result onto dest; no-op for blocks without coded coefficients. */
1912 if (s->block_last_index[i] >= 0) {
1913 s->dct_unquantize_inter(s, block, i, qscale);
1915 s->dsp.idct_add (dest, line_size, block);
1920 * Clean dc, ac, coded_block for the current non-intra MB.
1922 void ff_clean_intra_table_entries(MpegEncContext *s)
/* Reset DC/AC prediction state (and coded_block for MSMPEG4 v3+) for the
 * current non-intra macroblock, so later intra MBs do not predict from
 * stale values; see the header comment above. */
1924 int wrap = s->b8_stride;
1925 int xy = s->block_index[0];
/* Luma DC predictors reset to 1024 (the neutral mid value, 128 << 3). */
1928 s->dc_val[0][xy + 1 ] =
1929 s->dc_val[0][xy + wrap] =
1930 s->dc_val[0][xy + 1 + wrap] = 1024;
1932 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1933 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1934 if (s->msmpeg4_version>=3) {
1935 s->coded_block[xy ] =
1936 s->coded_block[xy + 1 ] =
1937 s->coded_block[xy + wrap] =
1938 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma tables are indexed at MB (not 8x8 block) resolution. */
1941 wrap = s->mb_stride;
1942 xy = s->mb_x + s->mb_y * wrap;
1944 s->dc_val[2][xy] = 1024;
1946 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1947 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1949 s->mbintra_table[xy]= 0;
1952 /* generic function called after a macroblock has been parsed by the
1953 decoder or after it has been encoded by the encoder.
1955 Important variables used:
1956 s->mb_intra : true if intra macroblock
1957 s->mv_dir : motion vector direction
1958 s->mv_type : motion vector type
1959 s->mv : motion vector
1960 s->interlaced_dct : true if interlaced dct used (mpeg2)
1962 static av_always_inline
1963 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
/* Reconstruct the current macroblock from its parsed coefficients and MV
 * state (see the variable list in the header comment above): handles
 * XvMC offload, DC/AC predictor maintenance, skip tracking, motion
 * compensation, and the dequant/IDCT of the residual for 4:2:0/4:2:2/
 * 4:4:4 and interlaced DCT layouts.  is_mpeg12 is a compile-time
 * template flag that prunes non-MPEG-1/2 paths. */
1966 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1967 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1968 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1972 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1973 /* print DCT coefficients */
1975 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1977 for(j=0; j<64; j++){
/* Print in raster order by undoing the IDCT permutation. */
1978 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
1980 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1984 s->current_picture.qscale_table[mb_xy] = s->qscale;
1986 /* update DC predictors for P macroblocks */
1988 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1989 if(s->mbintra_table[mb_xy])
1990 ff_clean_intra_table_entries(s);
1994 s->last_dc[2] = 128 << s->intra_dc_precision;
1997 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1998 s->mbintra_table[mb_xy]=1;
/* Skip pixel reconstruction entirely for RD mb_decision on intra-only /
 * B-frame encoding unless PSNR stats are requested. */
2000 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2001 uint8_t *dest_y, *dest_cb, *dest_cr;
2002 int dct_linesize, dct_offset;
2003 op_pixels_func (*op_pix)[4];
2004 qpel_mc_func (*op_qpix)[16];
2005 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2006 const int uvlinesize = s->current_picture.f.linesize[1];
2007 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2008 const int block_size = 8;
2010 /* avoid copy if macroblock skipped in last frame too */
2011 /* skip only during decoding as we might trash the buffers during encoding a bit */
2013 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2015 if (s->mb_skipped) {
2017 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2019 } else if(!s->current_picture.reference) {
2022 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT interleaves the two fields: double the stride and
 * offset the second 8-line half by one (un-doubled) line. */
2026 dct_linesize = linesize << s->interlaced_dct;
2027 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2031 dest_cb= s->dest[1];
2032 dest_cr= s->dest[2];
/* Unreadable destination (B frame being displayed concurrently):
 * reconstruct into the scratchpad and copy out at the end. */
2034 dest_y = s->b_scratchpad;
2035 dest_cb= s->b_scratchpad+16*linesize;
2036 dest_cr= s->b_scratchpad+32*linesize;
2040 /* motion handling */
2041 /* decoding or more than one mb_type (MC was already done otherwise) */
2044 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
/* Frame threading: wait until the referenced rows of last/next are
 * actually decoded before reading them. */
2045 if (s->mv_dir & MV_DIR_FORWARD) {
2046 ff_thread_await_progress(&s->last_picture_ptr->tf,
2047 ff_MPV_lowest_referenced_row(s, 0),
2050 if (s->mv_dir & MV_DIR_BACKWARD) {
2051 ff_thread_await_progress(&s->next_picture_ptr->tf,
2052 ff_MPV_lowest_referenced_row(s, 1),
2057 op_qpix= s->me.qpel_put;
2058 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2059 op_pix = s->dsp.put_pixels_tab;
2061 op_pix = s->dsp.put_no_rnd_pixels_tab;
/* Forward MC writes ("put"); a following backward pass averages in. */
2063 if (s->mv_dir & MV_DIR_FORWARD) {
2064 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2065 op_pix = s->dsp.avg_pixels_tab;
2066 op_qpix= s->me.qpel_avg;
2068 if (s->mv_dir & MV_DIR_BACKWARD) {
2069 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2073 /* skip dequant / idct if we are really late ;) */
2074 if(s->avctx->skip_idct){
2075 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2076 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2077 || s->avctx->skip_idct >= AVDISCARD_ALL)
2081 /* add dct residue */
/* Path 1: codecs whose blocks are not yet dequantized here. */
2082 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2083 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2084 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2085 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2086 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2087 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2089 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2090 if (s->chroma_y_shift){
2091 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2092 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2096 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2097 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2098 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2099 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* Path 2: coefficients already dequantized — just add the IDCT. */
2102 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2103 add_dct(s, block[0], 0, dest_y , dct_linesize);
2104 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2105 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2106 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2108 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2109 if(s->chroma_y_shift){//Chroma420
2110 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2111 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2114 dct_linesize = uvlinesize << s->interlaced_dct;
2115 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2117 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2118 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2119 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2120 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2121 if(!s->chroma_x_shift){//Chroma444
2122 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2123 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2124 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2125 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
/* Path 3: WMV2 has its own macroblock adder. */
2130 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2131 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2134 /* dct only in intra block */
/* Intra MBs overwrite the destination ("put") instead of adding. */
2135 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2136 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2137 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2138 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2139 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2141 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2142 if(s->chroma_y_shift){
2143 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2144 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2148 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2149 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2150 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2151 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra blocks are already dequantized: raw idct_put. */
2155 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2156 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2157 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2158 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2160 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2161 if(s->chroma_y_shift){
2162 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2163 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2166 dct_linesize = uvlinesize << s->interlaced_dct;
2167 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2169 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2170 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2171 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2172 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2173 if(!s->chroma_x_shift){//Chroma444
2174 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2175 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2176 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2177 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* Copy the scratchpad reconstruction to the real destination when the
 * destination was not directly writable above. */
2185 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2186 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2187 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2192 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
/* Dispatch to the always-inlined worker with is_mpeg12 as a constant so
 * the compiler emits a specialized MPEG-1/2 path and a generic path. */
2194 if(s->out_format == FMT_MPEG1) {
2195 MPV_decode_mb_internal(s, block, 1);
2198 MPV_decode_mb_internal(s, block, 0);
2202 * @param h is the normal height, this will be reduced automatically if needed for the last row
2204 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2205 Picture *last, int y, int h, int picture_structure,
2206 int first_field, int draw_edges, int low_delay,
2207 int v_edge_pos, int h_edge_pos)
/* Draw edge padding for the just-decoded band of rows and invoke the
 * user's draw_horiz_band callback for it.  h is clipped to the picture
 * bottom (per the header comment above); for B frames in display order
 * the band is taken from `last` instead of `cur`. */
2209 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2210 int hshift = desc->log2_chroma_w;
2211 int vshift = desc->log2_chroma_h;
2212 const int field_pic = picture_structure != PICT_FRAME;
/* Software path only: hwaccel/VDPAU surfaces cannot be padded here. */
2218 if (!avctx->hwaccel &&
2219 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2222 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2223 int *linesize = cur->f.linesize;
2224 int sides = 0, edge_h;
2225 if (y==0) sides |= EDGE_TOP;
2226 if (y + h >= v_edge_pos)
2227 sides |= EDGE_BOTTOM;
2229 edge_h= FFMIN(h, v_edge_pos - y);
2231 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2232 linesize[0], h_edge_pos, edge_h,
2233 EDGE_WIDTH, EDGE_WIDTH, sides);
2234 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2235 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2236 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2237 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2238 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2239 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2242 h = FFMIN(h, avctx->height - y);
/* First field of a field picture: only emit if the caller opted in. */
2244 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2246 if (avctx->draw_horiz_band) {
2248 int offset[AV_NUM_DATA_POINTERS];
/* B frames (and low-delay/coded-order modes) are shown immediately;
 * otherwise the band shown is from the previous (last) picture. */
2251 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2252 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2259 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2260 picture_structure == PICT_FRAME &&
2261 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2262 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2265 offset[0]= y * src->linesize[0];
2267 offset[2]= (y >> vshift) * src->linesize[1];
2268 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2274 avctx->draw_horiz_band(avctx, src, offset,
2275 y, picture_structure, h);
2279 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
/* Convenience wrapper: forward the band to ff_draw_horiz_band() with all
 * parameters pulled from the MpegEncContext; edges are drawn only for
 * unrestricted-MV, non-intra-only streams. */
2281 int draw_edges = s->unrestricted_mv && !s->intra_only;
2282 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2283 &s->last_picture, y, h, s->picture_structure,
2284 s->first_field, draw_edges, s->low_delay,
2285 s->v_edge_pos, s->h_edge_pos);
/**
 * Initialize s->block_index[] (neighbour addressing into the per-block
 * arrays) and s->dest[] (plane write pointers) for the current MB row.
 * NOTE(review): some original lines (the else of the PICT_FRAME branch,
 * braces) are not visible in this excerpt.
 */
2288 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2289 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2290 const int uvlinesize = s->current_picture.f.linesize[1];
/* log2 of the macroblock size in luma samples (16 = 1<<4) */
2291 const int mb_size= 4;
/* indices 0..3: the four 8x8 luma blocks of this MB in b8_stride layout;
 * the -2/-1 terms address the block to the left (for prediction) */
2293 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2294 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2295 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2296 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* indices 4..5: chroma blocks, stored after the luma area */
2297 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2298 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2299 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* dest pointers start one MB to the left of mb_x (advanced per MB) */
2301 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2302 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2303 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2305 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2307 if(s->picture_structure==PICT_FRAME){
2308 s->dest[0] += s->mb_y * linesize << mb_size;
2309 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2310 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: mb_y counts both fields, hence the >>1 per field */
2312 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2313 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2314 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2315 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2321 * Permute an 8x8 block.
2322 * @param block the block which will be permuted according to the given permutation vector
2323 * @param permutation the permutation vector
2324 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
2325 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
2326 * (inverse) permutated to scantable order!
2328 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
2334 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* First pass: presumably copies the coefficients into a temp buffer and
 * clears block[] — the copy statements are not visible in this excerpt,
 * TODO confirm against upstream. */
2336 for(i=0; i<=last; i++){
2337 const int j= scantable[i];
/* Second pass: scatter each coefficient to its permuted position. */
2342 for(i=0; i<=last; i++){
2343 const int j= scantable[i];
2344 const int perm_j= permutation[j];
2345 block[perm_j]= temp[j];
/**
 * Flush decoder state: drop all reference pictures and reset the
 * parser/bitstream bookkeeping (e.g. after a seek).
 */
2349 void ff_mpeg_flush(AVCodecContext *avctx){
2351 MpegEncContext *s = avctx->priv_data;
/* nothing to do before the context/picture pool exists */
2353 if(s==NULL || s->picture==NULL)
2356 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2357 ff_mpeg_unref_picture(s, &s->picture[i]);
2358 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2360 s->mb_x= s->mb_y= 0;
/* reset the raw-bitstream parser so stale partial frames are discarded */
2362 s->parse_context.state= -1;
2363 s->parse_context.frame_start_found= 0;
2364 s->parse_context.overread= 0;
2365 s->parse_context.overread_index= 0;
2366 s->parse_context.index= 0;
2367 s->parse_context.last_index= 0;
2368 s->bitstream_buffer_size=0;
/**
 * MPEG-1 intra dequantizer: DC is scaled by y/c_dc_scale, AC terms by
 * (level * qscale * intra_matrix[j]) >> 3 with odd-ification
 * ((level-1)|1) as mismatch control.
 * NOTE(review): the sign-handling branch lines around the two
 * computations are not visible in this excerpt.
 */
2372 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2373 int16_t *block, int n, int qscale)
2375 int i, level, nCoeffs;
2376 const uint16_t *quant_matrix;
2378 nCoeffs= s->block_last_index[n];
/* DC coefficient: plain scale, no matrix (luma vs chroma scale) */
2381 block[0] = block[0] * s->y_dc_scale;
2383 block[0] = block[0] * s->c_dc_scale;
2384 /* XXX: only mpeg1 */
2385 quant_matrix = s->intra_matrix;
2386 for(i=1;i<=nCoeffs;i++) {
2387 int j= s->intra_scantable.permutated[i];
/* one arm per sign; both produce |level| scaled then forced odd */
2392 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2393 level = (level - 1) | 1;
2396 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2397 level = (level - 1) | 1;
/**
 * MPEG-1 inter dequantizer: every coefficient (including DC) uses
 * ((2*level + 1) * qscale * inter_matrix[j]) >> 4, forced odd.
 * NOTE(review): sign-handling branch lines are not visible in this
 * excerpt.
 */
2404 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2405 int16_t *block, int n, int qscale)
2407 int i, level, nCoeffs;
2408 const uint16_t *quant_matrix;
2410 nCoeffs= s->block_last_index[n];
2412 quant_matrix = s->inter_matrix;
/* inter blocks start at i=0: DC is matrix-quantized like AC */
2413 for(i=0; i<=nCoeffs; i++) {
2414 int j= s->intra_scantable.permutated[i];
2419 level = (((level << 1) + 1) * qscale *
2420 ((int) (quant_matrix[j]))) >> 4;
2421 level = (level - 1) | 1;
2424 level = (((level << 1) + 1) * qscale *
2425 ((int) (quant_matrix[j]))) >> 4;
2426 level = (level - 1) | 1;
/**
 * MPEG-2 intra dequantizer (non-bitexact): like the MPEG-1 version but
 * without the odd-ification step, and with alternate_scan forcing a
 * full 64-coefficient pass.
 * NOTE(review): sign-handling branch lines are not visible in this
 * excerpt.
 */
2433 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2434 int16_t *block, int n, int qscale)
2436 int i, level, nCoeffs;
2437 const uint16_t *quant_matrix;
/* alternate scan order: last_index is unreliable, process all 64 */
2439 if(s->alternate_scan) nCoeffs= 63;
2440 else nCoeffs= s->block_last_index[n];
2443 block[0] = block[0] * s->y_dc_scale;
2445 block[0] = block[0] * s->c_dc_scale;
2446 quant_matrix = s->intra_matrix;
2447 for(i=1;i<=nCoeffs;i++) {
2448 int j= s->intra_scantable.permutated[i];
2453 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2456 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Bitexact variant of the MPEG-2 intra dequantizer — presumably it also
 * accumulates the coefficient parity for the spec's mismatch control;
 * those accumulator lines are not visible in this excerpt, TODO confirm
 * against upstream.
 */
2463 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2464 int16_t *block, int n, int qscale)
2466 int i, level, nCoeffs;
2467 const uint16_t *quant_matrix;
2470 if(s->alternate_scan) nCoeffs= 63;
2471 else nCoeffs= s->block_last_index[n];
2474 block[0] = block[0] * s->y_dc_scale;
2476 block[0] = block[0] * s->c_dc_scale;
2477 quant_matrix = s->intra_matrix;
2478 for(i=1;i<=nCoeffs;i++) {
2479 int j= s->intra_scantable.permutated[i];
2484 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2487 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * MPEG-2 inter dequantizer: ((2*level + 1) * qscale * inter_matrix[j])
 * >> 4 per coefficient; no odd-ification (MPEG-2 handles mismatch
 * differently from MPEG-1).
 * NOTE(review): sign-handling branch lines are not visible in this
 * excerpt.
 */
2496 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2497 int16_t *block, int n, int qscale)
2499 int i, level, nCoeffs;
2500 const uint16_t *quant_matrix;
2503 if(s->alternate_scan) nCoeffs= 63;
2504 else nCoeffs= s->block_last_index[n];
2506 quant_matrix = s->inter_matrix;
2507 for(i=0; i<=nCoeffs; i++) {
2508 int j= s->intra_scantable.permutated[i];
2513 level = (((level << 1) + 1) * qscale *
2514 ((int) (quant_matrix[j]))) >> 4;
2517 level = (((level << 1) + 1) * qscale *
2518 ((int) (quant_matrix[j]))) >> 4;
/**
 * H.263 intra dequantizer: DC scaled by y/c_dc_scale, AC terms
 * reconstructed as level*qmul ± qadd (sign-dependent), with
 * qadd = (qscale-1)|1. The qmul assignment and the sign branches are
 * not visible in this excerpt.
 */
2527 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2528 int16_t *block, int n, int qscale)
2530 int i, level, qmul, qadd;
2533 assert(s->block_last_index[n]>=0);
2539 block[0] = block[0] * s->y_dc_scale;
2541 block[0] = block[0] * s->c_dc_scale;
/* forced odd so reconstruction levels are symmetric around zero */
2542 qadd = (qscale - 1) | 1;
/* raster_end maps last_index (scan order) to the last raster position */
2549 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2551 for(i=1; i<=nCoeffs; i++) {
2555 level = level * qmul - qadd;
2557 level = level * qmul + qadd;
/**
 * H.263 inter dequantizer: same level*qmul ± qadd reconstruction as the
 * intra version, but starting at i=0 and with no special DC scaling.
 * The qmul assignment and the sign branches are not visible in this
 * excerpt.
 */
2564 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2565 int16_t *block, int n, int qscale)
2567 int i, level, qmul, qadd;
2570 assert(s->block_last_index[n]>=0);
2572 qadd = (qscale - 1) | 1;
2575 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2577 for(i=0; i<=nCoeffs; i++) {
2581 level = level * qmul - qadd;
2583 level = level * qmul + qadd;
2591 * set qscale and update qscale dependent variables.
2593 void ff_set_qscale(MpegEncContext * s, int qscale)
/* clamp qscale to the legal 1..31 range; the lower-bound branch and the
 * assignments are not visible in this excerpt */
2597 else if (qscale > 31)
/* derive the chroma qscale and the DC scale factors from the tables */
2601 s->chroma_qscale= s->chroma_qscale_table[qscale];
2603 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2604 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/**
 * Report row-level decode progress for frame threading; skipped for
 * B-frames, partitioned frames, and after a decode error (the reported
 * rows could still change in those cases).
 */
2607 void ff_MPV_report_decode_progress(MpegEncContext *s)
2609 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2610 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
2613 #if CONFIG_ERROR_RESILIENCE
/**
 * Copy the per-frame state the error-resilience module needs out of the
 * MpegEncContext, then start ER tracking for this frame.
 */
2614 void ff_mpeg_er_frame_start(MpegEncContext *s)
2616 ERContext *er = &s->er;
/* reference picture pointers for concealment */
2618 er->cur_pic = s->current_picture_ptr;
2619 er->last_pic = s->last_picture_ptr;
2620 er->next_pic = s->next_picture_ptr;
/* temporal distances and prediction parameters used by concealment MC */
2622 er->pp_time = s->pp_time;
2623 er->pb_time = s->pb_time;
2624 er->quarter_sample = s->quarter_sample;
2625 er->partitioned_frame = s->partitioned_frame;
2627 ff_er_frame_start(er);
2629 #endif /* CONFIG_ERROR_RESILIENCE */