2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
37 #include "mpegvideo.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the per-standard C dequantization routines.
 * Each takes the coded block, the block index n, and the quantizer scale;
 * they are installed as function pointers in ff_dct_common_init() so that
 * architecture-specific versions can override them. */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
/* bitexact variant selected when CODEC_FLAG_BITEXACT is set (see ff_dct_common_init) */
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 int16_t *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default chroma qscale: identity mapping (chroma quantizer == luma quantizer).
 * NOTE(review): closing "};" of this initializer is elided in this view. */
69 static const uint8_t ff_default_chroma_qscale_table[32] = {
70 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
72 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale: constant 8 for every qscale (indexed by qscale 0..127).
 * NOTE(review): closing "};" of this initializer is elided in this view. */
75 const uint8_t ff_mpeg1_dc_scale_table[128] = {
76 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
84 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale, intra_dc_precision = 1: constant 4.
 * NOTE(review): closing "};" of this initializer is elided in this view. */
87 static const uint8_t mpeg2_dc_scale_table1[128] = {
88 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
96 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale, intra_dc_precision = 2: constant 2.
 * NOTE(review): closing "};" of this initializer is elided in this view. */
99 static const uint8_t mpeg2_dc_scale_table2[128] = {
100 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
108 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale, intra_dc_precision = 3: constant 1.
 * NOTE(review): closing "};" of this initializer is elided in this view. */
111 static const uint8_t mpeg2_dc_scale_table3[128] = {
112 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
120 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale table selector, indexed by intra_dc_precision (0..3).
 * Index 0 reuses the MPEG-1 table (scale 8).
 * NOTE(review): closing "};" of this initializer is elided in this view. */
123 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
124 ff_mpeg1_dc_scale_table,
125 mpeg2_dc_scale_table1,
126 mpeg2_dc_scale_table2,
127 mpeg2_dc_scale_table3,
/* Pixel format list exported for decoders that only support 4:2:0.
 * NOTE(review): the initializer body and terminator are elided in this view;
 * presumably it contains AV_PIX_FMT_YUV420P and AV_PIX_FMT_NONE — confirm. */
130 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback: reconstruct one macroblock from the given motion
 * parameters. Installed as er->decode_mb in init_er().
 * NOTE(review): several lines of this function (including an "mv" parameter
 * line, the opening brace, and some assignments) are elided in this view. */
135 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
137 int mb_x, int mb_y, int mb_intra, int mb_skipped)
139 MpegEncContext *s = opaque;
142 s->mv_type = mv_type;
143 s->mb_intra = mb_intra;
144 s->mb_skipped = mb_skipped;
/* copy the caller's motion vectors into the context (mv parameter not visible here) */
147 memcpy(s->mv, mv, sizeof(*mv));
149 ff_init_block_index(s);
150 ff_update_block_index(s);
152 s->dsp.clear_blocks(s->block[0]);
/* compute destination pointers for luma (16x16) and both chroma planes,
 * scaled by the chroma subsampling shifts */
154 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
155 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
156 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
159 ff_MPV_decode_mb(s, s->block);
162 /* init common dct for both encoder and decoder */
/* Installs the C dequantizers, lets per-architecture init functions override
 * them, and builds the permuted scan tables for the chosen IDCT permutation.
 * NOTE(review): braces and the return statement are elided in this view. */
163 av_cold int ff_dct_common_init(MpegEncContext *s)
165 ff_dsputil_init(&s->dsp, s->avctx);
166 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
167 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
169 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
170 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
171 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
172 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
173 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* bitexact output requested: use the slower but reproducible variant */
174 if (s->flags & CODEC_FLAG_BITEXACT)
175 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
176 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* per-architecture overrides (presumably guarded by ARCH_* conditionals
 * that are elided in this view) */
179 ff_MPV_common_init_x86(s);
181 ff_MPV_common_init_axp(s);
183 ff_MPV_common_init_arm(s);
185 ff_MPV_common_init_bfin(s);
187 ff_MPV_common_init_ppc(s);
190 /* load & permutate scantables
191 * note: only wmv uses different ones
193 if (s->alternate_scan) {
194 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
195 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
/* else branch (zigzag default) — "} else {" line elided in this view */
197 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
198 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
200 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
201 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge emulation buffer and
 * the motion-estimation / rate-distortion scratchpads).
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): braces, the "return 0" and the "fail:" label line are elided
 * in this view; the av_freep at the bottom is the failure-path cleanup. */
206 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
/* round |linesize| + 32 up to a multiple of 32 for alignment */
208 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
210 // edge emu needs blocksize + filter length - 1
211 // (= 17x17 for halfpel / 21x21 for h264)
212 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
213 // at uvlinesize. It supports only YUV420 so 24x24 is enough
214 // linesize * interlaced * MBsize
215 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
218 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 2,
/* the ME temp / RD / B-frame / OBMC scratchpads all alias the same buffer */
220 s->me.temp = s->me.scratchpad;
221 s->rd_scratchpad = s->me.scratchpad;
222 s->b_scratchpad = s->me.scratchpad;
223 s->obmc_scratchpad = s->me.scratchpad + 16;
/* failure path: release what was allocated and report out-of-memory */
227 av_freep(&s->edge_emu_buffer);
228 return AVERROR(ENOMEM);
232 * Allocate a frame buffer
/* Gets the pixel buffers for pic, either through the (possibly threaded)
 * user get_buffer callback or, for the WM image/screen codecs, through the
 * default allocator. Also allocates hwaccel private data and validates that
 * the strides match the context. Returns 0 on success, negative on error.
 * NOTE(review): braces, "else" lines, gotos/returns and the enclosing
 * comment-block delimiters are elided in this view. */
234 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* WM image codecs use internal buffers with different dimensions, so the
 * user callback must be bypassed for them (see also ff_mpeg_unref_picture) */
239 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
240 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
241 s->codec_id != AV_CODEC_ID_MSS2)
242 r = ff_thread_get_buffer(s->avctx, &pic->tf,
243 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* fallback path: allocate with the default get_buffer2 */
245 pic->f.width = s->avctx->width;
246 pic->f.height = s->avctx->height;
247 pic->f.format = s->avctx->pix_fmt;
248 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
251 if (r < 0 || !pic->f.data[0]) {
252 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
257 if (s->avctx->hwaccel) {
258 assert(!pic->hwaccel_picture_private);
259 if (s->avctx->hwaccel->priv_data_size) {
260 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
261 if (!pic->hwaccel_priv_buf) {
262 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
265 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* a stride change between frames is not supported */
269 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
270 s->uvlinesize != pic->f.linesize[1])) {
271 av_log(s->avctx, AV_LOG_ERROR,
272 "get_buffer() failed (stride changed)\n");
273 ff_mpeg_unref_picture(s, pic);
/* both chroma planes must share one stride */
277 if (pic->f.linesize[1] != pic->f.linesize[2]) {
278 av_log(s->avctx, AV_LOG_ERROR,
279 "get_buffer() failed (uv stride mismatch)\n");
280 ff_mpeg_unref_picture(s, pic);
/* lazily allocate the linesize-dependent scratch buffers */
284 if (!s->edge_emu_buffer &&
285 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
286 av_log(s->avctx, AV_LOG_ERROR,
287 "get_buffer() failed to allocate context scratch buffers.\n");
288 ff_mpeg_unref_picture(s, pic);
/* Release all side-data table buffers of a Picture (the AVBufferRefs backing
 * mb_var, mc_mb_var, mb_mean, mbskip, qscale, mb_type, motion vectors and
 * reference indices). Safe on already-empty refs: av_buffer_unref(NULL ref)
 * is a no-op. NOTE(review): braces and the loop-variable declaration are
 * elided in this view. */
295 static void free_picture_tables(Picture *pic)
299 av_buffer_unref(&pic->mb_var_buf);
300 av_buffer_unref(&pic->mc_mb_var_buf);
301 av_buffer_unref(&pic->mb_mean_buf);
302 av_buffer_unref(&pic->mbskip_table_buf);
303 av_buffer_unref(&pic->qscale_table_buf);
304 av_buffer_unref(&pic->mb_type_buf);
/* two directions: forward (0) and backward (1) */
306 for (i = 0; i < 2; i++) {
307 av_buffer_unref(&pic->motion_val_buf[i]);
308 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data tables: skip/qscale/mb_type always,
 * encoder statistics tables when encoding, and motion-vector / ref-index
 * tables for H.263-family output, encoding, or MV debug output.
 * Returns 0 on success, AVERROR(ENOMEM) on failure (caller frees via
 * free_picture_tables). NOTE(review): braces and some conditional lines
 * (e.g. the "if (s->encoding)" guard) are elided in this view. */
312 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
314 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
315 const int mb_array_size = s->mb_stride * s->mb_height;
316 const int b8_array_size = s->b8_stride * s->mb_height * 2;
320 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
321 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
322 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
324 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
325 return AVERROR(ENOMEM);
/* encoder-only statistics tables (presumably guarded by s->encoding —
 * the guard line is elided in this view) */
328 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
329 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
330 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
331 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
332 return AVERROR(ENOMEM);
335 if (s->out_format == FMT_H263 || s->encoding ||
336 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
337 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
338 int ref_index_size = 4 * mb_array_size;
340 for (i = 0; mv_size && i < 2; i++) {
341 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
342 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
343 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
344 return AVERROR(ENOMEM);
/* Ensure every shared table buffer of pic is writable (copy-on-write via
 * av_buffer_make_writable). Returns 0 on success; the macro presumably
 * returns the error code on failure (macro body partially elided here). */
351 static int make_tables_writable(Picture *pic)
354 #define MAKE_WRITABLE(table) \
357 (ret = av_buffer_make_writable(&pic->table)) < 0)\
361 MAKE_WRITABLE(mb_var_buf);
362 MAKE_WRITABLE(mc_mb_var_buf);
363 MAKE_WRITABLE(mb_mean_buf);
364 MAKE_WRITABLE(mbskip_table_buf);
365 MAKE_WRITABLE(qscale_table_buf);
366 MAKE_WRITABLE(mb_type_buf);
368 for (i = 0; i < 2; i++) {
369 MAKE_WRITABLE(motion_val_buf[i]);
370 MAKE_WRITABLE(ref_index_buf[i]);
377 * Allocate a Picture.
378 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* For shared pictures the pixel data must already be present; otherwise the
 * frame buffer is obtained via alloc_frame_buffer(). Then the side-data
 * tables are allocated (or made writable if reused) and the convenience
 * pointers into them are set up. Returns 0 on success, AVERROR(ENOMEM) on
 * failure. NOTE(review): braces, "if (shared)" style guards and gotos are
 * elided in this view. */
380 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* shared path: pixels must already exist */
385 assert(pic->f.data[0]);
/* non-shared path: pixels must NOT exist yet; allocate them */
388 assert(!pic->f.data[0]);
390 if (alloc_frame_buffer(s, pic) < 0)
/* remember the strides for subsequent consistency checks */
393 s->linesize = pic->f.linesize[0];
394 s->uvlinesize = pic->f.linesize[1];
397 if (!pic->qscale_table_buf)
398 ret = alloc_picture_tables(s, pic);
/* else: tables exist but may be shared — make them writable */
400 ret = make_tables_writable(pic);
/* encoder statistics pointers (presumably guarded by s->encoding) */
405 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
406 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
407 pic->mb_mean = pic->mb_mean_buf->data;
410 pic->mbskip_table = pic->mbskip_table_buf->data;
/* +2*mb_stride+1 skips the out-of-frame top/left border rows */
411 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
412 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
414 if (pic->motion_val_buf[0]) {
415 for (i = 0; i < 2; i++) {
416 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
417 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* failure path ("fail:" label elided): tear everything down */
423 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
424 ff_mpeg_unref_picture(s, pic);
425 free_picture_tables(pic);
426 return AVERROR(ENOMEM);
430 * Deallocate a picture.
/* Releases the frame buffer (through the threading layer unless the codec is
 * a WM image/screen codec), drops the hwaccel buffer, and zeroes every field
 * past mb_mean so the table pointers/refs survive for potential reuse. */
432 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
/* byte offset of the first field that must be cleared */
434 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
437 /* WM Image / Screen codecs allocate internal buffers with different
438 * dimensions / colorspaces; ignore user-defined callbacks for these. */
439 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
440 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
441 s->codec_id != AV_CODEC_ID_MSS2)
442 ff_thread_release_buffer(s->avctx, &pic->tf);
/* else branch elided in this view (presumably av_frame_unref directly) */
444 av_frame_unref(&pic->f);
446 av_buffer_unref(&pic->hwaccel_priv_buf);
/* wipe everything after the preserved leading fields */
448 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's table buffers reference src's (re-ref only when the underlying
 * buffer differs), then copy the raw table pointers. On a failed ref, all of
 * dst's tables are freed and AVERROR(ENOMEM) returned.
 * NOTE(review): parts of the UPDATE_TABLE macro body and the braces are
 * elided in this view. */
451 static int update_picture_tables(Picture *dst, Picture *src)
455 #define UPDATE_TABLE(table)\
458 (!dst->table || dst->table->buffer != src->table->buffer)) {\
459 av_buffer_unref(&dst->table);\
460 dst->table = av_buffer_ref(src->table);\
462 free_picture_tables(dst);\
463 return AVERROR(ENOMEM);\
468 UPDATE_TABLE(mb_var_buf);
469 UPDATE_TABLE(mc_mb_var_buf);
470 UPDATE_TABLE(mb_mean_buf);
471 UPDATE_TABLE(mbskip_table_buf);
472 UPDATE_TABLE(qscale_table_buf);
473 UPDATE_TABLE(mb_type_buf);
474 for (i = 0; i < 2; i++) {
475 UPDATE_TABLE(motion_val_buf[i]);
476 UPDATE_TABLE(ref_index_buf[i]);
/* the derived pointers can be copied verbatim once the refs are shared */
479 dst->mb_var = src->mb_var;
480 dst->mc_mb_var = src->mc_mb_var;
481 dst->mb_mean = src->mb_mean;
482 dst->mbskip_table = src->mbskip_table;
483 dst->qscale_table = src->qscale_table;
484 dst->mb_type = src->mb_type;
485 for (i = 0; i < 2; i++) {
486 dst->motion_val[i] = src->motion_val[i];
487 dst->ref_index[i] = src->ref_index[i];
/* Create a new reference of src in dst: frame buffer (via the threading
 * layer), side-data tables, hwaccel private data, and the scalar metadata.
 * On any failure, dst is unreferenced and an error returned ("fail:" label
 * and gotos are elided in this view). dst must be empty on entry. */
493 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
497 av_assert0(!dst->f.buf[0]);
498 av_assert0(src->f.buf[0]);
502 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
506 ret = update_picture_tables(dst, src);
510 if (src->hwaccel_picture_private) {
511 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
512 if (!dst->hwaccel_priv_buf)
514 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
/* copy the plain-value metadata */
517 dst->field_picture = src->field_picture;
518 dst->mb_var_sum = src->mb_var_sum;
519 dst->mc_mb_var_sum = src->mc_mb_var_sum;
520 dst->b_frame_score = src->b_frame_score;
521 dst->needs_realloc = src->needs_realloc;
522 dst->reference = src->reference;
523 dst->shared = src->shared;
/* failure path: undo the partial reference */
527 ff_mpeg_unref_picture(s, dst);
/* Allocate the per-slice-thread state: ME maps, optional noise-reduction
 * error sums, DCT block storage, and (for H.263-family) the AC prediction
 * values. Returns 0 on success, -1 on allocation failure; partially
 * allocated buffers are released later by ff_MPV_common_end().
 * NOTE(review): braces and some guard lines (e.g. an encoding check before
 * the ME allocations) are elided in this view. */
531 static int init_duplicate_context(MpegEncContext *s)
533 int y_size = s->b8_stride * (2 * s->mb_height + 1);
534 int c_size = s->mb_stride * (s->mb_height + 1);
535 int yc_size = y_size + 2 * c_size;
543 s->obmc_scratchpad = NULL;
/* motion-estimation hash map and score map */
546 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
547 ME_MAP_SIZE * sizeof(uint32_t), fail)
548 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
549 ME_MAP_SIZE * sizeof(uint32_t), fail)
550 if (s->avctx->noise_reduction) {
551 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
552 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered */
555 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
556 s->block = s->blocks[0];
558 for (i = 0; i < 12; i++) {
559 s->pblocks[i] = &s->block[i];
562 if (s->out_format == FMT_H263) {
/* AC prediction values: 16 int16_t per block position, for Y + 2 chroma */
564 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
565 yc_size * sizeof(int16_t) * 16, fail);
566 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
567 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
568 s->ac_val[2] = s->ac_val[1] + c_size;
573 return -1; // free() through ff_MPV_common_end()
/* Free everything allocated by init_duplicate_context() plus the
 * linesize-dependent scratch buffers. av_freep() NULLs the pointers, so this
 * is safe on a partially-initialized context.
 * NOTE(review): braces and some freeing lines are elided in this view. */
576 static void free_duplicate_context(MpegEncContext *s)
581 av_freep(&s->edge_emu_buffer);
582 av_freep(&s->me.scratchpad);
/* obmc_scratchpad aliases me.scratchpad (see ff_mpv_frame_size_alloc),
 * so it is only reset, not freed */
586 s->obmc_scratchpad = NULL;
588 av_freep(&s->dct_error_sum);
589 av_freep(&s->me.map);
590 av_freep(&s->me.score_map);
591 av_freep(&s->blocks);
592 av_freep(&s->ac_val_base);
/* Save the per-thread pointer fields of src into bak so that a wholesale
 * memcpy of the context (see ff_update_duplicate_context) does not clobber
 * them. NOTE(review): most COPY(...) lines are elided in this view. */
596 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
598 #define COPY(a) bak->a = src->a
599 COPY(edge_emu_buffer);
604 COPY(obmc_scratchpad);
611 COPY(me.map_generation);
/* Copy the main context into a slice-thread context while preserving the
 * thread's own buffers: back them up, memcpy the whole struct, restore them,
 * then re-point pblocks and (re)allocate scratch buffers if needed.
 * NOTE(review): braces, declarations of bak/i/ret and the return statements
 * are elided in this view. */
623 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
627 // FIXME copy only needed parts
629 backup_duplicate_context(&bak, dst);
630 memcpy(dst, src, sizeof(MpegEncContext));
631 backup_duplicate_context(dst, &bak);
/* block pointers must reference dst's own storage, not src's */
632 for (i = 0; i < 12; i++) {
633 dst->pblocks[i] = &dst->block[i];
635 if (!dst->edge_emu_buffer &&
636 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
637 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
638 "scratch buffers.\n");
641 // STOP_TIMER("update_duplicate_context")
642 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading update: synchronize the destination decoder context with
 * the source after the source finished a frame. Handles first-time init,
 * resolution changes, picture references, bitstream-buffer duplication and
 * scalar state. NOTE(review): many braces, declarations and return
 * statements are elided in this view. */
646 int ff_mpeg_update_thread_context(AVCodecContext *dst,
647 const AVCodecContext *src)
650 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
/* nothing to do if same context or source not ready */
652 if (dst == src || !s1->context_initialized)
655 // FIXME can parameters change on I-frames?
656 // in that case dst may need a reinit
657 if (!s->context_initialized) {
/* bootstrap dst from src, but do not inherit src's bitstream buffer */
658 memcpy(s, s1, sizeof(MpegEncContext));
661 s->bitstream_buffer = NULL;
662 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
664 ff_MPV_common_init(s);
667 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
669 s->context_reinit = 0;
670 s->height = s1->height;
671 s->width = s1->width;
672 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
676 s->avctx->coded_height = s1->avctx->coded_height;
677 s->avctx->coded_width = s1->avctx->coded_width;
678 s->avctx->width = s1->avctx->width;
679 s->avctx->height = s1->avctx->height;
681 s->coded_picture_number = s1->coded_picture_number;
682 s->picture_number = s1->picture_number;
683 s->input_picture_number = s1->input_picture_number;
/* re-reference every picture that exists in the source context */
685 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
686 ff_mpeg_unref_picture(s, &s->picture[i]);
687 if (s1->picture[i].f.data[0] &&
688 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
692 #define UPDATE_PICTURE(pic)\
694 ff_mpeg_unref_picture(s, &s->pic);\
695 if (s1->pic.f.data[0])\
696 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
/* else: picture has only tables, no pixels — copy just the tables */
698 ret = update_picture_tables(&s->pic, &s1->pic);\
703 UPDATE_PICTURE(current_picture);
704 UPDATE_PICTURE(last_picture);
705 UPDATE_PICTURE(next_picture);
/* rebase the picture pointers from s1's array into s's array */
707 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
708 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
709 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
711 // Error/bug resilience
712 s->next_p_frame_damaged = s1->next_p_frame_damaged;
713 s->workaround_bugs = s1->workaround_bugs;
/* bulk-copy the MPEG-4 field range [time_increment_bits, shape) */
716 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
717 (char *) &s1->shape - (char *) &s1->time_increment_bits);
720 s->max_b_frames = s1->max_b_frames;
721 s->low_delay = s1->low_delay;
722 s->droppable = s1->droppable;
724 // DivX handling (doesn't work)
725 s->divx_packed = s1->divx_packed;
727 if (s1->bitstream_buffer) {
728 if (s1->bitstream_buffer_size +
729 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
/* NOTE(review): av_fast_malloc's result is not checked here — on OOM
 * s->bitstream_buffer stays NULL and the memcpy below would crash.
 * Cannot fix safely with the surrounding lines elided; confirm upstream. */
730 av_fast_malloc(&s->bitstream_buffer,
731 &s->allocated_bitstream_buffer_size,
732 s1->allocated_bitstream_buffer_size);
733 s->bitstream_buffer_size = s1->bitstream_buffer_size;
734 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
735 s1->bitstream_buffer_size);
736 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
737 FF_INPUT_BUFFER_PADDING_SIZE);
740 // linesize dependend scratch buffer allocation
741 if (!s->edge_emu_buffer)
/* a valid linesize is required; guard line presumably elided here */
743 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
744 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
745 "scratch buffers.\n");
746 return AVERROR(ENOMEM);
/* else: linesize unknown — cannot size the buffers yet */
749 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
750 "be allocated due to unknown size.\n");
754 // MPEG2/interlacing info
755 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
756 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
758 if (!s1->first_field) {
759 s->last_pict_type = s1->pict_type;
760 if (s1->current_picture_ptr)
761 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
763 if (s1->pict_type != AV_PICTURE_TYPE_B) {
764 s->last_non_b_pict_type = s1->pict_type;
772 * Set the given MpegEncContext to common defaults
773 * (same for encoding and decoding).
774 * The changed fields will not depend upon the
775 * prior state of the MpegEncContext.
777 void ff_MPV_common_defaults(MpegEncContext *s)
/* DC/qscale tables default to MPEG-1 / identity */
779 s->y_dc_scale_table =
780 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
781 s->chroma_qscale_table = ff_default_chroma_qscale_table;
/* assume progressive full-frame coding until a header says otherwise */
782 s->progressive_frame = 1;
783 s->progressive_sequence = 1;
784 s->picture_structure = PICT_FRAME;
786 s->coded_picture_number = 0;
787 s->picture_number = 0;
788 s->input_picture_number = 0;
790 s->picture_in_gop_number = 0;
/* single slice context until threading is configured */
795 s->slice_context_count = 1;
799 * Set the given MpegEncContext to defaults for decoding.
800 * the changed fields will not depend upon
801 * the prior state of the MpegEncContext.
803 void ff_MPV_decode_defaults(MpegEncContext *s)
/* currently just the common defaults; decoder-specific defaults, if any,
 * are elided in this view */
805 ff_MPV_common_defaults(s);
/* Initialize the error-resilience context embedded in s: mirror the
 * macroblock geometry, allocate the ER scratch/status tables, wire up the
 * shared skip/intra/dc tables and install the decode_mb callback.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure ("fail:"
 * label and braces elided in this view). */
808 static int init_er(MpegEncContext *s)
810 ERContext *er = &s->er;
811 int mb_array_size = s->mb_height * s->mb_stride;
814 er->avctx = s->avctx;
/* copy the macroblock grid geometry */
817 er->mb_index2xy = s->mb_index2xy;
818 er->mb_num = s->mb_num;
819 er->mb_width = s->mb_width;
820 er->mb_height = s->mb_height;
821 er->mb_stride = s->mb_stride;
822 er->b8_stride = s->b8_stride;
824 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
825 er->error_status_table = av_mallocz(mb_array_size);
826 if (!er->er_temp_buffer || !er->error_status_table)
/* share the context's skip/intra/dc tables rather than duplicating them */
829 er->mbskip_table = s->mbskip_table;
830 er->mbintra_table = s->mbintra_table;
832 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
833 er->dc_val[i] = s->dc_val[i];
835 er->decode_mb = mpeg_er_decode_mb;
/* failure path */
840 av_freep(&er->er_temp_buffer);
841 av_freep(&er->error_status_table);
842 return AVERROR(ENOMEM);
846 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Derives the macroblock geometry from width/height and allocates every
 * resolution-dependent table (MV tables, MB type, encoder-only lambda/cplx
 * tables, interlaced-direct-mode tables, H.263 coded-block/cbp/pred tables,
 * DC values, intra and skip tables). Returns 0 on success, AVERROR(ENOMEM)
 * via the "fail:" path. Freed by free_context_frame().
 * NOTE(review): braces, several guard lines (e.g. s->encoding checks) and
 * some statements are elided in this view. */
848 static int init_context_frame(MpegEncContext *s)
850 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* geometry: 16x16 macroblocks, strides padded by one column */
852 s->mb_width = (s->width + 15) / 16;
853 s->mb_stride = s->mb_width + 1;
854 s->b8_stride = s->mb_width * 2 + 1;
855 s->b4_stride = s->mb_width * 4 + 1;
856 mb_array_size = s->mb_height * s->mb_stride;
857 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
859 /* set default edge pos, will be overriden
860 * in decode_header if needed */
861 s->h_edge_pos = s->mb_width * 16;
862 s->v_edge_pos = s->mb_height * 16;
864 s->mb_num = s->mb_width * s->mb_height;
/* block wrap values for luma (b8 grid) and chroma (mb grid);
 * indices 0-2 and 4 are elided in this view */
869 s->block_wrap[3] = s->b8_stride;
871 s->block_wrap[5] = s->mb_stride;
873 y_size = s->b8_stride * (2 * s->mb_height + 1);
874 c_size = s->mb_stride * (s->mb_height + 1);
875 yc_size = y_size + 2 * c_size;
877 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
878 fail); // error ressilience code looks cleaner with this
879 for (y = 0; y < s->mb_height; y++)
880 for (x = 0; x < s->mb_width; x++)
881 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
883 s->mb_index2xy[s->mb_height * s->mb_width] =
884 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
887 /* Allocate MV tables */
/* presumably encoder-only (guard line elided); each table holds
 * mv_table_size (x,y) int16_t pairs */
888 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
889 mv_table_size * 2 * sizeof(int16_t), fail);
890 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
891 mv_table_size * 2 * sizeof(int16_t), fail);
892 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
893 mv_table_size * 2 * sizeof(int16_t), fail);
894 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
895 mv_table_size * 2 * sizeof(int16_t), fail);
896 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
897 mv_table_size * 2 * sizeof(int16_t), fail);
898 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
899 mv_table_size * 2 * sizeof(int16_t), fail);
/* working pointers skip the top/left border row/column */
900 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
901 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
902 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
903 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
905 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
907 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
909 /* Allocate MB type table */
910 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
911 sizeof(uint16_t), fail); // needed for encoding
913 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
916 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
917 mb_array_size * sizeof(float), fail);
918 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
919 mb_array_size * sizeof(float), fail);
923 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
924 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
925 /* interlaced direct mode decoding tables */
926 for (i = 0; i < 2; i++) {
928 for (j = 0; j < 2; j++) {
929 for (k = 0; k < 2; k++) {
930 FF_ALLOCZ_OR_GOTO(s->avctx,
931 s->b_field_mv_table_base[i][j][k],
932 mv_table_size * 2 * sizeof(int16_t),
934 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
937 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
938 mb_array_size * 2 * sizeof(uint8_t), fail);
939 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
940 mv_table_size * 2 * sizeof(int16_t), fail);
941 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
944 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
945 mb_array_size * 2 * sizeof(uint8_t), fail);
948 if (s->out_format == FMT_H263) {
/* coded-block pattern for AC prediction */
950 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
951 s->coded_block = s->coded_block_base + s->b8_stride + 1;
953 /* cbp, ac_pred, pred_dir */
954 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
955 mb_array_size * sizeof(uint8_t), fail);
956 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
957 mb_array_size * sizeof(uint8_t), fail);
960 if (s->h263_pred || s->h263_plus || !s->encoding) {
962 // MN: we need these for error resilience of intra-frames
963 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
964 yc_size * sizeof(int16_t), fail);
965 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
966 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
967 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the DC prediction reset value */
968 for (i = 0; i < yc_size; i++)
969 s->dc_val_base[i] = 1024;
972 /* which mb is a intra block */
973 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
974 memset(s->mbintra_table, 1, mb_array_size);
976 /* init macroblock skip table */
977 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
978 // Note the + 1 is for a quicker mpeg4 slice_end detection
/* "fail:" label elided in this view */
982 return AVERROR(ENOMEM);
986 * init common structure for both encoder and decoder.
987 * this assumes that some variables like width/height are already set
/* Sets up slice-thread count, macroblock height, DSP contexts, encoder-only
 * tables, the picture array, the resolution-dependent tables (via
 * init_context_frame) and one duplicate context per slice thread.
 * On failure jumps to a cleanup path that calls ff_MPV_common_end().
 * NOTE(review): braces, several guard lines (s->encoding etc.), gotos and
 * return statements are elided in this view. */
989 av_cold int ff_MPV_common_init(MpegEncContext *s)
992 int nb_slices = (HAVE_THREADS &&
993 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
994 s->avctx->thread_count : 1;
996 if (s->encoding && s->avctx->slices)
997 nb_slices = s->avctx->slices;
/* interlaced MPEG-2 needs an even MB height rounded to 32-pixel units */
999 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1000 s->mb_height = (s->height + 31) / 32 * 2;
1002 s->mb_height = (s->height + 15) / 16;
1004 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1005 av_log(s->avctx, AV_LOG_ERROR,
1006 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* clamp the slice count to the thread limit and the MB height */
1010 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1013 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1015 max_slices = MAX_THREADS;
1016 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1017 " reducing to %d\n", nb_slices, max_slices);
1018 nb_slices = max_slices;
1021 if ((s->width || s->height) &&
1022 av_image_check_size(s->width, s->height, 0, s->avctx))
1025 ff_dct_common_init(s);
1027 s->flags = s->avctx->flags;
1028 s->flags2 = s->avctx->flags2;
1030 if (s->width && s->height) {
1031 /* set chroma shifts */
1032 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1034 &s->chroma_y_shift);
1036 /* convert fourcc to upper case */
1037 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1039 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1041 s->avctx->coded_frame = &s->current_picture.f;
/* encoder-only allocations (s->encoding guard presumably elided) */
1044 if (s->msmpeg4_version) {
1045 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1046 2 * 2 * (MAX_LEVEL + 1) *
1047 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1049 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1051 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
1052 64 * 32 * sizeof(int), fail);
1053 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
1054 64 * 32 * sizeof(int), fail);
1055 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
1056 64 * 32 * 2 * sizeof(uint16_t), fail);
1057 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
1058 64 * 32 * 2 * sizeof(uint16_t), fail);
1059 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
1060 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1061 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
1062 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1064 if (s->avctx->noise_reduction) {
1065 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
1066 2 * 64 * sizeof(uint16_t), fail);
/* picture pool shared by encoder and decoder */
1071 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1072 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1073 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1074 avcodec_get_frame_defaults(&s->picture[i].f);
1076 memset(&s->next_picture, 0, sizeof(s->next_picture));
1077 memset(&s->last_picture, 0, sizeof(s->last_picture));
1078 memset(&s->current_picture, 0, sizeof(s->current_picture));
1079 avcodec_get_frame_defaults(&s->next_picture.f);
1080 avcodec_get_frame_defaults(&s->last_picture.f);
1081 avcodec_get_frame_defaults(&s->current_picture.f);
1083 if (s->width && s->height) {
1084 if (init_context_frame(s))
1087 s->parse_context.state = -1;
1090 s->context_initialized = 1;
1091 s->thread_context[0] = s;
1093 if (s->width && s->height) {
1094 if (nb_slices > 1) {
/* each additional slice thread gets its own copy of the context */
1095 for (i = 1; i < nb_slices; i++) {
1096 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1097 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1100 for (i = 0; i < nb_slices; i++) {
1101 if (init_duplicate_context(s->thread_context[i]) < 0)
/* divide the MB rows evenly among the slice threads */
1103 s->thread_context[i]->start_mb_y =
1104 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1105 s->thread_context[i]->end_mb_y =
1106 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
/* single-slice path */
1109 if (init_duplicate_context(s) < 0)
1112 s->end_mb_y = s->mb_height;
1114 s->slice_context_count = nb_slices;
/* "fail:" cleanup path */
1119 ff_MPV_common_end(s);
1124 * Frees and resets MpegEncContext fields depending on the resolution.
1125 * Is used during resolution changes to avoid a full reinitialization of the
/* Inverse of init_context_frame(): frees every resolution-dependent table
 * and NULLs the derived pointers so the context can be re-sized.
 * NOTE(review): braces, the i/j/k declarations and the return statement are
 * elided in this view. */
1128 static int free_context_frame(MpegEncContext *s)
1132 av_freep(&s->mb_type);
1133 av_freep(&s->p_mv_table_base);
1134 av_freep(&s->b_forw_mv_table_base);
1135 av_freep(&s->b_back_mv_table_base);
1136 av_freep(&s->b_bidir_forw_mv_table_base);
1137 av_freep(&s->b_bidir_back_mv_table_base);
1138 av_freep(&s->b_direct_mv_table_base);
/* the working pointers point into the freed bases — clear them too */
1139 s->p_mv_table = NULL;
1140 s->b_forw_mv_table = NULL;
1141 s->b_back_mv_table = NULL;
1142 s->b_bidir_forw_mv_table = NULL;
1143 s->b_bidir_back_mv_table = NULL;
1144 s->b_direct_mv_table = NULL;
1145 for (i = 0; i < 2; i++) {
1146 for (j = 0; j < 2; j++) {
1147 for (k = 0; k < 2; k++) {
1148 av_freep(&s->b_field_mv_table_base[i][j][k]);
1149 s->b_field_mv_table[i][j][k] = NULL;
1151 av_freep(&s->b_field_select_table[i][j]);
1152 av_freep(&s->p_field_mv_table_base[i][j]);
1153 s->p_field_mv_table[i][j] = NULL;
1155 av_freep(&s->p_field_select_table[i]);
1158 av_freep(&s->dc_val_base);
1159 av_freep(&s->coded_block_base);
1160 av_freep(&s->mbintra_table);
1161 av_freep(&s->cbp_table);
1162 av_freep(&s->pred_dir_table);
1164 av_freep(&s->mbskip_table);
1166 av_freep(&s->er.error_status_table);
1167 av_freep(&s->er.er_temp_buffer);
1168 av_freep(&s->mb_index2xy);
1169 av_freep(&s->lambda_table);
1170 av_freep(&s->cplx_tab);
1171 av_freep(&s->bits_tab);
/* strides become invalid once the tables are gone */
1173 s->linesize = s->uvlinesize = 0;
/* Reinitialize the per-resolution parts of the context after a frame size
 * change: tear down slice thread contexts and frame tables, recompute the
 * macroblock grid, then rebuild everything.
 * NOTE(review): interior original lines are missing from this extraction;
 * visible code is kept byte-identical. */
1178 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Free the per-slice duplicate contexts (index 0 is the main context and is
 * freed separately below). */
1182 if (s->slice_context_count > 1) {
1183 for (i = 0; i < s->slice_context_count; i++) {
1184 free_duplicate_context(s->thread_context[i]);
1186 for (i = 1; i < s->slice_context_count; i++) {
1187 av_freep(&s->thread_context[i]);
1190 free_duplicate_context(s);
1192 if ((err = free_context_frame(s)) < 0)
/* Force every picture in the pool to be reallocated at the new size. */
1196 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1197 s->picture[i].needs_realloc = 1;
1200 s->last_picture_ptr =
1201 s->next_picture_ptr =
1202 s->current_picture_ptr = NULL;
/* Interlaced MPEG-2 needs an even number of MB rows (rounded per field). */
1205 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1206 s->mb_height = (s->height + 31) / 32 * 2;
1208 s->mb_height = (s->height + 15) / 16;
/* Validate the new dimensions before allocating anything. */
1210 if ((s->width || s->height) &&
1211 av_image_check_size(s->width, s->height, 0, s->avctx))
1212 return AVERROR_INVALIDDATA;
1214 if ((err = init_context_frame(s)))
1217 s->thread_context[0] = s;
1219 if (s->width && s->height) {
1220 int nb_slices = s->slice_context_count;
/* Recreate the slice thread contexts as copies of the main context. */
1221 if (nb_slices > 1) {
1222 for (i = 1; i < nb_slices; i++) {
1223 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1224 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1227 for (i = 0; i < nb_slices; i++) {
1228 if (init_duplicate_context(s->thread_context[i]) < 0)
/* Assign each slice context a balanced range of MB rows. */
1230 s->thread_context[i]->start_mb_y =
1231 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1232 s->thread_context[i]->end_mb_y =
1233 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1236 if (init_duplicate_context(s) < 0)
1239 s->end_mb_y = s->mb_height;
1241 s->slice_context_count = nb_slices;
/* Error path: tear the whole context down (label presumably precedes this
 * call in the missing lines — TODO confirm against the full file). */
1246 ff_MPV_common_end(s);
1250 /* init common structure for both encoder and decoder */
/* Full teardown of the MpegEncContext: slice contexts, scratch buffers,
 * quantizer matrices, the picture pool and the per-resolution tables.
 * NOTE(review): interior original lines are missing from this extraction;
 * visible code is kept byte-identical. */
1251 void ff_MPV_common_end(MpegEncContext *s)
/* Free duplicate slice contexts; slot 0 aliases s itself. */
1255 if (s->slice_context_count > 1) {
1256 for (i = 0; i < s->slice_context_count; i++) {
1257 free_duplicate_context(s->thread_context[i]);
1259 for (i = 1; i < s->slice_context_count; i++) {
1260 av_freep(&s->thread_context[i]);
1262 s->slice_context_count = 1;
1263 } else free_duplicate_context(s);
1265 av_freep(&s->parse_context.buffer);
1266 s->parse_context.buffer_size = 0;
1268 av_freep(&s->bitstream_buffer);
1269 s->allocated_bitstream_buffer_size = 0;
1271 av_freep(&s->avctx->stats_out);
1272 av_freep(&s->ac_stats);
/* Quantizer matrices and encoder-side picture queues. */
1274 av_freep(&s->q_intra_matrix);
1275 av_freep(&s->q_inter_matrix);
1276 av_freep(&s->q_intra_matrix16);
1277 av_freep(&s->q_inter_matrix16);
1278 av_freep(&s->input_picture);
1279 av_freep(&s->reordered_input_picture);
1280 av_freep(&s->dct_offset);
/* Release every picture in the pool plus the internal reference copies. */
1283 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1284 free_picture_tables(&s->picture[i]);
1285 ff_mpeg_unref_picture(s, &s->picture[i]);
1288 av_freep(&s->picture);
1289 free_picture_tables(&s->last_picture);
1290 ff_mpeg_unref_picture(s, &s->last_picture);
1291 free_picture_tables(&s->current_picture);
1292 ff_mpeg_unref_picture(s, &s->current_picture);
1293 free_picture_tables(&s->next_picture);
1294 ff_mpeg_unref_picture(s, &s->next_picture);
1295 free_picture_tables(&s->new_picture);
1296 ff_mpeg_unref_picture(s, &s->new_picture);
1298 free_context_frame(s);
/* Mark the context uninitialized so a later init starts from scratch. */
1300 s->context_initialized = 0;
1301 s->last_picture_ptr =
1302 s->next_picture_ptr =
1303 s->current_picture_ptr = NULL;
1304 s->linesize = s->uvlinesize = 0;
/* Build the derived run/level lookup tables (max_level[], max_run[],
 * index_run[]) for an RLTable, either into caller-provided static storage or
 * into freshly allocated buffers.
 * NOTE(review): interior original lines are missing from this extraction;
 * visible code is kept byte-identical. */
1307 av_cold void ff_init_rl(RLTable *rl,
1308 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1310 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1311 uint8_t index_run[MAX_RUN + 1];
1312 int last, run, level, start, end, i;
1314 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1315 if (static_store && rl->max_level[0])
1318 /* compute max_level[], max_run[] and index_run[] */
/* Two passes: last == 0 for non-final coefficients, last == 1 for the
 * final-coefficient half of the table. */
1319 for (last = 0; last < 2; last++) {
/* rl->n acts as the "unset" sentinel in index_run[]. */
1328 memset(max_level, 0, MAX_RUN + 1);
1329 memset(max_run, 0, MAX_LEVEL + 1);
1330 memset(index_run, rl->n, MAX_RUN + 1);
1331 for (i = start; i < end; i++) {
1332 run = rl->table_run[i];
1333 level = rl->table_level[i];
1334 if (index_run[run] == rl->n)
1336 if (level > max_level[run])
1337 max_level[run] = level;
1338 if (run > max_run[level])
1339 max_run[level] = run;
/* Static path packs the three tables back-to-back in static_store[last];
 * dynamic path allocates each table separately. */
1342 rl->max_level[last] = static_store[last];
1344 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1345 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1347 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1349 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1350 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1352 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1354 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1355 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute the per-qscale RL_VLC tables: for each q in [0, 32) expand every
 * VLC table entry into (len, level, run) with the dequantization (qmul/qadd)
 * already folded into the level.
 * NOTE(review): interior original lines are missing from this extraction;
 * visible code is kept byte-identical. */
1359 av_cold void ff_init_vlc_rl(RLTable *rl)
1363 for (q = 0; q < 32; q++) {
/* H.263-style dequant: qadd = (q - 1) | 1 (odd rounding term). */
1365 int qadd = (q - 1) | 1;
1371 for (i = 0; i < rl->vlc.table_size; i++) {
1372 int code = rl->vlc.table[i][0];
1373 int len = rl->vlc.table[i][1];
1376 if (len == 0) { // illegal code
1379 } else if (len < 0) { // more bits needed
1383 if (code == rl->n) { // esc
1387 run = rl->table_run[code] + 1;
1388 level = rl->table_level[code] * qmul + qadd;
/* Codes at/after rl->last are "last coefficient" codes; +192 flags that
 * in the run field. */
1389 if (code >= rl->last) run += 192;
1392 rl->rl_vlc[q][i].len = len;
1393 rl->rl_vlc[q][i].level = level;
1394 rl->rl_vlc[q][i].run = run;
/* Unreference every non-reference picture in the pool; the current picture is
 * kept unless remove_current is set. */
1399 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1403 /* release non reference frames */
1404 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1405 if (!s->picture[i].reference &&
1406 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1407 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Test whether a pool slot can be reused: empty (no data) or flagged for
 * reallocation and not held as a delayed reference.
 * NOTE(review): the return statements fall in lines missing from this
 * extraction; visible code is kept byte-identical. */
1412 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1414 if (pic->f.data[0] == NULL)
1416 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Find a free slot in the picture pool. First pass prefers slots with no data
 * at all; second pass accepts any slot pic_is_unused() approves. Returns the
 * index, or AVERROR_INVALIDDATA when the pool is exhausted. */
1421 static int find_unused_picture(MpegEncContext *s, int shared)
1426 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1427 if (s->picture[i].f.data[0] == NULL)
1431 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1432 if (pic_is_unused(s, &s->picture[i]))
1437 return AVERROR_INVALIDDATA;
/* Public wrapper around find_unused_picture(): if the chosen slot was marked
 * needs_realloc, fully reset it (tables, references, frame defaults) before
 * handing the index back. */
1440 int ff_find_unused_picture(MpegEncContext *s, int shared)
1442 int ret = find_unused_picture(s, shared);
1444 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1445 if (s->picture[ret].needs_realloc) {
1446 s->picture[ret].needs_realloc = 0;
1447 free_picture_tables(&s->picture[ret]);
1448 ff_mpeg_unref_picture(s, &s->picture[ret]);
1449 avcodec_get_frame_defaults(&s->picture[ret].f);
/* Refresh the encoder noise-reduction DCT offsets from the accumulated error
 * statistics, separately for intra and inter blocks. */
1455 static void update_noise_reduction(MpegEncContext *s)
1459 for (intra = 0; intra < 2; intra++) {
/* Halve the accumulators once the sample count is large, so the statistics
 * track recent frames instead of growing without bound. */
1460 if (s->dct_count[intra] > (1 << 16)) {
1461 for (i = 0; i < 64; i++) {
1462 s->dct_error_sum[intra][i] >>= 1;
1464 s->dct_count[intra] >>= 1;
/* Per-coefficient offset = strength * count / error (rounded); +1 avoids
 * division by zero for coefficients with no recorded error. */
1467 for (i = 0; i < 64; i++) {
1468 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1469 s->dct_count[intra] +
1470 s->dct_error_sum[intra][i] / 2) /
1471 (s->dct_error_sum[intra][i] + 1);
1477 * generic function for encode/decode called after coding/decoding
1478 * the header and before a frame is coded/decoded.
/* Sets up current/last/next picture pointers for the coming frame, allocates
 * the current picture (and dummy reference pictures when references are
 * missing), and selects the dequantizers.
 * NOTE(review): interior original lines are missing from this extraction;
 * visible code is kept byte-identical. */
1480 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1486 /* mark & release old frames */
1487 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1488 s->last_picture_ptr != s->next_picture_ptr &&
1489 s->last_picture_ptr->f.data[0]) {
1490 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1493 /* release forgotten pictures */
1494 /* if (mpeg124/h263) */
1496 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1497 if (&s->picture[i] != s->last_picture_ptr &&
1498 &s->picture[i] != s->next_picture_ptr &&
1499 s->picture[i].reference && !s->picture[i].needs_realloc) {
1500 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1501 av_log(avctx, AV_LOG_ERROR,
1502 "releasing zombie picture\n");
1503 ff_mpeg_unref_picture(s, &s->picture[i]);
1509 ff_release_unused_pictures(s, 1);
/* Reuse a pre-selected empty slot (e.g. set while reading the header), or
 * grab a fresh one from the pool. */
1511 if (s->current_picture_ptr &&
1512 s->current_picture_ptr->f.data[0] == NULL) {
1513 // we already have a unused image
1514 // (maybe it was set before reading the header)
1515 pic = s->current_picture_ptr;
1517 i = ff_find_unused_picture(s, 0);
1519 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1522 pic = &s->picture[i];
1526 if (!s->droppable) {
1527 if (s->pict_type != AV_PICTURE_TYPE_B)
1531 pic->f.coded_picture_number = s->coded_picture_number++;
1533 if (ff_alloc_picture(s, pic, 0) < 0)
1536 s->current_picture_ptr = pic;
1537 // FIXME use only the vars from current_pic
1538 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1539 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1540 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1541 if (s->picture_structure != PICT_FRAME)
1542 s->current_picture_ptr->f.top_field_first =
1543 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1545 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1546 !s->progressive_sequence;
1547 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1550 s->current_picture_ptr->f.pict_type = s->pict_type;
1551 // if (s->flags && CODEC_FLAG_QSCALE)
1552 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1553 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
/* Keep an owned reference copy in s->current_picture. */
1555 ff_mpeg_unref_picture(s, &s->current_picture);
1556 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1557 s->current_picture_ptr)) < 0)
/* Reference chain rotation for non-B frames: last <- next <- current. */
1560 if (s->pict_type != AV_PICTURE_TYPE_B) {
1561 s->last_picture_ptr = s->next_picture_ptr;
1563 s->next_picture_ptr = s->current_picture_ptr;
1565 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1566 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1567 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1568 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1569 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1570 s->pict_type, s->droppable);
/* Missing last reference (stream starts on a non-keyframe, or a field-based
 * first keyframe): allocate a gray dummy picture so MC has something. */
1572 if ((s->last_picture_ptr == NULL ||
1573 s->last_picture_ptr->f.data[0] == NULL) &&
1574 (s->pict_type != AV_PICTURE_TYPE_I ||
1575 s->picture_structure != PICT_FRAME)) {
1576 int h_chroma_shift, v_chroma_shift;
1577 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1578 &h_chroma_shift, &v_chroma_shift);
1579 if (s->pict_type != AV_PICTURE_TYPE_I)
1580 av_log(avctx, AV_LOG_ERROR,
1581 "warning: first frame is no keyframe\n");
1582 else if (s->picture_structure != PICT_FRAME)
1583 av_log(avctx, AV_LOG_INFO,
1584 "allocate dummy last picture for field based first keyframe\n");
1586 /* Allocate a dummy frame */
1587 i = ff_find_unused_picture(s, 0);
1589 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1592 s->last_picture_ptr = &s->picture[i];
1593 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1594 s->last_picture_ptr = NULL;
/* Fill with mid-gray: luma 0, chroma 0x80. */
1598 memset(s->last_picture_ptr->f.data[0], 0,
1599 avctx->height * s->last_picture_ptr->f.linesize[0]);
1600 memset(s->last_picture_ptr->f.data[1], 0x80,
1601 (avctx->height >> v_chroma_shift) *
1602 s->last_picture_ptr->f.linesize[1]);
1603 memset(s->last_picture_ptr->f.data[2], 0x80,
1604 (avctx->height >> v_chroma_shift) *
1605 s->last_picture_ptr->f.linesize[2]);
1607 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1608 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same dummy-frame treatment for a missing next reference of a B frame. */
1610 if ((s->next_picture_ptr == NULL ||
1611 s->next_picture_ptr->f.data[0] == NULL) &&
1612 s->pict_type == AV_PICTURE_TYPE_B) {
1613 /* Allocate a dummy frame */
1614 i = ff_find_unused_picture(s, 0);
1616 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1619 s->next_picture_ptr = &s->picture[i];
1620 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1621 s->next_picture_ptr = NULL;
1624 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1625 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
/* Take owned reference copies of last/next for this frame. */
1628 if (s->last_picture_ptr) {
1629 ff_mpeg_unref_picture(s, &s->last_picture);
1630 if (s->last_picture_ptr->f.data[0] &&
1631 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1632 s->last_picture_ptr)) < 0)
1635 if (s->next_picture_ptr) {
1636 ff_mpeg_unref_picture(s, &s->next_picture);
1637 if (s->next_picture_ptr->f.data[0] &&
1638 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1639 s->next_picture_ptr)) < 0)
1643 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1644 s->last_picture_ptr->f.data[0]));
/* Field pictures: step into the selected field and double the strides. */
1646 if (s->picture_structure!= PICT_FRAME) {
1648 for (i = 0; i < 4; i++) {
1649 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1650 s->current_picture.f.data[i] +=
1651 s->current_picture.f.linesize[i];
1653 s->current_picture.f.linesize[i] *= 2;
1654 s->last_picture.f.linesize[i] *= 2;
1655 s->next_picture.f.linesize[i] *= 2;
1659 s->err_recognition = avctx->err_recognition;
1661 /* set dequantizer, we can't do it during init as
1662 * it might change for mpeg4 and we can't do it in the header
1663 * decode as init is not called for mpeg4 there yet */
1664 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1665 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1666 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1667 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1668 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1669 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1671 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1672 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1675 if (s->dct_error_sum) {
1676 assert(s->avctx->noise_reduction && s->encoding);
1677 update_noise_reduction(s);
1680 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1681 return ff_xvmc_field_start(s, avctx);
1686 /* generic function for encode/decode called after a
1687 * frame has been coded/decoded. */
/* Draws edge padding if needed, updates last-frame bookkeeping, copies the
 * current picture back into its pool slot and reports decode progress.
 * NOTE(review): interior original lines are missing from this extraction;
 * visible code is kept byte-identical. */
1688 void ff_MPV_frame_end(MpegEncContext *s)
1691 /* redraw edges for the frame if decoding didn't complete */
1692 // just to make sure that all data is rendered.
1693 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1694 ff_xvmc_field_end(s);
/* Only pad edges for software-decoded reference frames with unrestricted
 * MVs and no EMU_EDGE. */
1695 } else if ((s->er.error_count || s->encoding) &&
1696 !s->avctx->hwaccel &&
1697 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1698 s->unrestricted_mv &&
1699 s->current_picture.reference &&
1701 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1702 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1703 int hshift = desc->log2_chroma_w;
1704 int vshift = desc->log2_chroma_h;
1705 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1706 s->h_edge_pos, s->v_edge_pos,
1707 EDGE_WIDTH, EDGE_WIDTH,
1708 EDGE_TOP | EDGE_BOTTOM);
1709 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1710 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1711 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1712 EDGE_TOP | EDGE_BOTTOM);
1713 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1714 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1715 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1716 EDGE_TOP | EDGE_BOTTOM);
1721 s->last_pict_type = s->pict_type;
1722 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1723 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1724 s->last_non_b_pict_type = s->pict_type;
1727 /* copy back current_picture variables */
1728 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1729 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1730 s->picture[i] = s->current_picture;
1734 assert(i < MAX_PICTURE_COUNT);
1738 /* release non-reference frames */
1739 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1740 if (!s->picture[i].reference)
1741 ff_mpeg_unref_picture(s, &s->picture[i]);
1744 // clear copies, to avoid confusion
1746 memset(&s->last_picture, 0, sizeof(Picture));
1747 memset(&s->next_picture, 0, sizeof(Picture));
1748 memset(&s->current_picture, 0, sizeof(Picture));
1750 s->avctx->coded_frame = &s->current_picture_ptr->f;
1752 if (s->current_picture.reference)
1753 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1757 * Print debugging info for the given picture.
/* Dumps per-MB skip counts, qscale and macroblock-type glyphs depending on
 * the FF_DEBUG_* flags.
 * NOTE(review): interior original lines are missing from this extraction;
 * also note the visible body reads `pict->pict_type` while the visible
 * signature only declares `p` — the missing lines presumably declare/assign
 * `pict`; confirm against the full file. Code kept byte-identical. */
1759 void ff_print_debug_info(MpegEncContext *s, Picture *p)
1762 if (s->avctx->hwaccel || !p || !p->mb_type)
1766 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1769 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1770 switch (pict->pict_type) {
1771 case AV_PICTURE_TYPE_I:
1772 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1774 case AV_PICTURE_TYPE_P:
1775 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1777 case AV_PICTURE_TYPE_B:
1778 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1780 case AV_PICTURE_TYPE_S:
1781 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1783 case AV_PICTURE_TYPE_SI:
1784 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1786 case AV_PICTURE_TYPE_SP:
1787 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
/* One row of output per MB row; one glyph group per MB. */
1790 for (y = 0; y < s->mb_height; y++) {
1791 for (x = 0; x < s->mb_width; x++) {
1792 if (s->avctx->debug & FF_DEBUG_SKIP) {
1793 int count = s->mbskip_table[x + y * s->mb_stride];
1796 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1798 if (s->avctx->debug & FF_DEBUG_QP) {
1799 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1800 p->qscale_table[x + y * s->mb_stride]);
1802 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1803 int mb_type = p->mb_type[x + y * s->mb_stride];
1804 // Type & MV direction
1805 if (IS_PCM(mb_type))
1806 av_log(s->avctx, AV_LOG_DEBUG, "P");
1807 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1808 av_log(s->avctx, AV_LOG_DEBUG, "A");
1809 else if (IS_INTRA4x4(mb_type))
1810 av_log(s->avctx, AV_LOG_DEBUG, "i");
1811 else if (IS_INTRA16x16(mb_type))
1812 av_log(s->avctx, AV_LOG_DEBUG, "I");
1813 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1814 av_log(s->avctx, AV_LOG_DEBUG, "d");
1815 else if (IS_DIRECT(mb_type))
1816 av_log(s->avctx, AV_LOG_DEBUG, "D");
1817 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1818 av_log(s->avctx, AV_LOG_DEBUG, "g");
1819 else if (IS_GMC(mb_type))
1820 av_log(s->avctx, AV_LOG_DEBUG, "G");
1821 else if (IS_SKIP(mb_type))
1822 av_log(s->avctx, AV_LOG_DEBUG, "S");
1823 else if (!USES_LIST(mb_type, 1))
1824 av_log(s->avctx, AV_LOG_DEBUG, ">");
1825 else if (!USES_LIST(mb_type, 0))
1826 av_log(s->avctx, AV_LOG_DEBUG, "<");
1828 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1829 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second glyph: partitioning. */
1833 if (IS_8X8(mb_type))
1834 av_log(s->avctx, AV_LOG_DEBUG, "+");
1835 else if (IS_16X8(mb_type))
1836 av_log(s->avctx, AV_LOG_DEBUG, "-");
1837 else if (IS_8X16(mb_type))
1838 av_log(s->avctx, AV_LOG_DEBUG, "|");
1839 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1840 av_log(s->avctx, AV_LOG_DEBUG, " ");
1842 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* Third glyph: interlacing. */
1845 if (IS_INTERLACED(mb_type))
1846 av_log(s->avctx, AV_LOG_DEBUG, "=");
1848 av_log(s->avctx, AV_LOG_DEBUG, " ");
1851 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1857 * find the lowest MB row referenced in the MVs
/* Used for frame-threaded decoding: returns the bottom-most MB row of the
 * reference picture that this MB's motion vectors (in direction dir) can
 * touch, clamped to [0, mb_height-1]. */
1859 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* Half-pel MVs get one extra shift so `my` is always in quarter-pel units. */
1861 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1862 int my, off, i, mvs;
/* Field pictures / GMC: fall through to the conservative answer below. */
1864 if (s->picture_structure != PICT_FRAME || s->mcsel)
1867 switch (s->mv_type) {
1881 for (i = 0; i < mvs; i++) {
1882 my = s->mv[dir][i][1]<<qpel_shift;
1883 my_max = FFMAX(my_max, my);
1884 my_min = FFMIN(my_min, my);
/* Convert the largest vertical excursion from quarter-pel to MB rows. */
1887 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1889 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* Conservative fallback: the entire reference may be needed. */
1891 return s->mb_height-1;
1894 /* put block[] to dest[] */
/* Dequantize an intra block then IDCT it, overwriting dest. */
1895 static inline void put_dct(MpegEncContext *s,
1896 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1898 s->dct_unquantize_intra(s, block, i, qscale);
1899 s->dsp.idct_put (dest, line_size, block);
1902 /* add block[] to dest[] */
/* IDCT the (already dequantized) block and add it to dest; skipped when the
 * block has no coded coefficients (block_last_index < 0). */
1903 static inline void add_dct(MpegEncContext *s,
1904 int16_t *block, int i, uint8_t *dest, int line_size)
1906 if (s->block_last_index[i] >= 0) {
1907 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block then IDCT-add it to dest; skipped when the block
 * has no coded coefficients. */
1911 static inline void add_dequant_dct(MpegEncContext *s,
1912 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1914 if (s->block_last_index[i] >= 0) {
1915 s->dct_unquantize_inter(s, block, i, qscale);
1917 s->dsp.idct_add (dest, line_size, block);
1922 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the DC/AC prediction state around the current macroblock so that a
 * following intra MB does not predict from stale values.
 * NOTE(review): interior original lines are missing from this extraction;
 * visible code is kept byte-identical. */
1924 void ff_clean_intra_table_entries(MpegEncContext *s)
1926 int wrap = s->b8_stride;
1927 int xy = s->block_index[0];
/* Luma DC predictors reset to the neutral value 1024 (= 128 << 3). */
1930 s->dc_val[0][xy + 1 ] =
1931 s->dc_val[0][xy + wrap] =
1932 s->dc_val[0][xy + 1 + wrap] = 1024;
1934 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1935 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
/* coded_block is only tracked for MSMPEG4 v3 and later. */
1936 if (s->msmpeg4_version>=3) {
1937 s->coded_block[xy ] =
1938 s->coded_block[xy + 1 ] =
1939 s->coded_block[xy + wrap] =
1940 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma uses the MB-level stride/index. */
1943 wrap = s->mb_stride;
1944 xy = s->mb_x + s->mb_y * wrap;
1946 s->dc_val[2][xy] = 1024;
1948 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1949 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1951 s->mbintra_table[xy]= 0;
1954 /* generic function called after a macroblock has been parsed by the
1955 decoder or after it has been encoded by the encoder.
1957 Important variables used:
1958 s->mb_intra : true if intra macroblock
1959 s->mv_dir : motion vector direction
1960 s->mv_type : motion vector type
1961 s->mv : motion vector
1962 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* NOTE(review): interior original lines are missing from this extraction
 * (several braces/branches are not visible); code kept byte-identical. */
1964 static av_always_inline
1965 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
1968 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1969 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1970 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1974 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1975 /* print DCT coefficients */
1977 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1979 for(j=0; j<64; j++){
1980 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
1982 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1986 s->current_picture.qscale_table[mb_xy] = s->qscale;
1988 /* update DC predictors for P macroblocks */
1990 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1991 if(s->mbintra_table[mb_xy])
1992 ff_clean_intra_table_entries(s);
1996 s->last_dc[2] = 128 << s->intra_dc_precision;
1999 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2000 s->mbintra_table[mb_xy]=1;
/* Skip reconstruction when encoding non-reference frames without PSNR/RD. */
2002 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2003 uint8_t *dest_y, *dest_cb, *dest_cr;
2004 int dct_linesize, dct_offset;
2005 op_pixels_func (*op_pix)[4];
2006 qpel_mc_func (*op_qpix)[16];
2007 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2008 const int uvlinesize = s->current_picture.f.linesize[1];
2009 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2010 const int block_size = 8;
2012 /* avoid copy if macroblock skipped in last frame too */
2013 /* skip only during decoding as we might trash the buffers during encoding a bit */
2015 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2017 if (s->mb_skipped) {
2019 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2021 } else if(!s->current_picture.reference) {
2024 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT doubles the stride and uses one line of offset. */
2028 dct_linesize = linesize << s->interlaced_dct;
2029 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2033 dest_cb= s->dest[1];
2034 dest_cr= s->dest[2];
/* Non-readable B-frame destinations go through a scratch buffer. */
2036 dest_y = s->b_scratchpad;
2037 dest_cb= s->b_scratchpad+16*linesize;
2038 dest_cr= s->b_scratchpad+32*linesize;
2042 /* motion handling */
2043 /* decoding or more than one mb_type (MC was already done otherwise) */
/* Frame threading: wait until the referenced rows are decoded. */
2046 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2047 if (s->mv_dir & MV_DIR_FORWARD) {
2048 ff_thread_await_progress(&s->last_picture_ptr->tf,
2049 ff_MPV_lowest_referenced_row(s, 0),
2052 if (s->mv_dir & MV_DIR_BACKWARD) {
2053 ff_thread_await_progress(&s->next_picture_ptr->tf,
2054 ff_MPV_lowest_referenced_row(s, 1),
/* Forward MC uses put (or put_no_rnd); backward then averages on top. */
2059 op_qpix= s->me.qpel_put;
2060 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2061 op_pix = s->hdsp.put_pixels_tab;
2063 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2065 if (s->mv_dir & MV_DIR_FORWARD) {
2066 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2067 op_pix = s->hdsp.avg_pixels_tab;
2068 op_qpix= s->me.qpel_avg;
2070 if (s->mv_dir & MV_DIR_BACKWARD) {
2071 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2075 /* skip dequant / idct if we are really late ;) */
2076 if(s->avctx->skip_idct){
2077 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2078 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2079 || s->avctx->skip_idct >= AVDISCARD_ALL)
2083 /* add dct residue */
/* Path 1: codecs whose inter blocks still need dequantization here. */
2084 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2085 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2086 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2087 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2088 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2089 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2091 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2092 if (s->chroma_y_shift){
2093 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2094 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2098 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2099 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2100 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2101 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* Path 2: blocks are already dequantized; just IDCT-add. */
2104 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2105 add_dct(s, block[0], 0, dest_y , dct_linesize);
2106 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2107 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2108 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2110 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2111 if(s->chroma_y_shift){//Chroma420
2112 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2113 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2116 dct_linesize = uvlinesize << s->interlaced_dct;
2117 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2119 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2120 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2121 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2122 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2123 if(!s->chroma_x_shift){//Chroma444
2124 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2125 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2126 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2127 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
/* Path 3: WMV2 has its own block-add routine. */
2132 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2133 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2136 /* dct only in intra block */
/* Intra: put (overwrite) instead of add. */
2137 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2138 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2139 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2140 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2141 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2143 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2144 if(s->chroma_y_shift){
2145 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2146 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2150 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2151 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2152 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2153 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra: blocks arrive fully dequantized; plain idct_put. */
2157 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2158 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2159 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2160 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2162 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2163 if(s->chroma_y_shift){
2164 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2165 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2168 dct_linesize = uvlinesize << s->interlaced_dct;
2169 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2171 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2172 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2173 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2174 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2175 if(!s->chroma_x_shift){//Chroma444
2176 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2177 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2178 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2179 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* Copy the scratch reconstruction back to the real destination. */
2187 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2188 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2189 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Dispatch to the inlined internal MB reconstruction, specialized at compile
 * time for MPEG-1/2 (is_mpeg12 = 1) vs. everything else. */
2194 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2196 if(s->out_format == FMT_MPEG1) {
2197 MPV_decode_mb_internal(s, block, 1);
2200 MPV_decode_mb_internal(s, block, 0);
2204 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* Draws edge padding for a just-decoded band and forwards it to the user's
 * draw_horiz_band callback, honoring slice flags and field pictures.
 * NOTE(review): interior original lines are missing from this extraction;
 * visible code is kept byte-identical. */
2206 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2207 Picture *last, int y, int h, int picture_structure,
2208 int first_field, int draw_edges, int low_delay,
2209 int v_edge_pos, int h_edge_pos)
2211 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2212 int hshift = desc->log2_chroma_w;
2213 int vshift = desc->log2_chroma_h;
2214 const int field_pic = picture_structure != PICT_FRAME;
/* Only pad edges for software decoding without EMU_EDGE. */
2220 if (!avctx->hwaccel &&
2221 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2224 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2225 int *linesize = cur->f.linesize;
2226 int sides = 0, edge_h;
2227 if (y==0) sides |= EDGE_TOP;
2228 if (y + h >= v_edge_pos)
2229 sides |= EDGE_BOTTOM;
2231 edge_h= FFMIN(h, v_edge_pos - y);
2233 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2234 linesize[0], h_edge_pos, edge_h,
2235 EDGE_WIDTH, EDGE_WIDTH, sides);
2236 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2237 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2238 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2239 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2240 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2241 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
/* Clamp the band to the picture; last row may be shorter. */
2244 h = FFMIN(h, avctx->height - y);
2246 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2248 if (avctx->draw_horiz_band) {
2250 int offset[AV_NUM_DATA_POINTERS];
/* Choose which picture to hand out: the current one for B frames /
 * low-delay / coded-order slices, otherwise (per the missing else branch —
 * presumably `last`; TODO confirm against the full file). */
2253 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2254 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2261 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2262 picture_structure == PICT_FRAME &&
2263 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2264 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2267 offset[0]= y * src->linesize[0];
2269 offset[2]= (y >> vshift) * src->linesize[1];
2270 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2276 avctx->draw_horiz_band(avctx, src, offset,
2277 y, picture_structure, h);
2281 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2283 int draw_edges = s->unrestricted_mv && !s->intra_only;
2284 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2285 &s->last_picture, y, h, s->picture_structure,
2286 s->first_field, draw_edges, s->low_delay,
2287 s->v_edge_pos, s->h_edge_pos);
2290 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2291 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2292 const int uvlinesize = s->current_picture.f.linesize[1];
2293 const int mb_size= 4;
2295 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2296 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2297 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2298 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2299 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2300 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2301 //block_index is not used by mpeg2, so it is not affected by chroma_format
2303 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2304 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2305 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2307 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2309 if(s->picture_structure==PICT_FRAME){
2310 s->dest[0] += s->mb_y * linesize << mb_size;
2311 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2312 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2314 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2315 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2316 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2317 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 *                  (inverse) permutated to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int i;
    int16_t temp[64];

    /* Nothing to move for a DC-only (or empty) block. */
    if (last <= 0)
        return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* Save the coefficients and clear their old positions first, so no
     * coefficient is lost when source and destination positions overlap. */
    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
        temp[j]  = block[j];
        block[j] = 0;
    }

    /* Write each saved coefficient to its permuted position. */
    for (i = 0; i <= last; i++) {
        const int j      = scantable[i];
        const int perm_j = permutation[j];
        block[perm_j] = temp[j];
    }
}
2351 void ff_mpeg_flush(AVCodecContext *avctx){
2353 MpegEncContext *s = avctx->priv_data;
2355 if(s==NULL || s->picture==NULL)
2358 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2359 ff_mpeg_unref_picture(s, &s->picture[i]);
2360 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2362 ff_mpeg_unref_picture(s, &s->current_picture);
2363 ff_mpeg_unref_picture(s, &s->last_picture);
2364 ff_mpeg_unref_picture(s, &s->next_picture);
2366 s->mb_x= s->mb_y= 0;
2368 s->parse_context.state= -1;
2369 s->parse_context.frame_start_found= 0;
2370 s->parse_context.overread= 0;
2371 s->parse_context.overread_index= 0;
2372 s->parse_context.index= 0;
2373 s->parse_context.last_index= 0;
2374 s->bitstream_buffer_size=0;
2378 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2379 int16_t *block, int n, int qscale)
2381 int i, level, nCoeffs;
2382 const uint16_t *quant_matrix;
2384 nCoeffs= s->block_last_index[n];
2387 block[0] = block[0] * s->y_dc_scale;
2389 block[0] = block[0] * s->c_dc_scale;
2390 /* XXX: only mpeg1 */
2391 quant_matrix = s->intra_matrix;
2392 for(i=1;i<=nCoeffs;i++) {
2393 int j= s->intra_scantable.permutated[i];
2398 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2399 level = (level - 1) | 1;
2402 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2403 level = (level - 1) | 1;
2410 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2411 int16_t *block, int n, int qscale)
2413 int i, level, nCoeffs;
2414 const uint16_t *quant_matrix;
2416 nCoeffs= s->block_last_index[n];
2418 quant_matrix = s->inter_matrix;
2419 for(i=0; i<=nCoeffs; i++) {
2420 int j= s->intra_scantable.permutated[i];
2425 level = (((level << 1) + 1) * qscale *
2426 ((int) (quant_matrix[j]))) >> 4;
2427 level = (level - 1) | 1;
2430 level = (((level << 1) + 1) * qscale *
2431 ((int) (quant_matrix[j]))) >> 4;
2432 level = (level - 1) | 1;
2439 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2440 int16_t *block, int n, int qscale)
2442 int i, level, nCoeffs;
2443 const uint16_t *quant_matrix;
2445 if(s->alternate_scan) nCoeffs= 63;
2446 else nCoeffs= s->block_last_index[n];
2449 block[0] = block[0] * s->y_dc_scale;
2451 block[0] = block[0] * s->c_dc_scale;
2452 quant_matrix = s->intra_matrix;
2453 for(i=1;i<=nCoeffs;i++) {
2454 int j= s->intra_scantable.permutated[i];
2459 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2462 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2469 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2470 int16_t *block, int n, int qscale)
2472 int i, level, nCoeffs;
2473 const uint16_t *quant_matrix;
2476 if(s->alternate_scan) nCoeffs= 63;
2477 else nCoeffs= s->block_last_index[n];
2480 block[0] = block[0] * s->y_dc_scale;
2482 block[0] = block[0] * s->c_dc_scale;
2483 quant_matrix = s->intra_matrix;
2484 for(i=1;i<=nCoeffs;i++) {
2485 int j= s->intra_scantable.permutated[i];
2490 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2493 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2502 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2503 int16_t *block, int n, int qscale)
2505 int i, level, nCoeffs;
2506 const uint16_t *quant_matrix;
2509 if(s->alternate_scan) nCoeffs= 63;
2510 else nCoeffs= s->block_last_index[n];
2512 quant_matrix = s->inter_matrix;
2513 for(i=0; i<=nCoeffs; i++) {
2514 int j= s->intra_scantable.permutated[i];
2519 level = (((level << 1) + 1) * qscale *
2520 ((int) (quant_matrix[j]))) >> 4;
2523 level = (((level << 1) + 1) * qscale *
2524 ((int) (quant_matrix[j]))) >> 4;
2533 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2534 int16_t *block, int n, int qscale)
2536 int i, level, qmul, qadd;
2539 assert(s->block_last_index[n]>=0);
2545 block[0] = block[0] * s->y_dc_scale;
2547 block[0] = block[0] * s->c_dc_scale;
2548 qadd = (qscale - 1) | 1;
2555 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2557 for(i=1; i<=nCoeffs; i++) {
2561 level = level * qmul - qadd;
2563 level = level * qmul + qadd;
2570 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2571 int16_t *block, int n, int qscale)
2573 int i, level, qmul, qadd;
2576 assert(s->block_last_index[n]>=0);
2578 qadd = (qscale - 1) | 1;
2581 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2583 for(i=0; i<=nCoeffs; i++) {
2587 level = level * qmul - qadd;
2589 level = level * qmul + qadd;
2597 * set qscale and update qscale dependent variables.
2599 void ff_set_qscale(MpegEncContext * s, int qscale)
2603 else if (qscale > 31)
2607 s->chroma_qscale= s->chroma_qscale_table[qscale];
2609 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2610 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2613 void ff_MPV_report_decode_progress(MpegEncContext *s)
2615 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2616 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
#if CONFIG_ERROR_RESILIENCE
/**
 * Copy the state needed by the error resilience code out of the MPEG
 * context and start error concealment for the current frame.
 */
void ff_mpeg_er_frame_start(MpegEncContext *s)
{
    ERContext *er = &s->er;

    er->cur_pic  = s->current_picture_ptr;
    er->last_pic = s->last_picture_ptr;
    er->next_pic = s->next_picture_ptr;

    er->pp_time           = s->pp_time;
    er->pb_time           = s->pb_time;
    er->quarter_sample    = s->quarter_sample;
    er->partitioned_frame = s->partitioned_frame;

    ff_er_frame_start(er);
}
#endif /* CONFIG_ERROR_RESILIENCE */