2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/avassert.h"
31 #include "libavutil/imgutils.h"
36 #include "mpegvideo.h"
39 #include "xvmc_internal.h"
/* Forward declarations of the per-standard C inverse-quantization routines.
 * ff_dct_common_init() installs these as the default function pointers in
 * MpegEncContext (arch-specific init may override them afterwards). */
46 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
47 int16_t *block, int n, int qscale);
48 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
49 int16_t *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
51 int16_t *block, int n, int qscale);
52 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
53 int16_t *block, int n, int qscale);
54 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
55 int16_t *block, int n, int qscale);
56 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
57 int16_t *block, int n, int qscale);
58 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
59 int16_t *block, int n, int qscale);
62 /* enable all paranoid tests for rounding, overflows, etc... */
/* Identity mapping from luma qscale to chroma qscale (chroma == luma).
 * Installed as the default chroma_qscale_table in ff_MPV_common_defaults(). */
68 static const uint8_t ff_default_chroma_qscale_table[32] = {
69 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
70 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
71 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale value.
 * Also reused as entry 0 of ff_mpeg2_dc_scale_table[] below. */
74 const uint8_t ff_mpeg1_dc_scale_table[128] = {
75 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale, intra_dc_precision == 1: constant 4 for every qscale. */
86 static const uint8_t mpeg2_dc_scale_table1[128] = {
87 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale, intra_dc_precision == 2: constant 2 for every qscale. */
98 static const uint8_t mpeg2_dc_scale_table2[128] = {
99 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale, intra_dc_precision == 3: constant 1 for every qscale. */
110 static const uint8_t mpeg2_dc_scale_table3[128] = {
111 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale table selected by the MPEG-2 intra_dc_precision field (0..3);
 * precision 0 shares the MPEG-1 table (scale 8). */
122 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
123 ff_mpeg1_dc_scale_table,
124 mpeg2_dc_scale_table1,
125 mpeg2_dc_scale_table2,
126 mpeg2_dc_scale_table3,
/* Supported output pixel formats for the plain 4:2:0 mpegvideo decoders.
 * NOTE(review): initializer contents are missing from this extract —
 * presumably { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }; confirm upstream. */
129 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback (installed in init_er() as er->decode_mb):
 * re-decodes/conceals a single macroblock using the motion parameters
 * supplied by the error concealment code. opaque is the MpegEncContext. */
134 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
136 int mb_x, int mb_y, int mb_intra, int mb_skipped)
138 MpegEncContext *s = opaque;
141 s->mv_type = mv_type;
142 s->mb_intra = mb_intra;
143 s->mb_skipped = mb_skipped;
146 memcpy(s->mv, mv, sizeof(*mv));
148 ff_init_block_index(s);
149 ff_update_block_index(s);
151 s->dsp.clear_blocks(s->block[0]);
/* Compute destination pointers: luma at 16x16 MB granularity, chroma
 * scaled down by the pixel format's chroma shift factors. */
153 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
154 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
155 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
158 ff_MPV_decode_mb(s, s->block);
161 /* init common dct for both encoder and decoder */
162 av_cold int ff_dct_common_init(MpegEncContext *s)
164 ff_dsputil_init(&s->dsp, s->avctx);
165 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
166 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* Install the default C inverse-quantization routines; the bitexact
 * MPEG-2 intra variant is selected when CODEC_FLAG_BITEXACT is set. */
168 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
169 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
170 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
171 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
172 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
173 if (s->flags & CODEC_FLAG_BITEXACT)
174 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
175 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Architecture-specific overrides (each call is normally guarded by an
 * ARCH_*/HAVE_* #if that is missing from this extract). */
178 ff_MPV_common_init_x86(s);
180 ff_MPV_common_init_axp(s);
182 ff_MPV_common_init_arm(s);
184 ff_MPV_common_init_altivec(s);
186 ff_MPV_common_init_bfin(s);
189 /* load & permutate scantables
190 * note: only wmv uses different ones
192 if (s->alternate_scan) {
193 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
194 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
196 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
197 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
199 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
200 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge emulation buffer
 * and motion-estimation / RD scratchpads). Returns 0 on success or
 * AVERROR(ENOMEM), freeing the edge buffer on the failure path. */
205 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
207 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
209 // edge emu needs blocksize + filter length - 1
210 // (= 17x17 for halfpel / 21x21 for h264)
211 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
212 // at uvlinesize. It supports only YUV420 so 24x24 is enough
213 // linesize * interlaced * MBsize
214 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
/* One allocation shared by several scratch pointers below; obmc uses an
 * offset of 16 bytes into the same buffer. */
217 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 2,
219 s->me.temp = s->me.scratchpad;
220 s->rd_scratchpad = s->me.scratchpad;
221 s->b_scratchpad = s->me.scratchpad;
222 s->obmc_scratchpad = s->me.scratchpad + 16;
226 av_freep(&s->edge_emu_buffer);
227 return AVERROR(ENOMEM);
231 * Allocate a frame buffer
233 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* WM Image/Screen codecs use internal buffers with different dimensions,
 * so user get_buffer callbacks are bypassed for them (default allocator
 * branch below); all other codecs go through the frame-threading buffer
 * API. */
238 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
239 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
240 s->codec_id != AV_CODEC_ID_MSS2)
241 r = ff_thread_get_buffer(s->avctx, &pic->tf,
242 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
244 pic->f.width = s->avctx->width;
245 pic->f.height = s->avctx->height;
246 pic->f.format = s->avctx->pix_fmt;
247 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
250 if (r < 0 || !pic->f.data[0]) {
251 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Allocate per-picture hwaccel private data when a hardware
 * accelerator is active. */
256 if (s->avctx->hwaccel) {
257 assert(!pic->hwaccel_picture_private);
258 if (s->avctx->hwaccel->priv_data_size) {
259 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
260 if (!pic->hwaccel_priv_buf) {
261 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
264 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* Strides must stay constant for the whole sequence and U/V must match;
 * reject buffers that do not satisfy this. */
268 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
269 s->uvlinesize != pic->f.linesize[1])) {
270 av_log(s->avctx, AV_LOG_ERROR,
271 "get_buffer() failed (stride changed)\n");
272 ff_mpeg_unref_picture(s, pic);
276 if (pic->f.linesize[1] != pic->f.linesize[2]) {
277 av_log(s->avctx, AV_LOG_ERROR,
278 "get_buffer() failed (uv stride mismatch)\n");
279 ff_mpeg_unref_picture(s, pic);
283 if (!s->edge_emu_buffer &&
284 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
285 av_log(s->avctx, AV_LOG_ERROR,
286 "get_buffer() failed to allocate context scratch buffers.\n");
287 ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data buffers (variance/mean stats, skip,
 * qscale, mb_type, motion vectors and reference indices). */
294 static void free_picture_tables(Picture *pic)
298 av_buffer_unref(&pic->mb_var_buf);
299 av_buffer_unref(&pic->mc_mb_var_buf);
300 av_buffer_unref(&pic->mb_mean_buf);
301 av_buffer_unref(&pic->mbskip_table_buf);
302 av_buffer_unref(&pic->qscale_table_buf);
303 av_buffer_unref(&pic->mb_type_buf);
305 for (i = 0; i < 2; i++) {
306 av_buffer_unref(&pic->motion_val_buf[i]);
307 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data buffers. Encoder-only statistics and
 * motion tables are allocated conditionally. Returns 0 or AVERROR(ENOMEM);
 * partially allocated buffers are left for the caller to free. */
311 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
313 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
314 const int mb_array_size = s->mb_stride * s->mb_height;
315 const int b8_array_size = s->b8_stride * s->mb_height * 2;
319 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
320 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
321 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
323 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
324 return AVERROR(ENOMEM);
/* Encoder-only macroblock statistics (variance, MC variance, mean). */
327 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
328 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
329 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
330 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
331 return AVERROR(ENOMEM);
/* Motion vector / reference index tables: needed for H.263-family
 * decoding, for encoding, and for motion-vector visualization. */
334 if (s->out_format == FMT_H263 || s->encoding ||
335 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
336 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
337 int ref_index_size = 4 * mb_array_size;
339 for (i = 0; mv_size && i < 2; i++) {
340 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
341 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
342 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
343 return AVERROR(ENOMEM);
/* Make every (possibly shared) per-picture table buffer writable via
 * copy-on-write. NOTE(review): parts of the MAKE_WRITABLE macro body are
 * missing from this extract — presumably a NULL check and error return. */
350 static int make_tables_writable(Picture *pic)
353 #define MAKE_WRITABLE(table) \
356 (ret = av_buffer_make_writable(&pic->table)) < 0)\
360 MAKE_WRITABLE(mb_var_buf);
361 MAKE_WRITABLE(mc_mb_var_buf);
362 MAKE_WRITABLE(mb_mean_buf);
363 MAKE_WRITABLE(mbskip_table_buf);
364 MAKE_WRITABLE(qscale_table_buf);
365 MAKE_WRITABLE(mb_type_buf);
367 for (i = 0; i < 2; i++) {
368 MAKE_WRITABLE(motion_val_buf[i]);
369 MAKE_WRITABLE(ref_index_buf[i]);
376 * Allocate a Picture.
377 * The pixels are allocated/set by calling get_buffer() if shared = 0
379 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* shared pictures must already carry pixel data; non-shared ones must
 * not, and get their frame buffer allocated here. */
384 assert(pic->f.data[0]);
387 assert(!pic->f.data[0]);
389 if (alloc_frame_buffer(s, pic) < 0)
/* Record the sequence-wide strides from the first allocated frame. */
392 s->linesize = pic->f.linesize[0];
393 s->uvlinesize = pic->f.linesize[1];
396 if (!pic->qscale_table_buf)
397 ret = alloc_picture_tables(s, pic);
399 ret = make_tables_writable(pic);
/* Set up the convenience pointers into the side-data buffers; qscale and
 * mb_type skip the first two strides plus one entry of edge padding. */
404 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
405 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
406 pic->mb_mean = pic->mb_mean_buf->data;
409 pic->mbskip_table = pic->mbskip_table_buf->data;
410 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
411 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
413 if (pic->motion_val_buf[0]) {
414 for (i = 0; i < 2; i++) {
415 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
416 pic->ref_index[i] = pic->ref_index_buf[i]->data;
422 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
423 ff_mpeg_unref_picture(s, pic);
424 free_picture_tables(pic);
425 return AVERROR(ENOMEM);
429 * Deallocate a picture.
431 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
/* Everything after mb_mean in the Picture struct is per-instance state
 * and gets zeroed below; the table buffers before it stay referenced. */
433 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
436 /* WM Image / Screen codecs allocate internal buffers with different
437 * dimensions / colorspaces; ignore user-defined callbacks for these. */
438 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
439 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
440 s->codec_id != AV_CODEC_ID_MSS2)
441 ff_thread_release_buffer(s->avctx, &pic->tf);
443 av_frame_unref(&pic->f);
445 av_buffer_unref(&pic->hwaccel_priv_buf);
447 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Point dst's side-data buffers and convenience pointers at src's,
 * re-referencing only the buffers that actually differ. On reference
 * failure all of dst's tables are freed and AVERROR(ENOMEM) returned. */
450 static int update_picture_tables(Picture *dst, Picture *src)
454 #define UPDATE_TABLE(table)\
457 (!dst->table || dst->table->buffer != src->table->buffer)) {\
458 av_buffer_unref(&dst->table);\
459 dst->table = av_buffer_ref(src->table);\
461 free_picture_tables(dst);\
462 return AVERROR(ENOMEM);\
467 UPDATE_TABLE(mb_var_buf);
468 UPDATE_TABLE(mc_mb_var_buf);
469 UPDATE_TABLE(mb_mean_buf);
470 UPDATE_TABLE(mbskip_table_buf);
471 UPDATE_TABLE(qscale_table_buf);
472 UPDATE_TABLE(mb_type_buf);
473 for (i = 0; i < 2; i++) {
474 UPDATE_TABLE(motion_val_buf[i]);
475 UPDATE_TABLE(ref_index_buf[i]);
/* Copy the derived pointers so dst indexes into the shared buffers. */
478 dst->mb_var = src->mb_var;
479 dst->mc_mb_var = src->mc_mb_var;
480 dst->mb_mean = src->mb_mean;
481 dst->mbskip_table = src->mbskip_table;
482 dst->qscale_table = src->qscale_table;
483 dst->mb_type = src->mb_type;
484 for (i = 0; i < 2; i++) {
485 dst->motion_val[i] = src->motion_val[i];
486 dst->ref_index[i] = src->ref_index[i];
/* Make dst a new reference to src: frame buffer, side-data tables,
 * hwaccel private data and scalar metadata. dst must be empty on entry.
 * On any failure dst is fully unreferenced before returning the error. */
492 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
496 av_assert0(!dst->f.buf[0]);
497 av_assert0(src->f.buf[0]);
501 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
505 ret = update_picture_tables(dst, src);
509 if (src->hwaccel_picture_private) {
510 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
511 if (!dst->hwaccel_priv_buf)
513 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
/* Plain scalar fields are copied directly. */
516 dst->field_picture = src->field_picture;
517 dst->mb_var_sum = src->mb_var_sum;
518 dst->mc_mb_var_sum = src->mc_mb_var_sum;
519 dst->b_frame_score = src->b_frame_score;
520 dst->needs_realloc = src->needs_realloc;
521 dst->reference = src->reference;
522 dst->shared = src->shared;
526 ff_mpeg_unref_picture(s, dst);
/* Allocate the per-slice-thread state of an MpegEncContext: ME maps,
 * optional noise-reduction error sums, DCT block storage, and (for
 * H.263-family) the AC prediction values. Returns 0 or -1 on OOM; the
 * partially allocated buffers are released later by ff_MPV_common_end(). */
530 static int init_duplicate_context(MpegEncContext *s)
532 int y_size = s->b8_stride * (2 * s->mb_height + 1);
533 int c_size = s->mb_stride * (s->mb_height + 1);
534 int yc_size = y_size + 2 * c_size;
542 s->obmc_scratchpad = NULL;
545 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
546 ME_MAP_SIZE * sizeof(uint32_t), fail)
547 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
548 ME_MAP_SIZE * sizeof(uint32_t), fail)
549 if (s->avctx->noise_reduction) {
550 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
551 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 int16_t coefficients, double-buffered. */
554 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
555 s->block = s->blocks[0];
557 for (i = 0; i < 12; i++) {
558 s->pblocks[i] = &s->block[i];
561 if (s->out_format == FMT_H263) {
563 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
564 yc_size * sizeof(int16_t) * 16, fail);
/* ac_val[0] = luma plane, ac_val[1]/[2] = chroma planes, each offset by
 * one row plus one entry of edge padding. */
565 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
566 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
567 s->ac_val[2] = s->ac_val[1] + c_size;
572 return -1; // free() through ff_MPV_common_end()
/* Free all per-slice-thread buffers allocated by init_duplicate_context()
 * and ff_mpv_frame_size_alloc(); obmc_scratchpad aliases me.scratchpad,
 * so it is only NULLed, not freed separately. */
575 static void free_duplicate_context(MpegEncContext *s)
580 av_freep(&s->edge_emu_buffer);
581 av_freep(&s->me.scratchpad);
585 s->obmc_scratchpad = NULL;
587 av_freep(&s->dct_error_sum);
588 av_freep(&s->me.map);
589 av_freep(&s->me.score_map);
590 av_freep(&s->blocks);
591 av_freep(&s->ac_val_base);
/* Save the thread-local pointer fields of src into bak so they survive a
 * wholesale memcpy of the context. NOTE(review): most of the COPY(...)
 * field list is missing from this extract. */
595 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
597 #define COPY(a) bak->a = src->a
598 COPY(edge_emu_buffer);
603 COPY(obmc_scratchpad);
610 COPY(me.map_generation);
/* Copy src's whole context into dst while preserving dst's thread-local
 * buffers (saved/restored via backup_duplicate_context), then re-point the
 * block pointers and reallocate scratch buffers if missing. */
622 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
626 // FIXME copy only needed parts
628 backup_duplicate_context(&bak, dst);
629 memcpy(dst, src, sizeof(MpegEncContext));
630 backup_duplicate_context(dst, &bak);
631 for (i = 0; i < 12; i++) {
632 dst->pblocks[i] = &dst->block[i];
634 if (!dst->edge_emu_buffer &&
635 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
636 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
637 "scratch buffers.\n");
640 // STOP_TIMER("update_duplicate_context")
641 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: synchronize the decoding state of dst with src after
 * src finished setting up a frame. Handles first-time init, resolution
 * changes, picture references, the MPEG-4/MPEG-2 parameter ranges (raw
 * memcpy between struct members) and the bitstream buffer. */
645 int ff_mpeg_update_thread_context(AVCodecContext *dst,
646 const AVCodecContext *src)
649 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
651 if (dst == src || !s1->context_initialized)
654 // FIXME can parameters change on I-frames?
655 // in that case dst may need a reinit
656 if (!s->context_initialized) {
657 memcpy(s, s1, sizeof(MpegEncContext));
/* The copied bitstream buffer belongs to s1; drop it before init. */
660 s->bitstream_buffer = NULL;
661 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
663 ff_MPV_common_init(s);
666 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
668 s->context_reinit = 0;
669 s->height = s1->height;
670 s->width = s1->width;
671 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
675 s->avctx->coded_height = s1->avctx->coded_height;
676 s->avctx->coded_width = s1->avctx->coded_width;
677 s->avctx->width = s1->avctx->width;
678 s->avctx->height = s1->avctx->height;
680 s->coded_picture_number = s1->coded_picture_number;
681 s->picture_number = s1->picture_number;
682 s->input_picture_number = s1->input_picture_number;
/* Re-reference every picture slot that holds data in the source. */
684 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
685 ff_mpeg_unref_picture(s, &s->picture[i]);
686 if (s1->picture[i].f.data[0] &&
687 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
691 #define UPDATE_PICTURE(pic)\
693 ff_mpeg_unref_picture(s, &s->pic);\
694 if (s1->pic.f.data[0])\
695 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
697 ret = update_picture_tables(&s->pic, &s1->pic);\
702 UPDATE_PICTURE(current_picture);
703 UPDATE_PICTURE(last_picture);
704 UPDATE_PICTURE(next_picture);
/* Translate the src picture-slot pointers into dst's picture array. */
706 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
707 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
708 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
710 // Error/bug resilience
711 s->next_p_frame_damaged = s1->next_p_frame_damaged;
712 s->workaround_bugs = s1->workaround_bugs;
/* MPEG-4 timing info: raw copy of the member range
 * [time_increment_bits, shape) — relies on struct layout. */
715 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
716 (char *) &s1->shape - (char *) &s1->time_increment_bits);
719 s->max_b_frames = s1->max_b_frames;
720 s->low_delay = s1->low_delay;
721 s->droppable = s1->droppable;
723 // DivX handling (doesn't work)
724 s->divx_packed = s1->divx_packed;
726 if (s1->bitstream_buffer) {
727 if (s1->bitstream_buffer_size +
728 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
729 av_fast_malloc(&s->bitstream_buffer,
730 &s->allocated_bitstream_buffer_size,
731 s1->allocated_bitstream_buffer_size);
732 s->bitstream_buffer_size = s1->bitstream_buffer_size;
733 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
734 s1->bitstream_buffer_size);
735 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
736 FF_INPUT_BUFFER_PADDING_SIZE);
739 // linesize dependent scratch buffer allocation
740 if (!s->edge_emu_buffer)
742 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
743 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
744 "scratch buffers.\n");
745 return AVERROR(ENOMEM);
748 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
749 "be allocated due to unknown size.\n");
753 // MPEG2/interlacing info
754 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
755 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
757 if (!s1->first_field) {
758 s->last_pict_type = s1->pict_type;
759 if (s1->current_picture_ptr)
760 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
762 if (s1->pict_type != AV_PICTURE_TYPE_B) {
763 s->last_non_b_pict_type = s1->pict_type;
771 * Set the given MpegEncContext to common defaults
772 * (same for encoding and decoding).
773 * The changed fields will not depend upon the
774 * prior state of the MpegEncContext.
776 void ff_MPV_common_defaults(MpegEncContext *s)
/* Default scale tables: MPEG-1 DC scaling, identity chroma qscale. */
778 s->y_dc_scale_table =
779 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
780 s->chroma_qscale_table = ff_default_chroma_qscale_table;
781 s->progressive_frame = 1;
782 s->progressive_sequence = 1;
783 s->picture_structure = PICT_FRAME;
785 s->coded_picture_number = 0;
786 s->picture_number = 0;
787 s->input_picture_number = 0;
789 s->picture_in_gop_number = 0;
794 s->slice_context_count = 1;
798 * Set the given MpegEncContext to defaults for decoding.
799 * the changed fields will not depend upon
800 * the prior state of the MpegEncContext.
802 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Currently identical to the common defaults. */
804 ff_MPV_common_defaults(s);
/* Initialize the error-resilience context: mirror the macroblock geometry
 * from the MpegEncContext, allocate the status/temp tables and install
 * mpeg_er_decode_mb as the concealment callback. Returns 0 or
 * AVERROR(ENOMEM) after freeing the partially allocated tables. */
807 static int init_er(MpegEncContext *s)
809 ERContext *er = &s->er;
810 int mb_array_size = s->mb_height * s->mb_stride;
813 er->avctx = s->avctx;
816 er->mb_index2xy = s->mb_index2xy;
817 er->mb_num = s->mb_num;
818 er->mb_width = s->mb_width;
819 er->mb_height = s->mb_height;
820 er->mb_stride = s->mb_stride;
821 er->b8_stride = s->b8_stride;
823 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
824 er->error_status_table = av_mallocz(mb_array_size);
825 if (!er->er_temp_buffer || !er->error_status_table)
828 er->mbskip_table = s->mbskip_table;
829 er->mbintra_table = s->mbintra_table;
831 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
832 er->dc_val[i] = s->dc_val[i];
834 er->decode_mb = mpeg_er_decode_mb;
839 av_freep(&er->er_temp_buffer);
840 av_freep(&er->error_status_table);
841 return AVERROR(ENOMEM);
845 * Initialize and allocates MpegEncContext fields dependent on the resolution.
847 static int init_context_frame(MpegEncContext *s)
849 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Macroblock geometry: strides carry one extra column of padding. */
851 s->mb_width = (s->width + 15) / 16;
852 s->mb_stride = s->mb_width + 1;
853 s->b8_stride = s->mb_width * 2 + 1;
854 s->b4_stride = s->mb_width * 4 + 1;
855 mb_array_size = s->mb_height * s->mb_stride;
856 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
858 /* set default edge pos, will be overridden
859 * in decode_header if needed */
860 s->h_edge_pos = s->mb_width * 16;
861 s->v_edge_pos = s->mb_height * 16;
863 s->mb_num = s->mb_width * s->mb_height;
868 s->block_wrap[3] = s->b8_stride;
870 s->block_wrap[5] = s->mb_stride;
872 y_size = s->b8_stride * (2 * s->mb_height + 1);
873 c_size = s->mb_stride * (s->mb_height + 1);
874 yc_size = y_size + 2 * c_size;
876 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
877 fail); // error resilience code looks cleaner with this
878 for (y = 0; y < s->mb_height; y++)
879 for (x = 0; x < s->mb_width; x++)
880 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
882 s->mb_index2xy[s->mb_height * s->mb_width] =
883 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
886 /* Allocate MV tables */
887 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
888 mv_table_size * 2 * sizeof(int16_t), fail);
889 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
890 mv_table_size * 2 * sizeof(int16_t), fail);
891 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
892 mv_table_size * 2 * sizeof(int16_t), fail);
893 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
894 mv_table_size * 2 * sizeof(int16_t), fail);
895 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
896 mv_table_size * 2 * sizeof(int16_t), fail);
897 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
898 mv_table_size * 2 * sizeof(int16_t), fail);
/* Public MV table pointers skip one stride plus one entry of padding. */
899 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
900 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
901 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
902 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
904 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
906 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
908 /* Allocate MB type table */
909 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
910 sizeof(uint16_t), fail); // needed for encoding
912 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
915 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
916 mb_array_size * sizeof(float), fail);
917 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
918 mb_array_size * sizeof(float), fail);
922 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
923 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
924 /* interlaced direct mode decoding tables */
925 for (i = 0; i < 2; i++) {
927 for (j = 0; j < 2; j++) {
928 for (k = 0; k < 2; k++) {
929 FF_ALLOCZ_OR_GOTO(s->avctx,
930 s->b_field_mv_table_base[i][j][k],
931 mv_table_size * 2 * sizeof(int16_t),
933 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
936 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
937 mb_array_size * 2 * sizeof(uint8_t), fail);
938 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
939 mv_table_size * 2 * sizeof(int16_t), fail);
940 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
943 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
944 mb_array_size * 2 * sizeof(uint8_t), fail);
947 if (s->out_format == FMT_H263) {
949 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
950 s->coded_block = s->coded_block_base + s->b8_stride + 1;
952 /* cbp, ac_pred, pred_dir */
953 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
954 mb_array_size * sizeof(uint8_t), fail);
955 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
956 mb_array_size * sizeof(uint8_t), fail);
959 if (s->h263_pred || s->h263_plus || !s->encoding) {
961 // MN: we need these for error resilience of intra-frames
962 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
963 yc_size * sizeof(int16_t), fail);
964 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
965 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
966 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the mid-point DC predictor reset value. */
967 for (i = 0; i < yc_size; i++)
968 s->dc_val_base[i] = 1024;
971 /* which mb is a intra block */
972 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
973 memset(s->mbintra_table, 1, mb_array_size);
975 /* init macroblock skip table */
976 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
977 // Note the + 1 is for a quicker mpeg4 slice_end detection
981 return AVERROR(ENOMEM);
985 * init common structure for both encoder and decoder.
986 * this assumes that some variables like width/height are already set
988 av_cold int ff_MPV_common_init(MpegEncContext *s)
991 int nb_slices = (HAVE_THREADS &&
992 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
993 s->avctx->thread_count : 1;
995 if (s->encoding && s->avctx->slices)
996 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 rounds mb_height up to a multiple of two MB rows. */
998 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
999 s->mb_height = (s->height + 31) / 32 * 2;
1001 s->mb_height = (s->height + 15) / 16;
1003 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1004 av_log(s->avctx, AV_LOG_ERROR,
1005 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp the slice/thread count to what the frame geometry allows. */
1009 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1012 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1014 max_slices = MAX_THREADS;
1015 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1016 " reducing to %d\n", nb_slices, max_slices);
1017 nb_slices = max_slices;
1020 if ((s->width || s->height) &&
1021 av_image_check_size(s->width, s->height, 0, s->avctx))
1024 ff_dct_common_init(s);
1026 s->flags = s->avctx->flags;
1027 s->flags2 = s->avctx->flags2;
1029 if (s->width && s->height) {
1030 /* set chroma shifts */
1031 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1033 &s->chroma_y_shift);
1035 /* convert fourcc to upper case */
1036 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1038 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1040 s->avctx->coded_frame = &s->current_picture.f;
/* Encoder-only allocations (stats, quantizer matrices, reorder queues). */
1043 if (s->msmpeg4_version) {
1044 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1045 2 * 2 * (MAX_LEVEL + 1) *
1046 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1048 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1050 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
1051 64 * 32 * sizeof(int), fail);
1052 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
1053 64 * 32 * sizeof(int), fail);
1054 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
1055 64 * 32 * 2 * sizeof(uint16_t), fail);
1056 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
1057 64 * 32 * 2 * sizeof(uint16_t), fail);
1058 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
1059 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1060 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
1061 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1063 if (s->avctx->noise_reduction) {
1064 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
1065 2 * 64 * sizeof(uint16_t), fail);
/* Picture pool plus the three working pictures. */
1070 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1071 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1072 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1073 avcodec_get_frame_defaults(&s->picture[i].f);
1075 memset(&s->next_picture, 0, sizeof(s->next_picture));
1076 memset(&s->last_picture, 0, sizeof(s->last_picture));
1077 memset(&s->current_picture, 0, sizeof(s->current_picture));
1078 avcodec_get_frame_defaults(&s->next_picture.f);
1079 avcodec_get_frame_defaults(&s->last_picture.f);
1080 avcodec_get_frame_defaults(&s->current_picture.f);
1082 if (s->width && s->height) {
1083 if (init_context_frame(s))
1086 s->parse_context.state = -1;
1089 s->context_initialized = 1;
1090 s->thread_context[0] = s;
/* Slice threading: clone the context for each extra slice thread and
 * partition the MB rows between start_mb_y / end_mb_y. */
1092 if (s->width && s->height) {
1093 if (nb_slices > 1) {
1094 for (i = 1; i < nb_slices; i++) {
1095 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1096 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1099 for (i = 0; i < nb_slices; i++) {
1100 if (init_duplicate_context(s->thread_context[i]) < 0)
1102 s->thread_context[i]->start_mb_y =
1103 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1104 s->thread_context[i]->end_mb_y =
1105 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1108 if (init_duplicate_context(s) < 0)
1111 s->end_mb_y = s->mb_height;
1113 s->slice_context_count = nb_slices;
1118 ff_MPV_common_end(s);
1123 * Frees and resets MpegEncContext fields depending on the resolution.
1124 * Is used during resolution changes to avoid a full reinitialization of the
1127 static int free_context_frame(MpegEncContext *s)
1131 av_freep(&s->mb_type);
/* Free the *_base allocations and NULL the derived padded pointers so a
 * later init_context_frame() starts from a clean state. */
1132 av_freep(&s->p_mv_table_base);
1133 av_freep(&s->b_forw_mv_table_base);
1134 av_freep(&s->b_back_mv_table_base);
1135 av_freep(&s->b_bidir_forw_mv_table_base);
1136 av_freep(&s->b_bidir_back_mv_table_base);
1137 av_freep(&s->b_direct_mv_table_base);
1138 s->p_mv_table = NULL;
1139 s->b_forw_mv_table = NULL;
1140 s->b_back_mv_table = NULL;
1141 s->b_bidir_forw_mv_table = NULL;
1142 s->b_bidir_back_mv_table = NULL;
1143 s->b_direct_mv_table = NULL;
1144 for (i = 0; i < 2; i++) {
1145 for (j = 0; j < 2; j++) {
1146 for (k = 0; k < 2; k++) {
1147 av_freep(&s->b_field_mv_table_base[i][j][k]);
1148 s->b_field_mv_table[i][j][k] = NULL;
1150 av_freep(&s->b_field_select_table[i][j]);
1151 av_freep(&s->p_field_mv_table_base[i][j]);
1152 s->p_field_mv_table[i][j] = NULL;
1154 av_freep(&s->p_field_select_table[i]);
1157 av_freep(&s->dc_val_base);
1158 av_freep(&s->coded_block_base);
1159 av_freep(&s->mbintra_table);
1160 av_freep(&s->cbp_table);
1161 av_freep(&s->pred_dir_table);
1163 av_freep(&s->mbskip_table);
1165 av_freep(&s->er.error_status_table);
1166 av_freep(&s->er.er_temp_buffer);
1167 av_freep(&s->mb_index2xy);
1168 av_freep(&s->lambda_table);
1169 av_freep(&s->cplx_tab);
1170 av_freep(&s->bits_tab);
/* Reset strides so the next alloc_frame_buffer() re-learns them. */
1172 s->linesize = s->uvlinesize = 0;
/**
 * Re-initialize the context for a new frame size without a full
 * ff_MPV_common_end()/ff_MPV_common_init() cycle: tear down the
 * per-slice duplicate contexts and resolution-dependent buffers,
 * recompute mb_height, then rebuild everything for the new dimensions.
 * On failure (elided error paths) falls through to ff_MPV_common_end().
 */
1177 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Tear down slice-threading contexts; slot 0 is s itself so only
 * slots >= 1 are freed. */
1181 if (s->slice_context_count > 1) {
1182 for (i = 0; i < s->slice_context_count; i++) {
1183 free_duplicate_context(s->thread_context[i]);
1185 for (i = 1; i < s->slice_context_count; i++) {
1186 av_freep(&s->thread_context[i]);
1189 free_duplicate_context(s);
1191 if ((err = free_context_frame(s)) < 0)
/* Force every picture slot to be reallocated at the new size. */
1195 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1196 s->picture[i].needs_realloc = 1;
1199 s->last_picture_ptr =
1200 s->next_picture_ptr =
1201 s->current_picture_ptr = NULL;
/* Interlaced MPEG-2 rounds mb_height to a multiple of two 16-line
 * macroblock rows (32-pixel units); everything else uses 16. */
1204 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1205 s->mb_height = (s->height + 31) / 32 * 2;
1207 s->mb_height = (s->height + 15) / 16;
1209 if ((s->width || s->height) &&
1210 av_image_check_size(s->width, s->height, 0, s->avctx))
1211 return AVERROR_INVALIDDATA;
1213 if ((err = init_context_frame(s)))
1216 s->thread_context[0] = s;
/* Rebuild the duplicate contexts and their MB-row ranges; mirrors the
 * corresponding code in ff_MPV_common_init(). */
1218 if (s->width && s->height) {
1219 int nb_slices = s->slice_context_count;
1220 if (nb_slices > 1) {
1221 for (i = 1; i < nb_slices; i++) {
1222 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1223 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1226 for (i = 0; i < nb_slices; i++) {
1227 if (init_duplicate_context(s->thread_context[i]) < 0)
/* Round MB-row boundaries to distribute rows evenly across slices. */
1229 s->thread_context[i]->start_mb_y =
1230 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1231 s->thread_context[i]->end_mb_y =
1232 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1235 if (init_duplicate_context(s) < 0)
1238 s->end_mb_y = s->mb_height;
1240 s->slice_context_count = nb_slices;
/* Error path (label elided in this view): full teardown. */
1245 ff_MPV_common_end(s);
1249 /* init common structure for both encoder and decoder */
1250 void ff_MPV_common_end(MpegEncContext *s)
1254 if (s->slice_context_count > 1) {
1255 for (i = 0; i < s->slice_context_count; i++) {
1256 free_duplicate_context(s->thread_context[i]);
1258 for (i = 1; i < s->slice_context_count; i++) {
1259 av_freep(&s->thread_context[i]);
1261 s->slice_context_count = 1;
1262 } else free_duplicate_context(s);
1264 av_freep(&s->parse_context.buffer);
1265 s->parse_context.buffer_size = 0;
1267 av_freep(&s->bitstream_buffer);
1268 s->allocated_bitstream_buffer_size = 0;
1270 av_freep(&s->avctx->stats_out);
1271 av_freep(&s->ac_stats);
1273 av_freep(&s->q_intra_matrix);
1274 av_freep(&s->q_inter_matrix);
1275 av_freep(&s->q_intra_matrix16);
1276 av_freep(&s->q_inter_matrix16);
1277 av_freep(&s->input_picture);
1278 av_freep(&s->reordered_input_picture);
1279 av_freep(&s->dct_offset);
1282 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1283 free_picture_tables(&s->picture[i]);
1284 ff_mpeg_unref_picture(s, &s->picture[i]);
1287 av_freep(&s->picture);
1288 free_picture_tables(&s->last_picture);
1289 ff_mpeg_unref_picture(s, &s->last_picture);
1290 free_picture_tables(&s->current_picture);
1291 ff_mpeg_unref_picture(s, &s->current_picture);
1292 free_picture_tables(&s->next_picture);
1293 ff_mpeg_unref_picture(s, &s->next_picture);
1294 free_picture_tables(&s->new_picture);
1295 ff_mpeg_unref_picture(s, &s->new_picture);
1297 free_context_frame(s);
1299 s->context_initialized = 0;
1300 s->last_picture_ptr =
1301 s->next_picture_ptr =
1302 s->current_picture_ptr = NULL;
1303 s->linesize = s->uvlinesize = 0;
/**
 * Build the derived run/level lookup tables (max_level[], max_run[],
 * index_run[]) for an RLTable from its table_run/table_level arrays.
 *
 * @param rl           table to initialize in place
 * @param static_store if non-NULL, a caller-provided static buffer is
 *                     used instead of av_malloc(), and the function is
 *                     idempotent (returns early once initialized)
 */
1306 void ff_init_rl(RLTable *rl,
1307 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1309 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1310 uint8_t index_run[MAX_RUN + 1];
1311 int last, run, level, start, end, i;
1313 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1314 if (static_store && rl->max_level[0])
1317 /* compute max_level[], max_run[] and index_run[] */
/* Two passes: last == 0 for "not last coefficient" codes, last == 1
 * for "last coefficient" codes (start/end set in elided lines). */
1318 for (last = 0; last < 2; last++) {
/* rl->n marks "no code with this run" in index_run[]. */
1327 memset(max_level, 0, MAX_RUN + 1);
1328 memset(max_run, 0, MAX_LEVEL + 1);
1329 memset(index_run, rl->n, MAX_RUN + 1);
1330 for (i = start; i < end; i++) {
1331 run = rl->table_run[i];
1332 level = rl->table_level[i];
1333 if (index_run[run] == rl->n)
1335 if (level > max_level[run])
1336 max_level[run] = level;
1337 if (run > max_run[level])
1338 max_run[level] = run;
/* Copy the locals into either the static buffer (three sub-ranges of
 * static_store[last]) or freshly allocated memory. */
1341 rl->max_level[last] = static_store[last];
1343 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1344 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1346 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1348 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1349 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1351 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1353 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1354 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Pre-compute the per-qscale RL VLC tables: for each of the 32 qscale
 * values, fold the dequantization (qmul/qadd) into the decoded level so
 * the decoder can skip it at runtime.  Entries for escape, illegal and
 * multi-level codes are handled in the elided branches.
 */
1358 void ff_init_vlc_rl(RLTable *rl)
1362 for (q = 0; q < 32; q++) {
/* qadd = (q - 1) | 1 gives the standard H.263-style odd rounding term;
 * qmul is set in an elided line. */
1364 int qadd = (q - 1) | 1;
1370 for (i = 0; i < rl->vlc.table_size; i++) {
1371 int code = rl->vlc.table[i][0];
1372 int len = rl->vlc.table[i][1];
1375 if (len == 0) { // illegal code
1378 } else if (len < 0) { // more bits needed
1382 if (code == rl->n) { // esc
/* Normal code: recover run/level and pre-apply dequantization. */
1386 run = rl->table_run[code] + 1;
1387 level = rl->table_level[code] * qmul + qadd;
/* "last" codes are flagged by adding 192 to the run. */
1388 if (code >= rl->last) run += 192;
1391 rl->rl_vlc[q][i].len = len;
1392 rl->rl_vlc[q][i].level = level;
1393 rl->rl_vlc[q][i].run = run;
/**
 * Unreference every non-reference picture in the pool.
 *
 * @param remove_current if zero, the picture currently pointed to by
 *                       s->current_picture_ptr is kept alive
 */
1398 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1402 /* release non reference frames */
1403 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1404 if (!s->picture[i].reference &&
1405 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1406 ff_mpeg_unref_picture(s, &s->picture[i]);
/**
 * Return nonzero if this Picture slot may be (re)used: either it has no
 * data buffer at all, or it is flagged for reallocation and is not held
 * as a delayed reference (return statements elided in this view).
 */
1411 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1413 if (pic->f.data[0] == NULL)
1415 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/**
 * Find a free slot in s->picture[].  Preference order: a slot with no
 * data buffer first, then any slot pic_is_unused() accepts (the shared
 * path and returns of the found index are in elided lines).
 * Returns AVERROR_INVALIDDATA when every slot is in use.
 */
1420 static int find_unused_picture(MpegEncContext *s, int shared)
1425 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1426 if (s->picture[i].f.data[0] == NULL)
1430 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1431 if (pic_is_unused(s, &s->picture[i]))
1436 return AVERROR_INVALIDDATA;
/**
 * Public wrapper around find_unused_picture(): if the chosen slot is
 * flagged needs_realloc, fully release it (tables, frame reference) and
 * reset its AVFrame so the caller gets a clean slot.
 * Returns the slot index, or a negative error code from the search.
 */
1439 int ff_find_unused_picture(MpegEncContext *s, int shared)
1441 int ret = find_unused_picture(s, shared);
1443 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1444 if (s->picture[ret].needs_realloc) {
1445 s->picture[ret].needs_realloc = 0;
1446 free_picture_tables(&s->picture[ret]);
1447 ff_mpeg_unref_picture(s, &s->picture[ret]);
1448 avcodec_get_frame_defaults(&s->picture[ret].f);
/**
 * Refresh the per-coefficient DCT noise-reduction offsets from the
 * accumulated error statistics, separately for intra and inter blocks.
 * Statistics are halved once dct_count exceeds 2^16 so they track
 * recent frames instead of growing without bound.
 */
1454 static void update_noise_reduction(MpegEncContext *s)
1458 for (intra = 0; intra < 2; intra++) {
1459 if (s->dct_count[intra] > (1 << 16)) {
1460 for (i = 0; i < 64; i++) {
1461 s->dct_error_sum[intra][i] >>= 1;
1463 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, with rounding; the
 * "+ 1" avoids division by zero for unused coefficients. */
1466 for (i = 0; i < 64; i++) {
1467 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1468 s->dct_count[intra] +
1469 s->dct_error_sum[intra][i] / 2) /
1470 (s->dct_error_sum[intra][i] + 1);
1476 * generic function for encode/decode called after coding/decoding
1477 * the header and before a frame is coded/decoded.
/**
 * Per-frame setup for both encoder and decoder, run after the header is
 * parsed and before coding/decoding starts: recycles old pictures,
 * allocates the current picture, sets its metadata, rotates the
 * last/next reference pointers, allocates dummy references when a
 * stream starts on a P/B frame or a field keyframe, doubles line sizes
 * for field pictures, and (re)selects the dequantizers.
 * Returns 0 on success, negative on error (error returns are in lines
 * elided from this view).
 */
1479 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1485 /* mark & release old frames */
1486 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1487 s->last_picture_ptr != s->next_picture_ptr &&
1488 s->last_picture_ptr->f.data[0]) {
1489 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1492 /* release forgotten pictures */
1493 /* if (mpeg124/h263) */
1495 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1496 if (&s->picture[i] != s->last_picture_ptr &&
1497 &s->picture[i] != s->next_picture_ptr &&
1498 s->picture[i].reference && !s->picture[i].needs_realloc) {
/* With frame threading another thread may legitimately still hold
 * references, so only warn outside that mode. */
1499 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1500 av_log(avctx, AV_LOG_ERROR,
1501 "releasing zombie picture\n");
1502 ff_mpeg_unref_picture(s, &s->picture[i]);
1508 ff_release_unused_pictures(s, 1);
/* Pick the picture to decode/encode into. */
1510 if (s->current_picture_ptr &&
1511 s->current_picture_ptr->f.data[0] == NULL) {
1512 // we already have a unused image
1513 // (maybe it was set before reading the header)
1514 pic = s->current_picture_ptr;
1516 i = ff_find_unused_picture(s, 0);
1518 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1521 pic = &s->picture[i];
/* Reference flag setup for non-droppable frames (value assignment is
 * in elided lines). */
1525 if (!s->droppable) {
1526 if (s->pict_type != AV_PICTURE_TYPE_B)
1530 pic->f.coded_picture_number = s->coded_picture_number++;
1532 if (ff_alloc_picture(s, pic, 0) < 0)
1535 s->current_picture_ptr = pic;
1536 // FIXME use only the vars from current_pic
1537 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1538 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1539 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* For field pictures, top_field_first is derived from which field
 * comes first in coded order. */
1540 if (s->picture_structure != PICT_FRAME)
1541 s->current_picture_ptr->f.top_field_first =
1542 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1544 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1545 !s->progressive_sequence;
1546 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1549 s->current_picture_ptr->f.pict_type = s->pict_type;
1550 // if (s->flags && CODEC_FLAG_QSCALE)
1551 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1552 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1554 ff_mpeg_unref_picture(s, &s->current_picture);
1555 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1556 s->current_picture_ptr)) < 0)
/* Rotate references: non-B frames become the new forward reference. */
1559 if (s->pict_type != AV_PICTURE_TYPE_B) {
1560 s->last_picture_ptr = s->next_picture_ptr;
1562 s->next_picture_ptr = s->current_picture_ptr;
1564 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1565 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1566 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1567 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1568 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1569 s->pict_type, s->droppable);
/* No usable forward reference (stream starts on P/B, or a field-coded
 * keyframe): fabricate a gray dummy last picture. */
1571 if ((s->last_picture_ptr == NULL ||
1572 s->last_picture_ptr->f.data[0] == NULL) &&
1573 (s->pict_type != AV_PICTURE_TYPE_I ||
1574 s->picture_structure != PICT_FRAME)) {
1575 int h_chroma_shift, v_chroma_shift;
1576 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1577 &h_chroma_shift, &v_chroma_shift);
1578 if (s->pict_type != AV_PICTURE_TYPE_I)
1579 av_log(avctx, AV_LOG_ERROR,
1580 "warning: first frame is no keyframe\n");
1581 else if (s->picture_structure != PICT_FRAME)
1582 av_log(avctx, AV_LOG_INFO,
1583 "allocate dummy last picture for field based first keyframe\n");
1585 /* Allocate a dummy frame */
1586 i = ff_find_unused_picture(s, 0);
1588 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1591 s->last_picture_ptr = &s->picture[i];
1592 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1593 s->last_picture_ptr = NULL;
/* Fill luma with black (0) and chroma with neutral gray (0x80). */
1597 memset(s->last_picture_ptr->f.data[0], 0,
1598 avctx->height * s->last_picture_ptr->f.linesize[0]);
1599 memset(s->last_picture_ptr->f.data[1], 0x80,
1600 (avctx->height >> v_chroma_shift) *
1601 s->last_picture_ptr->f.linesize[1]);
1602 memset(s->last_picture_ptr->f.data[2], 0x80,
1603 (avctx->height >> v_chroma_shift) *
1604 s->last_picture_ptr->f.linesize[2]);
/* Mark both fields of the dummy as fully decoded for frame threading. */
1606 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1607 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* B frames also need a backward reference; same dummy-frame trick. */
1609 if ((s->next_picture_ptr == NULL ||
1610 s->next_picture_ptr->f.data[0] == NULL) &&
1611 s->pict_type == AV_PICTURE_TYPE_B) {
1612 /* Allocate a dummy frame */
1613 i = ff_find_unused_picture(s, 0);
1615 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1618 s->next_picture_ptr = &s->picture[i];
1619 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1620 s->next_picture_ptr = NULL;
1623 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1624 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
/* Refresh the local last/next Picture copies from the pointers. */
1627 if (s->last_picture_ptr) {
1628 ff_mpeg_unref_picture(s, &s->last_picture);
1629 if (s->last_picture_ptr->f.data[0] &&
1630 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1631 s->last_picture_ptr)) < 0)
1634 if (s->next_picture_ptr) {
1635 ff_mpeg_unref_picture(s, &s->next_picture);
1636 if (s->next_picture_ptr->f.data[0] &&
1637 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1638 s->next_picture_ptr)) < 0)
1642 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1643 s->last_picture_ptr->f.data[0]));
/* Field pictures: address the selected field and double the strides so
 * macroblock addressing skips the other field's lines. */
1645 if (s->picture_structure!= PICT_FRAME) {
1647 for (i = 0; i < 4; i++) {
1648 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1649 s->current_picture.f.data[i] +=
1650 s->current_picture.f.linesize[i];
1652 s->current_picture.f.linesize[i] *= 2;
1653 s->last_picture.f.linesize[i] *= 2;
1654 s->next_picture.f.linesize[i] *= 2;
1658 s->err_recognition = avctx->err_recognition;
1660 /* set dequantizer, we can't do it during init as
1661 * it might change for mpeg4 and we can't do it in the header
1662 * decode as init is not called for mpeg4 there yet */
1663 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1664 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1665 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1666 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1667 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1668 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1670 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1671 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1674 if (s->dct_error_sum) {
1675 assert(s->avctx->noise_reduction && s->encoding);
1676 update_noise_reduction(s);
1679 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1680 return ff_xvmc_field_start(s, avctx);
1685 /* generic function for encode/decode called after a
1686 * frame has been coded/decoded. */
/**
 * Per-frame teardown after coding/decoding: draws padded edges around
 * reference pictures for unrestricted MV, records last picture type and
 * lambda, syncs the current picture back into the pool, releases
 * non-reference frames, and reports decode completion for threading.
 */
1687 void ff_MPV_frame_end(MpegEncContext *s)
1690 /* redraw edges for the frame if decoding didn't complete */
1691 // just to make sure that all data is rendered.
1692 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1693 ff_xvmc_field_end(s);
/* Edge padding is only needed when this picture will be used as a
 * reference with unrestricted motion vectors and no hw path owns
 * the buffer. */
1694 } else if ((s->er.error_count || s->encoding) &&
1695 !s->avctx->hwaccel &&
1696 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1697 s->unrestricted_mv &&
1698 s->current_picture.reference &&
1700 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1701 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1702 int hshift = desc->log2_chroma_w;
1703 int vshift = desc->log2_chroma_h;
1704 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1705 s->h_edge_pos, s->v_edge_pos,
1706 EDGE_WIDTH, EDGE_WIDTH,
1707 EDGE_TOP | EDGE_BOTTOM);
1708 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1709 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1710 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1711 EDGE_TOP | EDGE_BOTTOM);
1712 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1713 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1714 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1715 EDGE_TOP | EDGE_BOTTOM);
1720 s->last_pict_type = s->pict_type;
1721 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1722 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1723 s->last_non_b_pict_type = s->pict_type;
1726 /* copy back current_picture variables */
1727 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1728 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1729 s->picture[i] = s->current_picture;
1733 assert(i < MAX_PICTURE_COUNT);
1737 /* release non-reference frames */
1738 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1739 if (!s->picture[i].reference)
1740 ff_mpeg_unref_picture(s, &s->picture[i]);
1743 // clear copies, to avoid confusion
/* NOTE(review): these memsets would make the .reference check below
 * always false; the elided lines presumably place them under a
 * conditional/#if — confirm against the full file. */
1745 memset(&s->last_picture, 0, sizeof(Picture));
1746 memset(&s->next_picture, 0, sizeof(Picture));
1747 memset(&s->current_picture, 0, sizeof(Picture));
1749 s->avctx->coded_frame = &s->current_picture_ptr->f;
1751 if (s->current_picture.reference)
1752 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1756 * Print debugging info for the given picture.
/**
 * Print per-frame debug info (picture type, and a per-macroblock map of
 * skip counts, qscale and/or MB type glyphs) depending on which
 * FF_DEBUG_* flags are set on the codec context.
 */
1758 void ff_print_debug_info(MpegEncContext *s, Picture *p)
1761 if (s->avctx->hwaccel || !p || !p->mb_type)
1765 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1768 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
/* NOTE(review): `pict` is not declared in the lines visible here —
 * presumably an elided local aliasing &p->f; confirm in the full file. */
1769 switch (pict->pict_type) {
1770 case AV_PICTURE_TYPE_I:
1771 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1773 case AV_PICTURE_TYPE_P:
1774 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1776 case AV_PICTURE_TYPE_B:
1777 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1779 case AV_PICTURE_TYPE_S:
1780 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1782 case AV_PICTURE_TYPE_SI:
1783 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1785 case AV_PICTURE_TYPE_SP:
1786 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
/* One output row per MB row; each MB contributes up to three glyph
 * columns (skip count, qscale, type/interlace). */
1789 for (y = 0; y < s->mb_height; y++) {
1790 for (x = 0; x < s->mb_width; x++) {
1791 if (s->avctx->debug & FF_DEBUG_SKIP) {
1792 int count = s->mbskip_table[x + y * s->mb_stride];
1795 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1797 if (s->avctx->debug & FF_DEBUG_QP) {
1798 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1799 p->qscale_table[x + y * s->mb_stride]);
1801 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1802 int mb_type = p->mb_type[x + y * s->mb_stride];
1803 // Type & MV direction
1804 if (IS_PCM(mb_type))
1805 av_log(s->avctx, AV_LOG_DEBUG, "P");
1806 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1807 av_log(s->avctx, AV_LOG_DEBUG, "A");
1808 else if (IS_INTRA4x4(mb_type))
1809 av_log(s->avctx, AV_LOG_DEBUG, "i");
1810 else if (IS_INTRA16x16(mb_type))
1811 av_log(s->avctx, AV_LOG_DEBUG, "I");
1812 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1813 av_log(s->avctx, AV_LOG_DEBUG, "d");
1814 else if (IS_DIRECT(mb_type))
1815 av_log(s->avctx, AV_LOG_DEBUG, "D");
1816 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1817 av_log(s->avctx, AV_LOG_DEBUG, "g");
1818 else if (IS_GMC(mb_type))
1819 av_log(s->avctx, AV_LOG_DEBUG, "G");
1820 else if (IS_SKIP(mb_type))
1821 av_log(s->avctx, AV_LOG_DEBUG, "S");
1822 else if (!USES_LIST(mb_type, 1))
1823 av_log(s->avctx, AV_LOG_DEBUG, ">");
1824 else if (!USES_LIST(mb_type, 0))
1825 av_log(s->avctx, AV_LOG_DEBUG, "<");
/* Uses both lists: bi-predicted. */
1827 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1828 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Segmentation glyph: 8x8 / 16x8 / 8x16 / 16x16 partitioning. */
1832 if (IS_8X8(mb_type))
1833 av_log(s->avctx, AV_LOG_DEBUG, "+");
1834 else if (IS_16X8(mb_type))
1835 av_log(s->avctx, AV_LOG_DEBUG, "-");
1836 else if (IS_8X16(mb_type))
1837 av_log(s->avctx, AV_LOG_DEBUG, "|");
1838 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1839 av_log(s->avctx, AV_LOG_DEBUG, " ");
1841 av_log(s->avctx, AV_LOG_DEBUG, "?");
1844 if (IS_INTERLACED(mb_type))
1845 av_log(s->avctx, AV_LOG_DEBUG, "=");
1847 av_log(s->avctx, AV_LOG_DEBUG, " ");
1850 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1856 * find the lowest MB row referenced in the MVs
/**
 * Compute the lowest (largest-index) macroblock row that the current
 * MB's motion vectors in direction @p dir can reference — used to wait
 * for reference-frame decode progress under frame threading.
 * Field pictures, GMC and unknown MV types conservatively return the
 * last MB row.
 */
1858 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* Half-pel vectors are shifted up by one so all vectors are in the
 * same (quarter-pel) scale before comparing. */
1860 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1861 int my, off, i, mvs;
1863 if (s->picture_structure != PICT_FRAME || s->mcsel)
/* mvs (number of vectors to scan) is set per MV type in the elided
 * switch cases. */
1866 switch (s->mv_type) {
1880 for (i = 0; i < mvs; i++) {
1881 my = s->mv[dir][i][1]<<qpel_shift;
1882 my_max = FFMAX(my_max, my);
1883 my_min = FFMIN(my_min, my);
/* Convert the worst-case quarter-pel displacement to MB rows
 * (64 quarter-pels per 16-pixel row), rounding up. */
1886 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1888 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* Fallback: assume the whole reference frame may be needed. */
1890 return s->mb_height-1;
1893 /* put block[] to dest[] */
1894 static inline void put_dct(MpegEncContext *s,
1895 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
/* Dequantize an intra block in place, then IDCT and store (overwrite)
 * the result at dest. */
1897 s->dct_unquantize_intra(s, block, i, qscale);
1898 s->dsp.idct_put (dest, line_size, block);
1901 /* add block[] to dest[] */
1902 static inline void add_dct(MpegEncContext *s,
1903 int16_t *block, int i, uint8_t *dest, int line_size)
/* IDCT the (already dequantized) block and add it onto the prediction
 * at dest; skipped entirely when the block has no coefficients. */
1905 if (s->block_last_index[i] >= 0) {
1906 s->dsp.idct_add (dest, line_size, block);
1910 static inline void add_dequant_dct(MpegEncContext *s,
1911 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
/* Like add_dct() but the inter block still needs dequantization first;
 * empty blocks (no coefficients) are skipped. */
1913 if (s->block_last_index[i] >= 0) {
1914 s->dct_unquantize_inter(s, block, i, qscale);
1916 s->dsp.idct_add (dest, line_size, block);
1921 * Clean dc, ac, coded_block for the current non-intra MB.
/**
 * Reset the intra-prediction state (DC predictors, AC prediction
 * values, coded-block flags) for the current non-intra macroblock, so
 * a following intra MB predicts from neutral values.
 */
1923 void ff_clean_intra_table_entries(MpegEncContext *s)
1925 int wrap = s->b8_stride;
1926 int xy = s->block_index[0];
/* Luma DC predictors for the four 8x8 blocks reset to the neutral
 * value 1024 (first block's assignment is in an elided line). */
1929 s->dc_val[0][xy + 1 ] =
1930 s->dc_val[0][xy + wrap] =
1931 s->dc_val[0][xy + 1 + wrap] = 1024;
/* ac pred: clear both 8x8 rows of luma AC values. */
1933 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1934 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1935 if (s->msmpeg4_version>=3) {
1936 s->coded_block[xy ] =
1937 s->coded_block[xy + 1 ] =
1938 s->coded_block[xy + wrap] =
1939 s->coded_block[xy + 1 + wrap] = 0;
/* Switch to chroma addressing (one entry per MB). */
1942 wrap = s->mb_stride;
1943 xy = s->mb_x + s->mb_y * wrap;
1945 s->dc_val[2][xy] = 1024;
1947 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1948 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1950 s->mbintra_table[xy]= 0;
1953 /* generic function called after a macroblock has been parsed by the
1954 decoder or after it has been encoded by the encoder.
1956 Important variables used:
1957 s->mb_intra : true if intra macroblock
1958 s->mv_dir : motion vector direction
1959 s->mv_type : motion vector type
1960 s->mv : motion vector
1961 s->interlaced_dct : true if interlaced dct used (mpeg2)
/**
 * Reconstruct one macroblock: optional debug dump, DC-predictor
 * maintenance, motion compensation from last/next references, then
 * dequant+IDCT of the residual (three codec-family paths), and finally
 * a copy to s->dest[] when the MB was rendered into a scratchpad.
 * is_mpeg12 is a compile-time template flag selecting the MPEG-1/2
 * fast path; see the inputs documented in the comment above.
 */
1963 static av_always_inline
1964 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
1967 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1968 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1969 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1973 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1974 /* print DCT coefficients */
1976 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1978 for(j=0; j<64; j++){
1979 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
1981 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1985 s->current_picture.qscale_table[mb_xy] = s->qscale;
1987 /* update DC predictors for P macroblocks */
1989 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1990 if(s->mbintra_table[mb_xy])
1991 ff_clean_intra_table_entries(s);
/* Reset chroma DC predictor to neutral (luma resets are in elided
 * lines). */
1995 s->last_dc[2] = 128 << s->intra_dc_precision;
1998 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1999 s->mbintra_table[mb_xy]=1;
/* Skip full reconstruction for encoder-side RD decisions where the
 * pixels are never needed. */
2001 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2002 uint8_t *dest_y, *dest_cb, *dest_cr;
2003 int dct_linesize, dct_offset;
2004 op_pixels_func (*op_pix)[4];
2005 qpel_mc_func (*op_qpix)[16];
2006 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2007 const int uvlinesize = s->current_picture.f.linesize[1];
2008 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2009 const int block_size = 8;
2011 /* avoid copy if macroblock skipped in last frame too */
2012 /* skip only during decoding as we might trash the buffers during encoding a bit */
2014 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2016 if (s->mb_skipped) {
2018 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2020 } else if(!s->current_picture.reference) {
2023 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT: double the stride and offset by one line so each
 * 8x8 block covers alternating lines of the field. */
2027 dct_linesize = linesize << s->interlaced_dct;
2028 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2032 dest_cb= s->dest[1];
2033 dest_cr= s->dest[2];
/* Non-readable (B-frame decode) path renders into the scratchpad and
 * copies out at the end. */
2035 dest_y = s->b_scratchpad;
2036 dest_cb= s->b_scratchpad+16*linesize;
2037 dest_cr= s->b_scratchpad+32*linesize;
2041 /* motion handling */
2042 /* decoding or more than one mb_type (MC was already done otherwise) */
/* Frame threading: wait until the referenced rows of the reference
 * frames are decoded before reading them. */
2045 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2046 if (s->mv_dir & MV_DIR_FORWARD) {
2047 ff_thread_await_progress(&s->last_picture_ptr->tf,
2048 ff_MPV_lowest_referenced_row(s, 0),
2051 if (s->mv_dir & MV_DIR_BACKWARD) {
2052 ff_thread_await_progress(&s->next_picture_ptr->tf,
2053 ff_MPV_lowest_referenced_row(s, 1),
2058 op_qpix= s->me.qpel_put;
2059 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2060 op_pix = s->hdsp.put_pixels_tab;
2062 op_pix = s->hdsp.put_no_rnd_pixels_tab;
/* Forward MC writes ("put"); a following backward pass averages in
 * ("avg") for bidirectional prediction. */
2064 if (s->mv_dir & MV_DIR_FORWARD) {
2065 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2066 op_pix = s->hdsp.avg_pixels_tab;
2067 op_qpix= s->me.qpel_avg;
2069 if (s->mv_dir & MV_DIR_BACKWARD) {
2070 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2074 /* skip dequant / idct if we are really late ;) */
2075 if(s->avctx->skip_idct){
2076 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2077 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2078 || s->avctx->skip_idct >= AVDISCARD_ALL)
2082 /* add dct residue */
/* Path 1: codecs whose blocks still need dequantization here. */
2083 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2084 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2085 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2086 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2087 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2088 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2090 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2091 if (s->chroma_y_shift){
2092 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2093 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2097 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2098 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2099 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2100 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* Path 2: blocks already dequantized during bitstream decode. */
2103 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2104 add_dct(s, block[0], 0, dest_y , dct_linesize);
2105 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2106 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2107 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2109 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2110 if(s->chroma_y_shift){//Chroma420
2111 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2112 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2115 dct_linesize = uvlinesize << s->interlaced_dct;
2116 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2118 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2119 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2120 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2121 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2122 if(!s->chroma_x_shift){//Chroma444
2123 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2124 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2125 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2126 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
/* Path 3: WMV2 has its own MB reconstruction. */
2131 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2132 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2135 /* dct only in intra block */
/* Intra MB: IDCT result is stored directly (put), not added. */
2136 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2137 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2138 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2139 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2140 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2142 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2143 if(s->chroma_y_shift){
2144 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2145 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2149 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2150 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2151 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2152 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra: blocks were dequantized already, plain IDCT put. */
2156 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2157 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2158 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2159 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2161 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2162 if(s->chroma_y_shift){
2163 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2164 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2167 dct_linesize = uvlinesize << s->interlaced_dct;
2168 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2170 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2171 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2172 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2173 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2174 if(!s->chroma_x_shift){//Chroma444
2175 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2176 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2177 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2178 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* Scratchpad path: copy the reconstructed MB to its real destination. */
2186 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2187 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2188 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Dispatch to the template: compile-time is_mpeg12 = 1 gives the
 * MPEG-1/2 fast path, 0 the generic path. */
2193 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2195 if(s->out_format == FMT_MPEG1) {
2196 MPV_decode_mb_internal(s, block, 1);
2199 MPV_decode_mb_internal(s, block, 0);
2203 * @param h is the normal height, this will be reduced automatically if needed for the last row
/**
 * Hand a horizontal band of decoded rows to the user callback, first
 * drawing edge padding for the band when needed (software path only).
 *
 * @param y starting luma row of the band
 * @param h is the normal height, this will be reduced automatically if needed for the last row
 */
2205 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2206 Picture *last, int y, int h, int picture_structure,
2207 int first_field, int draw_edges, int low_delay,
2208 int v_edge_pos, int h_edge_pos)
2210 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2211 int hshift = desc->log2_chroma_w;
2212 int vshift = desc->log2_chroma_h;
2213 const int field_pic = picture_structure != PICT_FRAME;
/* Only pad edges in the pure-software path; hw decoders own the
 * buffer layout. */
2219 if (!avctx->hwaccel &&
2220 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2223 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2224 int *linesize = cur->f.linesize;
2225 int sides = 0, edge_h;
2226 if (y==0) sides |= EDGE_TOP;
2227 if (y + h >= v_edge_pos)
2228 sides |= EDGE_BOTTOM;
2230 edge_h= FFMIN(h, v_edge_pos - y);
2232 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2233 linesize[0], h_edge_pos, edge_h,
2234 EDGE_WIDTH, EDGE_WIDTH, sides);
2235 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2236 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2237 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2238 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2239 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2240 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
/* Clamp the band to the visible picture height. */
2243 h = FFMIN(h, avctx->height - y);
2245 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2247 if (avctx->draw_horiz_band) {
2249 int offset[AV_NUM_DATA_POINTERS];
/* Choose coded-order vs display-order source (src selection partly in
 * elided lines). */
2252 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2253 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2260 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2261 picture_structure == PICT_FRAME &&
2262 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2263 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
/* Byte offsets of the band within each plane. */
2266 offset[0]= y * src->linesize[0];
2268 offset[2]= (y >> vshift) * src->linesize[1];
2269 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2275 avctx->draw_horiz_band(avctx, src, offset,
2276 y, picture_structure, h);
/*
 * Thin wrapper: forward MpegEncContext state to ff_draw_horiz_band().
 * Edges are only drawn for unrestricted-MV streams that are not intra-only.
 */
2280 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2282 int draw_edges = s->unrestricted_mv && !s->intra_only;
2283 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2284 &s->last_picture, y, h, s->picture_structure,
2285 s->first_field, draw_edges, s->low_delay,
2286 s->v_edge_pos, s->h_edge_pos);
/*
 * Set up per-macroblock state for (s->mb_x, s->mb_y): the six block_index[]
 * entries (4 luma 8x8 blocks on the b8 grid, 2 chroma blocks past the luma
 * area) and the dest[] pointers into the current picture's planes.
 * Indices are offset by -1/-2 columns because prediction reads neighbours
 * to the left of the current macroblock.
 */
2289 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2290 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2291 const int uvlinesize = s->current_picture.f.linesize[1];
2292 const int mb_size= 4;
/* Luma: two rows of two 8x8 blocks on the b8 (8-pel) grid. */
2294 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2295 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2296 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2297 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* Chroma: one block per plane, stored after the luma area (hence the
 * + b8_stride*mb_height*2 base offset). */
2298 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2299 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2300 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* dest[] starts one macroblock to the left; 1 << mb_size == 16 pels. */
2302 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2303 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2304 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
/* Advance dest[] to the current MB row, except for the draw_horiz_band
 * B-frame case where rendering happens into a scratch area. */
2306 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2308 if(s->picture_structure==PICT_FRAME){
2309 s->dest[0] += s->mb_y * linesize << mb_size;
2310 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2311 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* Field picture: mb_y counts field rows, so halve it; the line below
 * presumably belongs to the else branch (its `else {` is not in this
 * excerpt). */
2313 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2314 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2315 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2316 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2322 * Permute an 8x8 block.
2323 * @param block the block which will be permuted according to the given permutation vector
2324 * @param permutation the permutation vector
2325 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
2326 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
2327 * (inverse) permutated to scantable order!
/* See the doc comment above: permute the first `last`+1 scantable-order
 * coefficients of an 8x8 block according to `permutation`. Two passes:
 * the first presumably snapshots the touched coefficients into a temp
 * array and clears them (its body lines are omitted in this excerpt),
 * the second writes them back at their permuted positions. */
2329 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
2335 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
2337 for(i=0; i<=last; i++){
2338 const int j= scantable[i];
/* Second pass: scatter the saved coefficients to permuted positions. */
2343 for(i=0; i<=last; i++){
2344 const int j= scantable[i];
2345 const int perm_j= permutation[j];
2346 block[perm_j]= temp[j];
/*
 * Flush-callback for mpegvideo-based decoders: drop every reference frame
 * and reset the bitstream parsing state, e.g. after a seek.
 */
2350 void ff_mpeg_flush(AVCodecContext *avctx){
2352 MpegEncContext *s = avctx->priv_data;
/* Nothing to flush before the decoder is fully initialized. */
2354 if(s==NULL || s->picture==NULL)
/* Unreference the whole picture pool and the three working pictures. */
2357 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2358 ff_mpeg_unref_picture(s, &s->picture[i]);
2359 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2361 ff_mpeg_unref_picture(s, &s->current_picture);
2362 ff_mpeg_unref_picture(s, &s->last_picture);
2363 ff_mpeg_unref_picture(s, &s->next_picture);
2365 s->mb_x= s->mb_y= 0;
/* Reset the start-code parser so stale data cannot leak across the seek. */
2367 s->parse_context.state= -1;
2368 s->parse_context.frame_start_found= 0;
2369 s->parse_context.overread= 0;
2370 s->parse_context.overread_index= 0;
2371 s->parse_context.index= 0;
2372 s->parse_context.last_index= 0;
2373 s->bitstream_buffer_size=0;
/*
 * MPEG-1 intra dequantizer (C reference). DC (block[0]) is scaled by the
 * luma/chroma dc_scale table (selected by block index n); each AC level is
 * scaled by qscale * intra_matrix >> 3 and then forced odd via
 * (level - 1) | 1 — MPEG-1 mismatch control. The sign handling branches
 * (if/else around the two identical formulas) are abridged in this excerpt.
 */
2377 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2378 int16_t *block, int n, int qscale)
2380 int i, level, nCoeffs;
2381 const uint16_t *quant_matrix;
2383 nCoeffs= s->block_last_index[n];
/* n < 4 are luma blocks, others chroma — the selecting if/else lines are
 * not visible here. */
2386 block[0] = block[0] * s->y_dc_scale;
2388 block[0] = block[0] * s->c_dc_scale;
2389 /* XXX: only mpeg1 */
2390 quant_matrix = s->intra_matrix;
2391 for(i=1;i<=nCoeffs;i++) {
2392 int j= s->intra_scantable.permutated[i];
/* Negative levels: dequantize magnitude, odd-ify, then negate (branch
 * structure abridged). */
2397 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2398 level = (level - 1) | 1;
2401 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2402 level = (level - 1) | 1;
/*
 * MPEG-1 inter (non-intra) dequantizer (C reference). Every coefficient,
 * including index 0, uses ((2*|level| + 1) * qscale * inter_matrix) >> 4,
 * followed by the (level - 1) | 1 odd-ification for mismatch control.
 * The same zig-zag permutation table is shared with the intra path.
 */
2409 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2410 int16_t *block, int n, int qscale)
2412 int i, level, nCoeffs;
2413 const uint16_t *quant_matrix;
2415 nCoeffs= s->block_last_index[n];
2417 quant_matrix = s->inter_matrix;
2418 for(i=0; i<=nCoeffs; i++) {
2419 int j= s->intra_scantable.permutated[i];
/* Two sign branches (if/else lines abridged): magnitude is dequantized,
 * odd-ified, and re-signed. */
2424 level = (((level << 1) + 1) * qscale *
2425 ((int) (quant_matrix[j]))) >> 4;
2426 level = (level - 1) | 1;
2429 level = (((level << 1) + 1) * qscale *
2430 ((int) (quant_matrix[j]))) >> 4;
2431 level = (level - 1) | 1;
/*
 * MPEG-2 intra dequantizer (C reference). Like the MPEG-1 version but
 * WITHOUT the per-coefficient odd-ification (MPEG-2 uses a different
 * mismatch-control scheme), and with alternate_scan forcing all 63 AC
 * coefficients to be processed.
 */
2438 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2439 int16_t *block, int n, int qscale)
2441 int i, level, nCoeffs;
2442 const uint16_t *quant_matrix;
2444 if(s->alternate_scan) nCoeffs= 63;
2445 else nCoeffs= s->block_last_index[n];
/* DC uses dc_scale tables; the n<4 luma/chroma selection lines are not
 * visible in this excerpt. */
2448 block[0] = block[0] * s->y_dc_scale;
2450 block[0] = block[0] * s->c_dc_scale;
2451 quant_matrix = s->intra_matrix;
2452 for(i=1;i<=nCoeffs;i++) {
2453 int j= s->intra_scantable.permutated[i];
/* Sign branches abridged: |level| * qscale * matrix >> 3, then re-sign. */
2458 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2461 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * Bit-exact variant of the MPEG-2 intra dequantizer. The arithmetic per
 * coefficient matches dct_unquantize_mpeg2_intra_c; presumably the extra
 * bookkeeping that makes it bit-exact (spec-mandated mismatch accumulation
 * on the last coefficient) lives in the lines omitted from this excerpt —
 * TODO confirm against the full file.
 */
2468 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2469 int16_t *block, int n, int qscale)
2471 int i, level, nCoeffs;
2472 const uint16_t *quant_matrix;
2475 if(s->alternate_scan) nCoeffs= 63;
2476 else nCoeffs= s->block_last_index[n];
/* DC scaling; luma/chroma selection lines not visible in this excerpt. */
2479 block[0] = block[0] * s->y_dc_scale;
2481 block[0] = block[0] * s->c_dc_scale;
2482 quant_matrix = s->intra_matrix;
2483 for(i=1;i<=nCoeffs;i++) {
2484 int j= s->intra_scantable.permutated[i];
/* Sign branches abridged: |level| * qscale * matrix >> 3, then re-sign. */
2489 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2492 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * MPEG-2 inter dequantizer (C reference): ((2*|level| + 1) * qscale *
 * inter_matrix) >> 4 per coefficient, no per-coefficient odd-ification.
 * Presumably the MPEG-2 mismatch control (XOR-sum toggling the last
 * coefficient's LSB) is in the omitted trailing lines — TODO confirm.
 */
2501 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2502 int16_t *block, int n, int qscale)
2504 int i, level, nCoeffs;
2505 const uint16_t *quant_matrix;
2508 if(s->alternate_scan) nCoeffs= 63;
2509 else nCoeffs= s->block_last_index[n];
2511 quant_matrix = s->inter_matrix;
2512 for(i=0; i<=nCoeffs; i++) {
2513 int j= s->intra_scantable.permutated[i];
/* Sign branches abridged: dequantize magnitude, then re-sign. */
2518 level = (((level << 1) + 1) * qscale *
2519 ((int) (quant_matrix[j]))) >> 4;
2522 level = (((level << 1) + 1) * qscale *
2523 ((int) (quant_matrix[j]))) >> 4;
/*
 * H.263 intra dequantizer (C reference): level' = |level| * qmul ± qadd
 * with qadd = (qscale - 1) | 1 (always odd). DC is scaled by the dc_scale
 * tables instead. The assignment of qmul (presumably qscale << 1) and the
 * h263_aic special case are in lines omitted from this excerpt — TODO
 * confirm against the full file.
 */
2532 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2533 int16_t *block, int n, int qscale)
2535 int i, level, qmul, qadd;
2538 assert(s->block_last_index[n]>=0);
/* DC scaling; the n<4 luma/chroma selection lines are not visible here. */
2544 block[0] = block[0] * s->y_dc_scale;
2546 block[0] = block[0] * s->c_dc_scale;
2547 qadd = (qscale - 1) | 1;
/* raster_end maps the last scantable index to the last raster-order row,
 * bounding the raster-order loop below. */
2554 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2556 for(i=1; i<=nCoeffs; i++) {
/* Negative levels subtract qadd, positive add it (branch lines abridged). */
2560 level = level * qmul - qadd;
2562 level = level * qmul + qadd;
/*
 * H.263 inter dequantizer (C reference): same |level| * qmul ± qadd scheme
 * as the intra version but with no DC special case, so the loop starts at
 * coefficient 0. The qmul assignment line is omitted from this excerpt.
 */
2569 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2570 int16_t *block, int n, int qscale)
2572 int i, level, qmul, qadd;
2575 assert(s->block_last_index[n]>=0);
2577 qadd = (qscale - 1) | 1;
2580 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2582 for(i=0; i<=nCoeffs; i++) {
/* Negative levels subtract qadd, positive add it (branch lines abridged). */
2586 level = level * qmul - qadd;
2588 level = level * qmul + qadd;
2596 * set qscale and update qscale dependent variables.
/* Clamp qscale to the legal 1..31 range (the lower-bound branch and the
 * clamping assignments are in lines omitted from this excerpt) and refresh
 * the derived chroma qscale and DC scale factors. */
2598 void ff_set_qscale(MpegEncContext * s, int qscale)
2602 else if (qscale > 31)
2606 s->chroma_qscale= s->chroma_qscale_table[qscale];
2608 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2609 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/*
 * Report per-row decode progress to waiting frame threads. Skipped for
 * B frames, partitioned frames, and after an error — in those cases the
 * rows cannot be guaranteed complete/usable by a referencing thread.
 */
2612 void ff_MPV_report_decode_progress(MpegEncContext *s)
2614 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2615 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
2618 #if CONFIG_ERROR_RESILIENCE
/*
 * Mirror the decoder state the error-resilience code needs into the
 * ERContext, then start error tracking for the new frame.
 */
2619 void ff_mpeg_er_frame_start(MpegEncContext *s)
2621 ERContext *er = &s->er;
/* Reference pictures used for concealment. */
2623 er->cur_pic = s->current_picture_ptr;
2624 er->last_pic = s->last_picture_ptr;
2625 er->next_pic = s->next_picture_ptr;
/* Temporal distances and MV precision needed to scale concealment MVs. */
2627 er->pp_time = s->pp_time;
2628 er->pb_time = s->pb_time;
2629 er->quarter_sample = s->quarter_sample;
2630 er->partitioned_frame = s->partitioned_frame;
2632 ff_er_frame_start(er);
2634 #endif /* CONFIG_ERROR_RESILIENCE */