2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
38 #include "mpegvideo.h"
41 #include "xvmc_internal.h"
/* Forward declarations of the C reference dequantization routines; they are
 * installed into the MpegEncContext function pointers by ff_dct_common_init()
 * (one intra/inter pair per coding standard: MPEG-1, MPEG-2, H.263). */
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
/* Default luma-to-chroma qscale mapping: the identity (chroma uses the same
 * qscale as luma). Installed by ff_MPV_common_defaults() below. */
60 static const uint8_t ff_default_chroma_qscale_table[32] = {
61 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
62 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
63 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale index.
 * Also reused as entry 0 of ff_mpeg2_dc_scale_table[] below. */
66 const uint8_t ff_mpeg1_dc_scale_table[128] = {
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
68 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale, intra_dc_precision = 1: constant 4 for every qscale. */
78 static const uint8_t mpeg2_dc_scale_table1[128] = {
79 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
80 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale, intra_dc_precision = 2: constant 2 for every qscale. */
90 static const uint8_t mpeg2_dc_scale_table2[128] = {
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale, intra_dc_precision = 3: constant 1 for every qscale. */
102 static const uint8_t mpeg2_dc_scale_table3[128] = {
103 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
104 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale table selector: maps a scale index (0..3, presumably the MPEG-2
 * intra_dc_precision — TODO confirm against callers) to one of the constant
 * tables above (8 / 4 / 2 / 1). */
114 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
115 ff_mpeg1_dc_scale_table,
116 mpeg2_dc_scale_table1,
117 mpeg2_dc_scale_table2,
118 mpeg2_dc_scale_table3,
/* Pixel format list exported for codecs that only support 4:2:0
 * (initializer contents not visible in this excerpt). */
121 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/**
 * Error-resilience macroblock decode callback (registered in init_er()).
 * Re-decodes one macroblock with the concealment motion parameters supplied
 * by the error-resilience core: it loads the MV/intra/skip state into the
 * context, recomputes the destination pointers for the current MB position,
 * and calls ff_MPV_decode_mb().
 */
126 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
128 int mb_x, int mb_y, int mb_intra, int mb_skipped)
130 MpegEncContext *s = opaque;
133 s->mv_type = mv_type;
134 s->mb_intra = mb_intra;
135 s->mb_skipped = mb_skipped;
138 memcpy(s->mv, mv, sizeof(*mv));
140 ff_init_block_index(s);
141 ff_update_block_index(s);
143 s->dsp.clear_blocks(s->block[0]);
// Luma dest: 16x16 MB grid; chroma dest: scaled by the chroma shift factors.
145 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
146 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
147 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
150 ff_MPV_decode_mb(s, s->block);
153 /* init common dct for both encoder and decoder */
154 av_cold int ff_dct_common_init(MpegEncContext *s)
// DSP/half-pel/video DSP helper contexts.
156 ff_dsputil_init(&s->dsp, s->avctx);
157 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
158 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
// Install the C reference dequantizers; arch-specific init below may
// override them with optimized versions.
160 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
161 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
162 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
163 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
164 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
165 if (s->flags & CODEC_FLAG_BITEXACT)
166 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
167 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
// Per-architecture overrides (compiled in conditionally).
170 ff_MPV_common_init_arm(s);
172 ff_MPV_common_init_bfin(s);
174 ff_MPV_common_init_ppc(s);
176 ff_MPV_common_init_x86(s);
178 /* load & permutate scantables
179 * note: only wmv uses different ones
181 if (s->alternate_scan) {
182 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
183 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
185 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
186 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
189 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/**
 * Allocate the linesize-dependent scratch buffers (edge emulation buffer and
 * the motion-estimation / RD / OBMC scratchpads, which alias one block).
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure.
 */
194 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
196 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
198 // edge emu needs blocksize + filter length - 1
199 // (= 17x17 for halfpel / 21x21 for h264)
200 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
201 // at uvlinesize. It supports only YUV420 so 24x24 is enough
202 // linesize * interlaced * MBsize
203 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
206 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
// The scratchpads deliberately share one allocation; obmc_scratchpad is
// offset 16 bytes into it.
208 s->me.temp = s->me.scratchpad;
209 s->rd_scratchpad = s->me.scratchpad;
210 s->b_scratchpad = s->me.scratchpad;
211 s->obmc_scratchpad = s->me.scratchpad + 16;
215 av_freep(&s->edge_emu_buffer);
216 return AVERROR(ENOMEM);
220 * Allocate a frame buffer
222 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
// WM image/screen codecs use internal buffers with non-user dimensions, so
// they bypass the thread/user get_buffer path and use the default allocator.
227 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
228 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
229 s->codec_id != AV_CODEC_ID_MSS2)
230 r = ff_thread_get_buffer(s->avctx, &pic->tf,
231 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
233 pic->f.width = s->avctx->width;
234 pic->f.height = s->avctx->height;
235 pic->f.format = s->avctx->pix_fmt;
236 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
239 if (r < 0 || !pic->f.data[0]) {
240 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
// Allocate per-picture hwaccel private data when a hwaccel is active.
245 if (s->avctx->hwaccel) {
246 assert(!pic->hwaccel_picture_private);
247 if (s->avctx->hwaccel->priv_data_size) {
248 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
249 if (!pic->hwaccel_priv_buf) {
250 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
253 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
// Strides must stay constant across the sequence; reject buffers whose
// strides differ from the ones already recorded in the context.
257 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
258 s->uvlinesize != pic->f.linesize[1])) {
259 av_log(s->avctx, AV_LOG_ERROR,
260 "get_buffer() failed (stride changed)\n");
261 ff_mpeg_unref_picture(s, pic);
265 if (pic->f.linesize[1] != pic->f.linesize[2]) {
266 av_log(s->avctx, AV_LOG_ERROR,
267 "get_buffer() failed (uv stride mismatch)\n");
268 ff_mpeg_unref_picture(s, pic);
// Lazily (re)create the linesize-dependent scratch buffers.
272 if (!s->edge_emu_buffer &&
273 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
274 av_log(s->avctx, AV_LOG_ERROR,
275 "get_buffer() failed to allocate context scratch buffers.\n");
276 ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data table buffers (the AVBufferRefs only;
 * the derived data pointers are cleared elsewhere). */
283 static void free_picture_tables(Picture *pic)
287 av_buffer_unref(&pic->mb_var_buf);
288 av_buffer_unref(&pic->mc_mb_var_buf);
289 av_buffer_unref(&pic->mb_mean_buf);
290 av_buffer_unref(&pic->mbskip_table_buf);
291 av_buffer_unref(&pic->qscale_table_buf);
292 av_buffer_unref(&pic->mb_type_buf);
294 for (i = 0; i < 2; i++) {
295 av_buffer_unref(&pic->motion_val_buf[i]);
296 av_buffer_unref(&pic->ref_index_buf[i]);
/**
 * Allocate the per-picture side-data tables (skip/qscale/mb_type always;
 * variance tables for encoding; motion tables for H.263-family or encoding).
 * @return 0 on success, AVERROR(ENOMEM) on any failed allocation (partially
 *         allocated buffers are left for the caller to free).
 */
300 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
302 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
303 const int mb_array_size = s->mb_stride * s->mb_height;
304 const int b8_array_size = s->b8_stride * s->mb_height * 2;
308 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
// + s->mb_stride gives one extra row of padding (see the +2*mb_stride+1
// pointer offsets applied in ff_alloc_picture()).
309 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
310 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
312 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
313 return AVERROR(ENOMEM);
// Encoder-only statistics tables (mb variance, mc variance, mean).
316 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
317 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
318 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
319 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
320 return AVERROR(ENOMEM);
323 if (s->out_format == FMT_H263 || s->encoding) {
324 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
325 int ref_index_size = 4 * mb_array_size;
// One motion-val/ref-index pair per prediction direction (forward/backward).
327 for (i = 0; mv_size && i < 2; i++) {
328 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
329 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
330 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
331 return AVERROR(ENOMEM);
/* Make every per-picture table buffer writable (copy-on-write via
 * av_buffer_make_writable); returns a negative AVERROR on failure. */
338 static int make_tables_writable(Picture *pic)
341 #define MAKE_WRITABLE(table) \
344 (ret = av_buffer_make_writable(&pic->table)) < 0)\
348 MAKE_WRITABLE(mb_var_buf);
349 MAKE_WRITABLE(mc_mb_var_buf);
350 MAKE_WRITABLE(mb_mean_buf);
351 MAKE_WRITABLE(mbskip_table_buf);
352 MAKE_WRITABLE(qscale_table_buf);
353 MAKE_WRITABLE(mb_type_buf);
355 for (i = 0; i < 2; i++) {
356 MAKE_WRITABLE(motion_val_buf[i]);
357 MAKE_WRITABLE(ref_index_buf[i]);
364 * Allocate a Picture.
365 * The pixels are allocated/set by calling get_buffer() if shared = 0
367 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
// shared: caller already provided pixel data; otherwise allocate it here.
372 assert(pic->f.data[0]);
375 assert(!pic->f.data[0]);
377 if (alloc_frame_buffer(s, pic) < 0)
// Record the strides of the first allocated frame; later frames must match
// (checked in alloc_frame_buffer()).
380 s->linesize = pic->f.linesize[0];
381 s->uvlinesize = pic->f.linesize[1];
// Allocate tables on first use, otherwise just make them writable (CoW).
384 if (!pic->qscale_table_buf)
385 ret = alloc_picture_tables(s, pic);
387 ret = make_tables_writable(pic);
392 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
393 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
394 pic->mb_mean = pic->mb_mean_buf->data;
397 pic->mbskip_table = pic->mbskip_table_buf->data;
// Skip the padding row/column allocated in alloc_picture_tables().
398 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
399 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
401 if (pic->motion_val_buf[0]) {
402 for (i = 0; i < 2; i++) {
403 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
404 pic->ref_index[i] = pic->ref_index_buf[i]->data;
410 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
411 ff_mpeg_unref_picture(s, pic);
412 free_picture_tables(pic);
413 return AVERROR(ENOMEM);
417 * Deallocate a picture.
419 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
// Everything after mb_mean in the Picture struct is wiped below; the table
// buffer references before it survive unless needs_realloc is set.
421 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
424 /* WM Image / Screen codecs allocate internal buffers with different
425 * dimensions / colorspaces; ignore user-defined callbacks for these. */
426 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
427 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
428 s->codec_id != AV_CODEC_ID_MSS2)
429 ff_thread_release_buffer(s->avctx, &pic->tf);
431 av_frame_unref(&pic->f);
433 av_buffer_unref(&pic->hwaccel_priv_buf);
435 if (pic->needs_realloc)
436 free_picture_tables(pic);
438 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Re-reference every side-data table of src into dst (only when the buffers
 * actually differ), then copy the derived data pointers. On a failed
 * av_buffer_ref(), dst's tables are freed and AVERROR(ENOMEM) is returned. */
441 static int update_picture_tables(Picture *dst, Picture *src)
445 #define UPDATE_TABLE(table)\
448 (!dst->table || dst->table->buffer != src->table->buffer)) {\
449 av_buffer_unref(&dst->table);\
450 dst->table = av_buffer_ref(src->table);\
452 free_picture_tables(dst);\
453 return AVERROR(ENOMEM);\
458 UPDATE_TABLE(mb_var_buf);
459 UPDATE_TABLE(mc_mb_var_buf);
460 UPDATE_TABLE(mb_mean_buf);
461 UPDATE_TABLE(mbskip_table_buf);
462 UPDATE_TABLE(qscale_table_buf);
463 UPDATE_TABLE(mb_type_buf);
464 for (i = 0; i < 2; i++) {
465 UPDATE_TABLE(motion_val_buf[i]);
466 UPDATE_TABLE(ref_index_buf[i]);
// Data pointers can be copied directly: they point into the (now shared)
// buffers referenced above.
469 dst->mb_var = src->mb_var;
470 dst->mc_mb_var = src->mc_mb_var;
471 dst->mb_mean = src->mb_mean;
472 dst->mbskip_table = src->mbskip_table;
473 dst->qscale_table = src->qscale_table;
474 dst->mb_type = src->mb_type;
475 for (i = 0; i < 2; i++) {
476 dst->motion_val[i] = src->motion_val[i];
477 dst->ref_index[i] = src->ref_index[i];
/**
 * Make dst a new reference to src: frame buffer, side-data tables, hwaccel
 * private data and the scalar per-picture fields. dst must be empty on entry.
 * On any failure dst is unreferenced again and the error code is returned.
 */
483 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
487 av_assert0(!dst->f.buf[0]);
488 av_assert0(src->f.buf[0]);
492 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
496 ret = update_picture_tables(dst, src);
500 if (src->hwaccel_picture_private) {
501 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
502 if (!dst->hwaccel_priv_buf)
504 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
507 dst->field_picture = src->field_picture;
508 dst->mb_var_sum = src->mb_var_sum;
509 dst->mc_mb_var_sum = src->mc_mb_var_sum;
510 dst->b_frame_score = src->b_frame_score;
511 dst->needs_realloc = src->needs_realloc;
512 dst->reference = src->reference;
513 dst->shared = src->shared;
517 ff_mpeg_unref_picture(s, dst);
/**
 * Allocate the per-(slice-)thread context buffers: motion-estimation maps,
 * optional noise-reduction error sums, DCT block storage and, for H.263-style
 * formats, the AC prediction values.
 * @return 0 on success, -1 on allocation failure (buffers are freed later
 *         through ff_MPV_common_end()).
 */
521 static int init_duplicate_context(MpegEncContext *s)
523 int y_size = s->b8_stride * (2 * s->mb_height + 1);
524 int c_size = s->mb_stride * (s->mb_height + 1);
525 int yc_size = y_size + 2 * c_size;
533 s->obmc_scratchpad = NULL;
536 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
537 ME_MAP_SIZE * sizeof(uint32_t), fail)
538 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
539 ME_MAP_SIZE * sizeof(uint32_t), fail)
540 if (s->avctx->noise_reduction) {
541 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
542 2 * 64 * sizeof(int), fail)
// 12 blocks of 64 coefficients, double-buffered.
545 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
546 s->block = s->blocks[0];
548 for (i = 0; i < 12; i++) {
549 s->pblocks[i] = &s->block[i];
552 if (s->out_format == FMT_H263) {
// AC values: luma plane followed by the two chroma planes, with a one
// row/column offset for prediction from the left/top.
554 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
555 yc_size * sizeof(int16_t) * 16, fail);
556 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
557 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
558 s->ac_val[2] = s->ac_val[1] + c_size;
563 return -1; // free() through ff_MPV_common_end()
/* Free everything allocated by init_duplicate_context() and
 * ff_mpv_frame_size_alloc() for one (slice-)thread context. */
566 static void free_duplicate_context(MpegEncContext *s)
571 av_freep(&s->edge_emu_buffer);
// Frees the shared scratchpad; obmc_scratchpad below only aliased into it.
572 av_freep(&s->me.scratchpad);
576 s->obmc_scratchpad = NULL;
578 av_freep(&s->dct_error_sum);
579 av_freep(&s->me.map);
580 av_freep(&s->me.score_map);
581 av_freep(&s->blocks);
582 av_freep(&s->ac_val_base);
/* Copy the per-thread-owned pointer/state fields from src into bak, so a
 * wholesale memcpy of the context can be undone for those fields
 * (see ff_update_duplicate_context(); only part of the COPY list is visible
 * in this excerpt). */
586 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
588 #define COPY(a) bak->a = src->a
589 COPY(edge_emu_buffer);
594 COPY(obmc_scratchpad);
601 COPY(me.map_generation);
/**
 * Copy src into dst while preserving dst's own per-thread buffers:
 * the thread-local fields are backed up, the whole context is memcpy'd,
 * and the backup is restored on top. Block pointers and scratch buffers
 * are then re-established for dst.
 */
613 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
617 // FIXME copy only needed parts
619 backup_duplicate_context(&bak, dst);
620 memcpy(dst, src, sizeof(MpegEncContext));
621 backup_duplicate_context(dst, &bak);
// pblocks must point into dst's own block storage, not src's.
622 for (i = 0; i < 12; i++) {
623 dst->pblocks[i] = &dst->block[i];
625 if (!dst->edge_emu_buffer &&
626 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
627 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
628 "scratch buffers.\n");
631 // STOP_TIMER("update_duplicate_context")
632 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/**
 * Frame-threading context update: synchronize the decoding context s (dst)
 * with s1 (src) — pictures, picture pointers, bitstream buffer, MPEG-4 and
 * MPEG-2 field ranges, and assorted scalar state.
 */
636 int ff_mpeg_update_thread_context(AVCodecContext *dst,
637 const AVCodecContext *src)
640 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
642 if (dst == src || !s1->context_initialized)
645 // FIXME can parameters change on I-frames?
646 // in that case dst may need a reinit
647 if (!s->context_initialized) {
// First update: clone the whole context, then re-own the buffers that must
// not be shared between threads.
648 memcpy(s, s1, sizeof(MpegEncContext));
651 s->bitstream_buffer = NULL;
652 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
654 ff_MPV_common_init(s);
657 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
659 s->context_reinit = 0;
660 s->height = s1->height;
661 s->width = s1->width;
662 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
666 s->avctx->coded_height = s1->avctx->coded_height;
667 s->avctx->coded_width = s1->avctx->coded_width;
668 s->avctx->width = s1->avctx->width;
669 s->avctx->height = s1->avctx->height;
671 s->coded_picture_number = s1->coded_picture_number;
672 s->picture_number = s1->picture_number;
673 s->input_picture_number = s1->input_picture_number;
// Re-reference the whole picture pool from the source thread.
675 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
676 ff_mpeg_unref_picture(s, &s->picture[i]);
677 if (s1->picture[i].f.data[0] &&
678 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
682 #define UPDATE_PICTURE(pic)\
684 ff_mpeg_unref_picture(s, &s->pic);\
685 if (s1->pic.f.data[0])\
686 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
688 ret = update_picture_tables(&s->pic, &s1->pic);\
693 UPDATE_PICTURE(current_picture);
694 UPDATE_PICTURE(last_picture);
695 UPDATE_PICTURE(next_picture);
// Pointers into s1's picture array are rebased into s's array.
697 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
698 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
699 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
701 // Error/bug resilience
702 s->next_p_frame_damaged = s1->next_p_frame_damaged;
703 s->workaround_bugs = s1->workaround_bugs;
// Bulk-copy the MPEG-4 field range [time_increment_bits, shape).
706 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
707 (char *) &s1->shape - (char *) &s1->time_increment_bits);
710 s->max_b_frames = s1->max_b_frames;
711 s->low_delay = s1->low_delay;
712 s->droppable = s1->droppable;
714 // DivX handling (doesn't work)
715 s->divx_packed = s1->divx_packed;
717 if (s1->bitstream_buffer) {
718 if (s1->bitstream_buffer_size +
719 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
// NOTE(review): the size passed here is s1->allocated_bitstream_buffer_size,
// while the condition above checks s1->bitstream_buffer_size + padding —
// verify the requested size covers bitstream_buffer_size + padding, and that
// the av_fast_malloc() result is checked before the memcpy below.
720 av_fast_malloc(&s->bitstream_buffer,
721 &s->allocated_bitstream_buffer_size,
722 s1->allocated_bitstream_buffer_size);
723 s->bitstream_buffer_size = s1->bitstream_buffer_size;
724 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
725 s1->bitstream_buffer_size);
726 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
727 FF_INPUT_BUFFER_PADDING_SIZE);
730 // linesize dependend scratch buffer allocation
731 if (!s->edge_emu_buffer)
733 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
734 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
735 "scratch buffers.\n");
736 return AVERROR(ENOMEM);
739 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
740 "be allocated due to unknown size.\n");
744 // MPEG2/interlacing info
745 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
746 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
748 if (!s1->first_field) {
749 s->last_pict_type = s1->pict_type;
750 if (s1->current_picture_ptr)
751 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
753 if (s1->pict_type != AV_PICTURE_TYPE_B) {
754 s->last_non_b_pict_type = s1->pict_type;
762 * Set the given MpegEncContext to common defaults
763 * (same for encoding and decoding).
764 * The changed fields will not depend upon the
765 * prior state of the MpegEncContext.
767 void ff_MPV_common_defaults(MpegEncContext *s)
// Default DC/qscale tables (MPEG-1 DC scale, identity chroma qscale).
769 s->y_dc_scale_table =
770 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
771 s->chroma_qscale_table = ff_default_chroma_qscale_table;
772 s->progressive_frame = 1;
773 s->progressive_sequence = 1;
774 s->picture_structure = PICT_FRAME;
776 s->coded_picture_number = 0;
777 s->picture_number = 0;
778 s->input_picture_number = 0;
780 s->picture_in_gop_number = 0;
785 s->slice_context_count = 1;
789 * Set the given MpegEncContext to defaults for decoding.
790 * the changed fields will not depend upon
791 * the prior state of the MpegEncContext.
793 void ff_MPV_decode_defaults(MpegEncContext *s)
// Currently only applies the common defaults (no decoder-specific fields
// visible in this excerpt).
795 ff_MPV_common_defaults(s);
/**
 * Initialize the error-resilience context from the MpegEncContext: mirror the
 * macroblock geometry, allocate the ER status/temp tables, share the
 * skip/intra/dc tables and register mpeg_er_decode_mb() as the decode
 * callback. Returns AVERROR(ENOMEM) on allocation failure.
 */
798 static int init_er(MpegEncContext *s)
800 ERContext *er = &s->er;
801 int mb_array_size = s->mb_height * s->mb_stride;
804 er->avctx = s->avctx;
807 er->mb_index2xy = s->mb_index2xy;
808 er->mb_num = s->mb_num;
809 er->mb_width = s->mb_width;
810 er->mb_height = s->mb_height;
811 er->mb_stride = s->mb_stride;
812 er->b8_stride = s->b8_stride;
814 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
815 er->error_status_table = av_mallocz(mb_array_size);
816 if (!er->er_temp_buffer || !er->error_status_table)
819 er->mbskip_table = s->mbskip_table;
820 er->mbintra_table = s->mbintra_table;
822 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
823 er->dc_val[i] = s->dc_val[i];
825 er->decode_mb = mpeg_er_decode_mb;
830 av_freep(&er->er_temp_buffer);
831 av_freep(&er->error_status_table);
832 return AVERROR(ENOMEM);
836 * Initialize and allocates MpegEncContext fields dependent on the resolution.
838 static int init_context_frame(MpegEncContext *s)
840 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
// Derive the macroblock grid geometry from the frame width.
842 s->mb_width = (s->width + 15) / 16;
843 s->mb_stride = s->mb_width + 1;
844 s->b8_stride = s->mb_width * 2 + 1;
845 s->b4_stride = s->mb_width * 4 + 1;
846 mb_array_size = s->mb_height * s->mb_stride;
847 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
849 /* set default edge pos, will be overriden
850 * in decode_header if needed */
851 s->h_edge_pos = s->mb_width * 16;
852 s->v_edge_pos = s->mb_height * 16;
854 s->mb_num = s->mb_width * s->mb_height;
859 s->block_wrap[3] = s->b8_stride;
861 s->block_wrap[5] = s->mb_stride;
863 y_size = s->b8_stride * (2 * s->mb_height + 1);
864 c_size = s->mb_stride * (s->mb_height + 1);
865 yc_size = y_size + 2 * c_size;
867 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
868 fail); // error ressilience code looks cleaner with this
869 for (y = 0; y < s->mb_height; y++)
870 for (x = 0; x < s->mb_width; x++)
871 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
873 s->mb_index2xy[s->mb_height * s->mb_width] =
874 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
877 /* Allocate MV tables */
878 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
879 mv_table_size * 2 * sizeof(int16_t), fail);
880 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
881 mv_table_size * 2 * sizeof(int16_t), fail);
882 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
883 mv_table_size * 2 * sizeof(int16_t), fail);
884 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
885 mv_table_size * 2 * sizeof(int16_t), fail);
886 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
887 mv_table_size * 2 * sizeof(int16_t), fail);
888 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
889 mv_table_size * 2 * sizeof(int16_t), fail);
// Working pointers skip the first row + one entry of padding.
890 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
891 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
892 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
893 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
895 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
897 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
899 /* Allocate MB type table */
900 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
901 sizeof(uint16_t), fail); // needed for encoding
903 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
906 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
907 mb_array_size * sizeof(float), fail);
908 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
909 mb_array_size * sizeof(float), fail);
913 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
914 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
915 /* interlaced direct mode decoding tables */
916 for (i = 0; i < 2; i++) {
918 for (j = 0; j < 2; j++) {
919 for (k = 0; k < 2; k++) {
920 FF_ALLOCZ_OR_GOTO(s->avctx,
921 s->b_field_mv_table_base[i][j][k],
922 mv_table_size * 2 * sizeof(int16_t),
924 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
927 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
928 mb_array_size * 2 * sizeof(uint8_t), fail);
929 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
930 mv_table_size * 2 * sizeof(int16_t), fail);
931 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
934 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
935 mb_array_size * 2 * sizeof(uint8_t), fail);
938 if (s->out_format == FMT_H263) {
940 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
941 s->coded_block = s->coded_block_base + s->b8_stride + 1;
943 /* cbp, ac_pred, pred_dir */
944 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
945 mb_array_size * sizeof(uint8_t), fail);
946 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
947 mb_array_size * sizeof(uint8_t), fail);
950 if (s->h263_pred || s->h263_plus || !s->encoding) {
952 // MN: we need these for error resilience of intra-frames
953 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
954 yc_size * sizeof(int16_t), fail);
955 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
956 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
957 s->dc_val[2] = s->dc_val[1] + c_size;
// 1024 = default DC value (128 << 3) used as "unset" predictor.
958 for (i = 0; i < yc_size; i++)
959 s->dc_val_base[i] = 1024;
962 /* which mb is a intra block */
963 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
964 memset(s->mbintra_table, 1, mb_array_size);
966 /* init macroblock skip table */
967 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
968 // Note the + 1 is for a quicker mpeg4 slice_end detection
972 return AVERROR(ENOMEM);
976 * init common structure for both encoder and decoder.
977 * this assumes that some variables like width/height are already set
979 av_cold int ff_MPV_common_init(MpegEncContext *s)
982 int nb_slices = (HAVE_THREADS &&
983 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
984 s->avctx->thread_count : 1;
986 if (s->encoding && s->avctx->slices)
987 nb_slices = s->avctx->slices;
// Interlaced MPEG-2 rounds mb_height up to a multiple of two MB rows.
989 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
990 s->mb_height = (s->height + 31) / 32 * 2;
992 s->mb_height = (s->height + 15) / 16;
994 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
995 av_log(s->avctx, AV_LOG_ERROR,
996 "decoding to AV_PIX_FMT_NONE is not supported.\n");
// Clamp the slice count to what the MB height / MAX_THREADS allows.
1000 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1003 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1005 max_slices = MAX_THREADS;
1006 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1007 " reducing to %d\n", nb_slices, max_slices);
1008 nb_slices = max_slices;
1011 if ((s->width || s->height) &&
1012 av_image_check_size(s->width, s->height, 0, s->avctx))
1015 ff_dct_common_init(s);
1017 s->flags = s->avctx->flags;
1018 s->flags2 = s->avctx->flags2;
1020 /* set chroma shifts */
1021 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1023 &s->chroma_y_shift);
1025 /* convert fourcc to upper case */
1026 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1028 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1030 if (s->width && s->height) {
1031 s->avctx->coded_frame = &s->current_picture.f;
// Encoder-only allocations: statistics, quant matrices, reorder queues.
1034 if (s->msmpeg4_version) {
1035 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1036 2 * 2 * (MAX_LEVEL + 1) *
1037 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1039 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1041 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
1042 64 * 32 * sizeof(int), fail);
1043 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
1044 64 * 32 * sizeof(int), fail);
1045 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
1046 64 * 32 * 2 * sizeof(uint16_t), fail);
1047 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
1048 64 * 32 * 2 * sizeof(uint16_t), fail);
1049 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
1050 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1051 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
1052 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1054 if (s->avctx->noise_reduction) {
1055 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
1056 2 * 64 * sizeof(uint16_t), fail);
// Picture pool shared by encoder and decoder.
1061 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1062 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1063 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1064 avcodec_get_frame_defaults(&s->picture[i].f);
1066 memset(&s->next_picture, 0, sizeof(s->next_picture));
1067 memset(&s->last_picture, 0, sizeof(s->last_picture));
1068 memset(&s->current_picture, 0, sizeof(s->current_picture));
1069 avcodec_get_frame_defaults(&s->next_picture.f);
1070 avcodec_get_frame_defaults(&s->last_picture.f);
1071 avcodec_get_frame_defaults(&s->current_picture.f);
1073 if (s->width && s->height) {
1074 if (init_context_frame(s))
1077 s->parse_context.state = -1;
1080 s->context_initialized = 1;
1081 s->thread_context[0] = s;
// Set up the per-slice thread contexts and their MB row ranges.
1083 if (s->width && s->height) {
1084 if (nb_slices > 1) {
1085 for (i = 1; i < nb_slices; i++) {
1086 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1087 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1090 for (i = 0; i < nb_slices; i++) {
1091 if (init_duplicate_context(s->thread_context[i]) < 0)
1093 s->thread_context[i]->start_mb_y =
1094 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1095 s->thread_context[i]->end_mb_y =
1096 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1099 if (init_duplicate_context(s) < 0)
1102 s->end_mb_y = s->mb_height;
1104 s->slice_context_count = nb_slices;
1109 ff_MPV_common_end(s);
1114 * Frees and resets MpegEncContext fields depending on the resolution.
1115 * Is used during resolution changes to avoid a full reinitialization of the
1118 static int free_context_frame(MpegEncContext *s)
1122 av_freep(&s->mb_type);
// Free the MV table base allocations and clear the derived (offset)
// working pointers that pointed into them.
1123 av_freep(&s->p_mv_table_base);
1124 av_freep(&s->b_forw_mv_table_base);
1125 av_freep(&s->b_back_mv_table_base);
1126 av_freep(&s->b_bidir_forw_mv_table_base);
1127 av_freep(&s->b_bidir_back_mv_table_base);
1128 av_freep(&s->b_direct_mv_table_base);
1129 s->p_mv_table = NULL;
1130 s->b_forw_mv_table = NULL;
1131 s->b_back_mv_table = NULL;
1132 s->b_bidir_forw_mv_table = NULL;
1133 s->b_bidir_back_mv_table = NULL;
1134 s->b_direct_mv_table = NULL;
1135 for (i = 0; i < 2; i++) {
1136 for (j = 0; j < 2; j++) {
1137 for (k = 0; k < 2; k++) {
1138 av_freep(&s->b_field_mv_table_base[i][j][k]);
1139 s->b_field_mv_table[i][j][k] = NULL;
1141 av_freep(&s->b_field_select_table[i][j]);
1142 av_freep(&s->p_field_mv_table_base[i][j]);
1143 s->p_field_mv_table[i][j] = NULL;
1145 av_freep(&s->p_field_select_table[i]);
1148 av_freep(&s->dc_val_base);
1149 av_freep(&s->coded_block_base);
1150 av_freep(&s->mbintra_table);
1151 av_freep(&s->cbp_table);
1152 av_freep(&s->pred_dir_table);
1154 av_freep(&s->mbskip_table);
1156 av_freep(&s->er.error_status_table);
1157 av_freep(&s->er.er_temp_buffer);
1158 av_freep(&s->mb_index2xy);
1159 av_freep(&s->lambda_table);
1160 av_freep(&s->cplx_tab);
1161 av_freep(&s->bits_tab);
// Reset strides so the next alloc_frame_buffer() re-records them.
1163 s->linesize = s->uvlinesize = 0;
/**
 * Re-initialize the per-resolution parts of the context after a frame size
 * change: tears down slice thread contexts and frame tables, then rebuilds
 * them for the new dimensions.
 * NOTE(review): embedded original line numbers are non-contiguous; error
 * paths ("goto fail"), closing braces and the return are elided here.
 */
1168 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Tear down the duplicated slice contexts (context 0 is s itself, so only
 * indices >= 1 are freed as separate allocations). */
1172 if (s->slice_context_count > 1) {
1173 for (i = 0; i < s->slice_context_count; i++) {
1174 free_duplicate_context(s->thread_context[i]);
1176 for (i = 1; i < s->slice_context_count; i++) {
1177 av_freep(&s->thread_context[i]);
1180 free_duplicate_context(s);
1182 if ((err = free_context_frame(s)) < 0)
/* Force every picture to be reallocated at the new size on next use. */
1186 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1187 s->picture[i].needs_realloc = 1;
1190 s->last_picture_ptr =
1191 s->next_picture_ptr =
1192 s->current_picture_ptr = NULL;
/* Interlaced MPEG-2 rounds mb_height to a multiple of two field MB rows. */
1195 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1196 s->mb_height = (s->height + 31) / 32 * 2;
1198 s->mb_height = (s->height + 15) / 16;
1200 if ((s->width || s->height) &&
1201 av_image_check_size(s->width, s->height, 0, s->avctx))
1202 return AVERROR_INVALIDDATA;
1204 if ((err = init_context_frame(s)))
1207 s->thread_context[0] = s;
/* Rebuild the slice thread contexts and distribute MB rows evenly; rounding
 * by nb_slices/2 balances the remainder across slices. */
1209 if (s->width && s->height) {
1210 int nb_slices = s->slice_context_count;
1211 if (nb_slices > 1) {
1212 for (i = 1; i < nb_slices; i++) {
1213 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1214 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1217 for (i = 0; i < nb_slices; i++) {
1218 if (init_duplicate_context(s->thread_context[i]) < 0)
1220 s->thread_context[i]->start_mb_y =
1221 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1222 s->thread_context[i]->end_mb_y =
1223 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1226 if (init_duplicate_context(s) < 0)
1229 s->end_mb_y = s->mb_height;
1231 s->slice_context_count = nb_slices;
/* Elided failure path: full teardown on error. */
1236 ff_MPV_common_end(s);
1240 /* free everything allocated by the common init for both encoder and decoder */
1241 void ff_MPV_common_end(MpegEncContext *s)
/* NOTE(review): elided excerpt — loop braces and some lines are missing. */
/* Free duplicated slice contexts; context 0 is s itself and is only
 * "free_duplicate_context"ed, never av_freep'd. */
1245 if (s->slice_context_count > 1) {
1246 for (i = 0; i < s->slice_context_count; i++) {
1247 free_duplicate_context(s->thread_context[i]);
1249 for (i = 1; i < s->slice_context_count; i++) {
1250 av_freep(&s->thread_context[i]);
1252 s->slice_context_count = 1;
1253 } else free_duplicate_context(s);
1255 av_freep(&s->parse_context.buffer);
1256 s->parse_context.buffer_size = 0;
1258 av_freep(&s->bitstream_buffer);
1259 s->allocated_bitstream_buffer_size = 0;
/* Encoder-side statistics / rate-control scratch. */
1261 av_freep(&s->avctx->stats_out);
1262 av_freep(&s->ac_stats);
/* Quantization matrices (full and 16-bit variants). */
1264 av_freep(&s->q_intra_matrix);
1265 av_freep(&s->q_inter_matrix);
1266 av_freep(&s->q_intra_matrix16);
1267 av_freep(&s->q_inter_matrix16);
1268 av_freep(&s->input_picture);
1269 av_freep(&s->reordered_input_picture);
1270 av_freep(&s->dct_offset);
/* Release every picture in the pool, then the standalone picture copies. */
1273 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1274 free_picture_tables(&s->picture[i]);
1275 ff_mpeg_unref_picture(s, &s->picture[i]);
1278 av_freep(&s->picture);
1279 free_picture_tables(&s->last_picture);
1280 ff_mpeg_unref_picture(s, &s->last_picture);
1281 free_picture_tables(&s->current_picture);
1282 ff_mpeg_unref_picture(s, &s->current_picture);
1283 free_picture_tables(&s->next_picture);
1284 ff_mpeg_unref_picture(s, &s->next_picture);
1285 free_picture_tables(&s->new_picture);
1286 ff_mpeg_unref_picture(s, &s->new_picture);
1288 free_context_frame(s);
/* Reset state so the context can be re-initialized from scratch. */
1290 s->context_initialized = 0;
1291 s->last_picture_ptr =
1292 s->next_picture_ptr =
1293 s->current_picture_ptr = NULL;
1294 s->linesize = s->uvlinesize = 0;
/**
 * Initialize an RLTable's derived lookup tables (max_level, max_run,
 * index_run) from its run/level arrays.  If static_store is non-NULL the
 * tables are placed in that static buffer, otherwise heap-allocated.
 * NOTE(review): elided excerpt — the last==0/1 start/end setup and several
 * braces between the numbered lines are missing here.
 */
1297 av_cold void ff_init_rl(RLTable *rl,
1298 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1300 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1301 uint8_t index_run[MAX_RUN + 1];
1302 int last, run, level, start, end, i;
1304 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1305 if (static_store && rl->max_level[0])
1308 /* compute max_level[], max_run[] and index_run[] */
1309 for (last = 0; last < 2; last++) {
/* index_run is initialized to rl->n (an "unset" sentinel) so that the first
 * occurrence of each run records its table index. */
1318 memset(max_level, 0, MAX_RUN + 1);
1319 memset(max_run, 0, MAX_LEVEL + 1);
1320 memset(index_run, rl->n, MAX_RUN + 1);
1321 for (i = start; i < end; i++) {
1322 run = rl->table_run[i];
1323 level = rl->table_level[i];
1324 if (index_run[run] == rl->n)
1326 if (level > max_level[run])
1327 max_level[run] = level;
1328 if (run > max_run[level])
1329 max_run[level] = run;
/* Copy the scratch arrays into static storage (carved out of static_store
 * at fixed offsets) or into fresh heap allocations. */
1332 rl->max_level[last] = static_store[last];
1334 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1335 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1337 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1339 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1340 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1342 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1344 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1345 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Build the per-qscale RL_VLC tables: for each of the 32 quantizer values,
 * pre-dequantize each VLC entry's level so the decoder can skip a multiply
 * per coefficient.
 * NOTE(review): elided excerpt — qmul setup, the q==0 special case and the
 * escape/illegal-code branches between the numbered lines are missing here.
 */
1349 av_cold void ff_init_vlc_rl(RLTable *rl)
1353 for (q = 0; q < 32; q++) {
/* (q - 1) | 1 forces qadd odd, per the H.263-style dequant formula. */
1355 int qadd = (q - 1) | 1;
1361 for (i = 0; i < rl->vlc.table_size; i++) {
1362 int code = rl->vlc.table[i][0];
1363 int len = rl->vlc.table[i][1];
1366 if (len == 0) { // illegal code
1369 } else if (len < 0) { // more bits needed
1373 if (code == rl->n) { // esc
/* Regular code: recover run/level from the RL table and dequantize level. */
1377 run = rl->table_run[code] + 1;
1378 level = rl->table_level[code] * qmul + qadd;
/* run += 192 marks "last coefficient" codes (see rl->last). */
1379 if (code >= rl->last) run += 192;
1382 rl->rl_vlc[q][i].len = len;
1383 rl->rl_vlc[q][i].level = level;
1384 rl->rl_vlc[q][i].run = run;
/**
 * Unref every non-reference picture in the pool.  The current picture is
 * spared unless remove_current is set.
 */
1389 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1393 /* release non reference frames */
1394 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1395 if (!s->picture[i].reference &&
1396 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1397 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Return whether this picture slot may be reused: it has no allocated data,
 * or it is flagged for reallocation and not held as a delayed reference.
 * (Return statements are elided in this excerpt.) */
1402 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1404 if (pic->f.data[0] == NULL)
1406 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Find a free slot in s->picture[].  First pass prefers completely empty
 * slots (no data); second pass accepts any slot pic_is_unused() allows.
 * Returns the index, or AVERROR_INVALIDDATA if the pool is exhausted.
 * (The return-i lines inside the loops are elided in this excerpt.) */
1411 static int find_unused_picture(MpegEncContext *s, int shared)
1416 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1417 if (s->picture[i].f.data[0] == NULL)
1421 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1422 if (pic_is_unused(s, &s->picture[i]))
1427 return AVERROR_INVALIDDATA;
/**
 * Public wrapper around find_unused_picture(): if the chosen slot was only
 * pending reallocation, fully release it so the caller gets a clean slot.
 * Returns the slot index or a negative error code.
 */
1430 int ff_find_unused_picture(MpegEncContext *s, int shared)
1432 int ret = find_unused_picture(s, shared);
1434 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1435 if (s->picture[ret].needs_realloc) {
1436 s->picture[ret].needs_realloc = 0;
1437 free_picture_tables(&s->picture[ret]);
1438 ff_mpeg_unref_picture(s, &s->picture[ret]);
1439 avcodec_get_frame_defaults(&s->picture[ret].f);
/* Recompute the per-coefficient noise-reduction DCT offsets from the running
 * error sums, separately for intra and inter blocks.  Counters are halved
 * once dct_count exceeds 2^16 to keep a decaying average. */
1445 static void update_noise_reduction(MpegEncContext *s)
1449 for (intra = 0; intra < 2; intra++) {
1450 if (s->dct_count[intra] > (1 << 16)) {
1451 for (i = 0; i < 64; i++) {
1452 s->dct_error_sum[intra][i] >>= 1;
1454 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / (2 * error_sum), computed with
 * rounding; +1 in the denominator avoids division by zero. */
1457 for (i = 0; i < 64; i++) {
1458 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1459 s->dct_count[intra] +
1460 s->dct_error_sum[intra][i] / 2) /
1461 (s->dct_error_sum[intra][i] + 1);
1467 * generic function for encode/decode called after coding/decoding
1468 * the header and before a frame is coded/decoded.
/* NOTE(review): elided excerpt — local declarations, several error returns
 * ("return -1"/"goto fail") and closing braces between the numbered lines
 * are missing here. */
1470 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1476 /* mark & release old frames */
1477 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1478 s->last_picture_ptr != s->next_picture_ptr &&
1479 s->last_picture_ptr->f.data[0]) {
1480 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1483 /* release forgotten pictures */
1484 /* if (mpeg124/h263) */
/* A referenced picture that is neither last nor next is a leak ("zombie");
 * this is expected with frame threading, an error otherwise. */
1486 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1487 if (&s->picture[i] != s->last_picture_ptr &&
1488 &s->picture[i] != s->next_picture_ptr &&
1489 s->picture[i].reference && !s->picture[i].needs_realloc) {
1490 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1491 av_log(avctx, AV_LOG_ERROR,
1492 "releasing zombie picture\n");
1493 ff_mpeg_unref_picture(s, &s->picture[i]);
1498 ff_mpeg_unref_picture(s, &s->current_picture);
1501 ff_release_unused_pictures(s, 1);
/* Pick the picture to decode/encode into. */
1503 if (s->current_picture_ptr &&
1504 s->current_picture_ptr->f.data[0] == NULL) {
1505 // we already have an unused image
1506 // (maybe it was set before reading the header)
1507 pic = s->current_picture_ptr;
1509 i = ff_find_unused_picture(s, 0);
1511 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1514 pic = &s->picture[i];
1518 if (!s->droppable) {
1519 if (s->pict_type != AV_PICTURE_TYPE_B)
1523 pic->f.coded_picture_number = s->coded_picture_number++;
1525 if (ff_alloc_picture(s, pic, 0) < 0)
1528 s->current_picture_ptr = pic;
1529 // FIXME use only the vars from current_pic
1530 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1531 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1532 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1533 if (s->picture_structure != PICT_FRAME)
1534 s->current_picture_ptr->f.top_field_first =
1535 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1537 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1538 !s->progressive_sequence;
1539 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1542 s->current_picture_ptr->f.pict_type = s->pict_type;
1543 // if (s->flags && CODEC_FLAG_QSCALE)
1544 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1545 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1547 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1548 s->current_picture_ptr)) < 0)
/* For non-B frames, rotate the reference pointers: next becomes last,
 * current becomes next. */
1551 if (s->pict_type != AV_PICTURE_TYPE_B) {
1552 s->last_picture_ptr = s->next_picture_ptr;
1554 s->next_picture_ptr = s->current_picture_ptr;
1556 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1557 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1558 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1559 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1560 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1561 s->pict_type, s->droppable);
/* No usable last picture (stream starts on a non-keyframe or a field):
 * synthesize a gray dummy reference so prediction has something to read. */
1563 if ((s->last_picture_ptr == NULL ||
1564 s->last_picture_ptr->f.data[0] == NULL) &&
1565 (s->pict_type != AV_PICTURE_TYPE_I ||
1566 s->picture_structure != PICT_FRAME)) {
1567 int h_chroma_shift, v_chroma_shift;
1568 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1569 &h_chroma_shift, &v_chroma_shift);
1570 if (s->pict_type != AV_PICTURE_TYPE_I)
1571 av_log(avctx, AV_LOG_ERROR,
1572 "warning: first frame is no keyframe\n");
1573 else if (s->picture_structure != PICT_FRAME)
1574 av_log(avctx, AV_LOG_INFO,
1575 "allocate dummy last picture for field based first keyframe\n");
1577 /* Allocate a dummy frame */
1578 i = ff_find_unused_picture(s, 0);
1580 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1583 s->last_picture_ptr = &s->picture[i];
1584 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1585 s->last_picture_ptr = NULL;
/* Mid-gray: luma 0, chroma 0x80 (neutral in YUV). */
1589 memset(s->last_picture_ptr->f.data[0], 0,
1590 avctx->height * s->last_picture_ptr->f.linesize[0]);
1591 memset(s->last_picture_ptr->f.data[1], 0x80,
1592 (avctx->height >> v_chroma_shift) *
1593 s->last_picture_ptr->f.linesize[1]);
1594 memset(s->last_picture_ptr->f.data[2], 0x80,
1595 (avctx->height >> v_chroma_shift) *
1596 s->last_picture_ptr->f.linesize[2]);
/* Mark both fields as fully decoded so frame threads never wait on it. */
1598 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1599 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same dummy-frame treatment for a missing backward reference of a B frame. */
1601 if ((s->next_picture_ptr == NULL ||
1602 s->next_picture_ptr->f.data[0] == NULL) &&
1603 s->pict_type == AV_PICTURE_TYPE_B) {
1604 /* Allocate a dummy frame */
1605 i = ff_find_unused_picture(s, 0);
1607 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1610 s->next_picture_ptr = &s->picture[i];
1611 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1612 s->next_picture_ptr = NULL;
1615 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1616 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
/* Refresh the working copies last_picture/next_picture from the pointers. */
1619 if (s->last_picture_ptr) {
1620 ff_mpeg_unref_picture(s, &s->last_picture);
1621 if (s->last_picture_ptr->f.data[0] &&
1622 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1623 s->last_picture_ptr)) < 0)
1626 if (s->next_picture_ptr) {
1627 ff_mpeg_unref_picture(s, &s->next_picture);
1628 if (s->next_picture_ptr->f.data[0] &&
1629 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1630 s->next_picture_ptr)) < 0)
1634 if (s->pict_type != AV_PICTURE_TYPE_I &&
1635 !(s->last_picture_ptr && s->last_picture_ptr->f.data[0])) {
1636 av_log(s, AV_LOG_ERROR,
1637 "Non-reference picture received and no reference available\n");
1638 return AVERROR_INVALIDDATA;
/* Field pictures: offset data to the bottom field if needed and double the
 * strides so one "line" step skips the interleaved other field. */
1641 if (s->picture_structure!= PICT_FRAME) {
1643 for (i = 0; i < 4; i++) {
1644 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1645 s->current_picture.f.data[i] +=
1646 s->current_picture.f.linesize[i];
1648 s->current_picture.f.linesize[i] *= 2;
1649 s->last_picture.f.linesize[i] *= 2;
1650 s->next_picture.f.linesize[i] *= 2;
1654 s->err_recognition = avctx->err_recognition;
1656 /* set dequantizer, we can't do it during init as
1657 * it might change for mpeg4 and we can't do it in the header
1658 * decode as init is not called for mpeg4 there yet */
1659 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1660 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1661 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1662 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1663 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1664 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1666 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1667 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1670 if (s->dct_error_sum) {
1671 assert(s->avctx->noise_reduction && s->encoding);
1672 update_noise_reduction(s);
/* Deprecated XvMC acceleration hook (guarded by FF_API_XVMC). */
1676 FF_DISABLE_DEPRECATION_WARNINGS
1677 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1678 return ff_xvmc_field_start(s, avctx);
1679 FF_ENABLE_DEPRECATION_WARNINGS
1680 #endif /* FF_API_XVMC */
1685 /* generic function for encode/decode called after a
1686 * frame has been coded/decoded. */
/* NOTE(review): elided excerpt — declarations and some braces between the
 * numbered lines are missing here. */
1687 void ff_MPV_frame_end(MpegEncContext *s)
1692 FF_DISABLE_DEPRECATION_WARNINGS
1693 /* redraw edges for the frame if decoding didn't complete */
1694 // just to make sure that all data is rendered.
1695 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1696 ff_xvmc_field_end(s);
1698 FF_ENABLE_DEPRECATION_WARNINGS
1699 #endif /* FF_API_XVMC */
/* Pad the picture borders (EDGE_WIDTH on each side) so unrestricted motion
 * vectors can read outside the coded area; skipped for hwaccel and when the
 * user requested CODEC_FLAG_EMU_EDGE. */
1700 if ((s->er.error_count || s->encoding) &&
1701 !s->avctx->hwaccel &&
1702 s->unrestricted_mv &&
1703 s->current_picture.reference &&
1705 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1706 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1707 int hshift = desc->log2_chroma_w;
1708 int vshift = desc->log2_chroma_h;
1709 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1710 s->h_edge_pos, s->v_edge_pos,
1711 EDGE_WIDTH, EDGE_WIDTH,
1712 EDGE_TOP | EDGE_BOTTOM);
1713 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1714 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1715 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1716 EDGE_TOP | EDGE_BOTTOM);
1717 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1718 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1719 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1720 EDGE_TOP | EDGE_BOTTOM);
/* Remember picture-type state for rate control / header decisions. */
1725 s->last_pict_type = s->pict_type;
1726 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1727 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1728 s->last_non_b_pict_type = s->pict_type;
1731 /* copy back current_picture variables */
1732 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1733 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1734 s->picture[i] = s->current_picture;
1738 assert(i < MAX_PICTURE_COUNT);
1742 /* release non-reference frames */
1743 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1744 if (!s->picture[i].reference)
1745 ff_mpeg_unref_picture(s, &s->picture[i]);
1748 // clear copies, to avoid confusion
1750 memset(&s->last_picture, 0, sizeof(Picture));
1751 memset(&s->next_picture, 0, sizeof(Picture));
1752 memset(&s->current_picture, 0, sizeof(Picture));
1754 s->avctx->coded_frame = &s->current_picture_ptr->f;
/* Signal the whole picture as decoded for waiting frame threads. */
1756 if (s->current_picture.reference)
1757 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1761 * Print debugging info for the given picture.
/* NOTE(review): elided excerpt — the `pict` variable setup, `break`s in the
 * switch, and some braces are missing between the numbered lines. */
1763 void ff_print_debug_info(MpegEncContext *s, Picture *p)
1766 if (s->avctx->hwaccel || !p || !p->mb_type)
1770 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1773 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1774 switch (pict->pict_type) {
1775 case AV_PICTURE_TYPE_I:
1776 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1778 case AV_PICTURE_TYPE_P:
1779 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1781 case AV_PICTURE_TYPE_B:
1782 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1784 case AV_PICTURE_TYPE_S:
1785 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1787 case AV_PICTURE_TYPE_SI:
1788 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1790 case AV_PICTURE_TYPE_SP:
1791 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
/* One character cell per macroblock, printed row by row. */
1794 for (y = 0; y < s->mb_height; y++) {
1795 for (x = 0; x < s->mb_width; x++) {
1796 if (s->avctx->debug & FF_DEBUG_SKIP) {
1797 int count = s->mbskip_table[x + y * s->mb_stride];
1800 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1802 if (s->avctx->debug & FF_DEBUG_QP) {
1803 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1804 p->qscale_table[x + y * s->mb_stride]);
1806 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1807 int mb_type = p->mb_type[x + y * s->mb_stride];
1808 // Type & MV direction
1809 if (IS_PCM(mb_type))
1810 av_log(s->avctx, AV_LOG_DEBUG, "P");
1811 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1812 av_log(s->avctx, AV_LOG_DEBUG, "A");
1813 else if (IS_INTRA4x4(mb_type))
1814 av_log(s->avctx, AV_LOG_DEBUG, "i");
1815 else if (IS_INTRA16x16(mb_type))
1816 av_log(s->avctx, AV_LOG_DEBUG, "I");
1817 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1818 av_log(s->avctx, AV_LOG_DEBUG, "d");
1819 else if (IS_DIRECT(mb_type))
1820 av_log(s->avctx, AV_LOG_DEBUG, "D");
1821 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1822 av_log(s->avctx, AV_LOG_DEBUG, "g");
1823 else if (IS_GMC(mb_type))
1824 av_log(s->avctx, AV_LOG_DEBUG, "G");
1825 else if (IS_SKIP(mb_type))
1826 av_log(s->avctx, AV_LOG_DEBUG, "S");
1827 else if (!USES_LIST(mb_type, 1))
1828 av_log(s->avctx, AV_LOG_DEBUG, ">");
1829 else if (!USES_LIST(mb_type, 0))
1830 av_log(s->avctx, AV_LOG_DEBUG, "<");
1832 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1833 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second character: partition segmentation of the MB. */
1837 if (IS_8X8(mb_type))
1838 av_log(s->avctx, AV_LOG_DEBUG, "+");
1839 else if (IS_16X8(mb_type))
1840 av_log(s->avctx, AV_LOG_DEBUG, "-");
1841 else if (IS_8X16(mb_type))
1842 av_log(s->avctx, AV_LOG_DEBUG, "|");
1843 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1844 av_log(s->avctx, AV_LOG_DEBUG, " ");
1846 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* Third character: '=' marks interlaced MBs. */
1849 if (IS_INTERLACED(mb_type))
1850 av_log(s->avctx, AV_LOG_DEBUG, "=");
1852 av_log(s->avctx, AV_LOG_DEBUG, " ");
1855 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1861 * find the lowest MB row referenced in the MVs
/* Used by frame threading to know how far the reference picture must be
 * decoded before this MB's motion compensation can run.
 * NOTE(review): elided excerpt — the switch cases setting `mvs`/`off` and the
 * early-return body are missing between the numbered lines. */
1863 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* qpel_shift normalizes half-pel MVs to the same scale as quarter-pel ones. */
1865 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1866 int my, off, i, mvs;
/* Field pictures / GMC: fall through to the conservative answer below. */
1868 if (s->picture_structure != PICT_FRAME || s->mcsel)
1871 switch (s->mv_type) {
1885 for (i = 0; i < mvs; i++) {
1886 my = s->mv[dir][i][1]<<qpel_shift;
1887 my_max = FFMAX(my_max, my);
1888 my_min = FFMIN(my_min, my);
/* Convert the extreme vertical displacement (quarter-pel units) to MB rows. */
1891 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1893 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* Conservative fallback: the whole reference picture may be needed. */
1895 return s->mb_height-1;
1898 /* put block[] to dest[] */
/* Intra path: dequantize the block in place, then IDCT and store (overwrite). */
1899 static inline void put_dct(MpegEncContext *s,
1900 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1902 s->dct_unquantize_intra(s, block, i, qscale);
1903 s->dsp.idct_put (dest, line_size, block);
1906 /* add block[] to dest[] */
/* IDCT the (already dequantized) residual and add onto the prediction.
 * Skipped entirely when the block has no coded coefficients. */
1907 static inline void add_dct(MpegEncContext *s,
1908 int16_t *block, int i, uint8_t *dest, int line_size)
1910 if (s->block_last_index[i] >= 0) {
1911 s->dsp.idct_add (dest, line_size, block);
/* Inter path: dequantize the residual, then IDCT and add onto the
 * prediction.  Skipped when the block has no coded coefficients. */
1915 static inline void add_dequant_dct(MpegEncContext *s,
1916 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1918 if (s->block_last_index[i] >= 0) {
1919 s->dct_unquantize_inter(s, block, i, qscale);
1921 s->dsp.idct_add (dest, line_size, block);
1926 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra-prediction state (DC predictors to 1024, AC predictors to
 * zero, coded_block flags) around the current macroblock so later intra MBs
 * do not predict from inter data.
 * NOTE(review): elided excerpt — some lines (e.g. dc_val[0][xy] and the
 * chroma dc_val[1] reset) are missing between the numbered lines. */
1928 void ff_clean_intra_table_entries(MpegEncContext *s)
1930 int wrap = s->b8_stride;
1931 int xy = s->block_index[0];
/* Luma DC predictors for the four 8x8 blocks of this MB (1024 = reset). */
1934 s->dc_val[0][xy + 1 ] =
1935 s->dc_val[0][xy + wrap] =
1936 s->dc_val[0][xy + 1 + wrap] = 1024;
/* Luma AC predictors: two rows of 8x8 blocks. */
1938 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1939 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1940 if (s->msmpeg4_version>=3) {
1941 s->coded_block[xy ] =
1942 s->coded_block[xy + 1 ] =
1943 s->coded_block[xy + wrap] =
1944 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma state is indexed per-MB, not per-8x8 block. */
1947 wrap = s->mb_stride;
1948 xy = s->mb_x + s->mb_y * wrap;
1950 s->dc_val[2][xy] = 1024;
1952 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1953 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1955 s->mbintra_table[xy]= 0;
1958 /* generic function called after a macroblock has been parsed by the
1959 decoder or after it has been encoded by the encoder.
1961 Important variables used:
1962 s->mb_intra : true if intra macroblock
1963 s->mv_dir : motion vector direction
1964 s->mv_type : motion vector type
1965 s->mv : motion vector
1966 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* NOTE(review): elided excerpt — the is_mpeg12 parameter, early returns,
 * many braces and several intermediate lines are missing between the
 * embedded original line numbers.  Code left byte-identical. */
1968 static av_always_inline
1969 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
1972 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* Deprecated XvMC path: hardware consumes the pblocks directly. */
1975 FF_DISABLE_DEPRECATION_WARNINGS
1976 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1977 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1980 FF_ENABLE_DEPRECATION_WARNINGS
1981 #endif /* FF_API_XVMC */
1983 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1984 /* print DCT coefficients */
1986 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1988 for(j=0; j<64; j++){
1989 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
1991 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1995 s->current_picture.qscale_table[mb_xy] = s->qscale;
1997 /* update DC predictors for P macroblocks */
1999 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2000 if(s->mbintra_table[mb_xy])
2001 ff_clean_intra_table_entries(s);
2005 s->last_dc[2] = 128 << s->intra_dc_precision;
2008 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2009 s->mbintra_table[mb_xy]=1;
2011 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2012 uint8_t *dest_y, *dest_cb, *dest_cr;
2013 int dct_linesize, dct_offset;
2014 op_pixels_func (*op_pix)[4];
2015 qpel_mc_func (*op_qpix)[16];
2016 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2017 const int uvlinesize = s->current_picture.f.linesize[1];
2018 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2019 const int block_size = 8;
2021 /* avoid copy if macroblock skipped in last frame too */
2022 /* skip only during decoding as we might trash the buffers during encoding a bit */
2024 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2026 if (s->mb_skipped) {
2028 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2030 } else if(!s->current_picture.reference) {
2033 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT: lines of one field are interleaved, so double the stride
 * and offset by one line instead of block_size lines. */
2037 dct_linesize = linesize << s->interlaced_dct;
2038 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
/* "readable" destinations write straight into the picture; otherwise use
 * the scratchpad and copy out at the end (see bottom of function). */
2042 dest_cb= s->dest[1];
2043 dest_cr= s->dest[2];
2045 dest_y = s->b_scratchpad;
2046 dest_cb= s->b_scratchpad+16*linesize;
2047 dest_cr= s->b_scratchpad+32*linesize;
2051 /* motion handling */
2052 /* decoding or more than one mb_type (MC was already done otherwise) */
/* With frame threading, wait until the referenced rows of the source
 * picture(s) are decoded before doing motion compensation. */
2055 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2056 if (s->mv_dir & MV_DIR_FORWARD) {
2057 ff_thread_await_progress(&s->last_picture_ptr->tf,
2058 ff_MPV_lowest_referenced_row(s, 0),
2061 if (s->mv_dir & MV_DIR_BACKWARD) {
2062 ff_thread_await_progress(&s->next_picture_ptr->tf,
2063 ff_MPV_lowest_referenced_row(s, 1),
/* First prediction uses "put"; a second (bidirectional) pass averages. */
2068 op_qpix= s->me.qpel_put;
2069 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2070 op_pix = s->hdsp.put_pixels_tab;
2072 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2074 if (s->mv_dir & MV_DIR_FORWARD) {
2075 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2076 op_pix = s->hdsp.avg_pixels_tab;
2077 op_qpix= s->me.qpel_avg;
2079 if (s->mv_dir & MV_DIR_BACKWARD) {
2080 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2084 /* skip dequant / idct if we are really late ;) */
2085 if(s->avctx->skip_idct){
2086 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2087 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2088 || s->avctx->skip_idct >= AVDISCARD_ALL)
2092 /* add dct residue */
/* Inter, codecs that need per-block dequant at this stage: */
2093 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2094 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2095 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2096 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2097 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2098 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2100 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2101 if (s->chroma_y_shift){
2102 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2103 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2107 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2108 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2109 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2110 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* Inter, blocks already dequantized by the codec-specific decoder: */
2113 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2114 add_dct(s, block[0], 0, dest_y , dct_linesize);
2115 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2116 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2117 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2119 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2120 if(s->chroma_y_shift){//Chroma420
2121 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2122 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2125 dct_linesize = uvlinesize << s->interlaced_dct;
2126 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2128 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2129 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2130 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2131 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2132 if(!s->chroma_x_shift){//Chroma444
2133 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2134 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2135 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2136 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2141 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2142 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2145 /* dct only in intra block */
/* Intra path, dequant variant: */
2146 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2147 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2148 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2149 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2150 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2152 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2153 if(s->chroma_y_shift){
2154 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2155 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2159 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2160 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2161 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2162 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* Intra path, already-dequantized variant (MPEG-1/2): plain idct_put. */
2166 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2167 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2168 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2169 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2171 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2172 if(s->chroma_y_shift){
2173 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2174 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2177 dct_linesize = uvlinesize << s->interlaced_dct;
2178 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2180 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2181 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2182 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2183 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2184 if(!s->chroma_x_shift){//Chroma444
2185 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2186 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2187 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2188 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* Non-readable case: copy the scratchpad result into the real picture. */
2196 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2197 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2198 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Thin dispatcher: instantiate MPV_decode_mb_internal with is_mpeg12
 * resolved at compile time (1 for FMT_MPEG1, 0 otherwise). */
2203 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2205 if(s->out_format == FMT_MPEG1) {
2206 MPV_decode_mb_internal(s, block, 1);
2209 MPV_decode_mb_internal(s, block, 0);
2213 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* Draw edge padding for the just-decoded band and hand it to the user's
 * draw_horiz_band callback (if any).
 * NOTE(review): elided excerpt — the draw_edges gating condition, the `src`
 * selection (cur vs. last), h adjustment for field pics and offset[1] are
 * missing between the numbered lines. */
2215 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2216 Picture *last, int y, int h, int picture_structure,
2217 int first_field, int draw_edges, int low_delay,
2218 int v_edge_pos, int h_edge_pos)
2220 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2221 int hshift = desc->log2_chroma_w;
2222 int vshift = desc->log2_chroma_h;
2223 const int field_pic = picture_structure != PICT_FRAME;
/* Pad left/right edges of the band; top/bottom only on the first/last band. */
2229 if (!avctx->hwaccel &&
2232 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2233 int *linesize = cur->f.linesize;
2234 int sides = 0, edge_h;
2235 if (y==0) sides |= EDGE_TOP;
2236 if (y + h >= v_edge_pos)
2237 sides |= EDGE_BOTTOM;
2239 edge_h= FFMIN(h, v_edge_pos - y);
2241 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2242 linesize[0], h_edge_pos, edge_h,
2243 EDGE_WIDTH, EDGE_WIDTH, sides);
2244 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2245 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2246 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2247 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2248 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2249 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
/* Clip the band height to the visible picture. */
2252 h = FFMIN(h, avctx->height - y);
2254 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2256 if (avctx->draw_horiz_band) {
2258 int offset[AV_NUM_DATA_POINTERS];
/* In coded order (or low delay) the current picture is displayable;
 * otherwise the band shown comes from the previous reference. */
2261 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2262 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2269 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2270 picture_structure == PICT_FRAME &&
2271 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2272 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2275 offset[0]= y * src->linesize[0];
2277 offset[2]= (y >> vshift) * src->linesize[1];
2278 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2284 avctx->draw_horiz_band(avctx, src, offset,
2285 y, picture_structure, h);
2289 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2291 int draw_edges = s->unrestricted_mv && !s->intra_only;
2292 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2293 &s->last_picture, y, h, s->picture_structure,
2294 s->first_field, draw_edges, s->low_delay,
2295 s->v_edge_pos, s->h_edge_pos);
2298 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2299 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2300 const int uvlinesize = s->current_picture.f.linesize[1];
2301 const int mb_size= 4;
2303 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2304 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2305 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2306 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2307 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2308 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2309 //block_index is not used by mpeg2, so it is not affected by chroma_format
2311 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2312 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2313 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2315 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2317 if(s->picture_structure==PICT_FRAME){
2318 s->dest[0] += s->mb_y * linesize << mb_size;
2319 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2320 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2322 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2323 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2324 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2325 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 *                  (inverse) permutated to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int i;
    int16_t temp[64];

    if(last<=0) return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* save and clear the coefficients that will be moved */
    for(i=0; i<=last; i++){
        const int j= scantable[i];
        temp[j]= block[j];
        block[j]=0;
    }

    /* write them back at their permuted positions */
    for(i=0; i<=last; i++){
        const int j= scantable[i];
        const int perm_j= permutation[j];
        block[perm_j]= temp[j];
    }
}
2359 void ff_mpeg_flush(AVCodecContext *avctx){
2361 MpegEncContext *s = avctx->priv_data;
2363 if(s==NULL || s->picture==NULL)
2366 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2367 ff_mpeg_unref_picture(s, &s->picture[i]);
2368 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2370 ff_mpeg_unref_picture(s, &s->current_picture);
2371 ff_mpeg_unref_picture(s, &s->last_picture);
2372 ff_mpeg_unref_picture(s, &s->next_picture);
2374 s->mb_x= s->mb_y= 0;
2376 s->parse_context.state= -1;
2377 s->parse_context.frame_start_found= 0;
2378 s->parse_context.overread= 0;
2379 s->parse_context.overread_index= 0;
2380 s->parse_context.index= 0;
2381 s->parse_context.last_index= 0;
2382 s->bitstream_buffer_size=0;
2386 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2387 int16_t *block, int n, int qscale)
2389 int i, level, nCoeffs;
2390 const uint16_t *quant_matrix;
2392 nCoeffs= s->block_last_index[n];
2395 block[0] = block[0] * s->y_dc_scale;
2397 block[0] = block[0] * s->c_dc_scale;
2398 /* XXX: only mpeg1 */
2399 quant_matrix = s->intra_matrix;
2400 for(i=1;i<=nCoeffs;i++) {
2401 int j= s->intra_scantable.permutated[i];
2406 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2407 level = (level - 1) | 1;
2410 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2411 level = (level - 1) | 1;
2418 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2419 int16_t *block, int n, int qscale)
2421 int i, level, nCoeffs;
2422 const uint16_t *quant_matrix;
2424 nCoeffs= s->block_last_index[n];
2426 quant_matrix = s->inter_matrix;
2427 for(i=0; i<=nCoeffs; i++) {
2428 int j= s->intra_scantable.permutated[i];
2433 level = (((level << 1) + 1) * qscale *
2434 ((int) (quant_matrix[j]))) >> 4;
2435 level = (level - 1) | 1;
2438 level = (((level << 1) + 1) * qscale *
2439 ((int) (quant_matrix[j]))) >> 4;
2440 level = (level - 1) | 1;
2447 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2448 int16_t *block, int n, int qscale)
2450 int i, level, nCoeffs;
2451 const uint16_t *quant_matrix;
2453 if(s->alternate_scan) nCoeffs= 63;
2454 else nCoeffs= s->block_last_index[n];
2457 block[0] = block[0] * s->y_dc_scale;
2459 block[0] = block[0] * s->c_dc_scale;
2460 quant_matrix = s->intra_matrix;
2461 for(i=1;i<=nCoeffs;i++) {
2462 int j= s->intra_scantable.permutated[i];
2467 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2470 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2477 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2478 int16_t *block, int n, int qscale)
2480 int i, level, nCoeffs;
2481 const uint16_t *quant_matrix;
2484 if(s->alternate_scan) nCoeffs= 63;
2485 else nCoeffs= s->block_last_index[n];
2488 block[0] = block[0] * s->y_dc_scale;
2490 block[0] = block[0] * s->c_dc_scale;
2491 quant_matrix = s->intra_matrix;
2492 for(i=1;i<=nCoeffs;i++) {
2493 int j= s->intra_scantable.permutated[i];
2498 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2501 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2510 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2511 int16_t *block, int n, int qscale)
2513 int i, level, nCoeffs;
2514 const uint16_t *quant_matrix;
2517 if(s->alternate_scan) nCoeffs= 63;
2518 else nCoeffs= s->block_last_index[n];
2520 quant_matrix = s->inter_matrix;
2521 for(i=0; i<=nCoeffs; i++) {
2522 int j= s->intra_scantable.permutated[i];
2527 level = (((level << 1) + 1) * qscale *
2528 ((int) (quant_matrix[j]))) >> 4;
2531 level = (((level << 1) + 1) * qscale *
2532 ((int) (quant_matrix[j]))) >> 4;
2541 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2542 int16_t *block, int n, int qscale)
2544 int i, level, qmul, qadd;
2547 assert(s->block_last_index[n]>=0);
2553 block[0] = block[0] * s->y_dc_scale;
2555 block[0] = block[0] * s->c_dc_scale;
2556 qadd = (qscale - 1) | 1;
2563 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2565 for(i=1; i<=nCoeffs; i++) {
2569 level = level * qmul - qadd;
2571 level = level * qmul + qadd;
2578 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2579 int16_t *block, int n, int qscale)
2581 int i, level, qmul, qadd;
2584 assert(s->block_last_index[n]>=0);
2586 qadd = (qscale - 1) | 1;
2589 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2591 for(i=0; i<=nCoeffs; i++) {
2595 level = level * qmul - qadd;
2597 level = level * qmul + qadd;
2605 * set qscale and update qscale dependent variables.
2607 void ff_set_qscale(MpegEncContext * s, int qscale)
2611 else if (qscale > 31)
2615 s->chroma_qscale= s->chroma_qscale_table[qscale];
2617 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2618 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2621 void ff_MPV_report_decode_progress(MpegEncContext *s)
2623 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2624 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
#if CONFIG_ERROR_RESILIENCE
/**
 * Copy the per-frame state the error-resilience code needs out of the
 * MpegEncContext and start error tracking for the new frame.
 */
void ff_mpeg_er_frame_start(MpegEncContext *s)
{
    ERContext *er = &s->er;

    er->cur_pic  = s->current_picture_ptr;
    er->last_pic = s->last_picture_ptr;
    er->next_pic = s->next_picture_ptr;

    er->pp_time           = s->pp_time;
    er->pb_time           = s->pb_time;
    er->quarter_sample    = s->quarter_sample;
    er->partitioned_frame = s->partitioned_frame;

    ff_er_frame_start(er);
}
#endif /* CONFIG_ERROR_RESILIENCE */