2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/avassert.h"
31 #include "libavutil/imgutils.h"
36 #include "mpegvideo.h"
39 #include "xvmc_internal.h"
/* Forward declarations of the per-standard inverse-quantization helpers.
 * One intra/inter pair per bitstream family (MPEG-1, MPEG-2, H.263);
 * ff_dct_common_init() below installs these as function pointers on
 * MpegEncContext so arch-specific code can override them. */
46 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
47 int16_t *block, int n, int qscale);
48 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
49 int16_t *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
51 int16_t *block, int n, int qscale);
52 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
53 int16_t *block, int n, int qscale);
54 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
55 int16_t *block, int n, int qscale);
56 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
57 int16_t *block, int n, int qscale);
58 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
59 int16_t *block, int n, int qscale);
62 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default luma-qscale -> chroma-qscale mapping: the identity (chroma uses
 * the same quantizer as luma). Codecs with a non-linear chroma quantizer
 * replace this table.
 * NOTE(review): the closing "};" of this initializer is missing from this
 * listing — confirm against the complete file. */
68 static const uint8_t ff_default_chroma_qscale_table[32] = {
69 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
70 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
71 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: a constant 8 for every qscale value
 * (indexed by qscale, 128 entries).
 * NOTE(review): the closing "};" is missing from this listing. */
74 const uint8_t ff_mpeg1_dc_scale_table[128] = {
75 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale for intra_dc_precision == 1: constant 4.
 * NOTE(review): the closing "};" is missing from this listing. */
86 static const uint8_t mpeg2_dc_scale_table1[128] = {
87 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale for intra_dc_precision == 2: constant 2.
 * NOTE(review): the closing "};" is missing from this listing. */
98 static const uint8_t mpeg2_dc_scale_table2[128] = {
99 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale for intra_dc_precision == 3: constant 1 (full precision).
 * NOTE(review): the closing "};" is missing from this listing. */
110 static const uint8_t mpeg2_dc_scale_table3[128] = {
111 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Lookup indexed by MPEG-2 intra_dc_precision (0..3); entry 0 shares the
 * MPEG-1 table since precision 0 uses the same scale of 8.
 * NOTE(review): the closing "};" is missing from this listing. */
122 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
123 ff_mpeg1_dc_scale_table,
124 mpeg2_dc_scale_table1,
125 mpeg2_dc_scale_table2,
126 mpeg2_dc_scale_table3,
/* Pixel-format list exported for 4:2:0-only decoders.
 * NOTE(review): the initializer entries are missing from this listing —
 * presumably AV_PIX_FMT_YUV420P followed by an AV_PIX_FMT_NONE terminator;
 * confirm against the complete file. */
129 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback: re-decodes/conceals one macroblock. Copies the
 * MV/type/skip state supplied by the ER code into the context, computes the
 * destination pointers for the three planes from the MB coordinates and the
 * chroma subsampling shifts, then hands the block off to ff_MPV_decode_mb().
 * NOTE(review): interior lines (braces, mb_x/mb_y assignments, etc.) are
 * missing from this listing — confirm against the complete file. */
134 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
136 int mb_x, int mb_y, int mb_intra, int mb_skipped)
138 MpegEncContext *s = opaque;
141 s->mv_type = mv_type;
142 s->mb_intra = mb_intra;
143 s->mb_skipped = mb_skipped;
146 memcpy(s->mv, mv, sizeof(*mv));
148 ff_init_block_index(s);
149 ff_update_block_index(s);
151 s->dsp.clear_blocks(s->block[0]);
153 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
154 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
155 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
158 ff_MPV_decode_mb(s, s->block);
/* Scan [p, end) for an MPEG start code (0x000001xx), carrying partial match
 * state across calls in *state so a start code split between buffers is
 * still found. The three p[-1]/p[-2]/p[-3] tests skip ahead by the largest
 * safe stride based on which bytes cannot begin a 00 00 01 sequence.
 * NOTE(review): several lines (the early-exit checks, the while loop around
 * the stride tests, the final *state reload and return) are missing from
 * this listing — confirm against the complete file. */
161 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
163 uint32_t * restrict state)
171 for (i = 0; i < 3; i++) {
172 uint32_t tmp = *state << 8;
173 *state = tmp + *(p++);
174 if (tmp == 0x100 || p == end)
179 if (p[-1] > 1 ) p += 3;
180 else if (p[-2] ) p += 2;
181 else if (p[-3]|(p[-1]-1)) p++;
188 p = FFMIN(p, end) - 4;
194 /* init common dct for both encoder and decoder */
/* Installs the C dequantizers (bitexact MPEG-2 intra when
 * CODEC_FLAG_BITEXACT is set), lets each architecture override them via the
 * ff_MPV_common_init_* hooks, then builds the permuted scan tables for the
 * (possibly arch-specific) IDCT coefficient ordering.
 * NOTE(review): the HAVE_*/ARCH_* #if guards around the per-arch init calls
 * and the function's closing lines are missing from this listing. */
195 av_cold int ff_dct_common_init(MpegEncContext *s)
197 ff_dsputil_init(&s->dsp, s->avctx);
198 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
200 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
201 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
202 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
203 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
204 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
205 if (s->flags & CODEC_FLAG_BITEXACT)
206 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
207 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
210 ff_MPV_common_init_x86(s);
212 ff_MPV_common_init_axp(s);
214 ff_MPV_common_init_arm(s);
216 ff_MPV_common_init_altivec(s);
218 ff_MPV_common_init_bfin(s);
221 /* load & permutate scantables
222 * note: only wmv uses different ones
224 if (s->alternate_scan) {
225 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
226 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
228 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
229 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
231 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
232 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge-emulation buffer and
 * the shared ME/RD/B/OBMC scratchpad). The scratchpads deliberately alias
 * one region since they are never live at the same time; obmc_scratchpad
 * starts 16 bytes in. On failure the edge buffer is freed and ENOMEM
 * returned.
 * NOTE(review): the "fail" label and the success return are missing from
 * this listing — confirm against the complete file. */
237 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
239 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
241 // edge emu needs blocksize + filter length - 1
242 // (= 17x17 for halfpel / 21x21 for h264)
243 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
244 // at uvlinesize. It supports only YUV420 so 24x24 is enough
245 // linesize * interlaced * MBsize
246 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
249 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 2,
251 s->me.temp = s->me.scratchpad;
252 s->rd_scratchpad = s->me.scratchpad;
253 s->b_scratchpad = s->me.scratchpad;
254 s->obmc_scratchpad = s->me.scratchpad + 16;
258 av_freep(&s->edge_emu_buffer);
259 return AVERROR(ENOMEM);
263 * Allocate a frame buffer
/* Obtain pixel data for pic: normal codecs go through the (possibly
 * threaded) ff_thread_get_buffer(); the WMV3/VC-1/MSS2 image codecs bypass
 * user get_buffer callbacks and use avcodec_default_get_buffer2 because
 * they allocate internal buffers with different dimensions/colorspaces.
 * Also allocates hwaccel private data, validates that strides match the
 * context (and that U/V strides agree), and lazily allocates the
 * linesize-dependent scratch buffers.
 * NOTE(review): the if/else structure, error returns and closing lines are
 * missing from this listing — confirm against the complete file. */
265 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
270 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
271 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
272 s->codec_id != AV_CODEC_ID_MSS2)
273 r = ff_thread_get_buffer(s->avctx, &pic->tf,
274 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
276 pic->f.width = s->avctx->width;
277 pic->f.height = s->avctx->height;
278 pic->f.format = s->avctx->pix_fmt;
279 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
282 if (r < 0 || !pic->f.data[0]) {
283 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
288 if (s->avctx->hwaccel) {
289 assert(!pic->hwaccel_picture_private);
290 if (s->avctx->hwaccel->priv_data_size) {
291 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
292 if (!pic->hwaccel_priv_buf) {
293 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
296 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
300 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
301 s->uvlinesize != pic->f.linesize[1])) {
302 av_log(s->avctx, AV_LOG_ERROR,
303 "get_buffer() failed (stride changed)\n");
304 ff_mpeg_unref_picture(s, pic);
308 if (pic->f.linesize[1] != pic->f.linesize[2]) {
309 av_log(s->avctx, AV_LOG_ERROR,
310 "get_buffer() failed (uv stride mismatch)\n");
311 ff_mpeg_unref_picture(s, pic);
315 if (!s->edge_emu_buffer &&
316 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
317 av_log(s->avctx, AV_LOG_ERROR,
318 "get_buffer() failed to allocate context scratch buffers.\n");
319 ff_mpeg_unref_picture(s, pic);
/* Release every per-picture side-data buffer (variance/mean stats, skip,
 * qscale, mb_type, and the per-direction motion-vector/ref-index buffers).
 * av_buffer_unref() is NULL-safe, so this may be called on a partially
 * allocated Picture. */
326 static void free_picture_tables(Picture *pic)
330 av_buffer_unref(&pic->mb_var_buf);
331 av_buffer_unref(&pic->mc_mb_var_buf);
332 av_buffer_unref(&pic->mb_mean_buf);
333 av_buffer_unref(&pic->mbskip_table_buf);
334 av_buffer_unref(&pic->qscale_table_buf);
335 av_buffer_unref(&pic->mb_type_buf);
337 for (i = 0; i < 2; i++) {
338 av_buffer_unref(&pic->motion_val_buf[i]);
339 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data buffers. The always-needed tables
 * (skip/qscale/mb_type) come first; the variance/mean statistics tables are
 * encoder-only; motion vector + ref-index buffers are allocated for H.263
 * family output, when encoding, or when MV debugging is requested.
 * Returns ENOMEM on any failure (caller frees via free_picture_tables()).
 * NOTE(review): some guard lines (e.g. the s->encoding test before the
 * stats allocations) are missing from this listing. */
343 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
345 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
346 const int mb_array_size = s->mb_stride * s->mb_height;
347 const int b8_array_size = s->b8_stride * s->mb_height * 2;
351 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
352 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
353 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
355 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
356 return AVERROR(ENOMEM);
359 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
360 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
361 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
362 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
363 return AVERROR(ENOMEM);
366 if (s->out_format == FMT_H263 || s->encoding ||
367 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
368 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
369 int ref_index_size = 4 * mb_array_size;
371 for (i = 0; mv_size && i < 2; i++) {
372 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
373 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
374 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
375 return AVERROR(ENOMEM);
/* Ensure every per-picture AVBuffer-backed table is writable (i.e. not
 * shared with another Picture) via av_buffer_make_writable(), using the
 * MAKE_WRITABLE helper macro for each field.
 * NOTE(review): parts of the macro body and the return statement are
 * missing from this listing — confirm against the complete file. */
382 static int make_tables_writable(Picture *pic)
385 #define MAKE_WRITABLE(table) \
388 (ret = av_buffer_make_writable(&pic->table)) < 0)\
392 MAKE_WRITABLE(mb_var_buf);
393 MAKE_WRITABLE(mc_mb_var_buf);
394 MAKE_WRITABLE(mb_mean_buf);
395 MAKE_WRITABLE(mbskip_table_buf);
396 MAKE_WRITABLE(qscale_table_buf);
397 MAKE_WRITABLE(mb_type_buf);
399 for (i = 0; i < 2; i++) {
400 MAKE_WRITABLE(motion_val_buf[i]);
401 MAKE_WRITABLE(ref_index_buf[i]);
408 * Allocate a Picture.
409 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Top-level Picture setup: allocates pixel data (unless shared), records
 * the context line sizes, allocates or re-makes-writable the side tables,
 * then derives the public pointers from the buffers — note the
 * "+ 2 * s->mb_stride + 1" offsets so row/column -1 accesses stay in
 * bounds, and the "+ 4" guard rows on motion_val.
 * NOTE(review): several control-flow lines (the shared/!shared branch,
 * error gotos) are missing from this listing. */
411 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
416 assert(pic->f.data[0]);
419 assert(!pic->f.data[0]);
421 if (alloc_frame_buffer(s, pic) < 0)
424 s->linesize = pic->f.linesize[0];
425 s->uvlinesize = pic->f.linesize[1];
428 if (!pic->qscale_table_buf)
429 ret = alloc_picture_tables(s, pic);
431 ret = make_tables_writable(pic);
436 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
437 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
438 pic->mb_mean = pic->mb_mean_buf->data;
441 pic->mbskip_table = pic->mbskip_table_buf->data;
442 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
443 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
445 if (pic->motion_val_buf[0]) {
446 for (i = 0; i < 2; i++) {
447 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
448 pic->ref_index[i] = pic->ref_index_buf[i]->data;
454 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
455 ff_mpeg_unref_picture(s, pic);
456 free_picture_tables(pic);
457 return AVERROR(ENOMEM);
461 * Deallocate a picture.
/* Releases the frame data (again bypassing user callbacks for the WM image
 * codecs), drops the hwaccel private buffer, then memsets the tail of the
 * struct to zero starting after mb_mean — fields up to and including
 * mb_mean (the AVFrame, buffers, pointers) were already released above and
 * the earlier fields must survive, hence the offsetof-based cutoff. */
463 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
465 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
468 /* WM Image / Screen codecs allocate internal buffers with different
469 * dimensions / colorspaces; ignore user-defined callbacks for these. */
470 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
471 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
472 s->codec_id != AV_CODEC_ID_MSS2)
473 ff_thread_release_buffer(s->avctx, &pic->tf);
475 av_frame_unref(&pic->f);
477 av_buffer_unref(&pic->hwaccel_priv_buf);
479 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's side tables reference the same underlying AVBuffers as src's:
 * the UPDATE_TABLE macro unrefs dst's buffer and re-refs src's only when
 * they differ, freeing everything and returning ENOMEM if a ref fails.
 * Afterwards the derived plain pointers are copied verbatim (valid because
 * they point into the now-shared buffers).
 * NOTE(review): parts of the macro body and the return are missing from
 * this listing. */
482 static int update_picture_tables(Picture *dst, Picture *src)
486 #define UPDATE_TABLE(table)\
489 (!dst->table || dst->table->buffer != src->table->buffer)) {\
490 av_buffer_unref(&dst->table);\
491 dst->table = av_buffer_ref(src->table);\
493 free_picture_tables(dst);\
494 return AVERROR(ENOMEM);\
499 UPDATE_TABLE(mb_var_buf);
500 UPDATE_TABLE(mc_mb_var_buf);
501 UPDATE_TABLE(mb_mean_buf);
502 UPDATE_TABLE(mbskip_table_buf);
503 UPDATE_TABLE(qscale_table_buf);
504 UPDATE_TABLE(mb_type_buf);
505 for (i = 0; i < 2; i++) {
506 UPDATE_TABLE(motion_val_buf[i]);
507 UPDATE_TABLE(ref_index_buf[i]);
510 dst->mb_var = src->mb_var;
511 dst->mc_mb_var = src->mc_mb_var;
512 dst->mb_mean = src->mb_mean;
513 dst->mbskip_table = src->mbskip_table;
514 dst->qscale_table = src->qscale_table;
515 dst->mb_type = src->mb_type;
516 for (i = 0; i < 2; i++) {
517 dst->motion_val[i] = src->motion_val[i];
518 dst->ref_index[i] = src->ref_index[i];
/* Reference src into dst: ref the frame via ff_thread_ref_frame(), share
 * the side tables via update_picture_tables(), duplicate the hwaccel
 * private buffer reference, then copy the scalar bookkeeping fields.
 * On any failure dst is fully unreferenced before returning.
 * NOTE(review): error-check lines after each call and the returns are
 * missing from this listing — confirm against the complete file. */
524 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
528 av_assert0(!dst->f.buf[0]);
529 av_assert0(src->f.buf[0]);
533 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
537 ret = update_picture_tables(dst, src);
541 if (src->hwaccel_picture_private) {
542 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
543 if (!dst->hwaccel_priv_buf)
545 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
548 dst->field_picture = src->field_picture;
549 dst->mb_var_sum = src->mb_var_sum;
550 dst->mc_mb_var_sum = src->mc_mb_var_sum;
551 dst->b_frame_score = src->b_frame_score;
552 dst->needs_realloc = src->needs_realloc;
553 dst->reference = src->reference;
554 dst->shared = src->shared;
558 ff_mpeg_unref_picture(s, dst);
/* Per-slice-thread context setup: ME maps, optional noise-reduction error
 * accumulator, the 12 DCT block buffers (pblocks[i] aliasing block[i]),
 * and for H.263-family output the AC prediction values, laid out as one
 * luma plane of y_size followed by two chroma planes of c_size, with
 * +b8_stride+1 / +mb_stride+1 offsets so index -1 accesses are valid.
 * Failure path frees everything through ff_MPV_common_end().
 * NOTE(review): braces/encoder-only guards are missing from this listing. */
562 static int init_duplicate_context(MpegEncContext *s)
564 int y_size = s->b8_stride * (2 * s->mb_height + 1);
565 int c_size = s->mb_stride * (s->mb_height + 1);
566 int yc_size = y_size + 2 * c_size;
574 s->obmc_scratchpad = NULL;
577 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
578 ME_MAP_SIZE * sizeof(uint32_t), fail)
579 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
580 ME_MAP_SIZE * sizeof(uint32_t), fail)
581 if (s->avctx->noise_reduction) {
582 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
583 2 * 64 * sizeof(int), fail)
586 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
587 s->block = s->blocks[0];
589 for (i = 0; i < 12; i++) {
590 s->pblocks[i] = &s->block[i];
593 if (s->out_format == FMT_H263) {
595 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
596 yc_size * sizeof(int16_t) * 16, fail);
597 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
598 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
599 s->ac_val[2] = s->ac_val[1] + c_size;
604 return -1; // free() through ff_MPV_common_end()
/* Mirror of init_duplicate_context(): frees the per-thread scratch and ME
 * buffers. obmc_scratchpad is only reset to NULL (not freed) because it
 * aliases into me.scratchpad, which owns the allocation. */
607 static void free_duplicate_context(MpegEncContext *s)
612 av_freep(&s->edge_emu_buffer);
613 av_freep(&s->me.scratchpad);
617 s->obmc_scratchpad = NULL;
619 av_freep(&s->dct_error_sum);
620 av_freep(&s->me.map);
621 av_freep(&s->me.score_map);
622 av_freep(&s->blocks);
623 av_freep(&s->ac_val_base);
/* Copy the per-thread-owned pointer/state fields from src into bak so they
 * survive a wholesale memcpy of the context (see
 * ff_update_duplicate_context()).
 * NOTE(review): most COPY() lines are missing from this listing — the full
 * file copies many more fields; confirm before relying on this list. */
627 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
629 #define COPY(a) bak->a = src->a
630 COPY(edge_emu_buffer);
635 COPY(obmc_scratchpad);
642 COPY(me.map_generation);
/* Sync a slice-thread context with the master: save dst's thread-local
 * fields, memcpy the whole MpegEncContext from src, restore the saved
 * fields, re-point pblocks at dst's own block array, and (re)allocate the
 * linesize-dependent scratch buffers if dst does not have them yet.
 * NOTE(review): the local declarations and final return are missing from
 * this listing — confirm against the complete file. */
654 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
658 // FIXME copy only needed parts
660 backup_duplicate_context(&bak, dst);
661 memcpy(dst, src, sizeof(MpegEncContext));
662 backup_duplicate_context(dst, &bak);
663 for (i = 0; i < 12; i++) {
664 dst->pblocks[i] = &dst->block[i];
666 if (!dst->edge_emu_buffer &&
667 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
668 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
669 "scratch buffers.\n");
672 // STOP_TIMER("update_duplicate_context")
673 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading state transfer from the previous thread's context (s1)
 * into this thread's (s). Handles: first-time init (memcpy + fresh
 * bitstream buffer + ff_MPV_common_init), resolution changes, picture
 * table/reference propagation (including the current/last/next picture
 * pointers rebased into this context's picture array), MPEG-4 and
 * MPEG-2/interlacing field ranges copied by memcpy between struct markers,
 * the packed-DivX bitstream buffer, scratch buffer allocation, and
 * last-picture bookkeeping used for rate-control lambdas.
 * NOTE(review): numerous guard/brace/return lines are missing from this
 * listing — confirm details against the complete file. */
677 int ff_mpeg_update_thread_context(AVCodecContext *dst,
678 const AVCodecContext *src)
681 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
683 if (dst == src || !s1->context_initialized)
686 // FIXME can parameters change on I-frames?
687 // in that case dst may need a reinit
688 if (!s->context_initialized) {
689 memcpy(s, s1, sizeof(MpegEncContext));
692 s->bitstream_buffer = NULL;
693 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
695 ff_MPV_common_init(s);
698 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
700 s->context_reinit = 0;
701 s->height = s1->height;
702 s->width = s1->width;
703 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
707 s->avctx->coded_height = s1->avctx->coded_height;
708 s->avctx->coded_width = s1->avctx->coded_width;
709 s->avctx->width = s1->avctx->width;
710 s->avctx->height = s1->avctx->height;
712 s->coded_picture_number = s1->coded_picture_number;
713 s->picture_number = s1->picture_number;
714 s->input_picture_number = s1->input_picture_number;
716 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
717 ff_mpeg_unref_picture(s, &s->picture[i]);
718 if (s1->picture[i].f.data[0] &&
719 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
723 #define UPDATE_PICTURE(pic)\
725 ff_mpeg_unref_picture(s, &s->pic);\
726 if (s1->pic.f.data[0])\
727 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
729 ret = update_picture_tables(&s->pic, &s1->pic);\
734 UPDATE_PICTURE(current_picture);
735 UPDATE_PICTURE(last_picture);
736 UPDATE_PICTURE(next_picture);
738 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
739 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
740 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
742 // Error/bug resilience
743 s->next_p_frame_damaged = s1->next_p_frame_damaged;
744 s->workaround_bugs = s1->workaround_bugs;
747 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
748 (char *) &s1->shape - (char *) &s1->time_increment_bits);
751 s->max_b_frames = s1->max_b_frames;
752 s->low_delay = s1->low_delay;
753 s->droppable = s1->droppable;
755 // DivX handling (doesn't work)
756 s->divx_packed = s1->divx_packed;
758 if (s1->bitstream_buffer) {
759 if (s1->bitstream_buffer_size +
760 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
761 av_fast_malloc(&s->bitstream_buffer,
762 &s->allocated_bitstream_buffer_size,
763 s1->allocated_bitstream_buffer_size);
764 s->bitstream_buffer_size = s1->bitstream_buffer_size;
765 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
766 s1->bitstream_buffer_size);
767 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
768 FF_INPUT_BUFFER_PADDING_SIZE);
771 // linesize dependend scratch buffer allocation
772 if (!s->edge_emu_buffer)
774 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
775 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
776 "scratch buffers.\n");
777 return AVERROR(ENOMEM);
780 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
781 "be allocated due to unknown size.\n");
785 // MPEG2/interlacing info
786 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
787 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
789 if (!s1->first_field) {
790 s->last_pict_type = s1->pict_type;
791 if (s1->current_picture_ptr)
792 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
794 if (s1->pict_type != AV_PICTURE_TYPE_B) {
795 s->last_non_b_pict_type = s1->pict_type;
803 * Set the given MpegEncContext to common defaults
804 * (same for encoding and decoding).
805 * The changed fields will not depend upon the
806 * prior state of the MpegEncContext.
/* Defaults: MPEG-1 DC scale tables, identity chroma qscale, progressive
 * frame-coded video, picture counters zeroed, single slice context. */
808 void ff_MPV_common_defaults(MpegEncContext *s)
810 s->y_dc_scale_table =
811 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
812 s->chroma_qscale_table = ff_default_chroma_qscale_table;
813 s->progressive_frame = 1;
814 s->progressive_sequence = 1;
815 s->picture_structure = PICT_FRAME;
817 s->coded_picture_number = 0;
818 s->picture_number = 0;
819 s->input_picture_number = 0;
821 s->picture_in_gop_number = 0;
826 s->slice_context_count = 1;
830 * Set the given MpegEncContext to defaults for decoding.
831 * the changed fields will not depend upon
832 * the prior state of the MpegEncContext.
/* Currently just the common defaults; kept as a separate entry point so
 * decoder-only defaults can be added without touching encoders. */
834 void ff_MPV_decode_defaults(MpegEncContext *s)
836 ff_MPV_common_defaults(s);
/* Wire up the error-resilience context: copy geometry (mb/b8 strides and
 * counts) from the MpegEncContext, allocate the temp and status tables,
 * share the skip/intra/dc-value tables, and install mpeg_er_decode_mb as
 * the concealment callback. Frees both allocations and returns ENOMEM if
 * either fails.
 * NOTE(review): the opaque/dsp assignments and the success return are
 * missing from this listing. */
839 static int init_er(MpegEncContext *s)
841 ERContext *er = &s->er;
842 int mb_array_size = s->mb_height * s->mb_stride;
845 er->avctx = s->avctx;
848 er->mb_index2xy = s->mb_index2xy;
849 er->mb_num = s->mb_num;
850 er->mb_width = s->mb_width;
851 er->mb_height = s->mb_height;
852 er->mb_stride = s->mb_stride;
853 er->b8_stride = s->b8_stride;
855 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
856 er->error_status_table = av_mallocz(mb_array_size);
857 if (!er->er_temp_buffer || !er->error_status_table)
860 er->mbskip_table = s->mbskip_table;
861 er->mbintra_table = s->mbintra_table;
863 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
864 er->dc_val[i] = s->dc_val[i];
866 er->decode_mb = mpeg_er_decode_mb;
871 av_freep(&er->er_temp_buffer);
872 av_freep(&er->error_status_table);
873 return AVERROR(ENOMEM);
877 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Computes all width/height-derived geometry (mb/b8/b4 strides, edge
 * positions, mb_index2xy mapping with a +1 sentinel entry), then allocates
 * the resolution-sized tables: encoder MV tables (each offset by
 * mb_stride+1 so row/col -1 is addressable), mb_type/lambda/complexity
 * tables, MPEG-4 interlaced direct-mode field MV tables, H.263 coded-block
 * and cbp/pred_dir tables, DC prediction values (reset to the standard
 * 1024 = 128 << 3), and the intra/skip maps.
 * NOTE(review): encoder-only #if/if guards, some closing braces and the
 * success return are missing from this listing. */
879 static int init_context_frame(MpegEncContext *s)
881 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
883 s->mb_width = (s->width + 15) / 16;
884 s->mb_stride = s->mb_width + 1;
885 s->b8_stride = s->mb_width * 2 + 1;
886 s->b4_stride = s->mb_width * 4 + 1;
887 mb_array_size = s->mb_height * s->mb_stride;
888 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
890 /* set default edge pos, will be overriden
891 * in decode_header if needed */
892 s->h_edge_pos = s->mb_width * 16;
893 s->v_edge_pos = s->mb_height * 16;
895 s->mb_num = s->mb_width * s->mb_height;
900 s->block_wrap[3] = s->b8_stride;
902 s->block_wrap[5] = s->mb_stride;
904 y_size = s->b8_stride * (2 * s->mb_height + 1);
905 c_size = s->mb_stride * (s->mb_height + 1);
906 yc_size = y_size + 2 * c_size;
908 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
909 fail); // error ressilience code looks cleaner with this
910 for (y = 0; y < s->mb_height; y++)
911 for (x = 0; x < s->mb_width; x++)
912 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
914 s->mb_index2xy[s->mb_height * s->mb_width] =
915 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
918 /* Allocate MV tables */
919 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
920 mv_table_size * 2 * sizeof(int16_t), fail);
921 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
922 mv_table_size * 2 * sizeof(int16_t), fail);
923 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
924 mv_table_size * 2 * sizeof(int16_t), fail);
925 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
926 mv_table_size * 2 * sizeof(int16_t), fail);
927 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
928 mv_table_size * 2 * sizeof(int16_t), fail);
929 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
930 mv_table_size * 2 * sizeof(int16_t), fail);
931 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
932 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
933 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
934 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
936 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
938 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
940 /* Allocate MB type table */
941 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
942 sizeof(uint16_t), fail); // needed for encoding
944 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
947 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
948 mb_array_size * sizeof(float), fail);
949 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
950 mb_array_size * sizeof(float), fail);
954 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
955 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
956 /* interlaced direct mode decoding tables */
957 for (i = 0; i < 2; i++) {
959 for (j = 0; j < 2; j++) {
960 for (k = 0; k < 2; k++) {
961 FF_ALLOCZ_OR_GOTO(s->avctx,
962 s->b_field_mv_table_base[i][j][k],
963 mv_table_size * 2 * sizeof(int16_t),
965 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
968 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
969 mb_array_size * 2 * sizeof(uint8_t), fail);
970 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
971 mv_table_size * 2 * sizeof(int16_t), fail);
972 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
975 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
976 mb_array_size * 2 * sizeof(uint8_t), fail);
979 if (s->out_format == FMT_H263) {
981 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
982 s->coded_block = s->coded_block_base + s->b8_stride + 1;
984 /* cbp, ac_pred, pred_dir */
985 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
986 mb_array_size * sizeof(uint8_t), fail);
987 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
988 mb_array_size * sizeof(uint8_t), fail);
991 if (s->h263_pred || s->h263_plus || !s->encoding) {
993 // MN: we need these for error resilience of intra-frames
994 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
995 yc_size * sizeof(int16_t), fail);
996 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
997 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
998 s->dc_val[2] = s->dc_val[1] + c_size;
999 for (i = 0; i < yc_size; i++)
1000 s->dc_val_base[i] = 1024;
1003 /* which mb is a intra block */
1004 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1005 memset(s->mbintra_table, 1, mb_array_size);
1007 /* init macroblock skip table */
1008 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1009 // Note the + 1 is for a quicker mpeg4 slice_end detection
1013 return AVERROR(ENOMEM);
1017 * init common structure for both encoder and decoder.
1018 * this assumes that some variables like width/height are already set
/* Full context initialization: decides the slice-thread count (capped to
 * MAX_THREADS and mb_height), computes mb_height (interlaced MPEG-2 rounds
 * to macroblock *pairs*, hence /32*2), validates dimensions, initializes
 * DSP/scan tables, allocates encoder-only tables (quant matrices, stats,
 * reorder queues), the picture array, the resolution-dependent tables via
 * init_context_frame(), and finally one duplicate context per slice thread
 * with its start/end mb_y range. On failure everything is torn down via
 * ff_MPV_common_end().
 * NOTE(review): encoder-only guards, some braces and returns are missing
 * from this listing — confirm against the complete file. */
1020 av_cold int ff_MPV_common_init(MpegEncContext *s)
1023 int nb_slices = (HAVE_THREADS &&
1024 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1025 s->avctx->thread_count : 1;
1027 if (s->encoding && s->avctx->slices)
1028 nb_slices = s->avctx->slices;
1030 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1031 s->mb_height = (s->height + 31) / 32 * 2;
1033 s->mb_height = (s->height + 15) / 16;
1035 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1036 av_log(s->avctx, AV_LOG_ERROR,
1037 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1041 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1044 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1046 max_slices = MAX_THREADS;
1047 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1048 " reducing to %d\n", nb_slices, max_slices);
1049 nb_slices = max_slices;
1052 if ((s->width || s->height) &&
1053 av_image_check_size(s->width, s->height, 0, s->avctx))
1056 ff_dct_common_init(s);
1058 s->flags = s->avctx->flags;
1059 s->flags2 = s->avctx->flags2;
1061 if (s->width && s->height) {
1062 /* set chroma shifts */
1063 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1065 &s->chroma_y_shift);
1067 /* convert fourcc to upper case */
1068 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1070 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1072 s->avctx->coded_frame = &s->current_picture.f;
1075 if (s->msmpeg4_version) {
1076 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1077 2 * 2 * (MAX_LEVEL + 1) *
1078 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1080 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1082 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
1083 64 * 32 * sizeof(int), fail);
1084 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
1085 64 * 32 * sizeof(int), fail);
1086 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
1087 64 * 32 * 2 * sizeof(uint16_t), fail);
1088 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
1089 64 * 32 * 2 * sizeof(uint16_t), fail);
1090 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
1091 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1092 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
1093 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1095 if (s->avctx->noise_reduction) {
1096 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
1097 2 * 64 * sizeof(uint16_t), fail);
1102 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1103 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1104 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1105 avcodec_get_frame_defaults(&s->picture[i].f);
1107 memset(&s->next_picture, 0, sizeof(s->next_picture));
1108 memset(&s->last_picture, 0, sizeof(s->last_picture));
1109 memset(&s->current_picture, 0, sizeof(s->current_picture));
1110 avcodec_get_frame_defaults(&s->next_picture.f);
1111 avcodec_get_frame_defaults(&s->last_picture.f);
1112 avcodec_get_frame_defaults(&s->current_picture.f);
1114 if (s->width && s->height) {
1115 if (init_context_frame(s))
1118 s->parse_context.state = -1;
1121 s->context_initialized = 1;
1122 s->thread_context[0] = s;
1124 if (s->width && s->height) {
1125 if (nb_slices > 1) {
1126 for (i = 1; i < nb_slices; i++) {
1127 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1128 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1131 for (i = 0; i < nb_slices; i++) {
1132 if (init_duplicate_context(s->thread_context[i]) < 0)
1134 s->thread_context[i]->start_mb_y =
1135 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1136 s->thread_context[i]->end_mb_y =
1137 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1140 if (init_duplicate_context(s) < 0)
1143 s->end_mb_y = s->mb_height;
1145 s->slice_context_count = nb_slices;
1150 ff_MPV_common_end(s);
1155 * Frees and resets MpegEncContext fields depending on the resolution.
1156 * Is used during resolution changes to avoid a full reinitialization of the
/* Inverse of init_context_frame(): frees every resolution-sized table,
 * NULLs the derived (offset) pointers so they cannot dangle, and resets the
 * line sizes so the next allocation re-derives them. */
1159 static int free_context_frame(MpegEncContext *s)
1163 av_freep(&s->mb_type);
1164 av_freep(&s->p_mv_table_base);
1165 av_freep(&s->b_forw_mv_table_base);
1166 av_freep(&s->b_back_mv_table_base);
1167 av_freep(&s->b_bidir_forw_mv_table_base);
1168 av_freep(&s->b_bidir_back_mv_table_base);
1169 av_freep(&s->b_direct_mv_table_base);
1170 s->p_mv_table = NULL;
1171 s->b_forw_mv_table = NULL;
1172 s->b_back_mv_table = NULL;
1173 s->b_bidir_forw_mv_table = NULL;
1174 s->b_bidir_back_mv_table = NULL;
1175 s->b_direct_mv_table = NULL;
1176 for (i = 0; i < 2; i++) {
1177 for (j = 0; j < 2; j++) {
1178 for (k = 0; k < 2; k++) {
1179 av_freep(&s->b_field_mv_table_base[i][j][k]);
1180 s->b_field_mv_table[i][j][k] = NULL;
1182 av_freep(&s->b_field_select_table[i][j]);
1183 av_freep(&s->p_field_mv_table_base[i][j]);
1184 s->p_field_mv_table[i][j] = NULL;
1186 av_freep(&s->p_field_select_table[i]);
1189 av_freep(&s->dc_val_base);
1190 av_freep(&s->coded_block_base);
1191 av_freep(&s->mbintra_table);
1192 av_freep(&s->cbp_table);
1193 av_freep(&s->pred_dir_table);
1195 av_freep(&s->mbskip_table);
1197 av_freep(&s->er.error_status_table);
1198 av_freep(&s->er.er_temp_buffer);
1199 av_freep(&s->mb_index2xy);
1200 av_freep(&s->lambda_table);
1201 av_freep(&s->cplx_tab);
1202 av_freep(&s->bits_tab);
1204 s->linesize = s->uvlinesize = 0;
/**
 * Reinitialize the context for a new frame size without a full
 * ff_MPV_common_init(): tear down slice thread contexts and the
 * resolution-dependent tables, then rebuild them for the new dimensions.
 * NOTE(review): error-path lines (goto fail / return err) are missing from
 * this excerpt per the numbering gaps — verify against the full file.
 */
1209 int ff_MPV_common_frame_size_change(MpegEncContext *s)
     /* free per-slice duplicate contexts; index 0 is s itself and is
      * free_duplicate_context()ed but never av_freep()ed */
1213 if (s->slice_context_count > 1) {
1214 for (i = 0; i < s->slice_context_count; i++) {
1215 free_duplicate_context(s->thread_context[i]);
1217 for (i = 1; i < s->slice_context_count; i++) {
1218 av_freep(&s->thread_context[i]);
1221 free_duplicate_context(s);
1223 if ((err = free_context_frame(s)) < 0)
     /* all cached pictures must be reallocated at the new size */
1227 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1228 s->picture[i].needs_realloc = 1;
1231 s->last_picture_ptr =
1232 s->next_picture_ptr =
1233 s->current_picture_ptr = NULL;
     /* interlaced MPEG-2: mb_height counts 16-line MB rows but must be even
      * (two 16-line field rows per 32 lines of frame) */
1236 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1237 s->mb_height = (s->height + 31) / 32 * 2;
1239 s->mb_height = (s->height + 15) / 16;
1241 if ((s->width || s->height) &&
1242 av_image_check_size(s->width, s->height, 0, s->avctx))
1243 return AVERROR_INVALIDDATA;
1245 if ((err = init_context_frame(s)))
1248 s->thread_context[0] = s;
1250 if (s->width && s->height) {
1251 int nb_slices = s->slice_context_count;
1252 if (nb_slices > 1) {
     /* recreate the per-slice contexts as shallow copies of s */
1253 for (i = 1; i < nb_slices; i++) {
1254 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1255 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1258 for (i = 0; i < nb_slices; i++) {
1259 if (init_duplicate_context(s->thread_context[i]) < 0)
     /* divide the MB rows between slices, rounding to nearest */
1261 s->thread_context[i]->start_mb_y =
1262 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1263 s->thread_context[i]->end_mb_y =
1264 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1267 if (init_duplicate_context(s) < 0)
1270 s->end_mb_y = s->mb_height;
1272 s->slice_context_count = nb_slices;
     /* failure path: tear everything down (label lines missing in excerpt) */
1277 ff_MPV_common_end(s);
1281 /* free the common structure for both encoder and decoder
        (counterpart of ff_MPV_common_init()) */
1282 void ff_MPV_common_end(MpegEncContext *s)
     /* free slice thread contexts; context 0 is s itself, so only
      * indices >= 1 are av_freep()ed */
1286 if (s->slice_context_count > 1) {
1287 for (i = 0; i < s->slice_context_count; i++) {
1288 free_duplicate_context(s->thread_context[i]);
1290 for (i = 1; i < s->slice_context_count; i++) {
1291 av_freep(&s->thread_context[i]);
1293 s->slice_context_count = 1;
1294 } else free_duplicate_context(s);
1296 av_freep(&s->parse_context.buffer);
1297 s->parse_context.buffer_size = 0;
1299 av_freep(&s->bitstream_buffer);
1300 s->allocated_bitstream_buffer_size = 0;
     /* encoder-side allocations (harmless no-ops when NULL on decode) */
1302 av_freep(&s->avctx->stats_out);
1303 av_freep(&s->ac_stats);
1305 av_freep(&s->q_intra_matrix);
1306 av_freep(&s->q_inter_matrix);
1307 av_freep(&s->q_intra_matrix16);
1308 av_freep(&s->q_inter_matrix16);
1309 av_freep(&s->input_picture);
1310 av_freep(&s->reordered_input_picture);
1311 av_freep(&s->dct_offset);
     /* release every picture and its side-data tables */
1314 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1315 free_picture_tables(&s->picture[i]);
1316 ff_mpeg_unref_picture(s, &s->picture[i]);
1319 av_freep(&s->picture);
1320 free_picture_tables(&s->last_picture);
1321 ff_mpeg_unref_picture(s, &s->last_picture);
1322 free_picture_tables(&s->current_picture);
1323 ff_mpeg_unref_picture(s, &s->current_picture);
1324 free_picture_tables(&s->next_picture);
1325 ff_mpeg_unref_picture(s, &s->next_picture);
1326 free_picture_tables(&s->new_picture);
1327 ff_mpeg_unref_picture(s, &s->new_picture);
1329 free_context_frame(s);
     /* leave the context in a state where init can be called again */
1331 s->context_initialized = 0;
1332 s->last_picture_ptr =
1333 s->next_picture_ptr =
1334 s->current_picture_ptr = NULL;
1335 s->linesize = s->uvlinesize = 0;
/**
 * Initialize the derived max_level[], max_run[] and index_run[] tables of a
 * run-level (RL) table, either into caller-provided static storage or into
 * freshly av_malloc()ed buffers.
 * NOTE(review): this excerpt is missing lines (the start/end computation
 * inside the 'last' loop, several braces and an 'if (static_store)' around
 * line 1372) — confirm against the full file.
 */
1338 void ff_init_rl(RLTable *rl,
1339 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1341 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1342 uint8_t index_run[MAX_RUN + 1];
1343 int last, run, level, start, end, i;
1345 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1346 if (static_store && rl->max_level[0])
1349 /* compute max_level[], max_run[] and index_run[] */
1350 for (last = 0; last < 2; last++) {
     /* rl->n acts as the "unset" sentinel in index_run[] */
1359 memset(max_level, 0, MAX_RUN + 1);
1360 memset(max_run, 0, MAX_LEVEL + 1);
1361 memset(index_run, rl->n, MAX_RUN + 1);
1362 for (i = start; i < end; i++) {
1363 run = rl->table_run[i];
1364 level = rl->table_level[i];
1365 if (index_run[run] == rl->n)
1367 if (level > max_level[run])
1368 max_level[run] = level;
1369 if (run > max_run[level])
1370 max_run[level] = run;
     /* static storage layout per 'last':
      * [0 .. MAX_RUN]                      max_level
      * [MAX_RUN+1 .. MAX_RUN+MAX_LEVEL+1]  max_run
      * [MAX_RUN+MAX_LEVEL+2 .. end]        index_run */
1373 rl->max_level[last] = static_store[last];
1375 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1376 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1378 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1380 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1381 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1383 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1385 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1386 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Build the per-qscale RL-VLC lookup tables: for each of the 32 quantizer
 * values, pre-dequantize the (run, level) pairs of the VLC table so the
 * decoder can read run/level/len with a single table access.
 * NOTE(review): excerpt is missing lines (qmul setup near line 1395, the
 * handling of the illegal/escape/more-bits branches, closing braces) —
 * confirm against the full file.
 */
1390 void ff_init_vlc_rl(RLTable *rl)
1394 for (q = 0; q < 32; q++) {
     /* standard H.263-style dequant offset: (q - 1) | 1 forces odd */
1396 int qadd = (q - 1) | 1;
1402 for (i = 0; i < rl->vlc.table_size; i++) {
1403 int code = rl->vlc.table[i][0];
1404 int len = rl->vlc.table[i][1];
1407 if (len == 0) { // illegal code
1410 } else if (len < 0) { // more bits needed
1414 if (code == rl->n) { // esc
     /* pre-apply dequantization: level' = level * qmul + qadd */
1418 run = rl->table_run[code] + 1;
1419 level = rl->table_level[code] * qmul + qadd;
1420 if (code >= rl->last) run += 192;
1423 rl->rl_vlc[q][i].len = len;
1424 rl->rl_vlc[q][i].level = level;
1425 rl->rl_vlc[q][i].run = run;
/**
 * Unreference every picture in s->picture[] that is not a reference frame.
 * @param remove_current if zero, the picture pointed to by
 *                       s->current_picture_ptr is kept even if unreferenced
 */
1430 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1434 /* release non reference frames */
1435 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1436 if (!s->picture[i].reference &&
1437 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1438 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Whether this Picture slot may be (re)used: its data buffer is unallocated,
 * or it is flagged for reallocation and not held as a delayed reference.
 * NOTE(review): the return statements are missing from this excerpt
 * (numbering gaps) — confirm against the full file. */
1443 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1445 if (pic->f.data[0] == NULL)
1447 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Return the index of a usable slot in s->picture[], preferring one with no
 * allocated data buffer, else any slot pic_is_unused() accepts; returns
 * AVERROR_INVALIDDATA if every slot is in use.
 * NOTE(review): the 'return i;' lines and the shared-picture branch appear
 * to be missing from this excerpt — confirm against the full file. */
1452 static int find_unused_picture(MpegEncContext *s, int shared)
1457 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1458 if (s->picture[i].f.data[0] == NULL)
1462 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1463 if (pic_is_unused(s, &s->picture[i]))
1468 return AVERROR_INVALIDDATA;
/* Public wrapper around find_unused_picture(): if the chosen slot was only
 * "unused" because it needs reallocation, fully reset it (free side tables,
 * unref, restore frame defaults) before handing the index back. */
1471 int ff_find_unused_picture(MpegEncContext *s, int shared)
1473 int ret = find_unused_picture(s, shared);
1475 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1476 if (s->picture[ret].needs_realloc) {
1477 s->picture[ret].needs_realloc = 0;
1478 free_picture_tables(&s->picture[ret]);
1479 ff_mpeg_unref_picture(s, &s->picture[ret]);
1480 avcodec_get_frame_defaults(&s->picture[ret].f);
/* Encoder noise reduction: maintain running DCT error statistics per
 * intra/inter class and derive the per-coefficient dct_offset[] used to
 * bias quantization. Statistics are halved once the sample count exceeds
 * 2^16 so they track recent frames (exponential decay). */
1486 static void update_noise_reduction(MpegEncContext *s)
1490 for (intra = 0; intra < 2; intra++) {
1491 if (s->dct_count[intra] > (1 << 16)) {
1492 for (i = 0; i < 64; i++) {
1493 s->dct_error_sum[intra][i] >>= 1;
1495 s->dct_count[intra] >>= 1;
     /* offset = noise_reduction * count / error_sum, with rounding;
      * +1 in the denominator avoids division by zero */
1498 for (i = 0; i < 64; i++) {
1499 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1500 s->dct_count[intra] +
1501 s->dct_error_sum[intra][i] / 2) /
1502 (s->dct_error_sum[intra][i] + 1);
1508 * generic function for encode/decode called after coding/decoding
1509 * the header and before a frame is coded/decoded.
     /*
      * Per-frame setup: recycles old pictures, selects/allocates the current
      * picture, synthesizes dummy reference frames when references are
      * missing, adjusts data pointers/strides for field pictures, and picks
      * the dequantizer for the current codec.
      * NOTE(review): error-handling lines (returns on failure paths) are
      * missing from this excerpt per the numbering gaps.
      */
1511 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1517 /* mark & release old frames */
1518 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1519 s->last_picture_ptr != s->next_picture_ptr &&
1520 s->last_picture_ptr->f.data[0]) {
1521 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1524 /* release forgotten pictures */
1525 /* if (mpeg124/h263) */
1527 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1528 if (&s->picture[i] != s->last_picture_ptr &&
1529 &s->picture[i] != s->next_picture_ptr &&
1530 s->picture[i].reference && !s->picture[i].needs_realloc) {
     /* a still-referenced picture that is neither last nor next is a leak
      * (expected only under frame threading, where it is silent) */
1531 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1532 av_log(avctx, AV_LOG_ERROR,
1533 "releasing zombie picture\n");
1534 ff_mpeg_unref_picture(s, &s->picture[i]);
1540 ff_release_unused_pictures(s, 1);
1542 if (s->current_picture_ptr &&
1543 s->current_picture_ptr->f.data[0] == NULL) {
1544 // we already have a unused image
1545 // (maybe it was set before reading the header)
1546 pic = s->current_picture_ptr;
1548 i = ff_find_unused_picture(s, 0);
1550 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1553 pic = &s->picture[i];
     /* droppable frames are never marked as references */
1557 if (!s->droppable) {
1558 if (s->pict_type != AV_PICTURE_TYPE_B)
1562 pic->f.coded_picture_number = s->coded_picture_number++;
1564 if (ff_alloc_picture(s, pic, 0) < 0)
1567 s->current_picture_ptr = pic;
1568 // FIXME use only the vars from current_pic
1569 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1570 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1571 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
     /* for field pictures, derive top_field_first from which field
      * arrived first rather than from the sequence header flag */
1572 if (s->picture_structure != PICT_FRAME)
1573 s->current_picture_ptr->f.top_field_first =
1574 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1576 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1577 !s->progressive_sequence;
1578 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1581 s->current_picture_ptr->f.pict_type = s->pict_type;
1582 // if (s->flags && CODEC_FLAG_QSCALE)
1583 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1584 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1586 ff_mpeg_unref_picture(s, &s->current_picture);
1587 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1588 s->current_picture_ptr)) < 0)
     /* advance the reference window: B frames do not become references */
1591 if (s->pict_type != AV_PICTURE_TYPE_B) {
1592 s->last_picture_ptr = s->next_picture_ptr;
1594 s->next_picture_ptr = s->current_picture_ptr;
1596 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1597 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1598 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1599 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1600 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1601 s->pict_type, s->droppable);
     /* missing last reference (stream starts on a non-keyframe, or field
      * coding): fabricate a gray dummy so MC has something to read */
1603 if ((s->last_picture_ptr == NULL ||
1604 s->last_picture_ptr->f.data[0] == NULL) &&
1605 (s->pict_type != AV_PICTURE_TYPE_I ||
1606 s->picture_structure != PICT_FRAME)) {
1607 int h_chroma_shift, v_chroma_shift;
1608 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1609 &h_chroma_shift, &v_chroma_shift);
1610 if (s->pict_type != AV_PICTURE_TYPE_I)
1611 av_log(avctx, AV_LOG_ERROR,
1612 "warning: first frame is no keyframe\n");
1613 else if (s->picture_structure != PICT_FRAME)
1614 av_log(avctx, AV_LOG_INFO,
1615 "allocate dummy last picture for field based first keyframe\n");
1617 /* Allocate a dummy frame */
1618 i = ff_find_unused_picture(s, 0);
1620 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1623 s->last_picture_ptr = &s->picture[i];
1624 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1625 s->last_picture_ptr = NULL;
     /* fill with black luma / neutral (0x80) chroma */
1629 memset(s->last_picture_ptr->f.data[0], 0,
1630 avctx->height * s->last_picture_ptr->f.linesize[0]);
1631 memset(s->last_picture_ptr->f.data[1], 0x80,
1632 (avctx->height >> v_chroma_shift) *
1633 s->last_picture_ptr->f.linesize[1]);
1634 memset(s->last_picture_ptr->f.data[2], 0x80,
1635 (avctx->height >> v_chroma_shift) *
1636 s->last_picture_ptr->f.linesize[2]);
     /* mark both fields fully decoded so frame threads never wait on it */
1638 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1639 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
     /* same for a missing forward reference of a B frame */
1641 if ((s->next_picture_ptr == NULL ||
1642 s->next_picture_ptr->f.data[0] == NULL) &&
1643 s->pict_type == AV_PICTURE_TYPE_B) {
1644 /* Allocate a dummy frame */
1645 i = ff_find_unused_picture(s, 0);
1647 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1650 s->next_picture_ptr = &s->picture[i];
1651 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1652 s->next_picture_ptr = NULL;
1655 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1656 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
     /* refresh the local last/next Picture copies from the pointers */
1659 if (s->last_picture_ptr) {
1660 ff_mpeg_unref_picture(s, &s->last_picture);
1661 if (s->last_picture_ptr->f.data[0] &&
1662 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1663 s->last_picture_ptr)) < 0)
1666 if (s->next_picture_ptr) {
1667 ff_mpeg_unref_picture(s, &s->next_picture);
1668 if (s->next_picture_ptr->f.data[0] &&
1669 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1670 s->next_picture_ptr)) < 0)
1674 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1675 s->last_picture_ptr->f.data[0]));
     /* field picture: point data at the right field and double the strides
      * so the frame buffer is addressed as one field */
1677 if (s->picture_structure!= PICT_FRAME) {
1679 for (i = 0; i < 4; i++) {
1680 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1681 s->current_picture.f.data[i] +=
1682 s->current_picture.f.linesize[i];
1684 s->current_picture.f.linesize[i] *= 2;
1685 s->last_picture.f.linesize[i] *= 2;
1686 s->next_picture.f.linesize[i] *= 2;
1690 s->err_recognition = avctx->err_recognition;
1692 /* set dequantizer, we can't do it during init as
1693 * it might change for mpeg4 and we can't do it in the header
1694 * decode as init is not called for mpeg4 there yet */
1695 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1696 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1697 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1698 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1699 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1700 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1702 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1703 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1706 if (s->dct_error_sum) {
1707 assert(s->avctx->noise_reduction && s->encoding);
1708 update_noise_reduction(s);
1711 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1712 return ff_xvmc_field_start(s, avctx);
1717 /* generic function for encode/decode called after a
1718 * frame has been coded/decoded. */
     /* Per-frame teardown: draws padding edges on the finished reference
      * frame (for unrestricted MV prediction), records per-type state for
      * rate control, writes the finished picture back into s->picture[],
      * releases non-reference frames and publishes coded_frame/progress. */
1719 void ff_MPV_frame_end(MpegEncContext *s)
1722 /* redraw edges for the frame if decoding didn't complete */
1723 // just to make sure that all data is rendered.
1724 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1725 ff_xvmc_field_end(s);
     /* edges only needed when the picture is a reference, MVs may point
      * outside the frame, and no hwaccel owns the surface */
1726 } else if ((s->er.error_count || s->encoding) &&
1727 !s->avctx->hwaccel &&
1728 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1729 s->unrestricted_mv &&
1730 s->current_picture.reference &&
1732 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1733 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1734 int hshift = desc->log2_chroma_w;
1735 int vshift = desc->log2_chroma_h;
1736 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1737 s->h_edge_pos, s->v_edge_pos,
1738 EDGE_WIDTH, EDGE_WIDTH,
1739 EDGE_TOP | EDGE_BOTTOM);
1740 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1741 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1742 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1743 EDGE_TOP | EDGE_BOTTOM);
1744 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1745 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1746 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1747 EDGE_TOP | EDGE_BOTTOM);
     /* remember lambda/type for rate-control of the next frame */
1752 s->last_pict_type = s->pict_type;
1753 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1754 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1755 s->last_non_b_pict_type = s->pict_type;
1758 /* copy back current_picture variables */
1759 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1760 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1761 s->picture[i] = s->current_picture;
1765 assert(i < MAX_PICTURE_COUNT);
1769 /* release non-reference frames */
1770 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1771 if (!s->picture[i].reference)
1772 ff_mpeg_unref_picture(s, &s->picture[i]);
1775 // clear copies, to avoid confusion
1777 memset(&s->last_picture, 0, sizeof(Picture));
1778 memset(&s->next_picture, 0, sizeof(Picture));
1779 memset(&s->current_picture, 0, sizeof(Picture));
1781 s->avctx->coded_frame = &s->current_picture_ptr->f;
1783 if (s->current_picture.reference)
1784 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1788 * Print debugging info for the given picture.
     /* Emits, per macroblock row, one character (or digit) per MB encoding
      * the skip count, QP and/or MB type depending on avctx->debug flags. */
1790 void ff_print_debug_info(MpegEncContext *s, Picture *p)
1793 if (s->avctx->hwaccel || !p || !p->mb_type)
1797 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1800 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1801 switch (pict->pict_type) {
1802 case AV_PICTURE_TYPE_I:
1803 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1805 case AV_PICTURE_TYPE_P:
1806 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1808 case AV_PICTURE_TYPE_B:
1809 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1811 case AV_PICTURE_TYPE_S:
1812 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1814 case AV_PICTURE_TYPE_SI:
1815 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1817 case AV_PICTURE_TYPE_SP:
1818 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1821 for (y = 0; y < s->mb_height; y++) {
1822 for (x = 0; x < s->mb_width; x++) {
1823 if (s->avctx->debug & FF_DEBUG_SKIP) {
1824 int count = s->mbskip_table[x + y * s->mb_stride];
1827 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1829 if (s->avctx->debug & FF_DEBUG_QP) {
1830 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1831 p->qscale_table[x + y * s->mb_stride]);
1833 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1834 int mb_type = p->mb_type[x + y * s->mb_stride];
1835 // Type & MV direction
1836 if (IS_PCM(mb_type))
1837 av_log(s->avctx, AV_LOG_DEBUG, "P");
1838 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1839 av_log(s->avctx, AV_LOG_DEBUG, "A");
1840 else if (IS_INTRA4x4(mb_type))
1841 av_log(s->avctx, AV_LOG_DEBUG, "i");
1842 else if (IS_INTRA16x16(mb_type))
1843 av_log(s->avctx, AV_LOG_DEBUG, "I");
1844 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1845 av_log(s->avctx, AV_LOG_DEBUG, "d");
1846 else if (IS_DIRECT(mb_type))
1847 av_log(s->avctx, AV_LOG_DEBUG, "D");
1848 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1849 av_log(s->avctx, AV_LOG_DEBUG, "g");
1850 else if (IS_GMC(mb_type))
1851 av_log(s->avctx, AV_LOG_DEBUG, "G");
1852 else if (IS_SKIP(mb_type))
1853 av_log(s->avctx, AV_LOG_DEBUG, "S");
1854 else if (!USES_LIST(mb_type, 1))
1855 av_log(s->avctx, AV_LOG_DEBUG, ">");
1856 else if (!USES_LIST(mb_type, 0))
1857 av_log(s->avctx, AV_LOG_DEBUG, "<");
1859 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1860 av_log(s->avctx, AV_LOG_DEBUG, "X");
     // Segmentation: '+'=8x8, '-'=16x8, '|'=8x16, ' '=16x16/intra
1864 if (IS_8X8(mb_type))
1865 av_log(s->avctx, AV_LOG_DEBUG, "+");
1866 else if (IS_16X8(mb_type))
1867 av_log(s->avctx, AV_LOG_DEBUG, "-");
1868 else if (IS_8X16(mb_type))
1869 av_log(s->avctx, AV_LOG_DEBUG, "|");
1870 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1871 av_log(s->avctx, AV_LOG_DEBUG, " ");
1873 av_log(s->avctx, AV_LOG_DEBUG, "?");
     // '=' marks an interlaced-DCT macroblock
1876 if (IS_INTERLACED(mb_type))
1877 av_log(s->avctx, AV_LOG_DEBUG, "=");
1879 av_log(s->avctx, AV_LOG_DEBUG, " ");
1882 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1888 * find the lowest MB row referenced in the MVs
     /* Used by frame threading to know how far the reference frame must be
      * decoded before the current MB can be motion-compensated.
      * NOTE(review): the switch cases setting 'mvs' (lines 1898-1912) are
      * missing from this excerpt. */
1890 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
     /* qpel MVs are already in 1/4-pel units; half-pel MVs get shifted up */
1892 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1893 int my, off, i, mvs;
     /* field pictures / GMC: fall back to "whole frame needed" */
1895 if (s->picture_structure != PICT_FRAME || s->mcsel)
1898 switch (s->mv_type) {
1912 for (i = 0; i < mvs; i++) {
1913 my = s->mv[dir][i][1]<<qpel_shift;
1914 my_max = FFMAX(my_max, my);
1915 my_min = FFMIN(my_min, my);
     /* +63 >> 6: convert worst-case 1/4-pel displacement to MB rows,
      * rounding up (with interpolation margin) */
1918 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1920 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1922 return s->mb_height-1;
1925 /* put block[] to dest[] */
     /* Dequantize an intra block and IDCT it, overwriting dest (no add). */
1926 static inline void put_dct(MpegEncContext *s,
1927 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1929 s->dct_unquantize_intra(s, block, i, qscale);
1930 s->dsp.idct_put (dest, line_size, block);
1933 /* add block[] to dest[] */
     /* IDCT an already-dequantized block and add the residual onto dest;
      * skipped entirely when the block has no coded coefficients. */
1934 static inline void add_dct(MpegEncContext *s,
1935 int16_t *block, int i, uint8_t *dest, int line_size)
1937 if (s->block_last_index[i] >= 0) {
1938 s->dsp.idct_add (dest, line_size, block);
/* Like add_dct(), but dequantizes the inter block first; skipped when the
 * block has no coded coefficients (block_last_index < 0). */
1942 static inline void add_dequant_dct(MpegEncContext *s,
1943 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1945 if (s->block_last_index[i] >= 0) {
1946 s->dct_unquantize_inter(s, block, i, qscale);
1948 s->dsp.idct_add (dest, line_size, block);
1953 * Clean dc, ac, coded_block for the current non-intra MB.
     /* Resets the intra-prediction state (DC predictors to 1024, AC values
      * to 0, coded_block flags) so a following intra MB does not predict
      * from stale neighbours, then clears mbintra_table for this MB. */
1955 void ff_clean_intra_table_entries(MpegEncContext *s)
1957 int wrap = s->b8_stride;
1958 int xy = s->block_index[0];
     /* luma: 4 8x8 blocks addressed at xy, xy+1, xy+wrap, xy+1+wrap */
1961 s->dc_val[0][xy + 1 ] =
1962 s->dc_val[0][xy + wrap] =
1963 s->dc_val[0][xy + 1 + wrap] = 1024;
1965 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1966 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
     /* coded_block is only maintained by msmpeg4 v3+ */
1967 if (s->msmpeg4_version>=3) {
1968 s->coded_block[xy ] =
1969 s->coded_block[xy + 1 ] =
1970 s->coded_block[xy + wrap] =
1971 s->coded_block[xy + 1 + wrap] = 0;
     /* chroma: one block per plane, addressed on the MB grid */
1974 wrap = s->mb_stride;
1975 xy = s->mb_x + s->mb_y * wrap;
1977 s->dc_val[2][xy] = 1024;
1979 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1980 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1982 s->mbintra_table[xy]= 0;
1985 /* generic function called after a macroblock has been parsed by the
1986 decoder or after it has been encoded by the encoder.
1988 Important variables used:
1989 s->mb_intra : true if intra macroblock
1990 s->mv_dir : motion vector direction
1991 s->mv_type : motion vector type
1992 s->mv : motion vector
1993 s->interlaced_dct : true if interlaced dct used (mpeg2)
     NOTE(review): many lines are missing from this excerpt (closing braces,
     several else branches, the encoding/decoding guards) per the gaps in
     the embedded numbering — treat the structure below as partial. */
1995 static av_always_inline
1996 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
1999 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
     /* XvMC handles the whole MB reconstruction itself */
2000 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2001 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2005 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2006 /* print DCT coefficients */
2008 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2010 for(j=0; j<64; j++){
2011 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2013 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2017 s->current_picture.qscale_table[mb_xy] = s->qscale;
2019 /* update DC predictors for P macroblocks */
2021 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2022 if(s->mbintra_table[mb_xy])
2023 ff_clean_intra_table_entries(s);
2027 s->last_dc[2] = 128 << s->intra_dc_precision;
2030 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2031 s->mbintra_table[mb_xy]=1;
     /* skip full reconstruction when encoding and PSNR isn't requested
      * and this frame's pixels are never referenced again */
2033 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2034 uint8_t *dest_y, *dest_cb, *dest_cr;
2035 int dct_linesize, dct_offset;
2036 op_pixels_func (*op_pix)[4];
2037 qpel_mc_func (*op_qpix)[16];
2038 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2039 const int uvlinesize = s->current_picture.f.linesize[1];
2040 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2041 const int block_size = 8;
2043 /* avoid copy if macroblock skipped in last frame too */
2044 /* skip only during decoding as we might trash the buffers during encoding a bit */
2046 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2048 if (s->mb_skipped) {
2050 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2052 } else if(!s->current_picture.reference) {
2055 *mbskip_ptr = 0; /* not skipped */
     /* interlaced DCT: rows of one field are adjacent -> double stride,
      * second field starts one line (not 8) below the first */
2059 dct_linesize = linesize << s->interlaced_dct;
2060 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2064 dest_cb= s->dest[1];
2065 dest_cr= s->dest[2];
     /* non-readable B frames are reconstructed into a scratchpad and
      * copied out at the end (see put_pixels_tab below) */
2067 dest_y = s->b_scratchpad;
2068 dest_cb= s->b_scratchpad+16*linesize;
2069 dest_cr= s->b_scratchpad+32*linesize;
2073 /* motion handling */
2074 /* decoding or more than one mb_type (MC was already done otherwise) */
     /* frame threading: wait until the reference rows we need exist */
2077 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2078 if (s->mv_dir & MV_DIR_FORWARD) {
2079 ff_thread_await_progress(&s->last_picture_ptr->tf,
2080 ff_MPV_lowest_referenced_row(s, 0),
2083 if (s->mv_dir & MV_DIR_BACKWARD) {
2084 ff_thread_await_progress(&s->next_picture_ptr->tf,
2085 ff_MPV_lowest_referenced_row(s, 1),
2090 op_qpix= s->me.qpel_put;
2091 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2092 op_pix = s->dsp.put_pixels_tab;
2094 op_pix = s->dsp.put_no_rnd_pixels_tab;
     /* forward MC writes, backward MC averages on top (B frames) */
2096 if (s->mv_dir & MV_DIR_FORWARD) {
2097 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2098 op_pix = s->dsp.avg_pixels_tab;
2099 op_qpix= s->me.qpel_avg;
2101 if (s->mv_dir & MV_DIR_BACKWARD) {
2102 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2106 /* skip dequant / idct if we are really late ;) */
2107 if(s->avctx->skip_idct){
2108 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2109 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2110 || s->avctx->skip_idct >= AVDISCARD_ALL)
2114 /* add dct residue */
     /* codecs whose bitstream carries already-dequantized coefficients
      * use add_dct(); the rest dequantize here via add_dequant_dct() */
2115 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2116 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2117 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2118 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2119 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2120 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2122 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2123 if (s->chroma_y_shift){
2124 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2125 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2129 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2130 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2131 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2132 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2135 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2136 add_dct(s, block[0], 0, dest_y , dct_linesize);
2137 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2138 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2139 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2141 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2142 if(s->chroma_y_shift){//Chroma420
2143 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2144 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2147 dct_linesize = uvlinesize << s->interlaced_dct;
2148 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2150 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2151 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2152 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2153 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2154 if(!s->chroma_x_shift){//Chroma444
2155 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2156 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2157 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2158 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2163 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2164 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2167 /* dct only in intra block */
     /* intra MBs: blocks overwrite the destination (put, not add) */
2168 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2169 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2170 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2171 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2172 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2174 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2175 if(s->chroma_y_shift){
2176 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2177 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2181 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2182 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2183 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2184 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2188 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2189 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2190 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2191 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2193 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2194 if(s->chroma_y_shift){
2195 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2196 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2199 dct_linesize = uvlinesize << s->interlaced_dct;
2200 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2202 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2203 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2204 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2205 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2206 if(!s->chroma_x_shift){//Chroma444
2207 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2208 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2209 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2210 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
     /* scratchpad path: copy the reconstructed MB to its real position */
2218 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2219 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2220 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Dispatch to MPV_decode_mb_internal() with is_mpeg12 as a compile-time
 * constant, so dead branches for the other codec family are eliminated. */
2225 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2227 if(s->out_format == FMT_MPEG1) {
2228 MPV_decode_mb_internal(s, block, 1);
2231 MPV_decode_mb_internal(s, block, 0);
2235 * @param h is the normal height, this will be reduced automatically if needed for the last row
     /* Draw the padding edges for the band just finished (when permitted by
      * the hwaccel/flags), then invoke the user's draw_horiz_band callback
      * with plane offsets for rows [y, y+h). */
2237 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2238 Picture *last, int y, int h, int picture_structure,
2239 int first_field, int draw_edges, int low_delay,
2240 int v_edge_pos, int h_edge_pos)
2242 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2243 int hshift = desc->log2_chroma_w;
2244 int vshift = desc->log2_chroma_h;
2245 const int field_pic = picture_structure != PICT_FRAME;
     /* edge drawing is skipped when a hwaccel owns the surface or the
      * caller asked for exact-size buffers (EMU_EDGE) */
2251 if (!avctx->hwaccel &&
2252 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2255 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2256 int *linesize = cur->f.linesize;
2257 int sides = 0, edge_h;
2258 if (y==0) sides |= EDGE_TOP;
2259 if (y + h >= v_edge_pos)
2260 sides |= EDGE_BOTTOM;
2262 edge_h= FFMIN(h, v_edge_pos - y);
2264 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2265 linesize[0], h_edge_pos, edge_h,
2266 EDGE_WIDTH, EDGE_WIDTH, sides);
2267 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2268 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2269 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2270 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2271 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2272 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
     /* clamp the band to the visible picture height */
2275 h = FFMIN(h, avctx->height - y);
2277 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2279 if (avctx->draw_horiz_band) {
2281 int offset[AV_NUM_DATA_POINTERS];
     /* choose which picture to hand out: the current one when it is
      * displayable now, otherwise (src selection lines missing here) */
2284 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2285 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2292 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2293 picture_structure == PICT_FRAME &&
2294 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2295 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2298 offset[0]= y * src->linesize[0];
2300 offset[2]= (y >> vshift) * src->linesize[1];
2301 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2307 avctx->draw_horiz_band(avctx, src, offset,
2308 y, picture_structure, h);
/* Convenience wrapper: forward the band notification for an
 * MpegEncContext, deriving all parameters from the context.  Edge
 * drawing is needed only for unrestricted-MV inter coding. */
2312 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2314 int draw_edges = s->unrestricted_mv && !s->intra_only;
2315 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2316 &s->last_picture, y, h, s->picture_structure,
2317 s->first_field, draw_edges, s->low_delay,
2318 s->v_edge_pos, s->h_edge_pos);
/* Set up per-macroblock block indices and destination pointers for
 * the macroblock at (s->mb_x, s->mb_y).  Must be called before
 * decoding/encoding each MB row position. */
2321 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2322 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2323 const int uvlinesize = s->current_picture.f.linesize[1];
2324 const int mb_size= 4;
/* Indices 0-3: the four 8x8 luma blocks laid out on the b8 grid
 * (two per row, hence the *2 and the +1 row offset). */
2326 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2327 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2328 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2329 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* Indices 4-5: the two chroma blocks, stored after the luma area. */
2330 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2331 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2332 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* Destination pointers for the MB one to the left of mb_x (callers
 * advance them); 1<<mb_size == 16-pixel luma MB width. */
2334 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2335 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2336 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
/* Skip the vertical offset for B frames rendered via
 * draw_horiz_band in frame-picture mode (handled elsewhere). */
2338 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2340 if(s->picture_structure==PICT_FRAME){
2341 s->dest[0] += s->mb_y * linesize << mb_size;
2342 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2343 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* Field picture: rows are interleaved, so halve mb_y; the parity
 * of mb_y must match the field being decoded. */
2345 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2346 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2347 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2348 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2354 * Permute an 8x8 block.
2355 * @param block the block which will be permuted according to the given permutation vector
2356 * @param permutation the permutation vector
2357 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
2358 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
2359 * (inverse) permuted to scantable order!
/* Permute the coefficients of an 8x8 block in place.  First pass
 * gathers the up-to-(last+1) nonzero coefficients (into a temp
 * buffer in the elided lines), second pass scatters them back to
 * their permuted positions. */
2361 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
2367 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* Gather pass: visit coefficients in scantable order up to 'last'. */
2369 for(i=0; i<=last; i++){
2370 const int j= scantable[i];
/* Scatter pass: write each saved coefficient to permutation[j]. */
2375 for(i=0; i<=last; i++){
2376 const int j= scantable[i];
2377 const int perm_j= permutation[j];
2378 block[perm_j]= temp[j];
/* Discard all buffered pictures and reset parser/bitstream state,
 * e.g. after a seek.  Safe to call on a context whose picture
 * array was never allocated. */
2382 void ff_mpeg_flush(AVCodecContext *avctx){
2384 MpegEncContext *s = avctx->priv_data;
2386 if(s==NULL || s->picture==NULL)
2389 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2390 ff_mpeg_unref_picture(s, &s->picture[i]);
2391 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2393 s->mb_x= s->mb_y= 0;
/* Reset the startcode parser so stale partial frames are dropped. */
2395 s->parse_context.state= -1;
2396 s->parse_context.frame_start_found= 0;
2397 s->parse_context.overread= 0;
2398 s->parse_context.overread_index= 0;
2399 s->parse_context.index= 0;
2400 s->parse_context.last_index= 0;
2401 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantization: DC is scaled by the DC scaler
 * (luma/chroma chosen in the elided condition, presumably n<4),
 * AC coefficients by qscale*matrix with the MPEG-1 "oddification"
 * (level-1)|1 that forces reconstructed levels odd to limit IDCT
 * mismatch accumulation. */
2405 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2406 int16_t *block, int n, int qscale)
2408 int i, level, nCoeffs;
2409 const uint16_t *quant_matrix;
2411 nCoeffs= s->block_last_index[n];
2414 block[0] = block[0] * s->y_dc_scale;
2416 block[0] = block[0] * s->c_dc_scale;
2417 /* XXX: only mpeg1 */
2418 quant_matrix = s->intra_matrix;
2419 for(i=1;i<=nCoeffs;i++) {
2420 int j= s->intra_scantable.permutated[i];
/* The two identical-looking arms handle negative and positive
 * levels (sign handling lives in the elided lines). */
2425 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2426 level = (level - 1) | 1;
2429 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2430 level = (level - 1) | 1;
/* MPEG-1 inter dequantization: ((2*level+1)*qscale*matrix)>>4 with
 * the same (level-1)|1 oddification as the intra path; no special
 * DC handling for inter blocks.  Uses intra_scantable.permutated
 * deliberately -- the permutation is identical for both scantables. */
2437 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2438 int16_t *block, int n, int qscale)
2440 int i, level, nCoeffs;
2441 const uint16_t *quant_matrix;
2443 nCoeffs= s->block_last_index[n];
2445 quant_matrix = s->inter_matrix;
2446 for(i=0; i<=nCoeffs; i++) {
2447 int j= s->intra_scantable.permutated[i];
/* Negative branch, then positive branch (sign tests elided). */
2452 level = (((level << 1) + 1) * qscale *
2453 ((int) (quant_matrix[j]))) >> 4;
2454 level = (level - 1) | 1;
2457 level = (((level << 1) + 1) * qscale *
2458 ((int) (quant_matrix[j]))) >> 4;
2459 level = (level - 1) | 1;
/* MPEG-2 intra dequantization: like the MPEG-1 version but with no
 * oddification (MPEG-2 uses a sum-parity mismatch control instead,
 * not applied in this non-bitexact variant).  alternate_scan forces
 * all 64 coefficients to be processed. */
2466 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2467 int16_t *block, int n, int qscale)
2469 int i, level, nCoeffs;
2470 const uint16_t *quant_matrix;
2472 if(s->alternate_scan) nCoeffs= 63;
2473 else nCoeffs= s->block_last_index[n];
2476 block[0] = block[0] * s->y_dc_scale;
2478 block[0] = block[0] * s->c_dc_scale;
2479 quant_matrix = s->intra_matrix;
2480 for(i=1;i<=nCoeffs;i++) {
2481 int j= s->intra_scantable.permutated[i];
/* Negative branch, then positive branch (sign tests elided). */
2486 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2489 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact MPEG-2 intra dequantization: same arithmetic as
 * dct_unquantize_mpeg2_intra_c but the elided lines track the
 * coefficient sum for the spec's parity-based mismatch control
 * (toggling the last coefficient's LSB). */
2496 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2497 int16_t *block, int n, int qscale)
2499 int i, level, nCoeffs;
2500 const uint16_t *quant_matrix;
2503 if(s->alternate_scan) nCoeffs= 63;
2504 else nCoeffs= s->block_last_index[n];
2507 block[0] = block[0] * s->y_dc_scale;
2509 block[0] = block[0] * s->c_dc_scale;
2510 quant_matrix = s->intra_matrix;
2511 for(i=1;i<=nCoeffs;i++) {
2512 int j= s->intra_scantable.permutated[i];
/* Negative branch, then positive branch (sign tests elided). */
2517 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2520 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantization: ((2*level+1)*qscale*matrix)>>4, no
 * oddification (MPEG-2 mismatch control is handled in elided
 * lines).  Uses intra_scantable.permutated deliberately -- the
 * permutation is identical for both scantables. */
2529 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2530 int16_t *block, int n, int qscale)
2532 int i, level, nCoeffs;
2533 const uint16_t *quant_matrix;
2536 if(s->alternate_scan) nCoeffs= 63;
2537 else nCoeffs= s->block_last_index[n];
2539 quant_matrix = s->inter_matrix;
2540 for(i=0; i<=nCoeffs; i++) {
2541 int j= s->intra_scantable.permutated[i];
/* Negative branch, then positive branch (sign tests elided). */
2546 level = (((level << 1) + 1) * qscale *
2547 ((int) (quant_matrix[j]))) >> 4;
2550 level = (((level << 1) + 1) * qscale *
2551 ((int) (quant_matrix[j]))) >> 4;
/* H.263 intra dequantization: level' = level*qmul +/- qadd with
 * qmul presumably 2*qscale (set in elided lines) and qadd forced
 * odd; the sign of qadd follows the sign of the level.  DC is
 * scaled separately by the luma/chroma DC scaler. */
2560 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2561 int16_t *block, int n, int qscale)
2563 int i, level, qmul, qadd;
2566 assert(s->block_last_index[n]>=0);
2572 block[0] = block[0] * s->y_dc_scale;
2574 block[0] = block[0] * s->c_dc_scale;
2575 qadd = (qscale - 1) | 1;
/* raster_end maps the last scantable index to the last raster
 * position, letting the loop run in raster order. */
2582 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2584 for(i=1; i<=nCoeffs; i++) {
/* Negative levels subtract qadd, positive levels add it. */
2588 level = level * qmul - qadd;
2590 level = level * qmul + qadd;
/* H.263 inter dequantization: identical arithmetic to the intra
 * variant but without DC scaling, and the loop starts at
 * coefficient 0 since inter blocks have no special DC. */
2597 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2598 int16_t *block, int n, int qscale)
2600 int i, level, qmul, qadd;
2603 assert(s->block_last_index[n]>=0);
2605 qadd = (qscale - 1) | 1;
2608 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2610 for(i=0; i<=nCoeffs; i++) {
/* Negative levels subtract qadd, positive levels add it. */
2614 level = level * qmul - qadd;
2616 level = level * qmul + qadd;
2624 * set qscale and update qscale dependent variables.
/* Set qscale, clamped to the legal 1..31 range (clamp assignments
 * are in the elided lines), and refresh the derived chroma qscale
 * and luma/chroma DC scalers that depend on it. */
2626 void ff_set_qscale(MpegEncContext * s, int qscale)
2630 else if (qscale > 31)
2634 s->chroma_qscale= s->chroma_qscale_table[qscale];
2636 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2637 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report frame-threading decode progress up to the current MB row.
 * Suppressed for B frames (not referenced), partitioned frames
 * (rows complete out of order) and after errors (progress would
 * overstate what is actually decodable). */
2640 void ff_MPV_report_decode_progress(MpegEncContext *s)
2642 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2643 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
2646 #if CONFIG_ERROR_RESILIENCE
/* Mirror the per-frame decoder state into the error-resilience
 * context and start ER tracking for the new frame. */
2647 void ff_mpeg_er_frame_start(MpegEncContext *s)
2649 ERContext *er = &s->er;
2651 er->cur_pic = s->current_picture_ptr;
2652 er->last_pic = s->last_picture_ptr;
2653 er->next_pic = s->next_picture_ptr;
2655 er->pp_time = s->pp_time;
2656 er->pb_time = s->pb_time;
2657 er->quarter_sample = s->quarter_sample;
2658 er->partitioned_frame = s->partitioned_frame;
2660 ff_er_frame_start(er);
2662 #endif /* CONFIG_ERROR_RESILIENCE */