2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
37 #include "mpegvideo.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the C reference DCT dequantization routines.
 * One variant exists per codec family (MPEG-1, MPEG-2, H.263) and per
 * block type (intra vs. inter); ff_dct_common_init() installs these as
 * the default function pointers in MpegEncContext (arch-specific init
 * may override them afterwards). */
44 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
45 int16_t *block, int n, int qscale);
46 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
47 int16_t *block, int n, int qscale);
48 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
49 int16_t *block, int n, int qscale);
/* bit-exact MPEG-2 intra variant, selected when CODEC_FLAG_BITEXACT is set */
50 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
51 int16_t *block, int n, int qscale);
52 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
53 int16_t *block, int n, int qscale);
54 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
55 int16_t *block, int n, int qscale);
56 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
57 int16_t *block, int n, int qscale);
/* Identity mapping from luma qscale to chroma qscale (chroma qscale ==
 * luma qscale); used as the default in ff_MPV_common_defaults(). */
59 static const uint8_t ff_default_chroma_qscale_table[32] = {
60 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
61 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
62 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 intra DC scale: constant 8 for every qscale value.
 * Also reused as entry 0 of ff_mpeg2_dc_scale_table[] below. */
65 const uint8_t ff_mpeg1_dc_scale_table[128] = {
66 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
67 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
68 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 intra DC scale for intra_dc_precision == 1 (constant 4). */
77 static const uint8_t mpeg2_dc_scale_table1[128] = {
78 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
79 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
80 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 intra DC scale for intra_dc_precision == 2 (constant 2). */
89 static const uint8_t mpeg2_dc_scale_table2[128] = {
90 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
91 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 intra DC scale for intra_dc_precision == 3 (constant 1). */
101 static const uint8_t mpeg2_dc_scale_table3[128] = {
102 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
103 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
104 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Indexed by intra_dc_precision (0..3): selects the per-qscale intra DC
 * scale table for MPEG-2 decoding. */
113 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
114 ff_mpeg1_dc_scale_table,
115 mpeg2_dc_scale_table1,
116 mpeg2_dc_scale_table2,
117 mpeg2_dc_scale_table3,
/* Default pixel-format list for 4:2:0 codecs; the initializer is not
 * visible in this chunk — presumably AV_PIX_FMT_YUV420P terminated by
 * AV_PIX_FMT_NONE (TODO confirm against the full file). */
120 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback: reconstructs one macroblock at (mb_x, mb_y)
 * using the motion data supplied by the error concealment code, then lets
 * ff_MPV_decode_mb() render it into the current picture.  Registered as
 * er->decode_mb in init_er(). */
125 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
127 int mb_x, int mb_y, int mb_intra, int mb_skipped)
129 MpegEncContext *s = opaque;
132 s->mv_type = mv_type;
133 s->mb_intra = mb_intra;
134 s->mb_skipped = mb_skipped;
137 memcpy(s->mv, mv, sizeof(*mv));
139 ff_init_block_index(s);
140 ff_update_block_index(s);
/* concealment supplies no coefficients, so the blocks must be zeroed */
142 s->dsp.clear_blocks(s->block[0]);
/* point dest at the target MB; chroma planes are scaled by the chroma
 * subsampling shifts of the current pixel format */
144 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
145 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
146 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
149 ff_MPV_decode_mb(s, s->block);
152 /* init common dct for both encoder and decoder */
153 av_cold int ff_dct_common_init(MpegEncContext *s)
/* initialize the generic DSP helper contexts */
155 ff_dsputil_init(&s->dsp, s->avctx);
156 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
157 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* install the C reference dequantizers; arch-specific init below may
 * replace them with optimized versions */
159 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
160 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
161 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
162 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
163 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
164 if (s->flags & CODEC_FLAG_BITEXACT)
165 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
166 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* per-architecture overrides (the guarding #if/#elif lines are not
 * visible in this chunk) */
169 ff_MPV_common_init_x86(s);
171 ff_MPV_common_init_axp(s);
173 ff_MPV_common_init_arm(s);
175 ff_MPV_common_init_bfin(s);
177 ff_MPV_common_init_ppc(s);
180 /* load & permutate scantables
181 * note: only wmv uses different ones
183 if (s->alternate_scan) {
184 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
185 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
187 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
190 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge emulation and the
 * shared motion-estimation / RD scratchpads).  Called lazily once the
 * first frame's linesize is known.  Returns 0 or AVERROR(ENOMEM). */
196 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
/* round the stride up to a 32-byte multiple with 32 bytes of headroom */
198 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
200 // edge emu needs blocksize + filter length - 1
201 // (= 17x17 for halfpel / 21x21 for h264)
202 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
203 // at uvlinesize. It supports only YUV420 so 24x24 is enough
204 // linesize * interlaced * MBsize
205 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
208 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
/* the remaining scratch pointers alias regions of me.scratchpad; the
 * buffers are never live at the same time */
210 s->me.temp = s->me.scratchpad;
211 s->rd_scratchpad = s->me.scratchpad;
212 s->b_scratchpad = s->me.scratchpad;
213 s->obmc_scratchpad = s->me.scratchpad + 16;
/* failure path: free what was allocated and report out-of-memory */
217 av_freep(&s->edge_emu_buffer);
218 return AVERROR(ENOMEM);
222 * Allocate a frame buffer
/* Obtains pixel data for pic, normally through the (possibly threaded)
 * get_buffer path; WMV3/VC-1/MSS2 image codecs bypass user callbacks and
 * use the default allocator because their internal buffers differ in
 * dimensions/colorspace.  Also allocates hwaccel private data and the
 * context scratch buffers, and validates that the stride did not change
 * mid-stream.  Returns a negative error code on failure. */
224 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
229 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
230 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
231 s->codec_id != AV_CODEC_ID_MSS2)
232 r = ff_thread_get_buffer(s->avctx, &pic->tf,
233 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* else branch (not visible here): use the default get_buffer2 */
235 pic->f.width = s->avctx->width;
236 pic->f.height = s->avctx->height;
237 pic->f.format = s->avctx->pix_fmt;
238 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
241 if (r < 0 || !pic->f.data[0]) {
242 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
247 if (s->avctx->hwaccel) {
248 assert(!pic->hwaccel_picture_private);
249 if (s->avctx->hwaccel->priv_data_size) {
250 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
251 if (!pic->hwaccel_priv_buf) {
252 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
255 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* a stride change after the first allocation would invalidate all the
 * stride-derived tables, so treat it as an error */
259 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
260 s->uvlinesize != pic->f.linesize[1])) {
261 av_log(s->avctx, AV_LOG_ERROR,
262 "get_buffer() failed (stride changed)\n");
263 ff_mpeg_unref_picture(s, pic);
267 if (pic->f.linesize[1] != pic->f.linesize[2]) {
268 av_log(s->avctx, AV_LOG_ERROR,
269 "get_buffer() failed (uv stride mismatch)\n");
270 ff_mpeg_unref_picture(s, pic);
/* scratch buffers depend on the final linesize, so allocate them here */
274 if (!s->edge_emu_buffer &&
275 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
276 av_log(s->avctx, AV_LOG_ERROR,
277 "get_buffer() failed to allocate context scratch buffers.\n");
278 ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data buffers (the AVBufferRef-backed
 * tables); av_buffer_unref() also NULLs the pointers. */
285 static void free_picture_tables(Picture *pic)
289 av_buffer_unref(&pic->mb_var_buf);
290 av_buffer_unref(&pic->mc_mb_var_buf);
291 av_buffer_unref(&pic->mb_mean_buf);
292 av_buffer_unref(&pic->mbskip_table_buf);
293 av_buffer_unref(&pic->qscale_table_buf);
294 av_buffer_unref(&pic->mb_type_buf);
/* one motion-val / ref-index pair per prediction direction */
296 for (i = 0; i < 2; i++) {
297 av_buffer_unref(&pic->motion_val_buf[i]);
298 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data tables sized from the current
 * macroblock geometry.  Encoder-only tables (mb_var etc.) and motion
 * tables are allocated conditionally (the guarding condition lines are
 * partly outside this chunk).  Returns 0 or AVERROR(ENOMEM); partially
 * allocated tables are freed by the caller via free_picture_tables(). */
302 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
/* +1 row / +1 element of padding to simplify edge handling */
304 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
305 const int mb_array_size = s->mb_stride * s->mb_height;
306 const int b8_array_size = s->b8_stride * s->mb_height * 2;
310 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
311 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
312 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
314 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
315 return AVERROR(ENOMEM);
/* encoder statistics tables (guard condition not visible here) */
318 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
319 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
320 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
321 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
322 return AVERROR(ENOMEM);
/* motion vectors / reference indices are needed for H.263-style codecs,
 * encoding, and motion-vector debugging/visualization */
325 if (s->out_format == FMT_H263 || s->encoding ||
326 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
327 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
328 int ref_index_size = 4 * mb_array_size;
330 for (i = 0; mv_size && i < 2; i++) {
331 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
332 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
333 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
334 return AVERROR(ENOMEM);
/* Ensure every per-picture table buffer is writable (copy-on-write for
 * buffers shared with another Picture/thread).  Returns 0 or a negative
 * error from av_buffer_make_writable(). */
341 static int make_tables_writable(Picture *pic)
/* helper: make one buffer writable, bailing out on error (full macro
 * body is partly outside this chunk) */
344 #define MAKE_WRITABLE(table) \
347 (ret = av_buffer_make_writable(&pic->table)) < 0)\
351 MAKE_WRITABLE(mb_var_buf);
352 MAKE_WRITABLE(mc_mb_var_buf);
353 MAKE_WRITABLE(mb_mean_buf);
354 MAKE_WRITABLE(mbskip_table_buf);
355 MAKE_WRITABLE(qscale_table_buf);
356 MAKE_WRITABLE(mb_type_buf);
358 for (i = 0; i < 2; i++) {
359 MAKE_WRITABLE(motion_val_buf[i]);
360 MAKE_WRITABLE(ref_index_buf[i]);
367 * Allocate a Picture.
368 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* For shared pictures the caller supplies pixel data; otherwise the frame
 * buffer is fetched via alloc_frame_buffer() and the context strides are
 * recorded.  Side-data tables are (re)allocated or made writable, then
 * the convenience pointers are derived from the buffers.  On any failure
 * the picture is fully unreferenced and ENOMEM is returned. */
370 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
375 assert(pic->f.data[0]);
378 assert(!pic->f.data[0]);
380 if (alloc_frame_buffer(s, pic) < 0)
383 s->linesize = pic->f.linesize[0];
384 s->uvlinesize = pic->f.linesize[1];
387 if (!pic->qscale_table_buf)
388 ret = alloc_picture_tables(s, pic);
390 ret = make_tables_writable(pic);
395 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
396 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
397 pic->mb_mean = pic->mb_mean_buf->data;
400 pic->mbskip_table = pic->mbskip_table_buf->data;
/* skip the padding rows/element allocated in alloc_picture_tables() */
401 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
402 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
404 if (pic->motion_val_buf[0]) {
405 for (i = 0; i < 2; i++) {
406 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
407 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* common failure path */
413 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
414 ff_mpeg_unref_picture(s, pic);
415 free_picture_tables(pic);
416 return AVERROR(ENOMEM);
420 * Deallocate a picture.
/* Releases the frame buffer and hwaccel data of pic and zeroes every
 * field past mb_mean, leaving the (possibly shared) table buffers alone
 * unless a realloc was requested. */
422 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
/* byte offset of the first field that must be cleared */
424 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
427 /* WM Image / Screen codecs allocate internal buffers with different
428 * dimensions / colorspaces; ignore user-defined callbacks for these. */
429 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
430 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
431 s->codec_id != AV_CODEC_ID_MSS2)
432 ff_thread_release_buffer(s->avctx, &pic->tf);
434 av_frame_unref(&pic->f);
436 av_buffer_unref(&pic->hwaccel_priv_buf);
438 if (pic->needs_realloc)
439 free_picture_tables(pic);
/* wipe everything after the table pointers so the Picture is reusable */
441 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's table buffers reference the same underlying data as src's
 * (re-referencing only when the buffers actually differ), then copy the
 * derived table pointers.  On a failed ref, dst's tables are freed and
 * ENOMEM is returned. */
444 static int update_picture_tables(Picture *dst, Picture *src)
/* helper: re-reference one table buffer if it differs from src's (full
 * macro body is partly outside this chunk) */
448 #define UPDATE_TABLE(table)\
451 (!dst->table || dst->table->buffer != src->table->buffer)) {\
452 av_buffer_unref(&dst->table);\
453 dst->table = av_buffer_ref(src->table);\
455 free_picture_tables(dst);\
456 return AVERROR(ENOMEM);\
461 UPDATE_TABLE(mb_var_buf);
462 UPDATE_TABLE(mc_mb_var_buf);
463 UPDATE_TABLE(mb_mean_buf);
464 UPDATE_TABLE(mbskip_table_buf);
465 UPDATE_TABLE(qscale_table_buf);
466 UPDATE_TABLE(mb_type_buf);
467 for (i = 0; i < 2; i++) {
468 UPDATE_TABLE(motion_val_buf[i]);
469 UPDATE_TABLE(ref_index_buf[i]);
/* the derived pointers can simply be copied since the buffers are shared */
472 dst->mb_var = src->mb_var;
473 dst->mc_mb_var = src->mc_mb_var;
474 dst->mb_mean = src->mb_mean;
475 dst->mbskip_table = src->mbskip_table;
476 dst->qscale_table = src->qscale_table;
477 dst->mb_type = src->mb_type;
478 for (i = 0; i < 2; i++) {
479 dst->motion_val[i] = src->motion_val[i];
480 dst->ref_index[i] = src->ref_index[i];
/* Create a new reference in dst to src's frame, tables, hwaccel data and
 * scalar metadata.  dst must be empty on entry.  On failure dst is
 * unreferenced and a negative error code is returned. */
486 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
490 av_assert0(!dst->f.buf[0]);
491 av_assert0(src->f.buf[0]);
495 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
499 ret = update_picture_tables(dst, src);
503 if (src->hwaccel_picture_private) {
504 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
505 if (!dst->hwaccel_priv_buf)
507 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
/* plain scalar state is copied by value */
510 dst->field_picture = src->field_picture;
511 dst->mb_var_sum = src->mb_var_sum;
512 dst->mc_mb_var_sum = src->mc_mb_var_sum;
513 dst->b_frame_score = src->b_frame_score;
514 dst->needs_realloc = src->needs_realloc;
515 dst->reference = src->reference;
516 dst->shared = src->shared;
/* error path */
520 ff_mpeg_unref_picture(s, dst);
/* Allocate the per-thread (per-slice-context) buffers: motion-estimation
 * maps, optional noise-reduction accumulator, DCT blocks, and for H.263
 * the AC prediction values.  Returns 0 on success, -1 on allocation
 * failure (cleanup happens later in ff_MPV_common_end()). */
524 static int init_duplicate_context(MpegEncContext *s)
/* block-level geometry: Y plane uses b8 resolution, chroma uses MB
 * resolution, each with one row of padding */
526 int y_size = s->b8_stride * (2 * s->mb_height + 1);
527 int c_size = s->mb_stride * (s->mb_height + 1);
528 int yc_size = y_size + 2 * c_size;
536 s->obmc_scratchpad = NULL;
539 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
540 ME_MAP_SIZE * sizeof(uint32_t), fail)
541 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
542 ME_MAP_SIZE * sizeof(uint32_t), fail)
543 if (s->avctx->noise_reduction) {
544 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
545 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coeffs, double-buffered */
548 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
549 s->block = s->blocks[0];
551 for (i = 0; i < 12; i++) {
552 s->pblocks[i] = &s->block[i];
555 if (s->out_format == FMT_H263) {
557 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
558 yc_size * sizeof(int16_t) * 16, fail);
/* skip the padding row/element so index 0 maps to the first real block */
559 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
560 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
561 s->ac_val[2] = s->ac_val[1] + c_size;
566 return -1; // free() through ff_MPV_common_end()
/* Free everything allocated by init_duplicate_context() plus the
 * linesize-dependent scratch buffers from ff_mpv_frame_size_alloc(). */
569 static void free_duplicate_context(MpegEncContext *s)
574 av_freep(&s->edge_emu_buffer);
575 av_freep(&s->me.scratchpad);
/* the other scratch pointers alias me.scratchpad, so only clear them */
579 s->obmc_scratchpad = NULL;
581 av_freep(&s->dct_error_sum);
582 av_freep(&s->me.map);
583 av_freep(&s->me.score_map);
584 av_freep(&s->blocks);
585 av_freep(&s->ac_val_base);
/* Save the per-thread pointer fields of src into bak so they survive the
 * struct-wide memcpy in ff_update_duplicate_context() (only part of the
 * COPY list is visible in this chunk). */
589 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
591 #define COPY(a) bak->a = src->a
592 COPY(edge_emu_buffer);
597 COPY(obmc_scratchpad);
604 COPY(me.map_generation);
/* Synchronize a slice-thread context with the master: copy the whole
 * struct from src, then restore dst's own per-thread buffers (saved via
 * backup_duplicate_context) and refresh the pblocks pointers.  Also lazily
 * allocates dst's scratch buffers once a linesize is known. */
616 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
620 // FIXME copy only needed parts
622 backup_duplicate_context(&bak, dst);
623 memcpy(dst, src, sizeof(MpegEncContext));
624 backup_duplicate_context(dst, &bak);
/* pblocks must point into dst's own block array, not src's */
625 for (i = 0; i < 12; i++) {
626 dst->pblocks[i] = &dst->block[i];
628 if (!dst->edge_emu_buffer &&
629 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
630 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
631 "scratch buffers.\n");
634 // STOP_TIMER("update_duplicate_context")
635 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading context update: bring the destination decoder context
 * (s) up to date with the source (s1) between frames — pictures, MPEG-4 /
 * MPEG-2 state blocks, the carried-over bitstream buffer, and scratch
 * buffers.  Returns 0 or a negative error code. */
639 int ff_mpeg_update_thread_context(AVCodecContext *dst,
640 const AVCodecContext *src)
643 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
645 if (dst == src || !s1->context_initialized)
648 // FIXME can parameters change on I-frames?
649 // in that case dst may need a reinit
650 if (!s->context_initialized) {
/* first update: clone the whole context, then re-establish the fields
 * that must be private to this thread */
651 memcpy(s, s1, sizeof(MpegEncContext));
654 s->bitstream_buffer = NULL;
655 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
657 ff_MPV_common_init(s);
660 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
662 s->context_reinit = 0;
663 s->height = s1->height;
664 s->width = s1->width;
665 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
669 s->avctx->coded_height = s1->avctx->coded_height;
670 s->avctx->coded_width = s1->avctx->coded_width;
671 s->avctx->width = s1->avctx->width;
672 s->avctx->height = s1->avctx->height;
674 s->coded_picture_number = s1->coded_picture_number;
675 s->picture_number = s1->picture_number;
676 s->input_picture_number = s1->input_picture_number;
/* re-reference every picture of the source picture pool */
678 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
679 ff_mpeg_unref_picture(s, &s->picture[i]);
680 if (s1->picture[i].f.data[0] &&
681 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
/* helper: re-reference one of the current/last/next pictures (macro body
 * is partly outside this chunk) */
685 #define UPDATE_PICTURE(pic)\
687 ff_mpeg_unref_picture(s, &s->pic);\
688 if (s1->pic.f.data[0])\
689 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
691 ret = update_picture_tables(&s->pic, &s1->pic);\
696 UPDATE_PICTURE(current_picture);
697 UPDATE_PICTURE(last_picture);
698 UPDATE_PICTURE(next_picture);
/* the *_ptr fields point into s1's picture array; rebase them into s's */
700 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
701 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
702 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
704 // Error/bug resilience
705 s->next_p_frame_damaged = s1->next_p_frame_damaged;
706 s->workaround_bugs = s1->workaround_bugs;
/* bulk-copy the MPEG-4 field range [time_increment_bits, shape) — relies
 * on the struct layout of MpegEncContext staying contiguous here */
709 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
710 (char *) &s1->shape - (char *) &s1->time_increment_bits);
713 s->max_b_frames = s1->max_b_frames;
714 s->low_delay = s1->low_delay;
715 s->droppable = s1->droppable;
717 // DivX handling (doesn't work)
718 s->divx_packed = s1->divx_packed;
/* carry over any leftover bitstream data (e.g. DivX packed B-frames) */
720 if (s1->bitstream_buffer) {
721 if (s1->bitstream_buffer_size +
722 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
723 av_fast_malloc(&s->bitstream_buffer,
724 &s->allocated_bitstream_buffer_size,
725 s1->allocated_bitstream_buffer_size);
726 s->bitstream_buffer_size = s1->bitstream_buffer_size;
727 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
728 s1->bitstream_buffer_size);
729 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
730 FF_INPUT_BUFFER_PADDING_SIZE);
733 // linesize dependend scratch buffer allocation
734 if (!s->edge_emu_buffer)
736 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
737 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
738 "scratch buffers.\n");
739 return AVERROR(ENOMEM);
742 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
743 "be allocated due to unknown size.\n");
747 // MPEG2/interlacing info
/* same bulk-copy trick for the MPEG-2 field range [progressive_sequence,
 * rtp_mode) */
748 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
749 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
751 if (!s1->first_field) {
752 s->last_pict_type = s1->pict_type;
753 if (s1->current_picture_ptr)
754 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
756 if (s1->pict_type != AV_PICTURE_TYPE_B) {
757 s->last_non_b_pict_type = s1->pict_type;
765 * Set the given MpegEncContext to common defaults
766 * (same for encoding and decoding).
767 * The changed fields will not depend upon the
768 * prior state of the MpegEncContext.
770 void ff_MPV_common_defaults(MpegEncContext *s)
/* MPEG-1 scale tables and identity chroma qscale are the baseline */
772 s->y_dc_scale_table =
773 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
774 s->chroma_qscale_table = ff_default_chroma_qscale_table;
775 s->progressive_frame = 1;
776 s->progressive_sequence = 1;
777 s->picture_structure = PICT_FRAME;
779 s->coded_picture_number = 0;
780 s->picture_number = 0;
781 s->input_picture_number = 0;
783 s->picture_in_gop_number = 0;
/* single slice context until threading is configured */
788 s->slice_context_count = 1;
792 * Set the given MpegEncContext to defaults for decoding.
793 * the changed fields will not depend upon
794 * the prior state of the MpegEncContext.
796 void ff_MPV_decode_defaults(MpegEncContext *s)
/* decoding currently needs nothing beyond the common defaults */
798 ff_MPV_common_defaults(s);
/* Initialize the error-resilience context from the current macroblock
 * geometry: mirror the MB layout fields, allocate the ER work buffers,
 * share the skip/intra tables and DC values, and register the
 * macroblock-decode callback.  Returns 0 or AVERROR(ENOMEM). */
801 static int init_er(MpegEncContext *s)
803 ERContext *er = &s->er;
804 int mb_array_size = s->mb_height * s->mb_stride;
807 er->avctx = s->avctx;
810 er->mb_index2xy = s->mb_index2xy;
811 er->mb_num = s->mb_num;
812 er->mb_width = s->mb_width;
813 er->mb_height = s->mb_height;
814 er->mb_stride = s->mb_stride;
815 er->b8_stride = s->b8_stride;
817 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
818 er->error_status_table = av_mallocz(mb_array_size);
819 if (!er->er_temp_buffer || !er->error_status_table)
/* the skip/intra tables are shared with the main context, not owned */
822 er->mbskip_table = s->mbskip_table;
823 er->mbintra_table = s->mbintra_table;
825 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
826 er->dc_val[i] = s->dc_val[i];
828 er->decode_mb = mpeg_er_decode_mb;
/* failure path */
833 av_freep(&er->er_temp_buffer);
834 av_freep(&er->error_status_table);
835 return AVERROR(ENOMEM);
839 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Derives the macroblock geometry from width/height and allocates every
 * resolution-dependent table (MV tables, MB type, encoder statistics,
 * H.263 prediction state, skip/intra tables).  Freed by
 * free_context_frame() on resolution changes.  Returns AVERROR(ENOMEM)
 * on any allocation failure. */
841 static int init_context_frame(MpegEncContext *s)
843 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* stride is mb_width + 1: one column of padding for edge handling */
845 s->mb_width = (s->width + 15) / 16;
846 s->mb_stride = s->mb_width + 1;
847 s->b8_stride = s->mb_width * 2 + 1;
848 s->b4_stride = s->mb_width * 4 + 1;
849 mb_array_size = s->mb_height * s->mb_stride;
850 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
852 /* set default edge pos, will be overriden
853 * in decode_header if needed */
854 s->h_edge_pos = s->mb_width * 16;
855 s->v_edge_pos = s->mb_height * 16;
857 s->mb_num = s->mb_width * s->mb_height;
862 s->block_wrap[3] = s->b8_stride;
864 s->block_wrap[5] = s->mb_stride;
866 y_size = s->b8_stride * (2 * s->mb_height + 1);
867 c_size = s->mb_stride * (s->mb_height + 1);
868 yc_size = y_size + 2 * c_size;
870 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
871 fail); // error ressilience code looks cleaner with this
872 for (y = 0; y < s->mb_height; y++)
873 for (x = 0; x < s->mb_width; x++)
874 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
876 s->mb_index2xy[s->mb_height * s->mb_width] =
877 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
880 /* Allocate MV tables */
881 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
882 mv_table_size * 2 * sizeof(int16_t), fail);
883 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
884 mv_table_size * 2 * sizeof(int16_t), fail);
885 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
886 mv_table_size * 2 * sizeof(int16_t), fail);
887 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
888 mv_table_size * 2 * sizeof(int16_t), fail);
889 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
890 mv_table_size * 2 * sizeof(int16_t), fail);
891 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
892 mv_table_size * 2 * sizeof(int16_t), fail);
/* the working pointers skip the first padding row + element */
893 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
894 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
895 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
896 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
898 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
900 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
902 /* Allocate MB type table */
903 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
904 sizeof(uint16_t), fail); // needed for encoding
906 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
909 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
910 mb_array_size * sizeof(float), fail);
911 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
912 mb_array_size * sizeof(float), fail);
916 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
917 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
918 /* interlaced direct mode decoding tables */
/* indices: [forward/backward][field of current MB][field referenced] */
919 for (i = 0; i < 2; i++) {
921 for (j = 0; j < 2; j++) {
922 for (k = 0; k < 2; k++) {
923 FF_ALLOCZ_OR_GOTO(s->avctx,
924 s->b_field_mv_table_base[i][j][k],
925 mv_table_size * 2 * sizeof(int16_t),
927 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
930 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
931 mb_array_size * 2 * sizeof(uint8_t), fail);
932 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
933 mv_table_size * 2 * sizeof(int16_t), fail);
934 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
937 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
938 mb_array_size * 2 * sizeof(uint8_t), fail);
941 if (s->out_format == FMT_H263) {
943 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
944 s->coded_block = s->coded_block_base + s->b8_stride + 1;
946 /* cbp, ac_pred, pred_dir */
947 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
948 mb_array_size * sizeof(uint8_t), fail);
949 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
950 mb_array_size * sizeof(uint8_t), fail);
953 if (s->h263_pred || s->h263_plus || !s->encoding) {
955 // MN: we need these for error resilience of intra-frames
956 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
957 yc_size * sizeof(int16_t), fail);
958 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
959 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
960 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor (128 << 3) */
961 for (i = 0; i < yc_size; i++)
962 s->dc_val_base[i] = 1024;
965 /* which mb is a intra block */
966 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
967 memset(s->mbintra_table, 1, mb_array_size);
969 /* init macroblock skip table */
970 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
971 // Note the + 1 is for a quicker mpeg4 slice_end detection
975 return AVERROR(ENOMEM);
979 * init common structure for both encoder and decoder.
980 * this assumes that some variables like width/height are already set
982 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* number of slice contexts: one per slice thread when slice threading is
 * active, otherwise 1 */
985 int nb_slices = (HAVE_THREADS &&
986 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
987 s->avctx->thread_count : 1;
989 if (s->encoding && s->avctx->slices)
990 nb_slices = s->avctx->slices;
/* interlaced MPEG-2 needs mb_height rounded to a multiple of two
 * 16-line macroblock rows (32-line units) */
992 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
993 s->mb_height = (s->height + 31) / 32 * 2;
995 s->mb_height = (s->height + 15) / 16;
997 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
998 av_log(s->avctx, AV_LOG_ERROR,
999 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* clamp the slice count to both MAX_THREADS and the MB height (one
 * slice needs at least one MB row) */
1003 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1006 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1008 max_slices = MAX_THREADS;
1009 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1010 " reducing to %d\n", nb_slices, max_slices);
1011 nb_slices = max_slices;
1014 if ((s->width || s->height) &&
1015 av_image_check_size(s->width, s->height, 0, s->avctx))
1018 ff_dct_common_init(s);
1020 s->flags = s->avctx->flags;
1021 s->flags2 = s->avctx->flags2;
1023 if (s->width && s->height) {
1024 /* set chroma shifts */
1025 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1027 &s->chroma_y_shift);
1029 /* convert fourcc to upper case */
1030 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1032 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1034 s->avctx->coded_frame = &s->current_picture.f;
/* encoder-only allocations (the guarding condition lines are not fully
 * visible in this chunk) */
1037 if (s->msmpeg4_version) {
1038 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1039 2 * 2 * (MAX_LEVEL + 1) *
1040 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1042 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
/* quantization matrices: 32 qscales x 64 coefficients */
1044 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
1045 64 * 32 * sizeof(int), fail);
1046 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
1047 64 * 32 * sizeof(int), fail);
1048 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
1049 64 * 32 * 2 * sizeof(uint16_t), fail);
1050 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
1051 64 * 32 * 2 * sizeof(uint16_t), fail);
1052 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
1053 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1054 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
1055 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1057 if (s->avctx->noise_reduction) {
1058 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
1059 2 * 64 * sizeof(uint16_t), fail);
/* picture pool shared by encoder and decoder */
1064 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1065 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1066 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1067 avcodec_get_frame_defaults(&s->picture[i].f);
1069 memset(&s->next_picture, 0, sizeof(s->next_picture));
1070 memset(&s->last_picture, 0, sizeof(s->last_picture));
1071 memset(&s->current_picture, 0, sizeof(s->current_picture));
1072 avcodec_get_frame_defaults(&s->next_picture.f);
1073 avcodec_get_frame_defaults(&s->last_picture.f);
1074 avcodec_get_frame_defaults(&s->current_picture.f);
1076 if (s->width && s->height) {
1077 if (init_context_frame(s))
1080 s->parse_context.state = -1;
1083 s->context_initialized = 1;
1084 s->thread_context[0] = s;
/* slice-thread contexts: clones of s with their own duplicate buffers
 * and disjoint [start_mb_y, end_mb_y) row ranges */
1086 if (s->width && s->height) {
1087 if (nb_slices > 1) {
1088 for (i = 1; i < nb_slices; i++) {
1089 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1090 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1093 for (i = 0; i < nb_slices; i++) {
1094 if (init_duplicate_context(s->thread_context[i]) < 0)
1096 s->thread_context[i]->start_mb_y =
1097 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1098 s->thread_context[i]->end_mb_y =
1099 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1102 if (init_duplicate_context(s) < 0)
1105 s->end_mb_y = s->mb_height;
1107 s->slice_context_count = nb_slices;
/* error path: tear everything down */
1112 ff_MPV_common_end(s);
1117 * Frees and resets MpegEncContext fields depending on the resolution.
1118 * Is used during resolution changes to avoid a full reinitialization of the
/* Inverse of init_context_frame(): frees every resolution-dependent table
 * and NULLs the derived pointers so a later init_context_frame() starts
 * clean. */
1121 static int free_context_frame(MpegEncContext *s)
1125 av_freep(&s->mb_type);
1126 av_freep(&s->p_mv_table_base);
1127 av_freep(&s->b_forw_mv_table_base);
1128 av_freep(&s->b_back_mv_table_base);
1129 av_freep(&s->b_bidir_forw_mv_table_base);
1130 av_freep(&s->b_bidir_back_mv_table_base);
1131 av_freep(&s->b_direct_mv_table_base);
/* the offset working pointers alias the *_base buffers; just clear them */
1132 s->p_mv_table = NULL;
1133 s->b_forw_mv_table = NULL;
1134 s->b_back_mv_table = NULL;
1135 s->b_bidir_forw_mv_table = NULL;
1136 s->b_bidir_back_mv_table = NULL;
1137 s->b_direct_mv_table = NULL;
1138 for (i = 0; i < 2; i++) {
1139 for (j = 0; j < 2; j++) {
1140 for (k = 0; k < 2; k++) {
1141 av_freep(&s->b_field_mv_table_base[i][j][k]);
1142 s->b_field_mv_table[i][j][k] = NULL;
1144 av_freep(&s->b_field_select_table[i][j]);
1145 av_freep(&s->p_field_mv_table_base[i][j]);
1146 s->p_field_mv_table[i][j] = NULL;
1148 av_freep(&s->p_field_select_table[i]);
1151 av_freep(&s->dc_val_base);
1152 av_freep(&s->coded_block_base);
1153 av_freep(&s->mbintra_table);
1154 av_freep(&s->cbp_table);
1155 av_freep(&s->pred_dir_table);
1157 av_freep(&s->mbskip_table);
1159 av_freep(&s->er.error_status_table);
1160 av_freep(&s->er.er_temp_buffer);
1161 av_freep(&s->mb_index2xy);
1162 av_freep(&s->lambda_table);
1163 av_freep(&s->cplx_tab);
1164 av_freep(&s->bits_tab);
/* strides are unknown until the next frame buffer is allocated */
1166 s->linesize = s->uvlinesize = 0;
// Reinitialize the resolution-dependent parts of the context after a
// mid-stream frame size change: tear down slice thread contexts and the
// frame-sized tables, then rebuild them for the new dimensions.
// NOTE(review): elided listing — error-path gotos/returns and closing
// braces between the visible lines are not shown here.
1171 int ff_MPV_common_frame_size_change(MpegEncContext *s)
// Free per-slice duplicate contexts; index 0 is s itself and is not freed.
1175 if (s->slice_context_count > 1) {
1176 for (i = 0; i < s->slice_context_count; i++) {
1177 free_duplicate_context(s->thread_context[i]);
1179 for (i = 1; i < s->slice_context_count; i++) {
1180 av_freep(&s->thread_context[i]);
1183 free_duplicate_context(s);
1185 if ((err = free_context_frame(s)) < 0)
// Force reallocation of all picture buffers at the new size.
1189 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1190 s->picture[i].needs_realloc = 1;
1193 s->last_picture_ptr =
1194 s->next_picture_ptr =
1195 s->current_picture_ptr = NULL;
// Interlaced MPEG-2 rounds mb_height to a multiple of two field MB rows.
1198 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1199 s->mb_height = (s->height + 31) / 32 * 2;
1201 s->mb_height = (s->height + 15) / 16;
1203 if ((s->width || s->height) &&
1204 av_image_check_size(s->width, s->height, 0, s->avctx))
1205 return AVERROR_INVALIDDATA;
1207 if ((err = init_context_frame(s)))
1210 s->thread_context[0] = s;
1212 if (s->width && s->height) {
1213 int nb_slices = s->slice_context_count;
1214 if (nb_slices > 1) {
// Recreate slice contexts as shallow copies of the main context.
1215 for (i = 1; i < nb_slices; i++) {
1216 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1217 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1220 for (i = 0; i < nb_slices; i++) {
1221 if (init_duplicate_context(s->thread_context[i]) < 0)
// Split MB rows evenly (with rounding) across the slice contexts.
1223 s->thread_context[i]->start_mb_y =
1224 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1225 s->thread_context[i]->end_mb_y =
1226 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1229 if (init_duplicate_context(s) < 0)
1232 s->end_mb_y = s->mb_height;
1234 s->slice_context_count = nb_slices;
// Error path (label elided): full teardown on failure.
1239 ff_MPV_common_end(s);
1243 /* init common structure for both encoder and decoder */
// Full teardown of the MpegEncContext (encoder and decoder): slice
// contexts, scratch buffers, quantizer tables, all pictures, and the
// resolution-dependent tables via free_context_frame().
// NOTE(review): elided listing — some braces/lines are not visible here.
1244 void ff_MPV_common_end(MpegEncContext *s)
1248 if (s->slice_context_count > 1) {
1249 for (i = 0; i < s->slice_context_count; i++) {
1250 free_duplicate_context(s->thread_context[i]);
// thread_context[0] aliases s, so freeing starts at index 1.
1252 for (i = 1; i < s->slice_context_count; i++) {
1253 av_freep(&s->thread_context[i]);
1255 s->slice_context_count = 1;
1256 } else free_duplicate_context(s);
1258 av_freep(&s->parse_context.buffer);
1259 s->parse_context.buffer_size = 0;
1261 av_freep(&s->bitstream_buffer);
1262 s->allocated_bitstream_buffer_size = 0;
// Encoder-side statistics and quantizer lookup tables.
1264 av_freep(&s->avctx->stats_out);
1265 av_freep(&s->ac_stats);
1267 av_freep(&s->q_intra_matrix);
1268 av_freep(&s->q_inter_matrix);
1269 av_freep(&s->q_intra_matrix16);
1270 av_freep(&s->q_inter_matrix16);
1271 av_freep(&s->input_picture);
1272 av_freep(&s->reordered_input_picture);
1273 av_freep(&s->dct_offset);
// Release every picture slot plus the four standalone picture copies.
1276 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1277 free_picture_tables(&s->picture[i]);
1278 ff_mpeg_unref_picture(s, &s->picture[i]);
1281 av_freep(&s->picture);
1282 free_picture_tables(&s->last_picture);
1283 ff_mpeg_unref_picture(s, &s->last_picture);
1284 free_picture_tables(&s->current_picture);
1285 ff_mpeg_unref_picture(s, &s->current_picture);
1286 free_picture_tables(&s->next_picture);
1287 ff_mpeg_unref_picture(s, &s->next_picture);
1288 free_picture_tables(&s->new_picture);
1289 ff_mpeg_unref_picture(s, &s->new_picture);
1291 free_context_frame(s);
// Reset state so a later init starts from a clean context.
1293 s->context_initialized = 0;
1294 s->last_picture_ptr =
1295 s->next_picture_ptr =
1296 s->current_picture_ptr = NULL;
1297 s->linesize = s->uvlinesize = 0;
// Build the run/level helper tables (max_level, max_run, index_run) for an
// RLTable, for both the not-last and last coefficient groups. If
// static_store is non-NULL the tables live in caller-provided static
// storage; otherwise they are av_malloc'ed (ownership with the RLTable).
// NOTE(review): elided listing — the start/end computation inside the loop
// and several braces are not visible here.
1300 av_cold void ff_init_rl(RLTable *rl,
1301 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1303 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1304 uint8_t index_run[MAX_RUN + 1];
1305 int last, run, level, start, end, i;
1307 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1308 if (static_store && rl->max_level[0])
1311 /* compute max_level[], max_run[] and index_run[] */
1312 for (last = 0; last < 2; last++) {
// index_run entries default to rl->n, meaning "no code with this run".
1321 memset(max_level, 0, MAX_RUN + 1);
1322 memset(max_run, 0, MAX_LEVEL + 1);
1323 memset(index_run, rl->n, MAX_RUN + 1);
1324 for (i = start; i < end; i++) {
1325 run = rl->table_run[i];
1326 level = rl->table_level[i];
// First code seen for a given run defines its index.
1327 if (index_run[run] == rl->n)
1329 if (level > max_level[run])
1330 max_level[run] = level;
1331 if (run > max_run[level])
1332 max_run[level] = run;
// Static storage layout: [max_level | max_run | index_run] per 'last'.
1335 rl->max_level[last] = static_store[last];
1337 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1338 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1340 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1342 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1343 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1345 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1347 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1348 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
// Precompute, for each qscale 0..31, an RL_VLC table that folds the VLC
// decode and the inverse quantization (level * qmul + qadd) into a single
// lookup.
// NOTE(review): elided listing — qmul assignment, escape/illegal-code
// handling bodies and braces are not fully visible here.
1352 av_cold void ff_init_vlc_rl(RLTable *rl)
1356 for (q = 0; q < 32; q++) {
// qadd = (q - 1) | 1 : standard H.263-style odd rounding offset.
1358 int qadd = (q - 1) | 1;
1364 for (i = 0; i < rl->vlc.table_size; i++) {
1365 int code = rl->vlc.table[i][0];
1366 int len = rl->vlc.table[i][1];
1369 if (len == 0) { // illegal code
1372 } else if (len < 0) { // more bits needed
1376 if (code == rl->n) { // esc
// Normal code: run is stored off-by-one; codes past rl->last mark
// the last coefficient and get the +192 "last" run offset.
1380 run = rl->table_run[code] + 1;
1381 level = rl->table_level[code] * qmul + qadd;
1382 if (code >= rl->last) run += 192;
1385 rl->rl_vlc[q][i].len = len;
1386 rl->rl_vlc[q][i].level = level;
1387 rl->rl_vlc[q][i].run = run;
// Unreference every non-reference picture in the pool; when remove_current
// is 0, the picture pointed to by current_picture_ptr is kept.
1392 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1396 /* release non reference frames */
1397 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1398 if (!s->picture[i].reference &&
1399 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1400 ff_mpeg_unref_picture(s, &s->picture[i]);
// Report whether a picture slot can be reused: no data allocated, or it is
// flagged for reallocation and not held as a delayed reference.
// NOTE(review): the return statements are elided from this listing.
1405 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1407 if (pic->f.data[0] == NULL)
1409 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
// Find a free slot in s->picture[]. For shared pictures only slots with no
// allocated data qualify; otherwise pic_is_unused() decides. Returns the
// slot index, or AVERROR_INVALIDDATA when the pool is exhausted.
// NOTE(review): the returns of the found index are elided from this listing.
1414 static int find_unused_picture(MpegEncContext *s, int shared)
1419 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1420 if (s->picture[i].f.data[0] == NULL)
1424 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1425 if (pic_is_unused(s, &s->picture[i]))
1430 return AVERROR_INVALIDDATA;
// Public wrapper around find_unused_picture(): additionally, if the chosen
// slot was marked needs_realloc, free its tables and buffers so the caller
// gets a clean slot. Returns the slot index or a negative error code.
1433 int ff_find_unused_picture(MpegEncContext *s, int shared)
1435 int ret = find_unused_picture(s, shared);
1437 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1438 if (s->picture[ret].needs_realloc) {
1439 s->picture[ret].needs_realloc = 0;
1440 free_picture_tables(&s->picture[ret]);
1441 ff_mpeg_unref_picture(s, &s->picture[ret]);
1442 avcodec_get_frame_defaults(&s->picture[ret].f);
// Encoder noise reduction: refresh the per-coefficient DCT offsets from the
// accumulated error statistics, halving the accumulators once the sample
// count exceeds 2^16 so the statistics track recent frames.
1448 static void update_noise_reduction(MpegEncContext *s)
1452 for (intra = 0; intra < 2; intra++) {
1453 if (s->dct_count[intra] > (1 << 16)) {
1454 for (i = 0; i < 64; i++) {
1455 s->dct_error_sum[intra][i] >>= 1;
1457 s->dct_count[intra] >>= 1;
// offset = nr_strength * count / error_sum, rounded; +1 avoids div by 0.
1460 for (i = 0; i < 64; i++) {
1461 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1462 s->dct_count[intra] +
1463 s->dct_error_sum[intra][i] / 2) /
1464 (s->dct_error_sum[intra][i] + 1);
1470 * generic function for encode/decode called after coding/decoding
1471 * the header and before a frame is coded/decoded.
// Per-frame setup run after the header is parsed and before coding/decoding:
// recycles old pictures, picks/allocates the current picture, fabricates
// dummy reference frames when references are missing, adjusts field-picture
// pointers, and selects the dequantizer functions.
// NOTE(review): elided listing — returns, gotos, some braces and a few
// statements between the visible lines are not shown here.
1473 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1479 /* mark & release old frames */
1480 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1481 s->last_picture_ptr != s->next_picture_ptr &&
1482 s->last_picture_ptr->f.data[0]) {
1483 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1486 /* release forgotten pictures */
1487 /* if (mpeg124/h263) */
// A referenced picture that is neither last nor next is a leak ("zombie")
// unless frame threading legitimately keeps it alive.
1489 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1490 if (&s->picture[i] != s->last_picture_ptr &&
1491 &s->picture[i] != s->next_picture_ptr &&
1492 s->picture[i].reference && !s->picture[i].needs_realloc) {
1493 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1494 av_log(avctx, AV_LOG_ERROR,
1495 "releasing zombie picture\n");
1496 ff_mpeg_unref_picture(s, &s->picture[i]);
1501 ff_mpeg_unref_picture(s, &s->current_picture);
1504 ff_release_unused_pictures(s, 1);
// Pick the picture slot for the frame being coded/decoded.
1506 if (s->current_picture_ptr &&
1507 s->current_picture_ptr->f.data[0] == NULL) {
1508 // we already have an unused image
1509 // (maybe it was set before reading the header)
1510 pic = s->current_picture_ptr;
1512 i = ff_find_unused_picture(s, 0);
1514 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1517 pic = &s->picture[i];
1521 if (!s->droppable) {
1522 if (s->pict_type != AV_PICTURE_TYPE_B)
1526 pic->f.coded_picture_number = s->coded_picture_number++;
1528 if (ff_alloc_picture(s, pic, 0) < 0)
1531 s->current_picture_ptr = pic;
1532 // FIXME use only the vars from current_pic
// Frame metadata: field order, interlacing, picture/key-frame type.
1533 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1534 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1535 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1536 if (s->picture_structure != PICT_FRAME)
1537 s->current_picture_ptr->f.top_field_first =
1538 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1540 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1541 !s->progressive_sequence;
1542 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1545 s->current_picture_ptr->f.pict_type = s->pict_type;
1546 // if (s->flags && CODEC_FLAG_QSCALE)
1547 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1548 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1550 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1551 s->current_picture_ptr)) < 0)
// Rotate references: non-B frames push next -> last, current -> next.
1554 if (s->pict_type != AV_PICTURE_TYPE_B) {
1555 s->last_picture_ptr = s->next_picture_ptr;
1557 s->next_picture_ptr = s->current_picture_ptr;
1559 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1560 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1561 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1562 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1563 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1564 s->pict_type, s->droppable);
// Missing last reference (stream starts on a non-keyframe, or a field
// sequence): synthesize a gray dummy picture so MC has valid data.
1566 if ((s->last_picture_ptr == NULL ||
1567 s->last_picture_ptr->f.data[0] == NULL) &&
1568 (s->pict_type != AV_PICTURE_TYPE_I ||
1569 s->picture_structure != PICT_FRAME)) {
1570 int h_chroma_shift, v_chroma_shift;
1571 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1572 &h_chroma_shift, &v_chroma_shift);
1573 if (s->pict_type != AV_PICTURE_TYPE_I)
1574 av_log(avctx, AV_LOG_ERROR,
1575 "warning: first frame is no keyframe\n");
1576 else if (s->picture_structure != PICT_FRAME)
1577 av_log(avctx, AV_LOG_INFO,
1578 "allocate dummy last picture for field based first keyframe\n");
1580 /* Allocate a dummy frame */
1581 i = ff_find_unused_picture(s, 0);
1583 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1586 s->last_picture_ptr = &s->picture[i];
1587 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1588 s->last_picture_ptr = NULL;
// Luma 0 / chroma 0x80 = mid-gray in YUV.
1592 memset(s->last_picture_ptr->f.data[0], 0,
1593 avctx->height * s->last_picture_ptr->f.linesize[0]);
1594 memset(s->last_picture_ptr->f.data[1], 0x80,
1595 (avctx->height >> v_chroma_shift) *
1596 s->last_picture_ptr->f.linesize[1]);
1597 memset(s->last_picture_ptr->f.data[2], 0x80,
1598 (avctx->height >> v_chroma_shift) *
1599 s->last_picture_ptr->f.linesize[2]);
// Mark both fields complete so frame-threaded consumers never wait on it.
1601 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1602 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
// Same dummy-frame trick for a missing next reference of a B frame.
1604 if ((s->next_picture_ptr == NULL ||
1605 s->next_picture_ptr->f.data[0] == NULL) &&
1606 s->pict_type == AV_PICTURE_TYPE_B) {
1607 /* Allocate a dummy frame */
1608 i = ff_find_unused_picture(s, 0);
1610 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1613 s->next_picture_ptr = &s->picture[i];
1614 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1615 s->next_picture_ptr = NULL;
1618 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1619 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
// Refresh the working copies last_picture/next_picture from the pointers.
1622 if (s->last_picture_ptr) {
1623 ff_mpeg_unref_picture(s, &s->last_picture);
1624 if (s->last_picture_ptr->f.data[0] &&
1625 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1626 s->last_picture_ptr)) < 0)
1629 if (s->next_picture_ptr) {
1630 ff_mpeg_unref_picture(s, &s->next_picture);
1631 if (s->next_picture_ptr->f.data[0] &&
1632 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1633 s->next_picture_ptr)) < 0)
1637 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1638 s->last_picture_ptr->f.data[0]));
// Field pictures: point data at the selected field and double linesizes
// so each field is addressed as a half-height frame.
1640 if (s->picture_structure!= PICT_FRAME) {
1642 for (i = 0; i < 4; i++) {
1643 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1644 s->current_picture.f.data[i] +=
1645 s->current_picture.f.linesize[i];
1647 s->current_picture.f.linesize[i] *= 2;
1648 s->last_picture.f.linesize[i] *= 2;
1649 s->next_picture.f.linesize[i] *= 2;
1653 s->err_recognition = avctx->err_recognition;
1655 /* set dequantizer, we can't do it during init as
1656 * it might change for mpeg4 and we can't do it in the header
1657 * decode as init is not called for mpeg4 there yet */
1658 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1659 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1660 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1661 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1662 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1663 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1665 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1666 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1669 if (s->dct_error_sum) {
1670 assert(s->avctx->noise_reduction && s->encoding);
1671 update_noise_reduction(s);
1674 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1675 return ff_xvmc_field_start(s, avctx);
1680 /* generic function for encode/decode called after a
1681 * frame has been coded/decoded. */
// Per-frame teardown after coding/decoding: draw edge padding for MC beyond
// the picture borders, update last-frame statistics, copy current_picture
// back into the pool, release non-references, and report decode progress.
// NOTE(review): elided listing — some braces and statements between the
// visible lines are not shown here.
1682 void ff_MPV_frame_end(MpegEncContext *s)
1685 /* redraw edges for the frame if decoding didn't complete */
1686 // just to make sure that all data is rendered.
1687 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1688 ff_xvmc_field_end(s);
1689 } else if ((s->er.error_count || s->encoding) &&
1690 !s->avctx->hwaccel &&
1691 s->unrestricted_mv &&
1692 s->current_picture.reference &&
1694 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1695 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1696 int hshift = desc->log2_chroma_w;
1697 int vshift = desc->log2_chroma_h;
// Pad luma and both chroma planes; chroma extents are shifted by the
// pixel format's subsampling factors.
1698 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1699 s->h_edge_pos, s->v_edge_pos,
1700 EDGE_WIDTH, EDGE_WIDTH,
1701 EDGE_TOP | EDGE_BOTTOM);
1702 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1703 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1704 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1705 EDGE_TOP | EDGE_BOTTOM);
1706 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1707 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1708 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1709 EDGE_TOP | EDGE_BOTTOM);
// Remember type/lambda of this frame for rate control of the next one.
1714 s->last_pict_type = s->pict_type;
1715 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1716 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1717 s->last_non_b_pict_type = s->pict_type;
1720 /* copy back current_picture variables */
1721 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1722 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1723 s->picture[i] = s->current_picture;
1727 assert(i < MAX_PICTURE_COUNT);
1731 /* release non-reference frames */
1732 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1733 if (!s->picture[i].reference)
1734 ff_mpeg_unref_picture(s, &s->picture[i]);
1737 // clear copies, to avoid confusion
1739 memset(&s->last_picture, 0, sizeof(Picture));
1740 memset(&s->next_picture, 0, sizeof(Picture));
1741 memset(&s->current_picture, 0, sizeof(Picture));
1743 s->avctx->coded_frame = &s->current_picture_ptr->f;
1745 if (s->current_picture.reference)
1746 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1750 * Print debugging info for the given picture.
// Dump per-macroblock debug info (skip counts, qscale, MB type glyphs) for
// picture p, gated by the FF_DEBUG_* bits in avctx->debug.
// NOTE(review): elided listing; also the switch below reads `pict` while
// the parameter is `p` — presumably a local alias declared on an elided
// line; confirm against the full file.
1752 void ff_print_debug_info(MpegEncContext *s, Picture *p)
1755 if (s->avctx->hwaccel || !p || !p->mb_type)
1759 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1762 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1763 switch (pict->pict_type) {
1764 case AV_PICTURE_TYPE_I:
1765 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1767 case AV_PICTURE_TYPE_P:
1768 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1770 case AV_PICTURE_TYPE_B:
1771 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1773 case AV_PICTURE_TYPE_S:
1774 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1776 case AV_PICTURE_TYPE_SI:
1777 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1779 case AV_PICTURE_TYPE_SP:
1780 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
// One character (or group) per macroblock, one text row per MB row.
1783 for (y = 0; y < s->mb_height; y++) {
1784 for (x = 0; x < s->mb_width; x++) {
1785 if (s->avctx->debug & FF_DEBUG_SKIP) {
1786 int count = s->mbskip_table[x + y * s->mb_stride];
1789 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1791 if (s->avctx->debug & FF_DEBUG_QP) {
1792 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1793 p->qscale_table[x + y * s->mb_stride]);
1795 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1796 int mb_type = p->mb_type[x + y * s->mb_stride];
1797 // Type & MV direction
1798 if (IS_PCM(mb_type))
1799 av_log(s->avctx, AV_LOG_DEBUG, "P");
1800 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1801 av_log(s->avctx, AV_LOG_DEBUG, "A");
1802 else if (IS_INTRA4x4(mb_type))
1803 av_log(s->avctx, AV_LOG_DEBUG, "i");
1804 else if (IS_INTRA16x16(mb_type))
1805 av_log(s->avctx, AV_LOG_DEBUG, "I");
1806 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1807 av_log(s->avctx, AV_LOG_DEBUG, "d");
1808 else if (IS_DIRECT(mb_type))
1809 av_log(s->avctx, AV_LOG_DEBUG, "D");
1810 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1811 av_log(s->avctx, AV_LOG_DEBUG, "g");
1812 else if (IS_GMC(mb_type))
1813 av_log(s->avctx, AV_LOG_DEBUG, "G");
1814 else if (IS_SKIP(mb_type))
1815 av_log(s->avctx, AV_LOG_DEBUG, "S");
1816 else if (!USES_LIST(mb_type, 1))
1817 av_log(s->avctx, AV_LOG_DEBUG, ">");
1818 else if (!USES_LIST(mb_type, 0))
1819 av_log(s->avctx, AV_LOG_DEBUG, "<");
1821 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1822 av_log(s->avctx, AV_LOG_DEBUG, "X");
// Second glyph: partition shape (+/-/| for 8x8, 16x8, 8x16).
1826 if (IS_8X8(mb_type))
1827 av_log(s->avctx, AV_LOG_DEBUG, "+");
1828 else if (IS_16X8(mb_type))
1829 av_log(s->avctx, AV_LOG_DEBUG, "-");
1830 else if (IS_8X16(mb_type))
1831 av_log(s->avctx, AV_LOG_DEBUG, "|");
1832 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1833 av_log(s->avctx, AV_LOG_DEBUG, " ");
1835 av_log(s->avctx, AV_LOG_DEBUG, "?");
// Third glyph: '=' marks an interlaced macroblock.
1838 if (IS_INTERLACED(mb_type))
1839 av_log(s->avctx, AV_LOG_DEBUG, "=");
1841 av_log(s->avctx, AV_LOG_DEBUG, " ");
1844 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1850 * find the lowest MB row referenced in the MVs
// Return the lowest (largest-index) macroblock row of the reference in
// direction dir that the current MB's motion vectors can touch — used to
// wait on frame-thread progress. Falls back to the last row for field
// pictures / GMC, where the mapping is not computed.
// NOTE(review): elided listing — the switch cases setting `mvs`/`off`
// adjustments are not visible here.
1852 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
// MVs are in half-pel units normally; shift once more for quarter-pel off.
1854 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1855 int my, off, i, mvs;
1857 if (s->picture_structure != PICT_FRAME || s->mcsel)
1860 switch (s->mv_type) {
1874 for (i = 0; i < mvs; i++) {
1875 my = s->mv[dir][i][1]<<qpel_shift;
1876 my_max = FFMAX(my_max, my);
1877 my_min = FFMIN(my_min, my);
// Convert the extreme vertical displacement to MB rows (64 = 16 MB rows
// in quarter-pel units, rounded up).
1880 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1882 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1884 return s->mb_height-1;
1887 /* put block[] to dest[] */
/* Dequantize block n with the intra dequantizer, then IDCT and store
 * (overwrite) the result into dest. */
1888 static inline void put_dct(MpegEncContext *s,
1889 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1891 s->dct_unquantize_intra(s, block, i, qscale);
1892 s->dsp.idct_put (dest, line_size, block);
1895 /* add block[] to dest[] */
/* IDCT block n and add the residual onto dest; a negative
 * block_last_index means the block has no coded coefficients. */
1896 static inline void add_dct(MpegEncContext *s,
1897 int16_t *block, int i, uint8_t *dest, int line_size)
1899 if (s->block_last_index[i] >= 0) {
1900 s->dsp.idct_add (dest, line_size, block);
/* Dequantize block n with the inter dequantizer, then IDCT and add the
 * residual onto dest; skipped entirely when the block is empty. */
1904 static inline void add_dequant_dct(MpegEncContext *s,
1905 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1907 if (s->block_last_index[i] >= 0) {
1908 s->dct_unquantize_inter(s, block, i, qscale);
1910 s->dsp.idct_add (dest, line_size, block);
1915 * Clean dc, ac, coded_block for the current non-intra MB.
// Reset the intra prediction state (DC values, AC values, coded_block) for
// the current macroblock when it is coded as non-intra.
// NOTE(review): elided listing — some assignments (e.g. the first luma
// dc_val entry) fall on lines not visible here.
1917 void ff_clean_intra_table_entries(MpegEncContext *s)
1919 int wrap = s->b8_stride;
1920 int xy = s->block_index[0];
// Luma DC predictors reset to the neutral value 1024 (= 128 << 3).
1923 s->dc_val[0][xy + 1 ] =
1924 s->dc_val[0][xy + wrap] =
1925 s->dc_val[0][xy + 1 + wrap] = 1024;
/* ac pred */
1927 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1928 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1929 if (s->msmpeg4_version>=3) {
1930 s->coded_block[xy ] =
1931 s->coded_block[xy + 1 ] =
1932 s->coded_block[xy + wrap] =
1933 s->coded_block[xy + 1 + wrap] = 0;
// Chroma state is indexed per macroblock, not per 8x8 block.
1936 wrap = s->mb_stride;
1937 xy = s->mb_x + s->mb_y * wrap;
1939 s->dc_val[2][xy] = 1024;
/* ac pred */
1941 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1942 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1944 s->mbintra_table[xy]= 0;
1947 /* generic function called after a macroblock has been parsed by the
1948 decoder or after it has been encoded by the encoder.
1950 Important variables used:
1951 s->mb_intra : true if intra macroblock
1952 s->mv_dir : motion vector direction
1953 s->mv_type : motion vector type
1954 s->mv : motion vector
1955 s->interlaced_dct : true if interlaced dct used (mpeg2)
// Reconstruct one macroblock: motion compensation from the reference
// frames plus dequantization/IDCT of the residual (or plain IDCT for
// intra MPEG-1/2). Shared by encoder and decoder; is_mpeg12 is a
// compile-time specialization flag (see ff_MPV_decode_mb below).
// NOTE(review): elided listing — braces, `return`s, gotos and several
// statements between the visible lines are not shown here.
1957 static av_always_inline
1958 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
1961 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1962 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1963 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1967 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1968 /* print DCT coefficients */
1970 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1972 for(j=0; j<64; j++){
1973 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
1975 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1979 s->current_picture.qscale_table[mb_xy] = s->qscale;
1981 /* update DC predictors for P macroblocks */
1983 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1984 if(s->mbintra_table[mb_xy])
1985 ff_clean_intra_table_entries(s);
1989 s->last_dc[2] = 128 << s->intra_dc_precision;
1992 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1993 s->mbintra_table[mb_xy]=1;
// Skip full reconstruction only when encoding intra-only/B frames without
// RD mb decision and PSNR is not requested.
1995 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1996 uint8_t *dest_y, *dest_cb, *dest_cr;
1997 int dct_linesize, dct_offset;
1998 op_pixels_func (*op_pix)[4];
1999 qpel_mc_func (*op_qpix)[16];
2000 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2001 const int uvlinesize = s->current_picture.f.linesize[1];
2002 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2003 const int block_size = 8;
2005 /* avoid copy if macroblock skipped in last frame too */
2006 /* skip only during decoding as we might trash the buffers during encoding a bit */
2008 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2010 if (s->mb_skipped) {
2012 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2014 } else if(!s->current_picture.reference) {
2017 *mbskip_ptr = 0; /* not skipped */
// Interlaced DCT interleaves the two fields: double the stride, offset
// the second field by one line instead of block_size lines.
2021 dct_linesize = linesize << s->interlaced_dct;
2022 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2026 dest_cb= s->dest[1];
2027 dest_cr= s->dest[2];
// Unreadable destinations render into the scratchpad instead.
2029 dest_y = s->b_scratchpad;
2030 dest_cb= s->b_scratchpad+16*linesize;
2031 dest_cr= s->b_scratchpad+32*linesize;
2035 /* motion handling */
2036 /* decoding or more than one mb_type (MC was already done otherwise) */
// Frame threading: wait until the referenced rows of last/next frame
// have been decoded before reading them.
2039 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2040 if (s->mv_dir & MV_DIR_FORWARD) {
2041 ff_thread_await_progress(&s->last_picture_ptr->tf,
2042 ff_MPV_lowest_referenced_row(s, 0),
2045 if (s->mv_dir & MV_DIR_BACKWARD) {
2046 ff_thread_await_progress(&s->next_picture_ptr->tf,
2047 ff_MPV_lowest_referenced_row(s, 1),
// First prediction direction uses put (no-rounding variant for some P
// frames); a second direction averages on top of it.
2052 op_qpix= s->me.qpel_put;
2053 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2054 op_pix = s->hdsp.put_pixels_tab;
2056 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2058 if (s->mv_dir & MV_DIR_FORWARD) {
2059 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2060 op_pix = s->hdsp.avg_pixels_tab;
2061 op_qpix= s->me.qpel_avg;
2063 if (s->mv_dir & MV_DIR_BACKWARD) {
2064 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2068 /* skip dequant / idct if we are really late ;) */
2069 if(s->avctx->skip_idct){
2070 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2071 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2072 || s->avctx->skip_idct >= AVDISCARD_ALL)
2076 /* add dct residue */
// Codecs without a combined decode+dequant path dequantize here.
2077 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2078 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2079 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2080 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2081 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2082 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2084 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2085 if (s->chroma_y_shift){
2086 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2087 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2091 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2092 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2093 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2094 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
// Already-dequantized residual: plain idct_add.
2097 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2098 add_dct(s, block[0], 0, dest_y , dct_linesize);
2099 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2100 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2101 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2103 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2104 if(s->chroma_y_shift){//Chroma420
2105 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2106 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2109 dct_linesize = uvlinesize << s->interlaced_dct;
2110 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2112 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2113 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2114 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2115 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2116 if(!s->chroma_x_shift){//Chroma444
2117 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2118 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2119 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2120 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2125 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2126 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2129 /* dct only in intra block */
// Intra macroblock: no MC, the IDCT output *is* the reconstruction.
2130 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2131 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2132 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2133 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2134 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2136 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2137 if(s->chroma_y_shift){
2138 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2139 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2143 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2144 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2145 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2146 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
// MPEG-1/2 intra blocks arrive already dequantized: plain idct_put.
2150 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2151 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2152 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2153 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2155 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2156 if(s->chroma_y_shift){
2157 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2158 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2161 dct_linesize = uvlinesize << s->interlaced_dct;
2162 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2164 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2165 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2166 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2167 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2168 if(!s->chroma_x_shift){//Chroma444
2169 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2170 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2171 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2172 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
// Copy the scratchpad back to the real destination (unreadable case).
2180 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2181 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2182 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
// Dispatch to the is_mpeg12 specialization of MPV_decode_mb_internal so
// the compiler can fold the constant flag through the inlined body.
2187 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2189 if(s->out_format == FMT_MPEG1) {
2190 MPV_decode_mb_internal(s, block, 1);
2193 MPV_decode_mb_internal(s, block, 0);
2197 * @param h is the normal height, this will be reduced automatically if needed for the last row
// Draw edge padding for a finished band of rows and invoke the user's
// draw_horiz_band callback. h is clipped to the picture for the last row.
// NOTE(review): elided listing — the `src` selection between cur and last
// and parts of the offset setup fall on lines not visible here.
2199 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2200 Picture *last, int y, int h, int picture_structure,
2201 int first_field, int draw_edges, int low_delay,
2202 int v_edge_pos, int h_edge_pos)
2204 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2205 int hshift = desc->log2_chroma_w;
2206 int vshift = desc->log2_chroma_h;
2207 const int field_pic = picture_structure != PICT_FRAME;
// Pad only the sides this band touches (top for y==0, bottom for the
// last band), clipping the band height to the edge position.
2213 if (!avctx->hwaccel &&
2216 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2217 int *linesize = cur->f.linesize;
2218 int sides = 0, edge_h;
2219 if (y==0) sides |= EDGE_TOP;
2220 if (y + h >= v_edge_pos)
2221 sides |= EDGE_BOTTOM;
2223 edge_h= FFMIN(h, v_edge_pos - y);
2225 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2226 linesize[0], h_edge_pos, edge_h,
2227 EDGE_WIDTH, EDGE_WIDTH, sides);
2228 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2229 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2230 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2231 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2232 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2233 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2236 h = FFMIN(h, avctx->height - y);
// Field pictures report only after both fields unless the user allows it.
2238 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2240 if (avctx->draw_horiz_band) {
2242 int offset[AV_NUM_DATA_POINTERS];
// In coded order (B frame / low delay / explicit flag) the band comes
// from the current picture; otherwise from the previous one.
2245 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2246 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2253 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2254 picture_structure == PICT_FRAME &&
2255 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2256 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2259 offset[0]= y * src->linesize[0];
2261 offset[2]= (y >> vshift) * src->linesize[1];
2262 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2268 avctx->draw_horiz_band(avctx, src, offset,
2269 y, picture_structure, h);
// MpegEncContext convenience wrapper around ff_draw_horiz_band(), pulling
// all parameters from the context; edge drawing only for unrestricted-MV
// non-intra-only streams.
2273 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2275 int draw_edges = s->unrestricted_mv && !s->intra_only;
2276 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2277 &s->last_picture, y, h, s->picture_structure,
2278 s->first_field, draw_edges, s->low_delay,
2279 s->v_edge_pos, s->h_edge_pos);
2282 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2283 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2284 const int uvlinesize = s->current_picture.f.linesize[1];
2285 const int mb_size= 4;
2287 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2288 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2289 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2290 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2291 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2292 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2293 //block_index is not used by mpeg2, so it is not affected by chroma_format
2295 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2296 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2297 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2299 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2301 if(s->picture_structure==PICT_FRAME){
2302 s->dest[0] += s->mb_y * linesize << mb_size;
2303 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2304 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2306 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2307 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2308 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2309 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 *                  (inverse) permutated to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int i;
    int16_t temp[64];

    if (last <= 0)
        return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* gather the coefficients that may be nonzero and clear their slots */
    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
        temp[j] = block[j];
        block[j] = 0;
    }

    /* scatter them back at their permuted positions */
    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
        const int perm_j = permutation[j];
        block[perm_j] = temp[j];
    }
}
2343 void ff_mpeg_flush(AVCodecContext *avctx){
2345 MpegEncContext *s = avctx->priv_data;
2347 if(s==NULL || s->picture==NULL)
2350 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2351 ff_mpeg_unref_picture(s, &s->picture[i]);
2352 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2354 ff_mpeg_unref_picture(s, &s->current_picture);
2355 ff_mpeg_unref_picture(s, &s->last_picture);
2356 ff_mpeg_unref_picture(s, &s->next_picture);
2358 s->mb_x= s->mb_y= 0;
2360 s->parse_context.state= -1;
2361 s->parse_context.frame_start_found= 0;
2362 s->parse_context.overread= 0;
2363 s->parse_context.overread_index= 0;
2364 s->parse_context.index= 0;
2365 s->parse_context.last_index= 0;
2366 s->bitstream_buffer_size=0;
2370 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2371 int16_t *block, int n, int qscale)
2373 int i, level, nCoeffs;
2374 const uint16_t *quant_matrix;
2376 nCoeffs= s->block_last_index[n];
2379 block[0] = block[0] * s->y_dc_scale;
2381 block[0] = block[0] * s->c_dc_scale;
2382 /* XXX: only mpeg1 */
2383 quant_matrix = s->intra_matrix;
2384 for(i=1;i<=nCoeffs;i++) {
2385 int j= s->intra_scantable.permutated[i];
2390 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2391 level = (level - 1) | 1;
2394 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2395 level = (level - 1) | 1;
2402 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2403 int16_t *block, int n, int qscale)
2405 int i, level, nCoeffs;
2406 const uint16_t *quant_matrix;
2408 nCoeffs= s->block_last_index[n];
2410 quant_matrix = s->inter_matrix;
2411 for(i=0; i<=nCoeffs; i++) {
2412 int j= s->intra_scantable.permutated[i];
2417 level = (((level << 1) + 1) * qscale *
2418 ((int) (quant_matrix[j]))) >> 4;
2419 level = (level - 1) | 1;
2422 level = (((level << 1) + 1) * qscale *
2423 ((int) (quant_matrix[j]))) >> 4;
2424 level = (level - 1) | 1;
2431 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2432 int16_t *block, int n, int qscale)
2434 int i, level, nCoeffs;
2435 const uint16_t *quant_matrix;
2437 if(s->alternate_scan) nCoeffs= 63;
2438 else nCoeffs= s->block_last_index[n];
2441 block[0] = block[0] * s->y_dc_scale;
2443 block[0] = block[0] * s->c_dc_scale;
2444 quant_matrix = s->intra_matrix;
2445 for(i=1;i<=nCoeffs;i++) {
2446 int j= s->intra_scantable.permutated[i];
2451 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2454 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2461 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2462 int16_t *block, int n, int qscale)
2464 int i, level, nCoeffs;
2465 const uint16_t *quant_matrix;
2468 if(s->alternate_scan) nCoeffs= 63;
2469 else nCoeffs= s->block_last_index[n];
2472 block[0] = block[0] * s->y_dc_scale;
2474 block[0] = block[0] * s->c_dc_scale;
2475 quant_matrix = s->intra_matrix;
2476 for(i=1;i<=nCoeffs;i++) {
2477 int j= s->intra_scantable.permutated[i];
2482 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2485 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2494 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2495 int16_t *block, int n, int qscale)
2497 int i, level, nCoeffs;
2498 const uint16_t *quant_matrix;
2501 if(s->alternate_scan) nCoeffs= 63;
2502 else nCoeffs= s->block_last_index[n];
2504 quant_matrix = s->inter_matrix;
2505 for(i=0; i<=nCoeffs; i++) {
2506 int j= s->intra_scantable.permutated[i];
2511 level = (((level << 1) + 1) * qscale *
2512 ((int) (quant_matrix[j]))) >> 4;
2515 level = (((level << 1) + 1) * qscale *
2516 ((int) (quant_matrix[j]))) >> 4;
2525 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2526 int16_t *block, int n, int qscale)
2528 int i, level, qmul, qadd;
2531 assert(s->block_last_index[n]>=0);
2537 block[0] = block[0] * s->y_dc_scale;
2539 block[0] = block[0] * s->c_dc_scale;
2540 qadd = (qscale - 1) | 1;
2547 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2549 for(i=1; i<=nCoeffs; i++) {
2553 level = level * qmul - qadd;
2555 level = level * qmul + qadd;
2562 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2563 int16_t *block, int n, int qscale)
2565 int i, level, qmul, qadd;
2568 assert(s->block_last_index[n]>=0);
2570 qadd = (qscale - 1) | 1;
2573 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2575 for(i=0; i<=nCoeffs; i++) {
2579 level = level * qmul - qadd;
2581 level = level * qmul + qadd;
2589 * set qscale and update qscale dependent variables.
2591 void ff_set_qscale(MpegEncContext * s, int qscale)
2595 else if (qscale > 31)
2599 s->chroma_qscale= s->chroma_qscale_table[qscale];
2601 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2602 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2605 void ff_MPV_report_decode_progress(MpegEncContext *s)
2607 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2608 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
#if CONFIG_ERROR_RESILIENCE
/**
 * Mirror the decoder state needed by the error-resilience code into the
 * ERContext and start error tracking for the new frame.
 */
void ff_mpeg_er_frame_start(MpegEncContext *s)
{
    ERContext *er = &s->er;

    er->cur_pic  = s->current_picture_ptr;
    er->last_pic = s->last_picture_ptr;
    er->next_pic = s->next_picture_ptr;

    er->pp_time           = s->pp_time;
    er->pb_time           = s->pb_time;
    er->quarter_sample    = s->quarter_sample;
    er->partitioned_frame = s->partitioned_frame;

    ff_er_frame_start(er);
}
#endif /* CONFIG_ERROR_RESILIENCE */