2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
37 #include "mpegvideo.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the C reference dequantization routines for each
 * supported standard (MPEG-1, MPEG-2, H.263). Function pointers in the
 * MpegEncContext are set to these in ff_dct_common_init() below. */
44 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
45 int16_t *block, int n, int qscale);
46 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
47 int16_t *block, int n, int qscale);
48 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
49 int16_t *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
51 int16_t *block, int n, int qscale);
52 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
53 int16_t *block, int n, int qscale);
54 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
55 int16_t *block, int n, int qscale);
56 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
57 int16_t *block, int n, int qscale);
/* Default chroma qscale table: the identity mapping (chroma qscale equals
 * luma qscale for all 32 values). */
59 static const uint8_t ff_default_chroma_qscale_table[32] = {
60 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
61 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
62 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: a constant 8 for every qscale value. */
65 const uint8_t ff_mpeg1_dc_scale_table[128] = {
66 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
67 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
68 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale, intra_dc_precision = 1: constant 4 for all qscales. */
77 static const uint8_t mpeg2_dc_scale_table1[128] = {
78 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
79 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
80 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale, intra_dc_precision = 2: constant 2 for all qscales. */
89 static const uint8_t mpeg2_dc_scale_table2[128] = {
90 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
91 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale, intra_dc_precision = 3: constant 1 for all qscales. */
101 static const uint8_t mpeg2_dc_scale_table3[128] = {
102 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
103 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
104 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Lookup of the four DC scale tables above, indexed by the MPEG-2
 * intra_dc_precision field (0 reuses the MPEG-1 table of 8s). */
113 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
114 ff_mpeg1_dc_scale_table,
115 mpeg2_dc_scale_table1,
116 mpeg2_dc_scale_table2,
117 mpeg2_dc_scale_table3,
120 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback: reconstruct one macroblock during error
 * concealment. Copies the MB parameters supplied by the ER code into the
 * context, recomputes the destination pointers for the current MB position,
 * and hands off to the regular MB decoder.
 * NOTE(review): an `mv` parameter is referenced below but not visible in this
 * excerpt's parameter list — presumably declared on an elided line; confirm. */
125 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
127 int mb_x, int mb_y, int mb_intra, int mb_skipped)
129 MpegEncContext *s = opaque;
132 s->mv_type = mv_type;
133 s->mb_intra = mb_intra;
134 s->mb_skipped = mb_skipped;
137 memcpy(s->mv, mv, sizeof(*mv));
139 ff_init_block_index(s);
140 ff_update_block_index(s);
142 s->dsp.clear_blocks(s->block[0]);
/* Point dest[] at the current MB inside the current picture; chroma planes
 * are offset by the chroma subsampling shifts. */
144 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
145 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
146 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
149 ff_MPV_decode_mb(s, s->block);
152 /* init common dct for both encoder and decoder */
153 av_cold int ff_dct_common_init(MpegEncContext *s)
/* DSP / half-pel / generic video DSP function-pointer tables. */
155 ff_dsputil_init(&s->dsp, s->avctx);
156 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
157 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* Install the C reference dequantizers; arch-specific init below may
 * override them with optimized versions. */
159 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
160 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
161 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
162 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
163 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
164 if (s->flags & CODEC_FLAG_BITEXACT)
165 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
166 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture overrides (guarded by ARCH_* conditionals in the
 * unelided file). */
169 ff_MPV_common_init_axp(s);
171 ff_MPV_common_init_arm(s);
173 ff_MPV_common_init_bfin(s);
175 ff_MPV_common_init_ppc(s);
177 ff_MPV_common_init_x86(s);
179 /* load & permutate scantables
180 * note: only wmv uses different ones
182 if (s->alternate_scan) {
183 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
184 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
186 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
187 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
189 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
190 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads). Returns AVERROR(ENOMEM) on failure;
 * on success the return path is on an elided line (presumably 0). */
195 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
197 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
199 // edge emu needs blocksize + filter length - 1
200 // (= 17x17 for halfpel / 21x21 for h264)
201 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
202 // at uvlinesize. It supports only YUV420 so 24x24 is enough
203 // linesize * interlaced * MBsize
204 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
207 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
/* The ME/RD/OBMC scratch pointers all alias the single scratchpad
 * allocation above; obmc starts 16 bytes in. */
209 s->me.temp = s->me.scratchpad;
210 s->rd_scratchpad = s->me.scratchpad;
211 s->b_scratchpad = s->me.scratchpad;
212 s->obmc_scratchpad = s->me.scratchpad + 16;
/* fail path: free what was allocated and report out-of-memory. */
216 av_freep(&s->edge_emu_buffer);
217 return AVERROR(ENOMEM);
221 * Allocate a frame buffer
223 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* WM Image / Screen codecs use internal buffers with different
 * dimensions, so bypass the threaded get_buffer for them and use the
 * default allocator directly below. */
228 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
229 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
230 s->codec_id != AV_CODEC_ID_MSS2)
231 r = ff_thread_get_buffer(s->avctx, &pic->tf,
232 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* else branch (elided): allocate via avcodec_default_get_buffer2. */
234 pic->f.width = s->avctx->width;
235 pic->f.height = s->avctx->height;
236 pic->f.format = s->avctx->pix_fmt;
237 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
240 if (r < 0 || !pic->f.data[0]) {
241 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Allocate per-frame hwaccel private data when a hardware accelerator
 * is in use. */
246 if (s->avctx->hwaccel) {
247 assert(!pic->hwaccel_picture_private);
248 if (s->avctx->hwaccel->priv_data_size) {
249 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
250 if (!pic->hwaccel_priv_buf) {
251 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
254 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* Sanity checks: the stride must not change mid-stream, and both chroma
 * planes must share one stride; otherwise release the picture and fail. */
258 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
259 s->uvlinesize != pic->f.linesize[1])) {
260 av_log(s->avctx, AV_LOG_ERROR,
261 "get_buffer() failed (stride changed)\n");
262 ff_mpeg_unref_picture(s, pic);
266 if (pic->f.linesize[1] != pic->f.linesize[2]) {
267 av_log(s->avctx, AV_LOG_ERROR,
268 "get_buffer() failed (uv stride mismatch)\n");
269 ff_mpeg_unref_picture(s, pic);
/* Lazily allocate the linesize-dependent scratch buffers now that the
 * real stride is known. */
273 if (!s->edge_emu_buffer &&
274 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
275 av_log(s->avctx, AV_LOG_ERROR,
276 "get_buffer() failed to allocate context scratch buffers.\n");
277 ff_mpeg_unref_picture(s, pic);
/* Release all refcounted side-data table buffers of a Picture
 * (variance/mean stats, skip/qscale/mb_type tables, and the per-direction
 * motion vector / reference index buffers). */
284 static void free_picture_tables(Picture *pic)
288 av_buffer_unref(&pic->mb_var_buf);
289 av_buffer_unref(&pic->mc_mb_var_buf);
290 av_buffer_unref(&pic->mb_mean_buf);
291 av_buffer_unref(&pic->mbskip_table_buf);
292 av_buffer_unref(&pic->qscale_table_buf);
293 av_buffer_unref(&pic->mb_type_buf);
295 for (i = 0; i < 2; i++) {
296 av_buffer_unref(&pic->motion_val_buf[i]);
297 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the refcounted side-data tables for one Picture. Sizes are
 * derived from the macroblock grid; the buffers freed by
 * free_picture_tables() are allocated here. Returns AVERROR(ENOMEM) on any
 * allocation failure (caller is expected to clean up). */
301 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
303 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
304 const int mb_array_size = s->mb_stride * s->mb_height;
305 const int b8_array_size = s->b8_stride * s->mb_height * 2;
/* Tables needed by both encoder and decoder. */
309 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
310 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
311 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
313 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
314 return AVERROR(ENOMEM);
/* Encoder-only statistics tables (guard condition on an elided line —
 * presumably s->encoding; confirm against the full file). */
317 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
318 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
319 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
320 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
321 return AVERROR(ENOMEM);
/* Motion vector and reference index tables: needed for H.263-family
 * output, when encoding, or when MV debugging/visualization is on. */
324 if (s->out_format == FMT_H263 || s->encoding ||
325 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
326 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
327 int ref_index_size = 4 * mb_array_size;
329 for (i = 0; mv_size && i < 2; i++) {
330 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
331 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
332 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
333 return AVERROR(ENOMEM);
/* Ensure every side-data table buffer of the Picture is writable
 * (copy-on-write via av_buffer_make_writable); on failure the macro frees
 * all tables and returns the error. */
340 static int make_tables_writable(Picture *pic)
343 #define MAKE_WRITABLE(table) \
346 (ret = av_buffer_make_writable(&pic->table)) < 0)\
350 MAKE_WRITABLE(mb_var_buf);
351 MAKE_WRITABLE(mc_mb_var_buf);
352 MAKE_WRITABLE(mb_mean_buf);
353 MAKE_WRITABLE(mbskip_table_buf);
354 MAKE_WRITABLE(qscale_table_buf);
355 MAKE_WRITABLE(mb_type_buf);
357 for (i = 0; i < 2; i++) {
358 MAKE_WRITABLE(motion_val_buf[i]);
359 MAKE_WRITABLE(ref_index_buf[i]);
366 * Allocate a Picture.
367 * The pixels are allocated/set by calling get_buffer() if shared = 0
369 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* shared != 0: pixel data must already be present; otherwise allocate the
 * frame buffer and record the strides for later consistency checks. */
374 assert(pic->f.data[0]);
377 assert(!pic->f.data[0]);
379 if (alloc_frame_buffer(s, pic) < 0)
382 s->linesize = pic->f.linesize[0];
383 s->uvlinesize = pic->f.linesize[1];
/* Allocate the side-data tables on first use, otherwise make the shared
 * refcounted tables writable before this Picture mutates them. */
386 if (!pic->qscale_table_buf)
387 ret = alloc_picture_tables(s, pic);
389 ret = make_tables_writable(pic);
/* Set the convenience pointers into the freshly (re)validated buffers;
 * the + 2 * mb_stride + 1 offsets skip the guard rows/column. */
394 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
395 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
396 pic->mb_mean = pic->mb_mean_buf->data;
399 pic->mbskip_table = pic->mbskip_table_buf->data;
400 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
401 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
403 if (pic->motion_val_buf[0]) {
404 for (i = 0; i < 2; i++) {
405 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
406 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* fail path: release everything and report out-of-memory. */
412 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
413 ff_mpeg_unref_picture(s, pic);
414 free_picture_tables(pic);
415 return AVERROR(ENOMEM);
419 * Deallocate a picture.
421 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
/* Everything past mb_mean in the Picture struct is cleared wholesale at the
 * end; fields up to and including mb_mean are released individually. */
423 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
426 /* WM Image / Screen codecs allocate internal buffers with different
427 * dimensions / colorspaces; ignore user-defined callbacks for these. */
428 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
429 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
430 s->codec_id != AV_CODEC_ID_MSS2)
431 ff_thread_release_buffer(s->avctx, &pic->tf);
433 av_frame_unref(&pic->f);
435 av_buffer_unref(&pic->hwaccel_priv_buf);
/* Drop the side-data tables only if a realloc was requested; otherwise the
 * refcounted buffers are kept for reuse. */
437 if (pic->needs_realloc)
438 free_picture_tables(pic);
440 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's side-data tables reference the same buffers as src's:
 * for each table, re-ref only if dst does not already share src's buffer;
 * on a failed av_buffer_ref the macro frees dst's tables and bails out.
 * Afterwards the raw convenience pointers are copied verbatim. */
443 static int update_picture_tables(Picture *dst, Picture *src)
447 #define UPDATE_TABLE(table)\
450 (!dst->table || dst->table->buffer != src->table->buffer)) {\
451 av_buffer_unref(&dst->table);\
452 dst->table = av_buffer_ref(src->table);\
454 free_picture_tables(dst);\
455 return AVERROR(ENOMEM);\
460 UPDATE_TABLE(mb_var_buf);
461 UPDATE_TABLE(mc_mb_var_buf);
462 UPDATE_TABLE(mb_mean_buf);
463 UPDATE_TABLE(mbskip_table_buf);
464 UPDATE_TABLE(qscale_table_buf);
465 UPDATE_TABLE(mb_type_buf);
466 for (i = 0; i < 2; i++) {
467 UPDATE_TABLE(motion_val_buf[i]);
468 UPDATE_TABLE(ref_index_buf[i]);
471 dst->mb_var = src->mb_var;
472 dst->mc_mb_var = src->mc_mb_var;
473 dst->mb_mean = src->mb_mean;
474 dst->mbskip_table = src->mbskip_table;
475 dst->qscale_table = src->qscale_table;
476 dst->mb_type = src->mb_type;
477 for (i = 0; i < 2; i++) {
478 dst->motion_val[i] = src->motion_val[i];
479 dst->ref_index[i] = src->ref_index[i];
/* Create a new reference in dst to the picture in src: ref the frame via
 * the frame-threading helper, share the side-data tables, ref the hwaccel
 * private buffer if present, then copy the scalar bookkeeping fields.
 * On any failure dst is unreffed (see the fail path at the end). */
485 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
489 av_assert0(!dst->f.buf[0]);
490 av_assert0(src->f.buf[0]);
494 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
498 ret = update_picture_tables(dst, src);
502 if (src->hwaccel_picture_private) {
503 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
504 if (!dst->hwaccel_priv_buf)
506 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
509 dst->field_picture = src->field_picture;
510 dst->mb_var_sum = src->mb_var_sum;
511 dst->mc_mb_var_sum = src->mc_mb_var_sum;
512 dst->b_frame_score = src->b_frame_score;
513 dst->needs_realloc = src->needs_realloc;
514 dst->reference = src->reference;
515 dst->shared = src->shared;
/* fail path: undo the partial reference. */
519 ff_mpeg_unref_picture(s, dst);
/* Allocate the per-slice-thread (duplicate) context buffers: ME maps,
 * optional noise-reduction error sums, the DCT block arrays, and — for
 * H.263-family output — the AC prediction values. */
523 static int init_duplicate_context(MpegEncContext *s)
525 int y_size = s->b8_stride * (2 * s->mb_height + 1);
526 int c_size = s->mb_stride * (s->mb_height + 1);
527 int yc_size = y_size + 2 * c_size;
535 s->obmc_scratchpad = NULL;
538 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
539 ME_MAP_SIZE * sizeof(uint32_t), fail)
540 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
541 ME_MAP_SIZE * sizeof(uint32_t), fail)
542 if (s->avctx->noise_reduction) {
543 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
544 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered. */
547 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
548 s->block = s->blocks[0];
550 for (i = 0; i < 12; i++) {
551 s->pblocks[i] = &s->block[i];
554 if (s->out_format == FMT_H263) {
556 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
557 yc_size * sizeof(int16_t) * 16, fail);
/* ac_val[0] = luma plane, [1]/[2] = the two chroma planes, each offset
 * past the guard row/column. */
558 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
559 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
560 s->ac_val[2] = s->ac_val[1] + c_size;
565 return -1; // free() through ff_MPV_common_end()
/* Free everything allocated by init_duplicate_context() and
 * ff_mpv_frame_size_alloc() for one slice-thread context. */
568 static void free_duplicate_context(MpegEncContext *s)
573 av_freep(&s->edge_emu_buffer);
574 av_freep(&s->me.scratchpad);
/* obmc_scratchpad aliases me.scratchpad (see ff_mpv_frame_size_alloc), so
 * it is only NULLed, not freed. */
578 s->obmc_scratchpad = NULL;
580 av_freep(&s->dct_error_sum);
581 av_freep(&s->me.map);
582 av_freep(&s->me.score_map);
583 av_freep(&s->blocks);
584 av_freep(&s->ac_val_base);
/* Copy the per-thread pointer/state fields from src into bak so they
 * survive the wholesale memcpy in ff_update_duplicate_context(). Further
 * COPY() lines are elided from this excerpt. */
588 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
590 #define COPY(a) bak->a = src->a
591 COPY(edge_emu_buffer);
596 COPY(obmc_scratchpad);
603 COPY(me.map_generation);
/* Refresh a slice-thread context from the master: save dst's private
 * buffers, memcpy the whole master context over dst, restore the private
 * buffers, then re-point pblocks and (re)allocate scratch buffers if the
 * copy left them unset. */
615 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
619 // FIXME copy only needed parts
621 backup_duplicate_context(&bak, dst);
622 memcpy(dst, src, sizeof(MpegEncContext));
623 backup_duplicate_context(dst, &bak);
624 for (i = 0; i < 12; i++) {
625 dst->pblocks[i] = &dst->block[i];
627 if (!dst->edge_emu_buffer &&
628 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
629 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
630 "scratch buffers.\n");
633 // STOP_TIMER("update_duplicate_context")
634 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: synchronize the destination decoder context with the
 * source context after the source finished (part of) a frame. Handles
 * first-time init, resolution changes, picture references, and copies the
 * decoder state blocks needed to continue decoding. */
638 int ff_mpeg_update_thread_context(AVCodecContext *dst,
639 const AVCodecContext *src)
642 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
/* Nothing to do when updating from self or from an uninitialized source. */
644 if (dst == src || !s1->context_initialized)
647 // FIXME can parameters change on I-frames?
648 // in that case dst may need a reinit
649 if (!s->context_initialized) {
/* First update: clone the source context wholesale, then reset the
 * per-context bitstream buffer before initializing. */
650 memcpy(s, s1, sizeof(MpegEncContext));
653 s->bitstream_buffer = NULL;
654 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
656 ff_MPV_common_init(s);
/* Resolution changed (or an explicit reinit was requested): redo the
 * frame-size-dependent allocations. */
659 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
661 s->context_reinit = 0;
662 s->height = s1->height;
663 s->width = s1->width;
664 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
668 s->avctx->coded_height = s1->avctx->coded_height;
669 s->avctx->coded_width = s1->avctx->coded_width;
670 s->avctx->width = s1->avctx->width;
671 s->avctx->height = s1->avctx->height;
673 s->coded_picture_number = s1->coded_picture_number;
674 s->picture_number = s1->picture_number;
675 s->input_picture_number = s1->input_picture_number;
/* Re-reference every picture of the source in the destination. */
677 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
678 ff_mpeg_unref_picture(s, &s->picture[i]);
679 if (s1->picture[i].f.data[0] &&
680 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
684 #define UPDATE_PICTURE(pic)\
686 ff_mpeg_unref_picture(s, &s->pic);\
687 if (s1->pic.f.data[0])\
688 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
690 ret = update_picture_tables(&s->pic, &s1->pic);\
695 UPDATE_PICTURE(current_picture);
696 UPDATE_PICTURE(last_picture);
697 UPDATE_PICTURE(next_picture);
/* Translate the source's picture pointers into this context's array. */
699 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
700 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
701 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
703 // Error/bug resilience
704 s->next_p_frame_damaged = s1->next_p_frame_damaged;
705 s->workaround_bugs = s1->workaround_bugs;
/* Bulk-copy the MPEG-4 field range [time_increment_bits, shape). */
708 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
709 (char *) &s1->shape - (char *) &s1->time_increment_bits);
712 s->max_b_frames = s1->max_b_frames;
713 s->low_delay = s1->low_delay;
714 s->droppable = s1->droppable;
716 // DivX handling (doesn't work)
717 s->divx_packed = s1->divx_packed;
/* Copy any leftover bitstream data (padded, zero-terminated). */
719 if (s1->bitstream_buffer) {
720 if (s1->bitstream_buffer_size +
721 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
722 av_fast_malloc(&s->bitstream_buffer,
723 &s->allocated_bitstream_buffer_size,
724 s1->allocated_bitstream_buffer_size);
725 s->bitstream_buffer_size = s1->bitstream_buffer_size;
726 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
727 s1->bitstream_buffer_size);
728 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
729 FF_INPUT_BUFFER_PADDING_SIZE);
732 // linesize dependend scratch buffer allocation
733 if (!s->edge_emu_buffer)
735 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
736 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
737 "scratch buffers.\n");
738 return AVERROR(ENOMEM);
/* else branch (elided): the source's linesize is still unknown. */
741 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
742 "be allocated due to unknown size.\n");
746 // MPEG2/interlacing info
747 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
748 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
750 if (!s1->first_field) {
751 s->last_pict_type = s1->pict_type;
752 if (s1->current_picture_ptr)
753 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
755 if (s1->pict_type != AV_PICTURE_TYPE_B) {
756 s->last_non_b_pict_type = s1->pict_type;
764 * Set the given MpegEncContext to common defaults
765 * (same for encoding and decoding).
766 * The changed fields will not depend upon the
767 * prior state of the MpegEncContext.
769 void ff_MPV_common_defaults(MpegEncContext *s)
/* MPEG-1 DC scale and identity chroma qscale mapping are the baseline;
 * codec-specific init overrides these as needed. */
771 s->y_dc_scale_table =
772 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
773 s->chroma_qscale_table = ff_default_chroma_qscale_table;
774 s->progressive_frame = 1;
775 s->progressive_sequence = 1;
776 s->picture_structure = PICT_FRAME;
778 s->coded_picture_number = 0;
779 s->picture_number = 0;
780 s->input_picture_number = 0;
782 s->picture_in_gop_number = 0;
787 s->slice_context_count = 1;
791 * Set the given MpegEncContext to defaults for decoding.
792 * the changed fields will not depend upon
793 * the prior state of the MpegEncContext.
795 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Currently identical to the common defaults; decoder-specific defaults
 * would go after this call. */
797 ff_MPV_common_defaults(s);
/* Initialize the error-resilience context: mirror the MB-grid geometry and
 * shared tables from the MpegEncContext, allocate the ER work buffers, and
 * install mpeg_er_decode_mb as the concealment callback. */
800 static int init_er(MpegEncContext *s)
802 ERContext *er = &s->er;
803 int mb_array_size = s->mb_height * s->mb_stride;
806 er->avctx = s->avctx;
809 er->mb_index2xy = s->mb_index2xy;
810 er->mb_num = s->mb_num;
811 er->mb_width = s->mb_width;
812 er->mb_height = s->mb_height;
813 er->mb_stride = s->mb_stride;
814 er->b8_stride = s->b8_stride;
816 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
817 er->error_status_table = av_mallocz(mb_array_size);
818 if (!er->er_temp_buffer || !er->error_status_table)
/* Shared with the main context — not owned by the ER context. */
821 er->mbskip_table = s->mbskip_table;
822 er->mbintra_table = s->mbintra_table;
824 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
825 er->dc_val[i] = s->dc_val[i];
827 er->decode_mb = mpeg_er_decode_mb;
/* fail path: release the ER work buffers. */
832 av_freep(&er->er_temp_buffer);
833 av_freep(&er->error_status_table);
834 return AVERROR(ENOMEM);
838 * Initialize and allocates MpegEncContext fields dependent on the resolution.
840 static int init_context_frame(MpegEncContext *s)
842 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Derive the MB grid geometry from the pixel dimensions; strides have one
 * extra column as a guard. (mb_height is set by the caller.) */
844 s->mb_width = (s->width + 15) / 16;
845 s->mb_stride = s->mb_width + 1;
846 s->b8_stride = s->mb_width * 2 + 1;
847 s->b4_stride = s->mb_width * 4 + 1;
848 mb_array_size = s->mb_height * s->mb_stride;
849 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
851 /* set default edge pos, will be overriden
852 * in decode_header if needed */
853 s->h_edge_pos = s->mb_width * 16;
854 s->v_edge_pos = s->mb_height * 16;
856 s->mb_num = s->mb_width * s->mb_height;
861 s->block_wrap[3] = s->b8_stride;
863 s->block_wrap[5] = s->mb_stride;
865 y_size = s->b8_stride * (2 * s->mb_height + 1);
866 c_size = s->mb_stride * (s->mb_height + 1);
867 yc_size = y_size + 2 * c_size;
869 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
870 fail); // error ressilience code looks cleaner with this
871 for (y = 0; y < s->mb_height; y++)
872 for (x = 0; x < s->mb_width; x++)
873 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
875 s->mb_index2xy[s->mb_height * s->mb_width] =
876 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
879 /* Allocate MV tables */
880 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
881 mv_table_size * 2 * sizeof(int16_t), fail);
882 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
883 mv_table_size * 2 * sizeof(int16_t), fail);
884 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
885 mv_table_size * 2 * sizeof(int16_t), fail);
886 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
887 mv_table_size * 2 * sizeof(int16_t), fail);
888 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
889 mv_table_size * 2 * sizeof(int16_t), fail);
890 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
891 mv_table_size * 2 * sizeof(int16_t), fail);
/* The working pointers skip the first guard row + column. */
892 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
893 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
894 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
895 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
897 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
899 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
901 /* Allocate MB type table */
902 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
903 sizeof(uint16_t), fail); // needed for encoding
905 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
908 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
909 mb_array_size * sizeof(float), fail);
910 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
911 mb_array_size * sizeof(float), fail);
915 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
916 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
917 /* interlaced direct mode decoding tables */
918 for (i = 0; i < 2; i++) {
920 for (j = 0; j < 2; j++) {
921 for (k = 0; k < 2; k++) {
922 FF_ALLOCZ_OR_GOTO(s->avctx,
923 s->b_field_mv_table_base[i][j][k],
924 mv_table_size * 2 * sizeof(int16_t),
926 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
929 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
930 mb_array_size * 2 * sizeof(uint8_t), fail);
931 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
932 mv_table_size * 2 * sizeof(int16_t), fail);
933 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
936 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
937 mb_array_size * 2 * sizeof(uint8_t), fail);
940 if (s->out_format == FMT_H263) {
942 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
943 s->coded_block = s->coded_block_base + s->b8_stride + 1;
945 /* cbp, ac_pred, pred_dir */
946 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
947 mb_array_size * sizeof(uint8_t), fail);
948 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
949 mb_array_size * sizeof(uint8_t), fail);
952 if (s->h263_pred || s->h263_plus || !s->encoding) {
954 // MN: we need these for error resilience of intra-frames
955 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
956 yc_size * sizeof(int16_t), fail);
957 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
958 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
959 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor value. */
960 for (i = 0; i < yc_size; i++)
961 s->dc_val_base[i] = 1024;
964 /* which mb is a intra block */
965 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
966 memset(s->mbintra_table, 1, mb_array_size);
968 /* init macroblock skip table */
969 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
970 // Note the + 1 is for a quicker mpeg4 slice_end detection
/* fail path for all FF_ALLOC*_OR_GOTO macros above. */
974 return AVERROR(ENOMEM);
978 * init common structure for both encoder and decoder.
979 * this assumes that some variables like width/height are already set
981 av_cold int ff_MPV_common_init(MpegEncContext *s)
984 int nb_slices = (HAVE_THREADS &&
985 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
986 s->avctx->thread_count : 1;
988 if (s->encoding && s->avctx->slices)
989 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 rounds mb_height up to a multiple of two field MBs. */
991 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
992 s->mb_height = (s->height + 31) / 32 * 2;
994 s->mb_height = (s->height + 15) / 16;
996 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
997 av_log(s->avctx, AV_LOG_ERROR,
998 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp the slice count to what the MB height / MAX_THREADS allow. */
1002 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1005 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1007 max_slices = MAX_THREADS;
1008 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1009 " reducing to %d\n", nb_slices, max_slices);
1010 nb_slices = max_slices;
1013 if ((s->width || s->height) &&
1014 av_image_check_size(s->width, s->height, 0, s->avctx))
1017 ff_dct_common_init(s);
1019 s->flags = s->avctx->flags;
1020 s->flags2 = s->avctx->flags2;
1022 if (s->width && s->height) {
1023 /* set chroma shifts */
1024 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1026 &s->chroma_y_shift);
1028 /* convert fourcc to upper case */
1029 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1031 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1033 s->avctx->coded_frame = &s->current_picture.f;
/* Encoder-only allocations (the s->encoding guard is on an elided line —
 * confirm against the full file). */
1036 if (s->msmpeg4_version) {
1037 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1038 2 * 2 * (MAX_LEVEL + 1) *
1039 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1041 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1043 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
1044 64 * 32 * sizeof(int), fail);
1045 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
1046 64 * 32 * sizeof(int), fail);
1047 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
1048 64 * 32 * 2 * sizeof(uint16_t), fail);
1049 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
1050 64 * 32 * 2 * sizeof(uint16_t), fail);
1051 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
1052 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1053 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
1054 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1056 if (s->avctx->noise_reduction) {
1057 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
1058 2 * 64 * sizeof(uint16_t), fail);
/* Picture pool plus the three working pictures, all reset to frame
 * defaults. */
1063 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1064 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1065 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1066 avcodec_get_frame_defaults(&s->picture[i].f);
1068 memset(&s->next_picture, 0, sizeof(s->next_picture));
1069 memset(&s->last_picture, 0, sizeof(s->last_picture));
1070 memset(&s->current_picture, 0, sizeof(s->current_picture));
1071 avcodec_get_frame_defaults(&s->next_picture.f);
1072 avcodec_get_frame_defaults(&s->last_picture.f);
1073 avcodec_get_frame_defaults(&s->current_picture.f);
1075 if (s->width && s->height) {
1076 if (init_context_frame(s))
1079 s->parse_context.state = -1;
1082 s->context_initialized = 1;
1083 s->thread_context[0] = s;
/* Create the per-slice-thread duplicate contexts and split the MB rows
 * (roughly) evenly between them. */
1085 if (s->width && s->height) {
1086 if (nb_slices > 1) {
1087 for (i = 1; i < nb_slices; i++) {
1088 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1089 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1092 for (i = 0; i < nb_slices; i++) {
1093 if (init_duplicate_context(s->thread_context[i]) < 0)
1095 s->thread_context[i]->start_mb_y =
1096 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1097 s->thread_context[i]->end_mb_y =
1098 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1101 if (init_duplicate_context(s) < 0)
1104 s->end_mb_y = s->mb_height;
1106 s->slice_context_count = nb_slices;
/* fail path: tear down everything allocated so far. */
1111 ff_MPV_common_end(s);
1116 * Frees and resets MpegEncContext fields depending on the resolution.
1117 * Is used during resolution changes to avoid a full reinitialization of the
1120 static int free_context_frame(MpegEncContext *s)
/* Mirror of init_context_frame(): free every table it allocated and NULL
 * the derived working pointers. */
1124 av_freep(&s->mb_type);
1125 av_freep(&s->p_mv_table_base);
1126 av_freep(&s->b_forw_mv_table_base);
1127 av_freep(&s->b_back_mv_table_base);
1128 av_freep(&s->b_bidir_forw_mv_table_base);
1129 av_freep(&s->b_bidir_back_mv_table_base);
1130 av_freep(&s->b_direct_mv_table_base);
1131 s->p_mv_table = NULL;
1132 s->b_forw_mv_table = NULL;
1133 s->b_back_mv_table = NULL;
1134 s->b_bidir_forw_mv_table = NULL;
1135 s->b_bidir_back_mv_table = NULL;
1136 s->b_direct_mv_table = NULL;
1137 for (i = 0; i < 2; i++) {
1138 for (j = 0; j < 2; j++) {
1139 for (k = 0; k < 2; k++) {
1140 av_freep(&s->b_field_mv_table_base[i][j][k]);
1141 s->b_field_mv_table[i][j][k] = NULL;
1143 av_freep(&s->b_field_select_table[i][j]);
1144 av_freep(&s->p_field_mv_table_base[i][j]);
1145 s->p_field_mv_table[i][j] = NULL;
1147 av_freep(&s->p_field_select_table[i]);
1150 av_freep(&s->dc_val_base);
1151 av_freep(&s->coded_block_base);
1152 av_freep(&s->mbintra_table);
1153 av_freep(&s->cbp_table);
1154 av_freep(&s->pred_dir_table);
1156 av_freep(&s->mbskip_table);
1158 av_freep(&s->er.error_status_table);
1159 av_freep(&s->er.er_temp_buffer);
1160 av_freep(&s->mb_index2xy);
1161 av_freep(&s->lambda_table);
1162 av_freep(&s->cplx_tab);
1163 av_freep(&s->bits_tab);
/* Strides are reset so the next frame allocation re-validates them. */
1165 s->linesize = s->uvlinesize = 0;
1170 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Re-initialize the context for a new frame size without a full
 * ff_MPV_common_end()/ff_MPV_common_init() cycle: tear down the
 * per-resolution state, then rebuild it and the slice thread contexts. */
/* First free the duplicated slice contexts (index 0 is the main context
 * itself and is only freed of its duplicate-only members). */
1174 if (s->slice_context_count > 1) {
1175 for (i = 0; i < s->slice_context_count; i++) {
1176 free_duplicate_context(s->thread_context[i]);
1178 for (i = 1; i < s->slice_context_count; i++) {
1179 av_freep(&s->thread_context[i]);
1182 free_duplicate_context(s);
1184 if ((err = free_context_frame(s)) < 0)
/* Force every cached picture to be reallocated at the new size on next use. */
1188 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1189 s->picture[i].needs_realloc = 1;
1192 s->last_picture_ptr =
1193 s->next_picture_ptr =
1194 s->current_picture_ptr = NULL;
/* mb_height: interlaced MPEG-2 rounds up to a multiple of two macroblock
 * rows (32 luma lines); everything else rounds to 16-line MB rows. */
1197 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1198 s->mb_height = (s->height + 31) / 32 * 2;
1200 s->mb_height = (s->height + 15) / 16;
1202 if ((s->width || s->height) &&
1203 av_image_check_size(s->width, s->height, 0, s->avctx))
1204 return AVERROR_INVALIDDATA;
1206 if ((err = init_context_frame(s)))
1209 s->thread_context[0] = s;
1211 if (s->width && s->height) {
1212 int nb_slices = s->slice_context_count;
/* Recreate the slice contexts as shallow copies of the main context ... */
1213 if (nb_slices > 1) {
1214 for (i = 1; i < nb_slices; i++) {
1215 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1216 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
/* ... then give each its own scratch buffers and an even share of MB rows
 * (rounded by adding nb_slices/2 before the division). */
1219 for (i = 0; i < nb_slices; i++) {
1220 if (init_duplicate_context(s->thread_context[i]) < 0)
1222 s->thread_context[i]->start_mb_y =
1223 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1224 s->thread_context[i]->end_mb_y =
1225 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1228 if (init_duplicate_context(s) < 0)
1231 s->end_mb_y = s->mb_height;
1233 s->slice_context_count = nb_slices;
/* Error path: release everything (full teardown) before reporting failure. */
1238 ff_MPV_common_end(s);
1242 /* init common structure for both encoder and decoder */
1243 void ff_MPV_common_end(MpegEncContext *s)
/* Full teardown of an MpegEncContext (encoder and decoder): slice thread
 * contexts, bitstream/parse buffers, quant matrices, all pictures and the
 * per-resolution tables. Leaves the context marked uninitialized. */
1247 if (s->slice_context_count > 1) {
1248 for (i = 0; i < s->slice_context_count; i++) {
1249 free_duplicate_context(s->thread_context[i]);
/* thread_context[0] aliases s itself, so only indices >= 1 are freed. */
1251 for (i = 1; i < s->slice_context_count; i++) {
1252 av_freep(&s->thread_context[i]);
1254 s->slice_context_count = 1;
1255 } else free_duplicate_context(s);
1257 av_freep(&s->parse_context.buffer);
1258 s->parse_context.buffer_size = 0;
1260 av_freep(&s->bitstream_buffer);
1261 s->allocated_bitstream_buffer_size = 0;
/* Encoder-side statistics and quantization tables. */
1263 av_freep(&s->avctx->stats_out);
1264 av_freep(&s->ac_stats);
1266 av_freep(&s->q_intra_matrix);
1267 av_freep(&s->q_inter_matrix);
1268 av_freep(&s->q_intra_matrix16);
1269 av_freep(&s->q_inter_matrix16);
1270 av_freep(&s->input_picture);
1271 av_freep(&s->reordered_input_picture);
1272 av_freep(&s->dct_offset);
/* Release the picture pool, then the embedded helper Pictures
 * (last/current/next/new), each needing both its tables and its frame refs
 * dropped. */
1275 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1276 free_picture_tables(&s->picture[i]);
1277 ff_mpeg_unref_picture(s, &s->picture[i]);
1280 av_freep(&s->picture);
1281 free_picture_tables(&s->last_picture);
1282 ff_mpeg_unref_picture(s, &s->last_picture);
1283 free_picture_tables(&s->current_picture);
1284 ff_mpeg_unref_picture(s, &s->current_picture);
1285 free_picture_tables(&s->next_picture);
1286 ff_mpeg_unref_picture(s, &s->next_picture);
1287 free_picture_tables(&s->new_picture);
1288 ff_mpeg_unref_picture(s, &s->new_picture);
1290 free_context_frame(s);
1292 s->context_initialized = 0;
1293 s->last_picture_ptr =
1294 s->next_picture_ptr =
1295 s->current_picture_ptr = NULL;
1296 s->linesize = s->uvlinesize = 0;
1299 av_cold void ff_init_rl(RLTable *rl,
1300 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
/* Build the max_level[], max_run[] and index_run[] lookup tables of an
 * RLTable, once per table. If static_store is non-NULL the results live in
 * that caller-provided static buffer, otherwise they are av_malloc()ed. */
1302 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1303 uint8_t index_run[MAX_RUN + 1];
1304 int last, run, level, start, end, i;
1306 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1307 if (static_store && rl->max_level[0])
1310 /* compute max_level[], max_run[] and index_run[] */
/* Pass 0 covers the non-last codes, pass 1 the "last" codes; start/end
 * select the corresponding slice of table_run/table_level (elided here). */
1311 for (last = 0; last < 2; last++) {
/* rl->n doubles as the "unset" marker for index_run, since it is one past
 * the largest valid code index. */
1320 memset(max_level, 0, MAX_RUN + 1);
1321 memset(max_run, 0, MAX_LEVEL + 1);
1322 memset(index_run, rl->n, MAX_RUN + 1);
1323 for (i = start; i < end; i++) {
1324 run = rl->table_run[i];
1325 level = rl->table_level[i];
1326 if (index_run[run] == rl->n)
1328 if (level > max_level[run])
1329 max_level[run] = level;
1330 if (run > max_run[level])
1331 max_run[level] = run;
/* Copy the scratch arrays to their final storage: static_store is carved
 * into three consecutive sub-arrays; otherwise each gets its own heap
 * buffer. NOTE(review): the av_malloc() results are used unchecked. */
1334 rl->max_level[last] = static_store[last];
1336 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1337 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1339 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1341 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1342 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1344 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1346 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1347 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1352 av_cold void ff_init_vlc_rl(RLTable *rl)
/* Pre-expand the RL VLC into per-qscale RL_VLC tables (one per q in 0..31)
 * so the decoder can read run/level/len with a single table lookup,
 * with the dequantization (level * qmul + qadd) already folded in. */
1355 for (q = 0; q < 32; q++) {
/* qadd = (q - 1) | 1 makes the rounding offset odd, matching the H.263
 * style dequantizer; qmul setup is elided in this excerpt. */
1357 int qadd = (q - 1) | 1;
1363 for (i = 0; i < rl->vlc.table_size; i++) {
1364 int code = rl->vlc.table[i][0];
1365 int len = rl->vlc.table[i][1];
/* Three special encodings in the VLC table: len==0 is an invalid code,
 * len<0 points to a sub-table, code==rl->n is the escape symbol. */
1368 if (len == 0) { // illegal code
1371 } else if (len < 0) { // more bits needed
1375 if (code == rl->n) { // esc
/* Normal code: run is stored off-by-one; codes at/после rl->last mark
 * "last coefficient" and get +192 added to run as a flag. */
1379 run = rl->table_run[code] + 1;
1380 level = rl->table_level[code] * qmul + qadd;
1381 if (code >= rl->last) run += 192;
1384 rl->rl_vlc[q][i].len = len;
1385 rl->rl_vlc[q][i].level = level;
1386 rl->rl_vlc[q][i].run = run;
1391 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
/* Drop the frame buffers of every non-reference picture in the pool.
 * The current picture is spared unless remove_current is set. */
1395 /* release non reference frames */
1396 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1397 if (!s->picture[i].reference &&
1398 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1399 ff_mpeg_unref_picture(s, &s->picture[i]);
1404 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
/* A picture slot is reusable if it holds no data, or if it is flagged for
 * reallocation and is not being held back as a delayed reference. */
1406 if (pic->f.data[0] == NULL)
1408 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1413 static int find_unused_picture(MpegEncContext *s, int shared)
/* Return the index of a free slot in s->picture[]. The shared path accepts
 * only slots with no data at all; the normal path also accepts slots that
 * pic_is_unused() says can be reclaimed. Fails with AVERROR_INVALIDDATA
 * when the pool is exhausted. */
1418 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1419 if (s->picture[i].f.data[0] == NULL)
1423 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1424 if (pic_is_unused(s, &s->picture[i]))
1429 return AVERROR_INVALIDDATA;
1432 int ff_find_unused_picture(MpegEncContext *s, int shared)
/* Public wrapper around find_unused_picture(): if the chosen slot was
 * flagged needs_realloc, scrub it (tables, frame refs, defaults) so the
 * caller gets a clean slot. Returns the slot index or a negative error. */
1434 int ret = find_unused_picture(s, shared);
1436 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1437 if (s->picture[ret].needs_realloc) {
1438 s->picture[ret].needs_realloc = 0;
1439 free_picture_tables(&s->picture[ret]);
1440 ff_mpeg_unref_picture(s, &s->picture[ret]);
1441 avcodec_get_frame_defaults(&s->picture[ret].f);
1447 static void update_noise_reduction(MpegEncContext *s)
/* Encoder noise reduction: refresh the per-coefficient DCT offsets from the
 * accumulated error statistics, separately for intra and inter blocks. */
1451 for (intra = 0; intra < 2; intra++) {
/* Halve the accumulators once the count passes 2^16 so the statistics act
 * as a decaying average and never overflow. */
1452 if (s->dct_count[intra] > (1 << 16)) {
1453 for (i = 0; i < 64; i++) {
1454 s->dct_error_sum[intra][i] >>= 1;
1456 s->dct_count[intra] >>= 1;
/* offset = strength * count / error_sum, computed with rounding
 * (+error_sum/2) and a +1 in the denominator to avoid division by zero. */
1459 for (i = 0; i < 64; i++) {
1460 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1461 s->dct_count[intra] +
1462 s->dct_error_sum[intra][i] / 2) /
1463 (s->dct_error_sum[intra][i] + 1);
1469 * generic function for encode/decode called after coding/decoding
1470 * the header and before a frame is coded/decoded.
1472 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* Per-frame setup shared by encoder and decoder, run after the header is
 * parsed and before any macroblock is coded/decoded: rotates the
 * last/next/current picture pointers, allocates the current frame (and
 * dummy references when the stream starts on a non-keyframe), and selects
 * the dequantizer functions for this frame. Returns 0 or a negative error
 * (error paths elided in this excerpt). */
1478 /* mark & release old frames */
1479 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1480 s->last_picture_ptr != s->next_picture_ptr &&
1481 s->last_picture_ptr->f.data[0]) {
1482 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1485 /* release forgotten pictures */
1486 /* if (mpeg124/h263) */
/* A referenced picture that is neither last nor next can only remain from
 * a broken stream or a seek — reclaim it ("zombie"). */
1488 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1489 if (&s->picture[i] != s->last_picture_ptr &&
1490 &s->picture[i] != s->next_picture_ptr &&
1491 s->picture[i].reference && !s->picture[i].needs_realloc) {
1492 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1493 av_log(avctx, AV_LOG_ERROR,
1494 "releasing zombie picture\n");
1495 ff_mpeg_unref_picture(s, &s->picture[i]);
1500 ff_mpeg_unref_picture(s, &s->current_picture);
1503 ff_release_unused_pictures(s, 1);
/* Pick the Picture for the frame being coded: reuse a pre-set empty
 * current_picture_ptr if one exists, otherwise grab a free pool slot. */
1505 if (s->current_picture_ptr &&
1506 s->current_picture_ptr->f.data[0] == NULL) {
1507 // we already have a unused image
1508 // (maybe it was set before reading the header)
1509 pic = s->current_picture_ptr;
1511 i = ff_find_unused_picture(s, 0);
1513 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1516 pic = &s->picture[i];
/* Non-droppable non-B frames become references (detail elided). */
1520 if (!s->droppable) {
1521 if (s->pict_type != AV_PICTURE_TYPE_B)
1525 pic->f.coded_picture_number = s->coded_picture_number++;
1527 if (ff_alloc_picture(s, pic, 0) < 0)
1530 s->current_picture_ptr = pic;
1531 // FIXME use only the vars from current_pic
1532 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1533 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1534 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* For field pictures, top_field_first is derived from which field comes
 * first rather than from the header flag. */
1535 if (s->picture_structure != PICT_FRAME)
1536 s->current_picture_ptr->f.top_field_first =
1537 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1539 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1540 !s->progressive_sequence;
1541 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1544 s->current_picture_ptr->f.pict_type = s->pict_type;
1545 // if (s->flags && CODEC_FLAG_QSCALE)
1546 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1547 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1549 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1550 s->current_picture_ptr)) < 0)
/* Reference rotation: for P/I frames the previous "next" becomes "last"
 * and the new frame becomes "next"; B-frames leave the pair untouched. */
1553 if (s->pict_type != AV_PICTURE_TYPE_B) {
1554 s->last_picture_ptr = s->next_picture_ptr;
1556 s->next_picture_ptr = s->current_picture_ptr;
1558 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1559 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1560 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1561 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1562 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1563 s->pict_type, s->droppable);
/* Stream starts without a usable reference (first frame not an I frame, or
 * a field-based keyframe): allocate a gray dummy "last" picture so motion
 * compensation has something to read. */
1565 if ((s->last_picture_ptr == NULL ||
1566 s->last_picture_ptr->f.data[0] == NULL) &&
1567 (s->pict_type != AV_PICTURE_TYPE_I ||
1568 s->picture_structure != PICT_FRAME)) {
1569 int h_chroma_shift, v_chroma_shift;
1570 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1571 &h_chroma_shift, &v_chroma_shift);
1572 if (s->pict_type != AV_PICTURE_TYPE_I)
1573 av_log(avctx, AV_LOG_ERROR,
1574 "warning: first frame is no keyframe\n");
1575 else if (s->picture_structure != PICT_FRAME)
1576 av_log(avctx, AV_LOG_INFO,
1577 "allocate dummy last picture for field based first keyframe\n");
1579 /* Allocate a dummy frame */
1580 i = ff_find_unused_picture(s, 0);
1582 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1585 s->last_picture_ptr = &s->picture[i];
1586 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1587 s->last_picture_ptr = NULL;
/* Fill with mid-gray: luma 0, chroma 0x80 (neutral chroma). */
1591 memset(s->last_picture_ptr->f.data[0], 0,
1592 avctx->height * s->last_picture_ptr->f.linesize[0]);
1593 memset(s->last_picture_ptr->f.data[1], 0x80,
1594 (avctx->height >> v_chroma_shift) *
1595 s->last_picture_ptr->f.linesize[1]);
1596 memset(s->last_picture_ptr->f.data[2], 0x80,
1597 (avctx->height >> v_chroma_shift) *
1598 s->last_picture_ptr->f.linesize[2]);
/* Mark both fields complete so frame-threaded consumers never wait on it. */
1600 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1601 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same dummy trick for the forward ("next") reference of an orphan B frame. */
1603 if ((s->next_picture_ptr == NULL ||
1604 s->next_picture_ptr->f.data[0] == NULL) &&
1605 s->pict_type == AV_PICTURE_TYPE_B) {
1606 /* Allocate a dummy frame */
1607 i = ff_find_unused_picture(s, 0);
1609 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1612 s->next_picture_ptr = &s->picture[i];
1613 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1614 s->next_picture_ptr = NULL;
1617 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1618 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
/* Refresh the embedded last/next Picture copies from the pointers. */
1621 if (s->last_picture_ptr) {
1622 ff_mpeg_unref_picture(s, &s->last_picture);
1623 if (s->last_picture_ptr->f.data[0] &&
1624 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1625 s->last_picture_ptr)) < 0)
1628 if (s->next_picture_ptr) {
1629 ff_mpeg_unref_picture(s, &s->next_picture);
1630 if (s->next_picture_ptr->f.data[0] &&
1631 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1632 s->next_picture_ptr)) < 0)
1636 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1637 s->last_picture_ptr->f.data[0]));
/* Field pictures: point data at the selected field and double the strides
 * so one field is addressed like a half-height frame. */
1639 if (s->picture_structure!= PICT_FRAME) {
1641 for (i = 0; i < 4; i++) {
1642 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1643 s->current_picture.f.data[i] +=
1644 s->current_picture.f.linesize[i];
1646 s->current_picture.f.linesize[i] *= 2;
1647 s->last_picture.f.linesize[i] *= 2;
1648 s->next_picture.f.linesize[i] *= 2;
1652 s->err_recognition = avctx->err_recognition;
1654 /* set dequantizer, we can't do it during init as
1655 * it might change for mpeg4 and we can't do it in the header
1656 * decode as init is not called for mpeg4 there yet */
1657 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1658 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1659 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1660 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1661 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1662 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1664 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1665 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1668 if (s->dct_error_sum) {
1669 assert(s->avctx->noise_reduction && s->encoding);
1670 update_noise_reduction(s);
1673 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1674 return ff_xvmc_field_start(s, avctx);
1679 /* generic function for encode/decode called after a
1680 * frame has been coded/decoded. */
1681 void ff_MPV_frame_end(MpegEncContext *s)
/* Per-frame cleanup shared by encoder and decoder: draw the edge padding
 * needed by unrestricted MVs, update last-frame bookkeeping, copy the
 * current Picture back into the pool, release non-reference frames, and
 * report decode completion to frame threads. */
1684 /* redraw edges for the frame if decoding didn't complete */
1685 // just to make sure that all data is rendered.
1686 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1687 ff_xvmc_field_end(s);
/* Edge drawing only makes sense for fully software-decoded reference
 * frames without CODEC_FLAG_EMU_EDGE. */
1688 } else if ((s->er.error_count || s->encoding) &&
1689 !s->avctx->hwaccel &&
1690 s->unrestricted_mv &&
1691 s->current_picture.reference &&
1693 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1694 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1695 int hshift = desc->log2_chroma_w;
1696 int vshift = desc->log2_chroma_h;
1697 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1698 s->h_edge_pos, s->v_edge_pos,
1699 EDGE_WIDTH, EDGE_WIDTH,
1700 EDGE_TOP | EDGE_BOTTOM);
/* Chroma planes use the same call scaled down by the subsampling shifts. */
1701 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1702 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1703 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1704 EDGE_TOP | EDGE_BOTTOM);
1705 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1706 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1707 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1708 EDGE_TOP | EDGE_BOTTOM);
1713 s->last_pict_type = s->pict_type;
1714 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1715 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1716 s->last_non_b_pict_type = s->pict_type;
1719 /* copy back current_picture variables */
/* Find the pool slot backing current_picture (matched by data pointer)
 * and sync the possibly-updated fields back into it. */
1720 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1721 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1722 s->picture[i] = s->current_picture;
1726 assert(i < MAX_PICTURE_COUNT);
1730 /* release non-reference frames */
1731 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1732 if (!s->picture[i].reference)
1733 ff_mpeg_unref_picture(s, &s->picture[i]);
1736 // clear copies, to avoid confusion
1738 memset(&s->last_picture, 0, sizeof(Picture));
1739 memset(&s->next_picture, 0, sizeof(Picture));
1740 memset(&s->current_picture, 0, sizeof(Picture));
1742 s->avctx->coded_frame = &s->current_picture_ptr->f;
1744 if (s->current_picture.reference)
1745 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1749 * Print debugging info for the given picture.
1751 void ff_print_debug_info(MpegEncContext *s, Picture *p)
/* Dump per-frame debug info (frame type, then a character map of skip
 * counts, qscale and MB types per macroblock) depending on which
 * FF_DEBUG_* bits are set. No-op for hwaccel or tableless pictures. */
1754 if (s->avctx->hwaccel || !p || !p->mb_type)
1758 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1761 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
/* NOTE(review): the switch reads pict->pict_type while the rest of the
 * function uses p — `pict` is presumably an alias declared in an elided
 * line; confirm against the full source. */
1762 switch (pict->pict_type) {
1763 case AV_PICTURE_TYPE_I:
1764 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1766 case AV_PICTURE_TYPE_P:
1767 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1769 case AV_PICTURE_TYPE_B:
1770 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1772 case AV_PICTURE_TYPE_S:
1773 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1775 case AV_PICTURE_TYPE_SI:
1776 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1778 case AV_PICTURE_TYPE_SP:
1779 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
/* One output line per MB row; each MB prints up to three debug columns. */
1782 for (y = 0; y < s->mb_height; y++) {
1783 for (x = 0; x < s->mb_width; x++) {
1784 if (s->avctx->debug & FF_DEBUG_SKIP) {
1785 int count = s->mbskip_table[x + y * s->mb_stride];
1788 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1790 if (s->avctx->debug & FF_DEBUG_QP) {
1791 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1792 p->qscale_table[x + y * s->mb_stride]);
1794 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1795 int mb_type = p->mb_type[x + y * s->mb_stride];
1796 // Type & MV direction
1797 if (IS_PCM(mb_type))
1798 av_log(s->avctx, AV_LOG_DEBUG, "P");
1799 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1800 av_log(s->avctx, AV_LOG_DEBUG, "A");
1801 else if (IS_INTRA4x4(mb_type))
1802 av_log(s->avctx, AV_LOG_DEBUG, "i");
1803 else if (IS_INTRA16x16(mb_type))
1804 av_log(s->avctx, AV_LOG_DEBUG, "I");
1805 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1806 av_log(s->avctx, AV_LOG_DEBUG, "d");
1807 else if (IS_DIRECT(mb_type))
1808 av_log(s->avctx, AV_LOG_DEBUG, "D");
1809 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1810 av_log(s->avctx, AV_LOG_DEBUG, "g");
1811 else if (IS_GMC(mb_type))
1812 av_log(s->avctx, AV_LOG_DEBUG, "G");
1813 else if (IS_SKIP(mb_type))
1814 av_log(s->avctx, AV_LOG_DEBUG, "S");
/* '>' = forward-only prediction, '<' = backward-only, 'X' = bidirectional. */
1815 else if (!USES_LIST(mb_type, 1))
1816 av_log(s->avctx, AV_LOG_DEBUG, ">");
1817 else if (!USES_LIST(mb_type, 0))
1818 av_log(s->avctx, AV_LOG_DEBUG, "<");
1820 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1821 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second column: partitioning (+ 8x8, - 16x8, | 8x16, space 16x16/intra). */
1825 if (IS_8X8(mb_type))
1826 av_log(s->avctx, AV_LOG_DEBUG, "+");
1827 else if (IS_16X8(mb_type))
1828 av_log(s->avctx, AV_LOG_DEBUG, "-");
1829 else if (IS_8X16(mb_type))
1830 av_log(s->avctx, AV_LOG_DEBUG, "|");
1831 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1832 av_log(s->avctx, AV_LOG_DEBUG, " ");
1834 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* Third column: '=' marks interlaced macroblocks. */
1837 if (IS_INTERLACED(mb_type))
1838 av_log(s->avctx, AV_LOG_DEBUG, "=");
1840 av_log(s->avctx, AV_LOG_DEBUG, " ");
1843 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1849 * find the lowest MB row referenced in the MVs
1851 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* Return the lowest (largest-index) macroblock row of the reference frame
 * that the current MB's motion vectors in direction dir can touch — used
 * to decide how far a frame-thread must have decoded before MC can run. */
1853 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1854 int my, off, i, mvs;
/* Field pictures and GMC fall back to the conservative answer below. */
1856 if (s->picture_structure != PICT_FRAME || s->mcsel)
/* mv_type selects how many MVs to scan (cases elided in this excerpt). */
1859 switch (s->mv_type) {
1873 for (i = 0; i < mvs; i++) {
/* Normalize half-pel MVs to quarter-pel units so the >>6 below maps
 * uniformly from qpel units to MB rows (64 qpel units per 16 lines). */
1874 my = s->mv[dir][i][1]<<qpel_shift;
1875 my_max = FFMAX(my_max, my);
1876 my_min = FFMIN(my_min, my);
1879 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1881 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* Fallback: assume the whole reference frame may be referenced. */
1883 return s->mb_height-1;
1886 /* put block[] to dest[] */
1887 static inline void put_dct(MpegEncContext *s,
1888 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
/* Dequantize an intra block and write (overwrite, not add) its IDCT to dest. */
1890 s->dct_unquantize_intra(s, block, i, qscale);
1891 s->dsp.idct_put (dest, line_size, block);
1894 /* add block[] to dest[] */
1895 static inline void add_dct(MpegEncContext *s,
1896 int16_t *block, int i, uint8_t *dest, int line_size)
/* Add the IDCT of an already-dequantized block onto dest; skipped entirely
 * when the block has no coded coefficients (block_last_index < 0). */
1898 if (s->block_last_index[i] >= 0) {
1899 s->dsp.idct_add (dest, line_size, block);
1903 static inline void add_dequant_dct(MpegEncContext *s,
1904 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
/* Dequantize an inter block, then add its IDCT onto dest; no-op when the
 * block has no coded coefficients. */
1906 if (s->block_last_index[i] >= 0) {
1907 s->dct_unquantize_inter(s, block, i, qscale);
1909 s->dsp.idct_add (dest, line_size, block);
1914 * Clean dc, ac, coded_block for the current non-intra MB.
1916 void ff_clean_intra_table_entries(MpegEncContext *s)
/* Reset the intra prediction state (DC predictors, AC prediction values,
 * coded_block flags) for the current macroblock — called when a non-intra
 * MB follows intra MBs so stale predictors are not reused. */
1918 int wrap = s->b8_stride;
1919 int xy = s->block_index[0];
/* Luma: reset the four 8x8 DC predictors to the neutral value 1024 and
 * clear the corresponding AC prediction rows. */
1922 s->dc_val[0][xy + 1 ] =
1923 s->dc_val[0][xy + wrap] =
1924 s->dc_val[0][xy + 1 + wrap] = 1024;
1926 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1927 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1928 if (s->msmpeg4_version>=3) {
1929 s->coded_block[xy ] =
1930 s->coded_block[xy + 1 ] =
1931 s->coded_block[xy + wrap] =
1932 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma uses MB-granularity indexing (mb_stride), one entry per plane. */
1935 wrap = s->mb_stride;
1936 xy = s->mb_x + s->mb_y * wrap;
1938 s->dc_val[2][xy] = 1024;
1940 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1941 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1943 s->mbintra_table[xy]= 0;
1946 /* generic function called after a macroblock has been parsed by the
1947 decoder or after it has been encoded by the encoder.
1949 Important variables used:
1950 s->mb_intra : true if intra macroblock
1951 s->mv_dir : motion vector direction
1952 s->mv_type : motion vector type
1953 s->mv : motion vector
1954 s->interlaced_dct : true if interlaced dct used (mpeg2)
1956 static av_always_inline
1957 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
/* Reconstruct one macroblock after it has been parsed (decoder) or coded
 * (encoder): motion compensation, then dequant+IDCT of the residual.
 * Always-inlined with a compile-time is_mpeg12 flag so codec-specific
 * branches fold away (additional parameter list lines elided here). */
1960 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1961 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1962 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1966 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1967 /* print DCT coefficients */
1969 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1971 for(j=0; j<64; j++){
/* Coefficients are stored permuted for the IDCT; un-permute for display. */
1972 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
1974 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1978 s->current_picture.qscale_table[mb_xy] = s->qscale;
1980 /* update DC predictors for P macroblocks */
1982 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1983 if(s->mbintra_table[mb_xy])
1984 ff_clean_intra_table_entries(s);
1988 s->last_dc[2] = 128 << s->intra_dc_precision;
1991 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1992 s->mbintra_table[mb_xy]=1;
/* Full reconstruction can be skipped while encoding non-reference frames
 * unless PSNR reporting or RD mb_decision needs the decoded pixels. */
1994 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1995 uint8_t *dest_y, *dest_cb, *dest_cr;
1996 int dct_linesize, dct_offset;
1997 op_pixels_func (*op_pix)[4];
1998 qpel_mc_func (*op_qpix)[16];
1999 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2000 const int uvlinesize = s->current_picture.f.linesize[1];
2001 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2002 const int block_size = 8;
2004 /* avoid copy if macroblock skipped in last frame too */
2005 /* skip only during decoding as we might trash the buffers during encoding a bit */
2007 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2009 if (s->mb_skipped) {
2011 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2013 } else if(!s->current_picture.reference) {
2016 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT: double the stride and make dct_offset step one line
 * instead of one 8-line block, so the two fields interleave. */
2020 dct_linesize = linesize << s->interlaced_dct;
2021 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
/* Unreadable B-frame destinations are reconstructed into the scratchpad
 * and copied out at the end (see the put_pixels_tab copies below). */
2025 dest_cb= s->dest[1];
2026 dest_cr= s->dest[2];
2028 dest_y = s->b_scratchpad;
2029 dest_cb= s->b_scratchpad+16*linesize;
2030 dest_cr= s->b_scratchpad+32*linesize;
2034 /* motion handling */
2035 /* decoding or more than one mb_type (MC was already done otherwise) */
/* Frame threading: wait until the referenced rows of each reference frame
 * have been decoded before reading them. */
2038 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2039 if (s->mv_dir & MV_DIR_FORWARD) {
2040 ff_thread_await_progress(&s->last_picture_ptr->tf,
2041 ff_MPV_lowest_referenced_row(s, 0),
2044 if (s->mv_dir & MV_DIR_BACKWARD) {
2045 ff_thread_await_progress(&s->next_picture_ptr->tf,
2046 ff_MPV_lowest_referenced_row(s, 1),
/* Forward MC uses put (no-rounding variant outside B frames); a backward
 * pass then averages on top for bidirectional prediction. */
2051 op_qpix= s->me.qpel_put;
2052 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2053 op_pix = s->hdsp.put_pixels_tab;
2055 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2057 if (s->mv_dir & MV_DIR_FORWARD) {
2058 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2059 op_pix = s->hdsp.avg_pixels_tab;
2060 op_qpix= s->me.qpel_avg;
2062 if (s->mv_dir & MV_DIR_BACKWARD) {
2063 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2067 /* skip dequant / idct if we are really late ;) */
2068 if(s->avctx->skip_idct){
2069 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2070 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2071 || s->avctx->skip_idct >= AVDISCARD_ALL)
2075 /* add dct residue */
/* Inter residual path A: codecs whose blocks still need dequantization
 * here (H.263 family etc.); MPEG-1/2 and quantized MPEG-4 were
 * dequantized during parsing and take path B below. */
2076 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2077 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2078 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2079 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2080 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2081 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2083 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2084 if (s->chroma_y_shift){
2085 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2086 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2090 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2091 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2092 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2093 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* Path B: already-dequantized residual, plain idct_add. */
2096 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2097 add_dct(s, block[0], 0, dest_y , dct_linesize);
2098 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2099 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2100 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2102 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2103 if(s->chroma_y_shift){//Chroma420
2104 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2105 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2108 dct_linesize = uvlinesize << s->interlaced_dct;
2109 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2111 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2112 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2113 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2114 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2115 if(!s->chroma_x_shift){//Chroma444
2116 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2117 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2118 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2119 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2124 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2125 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2128 /* dct only in intra block */
/* Intra MB: no MC, the IDCT result *is* the pixel data (put, not add).
 * Same dequantize-here vs. already-dequantized split as the inter paths. */
2129 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2130 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2131 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2132 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2133 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2135 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2136 if(s->chroma_y_shift){
2137 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2138 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2142 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2143 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2144 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2145 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2149 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2150 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2151 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2152 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2154 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2155 if(s->chroma_y_shift){
2156 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2157 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2160 dct_linesize = uvlinesize << s->interlaced_dct;
2161 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2163 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2164 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2165 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2166 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2167 if(!s->chroma_x_shift){//Chroma444
2168 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2169 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2170 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2171 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* Scratchpad path: copy the reconstructed MB into the real destination. */
2179 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2180 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2181 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2186 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
/* Thin dispatcher: instantiate the always-inline worker with the
 * compile-time is_mpeg12 flag matching the output format, so codec checks
 * inside it constant-fold. */
2188 if(s->out_format == FMT_MPEG1) {
2189 MPV_decode_mb_internal(s, block, 1);
2192 MPV_decode_mb_internal(s, block, 0);
2196 * @param h is the normal height, this will be reduced automatically if needed for the last row
2198 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2199 Picture *last, int y, int h, int picture_structure,
2200 int first_field, int draw_edges, int low_delay,
2201 int v_edge_pos, int h_edge_pos)
/* Notify the user of a finished band of rows [y, y+h): first draw the edge
 * padding for the band (when enabled), clamp h at the picture bottom, then
 * invoke the draw_horiz_band callback with the picture the user should
 * display (cur in display order, last when reordering hides cur). */
2203 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2204 int hshift = desc->log2_chroma_w;
2205 int vshift = desc->log2_chroma_h;
2206 const int field_pic = picture_structure != PICT_FRAME;
2212 if (!avctx->hwaccel &&
2215 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2216 int *linesize = cur->f.linesize;
2217 int sides = 0, edge_h;
/* Only the first band extends the top edge and only the last band the
 * bottom edge; sides accumulates which apply to this band. */
2218 if (y==0) sides |= EDGE_TOP;
2219 if (y + h >= v_edge_pos)
2220 sides |= EDGE_BOTTOM;
2222 edge_h= FFMIN(h, v_edge_pos - y);
2224 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2225 linesize[0], h_edge_pos, edge_h,
2226 EDGE_WIDTH, EDGE_WIDTH, sides);
2227 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2228 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2229 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2230 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2231 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2232 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2235 h = FFMIN(h, avctx->height - y);
/* Field pictures: suppress the first field unless the user opted in. */
2237 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2239 if (avctx->draw_horiz_band) {
2241 int offset[AV_NUM_DATA_POINTERS];
/* src selection: cur when it is already displayable (B frame, low delay,
 * or coded-order flag); otherwise the reordered picture (elided lines
 * presumably pick `last` — confirm against the full source). */
2244 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2245 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2252 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2253 picture_structure == PICT_FRAME &&
2254 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2255 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
/* Plane byte offsets of the band start, chroma scaled by vshift. */
2258 offset[0]= y * src->linesize[0];
2260 offset[2]= (y >> vshift) * src->linesize[1];
2261 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2267 avctx->draw_horiz_band(avctx, src, offset,
2268 y, picture_structure, h);
2272 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
/* Convenience wrapper: forward to ff_draw_horiz_band() with the context's
 * pictures and geometry; edge drawing is wanted only for unrestricted-MV
 * streams that are not intra-only. */
2274 int draw_edges = s->unrestricted_mv && !s->intra_only;
2275 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2276 &s->last_picture, y, h, s->picture_structure,
2277 s->first_field, draw_edges, s->low_delay,
2278 s->v_edge_pos, s->h_edge_pos);
/**
 * Initialize s->block_index[] and the s->dest[] plane pointers for the
 * macroblock at (s->mb_x, s->mb_y).
 *
 * NOTE(review): some lines (blank lines, an `else{`, closing braces) are
 * missing from this extracted chunk.
 */
2281 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2282 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2283 const int uvlinesize = s->current_picture.f.linesize[1];
/* log2 of the macroblock size in pixels (16x16 luma). */
2284 const int mb_size= 4;
/* 8x8 luma block indices in the b8_stride-strided layout; the -2/-1
 * biases address the blocks left of the current MB position. */
2286 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2287 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2288 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2289 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* Chroma block indices live past the luma area (b8_stride*mb_height*2). */
2290 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2291 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2292 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* Destination pointers for the current MB column in each plane; chroma
 * planes are shifted down by the horizontal chroma subsampling. */
2294 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2295 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2296 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
/* Skip the row adjustment for frame-structured B-pictures with a
 * draw_horiz_band callback (the guarded brace is missing here). */
2298 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2300 if(s->picture_structure==PICT_FRAME){
/* Frame picture: advance by whole MB rows. */
2301 s->dest[0] += s->mb_y * linesize << mb_size;
2302 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2303 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* Field picture (the `}else{` line is missing from this chunk):
 * advance by mb_y/2 rows and assert field parity. */
2305 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2306 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2307 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2308 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Permute an 8x8 block according to the given permutation vector.
 *
 * @param block       the block which will be permuted according to the
 *                    given permutation vector
 * @param permutation the permutation vector
 * @param last        the last non zero coefficient in scantable order,
 *                    used to speed the permutation up
 * @param scantable   the used scantable; this is only used to speed the
 *                    permutation up, the block is not (inverse) permutated
 *                    to scantable order!
 *
 * NOTE(review): the first (staging) pass is only partially visible in this
 * extracted chunk — its loop body and the temp[] declaration are missing.
 */
2321 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
2327 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* Pass 1 (body missing here): presumably stages the used coefficients
 * into temp[] and clears them in block[] — TODO confirm. */
2329 for(i=0; i<=last; i++){
2330 const int j= scantable[i];
/* Pass 2: write each staged coefficient back at its permuted position. */
2335 for(i=0; i<=last; i++){
2336 const int j= scantable[i];
2337 const int perm_j= permutation[j];
2338 block[perm_j]= temp[j];
/**
 * Release every buffered picture and reset the parser/bitstream state.
 * Installed as the codec flush callback (e.g. run when seeking).
 *
 * NOTE(review): the early `return;` after the NULL check and some blank
 * lines are missing from this extracted chunk.
 */
2342 void ff_mpeg_flush(AVCodecContext *avctx){
2344 MpegEncContext *s = avctx->priv_data;
/* Nothing to do for an uninitialized context. */
2346 if(s==NULL || s->picture==NULL)
/* Drop every reference held in the picture pool. */
2349 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2350 ff_mpeg_unref_picture(s, &s->picture[i]);
2351 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2353 ff_mpeg_unref_picture(s, &s->current_picture);
2354 ff_mpeg_unref_picture(s, &s->last_picture);
2355 ff_mpeg_unref_picture(s, &s->next_picture);
/* Reset decoding position and bitstream parsing state. */
2357 s->mb_x= s->mb_y= 0;
2359 s->parse_context.state= -1;
2360 s->parse_context.frame_start_found= 0;
2361 s->parse_context.overread= 0;
2362 s->parse_context.overread_index= 0;
2363 s->parse_context.index= 0;
2364 s->parse_context.last_index= 0;
2365 s->bitstream_buffer_size=0;
/**
 * Inverse-quantize an intra-coded MPEG-1 block in place.
 *
 * NOTE(review): the sign-handling branches and the `block[j] = level`
 * store inside the loop are missing from this extracted chunk; only the
 * magnitude computation for each branch is visible.
 */
2369 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2370 int16_t *block, int n, int qscale)
2372 int i, level, nCoeffs;
2373 const uint16_t *quant_matrix;
/* Only coefficients up to the last non-zero one need processing. */
2375 nCoeffs= s->block_last_index[n];
/* Intra DC is scaled separately; the luma/chroma selector (n < 4) is
 * missing from this chunk. */
2378 block[0] = block[0] * s->y_dc_scale;
2380 block[0] = block[0] * s->c_dc_scale;
2381 /* XXX: only mpeg1 */
2382 quant_matrix = s->intra_matrix;
2383 for(i=1;i<=nCoeffs;i++) {
2384 int j= s->intra_scantable.permutated[i];
/* Scale by qscale * matrix, then force the result odd ((x-1)|1):
 * MPEG-1 oddification / mismatch control. */
2389 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2390 level = (level - 1) | 1;
2393 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2394 level = (level - 1) | 1;
/**
 * Inverse-quantize an inter-coded MPEG-1 block in place.
 *
 * NOTE(review): the sign-handling branches and the coefficient store are
 * missing from this extracted chunk.
 */
2401 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2402 int16_t *block, int n, int qscale)
2404 int i, level, nCoeffs;
2405 const uint16_t *quant_matrix;
2407 nCoeffs= s->block_last_index[n];
2409 quant_matrix = s->inter_matrix;
/* Inter blocks have no special DC handling; start at coefficient 0. */
2410 for(i=0; i<=nCoeffs; i++) {
2411 int j= s->intra_scantable.permutated[i];
/* Inter reconstruction: ((2*level + 1) * qscale * matrix) >> 4, then
 * force the result odd ((x-1)|1) for MPEG-1 mismatch control. */
2416 level = (((level << 1) + 1) * qscale *
2417 ((int) (quant_matrix[j]))) >> 4;
2418 level = (level - 1) | 1;
2421 level = (((level << 1) + 1) * qscale *
2422 ((int) (quant_matrix[j]))) >> 4;
2423 level = (level - 1) | 1;
/**
 * Inverse-quantize an intra-coded MPEG-2 block in place (fast variant,
 * without bit-exact mismatch control).
 *
 * NOTE(review): the sign-handling branches and the coefficient store are
 * missing from this extracted chunk.
 */
2430 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2431 int16_t *block, int n, int qscale)
2433 int i, level, nCoeffs;
2434 const uint16_t *quant_matrix;
/* With alternate scan the last index is unreliable; process all 64. */
2436 if(s->alternate_scan) nCoeffs= 63;
2437 else nCoeffs= s->block_last_index[n];
/* Intra DC scaled separately; the luma/chroma selector is missing here. */
2440 block[0] = block[0] * s->y_dc_scale;
2442 block[0] = block[0] * s->c_dc_scale;
2443 quant_matrix = s->intra_matrix;
2444 for(i=1;i<=nCoeffs;i++) {
2445 int j= s->intra_scantable.permutated[i];
/* MPEG-2 intra: (level * qscale * matrix) >> 3, no oddification. */
2450 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2453 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Inverse-quantize an intra-coded MPEG-2 block in place, bit-exact
 * variant.
 *
 * NOTE(review): the mismatch-control bookkeeping that distinguishes this
 * from dct_unquantize_mpeg2_intra_c (presumably a running sum XORed into
 * the last coefficient) is missing from this extracted chunk, as are the
 * sign branches and the coefficient store — TODO confirm against the
 * upstream source.
 */
2460 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2461 int16_t *block, int n, int qscale)
2463 int i, level, nCoeffs;
2464 const uint16_t *quant_matrix;
/* With alternate scan the last index is unreliable; process all 64. */
2467 if(s->alternate_scan) nCoeffs= 63;
2468 else nCoeffs= s->block_last_index[n];
/* Intra DC scaled separately; the luma/chroma selector is missing here. */
2471 block[0] = block[0] * s->y_dc_scale;
2473 block[0] = block[0] * s->c_dc_scale;
2474 quant_matrix = s->intra_matrix;
2475 for(i=1;i<=nCoeffs;i++) {
2476 int j= s->intra_scantable.permutated[i];
/* Same magnitude computation as the fast variant. */
2481 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2484 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Inverse-quantize an inter-coded MPEG-2 block in place.
 *
 * NOTE(review): the sign branches, the coefficient store and the final
 * mismatch-control step are missing from this extracted chunk.
 */
2493 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2494 int16_t *block, int n, int qscale)
2496 int i, level, nCoeffs;
2497 const uint16_t *quant_matrix;
/* With alternate scan the last index is unreliable; process all 64. */
2500 if(s->alternate_scan) nCoeffs= 63;
2501 else nCoeffs= s->block_last_index[n];
2503 quant_matrix = s->inter_matrix;
/* Inter blocks have no special DC handling; start at coefficient 0. */
2504 for(i=0; i<=nCoeffs; i++) {
2505 int j= s->intra_scantable.permutated[i];
/* Inter reconstruction: ((2*level + 1) * qscale * matrix) >> 4. */
2510 level = (((level << 1) + 1) * qscale *
2511 ((int) (quant_matrix[j]))) >> 4;
2514 level = (((level << 1) + 1) * qscale *
2515 ((int) (quant_matrix[j]))) >> 4;
/**
 * Inverse-quantize an intra-coded H.263 block in place using the
 * multiply/add form: level' = level * qmul +/- qadd.
 *
 * NOTE(review): the qmul initialization, the luma/chroma DC selector, the
 * nCoeffs fast path, and the per-coefficient sign test and store are
 * missing from this extracted chunk.
 */
2524 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2525 int16_t *block, int n, int qscale)
2527 int i, level, qmul, qadd;
2530 assert(s->block_last_index[n]>=0);
/* Intra DC is scaled separately (luma/chroma selector missing here). */
2536 block[0] = block[0] * s->y_dc_scale;
2538 block[0] = block[0] * s->c_dc_scale;
/* qadd is forced odd, per the H.263 reconstruction formula. */
2539 qadd = (qscale - 1) | 1;
/* Convert the scan-order last index to a raster-order bound. */
2546 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2548 for(i=1; i<=nCoeffs; i++) {
/* Negative levels subtract qadd, positive ones add it (the sign test
 * between these two lines is missing from this chunk). */
2552 level = level * qmul - qadd;
2554 level = level * qmul + qadd;
/**
 * Inverse-quantize an inter-coded H.263 block in place using the
 * multiply/add form: level' = level * qmul +/- qadd.
 *
 * NOTE(review): the qmul initialization, the nCoeffs fast path, and the
 * per-coefficient sign test and store are missing from this extracted
 * chunk.
 */
2561 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2562 int16_t *block, int n, int qscale)
2564 int i, level, qmul, qadd;
2567 assert(s->block_last_index[n]>=0);
/* qadd is forced odd, per the H.263 reconstruction formula. */
2569 qadd = (qscale - 1) | 1;
/* Convert the scan-order last index to a raster-order bound. */
2572 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
/* Inter blocks have no special DC handling; start at coefficient 0. */
2574 for(i=0; i<=nCoeffs; i++) {
/* Negative levels subtract qadd, positive ones add it (the sign test
 * between these two lines is missing from this chunk). */
2578 level = level * qmul - qadd;
2580 level = level * qmul + qadd;
/**
 * Set qscale and update qscale dependent variables.
 *
 * NOTE(review): the low-side clamp, the store to s->qscale, and the upper
 * clamp body are missing from this extracted chunk; only the upper-bound
 * test and the derived lookups are visible.
 */
2590 void ff_set_qscale(MpegEncContext * s, int qscale)
/* Clamp to the legal qscale range (upper bound 31). */
2594 else if (qscale > 31)
/* Chroma qscale is remapped through a per-codec table. */
2598 s->chroma_qscale= s->chroma_qscale_table[qscale];
/* DC scale factors are table lookups on the (chroma) qscale. */
2600 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2601 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2604 void ff_MPV_report_decode_progress(MpegEncContext *s)
2606 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2607 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
#if CONFIG_ERROR_RESILIENCE
/**
 * Copy the state the error-resilience code needs out of the
 * MpegEncContext and start error tracking for the new frame.
 */
void ff_mpeg_er_frame_start(MpegEncContext *s)
{
    ERContext *ctx = &s->er;

    /* Pictures the concealment code operates on. */
    ctx->cur_pic  = s->current_picture_ptr;
    ctx->last_pic = s->last_picture_ptr;
    ctx->next_pic = s->next_picture_ptr;

    /* Per-frame coding parameters mirrored into the ER context. */
    ctx->pp_time           = s->pp_time;
    ctx->pb_time           = s->pb_time;
    ctx->quarter_sample    = s->quarter_sample;
    ctx->partitioned_frame = s->partitioned_frame;

    ff_er_frame_start(ctx);
}
#endif /* CONFIG_ERROR_RESILIENCE */