2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
38 #include "mpegvideo.h"
41 #include "xvmc_internal.h"
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
/* Default luma-to-chroma qscale mapping: the identity (chroma uses the same
 * quantizer as luma).  Indexed by the luma qscale, range 0..31.
 * NOTE(review): the closing "};" of this table is not visible in this
 * extract. */
60 static const uint8_t ff_default_chroma_qscale_table[32] = {
61 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
62 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
63 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale value (the DC
 * quantizer is fixed in MPEG-1, independent of the per-MB quantizer).
 * NOTE(review): the closing "};" is not visible in this extract. */
66 const uint8_t ff_mpeg1_dc_scale_table[128] = {
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
68 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table, constant 4 for every qscale.  Presumably selected
 * for intra_dc_precision == 1 via ff_mpeg2_dc_scale_table below — TODO
 * confirm against the decoder.  NOTE(review): closing "};" not visible. */
78 static const uint8_t mpeg2_dc_scale_table1[128] = {
79 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
80 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table, constant 2 for every qscale.  Presumably selected
 * for intra_dc_precision == 2 — TODO confirm.  NOTE(review): closing "};"
 * not visible in this extract. */
90 static const uint8_t mpeg2_dc_scale_table2[128] = {
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table, constant 1 for every qscale (full DC precision).
 * Presumably selected for intra_dc_precision == 3 — TODO confirm.
 * NOTE(review): closing "};" not visible in this extract. */
102 static const uint8_t mpeg2_dc_scale_table3[128] = {
103 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
104 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Lookup of the four DC scale tables above; presumably indexed by the
 * MPEG-2 intra_dc_precision field (0..3) — TODO confirm against callers.
 * NOTE(review): closing "};" not visible in this extract. */
114 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
115 ff_mpeg1_dc_scale_table,
116 mpeg2_dc_scale_table1,
117 mpeg2_dc_scale_table2,
118 mpeg2_dc_scale_table3,
/* Pixel-format list advertised by 4:2:0-only codecs.
 * NOTE(review): the initializer entries and closing "};" are not visible in
 * this extract — verify against the original file. */
121 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback: reconstruct one macroblock at (mb_x, mb_y)
 * by loading the supplied motion parameters into the context and re-running
 * the regular MB decode path (ff_MPV_decode_mb).
 * NOTE(review): several lines are missing from this extract (braces, the
 * "mv" parameter declaration used by the memcpy below, and assignments such
 * as s->mv_dir / s->mb_x / s->mb_y). */
126 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
128 int mb_x, int mb_y, int mb_intra, int mb_skipped)
130 MpegEncContext *s = opaque;
133 s->mv_type = mv_type;
134 s->mb_intra = mb_intra;
135 s->mb_skipped = mb_skipped;
/* copy the caller-provided motion vectors into the context */
138 memcpy(s->mv, mv, sizeof(*mv));
140 ff_init_block_index(s);
141 ff_update_block_index(s);
143 s->dsp.clear_blocks(s->block[0]);
/* destination pointers for luma and both chroma planes; chroma offsets
 * are scaled by the chroma subsampling shifts */
145 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
146 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
147 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
150 ff_MPV_decode_mb(s, s->block);
153 /* init common dct for both encoder and decoder */
/* Initializes DSP contexts, installs the C dct_unquantize implementations,
 * lets the per-architecture init functions override them, and builds the
 * permutated scan tables.  NOTE(review): braces, #if/#endif guards around
 * the arch-specific calls, the "} else {" of the scantable branch, and the
 * return statement are not visible in this extract. */
154 av_cold int ff_dct_common_init(MpegEncContext *s)
156 ff_dsputil_init(&s->dsp, s->avctx);
157 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
158 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* default C implementations; may be replaced by SIMD versions below */
160 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
161 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
162 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
163 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
164 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* bitexact mode uses a variant that matches the reference rounding */
165 if (s->flags & CODEC_FLAG_BITEXACT)
166 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
167 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* architecture-specific overrides (normally wrapped in ARCH_* guards) */
170 ff_MPV_common_init_arm(s);
172 ff_MPV_common_init_bfin(s);
174 ff_MPV_common_init_ppc(s);
176 ff_MPV_common_init_x86(s);
178 /* load & permutate scantables
179 * note: only wmv uses different ones
181 if (s->alternate_scan) {
182 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
183 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
185 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
186 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
189 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocates the linesize-dependent scratch buffers (edge emulation buffer
 * and motion-estimation/scratchpad area).  Returns 0 on success or
 * AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): braces, the "fail" labels of the FF_ALLOCZ_OR_GOTO macros,
 * several scratchpad pointer offsets, and the success return are not
 * visible in this extract. */
194 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
/* round the (possibly negative) linesize up to a 32-byte-aligned size */
196 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
198 // edge emu needs blocksize + filter length - 1
199 // (= 17x17 for halfpel / 21x21 for h264)
200 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
201 // at uvlinesize. It supports only YUV420 so 24x24 is enough
202 // linesize * interlaced * MBsize
203 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
206 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
/* the remaining scratch pointers alias regions of me.scratchpad */
208 s->me.temp = s->me.scratchpad;
209 s->rd_scratchpad = s->me.scratchpad;
210 s->b_scratchpad = s->me.scratchpad;
211 s->obmc_scratchpad = s->me.scratchpad + 16;
/* failure path: release whatever was allocated and report OOM */
215 av_freep(&s->edge_emu_buffer);
216 return AVERROR(ENOMEM);
220 * Allocate a frame buffer
/* Obtains pixel buffers for @pic either through the (possibly threaded)
 * get_buffer path or, for the WM image/screen codecs, through the default
 * allocator, then allocates hwaccel private data and validates strides.
 * NOTE(review): braces, "else" branches, error-return statements and the
 * final success return are not visible in this extract. */
222 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* WMV3IMAGE/VC1IMAGE/MSS2 use internal buffers with different dimensions,
 * so user get_buffer callbacks must be bypassed for them */
227 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
228 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
229 s->codec_id != AV_CODEC_ID_MSS2)
230 r = ff_thread_get_buffer(s->avctx, &pic->tf,
231 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* fallback path: allocate with the default get_buffer2 implementation */
233 pic->f.width = s->avctx->width;
234 pic->f.height = s->avctx->height;
235 pic->f.format = s->avctx->pix_fmt;
236 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
239 if (r < 0 || !pic->f.buf[0]) {
240 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* allocate per-frame hwaccel private data if the hwaccel needs it */
245 if (s->avctx->hwaccel) {
246 assert(!pic->hwaccel_picture_private);
247 if (s->avctx->hwaccel->priv_data_size) {
248 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
249 if (!pic->hwaccel_priv_buf) {
250 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
253 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* stride must stay constant across frames once established */
257 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
258 s->uvlinesize != pic->f.linesize[1])) {
259 av_log(s->avctx, AV_LOG_ERROR,
260 "get_buffer() failed (stride changed)\n");
261 ff_mpeg_unref_picture(s, pic);
/* both chroma planes must share the same stride */
265 if (pic->f.linesize[1] != pic->f.linesize[2]) {
266 av_log(s->avctx, AV_LOG_ERROR,
267 "get_buffer() failed (uv stride mismatch)\n");
268 ff_mpeg_unref_picture(s, pic);
/* lazily allocate the linesize-dependent scratch buffers */
272 if (!s->edge_emu_buffer &&
273 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
274 av_log(s->avctx, AV_LOG_ERROR,
275 "get_buffer() failed to allocate context scratch buffers.\n");
276 ff_mpeg_unref_picture(s, pic);
/* Releases all per-picture side-data buffers (variance, skip, qscale,
 * mb_type, motion vector and reference index tables).
 * NOTE(review): braces and the loop variable declaration are not visible
 * in this extract. */
283 static void free_picture_tables(Picture *pic)
287 av_buffer_unref(&pic->mb_var_buf);
288 av_buffer_unref(&pic->mc_mb_var_buf);
289 av_buffer_unref(&pic->mb_mean_buf);
290 av_buffer_unref(&pic->mbskip_table_buf);
291 av_buffer_unref(&pic->qscale_table_buf);
292 av_buffer_unref(&pic->mb_type_buf);
/* two sets: forward/backward (or list 0/1) motion data */
294 for (i = 0; i < 2; i++) {
295 av_buffer_unref(&pic->motion_val_buf[i]);
296 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocates the per-picture side-data buffers sized from the current
 * macroblock geometry.  Returns 0 on success, AVERROR(ENOMEM) on failure
 * (caller is expected to free via free_picture_tables()).
 * NOTE(review): braces, the encoding-only guard around the variance
 * buffers, and the success return are not visible in this extract. */
300 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
302 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
303 const int mb_array_size = s->mb_stride * s->mb_height;
304 const int b8_array_size = s->b8_stride * s->mb_height * 2;
308 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
309 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
310 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
312 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
313 return AVERROR(ENOMEM);
/* encoder-side statistics buffers (variance / mean per MB) */
316 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
317 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
318 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
319 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
320 return AVERROR(ENOMEM);
/* motion vectors / reference indices; needed for H263-family and encoding */
323 if (s->out_format == FMT_H263 || s->encoding) {
324 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
325 int ref_index_size = 4 * mb_array_size;
327 for (i = 0; mv_size && i < 2; i++) {
328 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
329 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
330 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
331 return AVERROR(ENOMEM);
/* Ensures every side-data buffer of @pic is writable (performing a
 * copy-on-write via av_buffer_make_writable() for shared refs).
 * NOTE(review): parts of the MAKE_WRITABLE macro body (the null check and
 * error return) and the function's braces/return are not visible in this
 * extract. */
338 static int make_tables_writable(Picture *pic)
341 #define MAKE_WRITABLE(table) \
344 (ret = av_buffer_make_writable(&pic->table)) < 0)\
348 MAKE_WRITABLE(mb_var_buf);
349 MAKE_WRITABLE(mc_mb_var_buf);
350 MAKE_WRITABLE(mb_mean_buf);
351 MAKE_WRITABLE(mbskip_table_buf);
352 MAKE_WRITABLE(qscale_table_buf);
353 MAKE_WRITABLE(mb_type_buf);
355 for (i = 0; i < 2; i++) {
356 MAKE_WRITABLE(motion_val_buf[i]);
357 MAKE_WRITABLE(ref_index_buf[i]);
364 * Allocate a Picture.
365 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Also allocates (or makes writable) the per-picture side-data tables and
 * publishes convenience pointers into them.  Returns 0 on success or
 * AVERROR(ENOMEM) after cleaning the picture up.
 * NOTE(review): braces, the shared/non-shared branch structure, the
 * encoding-only guard and the success return are not visible in this
 * extract. */
367 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
372 assert(pic->f.data[0]);
375 assert(!pic->f.buf[0]);
377 if (alloc_frame_buffer(s, pic) < 0)
/* remember the strides established by the first successful allocation */
380 s->linesize = pic->f.linesize[0];
381 s->uvlinesize = pic->f.linesize[1];
384 if (!pic->qscale_table_buf)
385 ret = alloc_picture_tables(s, pic);
387 ret = make_tables_writable(pic);
/* encoder statistics pointers */
392 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
393 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
394 pic->mb_mean = pic->mb_mean_buf->data;
397 pic->mbskip_table = pic->mbskip_table_buf->data;
/* +2*stride+1 skips the table border used by the error concealment code */
398 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
399 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
401 if (pic->motion_val_buf[0]) {
402 for (i = 0; i < 2; i++) {
403 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
404 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* error path: undo everything done so far */
410 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
411 ff_mpeg_unref_picture(s, pic);
412 free_picture_tables(pic);
413 return AVERROR(ENOMEM);
417 * Deallocate a picture.
/* Releases frame/hwaccel buffers and zeroes the Picture struct from the
 * field after mb_mean onwards, preserving the (reference-counted) table
 * pointers at the front unless a realloc was requested.
 * NOTE(review): braces are not visible in this extract. */
419 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
/* byte offset of the first field that must be cleared */
421 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
424 /* WM Image / Screen codecs allocate internal buffers with different
425 * dimensions / colorspaces; ignore user-defined callbacks for these. */
426 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
427 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
428 s->codec_id != AV_CODEC_ID_MSS2)
429 ff_thread_release_buffer(s->avctx, &pic->tf);
431 av_frame_unref(&pic->f);
433 av_buffer_unref(&pic->hwaccel_priv_buf);
/* tables are dropped only when geometry changed and they must be rebuilt */
435 if (pic->needs_realloc)
436 free_picture_tables(pic);
438 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Makes @dst's side-data table refs point at the same underlying buffers as
 * @src's (re-referencing only when the buffers actually differ), then copies
 * the derived data pointers.  On ref failure everything is freed and
 * AVERROR(ENOMEM) is returned.
 * NOTE(review): parts of the UPDATE_TABLE macro (its null check), braces and
 * the success return are not visible in this extract. */
441 static int update_picture_tables(Picture *dst, Picture *src)
445 #define UPDATE_TABLE(table)\
448 (!dst->table || dst->table->buffer != src->table->buffer)) {\
449 av_buffer_unref(&dst->table);\
450 dst->table = av_buffer_ref(src->table);\
452 free_picture_tables(dst);\
453 return AVERROR(ENOMEM);\
458 UPDATE_TABLE(mb_var_buf);
459 UPDATE_TABLE(mc_mb_var_buf);
460 UPDATE_TABLE(mb_mean_buf);
461 UPDATE_TABLE(mbskip_table_buf);
462 UPDATE_TABLE(qscale_table_buf);
463 UPDATE_TABLE(mb_type_buf);
464 for (i = 0; i < 2; i++) {
465 UPDATE_TABLE(motion_val_buf[i]);
466 UPDATE_TABLE(ref_index_buf[i]);
/* the raw data pointers can simply be copied once the refs are shared */
469 dst->mb_var = src->mb_var;
470 dst->mc_mb_var = src->mc_mb_var;
471 dst->mb_mean = src->mb_mean;
472 dst->mbskip_table = src->mbskip_table;
473 dst->qscale_table = src->qscale_table;
474 dst->mb_type = src->mb_type;
475 for (i = 0; i < 2; i++) {
476 dst->motion_val[i] = src->motion_val[i];
477 dst->ref_index[i] = src->ref_index[i];
/* Creates a new reference of @src in @dst: frame buffers, side-data tables,
 * hwaccel private data, and the scalar metadata fields.  @dst must be empty
 * on entry.  On any failure @dst is unreferenced again.
 * NOTE(review): braces, goto-fail jumps and the returns are not visible in
 * this extract. */
483 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
487 av_assert0(!dst->f.buf[0]);
488 av_assert0(src->f.buf[0]);
492 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
496 ret = update_picture_tables(dst, src);
500 if (src->hwaccel_picture_private) {
501 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
502 if (!dst->hwaccel_priv_buf)
504 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
/* plain value fields */
507 dst->field_picture = src->field_picture;
508 dst->mb_var_sum = src->mb_var_sum;
509 dst->mc_mb_var_sum = src->mc_mb_var_sum;
510 dst->b_frame_score = src->b_frame_score;
511 dst->needs_realloc = src->needs_realloc;
512 dst->reference = src->reference;
513 dst->shared = src->shared;
/* failure path */
517 ff_mpeg_unref_picture(s, dst);
/* Swaps the U and V block pointers (pblocks[4] and pblocks[5]); used for
 * codecs that carry chroma in the opposite order (see the VCR2 fourcc
 * handling in init_duplicate_context()).
 * NOTE(review): only one assignment of the swap is visible in this extract —
 * the temporary-variable lines completing the exchange are missing. */
521 static void exchange_uv(MpegEncContext *s)
526 s->pblocks[4] = s->pblocks[5];
/* Initializes the per-slice-thread parts of an MpegEncContext: ME maps,
 * noise-reduction accumulator, DCT block storage, and (for H263-family)
 * the AC prediction values.  Returns 0 on success, -1 on allocation
 * failure (cleanup happens later in ff_MPV_common_end()).
 * NOTE(review): braces, the encoding-only guard around the ME buffers and
 * the success return are not visible in this extract. */
530 static int init_duplicate_context(MpegEncContext *s)
532 int y_size = s->b8_stride * (2 * s->mb_height + 1);
533 int c_size = s->mb_stride * (s->mb_height + 1);
534 int yc_size = y_size + 2 * c_size;
/* scratchpads are allocated lazily once the linesize is known */
542 s->obmc_scratchpad = NULL;
545 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
546 ME_MAP_SIZE * sizeof(uint32_t), fail)
547 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
548 ME_MAP_SIZE * sizeof(uint32_t), fail)
549 if (s->avctx->noise_reduction) {
550 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
551 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered */
554 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
555 s->block = s->blocks[0];
557 for (i = 0; i < 12; i++) {
558 s->pblocks[i] = &s->block[i];
/* VCR2 streams carry chroma blocks in swapped order */
560 if (s->avctx->codec_tag == AV_RL32("VCR2"))
563 if (s->out_format == FMT_H263) {
565 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
566 yc_size * sizeof(int16_t) * 16, fail);
/* +stride+1 offsets skip the prediction border */
567 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
568 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
569 s->ac_val[2] = s->ac_val[1] + c_size;
574 return -1; // free() through ff_MPV_common_end()
/* Frees the per-slice-thread buffers allocated by init_duplicate_context()
 * and ff_mpv_frame_size_alloc().
 * NOTE(review): braces, a null-context guard and the clearing of the other
 * scratchpad aliases are not visible in this extract. */
577 static void free_duplicate_context(MpegEncContext *s)
582 av_freep(&s->edge_emu_buffer);
583 av_freep(&s->me.scratchpad);
/* obmc_scratchpad aliased me.scratchpad, so only the pointer is cleared */
587 s->obmc_scratchpad = NULL;
589 av_freep(&s->dct_error_sum);
590 av_freep(&s->me.map);
591 av_freep(&s->me.score_map);
592 av_freep(&s->blocks);
593 av_freep(&s->ac_val_base);
/* Copies the thread-local fields from @src into @bak so they survive the
 * whole-struct memcpy in ff_update_duplicate_context().
 * NOTE(review): most of the COPY() lines (scratchpads, ME state, block
 * pointers, start/end_mb_y) are missing from this extract — only a small
 * sample is visible. */
597 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
599 #define COPY(a) bak->a = src->a
600 COPY(edge_emu_buffer);
605 COPY(obmc_scratchpad);
612 COPY(me.map_generation);
/* Synchronizes a slice-thread context @dst with the master @src: the whole
 * struct is copied, then the thread-local fields saved by
 * backup_duplicate_context() are restored and the per-thread pointers
 * (pblocks, scratch buffers) are re-derived.
 * NOTE(review): braces, local declarations ("MpegEncContext bak;"), the
 * error return and the success return are not visible in this extract. */
624 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
628 // FIXME copy only needed parts
630 backup_duplicate_context(&bak, dst);
631 memcpy(dst, src, sizeof(MpegEncContext));
632 backup_duplicate_context(dst, &bak);
/* pblocks must point into *this* context's block storage, not src's */
633 for (i = 0; i < 12; i++) {
634 dst->pblocks[i] = &dst->block[i];
636 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
638 if (!dst->edge_emu_buffer &&
639 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
640 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
641 "scratch buffers.\n");
644 // STOP_TIMER("update_duplicate_context")
645 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading context update: copies decoder state from the source
 * thread's context (@src) into the destination thread's (@dst) — pictures,
 * bitstream buffer, timing, interlacing and resilience fields — performing
 * a full init or a frame-size change first when needed.
 * NOTE(review): braces, several early returns, parts of the
 * UPDATE_PICTURE macro and the final return are not visible in this
 * extract. */
649 int ff_mpeg_update_thread_context(AVCodecContext *dst,
650 const AVCodecContext *src)
653 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
/* nothing to do when updating from self or an uninitialized source */
655 if (dst == src || !s1->context_initialized)
658 // FIXME can parameters change on I-frames?
659 // in that case dst may need a reinit
660 if (!s->context_initialized) {
/* bootstrap dst by cloning src wholesale, then re-init owned pointers */
661 memcpy(s, s1, sizeof(MpegEncContext));
664 s->bitstream_buffer = NULL;
665 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
667 ff_MPV_common_init(s);
670 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
672 s->context_reinit = 0;
673 s->height = s1->height;
674 s->width = s1->width;
675 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
679 s->avctx->coded_height = s1->avctx->coded_height;
680 s->avctx->coded_width = s1->avctx->coded_width;
681 s->avctx->width = s1->avctx->width;
682 s->avctx->height = s1->avctx->height;
684 s->coded_picture_number = s1->coded_picture_number;
685 s->picture_number = s1->picture_number;
/* re-reference the whole picture pool from the source thread */
687 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
688 ff_mpeg_unref_picture(s, &s->picture[i]);
689 if (s1->picture[i].f.buf[0] &&
690 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
694 #define UPDATE_PICTURE(pic)\
696 ff_mpeg_unref_picture(s, &s->pic);\
697 if (s1->pic.f.buf[0])\
698 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
700 ret = update_picture_tables(&s->pic, &s1->pic);\
705 UPDATE_PICTURE(current_picture);
706 UPDATE_PICTURE(last_picture);
707 UPDATE_PICTURE(next_picture);
/* pointer fields must be rebased from s1's picture array into s's */
709 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
710 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
711 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
713 // Error/bug resilience
714 s->next_p_frame_damaged = s1->next_p_frame_damaged;
715 s->workaround_bugs = s1->workaround_bugs;
/* bulk-copy the contiguous field range last_time_base..pb_field_time */
718 memcpy(&s->last_time_base, &s1->last_time_base,
719 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
720 (char *) &s1->last_time_base);
723 s->max_b_frames = s1->max_b_frames;
724 s->low_delay = s1->low_delay;
725 s->droppable = s1->droppable;
727 // DivX handling (doesn't work)
728 s->divx_packed = s1->divx_packed;
/* mirror the source thread's pending bitstream data (padded) */
730 if (s1->bitstream_buffer) {
731 if (s1->bitstream_buffer_size +
732 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
733 av_fast_malloc(&s->bitstream_buffer,
734 &s->allocated_bitstream_buffer_size,
735 s1->allocated_bitstream_buffer_size);
736 s->bitstream_buffer_size = s1->bitstream_buffer_size;
737 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
738 s1->bitstream_buffer_size);
739 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
740 FF_INPUT_BUFFER_PADDING_SIZE);
743 // linesize dependend scratch buffer allocation
744 if (!s->edge_emu_buffer)
746 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
747 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
748 "scratch buffers.\n");
749 return AVERROR(ENOMEM);
752 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
753 "be allocated due to unknown size.\n");
757 // MPEG2/interlacing info
758 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
759 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
761 if (!s1->first_field) {
762 s->last_pict_type = s1->pict_type;
763 if (s1->current_picture_ptr)
764 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
766 if (s1->pict_type != AV_PICTURE_TYPE_B) {
767 s->last_non_b_pict_type = s1->pict_type;
775 * Set the given MpegEncContext to common defaults
776 * (same for encoding and decoding).
777 * The changed fields will not depend upon the
778 * prior state of the MpegEncContext.
780 void ff_MPV_common_defaults(MpegEncContext *s)
/* default quantizer tables: MPEG-1 DC scale for both planes, identity
 * chroma qscale mapping */
782 s->y_dc_scale_table =
783 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
784 s->chroma_qscale_table = ff_default_chroma_qscale_table;
785 s->progressive_frame = 1;
786 s->progressive_sequence = 1;
787 s->picture_structure = PICT_FRAME;
789 s->coded_picture_number = 0;
790 s->picture_number = 0;
/* single slice context until threading configures more */
795 s->slice_context_count = 1;
799 * Set the given MpegEncContext to defaults for decoding.
800 * the changed fields will not depend upon
801 * the prior state of the MpegEncContext.
803 void ff_MPV_decode_defaults(MpegEncContext *s)
/* currently identical to the common defaults; kept as a separate entry
 * point for decoder-specific additions */
805 ff_MPV_common_defaults(s);
/* Wires up the error-resilience context from the MpegEncContext geometry,
 * allocates its status/temp tables and installs mpeg_er_decode_mb as the
 * per-MB concealment callback.  Returns 0 on success (not visible here) or
 * AVERROR(ENOMEM) after freeing its allocations.
 * NOTE(review): braces, the goto-fail jump and the success return are not
 * visible in this extract. */
808 static int init_er(MpegEncContext *s)
810 ERContext *er = &s->er;
811 int mb_array_size = s->mb_height * s->mb_stride;
814 er->avctx = s->avctx;
/* mirror the MB geometry so the ER code can run standalone */
817 er->mb_index2xy = s->mb_index2xy;
818 er->mb_num = s->mb_num;
819 er->mb_width = s->mb_width;
820 er->mb_height = s->mb_height;
821 er->mb_stride = s->mb_stride;
822 er->b8_stride = s->b8_stride;
824 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
825 er->error_status_table = av_mallocz(mb_array_size);
826 if (!er->er_temp_buffer || !er->error_status_table)
829 er->mbskip_table = s->mbskip_table;
830 er->mbintra_table = s->mbintra_table;
832 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
833 er->dc_val[i] = s->dc_val[i];
835 er->decode_mb = mpeg_er_decode_mb;
/* failure path */
840 av_freep(&er->er_temp_buffer);
841 av_freep(&er->error_status_table);
842 return AVERROR(ENOMEM);
846 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Derives the MB/block geometry from width/height and allocates every
 * resolution-dependent table (MV tables, MB type, lambda/complexity,
 * interlaced direct-mode tables, H263 prediction state, skip tables).
 * Freed again by free_context_frame().  Returns 0 on success (not visible
 * here) or AVERROR(ENOMEM).
 * NOTE(review): braces, several encoder-only/codec-only guards, the er_opaque
 * wiring, some block_wrap assignments and the success return are missing
 * from this extract. */
848 static int init_context_frame(MpegEncContext *s)
850 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* +1 columns/rows of border in the strides simplify edge handling */
852 s->mb_width = (s->width + 15) / 16;
853 s->mb_stride = s->mb_width + 1;
854 s->b8_stride = s->mb_width * 2 + 1;
855 s->b4_stride = s->mb_width * 4 + 1;
856 mb_array_size = s->mb_height * s->mb_stride;
857 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
859 /* set default edge pos, will be overriden
860 * in decode_header if needed */
861 s->h_edge_pos = s->mb_width * 16;
862 s->v_edge_pos = s->mb_height * 16;
864 s->mb_num = s->mb_width * s->mb_height;
869 s->block_wrap[3] = s->b8_stride;
871 s->block_wrap[5] = s->mb_stride;
873 y_size = s->b8_stride * (2 * s->mb_height + 1);
874 c_size = s->mb_stride * (s->mb_height + 1);
875 yc_size = y_size + 2 * c_size;
877 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
878 fail); // error ressilience code looks cleaner with this
879 for (y = 0; y < s->mb_height; y++)
880 for (x = 0; x < s->mb_width; x++)
881 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
883 s->mb_index2xy[s->mb_height * s->mb_width] =
884 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
887 /* Allocate MV tables */
888 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
889 mv_table_size * 2 * sizeof(int16_t), fail);
890 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
891 mv_table_size * 2 * sizeof(int16_t), fail);
892 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
893 mv_table_size * 2 * sizeof(int16_t), fail);
894 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
895 mv_table_size * 2 * sizeof(int16_t), fail);
896 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
897 mv_table_size * 2 * sizeof(int16_t), fail);
898 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
899 mv_table_size * 2 * sizeof(int16_t), fail);
/* the public pointers skip the one-MB border of each *_base table */
900 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
901 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
902 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
903 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
905 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
907 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
909 /* Allocate MB type table */
910 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
911 sizeof(uint16_t), fail); // needed for encoding
913 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
916 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
917 mb_array_size * sizeof(float), fail);
918 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
919 mb_array_size * sizeof(float), fail);
923 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
924 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
925 /* interlaced direct mode decoding tables */
926 for (i = 0; i < 2; i++) {
928 for (j = 0; j < 2; j++) {
929 for (k = 0; k < 2; k++) {
930 FF_ALLOCZ_OR_GOTO(s->avctx,
931 s->b_field_mv_table_base[i][j][k],
932 mv_table_size * 2 * sizeof(int16_t),
934 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
937 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
938 mb_array_size * 2 * sizeof(uint8_t), fail);
939 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
940 mv_table_size * 2 * sizeof(int16_t), fail);
941 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
944 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
945 mb_array_size * 2 * sizeof(uint8_t), fail);
948 if (s->out_format == FMT_H263) {
950 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
951 s->coded_block = s->coded_block_base + s->b8_stride + 1;
953 /* cbp, ac_pred, pred_dir */
954 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
955 mb_array_size * sizeof(uint8_t), fail);
956 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
957 mb_array_size * sizeof(uint8_t), fail);
960 if (s->h263_pred || s->h263_plus || !s->encoding) {
962 // MN: we need these for error resilience of intra-frames
963 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
964 yc_size * sizeof(int16_t), fail);
965 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
966 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
967 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor (8 * 128) */
968 for (i = 0; i < yc_size; i++)
969 s->dc_val_base[i] = 1024;
972 /* which mb is a intra block */
973 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
974 memset(s->mbintra_table, 1, mb_array_size);
976 /* init macroblock skip table */
977 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
978 // Note the + 1 is for a quicker mpeg4 slice_end detection
982 return AVERROR(ENOMEM);
986 * init common structure for both encoder and decoder.
987 * this assumes that some variables like width/height are already set
/* Full common init: validates dimensions and pixel format, clamps the
 * slice-thread count, runs DCT/DSP init, allocates encoder-only tables and
 * the picture pool, builds the frame-size-dependent tables via
 * init_context_frame(), and initializes one duplicate context per slice
 * thread.  On failure everything is torn down via ff_MPV_common_end().
 * NOTE(review): braces, several returns, an "else" of the slice-clamp
 * branch, the encoding-only guard around the encoder tables and the final
 * return are not visible in this extract. */
989 av_cold int ff_MPV_common_init(MpegEncContext *s)
992 int nb_slices = (HAVE_THREADS &&
993 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
994 s->avctx->thread_count : 1;
996 if (s->encoding && s->avctx->slices)
997 nb_slices = s->avctx->slices;
/* interlaced MPEG-2 rounds mb_height to a multiple of two MB rows */
999 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1000 s->mb_height = (s->height + 31) / 32 * 2;
1002 s->mb_height = (s->height + 15) / 16;
1004 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1005 av_log(s->avctx, AV_LOG_ERROR,
1006 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* cap the slice count at both MAX_THREADS and the number of MB rows */
1010 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1013 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1015 max_slices = MAX_THREADS;
1016 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1017 " reducing to %d\n", nb_slices, max_slices);
1018 nb_slices = max_slices;
1021 if ((s->width || s->height) &&
1022 av_image_check_size(s->width, s->height, 0, s->avctx))
1025 ff_dct_common_init(s);
1027 s->flags = s->avctx->flags;
1028 s->flags2 = s->avctx->flags2;
1030 /* set chroma shifts */
1031 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1033 &s->chroma_y_shift);
1035 /* convert fourcc to upper case */
1036 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1038 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1040 if (s->width && s->height) {
1041 s->avctx->coded_frame = &s->current_picture.f;
/* encoder-only allocations follow (normally guarded by s->encoding) */
1044 if (s->msmpeg4_version) {
1045 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1046 2 * 2 * (MAX_LEVEL + 1) *
1047 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1049 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1051 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
1052 64 * 32 * sizeof(int), fail);
1053 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
1054 64 * 32 * sizeof(int), fail);
1055 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
1056 64 * 32 * 2 * sizeof(uint16_t), fail);
1057 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
1058 64 * 32 * 2 * sizeof(uint16_t), fail);
1059 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
1060 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1061 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
1062 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1064 if (s->avctx->noise_reduction) {
1065 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
1066 2 * 64 * sizeof(uint16_t), fail);
/* picture pool shared by encoder and decoder */
1071 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1072 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1073 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1074 avcodec_get_frame_defaults(&s->picture[i].f);
1076 memset(&s->next_picture, 0, sizeof(s->next_picture));
1077 memset(&s->last_picture, 0, sizeof(s->last_picture));
1078 memset(&s->current_picture, 0, sizeof(s->current_picture));
1079 avcodec_get_frame_defaults(&s->next_picture.f);
1080 avcodec_get_frame_defaults(&s->last_picture.f);
1081 avcodec_get_frame_defaults(&s->current_picture.f);
1083 if (s->width && s->height) {
1084 if (init_context_frame(s))
1087 s->parse_context.state = -1;
1090 s->context_initialized = 1;
1091 s->thread_context[0] = s;
1093 if (s->width && s->height) {
/* one duplicate context per additional slice thread; each gets a
 * contiguous range of MB rows [start_mb_y, end_mb_y) */
1094 if (nb_slices > 1) {
1095 for (i = 1; i < nb_slices; i++) {
1096 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1097 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1100 for (i = 0; i < nb_slices; i++) {
1101 if (init_duplicate_context(s->thread_context[i]) < 0)
1103 s->thread_context[i]->start_mb_y =
1104 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1105 s->thread_context[i]->end_mb_y =
1106 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1109 if (init_duplicate_context(s) < 0)
1112 s->end_mb_y = s->mb_height;
1114 s->slice_context_count = nb_slices;
/* failure path: full teardown */
1119 ff_MPV_common_end(s);
1124 * Frees and resets MpegEncContext fields depending on the resolution.
1125 * Is used during resolution changes to avoid a full reinitialization of the
/* Inverse of init_context_frame(): frees every resolution-dependent table
 * and clears the derived pointers/strides so a later re-init starts clean.
 * NOTE(review): braces, local declarations (i, j, k) and the return are
 * not visible in this extract. */
1128 static int free_context_frame(MpegEncContext *s)
1132 av_freep(&s->mb_type);
1133 av_freep(&s->p_mv_table_base);
1134 av_freep(&s->b_forw_mv_table_base);
1135 av_freep(&s->b_back_mv_table_base);
1136 av_freep(&s->b_bidir_forw_mv_table_base);
1137 av_freep(&s->b_bidir_back_mv_table_base);
1138 av_freep(&s->b_direct_mv_table_base);
/* the offset convenience pointers alias the *_base tables just freed */
1139 s->p_mv_table = NULL;
1140 s->b_forw_mv_table = NULL;
1141 s->b_back_mv_table = NULL;
1142 s->b_bidir_forw_mv_table = NULL;
1143 s->b_bidir_back_mv_table = NULL;
1144 s->b_direct_mv_table = NULL;
1145 for (i = 0; i < 2; i++) {
1146 for (j = 0; j < 2; j++) {
1147 for (k = 0; k < 2; k++) {
1148 av_freep(&s->b_field_mv_table_base[i][j][k]);
1149 s->b_field_mv_table[i][j][k] = NULL;
1151 av_freep(&s->b_field_select_table[i][j]);
1152 av_freep(&s->p_field_mv_table_base[i][j]);
1153 s->p_field_mv_table[i][j] = NULL;
1155 av_freep(&s->p_field_select_table[i]);
1158 av_freep(&s->dc_val_base);
1159 av_freep(&s->coded_block_base);
1160 av_freep(&s->mbintra_table);
1161 av_freep(&s->cbp_table);
1162 av_freep(&s->pred_dir_table);
1164 av_freep(&s->mbskip_table);
1166 av_freep(&s->er.error_status_table);
1167 av_freep(&s->er.er_temp_buffer);
1168 av_freep(&s->mb_index2xy);
1169 av_freep(&s->lambda_table);
1170 av_freep(&s->cplx_tab);
1171 av_freep(&s->bits_tab);
/* strides are re-derived from the next successful frame allocation */
1173 s->linesize = s->uvlinesize = 0;
/* Reinitialize the resolution-dependent parts of MpegEncContext after a
 * mid-stream frame size change: tear down slice contexts and per-frame
 * tables, recompute macroblock dimensions, then rebuild.
 * NOTE(review): interior lines (braces, error paths, returns) are missing
 * from this extraction; surviving code left byte-identical. */
1178 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Free the duplicated slice-thread contexts (index 0 is s itself and is
 * not freed, only its duplicate buffers). */
1182 if (s->slice_context_count > 1) {
1183 for (i = 0; i < s->slice_context_count; i++) {
1184 free_duplicate_context(s->thread_context[i]);
1186 for (i = 1; i < s->slice_context_count; i++) {
1187 av_freep(&s->thread_context[i]);
1190 free_duplicate_context(s);
1192 if ((err = free_context_frame(s)) < 0)
/* Force every cached picture to be reallocated at the new size. */
1196 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1197 s->picture[i].needs_realloc = 1;
1200 s->last_picture_ptr =
1201 s->next_picture_ptr =
1202 s->current_picture_ptr = NULL;
/* Interlaced MPEG-2 uses 32-line-aligned MB height (two 16-line fields);
 * everything else uses 16-line macroblocks. */
1205 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1206 s->mb_height = (s->height + 31) / 32 * 2;
1208 s->mb_height = (s->height + 15) / 16;
1210 if ((s->width || s->height) &&
1211 av_image_check_size(s->width, s->height, 0, s->avctx))
1212 return AVERROR_INVALIDDATA;
1214 if ((err = init_context_frame(s)))
1217 s->thread_context[0] = s;
/* Recreate the slice-thread contexts and split the MB rows between them,
 * mirroring the setup done in ff_MPV_common_init(). */
1219 if (s->width && s->height) {
1220 int nb_slices = s->slice_context_count;
1221 if (nb_slices > 1) {
1222 for (i = 1; i < nb_slices; i++) {
1223 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1224 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1227 for (i = 0; i < nb_slices; i++) {
1228 if (init_duplicate_context(s->thread_context[i]) < 0)
/* Rounded partition of mb_height rows across nb_slices contexts. */
1230 s->thread_context[i]->start_mb_y =
1231 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1232 s->thread_context[i]->end_mb_y =
1233 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1236 if (init_duplicate_context(s) < 0)
1239 s->end_mb_y = s->mb_height;
1241 s->slice_context_count = nb_slices;
/* presumably the error path — tear everything down on failure. */
1246 ff_MPV_common_end(s);
1250 /* free the common structures used by both encoder and decoder */
1251 void ff_MPV_common_end(MpegEncContext *s)
/* Free duplicated slice-thread contexts; context 0 is s itself. */
1255 if (s->slice_context_count > 1) {
1256 for (i = 0; i < s->slice_context_count; i++) {
1257 free_duplicate_context(s->thread_context[i]);
1259 for (i = 1; i < s->slice_context_count; i++) {
1260 av_freep(&s->thread_context[i]);
1262 s->slice_context_count = 1;
1263 } else free_duplicate_context(s);
1265 av_freep(&s->parse_context.buffer);
1266 s->parse_context.buffer_size = 0;
1268 av_freep(&s->bitstream_buffer);
1269 s->allocated_bitstream_buffer_size = 0;
/* Encoder-side statistics and quantizer matrices. */
1271 av_freep(&s->avctx->stats_out);
1272 av_freep(&s->ac_stats);
1274 av_freep(&s->q_intra_matrix);
1275 av_freep(&s->q_inter_matrix);
1276 av_freep(&s->q_intra_matrix16);
1277 av_freep(&s->q_inter_matrix16);
1278 av_freep(&s->input_picture);
1279 av_freep(&s->reordered_input_picture);
1280 av_freep(&s->dct_offset);
/* Release every picture in the pool, then the shadow copies held in
 * last/current/next/new_picture. */
1283 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1284 free_picture_tables(&s->picture[i]);
1285 ff_mpeg_unref_picture(s, &s->picture[i]);
1288 av_freep(&s->picture);
1289 free_picture_tables(&s->last_picture);
1290 ff_mpeg_unref_picture(s, &s->last_picture);
1291 free_picture_tables(&s->current_picture);
1292 ff_mpeg_unref_picture(s, &s->current_picture);
1293 free_picture_tables(&s->next_picture);
1294 ff_mpeg_unref_picture(s, &s->next_picture);
1295 free_picture_tables(&s->new_picture);
1296 ff_mpeg_unref_picture(s, &s->new_picture);
1298 free_context_frame(s);
/* Mark the context uninitialized so a future init starts clean. */
1300 s->context_initialized = 0;
1301 s->last_picture_ptr =
1302 s->next_picture_ptr =
1303 s->current_picture_ptr = NULL;
1304 s->linesize = s->uvlinesize = 0;
/* Build the derived run/level lookup tables (max_level[], max_run[],
 * index_run[]) for an RLTable. If static_store is non-NULL the tables live
 * in that static buffer and the call is idempotent; otherwise they are
 * heap-allocated with av_malloc().
 * NOTE(review): interior lines (braces, start/end setup) are missing from
 * this extraction; surviving code left byte-identical. */
1307 av_cold void ff_init_rl(RLTable *rl,
1308 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1310 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1311 uint8_t index_run[MAX_RUN + 1];
1312 int last, run, level, start, end, i;
1314 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1315 if (static_store && rl->max_level[0])
1318 /* compute max_level[], max_run[] and index_run[] */
/* Two passes: last==0 for "not last coefficient" codes, last==1 for
 * "last coefficient" codes. */
1319 for (last = 0; last < 2; last++) {
1328 memset(max_level, 0, MAX_RUN + 1);
1329 memset(max_run, 0, MAX_LEVEL + 1);
/* rl->n acts as the "unset" sentinel for index_run[]. */
1330 memset(index_run, rl->n, MAX_RUN + 1);
1331 for (i = start; i < end; i++) {
1332 run = rl->table_run[i];
1333 level = rl->table_level[i];
1334 if (index_run[run] == rl->n)
1336 if (level > max_level[run])
1337 max_level[run] = level;
1338 if (run > max_run[level])
1339 max_run[level] = run;
/* Copy the scratch arrays into either the static buffer (partitioned at
 * fixed offsets) or freshly allocated memory. */
1342 rl->max_level[last] = static_store[last];
1344 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1345 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1347 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1349 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1350 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1352 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1354 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1355 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Pre-expand the RL VLC into per-qscale RL_VLC_ELEM tables (rl_vlc[q]),
 * baking the dequantization (qmul/qadd) into the stored level so decode
 * can skip a multiply per coefficient.
 * NOTE(review): interior lines (qmul setup, escape handling, braces) are
 * missing from this extraction; surviving code left byte-identical. */
1359 av_cold void ff_init_vlc_rl(RLTable *rl)
1363 for (q = 0; q < 32; q++) {
/* (q - 1) | 1 : standard H.263-style rounding offset, always odd. */
1365 int qadd = (q - 1) | 1;
1371 for (i = 0; i < rl->vlc.table_size; i++) {
1372 int code = rl->vlc.table[i][0];
1373 int len = rl->vlc.table[i][1];
1376 if (len == 0) { // illegal code
1379 } else if (len < 0) { // more bits needed
1383 if (code == rl->n) { // esc
/* run is stored +1; codes past rl->last mark "last coefficient"
 * and get 192 added to run to encode that flag. */
1387 run = rl->table_run[code] + 1;
1388 level = rl->table_level[code] * qmul + qadd;
1389 if (code >= rl->last) run += 192;
1392 rl->rl_vlc[q][i].len = len;
1393 rl->rl_vlc[q][i].level = level;
1394 rl->rl_vlc[q][i].run = run;
/* Unreference every non-reference picture in the pool; the current picture
 * is kept unless remove_current is set. */
1399 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1403 /* release non reference frames */
1404 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1405 if (!s->picture[i].reference &&
1406 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1407 ff_mpeg_unref_picture(s, &s->picture[i]);
/* A picture slot is reusable when it has no buffer, or when it needs a
 * realloc and is not held as a delayed reference.
 * NOTE(review): the return statements are missing from this extraction. */
1412 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1414 if (pic->f.buf[0] == NULL)
1416 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Return the index of a free slot in s->picture[]. A completely empty slot
 * (no buffer) is preferred over one that merely needs reallocation.
 * Returns AVERROR_INVALIDDATA when every slot is in use. */
1421 static int find_unused_picture(MpegEncContext *s, int shared)
1426 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1427 if (s->picture[i].f.buf[0] == NULL)
1431 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1432 if (pic_is_unused(s, &s->picture[i]))
1437 return AVERROR_INVALIDDATA;
/* Public wrapper around find_unused_picture(): when the chosen slot is
 * flagged needs_realloc, fully reset it (free tables, unref, re-default
 * the AVFrame) so the caller receives a clean slot. */
1440 int ff_find_unused_picture(MpegEncContext *s, int shared)
1442 int ret = find_unused_picture(s, shared);
1444 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1445 if (s->picture[ret].needs_realloc) {
1446 s->picture[ret].needs_realloc = 0;
1447 free_picture_tables(&s->picture[ret]);
1448 ff_mpeg_unref_picture(s, &s->picture[ret]);
1449 avcodec_get_frame_defaults(&s->picture[ret].f);
/* Refresh the per-coefficient noise-reduction offsets from the running DCT
 * error statistics, halving the accumulators once the sample count exceeds
 * 2^16 so the average tracks recent frames. */
1455 static void update_noise_reduction(MpegEncContext *s)
1459 for (intra = 0; intra < 2; intra++) {
1460 if (s->dct_count[intra] > (1 << 16)) {
1461 for (i = 0; i < 64; i++) {
1462 s->dct_error_sum[intra][i] >>= 1;
1464 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / avg_error, with +1 guarding
 * against division by zero and /2 rounding the numerator. */
1467 for (i = 0; i < 64; i++) {
1468 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1469 s->dct_count[intra] +
1470 s->dct_error_sum[intra][i] / 2) /
1471 (s->dct_error_sum[intra][i] + 1);
1477 * generic function for encode/decode called after coding/decoding
1478 * the header and before a frame is coded/decoded.
/* NOTE(review): interior lines (braces, error paths, returns) are missing
 * from this extraction; surviving code left byte-identical. */
1480 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1486 /* mark & release old frames */
1487 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1488 s->last_picture_ptr != s->next_picture_ptr &&
1489 s->last_picture_ptr->f.buf[0]) {
1490 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1493 /* release forgotten pictures */
1494 /* if (mpeg124/h263) */
1496 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1497 if (&s->picture[i] != s->last_picture_ptr &&
1498 &s->picture[i] != s->next_picture_ptr &&
1499 s->picture[i].reference && !s->picture[i].needs_realloc) {
1500 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1501 av_log(avctx, AV_LOG_ERROR,
1502 "releasing zombie picture\n");
1503 ff_mpeg_unref_picture(s, &s->picture[i]);
1508 ff_mpeg_unref_picture(s, &s->current_picture);
1511 ff_release_unused_pictures(s, 1);
/* Pick the picture to decode/encode into: reuse a pre-set pointer if it
 * has no buffer yet, otherwise grab a free pool slot. */
1513 if (s->current_picture_ptr &&
1514 s->current_picture_ptr->f.buf[0] == NULL) {
1515 // we already have a unused image
1516 // (maybe it was set before reading the header)
1517 pic = s->current_picture_ptr;
1519 i = ff_find_unused_picture(s, 0);
1521 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1524 pic = &s->picture[i];
1528 if (!s->droppable) {
1529 if (s->pict_type != AV_PICTURE_TYPE_B)
1533 pic->f.coded_picture_number = s->coded_picture_number++;
1535 if (ff_alloc_picture(s, pic, 0) < 0)
1538 s->current_picture_ptr = pic;
1539 // FIXME use only the vars from current_pic
/* Derive frame-level flags (top_field_first, interlaced_frame, key_frame)
 * for the new current picture. */
1540 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1541 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1542 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1543 if (s->picture_structure != PICT_FRAME)
1544 s->current_picture_ptr->f.top_field_first =
1545 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1547 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1548 !s->progressive_sequence;
1549 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1552 s->current_picture_ptr->f.pict_type = s->pict_type;
1553 // if (s->flags && CODEC_FLAG_QSCALE)
1554 //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1555 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1557 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1558 s->current_picture_ptr)) < 0)
/* Slide the reference window: for non-B pictures the old next becomes
 * last and the new current becomes next. */
1561 if (s->pict_type != AV_PICTURE_TYPE_B) {
1562 s->last_picture_ptr = s->next_picture_ptr;
1564 s->next_picture_ptr = s->current_picture_ptr;
1566 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1567 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1568 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1569 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1570 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1571 s->pict_type, s->droppable);
/* Missing last reference (stream did not start on a frame keyframe):
 * allocate a gray dummy picture so motion compensation has a source. */
1573 if ((s->last_picture_ptr == NULL ||
1574 s->last_picture_ptr->f.buf[0] == NULL) &&
1575 (s->pict_type != AV_PICTURE_TYPE_I ||
1576 s->picture_structure != PICT_FRAME)) {
1577 int h_chroma_shift, v_chroma_shift;
1578 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1579 &h_chroma_shift, &v_chroma_shift);
1580 if (s->pict_type != AV_PICTURE_TYPE_I)
1581 av_log(avctx, AV_LOG_ERROR,
1582 "warning: first frame is no keyframe\n");
1583 else if (s->picture_structure != PICT_FRAME)
1584 av_log(avctx, AV_LOG_INFO,
1585 "allocate dummy last picture for field based first keyframe\n");
1587 /* Allocate a dummy frame */
1588 i = ff_find_unused_picture(s, 0);
1590 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1593 s->last_picture_ptr = &s->picture[i];
1594 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1595 s->last_picture_ptr = NULL;
/* Luma 0 (black), chroma 0x80 (neutral): a flat gray frame. */
1599 memset(s->last_picture_ptr->f.data[0], 0,
1600 avctx->height * s->last_picture_ptr->f.linesize[0]);
1601 memset(s->last_picture_ptr->f.data[1], 0x80,
1602 (avctx->height >> v_chroma_shift) *
1603 s->last_picture_ptr->f.linesize[1]);
1604 memset(s->last_picture_ptr->f.data[2], 0x80,
1605 (avctx->height >> v_chroma_shift) *
1606 s->last_picture_ptr->f.linesize[2]);
1608 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1609 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* B-picture with no forward reference: allocate a dummy next picture. */
1611 if ((s->next_picture_ptr == NULL ||
1612 s->next_picture_ptr->f.buf[0] == NULL) &&
1613 s->pict_type == AV_PICTURE_TYPE_B) {
1614 /* Allocate a dummy frame */
1615 i = ff_find_unused_picture(s, 0);
1617 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1620 s->next_picture_ptr = &s->picture[i];
1621 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1622 s->next_picture_ptr = NULL;
1625 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1626 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
/* Refresh the working copies last_picture/next_picture from the ptrs. */
1629 if (s->last_picture_ptr) {
1630 ff_mpeg_unref_picture(s, &s->last_picture);
1631 if (s->last_picture_ptr->f.buf[0] &&
1632 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1633 s->last_picture_ptr)) < 0)
1636 if (s->next_picture_ptr) {
1637 ff_mpeg_unref_picture(s, &s->next_picture);
1638 if (s->next_picture_ptr->f.buf[0] &&
1639 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1640 s->next_picture_ptr)) < 0)
1644 if (s->pict_type != AV_PICTURE_TYPE_I &&
1645 !(s->last_picture_ptr && s->last_picture_ptr->f.buf[0])) {
1646 av_log(s, AV_LOG_ERROR,
1647 "Non-reference picture received and no reference available\n");
1648 return AVERROR_INVALIDDATA;
/* Field pictures: offset bottom-field data pointers by one line and double
 * the strides so a field is addressed like a half-height frame. */
1651 if (s->picture_structure!= PICT_FRAME) {
1653 for (i = 0; i < 4; i++) {
1654 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1655 s->current_picture.f.data[i] +=
1656 s->current_picture.f.linesize[i];
1658 s->current_picture.f.linesize[i] *= 2;
1659 s->last_picture.f.linesize[i] *= 2;
1660 s->next_picture.f.linesize[i] *= 2;
1664 s->err_recognition = avctx->err_recognition;
1666 /* set dequantizer, we can't do it during init as
1667 * it might change for mpeg4 and we can't do it in the header
1668 * decode as init is not called for mpeg4 there yet */
1669 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1670 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1671 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1672 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1673 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1674 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1676 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1677 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1680 if (s->dct_error_sum) {
1681 assert(s->avctx->noise_reduction && s->encoding);
1682 update_noise_reduction(s);
/* Deprecated XvMC acceleration hook (compiled out unless FF_API_XVMC). */
1686 FF_DISABLE_DEPRECATION_WARNINGS
1687 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1688 return ff_xvmc_field_start(s, avctx);
1689 FF_ENABLE_DEPRECATION_WARNINGS
1690 #endif /* FF_API_XVMC */
1695 /* generic function for encode/decode called after a
1696 * frame has been coded/decoded. */
/* NOTE(review): interior lines (braces) are missing from this extraction;
 * surviving code left byte-identical. */
1697 void ff_MPV_frame_end(MpegEncContext *s)
1702 FF_DISABLE_DEPRECATION_WARNINGS
1703 /* redraw edges for the frame if decoding didn't complete */
1704 // just to make sure that all data is rendered.
1705 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1706 ff_xvmc_field_end(s);
1708 FF_ENABLE_DEPRECATION_WARNINGS
1709 #endif /* FF_API_XVMC */
/* Pad the picture edges (needed for unrestricted MVs) unless a hwaccel
 * owns the buffers or EMU_EDGE is requested. */
1710 if ((s->er.error_count || s->encoding) &&
1711 !s->avctx->hwaccel &&
1712 s->unrestricted_mv &&
1713 s->current_picture.reference &&
1715 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1716 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1717 int hshift = desc->log2_chroma_w;
1718 int vshift = desc->log2_chroma_h;
1719 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1720 s->h_edge_pos, s->v_edge_pos,
1721 EDGE_WIDTH, EDGE_WIDTH,
1722 EDGE_TOP | EDGE_BOTTOM);
1723 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1724 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1725 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1726 EDGE_TOP | EDGE_BOTTOM);
1727 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1728 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1729 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1730 EDGE_TOP | EDGE_BOTTOM);
/* Remember per-type state used by rate control / header decisions. */
1735 s->last_pict_type = s->pict_type;
1736 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1737 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1738 s->last_non_b_pict_type = s->pict_type;
1741 /* copy back current_picture variables */
1742 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1743 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1744 s->picture[i] = s->current_picture;
1748 assert(i < MAX_PICTURE_COUNT);
1752 /* release non-reference frames */
1753 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1754 if (!s->picture[i].reference)
1755 ff_mpeg_unref_picture(s, &s->picture[i]);
1758 // clear copies, to avoid confusion
1760 memset(&s->last_picture, 0, sizeof(Picture));
1761 memset(&s->next_picture, 0, sizeof(Picture));
1762 memset(&s->current_picture, 0, sizeof(Picture));
1764 s->avctx->coded_frame = &s->current_picture_ptr->f;
/* Mark the frame fully decoded for frame-threading consumers. */
1766 if (s->current_picture.reference)
1767 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1771 * Print debugging info for the given picture.
/* Dumps, per macroblock, skip counts (FF_DEBUG_SKIP), qscale values
 * (FF_DEBUG_QP) and a compact MB-type/MV-direction/partition glyph grid
 * (FF_DEBUG_MB_TYPE) to the log.
 * NOTE(review): interior lines (braces, break statements) are missing from
 * this extraction; surviving code left byte-identical. */
1773 void ff_print_debug_info(MpegEncContext *s, Picture *p)
1776 if (s->avctx->hwaccel || !p || !p->mb_type)
1780 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1783 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1784 switch (pict->pict_type) {
1785 case AV_PICTURE_TYPE_I:
1786 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1788 case AV_PICTURE_TYPE_P:
1789 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1791 case AV_PICTURE_TYPE_B:
1792 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1794 case AV_PICTURE_TYPE_S:
1795 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1797 case AV_PICTURE_TYPE_SI:
1798 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1800 case AV_PICTURE_TYPE_SP:
1801 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1804 for (y = 0; y < s->mb_height; y++) {
1805 for (x = 0; x < s->mb_width; x++) {
1806 if (s->avctx->debug & FF_DEBUG_SKIP) {
1807 int count = s->mbskip_table[x + y * s->mb_stride];
1810 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1812 if (s->avctx->debug & FF_DEBUG_QP) {
1813 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1814 p->qscale_table[x + y * s->mb_stride]);
1816 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1817 int mb_type = p->mb_type[x + y * s->mb_stride];
1818 // Type & MV direction
1819 if (IS_PCM(mb_type))
1820 av_log(s->avctx, AV_LOG_DEBUG, "P");
1821 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1822 av_log(s->avctx, AV_LOG_DEBUG, "A");
1823 else if (IS_INTRA4x4(mb_type))
1824 av_log(s->avctx, AV_LOG_DEBUG, "i");
1825 else if (IS_INTRA16x16(mb_type))
1826 av_log(s->avctx, AV_LOG_DEBUG, "I");
1827 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1828 av_log(s->avctx, AV_LOG_DEBUG, "d");
1829 else if (IS_DIRECT(mb_type))
1830 av_log(s->avctx, AV_LOG_DEBUG, "D");
1831 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1832 av_log(s->avctx, AV_LOG_DEBUG, "g");
1833 else if (IS_GMC(mb_type))
1834 av_log(s->avctx, AV_LOG_DEBUG, "G");
1835 else if (IS_SKIP(mb_type))
1836 av_log(s->avctx, AV_LOG_DEBUG, "S");
1837 else if (!USES_LIST(mb_type, 1))
1838 av_log(s->avctx, AV_LOG_DEBUG, ">");
1839 else if (!USES_LIST(mb_type, 0))
1840 av_log(s->avctx, AV_LOG_DEBUG, "<");
1842 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1843 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second glyph: partition shape (+, -, |) or space/?. */
1847 if (IS_8X8(mb_type))
1848 av_log(s->avctx, AV_LOG_DEBUG, "+");
1849 else if (IS_16X8(mb_type))
1850 av_log(s->avctx, AV_LOG_DEBUG, "-");
1851 else if (IS_8X16(mb_type))
1852 av_log(s->avctx, AV_LOG_DEBUG, "|");
1853 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1854 av_log(s->avctx, AV_LOG_DEBUG, " ");
1856 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* Third glyph: '=' marks interlaced macroblocks. */
1859 if (IS_INTERLACED(mb_type))
1860 av_log(s->avctx, AV_LOG_DEBUG, "=");
1862 av_log(s->avctx, AV_LOG_DEBUG, " ");
1865 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1871 * find the lowest MB row referenced in the MVs
/* Used for frame-threading: returns the last MB row of the reference frame
 * (dir 0 = forward, dir 1 = backward) that the current MB's motion vectors
 * can touch, clamped to [0, mb_height-1].
 * NOTE(review): the switch cases setting mvs/off are missing from this
 * extraction; surviving code left byte-identical. */
1873 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* qpel_shift converts half-pel MVs to the same 1/4-pel scale. */
1875 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1876 int my, off, i, mvs;
/* Field pictures and GMC are handled conservatively (whole frame). */
1878 if (s->picture_structure != PICT_FRAME || s->mcsel)
1881 switch (s->mv_type) {
1895 for (i = 0; i < mvs; i++) {
1896 my = s->mv[dir][i][1]<<qpel_shift;
1897 my_max = FFMAX(my_max, my);
1898 my_min = FFMIN(my_min, my);
/* >>6: 1/4-pel units to 16-line MB rows, rounded up via +63. */
1901 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1903 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1905 return s->mb_height-1;
1908 /* put block[] to dest[] */
/* Dequantize an intra block, then IDCT and overwrite dest (no add). */
1909 static inline void put_dct(MpegEncContext *s,
1910 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1912 s->dct_unquantize_intra(s, block, i, qscale);
1913 s->dsp.idct_put (dest, line_size, block);
1916 /* add block[] to dest[] */
/* IDCT the (already dequantized) block and add it to dest; skipped when
 * the block has no coded coefficients (block_last_index < 0). */
1917 static inline void add_dct(MpegEncContext *s,
1918 int16_t *block, int i, uint8_t *dest, int line_size)
1920 if (s->block_last_index[i] >= 0) {
1921 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block, IDCT it, and add to dest; skipped when the
 * block has no coded coefficients (block_last_index < 0). */
1925 static inline void add_dequant_dct(MpegEncContext *s,
1926 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1928 if (s->block_last_index[i] >= 0) {
1929 s->dct_unquantize_inter(s, block, i, qscale);
1931 s->dsp.idct_add (dest, line_size, block);
1936 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra-prediction state (DC values back to 1024, AC values to
 * zero, coded_block flags cleared for msmpeg4 >= v3) for the current MB,
 * then clears its mbintra_table entry.
 * NOTE(review): interior lines (braces, luma dc_val[0][xy] reset) are
 * missing from this extraction; surviving code left byte-identical. */
1938 void ff_clean_intra_table_entries(MpegEncContext *s)
1940 int wrap = s->b8_stride;
1941 int xy = s->block_index[0];
/* Luma DC predictors: 1024 is the neutral reset value. */
1944 s->dc_val[0][xy + 1 ] =
1945 s->dc_val[0][xy + wrap] =
1946 s->dc_val[0][xy + 1 + wrap] = 1024;
1948 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1949 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1950 if (s->msmpeg4_version>=3) {
1951 s->coded_block[xy ] =
1952 s->coded_block[xy + 1 ] =
1953 s->coded_block[xy + wrap] =
1954 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma predictors use MB-granularity indexing. */
1957 wrap = s->mb_stride;
1958 xy = s->mb_x + s->mb_y * wrap;
1960 s->dc_val[2][xy] = 1024;
1962 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1963 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1965 s->mbintra_table[xy]= 0;
1968 /* generic function called after a macroblock has been parsed by the
1969 decoder or after it has been encoded by the encoder.
1971 Important variables used:
1972 s->mb_intra : true if intra macroblock
1973 s->mv_dir : motion vector direction
1974 s->mv_type : motion vector type
1975 s->mv : motion vector
1976 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* NOTE(review): interior lines (braces, is_mpeg12 branches, some else
 * clauses) are missing from this extraction; code left byte-identical. */
1978 static av_always_inline
1979 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
1982 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* Deprecated XvMC path: hardware consumes pblocks directly. */
1985 FF_DISABLE_DEPRECATION_WARNINGS
1986 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1987 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1990 FF_ENABLE_DEPRECATION_WARNINGS
1991 #endif /* FF_API_XVMC */
1993 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1994 /* print DCT coefficients */
1996 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1998 for(j=0; j<64; j++){
1999 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2001 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2005 s->current_picture.qscale_table[mb_xy] = s->qscale;
2007 /* update DC predictors for P macroblocks */
2009 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2010 if(s->mbintra_table[mb_xy])
2011 ff_clean_intra_table_entries(s);
2015 s->last_dc[2] = 128 << s->intra_dc_precision;
2018 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2019 s->mbintra_table[mb_xy]=1;
2021 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2022 uint8_t *dest_y, *dest_cb, *dest_cr;
2023 int dct_linesize, dct_offset;
2024 op_pixels_func (*op_pix)[4];
2025 qpel_mc_func (*op_qpix)[16];
2026 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2027 const int uvlinesize = s->current_picture.f.linesize[1];
2028 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2029 const int block_size = 8;
2031 /* avoid copy if macroblock skipped in last frame too */
2032 /* skip only during decoding as we might trash the buffers during encoding a bit */
2034 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2036 if (s->mb_skipped) {
2038 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2040 } else if(!s->current_picture.reference) {
2043 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT: double the stride and offset by one line so the two
 * fields are written separately. */
2047 dct_linesize = linesize << s->interlaced_dct;
2048 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2052 dest_cb= s->dest[1];
2053 dest_cr= s->dest[2];
/* Non-readable destination: render into the scratchpad and copy later. */
2055 dest_y = s->b_scratchpad;
2056 dest_cb= s->b_scratchpad+16*linesize;
2057 dest_cr= s->b_scratchpad+32*linesize;
2061 /* motion handling */
2062 /* decoding or more than one mb_type (MC was already done otherwise) */
/* With frame threading, wait until the reference rows we MC from have
 * been decoded by the other thread. */
2065 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2066 if (s->mv_dir & MV_DIR_FORWARD) {
2067 ff_thread_await_progress(&s->last_picture_ptr->tf,
2068 ff_MPV_lowest_referenced_row(s, 0),
2071 if (s->mv_dir & MV_DIR_BACKWARD) {
2072 ff_thread_await_progress(&s->next_picture_ptr->tf,
2073 ff_MPV_lowest_referenced_row(s, 1),
/* Forward prediction writes with put_*; a subsequent backward pass then
 * averages with avg_* for bidirectional MBs. */
2078 op_qpix= s->me.qpel_put;
2079 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2080 op_pix = s->hdsp.put_pixels_tab;
2082 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2084 if (s->mv_dir & MV_DIR_FORWARD) {
2085 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2086 op_pix = s->hdsp.avg_pixels_tab;
2087 op_qpix= s->me.qpel_avg;
2089 if (s->mv_dir & MV_DIR_BACKWARD) {
2090 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2094 /* skip dequant / idct if we are really late ;) */
2095 if(s->avctx->skip_idct){
2096 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2097 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2098 || s->avctx->skip_idct >= AVDISCARD_ALL)
2102 /* add dct residue */
/* Inter residue, path 1: codecs that need dequantization here. */
2103 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2104 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2105 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2106 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2107 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2108 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2110 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2111 if (s->chroma_y_shift){
2112 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2113 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2117 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2118 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2119 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2120 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* Inter residue, path 2: blocks already dequantized during parsing. */
2123 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2124 add_dct(s, block[0], 0, dest_y , dct_linesize);
2125 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2126 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2127 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2129 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2130 if(s->chroma_y_shift){//Chroma420
2131 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2132 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2135 dct_linesize = uvlinesize << s->interlaced_dct;
2136 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2138 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2139 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2140 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2141 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2142 if(!s->chroma_x_shift){//Chroma444
2143 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2144 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2145 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2146 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2151 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2152 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2155 /* dct only in intra block */
/* Intra, path 1: dequantize + IDCT overwrite (put). */
2156 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2157 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2158 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2159 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2160 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2162 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2163 if(s->chroma_y_shift){
2164 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2165 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2169 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2170 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2171 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2172 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* Intra, path 2: blocks already dequantized — plain IDCT put. */
2176 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2177 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2178 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2179 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2181 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2182 if(s->chroma_y_shift){
2183 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2184 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2187 dct_linesize = uvlinesize << s->interlaced_dct;
2188 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2190 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2191 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2192 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2193 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2194 if(!s->chroma_x_shift){//Chroma444
2195 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2196 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2197 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2198 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* Rendered into the scratchpad earlier — copy to the real destination. */
2206 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2207 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2208 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Dispatch to the inlined MB decoder with a compile-time is_mpeg12 flag so
 * each codec family gets a specialized code path. */
2213 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2215 if(s->out_format == FMT_MPEG1) {
2216 MPV_decode_mb_internal(s, block, 1);
2219 MPV_decode_mb_internal(s, block, 0);
2223 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* Draw edge padding for the just-decoded band and invoke the user's
 * draw_horiz_band callback with the correct source picture and offsets.
 * NOTE(review): interior lines (braces, src selection, offset[1]) are
 * missing from this extraction; surviving code left byte-identical. */
2225 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2226 Picture *last, int y, int h, int picture_structure,
2227 int first_field, int draw_edges, int low_delay,
2228 int v_edge_pos, int h_edge_pos)
2230 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2231 int hshift = desc->log2_chroma_w;
2232 int vshift = desc->log2_chroma_h;
2233 const int field_pic = picture_structure != PICT_FRAME;
/* Pad left/right (and top/bottom at the frame edges) of this band. */
2239 if (!avctx->hwaccel &&
2242 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2243 int *linesize = cur->f.linesize;
2244 int sides = 0, edge_h;
2245 if (y==0) sides |= EDGE_TOP;
2246 if (y + h >= v_edge_pos)
2247 sides |= EDGE_BOTTOM;
2249 edge_h= FFMIN(h, v_edge_pos - y);
2251 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2252 linesize[0], h_edge_pos, edge_h,
2253 EDGE_WIDTH, EDGE_WIDTH, sides);
2254 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2255 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2256 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2257 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2258 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2259 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
/* Clamp the band height for the bottom of the picture. */
2262 h = FFMIN(h, avctx->height - y);
2264 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2266 if (avctx->draw_horiz_band) {
2268 int offset[AV_NUM_DATA_POINTERS];
/* In coded order (or low delay / B picture), the band comes from the
 * current picture; otherwise presumably from the last picture. */
2271 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2272 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2279 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2280 picture_structure == PICT_FRAME &&
2281 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2282 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2285 offset[0]= y * src->linesize[0];
2287 offset[2]= (y >> vshift) * src->linesize[1];
2288 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2294 avctx->draw_horiz_band(avctx, src, offset,
2295 y, picture_structure, h);
2299 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2301 int draw_edges = s->unrestricted_mv && !s->intra_only;
2302 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2303 &s->last_picture, y, h, s->picture_structure,
2304 s->first_field, draw_edges, s->low_delay,
2305 s->v_edge_pos, s->h_edge_pos);
2308 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2309 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2310 const int uvlinesize = s->current_picture.f.linesize[1];
2311 const int mb_size= 4;
2313 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2314 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2315 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2316 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2317 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2318 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2319 //block_index is not used by mpeg2, so it is not affected by chroma_format
2321 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2322 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2323 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2325 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2327 if(s->picture_structure==PICT_FRAME){
2328 s->dest[0] += s->mb_y * linesize << mb_size;
2329 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2330 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2332 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2333 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2334 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2335 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 *                  (inverse) permutated to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t scratch[64];
    int idx;

    if (last <= 0)
        return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* First pass: move every (potentially) nonzero coefficient aside and
     * clear its original slot, so the second pass cannot overwrite data
     * that has not been saved yet. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        scratch[pos] = block[pos];
        block[pos]   = 0;
    }

    /* Second pass: drop each saved coefficient into its permuted slot. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        block[permutation[pos]] = scratch[pos];
    }
}
2369 void ff_mpeg_flush(AVCodecContext *avctx){
2371 MpegEncContext *s = avctx->priv_data;
2373 if(s==NULL || s->picture==NULL)
2376 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2377 ff_mpeg_unref_picture(s, &s->picture[i]);
2378 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2380 ff_mpeg_unref_picture(s, &s->current_picture);
2381 ff_mpeg_unref_picture(s, &s->last_picture);
2382 ff_mpeg_unref_picture(s, &s->next_picture);
2384 s->mb_x= s->mb_y= 0;
2386 s->parse_context.state= -1;
2387 s->parse_context.frame_start_found= 0;
2388 s->parse_context.overread= 0;
2389 s->parse_context.overread_index= 0;
2390 s->parse_context.index= 0;
2391 s->parse_context.last_index= 0;
2392 s->bitstream_buffer_size=0;
2396 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2397 int16_t *block, int n, int qscale)
2399 int i, level, nCoeffs;
2400 const uint16_t *quant_matrix;
2402 nCoeffs= s->block_last_index[n];
2405 block[0] = block[0] * s->y_dc_scale;
2407 block[0] = block[0] * s->c_dc_scale;
2408 /* XXX: only mpeg1 */
2409 quant_matrix = s->intra_matrix;
2410 for(i=1;i<=nCoeffs;i++) {
2411 int j= s->intra_scantable.permutated[i];
2416 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2417 level = (level - 1) | 1;
2420 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2421 level = (level - 1) | 1;
2428 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2429 int16_t *block, int n, int qscale)
2431 int i, level, nCoeffs;
2432 const uint16_t *quant_matrix;
2434 nCoeffs= s->block_last_index[n];
2436 quant_matrix = s->inter_matrix;
2437 for(i=0; i<=nCoeffs; i++) {
2438 int j= s->intra_scantable.permutated[i];
2443 level = (((level << 1) + 1) * qscale *
2444 ((int) (quant_matrix[j]))) >> 4;
2445 level = (level - 1) | 1;
2448 level = (((level << 1) + 1) * qscale *
2449 ((int) (quant_matrix[j]))) >> 4;
2450 level = (level - 1) | 1;
2457 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2458 int16_t *block, int n, int qscale)
2460 int i, level, nCoeffs;
2461 const uint16_t *quant_matrix;
2463 if(s->alternate_scan) nCoeffs= 63;
2464 else nCoeffs= s->block_last_index[n];
2467 block[0] = block[0] * s->y_dc_scale;
2469 block[0] = block[0] * s->c_dc_scale;
2470 quant_matrix = s->intra_matrix;
2471 for(i=1;i<=nCoeffs;i++) {
2472 int j= s->intra_scantable.permutated[i];
2477 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2480 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2487 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2488 int16_t *block, int n, int qscale)
2490 int i, level, nCoeffs;
2491 const uint16_t *quant_matrix;
2494 if(s->alternate_scan) nCoeffs= 63;
2495 else nCoeffs= s->block_last_index[n];
2498 block[0] = block[0] * s->y_dc_scale;
2500 block[0] = block[0] * s->c_dc_scale;
2501 quant_matrix = s->intra_matrix;
2502 for(i=1;i<=nCoeffs;i++) {
2503 int j= s->intra_scantable.permutated[i];
2508 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2511 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2520 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2521 int16_t *block, int n, int qscale)
2523 int i, level, nCoeffs;
2524 const uint16_t *quant_matrix;
2527 if(s->alternate_scan) nCoeffs= 63;
2528 else nCoeffs= s->block_last_index[n];
2530 quant_matrix = s->inter_matrix;
2531 for(i=0; i<=nCoeffs; i++) {
2532 int j= s->intra_scantable.permutated[i];
2537 level = (((level << 1) + 1) * qscale *
2538 ((int) (quant_matrix[j]))) >> 4;
2541 level = (((level << 1) + 1) * qscale *
2542 ((int) (quant_matrix[j]))) >> 4;
2551 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2552 int16_t *block, int n, int qscale)
2554 int i, level, qmul, qadd;
2557 assert(s->block_last_index[n]>=0);
2563 block[0] = block[0] * s->y_dc_scale;
2565 block[0] = block[0] * s->c_dc_scale;
2566 qadd = (qscale - 1) | 1;
2573 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2575 for(i=1; i<=nCoeffs; i++) {
2579 level = level * qmul - qadd;
2581 level = level * qmul + qadd;
2588 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2589 int16_t *block, int n, int qscale)
2591 int i, level, qmul, qadd;
2594 assert(s->block_last_index[n]>=0);
2596 qadd = (qscale - 1) | 1;
2599 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2601 for(i=0; i<=nCoeffs; i++) {
2605 level = level * qmul - qadd;
2607 level = level * qmul + qadd;
2615 * set qscale and update qscale dependent variables.
2617 void ff_set_qscale(MpegEncContext * s, int qscale)
2621 else if (qscale > 31)
2625 s->chroma_qscale= s->chroma_qscale_table[qscale];
2627 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2628 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2631 void ff_MPV_report_decode_progress(MpegEncContext *s)
2633 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2634 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
#if CONFIG_ERROR_RESILIENCE
/* Copy the per-frame state the error-resilience code needs from the
 * MpegEncContext into the ERContext, then start error tracking. */
void ff_mpeg_er_frame_start(MpegEncContext *s)
{
    ERContext *er = &s->er;

    er->cur_pic  = s->current_picture_ptr;
    er->last_pic = s->last_picture_ptr;
    er->next_pic = s->next_picture_ptr;

    er->pp_time           = s->pp_time;
    er->pb_time           = s->pb_time;
    er->quarter_sample    = s->quarter_sample;
    er->partitioned_frame = s->partitioned_frame;

    ff_er_frame_start(er);
}
#endif /* CONFIG_ERROR_RESILIENCE */