/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * The simplest mpeg encoder (well, it was the simplest!).
 */
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
38 #include "h264chroma.h"
41 #include "mpegutils.h"
42 #include "mpegvideo.h"
49 static const uint8_t ff_default_chroma_qscale_table[32] = {
50 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
51 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
52 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
55 const uint8_t ff_mpeg1_dc_scale_table[128] = {
56 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
57 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
67 static const uint8_t mpeg2_dc_scale_table1[128] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
79 static const uint8_t mpeg2_dc_scale_table2[128] = {
80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
91 static const uint8_t mpeg2_dc_scale_table3[128] = {
92 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
93 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
103 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
104 ff_mpeg1_dc_scale_table,
105 mpeg2_dc_scale_table1,
106 mpeg2_dc_scale_table2,
107 mpeg2_dc_scale_table3,
110 const uint8_t ff_alternate_horizontal_scan[64] = {
111 0, 1, 2, 3, 8, 9, 16, 17,
112 10, 11, 4, 5, 6, 7, 15, 14,
113 13, 12, 19, 18, 24, 25, 32, 33,
114 26, 27, 20, 21, 22, 23, 28, 29,
115 30, 31, 34, 35, 40, 41, 48, 49,
116 42, 43, 36, 37, 38, 39, 44, 45,
117 46, 47, 50, 51, 56, 57, 58, 59,
118 52, 53, 54, 55, 60, 61, 62, 63,
121 const uint8_t ff_alternate_vertical_scan[64] = {
122 0, 8, 16, 24, 1, 9, 2, 10,
123 17, 25, 32, 40, 48, 56, 57, 49,
124 41, 33, 26, 18, 3, 11, 4, 12,
125 19, 27, 34, 42, 50, 58, 35, 43,
126 51, 59, 20, 28, 5, 13, 6, 14,
127 21, 29, 36, 44, 52, 60, 37, 45,
128 53, 61, 22, 30, 7, 15, 23, 31,
129 38, 46, 54, 62, 39, 47, 55, 63,
132 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
133 int16_t *block, int n, int qscale)
135 int i, level, nCoeffs;
136 const uint16_t *quant_matrix;
138 nCoeffs= s->block_last_index[n];
140 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
141 /* XXX: only mpeg1 */
142 quant_matrix = s->intra_matrix;
143 for(i=1;i<=nCoeffs;i++) {
144 int j= s->intra_scantable.permutated[i];
149 level = (int)(level * qscale * quant_matrix[j]) >> 3;
150 level = (level - 1) | 1;
153 level = (int)(level * qscale * quant_matrix[j]) >> 3;
154 level = (level - 1) | 1;
161 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
162 int16_t *block, int n, int qscale)
164 int i, level, nCoeffs;
165 const uint16_t *quant_matrix;
167 nCoeffs= s->block_last_index[n];
169 quant_matrix = s->inter_matrix;
170 for(i=0; i<=nCoeffs; i++) {
171 int j= s->intra_scantable.permutated[i];
176 level = (((level << 1) + 1) * qscale *
177 ((int) (quant_matrix[j]))) >> 4;
178 level = (level - 1) | 1;
181 level = (((level << 1) + 1) * qscale *
182 ((int) (quant_matrix[j]))) >> 4;
183 level = (level - 1) | 1;
190 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
191 int16_t *block, int n, int qscale)
193 int i, level, nCoeffs;
194 const uint16_t *quant_matrix;
196 if(s->alternate_scan) nCoeffs= 63;
197 else nCoeffs= s->block_last_index[n];
199 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
200 quant_matrix = s->intra_matrix;
201 for(i=1;i<=nCoeffs;i++) {
202 int j= s->intra_scantable.permutated[i];
207 level = (int)(level * qscale * quant_matrix[j]) >> 3;
210 level = (int)(level * qscale * quant_matrix[j]) >> 3;
217 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
218 int16_t *block, int n, int qscale)
220 int i, level, nCoeffs;
221 const uint16_t *quant_matrix;
224 if(s->alternate_scan) nCoeffs= 63;
225 else nCoeffs= s->block_last_index[n];
227 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
229 quant_matrix = s->intra_matrix;
230 for(i=1;i<=nCoeffs;i++) {
231 int j= s->intra_scantable.permutated[i];
236 level = (int)(level * qscale * quant_matrix[j]) >> 3;
239 level = (int)(level * qscale * quant_matrix[j]) >> 3;
248 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
249 int16_t *block, int n, int qscale)
251 int i, level, nCoeffs;
252 const uint16_t *quant_matrix;
255 if(s->alternate_scan) nCoeffs= 63;
256 else nCoeffs= s->block_last_index[n];
258 quant_matrix = s->inter_matrix;
259 for(i=0; i<=nCoeffs; i++) {
260 int j= s->intra_scantable.permutated[i];
265 level = (((level << 1) + 1) * qscale *
266 ((int) (quant_matrix[j]))) >> 4;
269 level = (((level << 1) + 1) * qscale *
270 ((int) (quant_matrix[j]))) >> 4;
279 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
280 int16_t *block, int n, int qscale)
282 int i, level, qmul, qadd;
285 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
290 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
291 qadd = (qscale - 1) | 1;
298 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
300 for(i=1; i<=nCoeffs; i++) {
304 level = level * qmul - qadd;
306 level = level * qmul + qadd;
313 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
314 int16_t *block, int n, int qscale)
316 int i, level, qmul, qadd;
319 av_assert2(s->block_last_index[n]>=0);
321 qadd = (qscale - 1) | 1;
324 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
326 for(i=0; i<=nCoeffs; i++) {
330 level = level * qmul - qadd;
332 level = level * qmul + qadd;
339 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
341 int mb_x, int mb_y, int mb_intra, int mb_skipped)
343 MpegEncContext *s = opaque;
346 s->mv_type = mv_type;
347 s->mb_intra = mb_intra;
348 s->mb_skipped = mb_skipped;
351 memcpy(s->mv, mv, sizeof(*mv));
353 ff_init_block_index(s);
354 ff_update_block_index(s);
356 s->bdsp.clear_blocks(s->block[0]);
358 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
359 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
360 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
363 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
364 ff_MPV_decode_mb(s, s->block);
367 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
370 memset(dst + h*linesize, 128, 16);
373 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
376 memset(dst + h*linesize, 128, 8);
379 /* init common dct for both encoder and decoder */
380 av_cold int ff_dct_common_init(MpegEncContext *s)
382 ff_blockdsp_init(&s->bdsp, s->avctx);
383 ff_dsputil_init(&s->dsp, s->avctx);
384 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
385 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
386 ff_mpegvideodsp_init(&s->mdsp);
387 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
389 if (s->avctx->debug & FF_DEBUG_NOMC) {
391 for (i=0; i<4; i++) {
392 s->hdsp.avg_pixels_tab[0][i] = gray16;
393 s->hdsp.put_pixels_tab[0][i] = gray16;
394 s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
396 s->hdsp.avg_pixels_tab[1][i] = gray8;
397 s->hdsp.put_pixels_tab[1][i] = gray8;
398 s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
402 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
403 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
404 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
405 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
406 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
407 if (s->flags & CODEC_FLAG_BITEXACT)
408 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
409 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
412 ff_MPV_common_init_axp(s);
414 ff_MPV_common_init_arm(s);
416 ff_MPV_common_init_ppc(s);
418 ff_MPV_common_init_x86(s);
420 /* load & permutate scantables
421 * note: only wmv uses different ones
423 if (s->alternate_scan) {
424 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
425 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
427 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
428 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
430 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
431 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
436 static int frame_size_alloc(MpegEncContext *s, int linesize)
438 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
440 if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
444 av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
445 return AVERROR_PATCHWELCOME;
448 // edge emu needs blocksize + filter length - 1
449 // (= 17x17 for halfpel / 21x21 for h264)
450 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
451 // at uvlinesize. It supports only YUV420 so 24x24 is enough
452 // linesize * interlaced * MBsize
453 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
456 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
458 s->me.temp = s->me.scratchpad;
459 s->rd_scratchpad = s->me.scratchpad;
460 s->b_scratchpad = s->me.scratchpad;
461 s->obmc_scratchpad = s->me.scratchpad + 16;
465 av_freep(&s->edge_emu_buffer);
466 return AVERROR(ENOMEM);
470 * Allocate a frame buffer
472 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
474 int edges_needed = av_codec_is_encoder(s->avctx->codec);
478 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
479 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
480 s->codec_id != AV_CODEC_ID_MSS2) {
482 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
483 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
486 r = ff_thread_get_buffer(s->avctx, &pic->tf,
487 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
489 pic->f->width = s->avctx->width;
490 pic->f->height = s->avctx->height;
491 pic->f->format = s->avctx->pix_fmt;
492 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
495 if (r < 0 || !pic->f->buf[0]) {
496 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
503 for (i = 0; pic->f->data[i]; i++) {
504 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
505 pic->f->linesize[i] +
506 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
507 pic->f->data[i] += offset;
509 pic->f->width = s->avctx->width;
510 pic->f->height = s->avctx->height;
513 if (s->avctx->hwaccel) {
514 assert(!pic->hwaccel_picture_private);
515 if (s->avctx->hwaccel->frame_priv_data_size) {
516 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
517 if (!pic->hwaccel_priv_buf) {
518 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
521 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
525 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
526 s->uvlinesize != pic->f->linesize[1])) {
527 av_log(s->avctx, AV_LOG_ERROR,
528 "get_buffer() failed (stride changed)\n");
529 ff_mpeg_unref_picture(s, pic);
533 if (pic->f->linesize[1] != pic->f->linesize[2]) {
534 av_log(s->avctx, AV_LOG_ERROR,
535 "get_buffer() failed (uv stride mismatch)\n");
536 ff_mpeg_unref_picture(s, pic);
540 if (!s->edge_emu_buffer &&
541 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
542 av_log(s->avctx, AV_LOG_ERROR,
543 "get_buffer() failed to allocate context scratch buffers.\n");
544 ff_mpeg_unref_picture(s, pic);
551 void ff_free_picture_tables(Picture *pic)
555 pic->alloc_mb_width =
556 pic->alloc_mb_height = 0;
558 av_buffer_unref(&pic->mb_var_buf);
559 av_buffer_unref(&pic->mc_mb_var_buf);
560 av_buffer_unref(&pic->mb_mean_buf);
561 av_buffer_unref(&pic->mbskip_table_buf);
562 av_buffer_unref(&pic->qscale_table_buf);
563 av_buffer_unref(&pic->mb_type_buf);
565 for (i = 0; i < 2; i++) {
566 av_buffer_unref(&pic->motion_val_buf[i]);
567 av_buffer_unref(&pic->ref_index_buf[i]);
571 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
573 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
574 const int mb_array_size = s->mb_stride * s->mb_height;
575 const int b8_array_size = s->b8_stride * s->mb_height * 2;
579 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
580 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
581 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
583 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
584 return AVERROR(ENOMEM);
587 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
588 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
589 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
590 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
591 return AVERROR(ENOMEM);
594 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
595 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
596 int ref_index_size = 4 * mb_array_size;
598 for (i = 0; mv_size && i < 2; i++) {
599 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
600 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
601 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
602 return AVERROR(ENOMEM);
606 pic->alloc_mb_width = s->mb_width;
607 pic->alloc_mb_height = s->mb_height;
612 static int make_tables_writable(Picture *pic)
615 #define MAKE_WRITABLE(table) \
618 (ret = av_buffer_make_writable(&pic->table)) < 0)\
622 MAKE_WRITABLE(mb_var_buf);
623 MAKE_WRITABLE(mc_mb_var_buf);
624 MAKE_WRITABLE(mb_mean_buf);
625 MAKE_WRITABLE(mbskip_table_buf);
626 MAKE_WRITABLE(qscale_table_buf);
627 MAKE_WRITABLE(mb_type_buf);
629 for (i = 0; i < 2; i++) {
630 MAKE_WRITABLE(motion_val_buf[i]);
631 MAKE_WRITABLE(ref_index_buf[i]);
638 * Allocate a Picture.
639 * The pixels are allocated/set by calling get_buffer() if shared = 0
641 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
645 if (pic->qscale_table_buf)
646 if ( pic->alloc_mb_width != s->mb_width
647 || pic->alloc_mb_height != s->mb_height)
648 ff_free_picture_tables(pic);
651 av_assert0(pic->f->data[0]);
654 av_assert0(!pic->f->buf[0]);
656 if (alloc_frame_buffer(s, pic) < 0)
659 s->linesize = pic->f->linesize[0];
660 s->uvlinesize = pic->f->linesize[1];
663 if (!pic->qscale_table_buf)
664 ret = alloc_picture_tables(s, pic);
666 ret = make_tables_writable(pic);
671 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
672 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
673 pic->mb_mean = pic->mb_mean_buf->data;
676 pic->mbskip_table = pic->mbskip_table_buf->data;
677 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
678 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
680 if (pic->motion_val_buf[0]) {
681 for (i = 0; i < 2; i++) {
682 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
683 pic->ref_index[i] = pic->ref_index_buf[i]->data;
689 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
690 ff_mpeg_unref_picture(s, pic);
691 ff_free_picture_tables(pic);
692 return AVERROR(ENOMEM);
696 * Deallocate a picture.
698 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
700 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
703 /* WM Image / Screen codecs allocate internal buffers with different
704 * dimensions / colorspaces; ignore user-defined callbacks for these. */
705 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
706 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
707 s->codec_id != AV_CODEC_ID_MSS2)
708 ff_thread_release_buffer(s->avctx, &pic->tf);
710 av_frame_unref(pic->f);
712 av_buffer_unref(&pic->hwaccel_priv_buf);
714 if (pic->needs_realloc)
715 ff_free_picture_tables(pic);
717 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
720 static int update_picture_tables(Picture *dst, Picture *src)
724 #define UPDATE_TABLE(table)\
727 (!dst->table || dst->table->buffer != src->table->buffer)) {\
728 av_buffer_unref(&dst->table);\
729 dst->table = av_buffer_ref(src->table);\
731 ff_free_picture_tables(dst);\
732 return AVERROR(ENOMEM);\
737 UPDATE_TABLE(mb_var_buf);
738 UPDATE_TABLE(mc_mb_var_buf);
739 UPDATE_TABLE(mb_mean_buf);
740 UPDATE_TABLE(mbskip_table_buf);
741 UPDATE_TABLE(qscale_table_buf);
742 UPDATE_TABLE(mb_type_buf);
743 for (i = 0; i < 2; i++) {
744 UPDATE_TABLE(motion_val_buf[i]);
745 UPDATE_TABLE(ref_index_buf[i]);
748 dst->mb_var = src->mb_var;
749 dst->mc_mb_var = src->mc_mb_var;
750 dst->mb_mean = src->mb_mean;
751 dst->mbskip_table = src->mbskip_table;
752 dst->qscale_table = src->qscale_table;
753 dst->mb_type = src->mb_type;
754 for (i = 0; i < 2; i++) {
755 dst->motion_val[i] = src->motion_val[i];
756 dst->ref_index[i] = src->ref_index[i];
759 dst->alloc_mb_width = src->alloc_mb_width;
760 dst->alloc_mb_height = src->alloc_mb_height;
765 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
769 av_assert0(!dst->f->buf[0]);
770 av_assert0(src->f->buf[0]);
774 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
778 ret = update_picture_tables(dst, src);
782 if (src->hwaccel_picture_private) {
783 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
784 if (!dst->hwaccel_priv_buf)
786 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
789 dst->field_picture = src->field_picture;
790 dst->mb_var_sum = src->mb_var_sum;
791 dst->mc_mb_var_sum = src->mc_mb_var_sum;
792 dst->b_frame_score = src->b_frame_score;
793 dst->needs_realloc = src->needs_realloc;
794 dst->reference = src->reference;
795 dst->shared = src->shared;
799 ff_mpeg_unref_picture(s, dst);
803 static void exchange_uv(MpegEncContext *s)
808 s->pblocks[4] = s->pblocks[5];
812 static int init_duplicate_context(MpegEncContext *s)
814 int y_size = s->b8_stride * (2 * s->mb_height + 1);
815 int c_size = s->mb_stride * (s->mb_height + 1);
816 int yc_size = y_size + 2 * c_size;
819 if (s->mb_height & 1)
820 yc_size += 2*s->b8_stride + 2*s->mb_stride;
827 s->obmc_scratchpad = NULL;
830 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
831 ME_MAP_SIZE * sizeof(uint32_t), fail)
832 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
833 ME_MAP_SIZE * sizeof(uint32_t), fail)
834 if (s->avctx->noise_reduction) {
835 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
836 2 * 64 * sizeof(int), fail)
839 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
840 s->block = s->blocks[0];
842 for (i = 0; i < 12; i++) {
843 s->pblocks[i] = &s->block[i];
845 if (s->avctx->codec_tag == AV_RL32("VCR2"))
848 if (s->out_format == FMT_H263) {
850 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
851 yc_size * sizeof(int16_t) * 16, fail);
852 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
853 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
854 s->ac_val[2] = s->ac_val[1] + c_size;
859 return -1; // free() through ff_MPV_common_end()
862 static void free_duplicate_context(MpegEncContext *s)
867 av_freep(&s->edge_emu_buffer);
868 av_freep(&s->me.scratchpad);
872 s->obmc_scratchpad = NULL;
874 av_freep(&s->dct_error_sum);
875 av_freep(&s->me.map);
876 av_freep(&s->me.score_map);
877 av_freep(&s->blocks);
878 av_freep(&s->ac_val_base);
882 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
884 #define COPY(a) bak->a = src->a
885 COPY(edge_emu_buffer);
890 COPY(obmc_scratchpad);
897 COPY(me.map_generation);
909 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
913 // FIXME copy only needed parts
915 backup_duplicate_context(&bak, dst);
916 memcpy(dst, src, sizeof(MpegEncContext));
917 backup_duplicate_context(dst, &bak);
918 for (i = 0; i < 12; i++) {
919 dst->pblocks[i] = &dst->block[i];
921 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
923 if (!dst->edge_emu_buffer &&
924 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
925 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
926 "scratch buffers.\n");
929 // STOP_TIMER("update_duplicate_context")
930 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
934 int ff_mpeg_update_thread_context(AVCodecContext *dst,
935 const AVCodecContext *src)
938 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
945 // FIXME can parameters change on I-frames?
946 // in that case dst may need a reinit
947 if (!s->context_initialized) {
948 memcpy(s, s1, sizeof(MpegEncContext));
951 s->bitstream_buffer = NULL;
952 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
954 if (s1->context_initialized){
955 // s->picture_range_start += MAX_PICTURE_COUNT;
956 // s->picture_range_end += MAX_PICTURE_COUNT;
957 if((ret = ff_MPV_common_init(s)) < 0){
958 memset(s, 0, sizeof(MpegEncContext));
965 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
966 s->context_reinit = 0;
967 s->height = s1->height;
968 s->width = s1->width;
969 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
973 s->avctx->coded_height = s1->avctx->coded_height;
974 s->avctx->coded_width = s1->avctx->coded_width;
975 s->avctx->width = s1->avctx->width;
976 s->avctx->height = s1->avctx->height;
978 s->coded_picture_number = s1->coded_picture_number;
979 s->picture_number = s1->picture_number;
981 av_assert0(!s->picture || s->picture != s1->picture);
983 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
984 ff_mpeg_unref_picture(s, &s->picture[i]);
985 if (s1->picture[i].f->buf[0] &&
986 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
990 #define UPDATE_PICTURE(pic)\
992 ff_mpeg_unref_picture(s, &s->pic);\
993 if (s1->pic.f && s1->pic.f->buf[0])\
994 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
996 ret = update_picture_tables(&s->pic, &s1->pic);\
1001 UPDATE_PICTURE(current_picture);
1002 UPDATE_PICTURE(last_picture);
1003 UPDATE_PICTURE(next_picture);
1005 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1006 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1007 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1009 // Error/bug resilience
1010 s->next_p_frame_damaged = s1->next_p_frame_damaged;
1011 s->workaround_bugs = s1->workaround_bugs;
1012 s->padding_bug_score = s1->padding_bug_score;
1014 // MPEG4 timing info
1015 memcpy(&s->last_time_base, &s1->last_time_base,
1016 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1017 (char *) &s1->last_time_base);
1020 s->max_b_frames = s1->max_b_frames;
1021 s->low_delay = s1->low_delay;
1022 s->droppable = s1->droppable;
1024 // DivX handling (doesn't work)
1025 s->divx_packed = s1->divx_packed;
1027 if (s1->bitstream_buffer) {
1028 if (s1->bitstream_buffer_size +
1029 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
1030 av_fast_malloc(&s->bitstream_buffer,
1031 &s->allocated_bitstream_buffer_size,
1032 s1->allocated_bitstream_buffer_size);
1033 s->bitstream_buffer_size = s1->bitstream_buffer_size;
1034 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1035 s1->bitstream_buffer_size);
1036 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1037 FF_INPUT_BUFFER_PADDING_SIZE);
1040 // linesize dependend scratch buffer allocation
1041 if (!s->edge_emu_buffer)
1043 if (frame_size_alloc(s, s1->linesize) < 0) {
1044 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1045 "scratch buffers.\n");
1046 return AVERROR(ENOMEM);
1049 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1050 "be allocated due to unknown size.\n");
1053 // MPEG2/interlacing info
1054 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1055 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1057 if (!s1->first_field) {
1058 s->last_pict_type = s1->pict_type;
1059 if (s1->current_picture_ptr)
1060 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1067 * Set the given MpegEncContext to common defaults
1068 * (same for encoding and decoding).
1069 * The changed fields will not depend upon the
1070 * prior state of the MpegEncContext.
1072 void ff_MPV_common_defaults(MpegEncContext *s)
1074 s->y_dc_scale_table =
1075 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1076 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1077 s->progressive_frame = 1;
1078 s->progressive_sequence = 1;
1079 s->picture_structure = PICT_FRAME;
1081 s->coded_picture_number = 0;
1082 s->picture_number = 0;
1087 s->slice_context_count = 1;
1091 * Set the given MpegEncContext to defaults for decoding.
1092 * the changed fields will not depend upon
1093 * the prior state of the MpegEncContext.
1095 void ff_MPV_decode_defaults(MpegEncContext *s)
1097 ff_MPV_common_defaults(s);
1100 static int init_er(MpegEncContext *s)
1102 ERContext *er = &s->er;
1103 int mb_array_size = s->mb_height * s->mb_stride;
1106 er->avctx = s->avctx;
1109 er->mb_index2xy = s->mb_index2xy;
1110 er->mb_num = s->mb_num;
1111 er->mb_width = s->mb_width;
1112 er->mb_height = s->mb_height;
1113 er->mb_stride = s->mb_stride;
1114 er->b8_stride = s->b8_stride;
1116 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1117 er->error_status_table = av_mallocz(mb_array_size);
1118 if (!er->er_temp_buffer || !er->error_status_table)
1121 er->mbskip_table = s->mbskip_table;
1122 er->mbintra_table = s->mbintra_table;
1124 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1125 er->dc_val[i] = s->dc_val[i];
1127 er->decode_mb = mpeg_er_decode_mb;
1132 av_freep(&er->er_temp_buffer);
1133 av_freep(&er->error_status_table);
1134 return AVERROR(ENOMEM);
1138 * Initialize and allocates MpegEncContext fields dependent on the resolution.
1140 static int init_context_frame(MpegEncContext *s)
1142 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1144 s->mb_width = (s->width + 15) / 16;
1145 s->mb_stride = s->mb_width + 1;
1146 s->b8_stride = s->mb_width * 2 + 1;
1147 mb_array_size = s->mb_height * s->mb_stride;
1148 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1150 /* set default edge pos, will be overridden
1151 * in decode_header if needed */
1152 s->h_edge_pos = s->mb_width * 16;
1153 s->v_edge_pos = s->mb_height * 16;
1155 s->mb_num = s->mb_width * s->mb_height;
1160 s->block_wrap[3] = s->b8_stride;
1162 s->block_wrap[5] = s->mb_stride;
1164 y_size = s->b8_stride * (2 * s->mb_height + 1);
1165 c_size = s->mb_stride * (s->mb_height + 1);
1166 yc_size = y_size + 2 * c_size;
1168 if (s->mb_height & 1)
1169 yc_size += 2*s->b8_stride + 2*s->mb_stride;
1171 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
1172 for (y = 0; y < s->mb_height; y++)
1173 for (x = 0; x < s->mb_width; x++)
1174 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1176 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1179 /* Allocate MV tables */
1180 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1181 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1182 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1183 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1184 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1185 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1186 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1187 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1188 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1189 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1190 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1191 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1193 /* Allocate MB type table */
1194 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1196 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1198 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1199 mb_array_size * sizeof(float), fail);
1200 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1201 mb_array_size * sizeof(float), fail);
1205 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1206 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1207 /* interlaced direct mode decoding tables */
1208 for (i = 0; i < 2; i++) {
1210 for (j = 0; j < 2; j++) {
1211 for (k = 0; k < 2; k++) {
1212 FF_ALLOCZ_OR_GOTO(s->avctx,
1213 s->b_field_mv_table_base[i][j][k],
1214 mv_table_size * 2 * sizeof(int16_t),
1216 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1219 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1220 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1221 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1223 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1226 if (s->out_format == FMT_H263) {
1228 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1229 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1231 /* cbp, ac_pred, pred_dir */
1232 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1233 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1236 if (s->h263_pred || s->h263_plus || !s->encoding) {
1238 // MN: we need these for error resilience of intra-frames
1239 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1240 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1241 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1242 s->dc_val[2] = s->dc_val[1] + c_size;
1243 for (i = 0; i < yc_size; i++)
1244 s->dc_val_base[i] = 1024;
1247 /* which mb is a intra block */
1248 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1249 memset(s->mbintra_table, 1, mb_array_size);
1251 /* init macroblock skip table */
1252 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1253 // Note the + 1 is for a quicker mpeg4 slice_end detection
1257 return AVERROR(ENOMEM);
1261 * init common structure for both encoder and decoder.
1262 * this assumes that some variables like width/height are already set
/* Initialize the parts of MpegEncContext shared by encoder and decoder.
 * Assumes width/height and codec_id are already set by the caller.
 * NOTE(review): this chunk appears to be a lossy extraction -- braces,
 * declarations and several error-path ("goto fail") lines are not visible;
 * the comments below describe only the statements that are present. */
1264 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* One slice context per thread when slice threading is active, else one. */
1267 int nb_slices = (HAVE_THREADS &&
1268 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1269 s->avctx->thread_count : 1;
1271 if (s->encoding && s->avctx->slices)
1272 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2: round mb_height up to an even number of MB rows. */
1274 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1275 s->mb_height = (s->height + 31) / 32 * 2;
1277 s->mb_height = (s->height + 15) / 16;
1279 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1280 av_log(s->avctx, AV_LOG_ERROR,
1281 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp slice count to what the thread pool and picture height allow. */
1285 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1288 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1290 max_slices = MAX_THREADS;
1291 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1292 " reducing to %d\n", nb_slices, max_slices);
1293 nb_slices = max_slices;
1296 if ((s->width || s->height) &&
1297 av_image_check_size(s->width, s->height, 0, s->avctx))
1300 ff_dct_common_init(s);
/* Mirror the user-visible codec flags into the context. */
1302 s->flags = s->avctx->flags;
1303 s->flags2 = s->avctx->flags2;
1305 /* set chroma shifts */
1306 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1308 &s->chroma_y_shift);
1310 /* convert fourcc to upper case */
1311 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1313 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
/* Allocate the picture pool plus one AVFrame per slot and per special
 * picture (next/last/current/new). */
1315 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1316 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1317 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1318 s->picture[i].f = av_frame_alloc();
1319 if (!s->picture[i].f)
1322 memset(&s->next_picture, 0, sizeof(s->next_picture));
1323 memset(&s->last_picture, 0, sizeof(s->last_picture));
1324 memset(&s->current_picture, 0, sizeof(s->current_picture));
1325 memset(&s->new_picture, 0, sizeof(s->new_picture));
1326 s->next_picture.f = av_frame_alloc();
1327 if (!s->next_picture.f)
1329 s->last_picture.f = av_frame_alloc();
1330 if (!s->last_picture.f)
1332 s->current_picture.f = av_frame_alloc();
1333 if (!s->current_picture.f)
1335 s->new_picture.f = av_frame_alloc();
1336 if (!s->new_picture.f)
1339 if (init_context_frame(s))
1342 s->parse_context.state = -1;
1344 s->context_initialized = 1;
1345 s->thread_context[0] = s;
1347 //     if (s->width && s->height) {
1348 if (nb_slices > 1) {
/* Duplicate the context for each extra slice thread, then give every
 * thread a contiguous band of macroblock rows (rounded fairly). */
1349 for (i = 1; i < nb_slices; i++) {
1350 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1351 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1354 for (i = 0; i < nb_slices; i++) {
1355 if (init_duplicate_context(s->thread_context[i]) < 0)
1357 s->thread_context[i]->start_mb_y =
1358 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1359 s->thread_context[i]->end_mb_y =
1360 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1363 if (init_duplicate_context(s) < 0)
1366 s->end_mb_y = s->mb_height;
1368 s->slice_context_count = nb_slices;
/* fail path (label not visible here): tear everything down again. */
1373 ff_MPV_common_end(s);
1378 * Frees and resets MpegEncContext fields depending on the resolution.
1379 * Is used during resolution changes to avoid a full reinitialization of the codec context.
/* Free and reset the resolution-dependent tables of MpegEncContext so a
 * subsequent init_context_frame() can rebuild them at a new size.
 * NOTE(review): declarations/braces appear lost in extraction. */
1382 static int free_context_frame(MpegEncContext *s)
/* Motion-vector tables: free the *_base allocation and clear the offset
 * pointer that pointed into it. */
1386 av_freep(&s->mb_type);
1387 av_freep(&s->p_mv_table_base);
1388 av_freep(&s->b_forw_mv_table_base);
1389 av_freep(&s->b_back_mv_table_base);
1390 av_freep(&s->b_bidir_forw_mv_table_base);
1391 av_freep(&s->b_bidir_back_mv_table_base);
1392 av_freep(&s->b_direct_mv_table_base);
1393 s->p_mv_table = NULL;
1394 s->b_forw_mv_table = NULL;
1395 s->b_back_mv_table = NULL;
1396 s->b_bidir_forw_mv_table = NULL;
1397 s->b_bidir_back_mv_table = NULL;
1398 s->b_direct_mv_table = NULL;
/* Field-based MV tables: [2][2][2] resp. [2][2] arrays allocated only for
 * interlaced/MPEG-4 paths; freeing NULL is harmless. */
1399 for (i = 0; i < 2; i++) {
1400 for (j = 0; j < 2; j++) {
1401 for (k = 0; k < 2; k++) {
1402 av_freep(&s->b_field_mv_table_base[i][j][k]);
1403 s->b_field_mv_table[i][j][k] = NULL;
1405 av_freep(&s->b_field_select_table[i][j]);
1406 av_freep(&s->p_field_mv_table_base[i][j]);
1407 s->p_field_mv_table[i][j] = NULL;
1409 av_freep(&s->p_field_select_table[i]);
/* Per-macroblock side tables. */
1412 av_freep(&s->dc_val_base);
1413 av_freep(&s->coded_block_base);
1414 av_freep(&s->mbintra_table);
1415 av_freep(&s->cbp_table);
1416 av_freep(&s->pred_dir_table);
1418 av_freep(&s->mbskip_table);
1420 av_freep(&s->er.error_status_table);
1421 av_freep(&s->er.er_temp_buffer);
1422 av_freep(&s->mb_index2xy);
1423 av_freep(&s->lambda_table);
1425 av_freep(&s->cplx_tab);
1426 av_freep(&s->bits_tab);
/* Force re-derivation of line sizes on the next frame allocation. */
1428 s->linesize = s->uvlinesize = 0;
/* Re-initialize the context for a new frame size without a full
 * ff_MPV_common_init(): tear down per-resolution state, recompute
 * mb_height, rebuild frame tables and slice thread contexts.
 * NOTE(review): several lines (braces, "goto fail", returns) are missing
 * from this extraction. */
1433 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Drop the duplicated per-slice contexts first (index 0 is s itself). */
1437 if (s->slice_context_count > 1) {
1438 for (i = 0; i < s->slice_context_count; i++) {
1439 free_duplicate_context(s->thread_context[i]);
1441 for (i = 1; i < s->slice_context_count; i++) {
1442 av_freep(&s->thread_context[i]);
1445 free_duplicate_context(s);
1447 if ((err = free_context_frame(s)) < 0)
/* Mark every pooled picture so its buffers get reallocated at new size. */
1451 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1452 s->picture[i].needs_realloc = 1;
1455 s->last_picture_ptr =
1456 s->next_picture_ptr =
1457 s->current_picture_ptr = NULL;
/* Same mb_height rounding rule as ff_MPV_common_init(). */
1460 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1461 s->mb_height = (s->height + 31) / 32 * 2;
1463 s->mb_height = (s->height + 15) / 16;
1465 if ((s->width || s->height) &&
1466 av_image_check_size(s->width, s->height, 0, s->avctx))
1467 return AVERROR_INVALIDDATA;
1469 if ((err = init_context_frame(s)))
1472 s->thread_context[0] = s;
1474 if (s->width && s->height) {
1475 int nb_slices = s->slice_context_count;
1476 if (nb_slices > 1) {
/* Rebuild slice thread contexts; same banding as common_init. */
1477 for (i = 1; i < nb_slices; i++) {
1478 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1479 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1482 for (i = 0; i < nb_slices; i++) {
1483 if (init_duplicate_context(s->thread_context[i]) < 0)
1485 s->thread_context[i]->start_mb_y =
1486 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1487 s->thread_context[i]->end_mb_y =
1488 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1491 err = init_duplicate_context(s);
1495 s->end_mb_y = s->mb_height;
1497 s->slice_context_count = nb_slices;
/* fail path (label not visible here): full teardown. */
1502 ff_MPV_common_end(s);
1506 /* init common structure for both encoder and decoder */
/* Free everything allocated by ff_MPV_common_init(); safe to call on a
 * partially initialized context (all frees tolerate NULL). */
1507 void ff_MPV_common_end(MpegEncContext *s)
/* Slice thread contexts: index 0 is s itself, only 1..n-1 were malloced. */
1511 if (s->slice_context_count > 1) {
1512 for (i = 0; i < s->slice_context_count; i++) {
1513 free_duplicate_context(s->thread_context[i]);
1515 for (i = 1; i < s->slice_context_count; i++) {
1516 av_freep(&s->thread_context[i]);
1518 s->slice_context_count = 1;
1519 } else free_duplicate_context(s);
1521 av_freep(&s->parse_context.buffer);
1522 s->parse_context.buffer_size = 0;
1524 av_freep(&s->bitstream_buffer);
1525 s->allocated_bitstream_buffer_size = 0;
/* Release the picture pool and the four special pictures; each needs its
 * side tables freed, its buffer refs dropped, and its AVFrame freed. */
1528 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1529 ff_free_picture_tables(&s->picture[i]);
1530 ff_mpeg_unref_picture(s, &s->picture[i]);
1531 av_frame_free(&s->picture[i].f);
1534 av_freep(&s->picture);
1535 ff_free_picture_tables(&s->last_picture);
1536 ff_mpeg_unref_picture(s, &s->last_picture);
1537 av_frame_free(&s->last_picture.f);
1538 ff_free_picture_tables(&s->current_picture);
1539 ff_mpeg_unref_picture(s, &s->current_picture);
1540 av_frame_free(&s->current_picture.f);
1541 ff_free_picture_tables(&s->next_picture);
1542 ff_mpeg_unref_picture(s, &s->next_picture);
1543 av_frame_free(&s->next_picture.f);
1544 ff_free_picture_tables(&s->new_picture);
1545 ff_mpeg_unref_picture(s, &s->new_picture);
1546 av_frame_free(&s->new_picture.f);
1548 free_context_frame(s);
/* Reset state so a later init starts from a clean slate. */
1550 s->context_initialized = 0;
1551 s->last_picture_ptr =
1552 s->next_picture_ptr =
1553 s->current_picture_ptr = NULL;
1554 s->linesize = s->uvlinesize = 0;
/* Build the max_level[], max_run[] and index_run[] lookup tables of an
 * RLTable from its table_run/table_level arrays, once per (last) half.
 * If static_store is non-NULL the tables live in that static buffer,
 * otherwise they are av_malloc'ed.
 * NOTE(review): the start/end assignments and the if/else branch headers
 * around the static_store cases are missing from this extraction. */
1557 av_cold void ff_init_rl(RLTable *rl,
1558 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1560 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1561 uint8_t index_run[MAX_RUN + 1];
1562 int last, run, level, start, end, i;
1564 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1565 if (static_store && rl->max_level[0])
1568 /* compute max_level[], max_run[] and index_run[] */
1569 for (last = 0; last < 2; last++) {
/* index_run is initialized to rl->n ("unused") so the first occurrence
 * of each run value records its table index. */
1578 memset(max_level, 0, MAX_RUN + 1);
1579 memset(max_run, 0, MAX_LEVEL + 1);
1580 memset(index_run, rl->n, MAX_RUN + 1);
1581 for (i = start; i < end; i++) {
1582 run = rl->table_run[i];
1583 level = rl->table_level[i];
1584 if (index_run[run] == rl->n)
1586 if (level > max_level[run])
1587 max_level[run] = level;
1588 if (run > max_run[level])
1589 max_run[level] = run;
/* Copy the scratch arrays into static storage or fresh allocations;
 * the three tables are packed back-to-back inside static_store[last]. */
1592 rl->max_level[last] = static_store[last];
1594 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1595 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1597 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1599 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1600 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1602 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1604 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1605 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Pre-expand the RL VLC into per-qscale RL_VLC tables (rl_vlc[q]) so the
 * decoder can read run/level/len with a single lookup.
 * NOTE(review): qmul setup and the esc/illegal branch bodies are missing
 * from this extraction. */
1609 av_cold void ff_init_vlc_rl(RLTable *rl)
1613 for (q = 0; q < 32; q++) {
/* (q - 1) | 1 makes qadd odd, the MPEG dequant rounding term. */
1615 int qadd = (q - 1) | 1;
1621 for (i = 0; i < rl->vlc.table_size; i++) {
1622 int code = rl->vlc.table[i][0];
1623 int len = rl->vlc.table[i][1];
1626 if (len == 0) { // illegal code
1629 } else if (len < 0) { // more bits needed
1633 if (code == rl->n) { // esc
/* Regular code: translate table index into (run, level) with the
 * quantizer folded into level; +192 marks "last" coefficients. */
1637 run = rl->table_run[code] + 1;
1638 level = rl->table_level[code] * qmul + qadd;
1639 if (code >= rl->last) run += 192;
1642 rl->rl_vlc[q][i].len = len;
1643 rl->rl_vlc[q][i].level = level;
1644 rl->rl_vlc[q][i].run = run;
/* Drop the buffer references of every pooled picture that is not a
 * reference frame, making the slots reusable. */
1649 static void release_unused_pictures(MpegEncContext *s)
1653 /* release non reference frames */
1654 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1655 if (!s->picture[i].reference)
1656 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Return whether a pooled picture slot can be recycled: never the current
 * last_picture, otherwise any slot with no data buffer, or one flagged
 * needs_realloc that is not held as a delayed reference.
 * NOTE(review): the return statements are missing from this extraction. */
1660 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1662 if (pic == s->last_picture_ptr)
1664 if (pic->f->buf[0] == NULL)
1666 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Pick a reusable slot in s->picture[]. First prefer completely empty
 * slots (no buffer at all), then fall back to any slot pic_is_unused()
 * accepts. Running out is treated as an internal error (see comment).
 * NOTE(review): the "return i;" lines and the shared-vs-non-shared branch
 * are missing from this extraction. */
1671 static int find_unused_picture(MpegEncContext *s, int shared)
1676 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1677 if (s->picture[i].f->buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1681 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1682 if (pic_is_unused(s, &s->picture[i]))
1687 av_log(s->avctx, AV_LOG_FATAL,
1688 "Internal error, picture buffer overflow\n");
1689 /* We could return -1, but the codec would crash trying to draw into a
1690 * non-existing frame anyway. This is safer than waiting for a random crash.
1691 * Also the return of this is never useful, an encoder must only allocate
1692 * as much as allowed in the specification. This has no relationship to how
1693 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1694 * enough for such valid streams).
1695 * Plus, a decoder has to check stream validity and remove frames if too
1696 * many reference frames are around. Waiting for "OOM" is not correct at
1697 * all. Similarly, missing reference frames have to be replaced by
1698 * interpolated/MC frames, anything else is a bug in the codec ...
/* Public wrapper around find_unused_picture(): if the chosen slot was
 * flagged for reallocation, clear the flag and free its old tables and
 * buffer refs before handing it out. */
1704 int ff_find_unused_picture(MpegEncContext *s, int shared)
1706 int ret = find_unused_picture(s, shared);
1708 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1709 if (s->picture[ret].needs_realloc) {
1710 s->picture[ret].needs_realloc = 0;
1711 ff_free_picture_tables(&s->picture[ret]);
1712 ff_mpeg_unref_picture(s, &s->picture[ret]);
/* Fill a frame with mid-gray (0x80 in all three planes), used by the
 * FF_DEBUG_NOMC debug mode to visualize motion compensation. Chroma
 * planes are sized via the format's subsampling shifts. */
1718 static void gray_frame(AVFrame *frame)
1720 int i, h_chroma_shift, v_chroma_shift;
1722 av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1724 for(i=0; i<frame->height; i++)
1725 memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1726 for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1727 memset(frame->data[1] + frame->linesize[1]*i,
1728 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1729 memset(frame->data[2] + frame->linesize[2]*i,
1730 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1735 * generic function called after decoding
1736 * the header and before a frame is decoded.
/* Per-frame setup called after the header is decoded and before decoding
 * the frame data: recycles old pictures, picks/allocates the current
 * picture, fabricates dummy reference frames when they are missing, and
 * selects the dequantizers.
 * NOTE(review): this chunk lost many lines in extraction (braces, error
 * returns, some else branches); comments describe the visible logic. */
1738 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1744 if (!ff_thread_can_start_frame(avctx)) {
1745 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1749 /* mark & release old frames */
1750 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1751 s->last_picture_ptr != s->next_picture_ptr &&
1752 s->last_picture_ptr->f->buf[0]) {
1753 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1756 /* release forgotten pictures */
1757 /* if (mpeg124/h263) */
1758 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1759 if (&s->picture[i] != s->last_picture_ptr &&
1760 &s->picture[i] != s->next_picture_ptr &&
1761 s->picture[i].reference && !s->picture[i].needs_realloc) {
1762 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1763 av_log(avctx, AV_LOG_ERROR,
1764 "releasing zombie picture\n");
1765 ff_mpeg_unref_picture(s, &s->picture[i]);
1769 ff_mpeg_unref_picture(s, &s->current_picture);
1771 release_unused_pictures(s);
/* Reuse a pre-set current picture if the header code already picked one,
 * otherwise grab a fresh slot from the pool. */
1773 if (s->current_picture_ptr &&
1774 s->current_picture_ptr->f->buf[0] == NULL) {
1775 // we already have a unused image
1776 // (maybe it was set before reading the header)
1777 pic = s->current_picture_ptr;
1779 i = ff_find_unused_picture(s, 0);
1781 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1784 pic = &s->picture[i];
1788 if (!s->droppable) {
1789 if (s->pict_type != AV_PICTURE_TYPE_B)
1793 pic->f->coded_picture_number = s->coded_picture_number++;
1795 if (ff_alloc_picture(s, pic, 0) < 0)
1798 s->current_picture_ptr = pic;
1799 // FIXME use only the vars from current_pic
/* Propagate interlacing metadata from the context into the frame. */
1800 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1801 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1802 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1803 if (s->picture_structure != PICT_FRAME)
1804 s->current_picture_ptr->f->top_field_first =
1805 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1807 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1808 !s->progressive_sequence;
1809 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1811 s->current_picture_ptr->f->pict_type = s->pict_type;
1812 // if (s->flags && CODEC_FLAG_QSCALE)
1813 //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1814 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1816 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1817 s->current_picture_ptr)) < 0)
/* Non-B frames rotate the reference chain: next becomes last, current
 * becomes next. */
1820 if (s->pict_type != AV_PICTURE_TYPE_B) {
1821 s->last_picture_ptr = s->next_picture_ptr;
1823 s->next_picture_ptr = s->current_picture_ptr;
1825 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1826 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1827 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1828 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1829 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1830 s->pict_type, s->droppable);
/* Missing last reference (broken stream, B-frame first, or field-coded
 * first keyframe): allocate a dummy reference and fill it gray so
 * decoding can proceed deterministically. */
1832 if ((s->last_picture_ptr == NULL ||
1833 s->last_picture_ptr->f->buf[0] == NULL) &&
1834 (s->pict_type != AV_PICTURE_TYPE_I ||
1835 s->picture_structure != PICT_FRAME)) {
1836 int h_chroma_shift, v_chroma_shift;
1837 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1838 &h_chroma_shift, &v_chroma_shift);
1839 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1840 av_log(avctx, AV_LOG_DEBUG,
1841 "allocating dummy last picture for B frame\n");
1842 else if (s->pict_type != AV_PICTURE_TYPE_I)
1843 av_log(avctx, AV_LOG_ERROR,
1844 "warning: first frame is no keyframe\n");
1845 else if (s->picture_structure != PICT_FRAME)
1846 av_log(avctx, AV_LOG_DEBUG,
1847 "allocate dummy last picture for field based first keyframe\n");
1849 /* Allocate a dummy frame */
1850 i = ff_find_unused_picture(s, 0);
1852 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1855 s->last_picture_ptr = &s->picture[i];
1857 s->last_picture_ptr->reference = 3;
1858 s->last_picture_ptr->f->key_frame = 0;
1859 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1861 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1862 s->last_picture_ptr = NULL;
/* Gray-fill the dummy; skipped for hwaccel/VDPAU surfaces which are not
 * CPU-writable. */
1866 if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1867 for(i=0; i<avctx->height; i++)
1868 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1869 0x80, avctx->width);
1870 for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1871 memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1872 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1873 memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1874 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
/* FLV1/H.263 use luma 16 (black) instead of mid-gray. */
1877 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1878 for(i=0; i<avctx->height; i++)
1879 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
1883 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1884 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* B frame with no next reference: fabricate one the same way. */
1886 if ((s->next_picture_ptr == NULL ||
1887 s->next_picture_ptr->f->buf[0] == NULL) &&
1888 s->pict_type == AV_PICTURE_TYPE_B) {
1889 /* Allocate a dummy frame */
1890 i = ff_find_unused_picture(s, 0);
1892 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1895 s->next_picture_ptr = &s->picture[i];
1897 s->next_picture_ptr->reference = 3;
1898 s->next_picture_ptr->f->key_frame = 0;
1899 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1901 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1902 s->next_picture_ptr = NULL;
1905 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1906 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1909 #if 0 // BUFREF-FIXME
1910 memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1911 memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
/* Take fresh refs on the last/next pictures for this frame's decode. */
1913 if (s->last_picture_ptr) {
1914 ff_mpeg_unref_picture(s, &s->last_picture);
1915 if (s->last_picture_ptr->f->buf[0] &&
1916 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1917 s->last_picture_ptr)) < 0)
1920 if (s->next_picture_ptr) {
1921 ff_mpeg_unref_picture(s, &s->next_picture);
1922 if (s->next_picture_ptr->f->buf[0] &&
1923 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1924 s->next_picture_ptr)) < 0)
1928 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1929 s->last_picture_ptr->f->buf[0]));
/* Field pictures: double the line strides and, for the bottom field,
 * offset the data pointers by one line. */
1931 if (s->picture_structure!= PICT_FRAME) {
1933 for (i = 0; i < 4; i++) {
1934 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1935 s->current_picture.f->data[i] +=
1936 s->current_picture.f->linesize[i];
1938 s->current_picture.f->linesize[i] *= 2;
1939 s->last_picture.f->linesize[i] *= 2;
1940 s->next_picture.f->linesize[i] *= 2;
1944 s->err_recognition = avctx->err_recognition;
1946 /* set dequantizer, we can't do it during init as
1947 * it might change for mpeg4 and we can't do it in the header
1948 * decode as init is not called for mpeg4 there yet */
1949 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1950 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1951 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1952 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1953 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1954 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1956 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1957 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1960 if (s->avctx->debug & FF_DEBUG_NOMC) {
1961 gray_frame(s->current_picture_ptr->f);
1967 /* called after a frame has been decoded. */
/* Per-frame teardown: if the just-decoded picture is a reference, mark it
 * fully decoded so frame-threaded consumers may use it. */
1968 void ff_MPV_frame_end(MpegEncContext *s)
1972 if (s->current_picture.reference)
1973 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1977 * Draw a line from (ex, ey) -> (sx, sy).
1978 * @param w width of the image
1979 * @param h height of the image
1980 * @param stride stride/linesize of the image
1981 * @param color color of the arrow
/* Additively draw an anti-aliased line into a single 8-bit plane using a
 * 16.16 fixed-point slope; intensity is split between the two pixels
 * straddling the ideal line.
 * NOTE(review): local declarations, the swap-condition lines and some
 * braces are missing from this extraction. */
1983 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1984 int w, int h, int stride, int color)
/* Clip both endpoints into the image. */
1988 sx = av_clip(sx, 0, w - 1);
1989 sy = av_clip(sy, 0, h - 1);
1990 ex = av_clip(ex, 0, w - 1);
1991 ey = av_clip(ey, 0, h - 1);
1993 buf[sy * stride + sx] += color;
/* Mostly-horizontal case: step in x, distribute color across y/y+1. */
1995 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1997 FFSWAP(int, sx, ex);
1998 FFSWAP(int, sy, ey);
2000 buf += sx + sy * stride;
2002 f = ((ey - sy) << 16) / ex;
2003 for (x = 0; x <= ex; x++) {
2005 fr = (x * f) & 0xFFFF;
2006 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2007 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* Mostly-vertical case: step in y, distribute color across x/x+1. */
2011 FFSWAP(int, sx, ex);
2012 FFSWAP(int, sy, ey);
2014 buf += sx + sy * stride;
2017 f = ((ex - sx) << 16) / ey;
2020 for(y= 0; y <= ey; y++){
2022 fr = (y*f) & 0xFFFF;
2023 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2024 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
2030 * Draw an arrow from (ex, ey) -> (sx, sy).
2031 * @param w width of the image
2032 * @param h height of the image
2033 * @param stride stride/linesize of the image
2034 * @param color color of the arrow
/* Draw a motion-vector arrow: the shaft via draw_line(), plus two short
 * head strokes when the vector is longer than 3 pixels.
 * NOTE(review): dx/dy/rx/ry declarations are missing from this extraction. */
2036 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2037 int ey, int w, int h, int stride, int color)
/* Loose clip: vectors may start/end slightly outside the visible image. */
2041 sx = av_clip(sx, -100, w + 100);
2042 sy = av_clip(sy, -100, h + 100);
2043 ex = av_clip(ex, -100, w + 100);
2044 ey = av_clip(ey, -100, h + 100);
2049 if (dx * dx + dy * dy > 3 * 3) {
/* Normalize the head strokes to a fixed pixel length (<<8 keeps
 * precision through ff_sqrt). */
2052 int length = ff_sqrt((rx * rx + ry * ry) << 8);
2054 // FIXME subpixel accuracy
2055 rx = ROUNDED_DIV(rx * 3 << 4, length);
2056 ry = ROUNDED_DIV(ry * 3 << 4, length);
2058 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2059 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2061 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
2065 * Print debugging info for the given picture.
/* Print and/or visualize per-macroblock debug info for a decoded frame:
 * textual skip/QP/MB-type dumps to the log, and optional in-picture
 * overlays (motion vectors, QP shading, MB-type coloring, partition
 * grids) drawn directly into the frame data.
 * NOTE(review): this chunk lost many lines in extraction (returns, some
 * declarations, case labels, closing braces); comments describe only the
 * visible logic. */
2067 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2068 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2070 int mb_width, int mb_height, int mb_stride, int quarter_sample)
/* Nothing to do for hwaccel surfaces or when the MB-type table is absent. */
2072 if (avctx->hwaccel || !mbtype_table
2073 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
/* --- textual per-MB dump --- */
2077 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2080 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2081 av_get_picture_type_char(pict->pict_type));
2082 for (y = 0; y < mb_height; y++) {
2083 for (x = 0; x < mb_width; x++) {
2084 if (avctx->debug & FF_DEBUG_SKIP) {
2085 int count = mbskip_table[x + y * mb_stride];
2088 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2090 if (avctx->debug & FF_DEBUG_QP) {
2091 av_log(avctx, AV_LOG_DEBUG, "%2d",
2092 qscale_table[x + y * mb_stride]);
2094 if (avctx->debug & FF_DEBUG_MB_TYPE) {
2095 int mb_type = mbtype_table[x + y * mb_stride];
2096 // Type & MV direction
/* One character per MB type; see mpegutils.h IS_* macros. */
2097 if (IS_PCM(mb_type))
2098 av_log(avctx, AV_LOG_DEBUG, "P");
2099 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2100 av_log(avctx, AV_LOG_DEBUG, "A");
2101 else if (IS_INTRA4x4(mb_type))
2102 av_log(avctx, AV_LOG_DEBUG, "i");
2103 else if (IS_INTRA16x16(mb_type))
2104 av_log(avctx, AV_LOG_DEBUG, "I");
2105 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2106 av_log(avctx, AV_LOG_DEBUG, "d");
2107 else if (IS_DIRECT(mb_type))
2108 av_log(avctx, AV_LOG_DEBUG, "D");
2109 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2110 av_log(avctx, AV_LOG_DEBUG, "g");
2111 else if (IS_GMC(mb_type))
2112 av_log(avctx, AV_LOG_DEBUG, "G");
2113 else if (IS_SKIP(mb_type))
2114 av_log(avctx, AV_LOG_DEBUG, "S");
2115 else if (!USES_LIST(mb_type, 1))
2116 av_log(avctx, AV_LOG_DEBUG, ">");
2117 else if (!USES_LIST(mb_type, 0))
2118 av_log(avctx, AV_LOG_DEBUG, "<");
2120 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2121 av_log(avctx, AV_LOG_DEBUG, "X");
/* Second character: partition shape. */
2125 if (IS_8X8(mb_type))
2126 av_log(avctx, AV_LOG_DEBUG, "+");
2127 else if (IS_16X8(mb_type))
2128 av_log(avctx, AV_LOG_DEBUG, "-");
2129 else if (IS_8X16(mb_type))
2130 av_log(avctx, AV_LOG_DEBUG, "|");
2131 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2132 av_log(avctx, AV_LOG_DEBUG, " ");
2134 av_log(avctx, AV_LOG_DEBUG, "?");
2137 if (IS_INTERLACED(mb_type))
2138 av_log(avctx, AV_LOG_DEBUG, "=");
2140 av_log(avctx, AV_LOG_DEBUG, " ");
2143 av_log(avctx, AV_LOG_DEBUG, "\n");
/* --- in-picture visualization --- */
2147 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2148 (avctx->debug_mv)) {
2149 const int shift = 1 + quarter_sample;
2153 int h_chroma_shift, v_chroma_shift, block_height;
2154 const int width = avctx->width;
2155 const int height = avctx->height;
/* H.264/SVQ3 store MVs at 4x4 granularity (log2 = 2), others at 8x8. */
2156 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2157 const int mv_stride = (mb_width << mv_sample_log2) +
2158 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2160 *low_delay = 0; // needed to see the vectors without trashing the buffers
2162 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2164 av_frame_make_writable(pict);
2166 pict->opaque = NULL;
2167 ptr = pict->data[0];
2168 block_height = 16 >> v_chroma_shift;
2170 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2172 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2173 const int mb_index = mb_x + mb_y * mb_stride;
/* Motion-vector arrows: type selects P-forward / B-forward / B-backward
 * visualization; skipped unless the matching debug_mv bit and picture
 * type line up. */
2174 if ((avctx->debug_mv) && motion_val[0]) {
2176 for (type = 0; type < 3; type++) {
2180 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2181 (pict->pict_type!= AV_PICTURE_TYPE_P))
2186 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2187 (pict->pict_type!= AV_PICTURE_TYPE_B))
2192 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2193 (pict->pict_type!= AV_PICTURE_TYPE_B))
2198 if (!USES_LIST(mbtype_table[mb_index], direction))
/* 8x8 partitions: four arrows, one per quadrant. */
2201 if (IS_8X8(mbtype_table[mb_index])) {
2203 for (i = 0; i < 4; i++) {
2204 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2205 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2206 int xy = (mb_x * 2 + (i & 1) +
2207 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2208 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2209 int my = (motion_val[direction][xy][1] >> shift) + sy;
2210 draw_arrow(ptr, sx, sy, mx, my, width,
2211 height, pict->linesize[0], 100);
/* 16x8 partitions: two arrows stacked vertically. */
2213 } else if (IS_16X8(mbtype_table[mb_index])) {
2215 for (i = 0; i < 2; i++) {
2216 int sx = mb_x * 16 + 8;
2217 int sy = mb_y * 16 + 4 + 8 * i;
2218 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2219 int mx = (motion_val[direction][xy][0] >> shift);
2220 int my = (motion_val[direction][xy][1] >> shift);
2222 if (IS_INTERLACED(mbtype_table[mb_index]))
2225 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2226 height, pict->linesize[0], 100);
/* 8x16 partitions: two arrows side by side. */
2228 } else if (IS_8X16(mbtype_table[mb_index])) {
2230 for (i = 0; i < 2; i++) {
2231 int sx = mb_x * 16 + 4 + 8 * i;
2232 int sy = mb_y * 16 + 8;
2233 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2234 int mx = motion_val[direction][xy][0] >> shift;
2235 int my = motion_val[direction][xy][1] >> shift;
2237 if (IS_INTERLACED(mbtype_table[mb_index]))
2240 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2241 height, pict->linesize[0], 100);
/* 16x16: single arrow from the MB center. */
2244 int sx= mb_x * 16 + 8;
2245 int sy= mb_y * 16 + 8;
2246 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2247 int mx= (motion_val[direction][xy][0]>>shift) + sx;
2248 int my= (motion_val[direction][xy][1]>>shift) + sy;
2249 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
/* QP visualization: shade both chroma planes proportional to qscale
 * (8 bytes per row via a replicated 64-bit constant). */
2253 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2254 uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2255 0x0101010101010101ULL;
2257 for (y = 0; y < block_height; y++) {
2258 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2259 (block_height * mb_y + y) *
2260 pict->linesize[1]) = c;
2261 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2262 (block_height * mb_y + y) *
2263 pict->linesize[2]) = c;
/* MB-type visualization: pick a (u,v) chroma color per type ... */
2266 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2268 int mb_type = mbtype_table[mb_index];
2271 #define COLOR(theta, r) \
2272 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2273 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2277 if (IS_PCM(mb_type)) {
2279 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2280 IS_INTRA16x16(mb_type)) {
2282 } else if (IS_INTRA4x4(mb_type)) {
2284 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2286 } else if (IS_DIRECT(mb_type)) {
2288 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2290 } else if (IS_GMC(mb_type)) {
2292 } else if (IS_SKIP(mb_type)) {
2294 } else if (!USES_LIST(mb_type, 1)) {
2296 } else if (!USES_LIST(mb_type, 0)) {
2299 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* ... and flood the MB's chroma block with it. */
2303 u *= 0x0101010101010101ULL;
2304 v *= 0x0101010101010101ULL;
2305 for (y = 0; y < block_height; y++) {
2306 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2307 (block_height * mb_y + y) * pict->linesize[1]) = u;
2308 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2309 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* Partition grid: XOR-toggle luma along internal 8-pixel boundaries so
 * the overlay is visible on any background. */
2313 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2314 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2315 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2316 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2317 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2319 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2320 for (y = 0; y < 16; y++)
2321 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2322 pict->linesize[0]] ^= 0x80;
/* For fine MV grids, also mark sub-blocks whose MVs differ. */
2324 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2325 int dm = 1 << (mv_sample_log2 - 2);
2326 for (i = 0; i < 4; i++) {
2327 int sx = mb_x * 16 + 8 * (i & 1);
2328 int sy = mb_y * 16 + 8 * (i >> 1);
2329 int xy = (mb_x * 2 + (i & 1) +
2330 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2332 int32_t *mv = (int32_t *) &motion_val[0][xy];
2333 if (mv[0] != mv[dm] ||
2334 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2335 for (y = 0; y < 8; y++)
2336 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2337 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2338 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2339 pict->linesize[0]) ^= 0x8080808080808080ULL;
2343 if (IS_INTERLACED(mb_type) &&
2344 avctx->codec->id == AV_CODEC_ID_H264) {
/* Consume the skip flag so the overlay is drawn only once per MB. */
2348 mbskip_table[mb_index] = 0;
/* Convenience wrapper: forward this context's per-MB tables and geometry
 * to ff_print_debug_info2(). */
2354 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2356 ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
2357 p->qscale_table, p->motion_val, &s->low_delay,
2358 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/* Export the picture's qscale table as frame side data: take a new ref on
 * the table buffer, skip the leading padding row (+1 column) used by the
 * edge-extended MB layout, and attach it to the frame.
 * NOTE(review): the NULL-check line for the new ref is missing from this
 * extraction; AVERROR(ENOMEM) below is its visible body. */
2361 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2363 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2364 int offset = 2*s->mb_stride + 1;
2366 return AVERROR(ENOMEM);
2367 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2368 ref->size -= offset;
2369 ref->data += offset;
2370 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/* Half-pel motion compensation for lowres decoding: split the motion
 * vector into an integer source offset plus a sub-pel fraction scaled for
 * the lowres factor, fall back to emulated_edge_mc when the source block
 * would read outside the edge area, then run the chroma MC kernel.
 * NOTE(review): declarations and some lines (quarter_sample adjustment
 * body, return) are missing from this extraction. */
2373 static inline int hpel_motion_lowres(MpegEncContext *s,
2374 uint8_t *dest, uint8_t *src,
2375 int field_based, int field_select,
2376 int src_x, int src_y,
2377 int width, int height, ptrdiff_t stride,
2378 int h_edge_pos, int v_edge_pos,
2379 int w, int h, h264_chroma_mc_func *pix_op,
2380 int motion_x, int motion_y)
2382 const int lowres = s->avctx->lowres;
2383 const int op_index = FFMIN(lowres, 3);
/* s_mask keeps the sub-pel bits of the vector for this lowres level. */
2384 const int s_mask = (2 << lowres) - 1;
2388 if (s->quarter_sample) {
2393 sx = motion_x & s_mask;
2394 sy = motion_y & s_mask;
/* note: ">> lowres + 1" parses as ">> (lowres + 1)". */
2395 src_x += motion_x >> lowres + 1;
2396 src_y += motion_y >> lowres + 1;
2398 src += src_y * stride + src_x;
/* Out-of-picture reads go through the edge-emulation buffer. */
2400 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2401 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2402 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2403 s->linesize, s->linesize,
2404 w + 1, (h + 1) << field_based,
2405 src_x, src_y << field_based,
2406 h_edge_pos, v_edge_pos);
2407 src = s->edge_emu_buffer;
/* Rescale the sub-pel fraction into the kernel's 1/8-pel domain. */
2411 sx = (sx << 2) >> lowres;
2412 sy = (sy << 2) >> lowres;
2415 pix_op[op_index](dest, src, stride, h, sx, sy);
2419 /* apply one mpeg motion vector to the three components */
/**
 * Lowres motion compensation of one macroblock: compensates the luma
 * plane and (unless CODEC_FLAG_GRAY) both chroma planes from ref_picture.
 * Chroma motion-vector derivation depends on s->out_format: H.263 shares
 * a rounded luma MV, H.261 uses full-pel chroma MVs, and the generic
 * path shifts by the chroma subsampling factors.
 */
2420 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2427 uint8_t **ref_picture,
2428 h264_chroma_mc_func *pix_op,
2429 int motion_x, int motion_y,
2432 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2433 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2434 ptrdiff_t uvlinesize, linesize;
2435 const int lowres = s->avctx->lowres;
/* chroma MC-table index; clamped because only 4 entries exist */
2436 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
/* lowres block size: 8, 4, 2 or 1 pixels */
2437 const int block_s = 8>>lowres;
2438 const int s_mask = (2 << lowres) - 1;
2439 const int h_edge_pos = s->h_edge_pos >> lowres;
2440 const int v_edge_pos = s->v_edge_pos >> lowres;
/* doubled stride when compensating a single field */
2441 linesize = s->current_picture.f->linesize[0] << field_based;
2442 uvlinesize = s->current_picture.f->linesize[1] << field_based;
2444 // FIXME obviously not perfect but qpel will not work in lowres anyway
2445 if (s->quarter_sample) {
2451 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
/* split MV into sub-pel fraction (sx/sy) and integer source position;
 * NB: '>> lowres + 1' is '>> (lowres + 1)' */
2454 sx = motion_x & s_mask;
2455 sy = motion_y & s_mask;
2456 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2457 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* derive the chroma source position per container format */
2459 if (s->out_format == FMT_H263) {
2460 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2461 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2462 uvsrc_x = src_x >> 1;
2463 uvsrc_y = src_y >> 1;
2464 } else if (s->out_format == FMT_H261) {
2465 // even chroma mv's are full pel in H261
2468 uvsx = (2 * mx) & s_mask;
2469 uvsy = (2 * my) & s_mask;
2470 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2471 uvsrc_y = mb_y * block_s + (my >> lowres);
2473 if(s->chroma_y_shift){
2478 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2479 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2481 if(s->chroma_x_shift){
2485 uvsy = motion_y & s_mask;
2487 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2490 uvsx = motion_x & s_mask;
2491 uvsy = motion_y & s_mask;
2498 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2499 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2500 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* out-of-picture reads: replicate edges into edge_emu_buffer for all
 * three planes before compensating */
2502 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2503 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2504 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2505 linesize >> field_based, linesize >> field_based,
2506 17, 17 + field_based,
2507 src_x, src_y << field_based, h_edge_pos,
2509 ptr_y = s->edge_emu_buffer;
2510 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
/* chroma emu buffers live right after the 17/18-line luma area */
2511 uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2512 uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2513 s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2514 uvlinesize >> field_based, uvlinesize >> field_based,
2516 uvsrc_x, uvsrc_y << field_based,
2517 h_edge_pos >> 1, v_edge_pos >> 1);
2518 s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2519 uvlinesize >> field_based,uvlinesize >> field_based,
2521 uvsrc_x, uvsrc_y << field_based,
2522 h_edge_pos >> 1, v_edge_pos >> 1);
2528 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
/* bottom field starts one (full-resolution) line down */
2530 dest_y += s->linesize;
2531 dest_cb += s->uvlinesize;
2532 dest_cr += s->uvlinesize;
2536 ptr_y += s->linesize;
2537 ptr_cb += s->uvlinesize;
2538 ptr_cr += s->uvlinesize;
/* scale sub-pel phases and run the MC functions: luma first, then
 * both chroma planes unless grayscale-only decoding is requested */
2541 sx = (sx << 2) >> lowres;
2542 sy = (sy << 2) >> lowres;
2543 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2545 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2546 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2547 uvsx = (uvsx << 2) >> lowres;
2548 uvsy = (uvsy << 2) >> lowres;
2550 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2551 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2554 // FIXME h261 lowres loop filter
/**
 * Chroma motion compensation for a 4MV (four luma vectors) macroblock
 * in lowres mode: the four luma MVs are reduced to one chroma MV with
 * H.263-style rounding, then one block_s x block_s block is compensated
 * on each chroma plane, with edge emulation when needed.
 */
2557 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2558 uint8_t *dest_cb, uint8_t *dest_cr,
2559 uint8_t **ref_picture,
2560 h264_chroma_mc_func * pix_op,
2563 const int lowres = s->avctx->lowres;
/* clamp the MC-table index; entries above 3 do not exist */
2564 const int op_index = FFMIN(lowres, 3);
2565 const int block_s = 8 >> lowres;
2566 const int s_mask = (2 << lowres) - 1;
/* chroma edges: note '>> lowres + 1' is '>> (lowres + 1)' */
2567 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2568 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2569 int emu = 0, src_x, src_y, sx, sy;
2573 if (s->quarter_sample) {
2578 /* In case of 8X8, we construct a single chroma motion vector
2579 with a special rounding */
2580 mx = ff_h263_round_chroma(mx);
2581 my = ff_h263_round_chroma(my);
2585 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2586 src_y = s->mb_y * block_s + (my >> lowres + 1);
/* both chroma planes share the same offset into their planes */
2588 offset = src_y * s->uvlinesize + src_x;
2589 ptr = ref_picture[1] + offset;
2590 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2591 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2592 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2593 s->uvlinesize, s->uvlinesize,
2595 src_x, src_y, h_edge_pos, v_edge_pos);
2596 ptr = s->edge_emu_buffer;
/* scale sub-pel phase for the MC functions */
2599 sx = (sx << 2) >> lowres;
2600 sy = (sy << 2) >> lowres;
2601 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr plane: same offset and same emulation condition as Cb */
2603 ptr = ref_picture[2] + offset;
2605 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2606 s->uvlinesize, s->uvlinesize,
2608 src_x, src_y, h_edge_pos, v_edge_pos);
2609 ptr = s->edge_emu_buffer;
2611 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2615 * motion compensation of a single macroblock
2617 * @param dest_y luma destination pointer
2618 * @param dest_cb chroma cb/u destination pointer
2619 * @param dest_cr chroma cr/v destination pointer
2620 * @param dir direction (0->forward, 1->backward)
2621 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2622 * @param pix_op halfpel motion compensation function (average or put normally)
2623 * the motion vectors are taken from s->mv and the MV type from s->mv_type
2625 static inline void MPV_motion_lowres(MpegEncContext *s,
2626 uint8_t *dest_y, uint8_t *dest_cb,
2628 int dir, uint8_t **ref_picture,
2629 h264_chroma_mc_func *pix_op)
2633 const int lowres = s->avctx->lowres;
2634 const int block_s = 8 >>lowres;
/* dispatch on MV type: 16x16, 8x8 (4MV), field, 16x8, dual-prime */
2639 switch (s->mv_type) {
/* MV_TYPE_16X16: a single vector covers the whole macroblock */
2641 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2643 ref_picture, pix_op,
2644 s->mv[dir][0][0], s->mv[dir][0][1],
/* MV_TYPE_8X8: one vector per 8x8 luma block, chroma handled once
 * afterwards with the averaged vector */
2650 for (i = 0; i < 4; i++) {
2651 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2652 s->linesize) * block_s,
2653 ref_picture[0], 0, 0,
2654 (2 * mb_x + (i & 1)) * block_s,
2655 (2 * mb_y + (i >> 1)) * block_s,
2656 s->width, s->height, s->linesize,
2657 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2658 block_s, block_s, pix_op,
2659 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate the four luma MVs for the shared chroma vector */
2661 mx += s->mv[dir][i][0];
2662 my += s->mv[dir][i][1];
2665 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2666 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* MV_TYPE_FIELD: two vectors, one per field */
2670 if (s->picture_structure == PICT_FRAME) {
/* frame picture: compensate top then bottom field separately */
2672 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2673 1, 0, s->field_select[dir][0],
2674 ref_picture, pix_op,
2675 s->mv[dir][0][0], s->mv[dir][0][1],
2678 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2679 1, 1, s->field_select[dir][1],
2680 ref_picture, pix_op,
2681 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture: an opposite-parity reference in the same frame
 * means reading from the picture currently being decoded */
2684 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2685 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2686 ref_picture = s->current_picture_ptr->f->data;
2689 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2690 0, 0, s->field_select[dir][0],
2691 ref_picture, pix_op,
2693 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* MV_TYPE_16X8: two vectors, each covering a 16x8 half of the MB */
2697 for (i = 0; i < 2; i++) {
2698 uint8_t **ref2picture;
2700 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2701 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2702 ref2picture = ref_picture;
2704 ref2picture = s->current_picture_ptr->f->data;
2707 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2708 0, 0, s->field_select[dir][i],
2709 ref2picture, pix_op,
2710 s->mv[dir][i][0], s->mv[dir][i][1] +
2711 2 * block_s * i, block_s, mb_y >> 1);
2713 dest_y += 2 * block_s * s->linesize;
2714 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2715 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* MV_TYPE_DMV (dual prime): put the first prediction, then average
 * the opposite-parity prediction on top of it */
2719 if (s->picture_structure == PICT_FRAME) {
2720 for (i = 0; i < 2; i++) {
2722 for (j = 0; j < 2; j++) {
2723 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2725 ref_picture, pix_op,
2726 s->mv[dir][2 * i + j][0],
2727 s->mv[dir][2 * i + j][1],
2730 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2733 for (i = 0; i < 2; i++) {
2734 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2735 0, 0, s->picture_structure != i + 1,
2736 ref_picture, pix_op,
2737 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2738 2 * block_s, mb_y >> 1);
2740 // after put we make avg of the same block
2741 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2743 // opposite parity is always in the same
2744 // frame if this is second field
2745 if (!s->first_field) {
2746 ref_picture = s->current_picture_ptr->f->data;
2757 * find the lowest MB row referenced in the MVs
/**
 * Scan s->mv[dir] and return the lowest (largest-y) macroblock row the
 * vectors can reach, clamped to the picture. Used by frame-threading to
 * know how far the reference frame must be decoded before MC can run.
 * Returns mb_height-1 (whole frame) for the non-frame/mcsel early-out
 * paths not fully visible in this chunk.
 */
2759 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* half-pel MVs are scaled up so all math happens in quarter-pel units */
2761 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2762 int my, off, i, mvs;
2764 if (s->picture_structure != PICT_FRAME || s->mcsel)
2767 switch (s->mv_type) {
2781 for (i = 0; i < mvs; i++) {
2782 my = s->mv[dir][i][1]<<qpel_shift;
2783 my_max = FFMAX(my_max, my);
2784 my_min = FFMIN(my_min, my);
/* convert the largest quarter-pel displacement to whole MB rows,
 * rounding up (64 quarter-pels = 16 pixels = one MB row) */
2787 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2789 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2791 return s->mb_height-1;
2794 /* put block[] to dest[] */
/* Dequantize an intra block and write (overwrite) its IDCT to dest. */
2795 static inline void put_dct(MpegEncContext *s,
2796 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2798 s->dct_unquantize_intra(s, block, i, qscale);
2799 s->dsp.idct_put (dest, line_size, block);
2802 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block to dest; blocks with no
 * coded coefficients (block_last_index < 0) are skipped entirely. */
2803 static inline void add_dct(MpegEncContext *s,
2804 int16_t *block, int i, uint8_t *dest, int line_size)
2806 if (s->block_last_index[i] >= 0) {
2807 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block, then add its IDCT to dest; skipped when
 * the block has no coded coefficients. */
2811 static inline void add_dequant_dct(MpegEncContext *s,
2812 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2814 if (s->block_last_index[i] >= 0) {
2815 s->dct_unquantize_inter(s, block, i, qscale);
2817 s->dsp.idct_add (dest, line_size, block);
2822 * Clean dc, ac, coded_block for the current non-intra MB.
/**
 * Reset the intra prediction state (DC predictors to 1024, AC predictors
 * to 0, coded-block flags) for the current macroblock, so that later
 * intra MBs do not predict from this non-intra one. Luma uses b8-stride
 * indexing (four 8x8 blocks); chroma uses mb-stride indexing.
 */
2824 void ff_clean_intra_table_entries(MpegEncContext *s)
2826 int wrap = s->b8_stride;
2827 int xy = s->block_index[0];
/* luma DC predictors of all four 8x8 blocks reset to the default 1024 */
2830 s->dc_val[0][xy + 1 ] =
2831 s->dc_val[0][xy + wrap] =
2832 s->dc_val[0][xy + 1 + wrap] = 1024;
/* luma AC predictors: two rows of two blocks each */
2834 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2835 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
/* coded_block is only maintained for MSMPEG4 v3 and later */
2836 if (s->msmpeg4_version>=3) {
2837 s->coded_block[xy ] =
2838 s->coded_block[xy + 1 ] =
2839 s->coded_block[xy + wrap] =
2840 s->coded_block[xy + 1 + wrap] = 0;
/* switch to MB-granular indexing for the chroma tables */
2843 wrap = s->mb_stride;
2844 xy = s->mb_x + s->mb_y * wrap;
2846 s->dc_val[2][xy] = 1024;
2848 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2849 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
/* mark the MB as no longer intra */
2851 s->mbintra_table[xy]= 0;
2854 /* generic function called after a macroblock has been parsed by the
2855 decoder or after it has been encoded by the encoder.
2857 Important variables used:
2858 s->mb_intra : true if intra macroblock
2859 s->mv_dir : motion vector direction
2860 s->mv_type : motion vector type
2861 s->mv : motion vector
2862 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstruction of one macroblock: motion compensation (normal or
 * lowres), then dequant+IDCT of the residual/intra blocks. lowres_flag
 * and is_mpeg12 are compile-time constants for the callers in
 * ff_MPV_decode_mb(), letting the compiler specialize this body. */
2864 static av_always_inline
2865 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2866 int lowres_flag, int is_mpeg12)
2868 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* hwaccel path: hand the whole MB to the accelerator and do no SW work */
2871 s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
2872 s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
2876 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2877 /* print DCT coefficients */
2879 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2881 for(j=0; j<64; j++){
2882 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2884 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2888 s->current_picture.qscale_table[mb_xy] = s->qscale;
2890 /* update DC predictors for P macroblocks */
2892 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2893 if(s->mbintra_table[mb_xy])
2894 ff_clean_intra_table_entries(s);
/* reset the last-DC predictors to the default for this DC precision */
2898 s->last_dc[2] = 128 << s->intra_dc_precision;
2901 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2902 s->mbintra_table[mb_xy]=1;
/* reconstruction can be skipped by the encoder when the pixels are
 * never needed again (no PSNR, no frame skipping heuristics, ...) */
2904 if ( (s->flags&CODEC_FLAG_PSNR)
2905 || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
2906 || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2907 uint8_t *dest_y, *dest_cb, *dest_cr;
2908 int dct_linesize, dct_offset;
2909 op_pixels_func (*op_pix)[4];
2910 qpel_mc_func (*op_qpix)[16];
2911 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2912 const int uvlinesize = s->current_picture.f->linesize[1];
2913 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2914 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2916 /* avoid copy if macroblock skipped in last frame too */
2917 /* skip only during decoding as we might trash the buffers during encoding a bit */
2919 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2921 if (s->mb_skipped) {
2923 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2925 } else if(!s->current_picture.reference) {
2928 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT interleaves the two fields: double stride,
 * second "row" of blocks starts one line (not 8 lines) down */
2932 dct_linesize = linesize << s->interlaced_dct;
2933 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2937 dest_cb= s->dest[1];
2938 dest_cr= s->dest[2];
/* non-readable destinations are reconstructed in a scratchpad and
 * copied out at the end (B-frames without draw_horiz_band) */
2940 dest_y = s->b_scratchpad;
2941 dest_cb= s->b_scratchpad+16*linesize;
2942 dest_cr= s->b_scratchpad+32*linesize;
2946 /* motion handling */
2947 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the reference rows this MB needs have
 * actually been decoded by the other thread */
2950 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2951 if (s->mv_dir & MV_DIR_FORWARD) {
2952 ff_thread_await_progress(&s->last_picture_ptr->tf,
2953 ff_MPV_lowest_referenced_row(s, 0),
2956 if (s->mv_dir & MV_DIR_BACKWARD) {
2957 ff_thread_await_progress(&s->next_picture_ptr->tf,
2958 ff_MPV_lowest_referenced_row(s, 1),
/* lowres MC path: forward prediction is "put", a following backward
 * prediction averages on top for bidirectional MBs */
2964 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2966 if (s->mv_dir & MV_DIR_FORWARD) {
2967 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
2968 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2970 if (s->mv_dir & MV_DIR_BACKWARD) {
2971 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
/* full-resolution MC path; rounding mode selects the pixel ops */
2974 op_qpix = s->me.qpel_put;
2975 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2976 op_pix = s->hdsp.put_pixels_tab;
2978 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2980 if (s->mv_dir & MV_DIR_FORWARD) {
2981 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2982 op_pix = s->hdsp.avg_pixels_tab;
2983 op_qpix= s->me.qpel_avg;
2985 if (s->mv_dir & MV_DIR_BACKWARD) {
2986 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2991 /* skip dequant / idct if we are really late ;) */
2992 if(s->avctx->skip_idct){
2993 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2994 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2995 || s->avctx->skip_idct >= AVDISCARD_ALL)
2999 /* add dct residue */
/* inter residual, path 1: codecs whose blocks still need dequant here */
3000 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
3001 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
3002 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3003 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3004 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3005 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3007 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3008 if (s->chroma_y_shift){
3009 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3010 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3014 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3015 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3016 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3017 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* path 2: blocks arrive already dequantized, just IDCT-add */
3020 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3021 add_dct(s, block[0], 0, dest_y , dct_linesize);
3022 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3023 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3024 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3026 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3027 if(s->chroma_y_shift){//Chroma420
3028 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3029 add_dct(s, block[5], 5, dest_cr, uvlinesize);
/* 4:2:2 / 4:4:4: chroma follows the interlaced-DCT layout too */
3032 dct_linesize = uvlinesize << s->interlaced_dct;
3033 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3035 add_dct(s, block[4], 4, dest_cb, dct_linesize);
3036 add_dct(s, block[5], 5, dest_cr, dct_linesize);
3037 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3038 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3039 if(!s->chroma_x_shift){//Chroma444
3040 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3041 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3042 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3043 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
/* path 3: WMV2 has its own residual add */
3048 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3049 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3052 /* dct only in intra block */
/* intra blocks: same three-way split, but "put" instead of "add" */
3053 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
3054 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3055 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3056 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3057 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3059 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3060 if(s->chroma_y_shift){
3061 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3062 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3066 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3067 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3068 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3069 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra while decoding: blocks already dequantized */
3073 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
3074 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3075 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
3076 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3078 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3079 if(s->chroma_y_shift){
3080 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
3081 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
3084 dct_linesize = uvlinesize << s->interlaced_dct;
3085 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3087 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
3088 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
3089 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3090 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3091 if(!s->chroma_x_shift){//Chroma444
3092 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3093 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3094 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3095 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* scratchpad reconstruction: copy the finished MB to its real place */
3103 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3104 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3105 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public MB decode entry point: picks the compile-time specialization of
 * MPV_decode_mb_internal() (lowres yes/no x mpeg12 yes/no). */
3110 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
3112 if(s->out_format == FMT_MPEG1) {
3113 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
3114 else MPV_decode_mb_internal(s, block, 0, 1);
3117 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
3118 else MPV_decode_mb_internal(s, block, 0, 0);
/* Notify the application of h newly decoded lines starting at y, via the
 * generic ff_draw_horiz_band() helper. */
3121 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3123 ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3124 s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3125 s->first_field, s->low_delay);
/**
 * Set up s->block_index[] (positions of the 6 blocks of the current MB
 * in the b8/mb-granular tables) and s->dest[] (pixel pointers for the
 * current MB in the three planes), taking lowres and field pictures
 * into account.
 */
3128 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3129 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3130 const int uvlinesize = s->current_picture.f->linesize[1];
/* log2 of the MB size in pixels at the current lowres factor */
3131 const int mb_size= 4 - s->avctx->lowres;
/* the "-2"/"-1" offsets point at the MB left of the current one;
 * dest[] below is likewise set one MB to the left (advanced later) */
3133 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3134 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3135 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3136 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* chroma blocks live after the luma area, indexed per-MB */
3137 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3138 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3139 //block_index is not used by mpeg2, so it is not affected by chroma_format
3141 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
3142 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3143 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3145 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3147 if(s->picture_structure==PICT_FRAME){
3148 s->dest[0] += s->mb_y * linesize << mb_size;
3149 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3150 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: each field uses every mb_y pair once, hence >>1 */
3152 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3153 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3154 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3155 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3161 * Permute an 8x8 block.
3162 * @param block the block which will be permuted according to the given permutation vector
3163 * @param permutation the permutation vector
3164 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3165 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3166 * (inverse) permutated to scantable order!
3168 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3174 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* first pass: gather the coefficients up to 'last' (in scan order)
 * into a temporary, presumably clearing them from block — the copy
 * loop body is not visible in this chunk, TODO confirm */
3176 for(i=0; i<=last; i++){
3177 const int j= scantable[i];
/* second pass: scatter them back at their permuted positions */
3182 for(i=0; i<=last; i++){
3183 const int j= scantable[i];
3184 const int perm_j= permutation[j];
3185 block[perm_j]= temp[j];
/**
 * Flush all decoder state: release every picture, reset the reference
 * picture pointers, the parser context and the bitstream buffer.
 * Called on seek/flush; safe to call before pictures were allocated.
 */
3189 void ff_mpeg_flush(AVCodecContext *avctx){
3191 MpegEncContext *s = avctx->priv_data;
3193 if(s==NULL || s->picture==NULL)
3196 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3197 ff_mpeg_unref_picture(s, &s->picture[i]);
3198 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3200 ff_mpeg_unref_picture(s, &s->current_picture);
3201 ff_mpeg_unref_picture(s, &s->last_picture);
3202 ff_mpeg_unref_picture(s, &s->next_picture);
3204 s->mb_x= s->mb_y= 0;
/* reset the packet parser so stale partial frames are discarded */
3207 s->parse_context.state= -1;
3208 s->parse_context.frame_start_found= 0;
3209 s->parse_context.overread= 0;
3210 s->parse_context.overread_index= 0;
3211 s->parse_context.index= 0;
3212 s->parse_context.last_index= 0;
3213 s->bitstream_buffer_size=0;
3218 * set qscale and update qscale dependent variables.
/* Clamps qscale to the valid 1..31 range (clamp branches partly not
 * visible here), then refreshes the chroma qscale and the luma/chroma
 * DC scale factors derived from it. */
3220 void ff_set_qscale(MpegEncContext * s, int qscale)
3224 else if (qscale > 31)
3228 s->chroma_qscale= s->chroma_qscale_table[qscale];
3230 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3231 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report to frame-threading peers that decoding has progressed to MB row
 * s->mb_y. Skipped for B frames, partitioned frames and after errors,
 * where the row-completion guarantee would not hold. */
3234 void ff_MPV_report_decode_progress(MpegEncContext *s)
3236 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3237 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);