2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
38 #include "h264chroma.h"
41 #include "mpegutils.h"
42 #include "mpegvideo.h"
/* Default luma-qscale -> chroma-qscale mapping: the identity, i.e. chroma
 * uses the same quantiser as luma. Indexed by qscale 0..31. */
49 static const uint8_t ff_default_chroma_qscale_table[32] = {
50 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
51 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
52 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale, indexed by qscale: a constant 8 for every
 * entry (MPEG-1 always divides the DC coefficient by 8). Also reused as
 * entry [0] of ff_mpeg2_dc_scale_table below. */
55 const uint8_t ff_mpeg1_dc_scale_table[128] = {
56 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
57 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* Constant DC scale of 4 for every qscale; entry [1] of
 * ff_mpeg2_dc_scale_table (presumably intra_dc_precision == 1 — the full
 * selection logic is outside this table; verify against the caller). */
67 static const uint8_t mpeg2_dc_scale_table1[128] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* Constant DC scale of 2 for every qscale; entry [2] of
 * ff_mpeg2_dc_scale_table. */
79 static const uint8_t mpeg2_dc_scale_table2[128] = {
80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* Constant DC scale of 1 (no scaling) for every qscale; entry [3] of
 * ff_mpeg2_dc_scale_table. */
91 static const uint8_t mpeg2_dc_scale_table3[128] = {
92 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
93 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC-scale table selector: index 0..3 picks the divisor 8/4/2/1
 * (presumably selected by intra_dc_precision — confirm at call sites);
 * each selected table is then indexed by qscale. */
103 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
104 ff_mpeg1_dc_scale_table,
105 mpeg2_dc_scale_table1,
106 mpeg2_dc_scale_table2,
107 mpeg2_dc_scale_table3,
/* Alternate horizontal coefficient scan order: maps scan position (0..63)
 * to the raster index of the 8x8 DCT coefficient read at that position. */
110 const uint8_t ff_alternate_horizontal_scan[64] = {
111 0, 1, 2, 3, 8, 9, 16, 17,
112 10, 11, 4, 5, 6, 7, 15, 14,
113 13, 12, 19, 18, 24, 25, 32, 33,
114 26, 27, 20, 21, 22, 23, 28, 29,
115 30, 31, 34, 35, 40, 41, 48, 49,
116 42, 43, 36, 37, 38, 39, 44, 45,
117 46, 47, 50, 51, 56, 57, 58, 59,
118 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical coefficient scan order (used for interlaced material
 * when alternate_scan is set — see the dct_unquantize_mpeg2_* functions and
 * ff_dct_common_init below): scan position -> raster index. */
121 const uint8_t ff_alternate_vertical_scan[64] = {
122 0, 8, 16, 24, 1, 9, 2, 10,
123 17, 25, 32, 40, 48, 56, 57, 49,
124 41, 33, 26, 18, 3, 11, 4, 12,
125 19, 27, 34, 42, 50, 58, 35, 43,
126 51, 59, 20, 28, 5, 13, 6, 14,
127 21, 29, 36, 44, 52, 60, 37, 45,
128 53, 61, 22, 30, 7, 15, 23, 31,
129 38, 46, 54, 62, 39, 47, 55, 63,
/**
 * Dequantize an intra-coded MPEG-1 block in place.
 * @param block  8x8 coefficient block (modified in place)
 * @param n      block index; n < 4 selects the luma DC scale, else chroma
 * @param qscale quantiser scale for this block
 * NOTE(review): this listing is elided (sign-dependent branches of the loop
 * body are missing) — verify against the full file.
 */
132 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
133 int16_t *block, int n, int qscale)
135 int i, level, nCoeffs;
136 const uint16_t *quant_matrix;
138 nCoeffs= s->block_last_index[n];
/* DC coefficient uses its own scale, not the intra matrix. */
140 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
141 /* XXX: only mpeg1 */
142 quant_matrix = s->intra_matrix;
/* AC coefficients, visited in (permutated) scan order. */
143 for(i=1;i<=nCoeffs;i++) {
144 int j= s->intra_scantable.permutated[i];
149 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* (level - 1) | 1 forces the magnitude odd (MPEG-1 oddification /
 * mismatch control). */
150 level = (level - 1) | 1;
153 level = (int)(level * qscale * quant_matrix[j]) >> 3;
154 level = (level - 1) | 1;
/**
 * Dequantize an inter-coded MPEG-1 block in place.
 * Starts at coefficient 0 (no separate DC handling) and applies the
 * MPEG-1 inter reconstruction (2*level + 1) * qscale * matrix / 16,
 * with oddification via (level - 1) | 1.
 * Uses intra_scantable.permutated: both scantables are initialised with
 * the same idct permutation in ff_dct_common_init, so the permutation is
 * interchangeable here.
 */
161 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
162 int16_t *block, int n, int qscale)
164 int i, level, nCoeffs;
165 const uint16_t *quant_matrix;
167 nCoeffs= s->block_last_index[n];
169 quant_matrix = s->inter_matrix;
170 for(i=0; i<=nCoeffs; i++) {
171 int j= s->intra_scantable.permutated[i];
176 level = (((level << 1) + 1) * qscale *
177 ((int) (quant_matrix[j]))) >> 4;
178 level = (level - 1) | 1;
181 level = (((level << 1) + 1) * qscale *
182 ((int) (quant_matrix[j]))) >> 4;
183 level = (level - 1) | 1;
/**
 * Dequantize an intra-coded MPEG-2 block in place.
 * Unlike the MPEG-1 variant there is no per-coefficient oddification here;
 * MPEG-2 mismatch control is handled differently (that part of the listing
 * is elided — verify in the full file).
 */
190 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
191 int16_t *block, int n, int qscale)
193 int i, level, nCoeffs;
194 const uint16_t *quant_matrix;
/* With alternate (vertical) scan the whole block must be processed. */
196 if(s->alternate_scan) nCoeffs= 63;
197 else nCoeffs= s->block_last_index[n];
199 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
200 quant_matrix = s->intra_matrix;
201 for(i=1;i<=nCoeffs;i++) {
202 int j= s->intra_scantable.permutated[i];
207 level = (int)(level * qscale * quant_matrix[j]) >> 3;
210 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Bit-exact variant of dct_unquantize_mpeg2_intra_c, selected in
 * ff_dct_common_init when CODEC_FLAG_BITEXACT is set. The visible body
 * mirrors the non-bitexact version; the differing (mismatch-accumulation)
 * lines are elided from this listing — verify against the full file.
 */
217 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
218 int16_t *block, int n, int qscale)
220 int i, level, nCoeffs;
221 const uint16_t *quant_matrix;
224 if(s->alternate_scan) nCoeffs= 63;
225 else nCoeffs= s->block_last_index[n];
227 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
229 quant_matrix = s->intra_matrix;
230 for(i=1;i<=nCoeffs;i++) {
231 int j= s->intra_scantable.permutated[i];
236 level = (int)(level * qscale * quant_matrix[j]) >> 3;
239 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Dequantize an inter-coded MPEG-2 block in place:
 * (2*level + 1) * qscale * matrix / 16, no oddification.
 */
248 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
249 int16_t *block, int n, int qscale)
251 int i, level, nCoeffs;
252 const uint16_t *quant_matrix;
255 if(s->alternate_scan) nCoeffs= 63;
256 else nCoeffs= s->block_last_index[n];
258 quant_matrix = s->inter_matrix;
259 for(i=0; i<=nCoeffs; i++) {
260 int j= s->intra_scantable.permutated[i];
265 level = (((level << 1) + 1) * qscale *
266 ((int) (quant_matrix[j]))) >> 4;
269 level = (((level << 1) + 1) * qscale *
270 ((int) (quant_matrix[j]))) >> 4;
/**
 * Dequantize an intra-coded H.263-family block in place using the
 * uniform reconstruction level*qmul +/- qadd (sign-dependent; the sign
 * branches are partly elided in this listing).
 */
279 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
280 int16_t *block, int n, int qscale)
282 int i, level, qmul, qadd;
/* A negative last index is only legal with AC prediction (h263_aic). */
285 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
290 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
/* Force qadd odd, as required by the H.263 reconstruction formula. */
291 qadd = (qscale - 1) | 1;
/* raster_end[] gives the last raster position for a given scan index. */
298 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
300 for(i=1; i<=nCoeffs; i++) {
304 level = level * qmul - qadd;
306 level = level * qmul + qadd;
/**
 * Dequantize an inter-coded H.263-family block in place; same
 * level*qmul +/- qadd reconstruction as the intra variant but starting
 * at coefficient 0 and with no DC special case.
 */
313 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
314 int16_t *block, int n, int qscale)
316 int i, level, qmul, qadd;
319 av_assert2(s->block_last_index[n]>=0);
321 qadd = (qscale - 1) | 1;
324 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
326 for(i=0; i<=nCoeffs; i++) {
330 level = level * qmul - qadd;
332 level = level * qmul + qadd;
/**
 * Error-resilience callback (installed as er->decode_mb in init_er):
 * re-decodes/conceals one macroblock. Copies the supplied motion
 * parameters into the context, recomputes the destination pointers for
 * this MB position and runs the normal MB decode path.
 */
339 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
341 int mb_x, int mb_y, int mb_intra, int mb_skipped)
343 MpegEncContext *s = opaque;
346 s->mv_type = mv_type;
347 s->mb_intra = mb_intra;
348 s->mb_skipped = mb_skipped;
351 memcpy(s->mv, mv, sizeof(*mv));
353 ff_init_block_index(s);
354 ff_update_block_index(s);
356 s->bdsp.clear_blocks(s->block[0]);
/* Luma dest: 16x16 MB grid; chroma dest: scaled by the chroma shifts. */
358 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
359 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
360 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
363 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
364 ff_MPV_decode_mb(s, s->block);
/* Debug pixel op substituted for the 16-wide hpel functions when
 * FF_DEBUG_NOMC is set (see ff_dct_common_init): fills the destination
 * rows with mid-grey (128) instead of doing motion compensation.
 * src is deliberately unused. */
367 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
370 memset(dst + h*linesize, 128, 16);
/* 8-wide counterpart of gray16 for the chroma-sized hpel tables; fills
 * with mid-grey (128) when FF_DEBUG_NOMC disables motion compensation. */
373 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
376 memset(dst + h*linesize, 128, 8);
379 /* init common dct for both encoder and decoder */
/* Initialises the DSP helpers, installs the dct_unquantize_* function
 * pointers (with arch-specific overrides), and builds the permutated
 * scan tables according to the IDCT's coefficient permutation. */
380 av_cold int ff_dct_common_init(MpegEncContext *s)
382 ff_blockdsp_init(&s->bdsp, s->avctx);
383 ff_dsputil_init(&s->dsp, s->avctx);
384 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
385 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
386 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* FF_DEBUG_NOMC: replace all put/avg pixel ops with grey fills so the
 * output shows only the residual, not motion-compensated prediction. */
388 if (s->avctx->debug & FF_DEBUG_NOMC) {
390 for (i=0; i<4; i++) {
391 s->hdsp.avg_pixels_tab[0][i] = gray16;
392 s->hdsp.put_pixels_tab[0][i] = gray16;
393 s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
395 s->hdsp.avg_pixels_tab[1][i] = gray8;
396 s->hdsp.put_pixels_tab[1][i] = gray8;
397 s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
401 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
402 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
403 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
404 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
405 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
406 if (s->flags & CODEC_FLAG_BITEXACT)
407 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
408 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture overrides (guarded by ARCH_* in the full file). */
411 ff_MPV_common_init_axp(s);
413 ff_MPV_common_init_arm(s);
415 ff_MPV_common_init_ppc(s);
417 ff_MPV_common_init_x86(s);
419 /* load & permutate scantables
420 * note: only wmv uses different ones
/* Intra and inter scantables get the same base order, so their
 * permutations are interchangeable (relied upon by the inter
 * dequantizers above). */
422 if (s->alternate_scan) {
423 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
424 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
426 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
427 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
429 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
430 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/**
 * Allocate the linesize-dependent scratch buffers (edge emulation, motion
 * estimation scratchpads). Skipped for hwaccel/VDPAU, where no software
 * pixel access happens. Returns 0 on success or a negative AVERROR;
 * frees edge_emu_buffer on the (partially elided) failure path.
 */
435 static int frame_size_alloc(MpegEncContext *s, int linesize)
437 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
439 if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
443 av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
444 return AVERROR_PATCHWELCOME;
447 // edge emu needs blocksize + filter length - 1
448 // (= 17x17 for halfpel / 21x21 for h264)
449 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
450 // at uvlinesize. It supports only YUV420 so 24x24 is enough
451 // linesize * interlaced * MBsize
452 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
/* One ME scratch allocation is shared by several scratchpad pointers;
 * they alias intentionally (different call sites, never live at once —
 * presumably; confirm in the full file). */
455 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
457 s->me.temp = s->me.scratchpad;
458 s->rd_scratchpad = s->me.scratchpad;
459 s->b_scratchpad = s->me.scratchpad;
460 s->obmc_scratchpad = s->me.scratchpad + 16;
464 av_freep(&s->edge_emu_buffer);
465 return AVERROR(ENOMEM);
469 * Allocate a frame buffer
/* Gets pixel data for pic: encoder path over-allocates by EDGE_WIDTH on
 * each side and offsets the data pointers back into the padded area;
 * WMV3IMAGE/VC1IMAGE/MSS2 bypass user get_buffer callbacks. Also
 * allocates hwaccel private data and validates stride consistency.
 * Returns 0 on success, negative on error (error paths partly elided). */
471 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
473 int edges_needed = av_codec_is_encoder(s->avctx->codec);
477 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
478 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
479 s->codec_id != AV_CODEC_ID_MSS2) {
/* Encoder: request a frame padded by EDGE_WIDTH on every side. */
481 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
482 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
485 r = ff_thread_get_buffer(s->avctx, &pic->tf,
486 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* WM image/screen codecs: internal buffer via the default allocator. */
488 pic->f->width = s->avctx->width;
489 pic->f->height = s->avctx->height;
490 pic->f->format = s->avctx->pix_fmt;
491 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
494 if (r < 0 || !pic->f->buf[0]) {
495 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Shift data pointers past the edge padding and restore the visible
 * dimensions (chroma planes use the chroma shifts). */
502 for (i = 0; pic->f->data[i]; i++) {
503 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
504 pic->f->linesize[i] +
505 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
506 pic->f->data[i] += offset;
508 pic->f->width = s->avctx->width;
509 pic->f->height = s->avctx->height;
512 if (s->avctx->hwaccel) {
513 assert(!pic->hwaccel_picture_private);
514 if (s->avctx->hwaccel->frame_priv_data_size) {
515 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
516 if (!pic->hwaccel_priv_buf) {
517 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
520 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* Strides must stay constant across frames; a change invalidates all
 * linesize-derived state. */
524 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
525 s->uvlinesize != pic->f->linesize[1])) {
526 av_log(s->avctx, AV_LOG_ERROR,
527 "get_buffer() failed (stride changed)\n");
528 ff_mpeg_unref_picture(s, pic);
532 if (pic->f->linesize[1] != pic->f->linesize[2]) {
533 av_log(s->avctx, AV_LOG_ERROR,
534 "get_buffer() failed (uv stride mismatch)\n");
535 ff_mpeg_unref_picture(s, pic);
/* First frame: lazily allocate the linesize-dependent scratch buffers. */
539 if (!s->edge_emu_buffer &&
540 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
541 av_log(s->avctx, AV_LOG_ERROR,
542 "get_buffer() failed to allocate context scratch buffers.\n");
543 ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data buffers (the AVBufferRefs backing the
 * qscale/mb_type/motion tables etc.) and reset the recorded allocation
 * dimensions so the tables are reallocated on next use. */
550 void ff_free_picture_tables(Picture *pic)
554 pic->alloc_mb_width =
555 pic->alloc_mb_height = 0;
557 av_buffer_unref(&pic->mb_var_buf);
558 av_buffer_unref(&pic->mc_mb_var_buf);
559 av_buffer_unref(&pic->mb_mean_buf);
560 av_buffer_unref(&pic->mbskip_table_buf);
561 av_buffer_unref(&pic->qscale_table_buf);
562 av_buffer_unref(&pic->mb_type_buf);
564 for (i = 0; i < 2; i++) {
565 av_buffer_unref(&pic->motion_val_buf[i]);
566 av_buffer_unref(&pic->ref_index_buf[i]);
/**
 * Allocate the per-picture side-data buffers sized from the current
 * macroblock geometry. Encoder-only stats tables (mb_var etc.) and
 * motion tables are conditional (the guarding conditions are partly
 * elided in this listing). Returns 0 or AVERROR(ENOMEM); on failure the
 * caller is expected to free via ff_free_picture_tables.
 */
570 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
572 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
573 const int mb_array_size = s->mb_stride * s->mb_height;
574 const int b8_array_size = s->b8_stride * s->mb_height * 2;
/* +2 / +mb_stride paddings allow the off-by-one accesses done by the
 * decoders (e.g. qscale_table is later offset by 2*mb_stride + 1). */
578 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
579 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
580 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
582 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
583 return AVERROR(ENOMEM);
586 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
587 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
588 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
589 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
590 return AVERROR(ENOMEM);
593 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
594 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
595 int ref_index_size = 4 * mb_array_size;
596 
597 for (i = 0; mv_size && i < 2; i++) {
598 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
599 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
600 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
601 return AVERROR(ENOMEM);
/* Remember the geometry these tables were sized for, so ff_alloc_picture
 * can detect a resolution change and reallocate. */
605 pic->alloc_mb_width = s->mb_width;
606 pic->alloc_mb_height = s->mb_height;
/**
 * Ensure every per-picture table buffer is writable (copy-on-write via
 * av_buffer_make_writable when a buffer is shared). The MAKE_WRITABLE
 * macro body is partly elided here; on failure it returns a negative
 * error from this function.
 */
611 static int make_tables_writable(Picture *pic)
614 #define MAKE_WRITABLE(table) \
617 (ret = av_buffer_make_writable(&pic->table)) < 0)\
621 MAKE_WRITABLE(mb_var_buf);
622 MAKE_WRITABLE(mc_mb_var_buf);
623 MAKE_WRITABLE(mb_mean_buf);
624 MAKE_WRITABLE(mbskip_table_buf);
625 MAKE_WRITABLE(qscale_table_buf);
626 MAKE_WRITABLE(mb_type_buf);
628 for (i = 0; i < 2; i++) {
629 MAKE_WRITABLE(motion_val_buf[i]);
630 MAKE_WRITABLE(ref_index_buf[i]);
637 * Allocate a Picture.
638 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Also (re)allocates the per-picture tables when the MB geometry changed,
 * records the strides in the context, and wires the convenience pointers
 * (mb_var, qscale_table, ...) into the table buffers. Returns 0 or
 * AVERROR(ENOMEM). */
640 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* Stale tables from a different resolution must be dropped first. */
644 if (pic->qscale_table_buf)
645 if ( pic->alloc_mb_width != s->mb_width
646 || pic->alloc_mb_height != s->mb_height)
647 ff_free_picture_tables(pic);
650 av_assert0(pic->f->data[0]);
653 av_assert0(!pic->f->buf[0]);
655 if (alloc_frame_buffer(s, pic) < 0)
658 s->linesize = pic->f->linesize[0];
659 s->uvlinesize = pic->f->linesize[1];
662 if (!pic->qscale_table_buf)
663 ret = alloc_picture_tables(s, pic);
665 ret = make_tables_writable(pic);
670 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
671 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
672 pic->mb_mean = pic->mb_mean_buf->data;
/* qscale_table/mb_type point past the padding row added at allocation
 * (offset 2*mb_stride + 1), matching alloc_picture_tables' sizing. */
675 pic->mbskip_table = pic->mbskip_table_buf->data;
676 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
677 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
679 if (pic->motion_val_buf[0]) {
680 for (i = 0; i < 2; i++) {
681 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
682 pic->ref_index[i] = pic->ref_index_buf[i]->data;
688 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
689 ff_mpeg_unref_picture(s, pic);
690 ff_free_picture_tables(pic);
691 return AVERROR(ENOMEM);
695 * Deallocate a picture.
/* Releases the frame/thread buffer and hwaccel data, then zeroes every
 * Picture field AFTER mb_mean (computed via offsetof), preserving the
 * table pointers/buffers that precede it so they can be reused. */
697 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
699 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
702 /* WM Image / Screen codecs allocate internal buffers with different
703 * dimensions / colorspaces; ignore user-defined callbacks for these. */
704 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
705 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
706 s->codec_id != AV_CODEC_ID_MSS2)
707 ff_thread_release_buffer(s->avctx, &pic->tf);
709 av_frame_unref(pic->f);
711 av_buffer_unref(&pic->hwaccel_priv_buf);
713 if (pic->needs_realloc)
714 ff_free_picture_tables(pic);
716 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/**
 * Make dst's per-picture tables reference the same buffers as src's
 * (re-referencing only when the underlying buffer differs), then copy
 * the derived data pointers and allocation dimensions. On a failed
 * av_buffer_ref the macro frees dst's tables and returns ENOMEM.
 */
719 static int update_picture_tables(Picture *dst, Picture *src)
723 #define UPDATE_TABLE(table)\
726 (!dst->table || dst->table->buffer != src->table->buffer)) {\
727 av_buffer_unref(&dst->table);\
728 dst->table = av_buffer_ref(src->table);\
730 ff_free_picture_tables(dst);\
731 return AVERROR(ENOMEM);\
736 UPDATE_TABLE(mb_var_buf);
737 UPDATE_TABLE(mc_mb_var_buf);
738 UPDATE_TABLE(mb_mean_buf);
739 UPDATE_TABLE(mbskip_table_buf);
740 UPDATE_TABLE(qscale_table_buf);
741 UPDATE_TABLE(mb_type_buf);
742 for (i = 0; i < 2; i++) {
743 UPDATE_TABLE(motion_val_buf[i]);
744 UPDATE_TABLE(ref_index_buf[i]);
/* The raw pointers can simply be copied; they point into the (now
 * shared) buffers referenced above. */
747 dst->mb_var = src->mb_var;
748 dst->mc_mb_var = src->mc_mb_var;
749 dst->mb_mean = src->mb_mean;
750 dst->mbskip_table = src->mbskip_table;
751 dst->qscale_table = src->qscale_table;
752 dst->mb_type = src->mb_type;
753 for (i = 0; i < 2; i++) {
754 dst->motion_val[i] = src->motion_val[i];
755 dst->ref_index[i] = src->ref_index[i];
758 dst->alloc_mb_width = src->alloc_mb_width;
759 dst->alloc_mb_height = src->alloc_mb_height;
/**
 * Reference src into dst: frame/thread buffer, picture tables, hwaccel
 * private data, plus the scalar metadata fields. dst must be empty on
 * entry. On error, dst is unreferenced again (the error label sits just
 * before the trailing ff_mpeg_unref_picture call). Returns 0 or negative.
 */
764 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
768 av_assert0(!dst->f->buf[0]);
769 av_assert0(src->f->buf[0]);
773 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
777 ret = update_picture_tables(dst, src);
781 if (src->hwaccel_picture_private) {
782 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
783 if (!dst->hwaccel_priv_buf)
785 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
788 dst->field_picture = src->field_picture;
789 dst->mb_var_sum = src->mb_var_sum;
790 dst->mc_mb_var_sum = src->mc_mb_var_sum;
791 dst->b_frame_score = src->b_frame_score;
792 dst->needs_realloc = src->needs_realloc;
793 dst->reference = src->reference;
794 dst->shared = src->shared;
798 ff_mpeg_unref_picture(s, dst);
/* Swap the U and V block pointers (pblocks[4] <-> pblocks[5]) — used for
 * the "VCR2" codec tag which stores chroma in the opposite order. Only
 * one half of the swap is visible in this listing; verify the full
 * three-assignment swap in the complete file. */
802 static void exchange_uv(MpegEncContext *s)
807 s->pblocks[4] = s->pblocks[5];
/**
 * Allocate the per-thread (per-slice-context) state: ME maps, optional
 * noise-reduction accumulator, DCT blocks and, for H.263-family, the AC
 * prediction values. Scratch buffers are left NULL here; they are
 * allocated linesize-dependently in frame_size_alloc. Returns 0 or -1
 * (resources are freed later through ff_MPV_common_end).
 */
811 static int init_duplicate_context(MpegEncContext *s)
813 int y_size = s->b8_stride * (2 * s->mb_height + 1);
814 int c_size = s->mb_stride * (s->mb_height + 1);
815 int yc_size = y_size + 2 * c_size;
/* Odd MB heights need one extra row of both luma and chroma entries. */
818 if (s->mb_height & 1)
819 yc_size += 2*s->b8_stride + 2*s->mb_stride;
826 s->obmc_scratchpad = NULL;
829 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
830 ME_MAP_SIZE * sizeof(uint32_t), fail)
831 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
832 ME_MAP_SIZE * sizeof(uint32_t), fail)
833 if (s->avctx->noise_reduction) {
834 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
835 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coeffs, double-buffered (the "* 2"). */
838 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
839 s->block = s->blocks[0];
841 for (i = 0; i < 12; i++) {
842 s->pblocks[i] = &s->block[i];
/* VCR2 stores chroma planes swapped — see exchange_uv(). */
844 if (s->avctx->codec_tag == AV_RL32("VCR2"))
847 if (s->out_format == FMT_H263) {
849 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
850 yc_size * sizeof(int16_t) * 16, fail);
/* Layout: luma plane first, then the two chroma planes; +stride+1 skips
 * the guard row/column used by the prediction code. */
851 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
852 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
853 s->ac_val[2] = s->ac_val[1] + c_size;
858 return -1; // free() through ff_MPV_common_end()
/* Free everything init_duplicate_context / frame_size_alloc allocated for
 * one thread context. obmc_scratchpad only aliases me.scratchpad (see
 * frame_size_alloc), so it is NULLed rather than freed. */
861 static void free_duplicate_context(MpegEncContext *s)
866 av_freep(&s->edge_emu_buffer);
867 av_freep(&s->me.scratchpad);
871 s->obmc_scratchpad = NULL;
873 av_freep(&s->dct_error_sum);
874 av_freep(&s->me.map);
875 av_freep(&s->me.score_map);
876 av_freep(&s->blocks);
877 av_freep(&s->ac_val_base);
/* Copy the per-thread-owned pointer/state fields from src into bak; used
 * by ff_update_duplicate_context to preserve dst's own allocations across
 * the whole-struct memcpy. (Most COPY() lines are elided here.) */
881 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
883 #define COPY(a) bak->a = src->a
884 COPY(edge_emu_buffer);
889 COPY(obmc_scratchpad);
896 COPY(me.map_generation);
/**
 * Synchronise a slice-thread context with the main context: save dst's
 * own buffers, memcpy the whole struct from src, restore dst's buffers,
 * then rebuild dst-local pointers (pblocks, VCR2 chroma swap) and lazily
 * allocate scratch buffers if dst has none yet. Returns 0 or negative.
 */
908 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
912 // FIXME copy only needed parts
914 backup_duplicate_context(&bak, dst);
915 memcpy(dst, src, sizeof(MpegEncContext));
916 backup_duplicate_context(dst, &bak);
/* pblocks must point into dst's own block array, not src's. */
917 for (i = 0; i < 12; i++) {
918 dst->pblocks[i] = &dst->block[i];
920 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
922 if (!dst->edge_emu_buffer &&
923 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
924 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
925 "scratch buffers.\n");
928 // STOP_TIMER("update_duplicate_context")
929 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/**
 * Frame-threading: update decoder context s (dst) from s1 (src) between
 * frames — (re)initialise on first use or size change, re-reference all
 * pictures, copy timing/bug-workaround/interlacing state, duplicate any
 * pending bitstream buffer and ensure scratch buffers exist.
 * Returns 0 on success or a negative AVERROR.
 */
933 int ff_mpeg_update_thread_context(AVCodecContext *dst,
934 const AVCodecContext *src)
937 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
944 // FIXME can parameters change on I-frames?
945 // in that case dst may need a reinit
946 if (!s->context_initialized) {
/* First use: clone the whole source context, but the bitstream buffer
 * must not be shared between threads. */
947 memcpy(s, s1, sizeof(MpegEncContext));
950 s->bitstream_buffer = NULL;
951 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
953 if (s1->context_initialized){
954 // s->picture_range_start += MAX_PICTURE_COUNT;
955 // s->picture_range_end += MAX_PICTURE_COUNT;
956 if((ret = ff_MPV_common_init(s)) < 0){
957 memset(s, 0, sizeof(MpegEncContext));
964 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
965 s->context_reinit = 0;
966 s->height = s1->height;
967 s->width = s1->width;
968 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
972 s->avctx->coded_height = s1->avctx->coded_height;
973 s->avctx->coded_width = s1->avctx->coded_width;
974 s->avctx->width = s1->avctx->width;
975 s->avctx->height = s1->avctx->height;
977 s->coded_picture_number = s1->coded_picture_number;
978 s->picture_number = s1->picture_number;
980 av_assert0(!s->picture || s->picture != s1->picture);
/* Re-reference the whole picture pool from the source thread. */
982 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
983 ff_mpeg_unref_picture(s, &s->picture[i]);
984 if (s1->picture[i].f->buf[0] &&
985 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
989 #define UPDATE_PICTURE(pic)\
991 ff_mpeg_unref_picture(s, &s->pic);\
992 if (s1->pic.f && s1->pic.f->buf[0])\
993 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
995 ret = update_picture_tables(&s->pic, &s1->pic);\
1000 UPDATE_PICTURE(current_picture);
1001 UPDATE_PICTURE(last_picture);
1002 UPDATE_PICTURE(next_picture);
/* Translate src's picture pointers into the equivalent entries of s's
 * own picture array. */
1004 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1005 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1006 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1008 // Error/bug resilience
1009 s->next_p_frame_damaged = s1->next_p_frame_damaged;
1010 s->workaround_bugs = s1->workaround_bugs;
1011 s->padding_bug_score = s1->padding_bug_score;
1013 // MPEG4 timing info
/* Bulk-copy the struct-contiguous field range last_time_base..pb_field_time
 * (relies on field layout in MpegEncContext). */
1014 memcpy(&s->last_time_base, &s1->last_time_base,
1015 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1016 (char *) &s1->last_time_base);
1019 s->max_b_frames = s1->max_b_frames;
1020 s->low_delay = s1->low_delay;
1021 s->droppable = s1->droppable;
1023 // DivX handling (doesn't work)
1024 s->divx_packed = s1->divx_packed;
/* Duplicate any leftover bitstream data, padded for the bit reader. */
1026 if (s1->bitstream_buffer) {
1027 if (s1->bitstream_buffer_size +
1028 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
1029 av_fast_malloc(&s->bitstream_buffer,
1030 &s->allocated_bitstream_buffer_size,
1031 s1->allocated_bitstream_buffer_size);
1032 s->bitstream_buffer_size = s1->bitstream_buffer_size;
1033 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1034 s1->bitstream_buffer_size);
1035 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1036 FF_INPUT_BUFFER_PADDING_SIZE);
1039 // linesize dependend scratch buffer allocation
1040 if (!s->edge_emu_buffer)
1042 if (frame_size_alloc(s, s1->linesize) < 0) {
1043 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1044 "scratch buffers.\n");
1045 return AVERROR(ENOMEM);
1048 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1049 "be allocated due to unknown size.\n");
1052 // MPEG2/interlacing info
/* Same contiguous-field-range trick as above, for
 * progressive_sequence..rtp_mode. */
1053 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1054 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1056 if (!s1->first_field) {
1057 s->last_pict_type = s1->pict_type;
1058 if (s1->current_picture_ptr)
1059 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1066 * Set the given MpegEncContext to common defaults
1067 * (same for encoding and decoding).
1068 * The changed fields will not depend upon the
1069 * prior state of the MpegEncContext.
1071 void ff_MPV_common_defaults(MpegEncContext *s)
/* Default to MPEG-1 DC scaling and identity chroma qscale mapping. */
1073 s->y_dc_scale_table =
1074 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1075 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1076 s->progressive_frame = 1;
1077 s->progressive_sequence = 1;
1078 s->picture_structure = PICT_FRAME;
1080 s->coded_picture_number = 0;
1081 s->picture_number = 0;
1086 s->slice_context_count = 1;
1090 * Set the given MpegEncContext to defaults for decoding.
1091 * the changed fields will not depend upon
1092 * the prior state of the MpegEncContext.
1094 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Currently identical to the common defaults; decoder-specific fields
 * (if any) are set elsewhere. */
1096 ff_MPV_common_defaults(s);
/**
 * Initialise the error-resilience context from the current MB geometry:
 * mirrors the relevant MpegEncContext fields into ERContext, allocates
 * the ER work buffers and installs mpeg_er_decode_mb as the concealment
 * callback. Returns 0 or AVERROR(ENOMEM) (freeing both buffers on the
 * partially elided failure path).
 */
1099 static int init_er(MpegEncContext *s)
1101 ERContext *er = &s->er;
1102 int mb_array_size = s->mb_height * s->mb_stride;
1105 er->avctx = s->avctx;
1108 er->mb_index2xy = s->mb_index2xy;
1109 er->mb_num = s->mb_num;
1110 er->mb_width = s->mb_width;
1111 er->mb_height = s->mb_height;
1112 er->mb_stride = s->mb_stride;
1113 er->b8_stride = s->b8_stride;
1115 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1116 er->error_status_table = av_mallocz(mb_array_size);
1117 if (!er->er_temp_buffer || !er->error_status_table)
1120 er->mbskip_table = s->mbskip_table;
1121 er->mbintra_table = s->mbintra_table;
1123 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1124 er->dc_val[i] = s->dc_val[i];
1126 er->decode_mb = mpeg_er_decode_mb;
1131 av_freep(&er->er_temp_buffer);
1132 av_freep(&er->error_status_table);
1133 return AVERROR(ENOMEM);
1137 * Initialize and allocates MpegEncContext fields dependent on the resolution.
1139 static int init_context_frame(MpegEncContext *s)
1141 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Derive the macroblock geometry from the pixel dimensions; strides get
 * +1 padding for the guard column used by prediction/ER code. */
1143 s->mb_width = (s->width + 15) / 16;
1144 s->mb_stride = s->mb_width + 1;
1145 s->b8_stride = s->mb_width * 2 + 1;
1146 mb_array_size = s->mb_height * s->mb_stride;
1147 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1149 /* set default edge pos, will be overridden
1150 * in decode_header if needed */
1151 s->h_edge_pos = s->mb_width * 16;
1152 s->v_edge_pos = s->mb_height * 16;
1154 s->mb_num = s->mb_width * s->mb_height;
1159 s->block_wrap[3] = s->b8_stride;
1161 s->block_wrap[5] = s->mb_stride;
1163 y_size = s->b8_stride * (2 * s->mb_height + 1);
1164 c_size = s->mb_stride * (s->mb_height + 1);
1165 yc_size = y_size + 2 * c_size;
1167 if (s->mb_height & 1)
1168 yc_size += 2*s->b8_stride + 2*s->mb_stride;
1170 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
/* mb_index2xy maps a linear MB index to its (strided) array position. */
1171 for (y = 0; y < s->mb_height; y++)
1172 for (x = 0; x < s->mb_width; x++)
1173 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1175 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1178 /* Allocate MV tables */
1179 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1180 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1181 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1182 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1183 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1184 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
/* Working pointers skip the first padding row+column of each table. */
1185 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1186 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1187 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1188 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1189 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1190 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1192 /* Allocate MB type table */
1193 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1195 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1197 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1198 mb_array_size * sizeof(float), fail);
1199 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1200 mb_array_size * sizeof(float), fail);
1204 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1205 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1206 /* interlaced direct mode decoding tables */
1207 for (i = 0; i < 2; i++) {
1209 for (j = 0; j < 2; j++) {
1210 for (k = 0; k < 2; k++) {
1211 FF_ALLOCZ_OR_GOTO(s->avctx,
1212 s->b_field_mv_table_base[i][j][k],
1213 mv_table_size * 2 * sizeof(int16_t),
1215 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1218 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1219 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1220 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1222 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1225 if (s->out_format == FMT_H263) {
1227 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1228 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1230 /* cbp, ac_pred, pred_dir */
1231 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1232 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1235 if (s->h263_pred || s->h263_plus || !s->encoding) {
1237 // MN: we need these for error resilience of intra-frames
1238 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1239 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1240 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1241 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 = 128 * 8: the neutral DC predictor value. */
1242 for (i = 0; i < yc_size; i++)
1243 s->dc_val_base[i] = 1024;
1246 /* which mb is a intra block */
1247 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1248 memset(s->mbintra_table, 1, mb_array_size);
1250 /* init macroblock skip table */
1251 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1252 // Note the + 1 is for a quicker mpeg4 slice_end detection
1256 return AVERROR(ENOMEM);
1260 * init common structure for both encoder and decoder.
1261 * this assumes that some variables like width/height are already set
/*
 * NOTE(review): this chunk is an elided listing — the leading "12xx" tokens
 * are embedded original line numbers and several interior lines (braces,
 * else branches, goto fail / return statements) are missing.  Code is kept
 * byte-identical here; comments only.
 *
 * Init common structure for both encoder and decoder; assumes width/height
 * and codec_id are already set on the context.
 */
1263 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* One slice context per thread when slice threading is active, else 1. */
1266 int nb_slices = (HAVE_THREADS &&
1267 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1268 s->avctx->thread_count : 1;
1270 if (s->encoding && s->avctx->slices)
1271 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 rounds mb_height up to a multiple of two MB rows. */
1273 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1274 s->mb_height = (s->height + 31) / 32 * 2;
1276 s->mb_height = (s->height + 15) / 16;
1278 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1279 av_log(s->avctx, AV_LOG_ERROR,
1280 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp the slice count: never more than MAX_THREADS or one per MB row. */
1284 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1287 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1289 max_slices = MAX_THREADS;
1290 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1291 " reducing to %d\n", nb_slices, max_slices);
1292 nb_slices = max_slices;
1295 if ((s->width || s->height) &&
1296 av_image_check_size(s->width, s->height, 0, s->avctx))
1299 ff_dct_common_init(s);
1301 s->flags = s->avctx->flags;
1302 s->flags2 = s->avctx->flags2;
1304 /* set chroma shifts */
1305 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1307 &s->chroma_y_shift);
1309 /* convert fourcc to upper case */
1310 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1312 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
/* Allocate the picture pool and an AVFrame for every slot plus the four
 * special pictures (next/last/current/new). */
1314 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1315 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1316 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1317 s->picture[i].f = av_frame_alloc();
1318 if (!s->picture[i].f)
1321 memset(&s->next_picture, 0, sizeof(s->next_picture));
1322 memset(&s->last_picture, 0, sizeof(s->last_picture));
1323 memset(&s->current_picture, 0, sizeof(s->current_picture));
1324 memset(&s->new_picture, 0, sizeof(s->new_picture));
1325 s->next_picture.f = av_frame_alloc();
1326 if (!s->next_picture.f)
1328 s->last_picture.f = av_frame_alloc();
1329 if (!s->last_picture.f)
1331 s->current_picture.f = av_frame_alloc();
1332 if (!s->current_picture.f)
1334 s->new_picture.f = av_frame_alloc();
1335 if (!s->new_picture.f)
1338 if (init_context_frame(s))
1341 s->parse_context.state = -1;
1343 s->context_initialized = 1;
1344 s->thread_context[0] = s;
1346 // if (s->width && s->height) {
/* Set up one duplicated context per slice; each gets a contiguous band of
 * MB rows [start_mb_y, end_mb_y). */
1347 if (nb_slices > 1) {
1348 for (i = 1; i < nb_slices; i++) {
1349 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1350 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1353 for (i = 0; i < nb_slices; i++) {
1354 if (init_duplicate_context(s->thread_context[i]) < 0)
1356 s->thread_context[i]->start_mb_y =
1357 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1358 s->thread_context[i]->end_mb_y =
1359 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1362 if (init_duplicate_context(s) < 0)
1365 s->end_mb_y = s->mb_height;
1367 s->slice_context_count = nb_slices;
/* fail path (elided): tear everything down again. */
1372 ff_MPV_common_end(s);
1377 * Frees and resets MpegEncContext fields depending on the resolution.
1378 * Is used during resolution changes to avoid a full reinitialization of the
1381 static int free_context_frame(MpegEncContext *s)
1385 av_freep(&s->mb_type);
1386 av_freep(&s->p_mv_table_base);
1387 av_freep(&s->b_forw_mv_table_base);
1388 av_freep(&s->b_back_mv_table_base);
1389 av_freep(&s->b_bidir_forw_mv_table_base);
1390 av_freep(&s->b_bidir_back_mv_table_base);
1391 av_freep(&s->b_direct_mv_table_base);
1392 s->p_mv_table = NULL;
1393 s->b_forw_mv_table = NULL;
1394 s->b_back_mv_table = NULL;
1395 s->b_bidir_forw_mv_table = NULL;
1396 s->b_bidir_back_mv_table = NULL;
1397 s->b_direct_mv_table = NULL;
1398 for (i = 0; i < 2; i++) {
1399 for (j = 0; j < 2; j++) {
1400 for (k = 0; k < 2; k++) {
1401 av_freep(&s->b_field_mv_table_base[i][j][k]);
1402 s->b_field_mv_table[i][j][k] = NULL;
1404 av_freep(&s->b_field_select_table[i][j]);
1405 av_freep(&s->p_field_mv_table_base[i][j]);
1406 s->p_field_mv_table[i][j] = NULL;
1408 av_freep(&s->p_field_select_table[i]);
1411 av_freep(&s->dc_val_base);
1412 av_freep(&s->coded_block_base);
1413 av_freep(&s->mbintra_table);
1414 av_freep(&s->cbp_table);
1415 av_freep(&s->pred_dir_table);
1417 av_freep(&s->mbskip_table);
1419 av_freep(&s->er.error_status_table);
1420 av_freep(&s->er.er_temp_buffer);
1421 av_freep(&s->mb_index2xy);
1422 av_freep(&s->lambda_table);
1424 av_freep(&s->cplx_tab);
1425 av_freep(&s->bits_tab);
1427 s->linesize = s->uvlinesize = 0;
/*
 * NOTE(review): elided listing — leading "14xx" tokens are embedded line
 * numbers; error labels ("fail:") and several braces are missing.  Code is
 * kept byte-identical; comments only.
 *
 * Re-create the frame-size-dependent state after a mid-stream resolution
 * change without tearing down the whole context.
 */
1432 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Drop all per-slice duplicated contexts first. */
1436 if (s->slice_context_count > 1) {
1437 for (i = 0; i < s->slice_context_count; i++) {
1438 free_duplicate_context(s->thread_context[i]);
1440 for (i = 1; i < s->slice_context_count; i++) {
1441 av_freep(&s->thread_context[i]);
1444 free_duplicate_context(s);
1446 if ((err = free_context_frame(s)) < 0)
/* Force every pooled picture to be reallocated at the new size. */
1450 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1451 s->picture[i].needs_realloc = 1;
1454 s->last_picture_ptr =
1455 s->next_picture_ptr =
1456 s->current_picture_ptr = NULL;
/* Same mb_height derivation as ff_MPV_common_init(). */
1459 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1460 s->mb_height = (s->height + 31) / 32 * 2;
1462 s->mb_height = (s->height + 15) / 16;
1464 if ((s->width || s->height) &&
1465 av_image_check_size(s->width, s->height, 0, s->avctx))
1466 return AVERROR_INVALIDDATA;
1468 if ((err = init_context_frame(s)))
1471 s->thread_context[0] = s;
/* Re-duplicate slice contexts and re-split the MB rows between them. */
1473 if (s->width && s->height) {
1474 int nb_slices = s->slice_context_count;
1475 if (nb_slices > 1) {
1476 for (i = 1; i < nb_slices; i++) {
1477 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1478 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1481 for (i = 0; i < nb_slices; i++) {
1482 if (init_duplicate_context(s->thread_context[i]) < 0)
1484 s->thread_context[i]->start_mb_y =
1485 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1486 s->thread_context[i]->end_mb_y =
1487 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1490 err = init_duplicate_context(s);
1494 s->end_mb_y = s->mb_height;
1496 s->slice_context_count = nb_slices;
/* fail path (elided): full teardown. */
1501 ff_MPV_common_end(s);
1505 /* free the common structure for both encoder and decoder (counterpart of ff_MPV_common_init) */
1506 void ff_MPV_common_end(MpegEncContext *s)
1510 if (s->slice_context_count > 1) {
1511 for (i = 0; i < s->slice_context_count; i++) {
1512 free_duplicate_context(s->thread_context[i]);
1514 for (i = 1; i < s->slice_context_count; i++) {
1515 av_freep(&s->thread_context[i]);
1517 s->slice_context_count = 1;
1518 } else free_duplicate_context(s);
1520 av_freep(&s->parse_context.buffer);
1521 s->parse_context.buffer_size = 0;
1523 av_freep(&s->bitstream_buffer);
1524 s->allocated_bitstream_buffer_size = 0;
1527 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1528 ff_free_picture_tables(&s->picture[i]);
1529 ff_mpeg_unref_picture(s, &s->picture[i]);
1530 av_frame_free(&s->picture[i].f);
1533 av_freep(&s->picture);
1534 ff_free_picture_tables(&s->last_picture);
1535 ff_mpeg_unref_picture(s, &s->last_picture);
1536 av_frame_free(&s->last_picture.f);
1537 ff_free_picture_tables(&s->current_picture);
1538 ff_mpeg_unref_picture(s, &s->current_picture);
1539 av_frame_free(&s->current_picture.f);
1540 ff_free_picture_tables(&s->next_picture);
1541 ff_mpeg_unref_picture(s, &s->next_picture);
1542 av_frame_free(&s->next_picture.f);
1543 ff_free_picture_tables(&s->new_picture);
1544 ff_mpeg_unref_picture(s, &s->new_picture);
1545 av_frame_free(&s->new_picture.f);
1547 free_context_frame(s);
1549 s->context_initialized = 0;
1550 s->last_picture_ptr =
1551 s->next_picture_ptr =
1552 s->current_picture_ptr = NULL;
1553 s->linesize = s->uvlinesize = 0;
1556 av_cold void ff_init_rl(RLTable *rl,
1557 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1559 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1560 uint8_t index_run[MAX_RUN + 1];
1561 int last, run, level, start, end, i;
1563 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1564 if (static_store && rl->max_level[0])
1567 /* compute max_level[], max_run[] and index_run[] */
1568 for (last = 0; last < 2; last++) {
1577 memset(max_level, 0, MAX_RUN + 1);
1578 memset(max_run, 0, MAX_LEVEL + 1);
1579 memset(index_run, rl->n, MAX_RUN + 1);
1580 for (i = start; i < end; i++) {
1581 run = rl->table_run[i];
1582 level = rl->table_level[i];
1583 if (index_run[run] == rl->n)
1585 if (level > max_level[run])
1586 max_level[run] = level;
1587 if (run > max_run[level])
1588 max_run[level] = run;
1591 rl->max_level[last] = static_store[last];
1593 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1594 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1596 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1598 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1599 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1601 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1603 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1604 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1608 av_cold void ff_init_vlc_rl(RLTable *rl)
1612 for (q = 0; q < 32; q++) {
1614 int qadd = (q - 1) | 1;
1620 for (i = 0; i < rl->vlc.table_size; i++) {
1621 int code = rl->vlc.table[i][0];
1622 int len = rl->vlc.table[i][1];
1625 if (len == 0) { // illegal code
1628 } else if (len < 0) { // more bits needed
1632 if (code == rl->n) { // esc
1636 run = rl->table_run[code] + 1;
1637 level = rl->table_level[code] * qmul + qadd;
1638 if (code >= rl->last) run += 192;
1641 rl->rl_vlc[q][i].len = len;
1642 rl->rl_vlc[q][i].level = level;
1643 rl->rl_vlc[q][i].run = run;
1648 static void release_unused_pictures(MpegEncContext *s)
1652 /* release non reference frames */
1653 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1654 if (!s->picture[i].reference)
1655 ff_mpeg_unref_picture(s, &s->picture[i]);
1659 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1661 if (pic == s->last_picture_ptr)
1663 if (pic->f->buf[0] == NULL)
1665 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1670 static int find_unused_picture(MpegEncContext *s, int shared)
1675 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1676 if (s->picture[i].f->buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1680 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1681 if (pic_is_unused(s, &s->picture[i]))
1686 av_log(s->avctx, AV_LOG_FATAL,
1687 "Internal error, picture buffer overflow\n");
1688 /* We could return -1, but the codec would crash trying to draw into a
1689 * non-existing frame anyway. This is safer than waiting for a random crash.
1690 * Also the return of this is never useful, an encoder must only allocate
1691 * as much as allowed in the specification. This has no relationship to how
1692 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1693 * enough for such valid streams).
1694 * Plus, a decoder has to check stream validity and remove frames if too
1695 * many reference frames are around. Waiting for "OOM" is not correct at
1696 * all. Similarly, missing reference frames have to be replaced by
1697 * interpolated/MC frames, anything else is a bug in the codec ...
1703 int ff_find_unused_picture(MpegEncContext *s, int shared)
1705 int ret = find_unused_picture(s, shared);
1707 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1708 if (s->picture[ret].needs_realloc) {
1709 s->picture[ret].needs_realloc = 0;
1710 ff_free_picture_tables(&s->picture[ret]);
1711 ff_mpeg_unref_picture(s, &s->picture[ret]);
1717 static void gray_frame(AVFrame *frame)
1719 int i, h_chroma_shift, v_chroma_shift;
1721 av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1723 for(i=0; i<frame->height; i++)
1724 memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1725 for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1726 memset(frame->data[1] + frame->linesize[1]*i,
1727 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1728 memset(frame->data[2] + frame->linesize[2]*i,
1729 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1734 * generic function called after decoding
1735 * the header and before a frame is decoded.
/*
 * NOTE(review): elided listing — leading "17xx/18xx/19xx" tokens are
 * embedded line numbers; error returns, else branches and many braces are
 * missing.  Code kept byte-identical; comments only.
 *
 * Generic per-frame setup called after the header is decoded and before the
 * frame data is decoded: recycles old pictures, picks/allocates the current
 * picture, synthesizes dummy reference frames when needed and selects the
 * dequantizer functions.
 */
1737 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1743 if (!ff_thread_can_start_frame(avctx)) {
1744 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1748 /* mark & release old frames */
1749 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1750 s->last_picture_ptr != s->next_picture_ptr &&
1751 s->last_picture_ptr->f->buf[0]) {
1752 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1755 /* release forgotten pictures */
1756 /* if (mpeg124/h263) */
1757 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1758 if (&s->picture[i] != s->last_picture_ptr &&
1759 &s->picture[i] != s->next_picture_ptr &&
1760 s->picture[i].reference && !s->picture[i].needs_realloc) {
1761 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1762 av_log(avctx, AV_LOG_ERROR,
1763 "releasing zombie picture\n");
1764 ff_mpeg_unref_picture(s, &s->picture[i]);
1768 ff_mpeg_unref_picture(s, &s->current_picture);
1770 release_unused_pictures(s);
/* Reuse a pre-set current_picture_ptr without a buffer, else grab a slot. */
1772 if (s->current_picture_ptr &&
1773 s->current_picture_ptr->f->buf[0] == NULL) {
1774 // we already have a unused image
1775 // (maybe it was set before reading the header)
1776 pic = s->current_picture_ptr;
1778 i = ff_find_unused_picture(s, 0);
1780 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1783 pic = &s->picture[i];
1787 if (!s->droppable) {
1788 if (s->pict_type != AV_PICTURE_TYPE_B)
1792 pic->f->coded_picture_number = s->coded_picture_number++;
1794 if (ff_alloc_picture(s, pic, 0) < 0)
1797 s->current_picture_ptr = pic;
1798 // FIXME use only the vars from current_pic
/* Field pictures derive top_field_first from structure + first_field. */
1799 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1800 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1801 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1802 if (s->picture_structure != PICT_FRAME)
1803 s->current_picture_ptr->f->top_field_first =
1804 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1806 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1807 !s->progressive_sequence;
1808 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1810 s->current_picture_ptr->f->pict_type = s->pict_type;
1811 // if (s->flags && CODEC_FLAG_QSCALE)
1812 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1813 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1815 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1816 s->current_picture_ptr)) < 0)
/* Non-B frames shift the reference window: next becomes last. */
1819 if (s->pict_type != AV_PICTURE_TYPE_B) {
1820 s->last_picture_ptr = s->next_picture_ptr;
1822 s->next_picture_ptr = s->current_picture_ptr;
1824 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1825 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1826 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1827 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1828 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1829 s->pict_type, s->droppable);
/* Missing last reference (broken stream / B first / field keyframe):
 * allocate a gray dummy so motion compensation has something to read. */
1831 if ((s->last_picture_ptr == NULL ||
1832 s->last_picture_ptr->f->buf[0] == NULL) &&
1833 (s->pict_type != AV_PICTURE_TYPE_I ||
1834 s->picture_structure != PICT_FRAME)) {
1835 int h_chroma_shift, v_chroma_shift;
1836 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1837 &h_chroma_shift, &v_chroma_shift);
1838 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1839 av_log(avctx, AV_LOG_DEBUG,
1840 "allocating dummy last picture for B frame\n");
1841 else if (s->pict_type != AV_PICTURE_TYPE_I)
1842 av_log(avctx, AV_LOG_ERROR,
1843 "warning: first frame is no keyframe\n");
1844 else if (s->picture_structure != PICT_FRAME)
1845 av_log(avctx, AV_LOG_DEBUG,
1846 "allocate dummy last picture for field based first keyframe\n");
1848 /* Allocate a dummy frame */
1849 i = ff_find_unused_picture(s, 0);
1851 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1854 s->last_picture_ptr = &s->picture[i];
1856 s->last_picture_ptr->reference = 3;
1857 s->last_picture_ptr->f->key_frame = 0;
1858 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1860 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1861 s->last_picture_ptr = NULL;
/* Fill the dummy with mid-gray; hwaccel/VDPAU frames cannot be written. */
1865 if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1866 for(i=0; i<avctx->height; i++)
1867 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1868 0x80, avctx->width);
1869 for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1870 memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1871 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1872 memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1873 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
/* FLV1/H263 use luma 16 (black) instead of gray for the dummy. */
1876 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1877 for(i=0; i<avctx->height; i++)
1878 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
1882 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1883 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same trick for a missing next reference before a B frame. */
1885 if ((s->next_picture_ptr == NULL ||
1886 s->next_picture_ptr->f->buf[0] == NULL) &&
1887 s->pict_type == AV_PICTURE_TYPE_B) {
1888 /* Allocate a dummy frame */
1889 i = ff_find_unused_picture(s, 0);
1891 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1894 s->next_picture_ptr = &s->picture[i];
1896 s->next_picture_ptr->reference = 3;
1897 s->next_picture_ptr->f->key_frame = 0;
1898 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1900 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1901 s->next_picture_ptr = NULL;
1904 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1905 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1908 #if 0 // BUFREF-FIXME
1909 memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1910 memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
/* Re-reference last/next into the working copies for this frame. */
1912 if (s->last_picture_ptr) {
1913 ff_mpeg_unref_picture(s, &s->last_picture);
1914 if (s->last_picture_ptr->f->buf[0] &&
1915 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1916 s->last_picture_ptr)) < 0)
1919 if (s->next_picture_ptr) {
1920 ff_mpeg_unref_picture(s, &s->next_picture);
1921 if (s->next_picture_ptr->f->buf[0] &&
1922 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1923 s->next_picture_ptr)) < 0)
1927 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1928 s->last_picture_ptr->f->buf[0]));
/* Field pictures: double the linesizes and offset data for bottom field. */
1930 if (s->picture_structure!= PICT_FRAME) {
1932 for (i = 0; i < 4; i++) {
1933 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1934 s->current_picture.f->data[i] +=
1935 s->current_picture.f->linesize[i];
1937 s->current_picture.f->linesize[i] *= 2;
1938 s->last_picture.f->linesize[i] *= 2;
1939 s->next_picture.f->linesize[i] *= 2;
1943 s->err_recognition = avctx->err_recognition;
1945 /* set dequantizer, we can't do it during init as
1946 * it might change for mpeg4 and we can't do it in the header
1947 * decode as init is not called for mpeg4 there yet */
1948 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1949 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1950 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1951 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1952 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1953 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1955 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1956 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1959 if (s->avctx->debug & FF_DEBUG_NOMC) {
1960 gray_frame(s->current_picture_ptr->f);
1966 /* called after a frame has been decoded. */
1967 void ff_MPV_frame_end(MpegEncContext *s)
1971 if (s->current_picture.reference)
1972 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1976 * Draw a line from (ex, ey) -> (sx, sy).
1977 * @param w width of the image
1978 * @param h height of the image
1979 * @param stride stride/linesize of the image
1980  * @param color color of the line
1982 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1983 int w, int h, int stride, int color)
1987 sx = av_clip(sx, 0, w - 1);
1988 sy = av_clip(sy, 0, h - 1);
1989 ex = av_clip(ex, 0, w - 1);
1990 ey = av_clip(ey, 0, h - 1);
1992 buf[sy * stride + sx] += color;
1994 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1996 FFSWAP(int, sx, ex);
1997 FFSWAP(int, sy, ey);
1999 buf += sx + sy * stride;
2001 f = ((ey - sy) << 16) / ex;
2002 for (x = 0; x <= ex; x++) {
2004 fr = (x * f) & 0xFFFF;
2005 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2006 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
2010 FFSWAP(int, sx, ex);
2011 FFSWAP(int, sy, ey);
2013 buf += sx + sy * stride;
2016 f = ((ex - sx) << 16) / ey;
2019 for(y= 0; y <= ey; y++){
2021 fr = (y*f) & 0xFFFF;
2022 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2023 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
2029 * Draw an arrow from (ex, ey) -> (sx, sy).
2030 * @param w width of the image
2031 * @param h height of the image
2032 * @param stride stride/linesize of the image
2033 * @param color color of the arrow
/**
 * Draw an arrow from (ex, ey) -> (sx, sy): the shaft plus, for arrows
 * longer than 3 pixels, two head strokes at the (sx, sy) end.
 *
 * NOTE(review): reconstructed from an elided listing (dx/dy/rx/ry setup
 * lines were missing).
 *
 * @param w      width of the image
 * @param h      height of the image
 * @param stride stride/linesize of the image
 * @param color  color of the arrow
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color)
{
    int dx, dy;

    /* clip loosely; draw_line() does the hard clipping */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    dx = ex - sx;
    dy = ey - sy;

    if (dx * dx + dy * dy > 3 * 3) {
        int rx     = dx + dy;
        int ry     = -dx + dy;
        int length = ff_sqrt((rx * rx + ry * ry) << 8);

        // FIXME subpixel accuracy
        rx = ROUNDED_DIV(rx * 3 << 4, length);
        ry = ROUNDED_DIV(ry * 3 << 4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
2064 * Print debugging info for the given picture.
/*
 * NOTE(review): elided listing — leading "20xx/21xx/22xx/23xx" tokens are
 * embedded line numbers; braces, `continue` statements, `else` lines and
 * the COLOR() invocations inside the mb-type branches are missing.  Code
 * kept byte-identical; comments only.
 *
 * Print textual per-macroblock debug info (skip/QP/type maps) and/or draw
 * motion vectors, QP and mb-type visualizations directly into the frame.
 */
2066 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2067 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2069 int mb_width, int mb_height, int mb_stride, int quarter_sample)
/* hwaccel/VDPAU frames have no readable/writable pixel data */
2071 if (avctx->hwaccel || !mbtype_table
2072 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
/* ---- textual per-MB dump to the log ---- */
2076 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2079 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2080 av_get_picture_type_char(pict->pict_type));
2081 for (y = 0; y < mb_height; y++) {
2082 for (x = 0; x < mb_width; x++) {
2083 if (avctx->debug & FF_DEBUG_SKIP) {
2084 int count = mbskip_table[x + y * mb_stride];
2087 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2089 if (avctx->debug & FF_DEBUG_QP) {
2090 av_log(avctx, AV_LOG_DEBUG, "%2d",
2091 qscale_table[x + y * mb_stride]);
2093 if (avctx->debug & FF_DEBUG_MB_TYPE) {
2094 int mb_type = mbtype_table[x + y * mb_stride];
2095 // Type & MV direction
2096 if (IS_PCM(mb_type))
2097 av_log(avctx, AV_LOG_DEBUG, "P");
2098 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2099 av_log(avctx, AV_LOG_DEBUG, "A");
2100 else if (IS_INTRA4x4(mb_type))
2101 av_log(avctx, AV_LOG_DEBUG, "i");
2102 else if (IS_INTRA16x16(mb_type))
2103 av_log(avctx, AV_LOG_DEBUG, "I");
2104 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2105 av_log(avctx, AV_LOG_DEBUG, "d");
2106 else if (IS_DIRECT(mb_type))
2107 av_log(avctx, AV_LOG_DEBUG, "D");
2108 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2109 av_log(avctx, AV_LOG_DEBUG, "g");
2110 else if (IS_GMC(mb_type))
2111 av_log(avctx, AV_LOG_DEBUG, "G");
2112 else if (IS_SKIP(mb_type))
2113 av_log(avctx, AV_LOG_DEBUG, "S");
2114 else if (!USES_LIST(mb_type, 1))
2115 av_log(avctx, AV_LOG_DEBUG, ">");
2116 else if (!USES_LIST(mb_type, 0))
2117 av_log(avctx, AV_LOG_DEBUG, "<");
2119 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2120 av_log(avctx, AV_LOG_DEBUG, "X");
/* second character: block partitioning */
2124 if (IS_8X8(mb_type))
2125 av_log(avctx, AV_LOG_DEBUG, "+");
2126 else if (IS_16X8(mb_type))
2127 av_log(avctx, AV_LOG_DEBUG, "-");
2128 else if (IS_8X16(mb_type))
2129 av_log(avctx, AV_LOG_DEBUG, "|");
2130 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2131 av_log(avctx, AV_LOG_DEBUG, " ");
2133 av_log(avctx, AV_LOG_DEBUG, "?");
/* third character: interlacing */
2136 if (IS_INTERLACED(mb_type))
2137 av_log(avctx, AV_LOG_DEBUG, "=");
2139 av_log(avctx, AV_LOG_DEBUG, " ");
2142 av_log(avctx, AV_LOG_DEBUG, "\n");
/* ---- visual overlays drawn into the frame itself ---- */
2146 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2147 (avctx->debug_mv)) {
2148 const int shift = 1 + quarter_sample;
2152 int h_chroma_shift, v_chroma_shift, block_height;
2153 const int width = avctx->width;
2154 const int height = avctx->height;
/* H.264/SVQ3 store MVs on a 4x4 grid (log2=2), others on 8x8 (log2=1) */
2155 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2156 const int mv_stride = (mb_width << mv_sample_log2) +
2157 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2159 *low_delay = 0; // needed to see the vectors without trashing the buffers
2161 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2163 av_frame_make_writable(pict);
2165 pict->opaque = NULL;
2166 ptr = pict->data[0];
2167 block_height = 16 >> v_chroma_shift;
2169 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2171 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2172 const int mb_index = mb_x + mb_y * mb_stride;
/* motion vector arrows: type 0 = P forward, 1 = B forward, 2 = B backward */
2173 if ((avctx->debug_mv) && motion_val[0]) {
2175 for (type = 0; type < 3; type++) {
2179 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2180 (pict->pict_type!= AV_PICTURE_TYPE_P))
2185 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2186 (pict->pict_type!= AV_PICTURE_TYPE_B))
2191 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2192 (pict->pict_type!= AV_PICTURE_TYPE_B))
2197 if (!USES_LIST(mbtype_table[mb_index], direction))
/* one arrow per partition: 4 for 8x8, 2 for 16x8/8x16, 1 for 16x16 */
2200 if (IS_8X8(mbtype_table[mb_index])) {
2202 for (i = 0; i < 4; i++) {
2203 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2204 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2205 int xy = (mb_x * 2 + (i & 1) +
2206 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2207 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2208 int my = (motion_val[direction][xy][1] >> shift) + sy;
2209 draw_arrow(ptr, sx, sy, mx, my, width,
2210 height, pict->linesize[0], 100);
2212 } else if (IS_16X8(mbtype_table[mb_index])) {
2214 for (i = 0; i < 2; i++) {
2215 int sx = mb_x * 16 + 8;
2216 int sy = mb_y * 16 + 4 + 8 * i;
2217 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2218 int mx = (motion_val[direction][xy][0] >> shift);
2219 int my = (motion_val[direction][xy][1] >> shift);
2221 if (IS_INTERLACED(mbtype_table[mb_index]))
2224 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2225 height, pict->linesize[0], 100);
2227 } else if (IS_8X16(mbtype_table[mb_index])) {
2229 for (i = 0; i < 2; i++) {
2230 int sx = mb_x * 16 + 4 + 8 * i;
2231 int sy = mb_y * 16 + 8;
2232 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2233 int mx = motion_val[direction][xy][0] >> shift;
2234 int my = motion_val[direction][xy][1] >> shift;
2236 if (IS_INTERLACED(mbtype_table[mb_index]))
2239 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2240 height, pict->linesize[0], 100);
2243 int sx= mb_x * 16 + 8;
2244 int sy= mb_y * 16 + 8;
2245 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2246 int mx= (motion_val[direction][xy][0]>>shift) + sx;
2247 int my= (motion_val[direction][xy][1]>>shift) + sy;
2248 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
/* QP visualization: paint both chroma planes with a QP-derived gray */
2252 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2253 uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2254 0x0101010101010101ULL;
2256 for (y = 0; y < block_height; y++) {
2257 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2258 (block_height * mb_y + y) *
2259 pict->linesize[1]) = c;
2260 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2261 (block_height * mb_y + y) *
2262 pict->linesize[2]) = c;
/* mb-type visualization: one chroma color per macroblock class */
2265 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2267 int mb_type = mbtype_table[mb_index];
/* COLOR(): U/V from hue angle theta (degrees) and saturation radius r */
2270 #define COLOR(theta, r) \
2271 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2272 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2276 if (IS_PCM(mb_type)) {
2278 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2279 IS_INTRA16x16(mb_type)) {
2281 } else if (IS_INTRA4x4(mb_type)) {
2283 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2285 } else if (IS_DIRECT(mb_type)) {
2287 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2289 } else if (IS_GMC(mb_type)) {
2291 } else if (IS_SKIP(mb_type)) {
2293 } else if (!USES_LIST(mb_type, 1)) {
2295 } else if (!USES_LIST(mb_type, 0)) {
2298 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* replicate the single-byte U/V values across a whole 8-byte store */
2302 u *= 0x0101010101010101ULL;
2303 v *= 0x0101010101010101ULL;
2304 for (y = 0; y < block_height; y++) {
2305 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2306 (block_height * mb_y + y) * pict->linesize[1]) = u;
2307 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2308 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* segmentation grid: XOR luma so partition borders stay visible */
2312 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2313 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2314 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2315 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2316 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2318 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2319 for (y = 0; y < 16; y++)
2320 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2321 pict->linesize[0]] ^= 0x80;
2323 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2324 int dm = 1 << (mv_sample_log2 - 2);
2325 for (i = 0; i < 4; i++) {
2326 int sx = mb_x * 16 + 8 * (i & 1);
2327 int sy = mb_y * 16 + 8 * (i >> 1);
2328 int xy = (mb_x * 2 + (i & 1) +
2329 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2331 int32_t *mv = (int32_t *) &motion_val[0][xy];
2332 if (mv[0] != mv[dm] ||
2333 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2334 for (y = 0; y < 8; y++)
2335 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2336 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2337 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2338 pict->linesize[0]) ^= 0x8080808080808080ULL;
2342 if (IS_INTERLACED(mb_type) &&
2343 avctx->codec->id == AV_CODEC_ID_H264) {
2347 mbskip_table[mb_index] = 0;
2353 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2355 ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
2356 p->qscale_table, p->motion_val, &s->low_delay,
2357 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2360 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2362 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2363 int offset = 2*s->mb_stride + 1;
2365 return AVERROR(ENOMEM);
2366 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2367 ref->size -= offset;
2368 ref->data += offset;
2369 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/*
 * Half-pel motion compensation for one block in lowres mode.
 * NOTE(review): this block was damaged during extraction — the opening brace,
 * the declarations of emu/sx/sy, the quarter_sample halving body, several
 * closing braces and the final `return emu;` appear to be missing. The visible
 * statements are kept byte-identical below; confirm against the original file.
 */
2372 static inline int hpel_motion_lowres(MpegEncContext *s,
2373 uint8_t *dest, uint8_t *src,
2374 int field_based, int field_select,
2375 int src_x, int src_y,
2376 int width, int height, ptrdiff_t stride,
2377 int h_edge_pos, int v_edge_pos,
2378 int w, int h, h264_chroma_mc_func *pix_op,
2379 int motion_x, int motion_y)
2381 const int lowres = s->avctx->lowres;
2382 const int op_index = FFMIN(lowres, 3);
/* mask selecting the sub-pel fraction of a motion vector at this lowres level */
2383 const int s_mask = (2 << lowres) - 1;
2387 if (s->quarter_sample) {
/* sub-pel fraction and integer source position */
2392 sx = motion_x & s_mask;
2393 sy = motion_y & s_mask;
/* note: `>> lowres + 1` parses as `>> (lowres + 1)` — `+` binds tighter than `>>` */
2394 src_x += motion_x >> lowres + 1;
2395 src_y += motion_y >> lowres + 1;
2397 src += src_y * stride + src_x;
/* use the edge emulation buffer when the block reaches outside the picture */
2399 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2400 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2401 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2402 s->linesize, s->linesize,
2403 w + 1, (h + 1) << field_based,
2404 src_x, src_y << field_based,
2405 h_edge_pos, v_edge_pos);
2406 src = s->edge_emu_buffer;
/* rescale the sub-pel fraction to the chroma-mc function's 1/8-pel domain */
2410 sx = (sx << 2) >> lowres;
2411 sy = (sy << 2) >> lowres;
2414 pix_op[op_index](dest, src, stride, h, sx, sy);
2418 /* apply one mpeg motion vector to the three components */
/*
 * NOTE(review): extraction dropped several lines of this function, including
 * the dest_y/dest_cb/dest_cr/field_based/bottom_field/field_select/h/mb_y
 * parameters of the signature, the quarter_sample body, the H263/H261 mx/my
 * setup, and multiple braces. Visible code is kept byte-identical; verify
 * against the original before building.
 */
2419 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2426 uint8_t **ref_picture,
2427 h264_chroma_mc_func *pix_op,
2428 int motion_x, int motion_y,
2431 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2432 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2433 ptrdiff_t uvlinesize, linesize;
2434 const int lowres = s->avctx->lowres;
/* chroma op index depends on subsampling; clamped to the largest table entry */
2435 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2436 const int block_s = 8>>lowres;
2437 const int s_mask = (2 << lowres) - 1;
2438 const int h_edge_pos = s->h_edge_pos >> lowres;
2439 const int v_edge_pos = s->v_edge_pos >> lowres;
2440 linesize = s->current_picture.f->linesize[0] << field_based;
2441 uvlinesize = s->current_picture.f->linesize[1] << field_based;
2443 // FIXME obviously not perfect but qpel will not work in lowres anyway
2444 if (s->quarter_sample) {
/* compensate for the field offset when selecting the other parity */
2450 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2453 sx = motion_x & s_mask;
2454 sy = motion_y & s_mask;
/* `>> lowres + 1` is `>> (lowres + 1)` — additive binds tighter than shift */
2455 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2456 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* chroma source position depends on the output format's chroma MV rules */
2458 if (s->out_format == FMT_H263) {
2459 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2460 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2461 uvsrc_x = src_x >> 1;
2462 uvsrc_y = src_y >> 1;
2463 } else if (s->out_format == FMT_H261) {
2464 // even chroma mv's are full pel in H261
2467 uvsx = (2 * mx) & s_mask;
2468 uvsy = (2 * my) & s_mask;
2469 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2470 uvsrc_y = mb_y * block_s + (my >> lowres);
2472 if(s->chroma_y_shift){
2477 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2478 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2480 if(s->chroma_x_shift){
2484 uvsy = motion_y & s_mask;
2486 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2489 uvsx = motion_x & s_mask;
2490 uvsy = motion_y & s_mask;
2497 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2498 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2499 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* emulate picture edges when the reference block reaches outside */
2501 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2502 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2503 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2504 linesize >> field_based, linesize >> field_based,
2505 17, 17 + field_based,
2506 src_x, src_y << field_based, h_edge_pos,
2508 ptr_y = s->edge_emu_buffer;
/* chroma planes skipped entirely in gray-only decoding */
2509 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2510 uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2511 uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2512 s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2513 uvlinesize >> field_based, uvlinesize >> field_based,
2515 uvsrc_x, uvsrc_y << field_based,
2516 h_edge_pos >> 1, v_edge_pos >> 1);
2517 s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2518 uvlinesize >> field_based,uvlinesize >> field_based,
2520 uvsrc_x, uvsrc_y << field_based,
2521 h_edge_pos >> 1, v_edge_pos >> 1);
2527 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
/* bottom-field destinations start one line down */
2529 dest_y += s->linesize;
2530 dest_cb += s->uvlinesize;
2531 dest_cr += s->uvlinesize;
2535 ptr_y += s->linesize;
2536 ptr_cb += s->uvlinesize;
2537 ptr_cr += s->uvlinesize;
/* rescale sub-pel fractions to the 1/8-pel domain of the chroma-mc functions */
2540 sx = (sx << 2) >> lowres;
2541 sy = (sy << 2) >> lowres;
2542 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2544 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2545 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2546 uvsx = (uvsx << 2) >> lowres;
2547 uvsy = (uvsy << 2) >> lowres;
2549 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2550 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2553 // FIXME h261 lowres loop filter
/*
 * Chroma motion compensation for 4MV macroblocks in lowres mode: the four
 * luma vectors are averaged into one chroma vector (done by the caller / the
 * missing lines) and both chroma planes are compensated with it.
 * NOTE(review): extraction dropped the mx/my parameters, the opening brace,
 * the quarter_sample body, sx/sy assignments and several closing braces;
 * the visible statements are kept byte-identical.
 */
2556 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2557 uint8_t *dest_cb, uint8_t *dest_cr,
2558 uint8_t **ref_picture,
2559 h264_chroma_mc_func * pix_op,
2562 const int lowres = s->avctx->lowres;
2563 const int op_index = FFMIN(lowres, 3);
2564 const int block_s = 8 >> lowres;
2565 const int s_mask = (2 << lowres) - 1;
/* `>> lowres + 1` parses as `>> (lowres + 1)` */
2566 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2567 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2568 int emu = 0, src_x, src_y, sx, sy;
2572 if (s->quarter_sample) {
2577 /* In case of 8X8, we construct a single chroma motion vector
2578 with a special rounding */
2579 mx = ff_h263_round_chroma(mx);
2580 my = ff_h263_round_chroma(my);
2584 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2585 src_y = s->mb_y * block_s + (my >> lowres + 1);
2587 offset = src_y * s->uvlinesize + src_x;
/* Cb plane */
2588 ptr = ref_picture[1] + offset;
2589 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2590 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2591 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2592 s->uvlinesize, s->uvlinesize,
2594 src_x, src_y, h_edge_pos, v_edge_pos);
2595 ptr = s->edge_emu_buffer;
2598 sx = (sx << 2) >> lowres;
2599 sy = (sy << 2) >> lowres;
2600 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr plane reuses the same offset and (if needed) edge emulation */
2602 ptr = ref_picture[2] + offset;
2604 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2605 s->uvlinesize, s->uvlinesize,
2607 src_x, src_y, h_edge_pos, v_edge_pos);
2608 ptr = s->edge_emu_buffer;
2610 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
/*
 * Dispatch motion compensation for one macroblock in lowres mode according to
 * s->mv_type (MV_TYPE_16X16 / 8X8 / FIELD / 16X8 / DMV in the full source).
 * NOTE(review): extraction dropped the dest_cr parameter line, the case
 * labels of the switch, and many braces; visible statements kept
 * byte-identical — confirm against the original file.
 */
2624 static inline void MPV_motion_lowres(MpegEncContext *s,
2625 uint8_t *dest_y, uint8_t *dest_cb,
2627 int dir, uint8_t **ref_picture,
2628 h264_chroma_mc_func *pix_op)
2632 const int lowres = s->avctx->lowres;
2633 const int block_s = 8 >>lowres;
2638 switch (s->mv_type) {
/* 16x16: a single vector covers the whole macroblock */
2640 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2642 ref_picture, pix_op,
2643 s->mv[dir][0][0], s->mv[dir][0][1],
/* 8x8: four luma vectors, chroma compensated with their average */
2649 for (i = 0; i < 4; i++) {
2650 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2651 s->linesize) * block_s,
2652 ref_picture[0], 0, 0,
2653 (2 * mb_x + (i & 1)) * block_s,
2654 (2 * mb_y + (i >> 1)) * block_s,
2655 s->width, s->height, s->linesize,
2656 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2657 block_s, block_s, pix_op,
2658 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate luma vectors for the chroma average */
2660 mx += s->mv[dir][i][0];
2661 my += s->mv[dir][i][1];
2664 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2665 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field MVs inside a frame picture: compensate top then bottom field */
2669 if (s->picture_structure == PICT_FRAME) {
2671 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2672 1, 0, s->field_select[dir][0],
2673 ref_picture, pix_op,
2674 s->mv[dir][0][0], s->mv[dir][0][1],
2677 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2678 1, 1, s->field_select[dir][1],
2679 ref_picture, pix_op,
2680 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture referencing the opposite parity of the current frame */
2683 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2684 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2685 ref_picture = s->current_picture_ptr->f->data;
2688 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2689 0, 0, s->field_select[dir][0],
2690 ref_picture, pix_op,
2692 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two vectors, each covering half the macroblock height */
2696 for (i = 0; i < 2; i++) {
2697 uint8_t **ref2picture;
2699 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2700 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2701 ref2picture = ref_picture;
2703 ref2picture = s->current_picture_ptr->f->data;
2706 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2707 0, 0, s->field_select[dir][i],
2708 ref2picture, pix_op,
2709 s->mv[dir][i][0], s->mv[dir][i][1] +
2710 2 * block_s * i, block_s, mb_y >> 1);
2712 dest_y += 2 * block_s * s->linesize;
2713 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2714 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* dual-prime: put first prediction, then average the second onto it */
2718 if (s->picture_structure == PICT_FRAME) {
2719 for (i = 0; i < 2; i++) {
2721 for (j = 0; j < 2; j++) {
2722 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2724 ref_picture, pix_op,
2725 s->mv[dir][2 * i + j][0],
2726 s->mv[dir][2 * i + j][1],
2729 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2732 for (i = 0; i < 2; i++) {
2733 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2734 0, 0, s->picture_structure != i + 1,
2735 ref_picture, pix_op,
2736 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2737 2 * block_s, mb_y >> 1);
2739 // after put we make avg of the same block
2740 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2742 // opposite parity is always in the same
2743 // frame if this is second field
2744 if (!s->first_field) {
2745 ref_picture = s->current_picture_ptr->f->data;
/**
 * find the lowest MB row referenced in the MVs
 */
/*
 * Returns the lowest macroblock row of the reference picture touched by the
 * current MB's motion vectors — used for frame-threaded decode progress waits.
 * NOTE(review): the switch cases that set `mvs` (and `off` for field MVs) were
 * lost in extraction; visible code kept byte-identical.
 */
2758 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* vectors are scaled to 1/4-pel: half-pel values get one extra shift */
2760 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2761 int my, off, i, mvs;
/* field pictures / GMC take the conservative fallback below */
2763 if (s->picture_structure != PICT_FRAME || s->mcsel)
2766 switch (s->mv_type) {
2780 for (i = 0; i < mvs; i++) {
2781 my = s->mv[dir][i][1]<<qpel_shift;
2782 my_max = FFMAX(my_max, my);
2783 my_min = FFMIN(my_min, my);
/* convert the largest 1/4-pel displacement to whole MB rows (64 = 16 << 2) */
2786 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2788 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* conservative fallback: wait for the whole reference picture */
2790 return s->mb_height-1;
2793 /* put block[] to dest[] */
2794 static inline void put_dct(MpegEncContext *s,
2795 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2797 s->dct_unquantize_intra(s, block, i, qscale);
2798 s->dsp.idct_put (dest, line_size, block);
2801 /* add block[] to dest[] */
2802 static inline void add_dct(MpegEncContext *s,
2803 int16_t *block, int i, uint8_t *dest, int line_size)
2805 if (s->block_last_index[i] >= 0) {
2806 s->dsp.idct_add (dest, line_size, block);
2810 static inline void add_dequant_dct(MpegEncContext *s,
2811 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2813 if (s->block_last_index[i] >= 0) {
2814 s->dct_unquantize_inter(s, block, i, qscale);
2816 s->dsp.idct_add (dest, line_size, block);
2821 * Clean dc, ac, coded_block for the current non-intra MB.
2823 void ff_clean_intra_table_entries(MpegEncContext *s)
2825 int wrap = s->b8_stride;
2826 int xy = s->block_index[0];
2829 s->dc_val[0][xy + 1 ] =
2830 s->dc_val[0][xy + wrap] =
2831 s->dc_val[0][xy + 1 + wrap] = 1024;
2833 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2834 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2835 if (s->msmpeg4_version>=3) {
2836 s->coded_block[xy ] =
2837 s->coded_block[xy + 1 ] =
2838 s->coded_block[xy + wrap] =
2839 s->coded_block[xy + 1 + wrap] = 0;
2842 wrap = s->mb_stride;
2843 xy = s->mb_x + s->mb_y * wrap;
2845 s->dc_val[2][xy] = 1024;
2847 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2848 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2850 s->mbintra_table[xy]= 0;
/* generic function called after a macroblock has been parsed by the
   decoder or after it has been encoded by the encoder.

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir : motion vector direction
   s->mv_type : motion vector type
   s->mv : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)
 */
/*
 * Reconstruct one macroblock: motion compensation plus dequant/IDCT of the
 * residual, specialized at compile time for lowres and mpeg1/2 paths.
 * NOTE(review): extraction dropped many lines (loop headers, braces, the
 * intra/inter branch, readable/dest selection, etc.); the visible statements
 * are preserved byte-identical below — confirm against the original file.
 */
2863 static av_always_inline
2864 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2865 int lowres_flag, int is_mpeg12)
2867 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* hardware acceleration takes over the whole MB */
2870 s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
2871 s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
2875 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2876 /* print DCT coefficients */
2878 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2880 for(j=0; j<64; j++){
2881 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2883 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2887 s->current_picture.qscale_table[mb_xy] = s->qscale;
2889 /* update DC predictors for P macroblocks */
2891 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2892 if(s->mbintra_table[mb_xy])
2893 ff_clean_intra_table_entries(s);
2897 s->last_dc[2] = 128 << s->intra_dc_precision;
2900 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2901 s->mbintra_table[mb_xy]=1;
/* skip the full reconstruction only when encoding and nothing needs the pixels */
2903 if ( (s->flags&CODEC_FLAG_PSNR)
2904 || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
2905 || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2906 uint8_t *dest_y, *dest_cb, *dest_cr;
2907 int dct_linesize, dct_offset;
2908 op_pixels_func (*op_pix)[4];
2909 qpel_mc_func (*op_qpix)[16];
2910 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2911 const int uvlinesize = s->current_picture.f->linesize[1];
2912 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2913 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2915 /* avoid copy if macroblock skipped in last frame too */
2916 /* skip only during decoding as we might trash the buffers during encoding a bit */
2918 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2920 if (s->mb_skipped) {
2922 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2924 } else if(!s->current_picture.reference) {
2927 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT interleaves the two fields: double stride, offset one line */
2931 dct_linesize = linesize << s->interlaced_dct;
2932 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2936 dest_cb= s->dest[1];
2937 dest_cr= s->dest[2];
/* non-readable destinations are reconstructed into a scratchpad first */
2939 dest_y = s->b_scratchpad;
2940 dest_cb= s->b_scratchpad+16*linesize;
2941 dest_cr= s->b_scratchpad+32*linesize;
2945 /* motion handling */
2946 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the referenced reference rows are decoded */
2949 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2950 if (s->mv_dir & MV_DIR_FORWARD) {
2951 ff_thread_await_progress(&s->last_picture_ptr->tf,
2952 ff_MPV_lowest_referenced_row(s, 0),
2955 if (s->mv_dir & MV_DIR_BACKWARD) {
2956 ff_thread_await_progress(&s->next_picture_ptr->tf,
2957 ff_MPV_lowest_referenced_row(s, 1),
/* lowres MC path: put forward prediction, then average backward onto it */
2963 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2965 if (s->mv_dir & MV_DIR_FORWARD) {
2966 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
2967 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2969 if (s->mv_dir & MV_DIR_BACKWARD) {
2970 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
/* full-resolution MC path */
2973 op_qpix = s->me.qpel_put;
2974 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2975 op_pix = s->hdsp.put_pixels_tab;
2977 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2979 if (s->mv_dir & MV_DIR_FORWARD) {
2980 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2981 op_pix = s->hdsp.avg_pixels_tab;
2982 op_qpix= s->me.qpel_avg;
2984 if (s->mv_dir & MV_DIR_BACKWARD) {
2985 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2990 /* skip dequant / idct if we are really late ;) */
2991 if(s->avctx->skip_idct){
2992 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2993 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2994 || s->avctx->skip_idct >= AVDISCARD_ALL)
2998 /* add dct residue */
/* codecs that need dequant here (others dequantized during parsing) */
2999 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
3000 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
3001 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3002 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3003 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3004 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3006 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3007 if (s->chroma_y_shift){
3008 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3009 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3013 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3014 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3015 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3016 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
3019 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3020 add_dct(s, block[0], 0, dest_y , dct_linesize);
3021 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3022 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3023 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3025 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3026 if(s->chroma_y_shift){//Chroma420
3027 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3028 add_dct(s, block[5], 5, dest_cr, uvlinesize);
3031 dct_linesize = uvlinesize << s->interlaced_dct;
3032 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3034 add_dct(s, block[4], 4, dest_cb, dct_linesize);
3035 add_dct(s, block[5], 5, dest_cr, dct_linesize);
3036 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3037 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3038 if(!s->chroma_x_shift){//Chroma444
3039 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3040 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3041 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3042 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3047 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3048 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3051 /* dct only in intra block */
3052 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
3053 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3054 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3055 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3056 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3058 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3059 if(s->chroma_y_shift){
3060 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3061 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3065 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3066 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3067 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3068 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* blocks already dequantized: plain IDCT put */
3072 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
3073 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3074 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
3075 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3077 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3078 if(s->chroma_y_shift){
3079 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
3080 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
3083 dct_linesize = uvlinesize << s->interlaced_dct;
3084 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3086 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
3087 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
3088 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3089 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3090 if(!s->chroma_x_shift){//Chroma444
3091 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3092 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3093 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3094 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* copy the scratchpad reconstruction back into the picture */
3102 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3103 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3104 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
3109 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
3111 if(s->out_format == FMT_MPEG1) {
3112 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
3113 else MPV_decode_mb_internal(s, block, 0, 1);
3116 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
3117 else MPV_decode_mb_internal(s, block, 0, 0);
3120 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3122 ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3123 s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3124 s->first_field, s->low_delay);
3127 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3128 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3129 const int uvlinesize = s->current_picture.f->linesize[1];
3130 const int mb_size= 4 - s->avctx->lowres;
3132 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3133 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3134 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3135 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3136 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3137 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3138 //block_index is not used by mpeg2, so it is not affected by chroma_format
3140 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
3141 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3142 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3144 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3146 if(s->picture_structure==PICT_FRAME){
3147 s->dest[0] += s->mb_y * linesize << mb_size;
3148 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3149 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3151 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3152 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3153 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3154 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3160 * Permute an 8x8 block.
3161 * @param block the block which will be permuted according to the given permutation vector
3162 * @param permutation the permutation vector
3163 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3164 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3165 * (inverse) permutated to scantable order!
3167 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3173 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
3175 for(i=0; i<=last; i++){
3176 const int j= scantable[i];
3181 for(i=0; i<=last; i++){
3182 const int j= scantable[i];
3183 const int perm_j= permutation[j];
3184 block[perm_j]= temp[j];
3188 void ff_mpeg_flush(AVCodecContext *avctx){
3190 MpegEncContext *s = avctx->priv_data;
3192 if(s==NULL || s->picture==NULL)
3195 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3196 ff_mpeg_unref_picture(s, &s->picture[i]);
3197 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3199 ff_mpeg_unref_picture(s, &s->current_picture);
3200 ff_mpeg_unref_picture(s, &s->last_picture);
3201 ff_mpeg_unref_picture(s, &s->next_picture);
3203 s->mb_x= s->mb_y= 0;
3206 s->parse_context.state= -1;
3207 s->parse_context.frame_start_found= 0;
3208 s->parse_context.overread= 0;
3209 s->parse_context.overread_index= 0;
3210 s->parse_context.index= 0;
3211 s->parse_context.last_index= 0;
3212 s->bitstream_buffer_size=0;
3217 * set qscale and update qscale dependent variables.
3219 void ff_set_qscale(MpegEncContext * s, int qscale)
3223 else if (qscale > 31)
3227 s->chroma_qscale= s->chroma_qscale_table[qscale];
3229 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3230 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
3233 void ff_MPV_report_decode_progress(MpegEncContext *s)
3235 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3236 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3239 #if CONFIG_ERROR_RESILIENCE
3240 void ff_mpeg_set_erpic(ERPicture *dst, Picture *src)
3244 memset(dst, 0, sizeof(*dst));
3254 for (i = 0; i < 2; i++) {
3255 dst->motion_val[i] = src->motion_val[i];
3256 dst->ref_index[i] = src->ref_index[i];
3259 dst->mb_type = src->mb_type;
3260 dst->field_picture = src->field_picture;
3263 void ff_mpeg_er_frame_start(MpegEncContext *s)
3265 ERContext *er = &s->er;
3267 ff_mpeg_set_erpic(&er->cur_pic, s->current_picture_ptr);
3268 ff_mpeg_set_erpic(&er->next_pic, s->next_picture_ptr);
3269 ff_mpeg_set_erpic(&er->last_pic, s->last_picture_ptr);
3271 er->pp_time = s->pp_time;
3272 er->pb_time = s->pb_time;
3273 er->quarter_sample = s->quarter_sample;
3274 er->partitioned_frame = s->partitioned_frame;
3276 ff_er_frame_start(er);
3278 #endif /* CONFIG_ERROR_RESILIENCE */