2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
37 #include "h264chroma.h"
40 #include "mpegutils.h"
41 #include "mpegvideo.h"
/* Default luma->chroma qscale mapping: identity (chroma qscale == luma qscale).
 * NOTE(review): the closing "};" is not visible in this extract — lines appear
 * to be missing; confirm against the full file. */
48 static const uint8_t ff_default_chroma_qscale_table[32] = {
49 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
50 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
51 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 intra DC scale: constant 8 for every qscale index.
 * Also reused as entry 0 of ff_mpeg2_dc_scale_table below (MPEG-2
 * intra_dc_precision == 0). NOTE(review): closing "};" not visible here. */
54 const uint8_t ff_mpeg1_dc_scale_table[128] = {
55 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
56 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
57 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 intra DC scale, intra_dc_precision == 1: constant 4.
 * NOTE(review): closing "};" not visible here. */
66 static const uint8_t mpeg2_dc_scale_table1[128] = {
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
68 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
69 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 intra DC scale, intra_dc_precision == 2: constant 2.
 * NOTE(review): closing "};" not visible here. */
78 static const uint8_t mpeg2_dc_scale_table2[128] = {
79 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
80 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 intra DC scale, intra_dc_precision == 3: constant 1 (no scaling).
 * NOTE(review): closing "};" not visible here. */
90 static const uint8_t mpeg2_dc_scale_table3[128] = {
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
92 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
93 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Lookup of the four DC scale tables above, indexed by the MPEG-2
 * intra_dc_precision field (0..3). Entry 0 reuses the MPEG-1 table.
 * NOTE(review): closing "};" not visible here. */
102 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
103 ff_mpeg1_dc_scale_table,
104 mpeg2_dc_scale_table1,
105 mpeg2_dc_scale_table2,
106 mpeg2_dc_scale_table3,
/* Alternate horizontal coefficient scan order (64 entries, raster indices).
 * NOTE(review): closing "};" not visible here. */
109 const uint8_t ff_alternate_horizontal_scan[64] = {
110 0, 1, 2, 3, 8, 9, 16, 17,
111 10, 11, 4, 5, 6, 7, 15, 14,
112 13, 12, 19, 18, 24, 25, 32, 33,
113 26, 27, 20, 21, 22, 23, 28, 29,
114 30, 31, 34, 35, 40, 41, 48, 49,
115 42, 43, 36, 37, 38, 39, 44, 45,
116 46, 47, 50, 51, 56, 57, 58, 59,
117 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical coefficient scan order, used when alternate_scan is set
 * (see ff_dct_common_init below). NOTE(review): closing "};" not visible. */
120 const uint8_t ff_alternate_vertical_scan[64] = {
121 0, 8, 16, 24, 1, 9, 2, 10,
122 17, 25, 32, 40, 48, 56, 57, 49,
123 41, 33, 26, 18, 3, 11, 4, 12,
124 19, 27, 34, 42, 50, 58, 35, 43,
125 51, 59, 20, 28, 5, 13, 6, 14,
126 21, 29, 36, 44, 52, 60, 37, 45,
127 53, 61, 22, 30, 7, 15, 23, 31,
128 38, 46, 54, 62, 39, 47, 55, 63,
/* Dequantize an intra block (MPEG-1 rules), in place.
 * block: coefficients in permuted scan order; n: block index (n < 4 = luma,
 * uses y_dc_scale, else c_dc_scale); qscale: quantizer scale.
 * The (level - 1) | 1 step is the MPEG-1 "oddification" of the result.
 * NOTE(review): several statements (level load, sign branches, saturation,
 * closing braces) are missing from this extract — do not edit from here. */
131 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
132 int16_t *block, int n, int qscale)
134 int i, level, nCoeffs;
135 const uint16_t *quant_matrix;
137 nCoeffs= s->block_last_index[n];
139 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
140 /* XXX: only mpeg1 */
141 quant_matrix = s->intra_matrix;
142 for(i=1;i<=nCoeffs;i++) {
143 int j= s->intra_scantable.permutated[i];
148 level = (int)(level * qscale * quant_matrix[j]) >> 3;
149 level = (level - 1) | 1;
152 level = (int)(level * qscale * quant_matrix[j]) >> 3;
153 level = (level - 1) | 1;
/* Dequantize an inter block (MPEG-1 rules), in place. Unlike the intra path
 * the loop starts at coefficient 0 (no DC special case) and uses the
 * (2*level + 1) reconstruction with the inter matrix, >> 4.
 * NOTE(review): level load / sign branches / braces are missing from this
 * extract — do not edit from here. */
160 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
161 int16_t *block, int n, int qscale)
163 int i, level, nCoeffs;
164 const uint16_t *quant_matrix;
166 nCoeffs= s->block_last_index[n];
168 quant_matrix = s->inter_matrix;
169 for(i=0; i<=nCoeffs; i++) {
170 int j= s->intra_scantable.permutated[i];
175 level = (((level << 1) + 1) * qscale *
176 ((int) (quant_matrix[j]))) >> 4;
177 level = (level - 1) | 1;
180 level = (((level << 1) + 1) * qscale *
181 ((int) (quant_matrix[j]))) >> 4;
182 level = (level - 1) | 1;
/* Dequantize an intra block (MPEG-2 rules), in place. With alternate_scan
 * all 64 coefficients are processed regardless of block_last_index; no
 * oddification step (that is MPEG-1 only).
 * NOTE(review): level load / sign branches / braces are missing from this
 * extract — do not edit from here. */
189 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
190 int16_t *block, int n, int qscale)
192 int i, level, nCoeffs;
193 const uint16_t *quant_matrix;
195 if(s->alternate_scan) nCoeffs= 63;
196 else nCoeffs= s->block_last_index[n];
198 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
199 quant_matrix = s->intra_matrix;
200 for(i=1;i<=nCoeffs;i++) {
201 int j= s->intra_scantable.permutated[i];
206 level = (int)(level * qscale * quant_matrix[j]) >> 3;
209 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of dct_unquantize_mpeg2_intra_c, installed instead of it
 * when CODEC_FLAG_BITEXACT is set (see ff_dct_common_init). Presumably the
 * missing lines add the mismatch-control / sum parity handling — confirm
 * against the full file; this extract drops several statements. */
216 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
217 int16_t *block, int n, int qscale)
219 int i, level, nCoeffs;
220 const uint16_t *quant_matrix;
223 if(s->alternate_scan) nCoeffs= 63;
224 else nCoeffs= s->block_last_index[n];
226 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
228 quant_matrix = s->intra_matrix;
229 for(i=1;i<=nCoeffs;i++) {
230 int j= s->intra_scantable.permutated[i];
235 level = (int)(level * qscale * quant_matrix[j]) >> 3;
238 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Dequantize an inter block (MPEG-2 rules), in place: (2*level + 1) with the
 * inter matrix, >> 4; all 64 coefficients when alternate_scan is set.
 * NOTE(review): level load / sign branches / braces are missing from this
 * extract — do not edit from here. */
247 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
248 int16_t *block, int n, int qscale)
250 int i, level, nCoeffs;
251 const uint16_t *quant_matrix;
254 if(s->alternate_scan) nCoeffs= 63;
255 else nCoeffs= s->block_last_index[n];
257 quant_matrix = s->inter_matrix;
258 for(i=0; i<=nCoeffs; i++) {
259 int j= s->intra_scantable.permutated[i];
264 level = (((level << 1) + 1) * qscale *
265 ((int) (quant_matrix[j]))) >> 4;
268 level = (((level << 1) + 1) * qscale *
269 ((int) (quant_matrix[j]))) >> 4;
278 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
279 int16_t *block, int n, int qscale)
281 int i, level, qmul, qadd;
284 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
289 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
290 qadd = (qscale - 1) | 1;
297 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
299 for(i=1; i<=nCoeffs; i++) {
303 level = level * qmul - qadd;
305 level = level * qmul + qadd;
/* Dequantize an inter block (H.263 rules), in place. Same qmul/qadd scheme as
 * the intra variant but the loop starts at coefficient 0 (no DC special case).
 * NOTE(review): qmul assignment, sign branches and braces are missing from
 * this extract — do not edit from here. */
312 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
313 int16_t *block, int n, int qscale)
315 int i, level, qmul, qadd;
318 av_assert2(s->block_last_index[n]>=0);
320 qadd = (qscale - 1) | 1;
323 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
325 for(i=0; i<=nCoeffs; i++) {
329 level = level * qmul - qadd;
331 level = level * qmul + qadd;
/* Error-resilience callback (installed as er->decode_mb in init_er): set up
 * the MpegEncContext for one macroblock (mv/type/skip state, block indexes,
 * dest pointers into the current picture) and decode it via ff_MPV_decode_mb.
 * NOTE(review): mb_x/mb_y assignments and surrounding statements are missing
 * from this extract. */
338 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
340 int mb_x, int mb_y, int mb_intra, int mb_skipped)
342 MpegEncContext *s = opaque;
345 s->mv_type = mv_type;
346 s->mb_intra = mb_intra;
347 s->mb_skipped = mb_skipped;
350 memcpy(s->mv, mv, sizeof(*mv));
352 ff_init_block_index(s);
353 ff_update_block_index(s);
355 s->dsp.clear_blocks(s->block[0]);
/* dest pointers: luma at 16x16 granularity, chroma shifted by the
 * chroma subsampling factors */
357 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
358 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
359 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
362 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
363 ff_MPV_decode_mb(s, s->block);
/* Debug stub used when FF_DEBUG_NOMC is set: fills a 16-wide row with mid-gray
 * (128) instead of performing motion compensation; src is ignored.
 * NOTE(review): the loop over h is missing from this extract. */
366 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
369 memset(dst + h*linesize, 128, 16);
/* 8-wide companion of gray16 for the chroma pixel tables; src is ignored.
 * NOTE(review): the loop over h is missing from this extract. */
372 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
375 memset(dst + h*linesize, 128, 8);
378 /* init common dct for both encoder and decoder */
/* Initializes DSP/chroma/hpel/videodsp contexts, installs the C dequantizers
 * (arch-specific inits below may override them), and builds the permuted
 * scan tables according to the IDCT permutation.
 * NOTE(review): the #if/#endif guards around the arch init calls and some
 * braces are missing from this extract. */
379 av_cold int ff_dct_common_init(MpegEncContext *s)
381 ff_dsputil_init(&s->dsp, s->avctx);
382 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
383 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
384 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* FF_DEBUG_NOMC: replace motion compensation with gray fills (see gray16/8) */
386 if (s->avctx->debug & FF_DEBUG_NOMC) {
388 for (i=0; i<4; i++) {
389 s->hdsp.avg_pixels_tab[0][i] = gray16;
390 s->hdsp.put_pixels_tab[0][i] = gray16;
391 s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
393 s->hdsp.avg_pixels_tab[1][i] = gray8;
394 s->hdsp.put_pixels_tab[1][i] = gray8;
395 s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
399 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
400 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
401 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
402 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
403 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
404 if (s->flags & CODEC_FLAG_BITEXACT)
405 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
406 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
409 ff_MPV_common_init_axp(s);
411 ff_MPV_common_init_arm(s);
413 ff_MPV_common_init_ppc(s);
415 ff_MPV_common_init_x86(s);
417 /* load & permutate scantables
418 * note: only wmv uses different ones
420 if (s->alternate_scan) {
421 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
422 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
424 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
425 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
427 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
428 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocates the linesize-dependent scratch buffers (edge emulation, motion
 * estimation scratchpads). Skipped for hwaccel/VDPAU since no software MC is
 * done. On failure frees edge_emu_buffer and returns AVERROR(ENOMEM).
 * NOTE(review): the minimum-linesize check condition and the goto labels are
 * missing from this extract. */
433 static int frame_size_alloc(MpegEncContext *s, int linesize)
435 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
437 if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
441 av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
442 return AVERROR_PATCHWELCOME;
445 // edge emu needs blocksize + filter length - 1
446 // (= 17x17 for halfpel / 21x21 for h264)
447 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
448 // at uvlinesize. It supports only YUV420 so 24x24 is enough
449 // linesize * interlaced * MBsize
450 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
453 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
/* the ME/RD/B/OBMC scratchpads all alias the same allocation */
455 s->me.temp = s->me.scratchpad;
456 s->rd_scratchpad = s->me.scratchpad;
457 s->b_scratchpad = s->me.scratchpad;
458 s->obmc_scratchpad = s->me.scratchpad + 16;
462 av_freep(&s->edge_emu_buffer);
463 return AVERROR(ENOMEM);
467 * Allocate a frame buffer
/* Allocates pic->f via the thread-aware get_buffer (with EDGE_WIDTH padding
 * when encoding), validates strides against the context, allocates hwaccel
 * private data if needed, and (re)allocates the scratch buffers sized from
 * linesize. Returns 0 on success, negative AVERROR on failure.
 * NOTE(review): several if/else lines, gotos and braces are missing from
 * this extract — do not edit from here. */
469 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
471 int edges_needed = av_codec_is_encoder(s->avctx->codec);
/* WM image/screen codecs use internal buffers of different dimensions, so
 * the user get_buffer callback path is bypassed for them */
475 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
476 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
477 s->codec_id != AV_CODEC_ID_MSS2) {
479 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
480 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
483 r = ff_thread_get_buffer(s->avctx, &pic->tf,
484 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
486 pic->f->width = s->avctx->width;
487 pic->f->height = s->avctx->height;
488 pic->f->format = s->avctx->pix_fmt;
489 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
492 if (r < 0 || !pic->f->buf[0]) {
493 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* shift data pointers inward past the allocated edges and restore the
 * nominal dimensions */
500 for (i = 0; pic->f->data[i]; i++) {
501 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
502 pic->f->linesize[i] +
503 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
504 pic->f->data[i] += offset;
506 pic->f->width = s->avctx->width;
507 pic->f->height = s->avctx->height;
510 if (s->avctx->hwaccel) {
511 assert(!pic->hwaccel_picture_private);
512 if (s->avctx->hwaccel->frame_priv_data_size) {
513 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
514 if (!pic->hwaccel_priv_buf) {
515 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
518 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* a stride change between frames would invalidate all derived tables */
522 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
523 s->uvlinesize != pic->f->linesize[1])) {
524 av_log(s->avctx, AV_LOG_ERROR,
525 "get_buffer() failed (stride changed)\n");
526 ff_mpeg_unref_picture(s, pic);
530 if (pic->f->linesize[1] != pic->f->linesize[2]) {
531 av_log(s->avctx, AV_LOG_ERROR,
532 "get_buffer() failed (uv stride mismatch)\n");
533 ff_mpeg_unref_picture(s, pic);
537 if (!s->edge_emu_buffer &&
538 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
539 av_log(s->avctx, AV_LOG_ERROR,
540 "get_buffer() failed to allocate context scratch buffers.\n");
541 ff_mpeg_unref_picture(s, pic);
/* Frees (unrefs) all per-picture side-data buffers allocated by
 * alloc_picture_tables and resets the recorded allocation dimensions. */
548 void ff_free_picture_tables(Picture *pic)
552 pic->alloc_mb_width =
553 pic->alloc_mb_height = 0;
555 av_buffer_unref(&pic->mb_var_buf);
556 av_buffer_unref(&pic->mc_mb_var_buf);
557 av_buffer_unref(&pic->mb_mean_buf);
558 av_buffer_unref(&pic->mbskip_table_buf);
559 av_buffer_unref(&pic->qscale_table_buf);
560 av_buffer_unref(&pic->mb_type_buf);
562 for (i = 0; i < 2; i++) {
563 av_buffer_unref(&pic->motion_val_buf[i]);
564 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocates the per-picture side-data buffers (skip/qscale/mb-type always;
 * variance/mean when encoding; motion vectors and ref indices for H.263-style
 * codecs, encoders, or when motion-vector debugging is enabled). Records the
 * mb dimensions so ff_alloc_picture can detect size changes.
 * NOTE(review): the "if (s->encoding)" guard line and some braces are
 * missing from this extract. */
568 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
570 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
571 const int mb_array_size = s->mb_stride * s->mb_height;
572 const int b8_array_size = s->b8_stride * s->mb_height * 2;
576 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
577 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
578 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
580 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
581 return AVERROR(ENOMEM);
584 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
585 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
586 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
587 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
588 return AVERROR(ENOMEM);
591 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
592 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
593 int ref_index_size = 4 * mb_array_size;
595 for (i = 0; mv_size && i < 2; i++) {
596 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
597 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
598 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
599 return AVERROR(ENOMEM);
603 pic->alloc_mb_width = s->mb_width;
604 pic->alloc_mb_height = s->mb_height;
/* Makes every per-picture table buffer writable (copy-on-write via
 * av_buffer_make_writable) so the decoder can modify shared tables.
 * NOTE(review): part of the MAKE_WRITABLE macro body (the null check and
 * error return) is missing from this extract. */
609 static int make_tables_writable(Picture *pic)
612 #define MAKE_WRITABLE(table) \
615 (ret = av_buffer_make_writable(&pic->table)) < 0)\
619 MAKE_WRITABLE(mb_var_buf);
620 MAKE_WRITABLE(mc_mb_var_buf);
621 MAKE_WRITABLE(mb_mean_buf);
622 MAKE_WRITABLE(mbskip_table_buf);
623 MAKE_WRITABLE(qscale_table_buf);
624 MAKE_WRITABLE(mb_type_buf);
626 for (i = 0; i < 2; i++) {
627 MAKE_WRITABLE(motion_val_buf[i]);
628 MAKE_WRITABLE(ref_index_buf[i]);
635 * Allocate a Picture.
636 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Frees stale tables if the mb dimensions changed, allocates the frame
 * buffer (unless shared), records the strides on the context, then
 * (re)allocates or COWs the side-data tables and derives the user-facing
 * pointers (qscale_table/mb_type are offset by 2*mb_stride + 1 so row -1 and
 * column -1 are addressable). On failure everything is unreffed and
 * AVERROR(ENOMEM) is returned.
 * NOTE(review): the "if (shared)" branch lines, error gotos and braces are
 * missing from this extract — do not edit from here. */
638 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
642 if (pic->qscale_table_buf)
643 if ( pic->alloc_mb_width != s->mb_width
644 || pic->alloc_mb_height != s->mb_height)
645 ff_free_picture_tables(pic);
648 av_assert0(pic->f->data[0]);
651 av_assert0(!pic->f->buf[0]);
653 if (alloc_frame_buffer(s, pic) < 0)
656 s->linesize = pic->f->linesize[0];
657 s->uvlinesize = pic->f->linesize[1];
660 if (!pic->qscale_table_buf)
661 ret = alloc_picture_tables(s, pic);
663 ret = make_tables_writable(pic);
668 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
669 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
670 pic->mb_mean = pic->mb_mean_buf->data;
673 pic->mbskip_table = pic->mbskip_table_buf->data;
674 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
675 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
677 if (pic->motion_val_buf[0]) {
678 for (i = 0; i < 2; i++) {
679 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
680 pic->ref_index[i] = pic->ref_index_buf[i]->data;
686 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
687 ff_mpeg_unref_picture(s, pic);
688 ff_free_picture_tables(pic);
689 return AVERROR(ENOMEM);
693 * Deallocate a picture.
/* Releases the frame buffer and hwaccel data, then zeroes every Picture
 * field located after mb_mean (computed via offsetof) so the table pointers
 * up to and including mb_mean survive for reuse. */
695 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
697 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
700 /* WM Image / Screen codecs allocate internal buffers with different
701 * dimensions / colorspaces; ignore user-defined callbacks for these. */
702 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
703 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
704 s->codec_id != AV_CODEC_ID_MSS2)
705 ff_thread_release_buffer(s->avctx, &pic->tf);
707 av_frame_unref(pic->f);
709 av_buffer_unref(&pic->hwaccel_priv_buf);
711 if (pic->needs_realloc)
712 ff_free_picture_tables(pic);
714 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Refs src's table buffers into dst (replacing any that point at different
 * underlying buffers) and copies the derived table pointers and allocation
 * dimensions. On a failed ref all dst tables are freed and AVERROR(ENOMEM)
 * is returned.
 * NOTE(review): part of the UPDATE_TABLE macro body (the src->table null
 * check) is missing from this extract. */
717 static int update_picture_tables(Picture *dst, Picture *src)
721 #define UPDATE_TABLE(table)\
724 (!dst->table || dst->table->buffer != src->table->buffer)) {\
725 av_buffer_unref(&dst->table);\
726 dst->table = av_buffer_ref(src->table);\
728 ff_free_picture_tables(dst);\
729 return AVERROR(ENOMEM);\
734 UPDATE_TABLE(mb_var_buf);
735 UPDATE_TABLE(mc_mb_var_buf);
736 UPDATE_TABLE(mb_mean_buf);
737 UPDATE_TABLE(mbskip_table_buf);
738 UPDATE_TABLE(qscale_table_buf);
739 UPDATE_TABLE(mb_type_buf);
740 for (i = 0; i < 2; i++) {
741 UPDATE_TABLE(motion_val_buf[i]);
742 UPDATE_TABLE(ref_index_buf[i]);
745 dst->mb_var = src->mb_var;
746 dst->mc_mb_var = src->mc_mb_var;
747 dst->mb_mean = src->mb_mean;
748 dst->mbskip_table = src->mbskip_table;
749 dst->qscale_table = src->qscale_table;
750 dst->mb_type = src->mb_type;
751 for (i = 0; i < 2; i++) {
752 dst->motion_val[i] = src->motion_val[i];
753 dst->ref_index[i] = src->ref_index[i];
756 dst->alloc_mb_width = src->alloc_mb_width;
757 dst->alloc_mb_height = src->alloc_mb_height;
/* Makes dst a reference to src: refs the threaded frame, the table buffers
 * and the hwaccel private buffer, then copies the scalar bookkeeping fields.
 * dst must be empty on entry (asserted). On any failure dst is fully
 * unreffed before returning the error.
 * NOTE(review): error-check lines after each ref call and the goto labels
 * are missing from this extract. */
762 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
766 av_assert0(!dst->f->buf[0]);
767 av_assert0(src->f->buf[0]);
771 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
775 ret = update_picture_tables(dst, src);
779 if (src->hwaccel_picture_private) {
780 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
781 if (!dst->hwaccel_priv_buf)
783 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
786 dst->field_picture = src->field_picture;
787 dst->mb_var_sum = src->mb_var_sum;
788 dst->mc_mb_var_sum = src->mc_mb_var_sum;
789 dst->b_frame_score = src->b_frame_score;
790 dst->needs_realloc = src->needs_realloc;
791 dst->reference = src->reference;
792 dst->shared = src->shared;
796 ff_mpeg_unref_picture(s, dst);
/* Swaps the U and V pblock pointers (used for the "VCR2" codec tag, see
 * init_duplicate_context / ff_update_duplicate_context).
 * NOTE(review): the temporary-swap lines are missing from this extract;
 * only the pblocks[4] = pblocks[5] assignment is visible. */
800 static void exchange_uv(MpegEncContext *s)
805 s->pblocks[4] = s->pblocks[5];
/* Allocates the per-slice-thread state: ME map/score map, optional DCT noise
 * reduction error sum, the 12 transform blocks (+pblock pointers, U/V swapped
 * for VCR2), and the H.263 AC prediction values. Returns -1 on allocation
 * failure; partially allocated members are freed later by ff_MPV_common_end.
 * NOTE(review): encoder-only guards and some braces are missing from this
 * extract. */
809 static int init_duplicate_context(MpegEncContext *s)
811 int y_size = s->b8_stride * (2 * s->mb_height + 1);
812 int c_size = s->mb_stride * (s->mb_height + 1);
813 int yc_size = y_size + 2 * c_size;
816 if (s->mb_height & 1)
817 yc_size += 2*s->b8_stride + 2*s->mb_stride;
824 s->obmc_scratchpad = NULL;
827 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
828 ME_MAP_SIZE * sizeof(uint32_t), fail)
829 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
830 ME_MAP_SIZE * sizeof(uint32_t), fail)
831 if (s->avctx->noise_reduction) {
832 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
833 2 * 64 * sizeof(int), fail)
836 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
837 s->block = s->blocks[0];
839 for (i = 0; i < 12; i++) {
840 s->pblocks[i] = &s->block[i];
842 if (s->avctx->codec_tag == AV_RL32("VCR2"))
845 if (s->out_format == FMT_H263) {
847 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
848 yc_size * sizeof(int16_t) * 16, fail);
/* ac_val layout: [0] luma, [1]/[2] chroma planes, offset past the -1 border */
849 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
850 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
851 s->ac_val[2] = s->ac_val[1] + c_size;
856 return -1; // free() through ff_MPV_common_end()
/* Frees everything allocated by init_duplicate_context / frame_size_alloc
 * for one slice context. obmc_scratchpad aliases me.scratchpad (see
 * frame_size_alloc), so it is only NULLed, not freed. */
859 static void free_duplicate_context(MpegEncContext *s)
864 av_freep(&s->edge_emu_buffer);
865 av_freep(&s->me.scratchpad);
869 s->obmc_scratchpad = NULL;
871 av_freep(&s->dct_error_sum);
872 av_freep(&s->me.map);
873 av_freep(&s->me.score_map);
874 av_freep(&s->blocks);
875 av_freep(&s->ac_val_base);
/* Copies the per-thread pointer/state members from src into bak so they can
 * survive the memcpy in ff_update_duplicate_context.
 * NOTE(review): most COPY(...) lines are missing from this extract; only a
 * subset is visible. */
879 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
881 #define COPY(a) bak->a = src->a
882 COPY(edge_emu_buffer);
887 COPY(obmc_scratchpad);
894 COPY(me.map_generation);
/* Synchronizes a slice-thread context with the main one: whole-struct memcpy
 * from src, with dst's own per-thread members saved to a stack backup and
 * restored afterwards; pblock pointers are rebuilt (U/V swapped for VCR2) and
 * linesize-dependent scratch buffers reallocated if missing.
 * NOTE(review): the bak declaration, the exchange_uv call and the final
 * return are missing from this extract. */
906 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
910 // FIXME copy only needed parts
912 backup_duplicate_context(&bak, dst);
913 memcpy(dst, src, sizeof(MpegEncContext));
914 backup_duplicate_context(dst, &bak);
915 for (i = 0; i < 12; i++) {
916 dst->pblocks[i] = &dst->block[i];
918 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
920 if (!dst->edge_emu_buffer &&
921 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
922 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
923 "scratch buffers.\n");
926 // STOP_TIMER("update_duplicate_context")
927 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading state transfer: copies decoder state from src (s1) into
 * dst (s), (re)initializing the destination context on first use or on a
 * dimension change, then refs all pictures, copies timing / bug-workaround /
 * MPEG-4 / interlacing state and the pending bitstream buffer.
 * NOTE(review): early-return guards, several braces and error checks are
 * missing from this extract — do not edit from here. */
931 int ff_mpeg_update_thread_context(AVCodecContext *dst,
932 const AVCodecContext *src)
935 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
942 // FIXME can parameters change on I-frames?
943 // in that case dst may need a reinit
944 if (!s->context_initialized) {
945 memcpy(s, s1, sizeof(MpegEncContext));
/* the bitstream buffer must not be shared between threads */
948 s->bitstream_buffer = NULL;
949 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
951 if (s1->context_initialized){
952 // s->picture_range_start += MAX_PICTURE_COUNT;
953 // s->picture_range_end += MAX_PICTURE_COUNT;
954 if((ret = ff_MPV_common_init(s)) < 0){
955 memset(s, 0, sizeof(MpegEncContext));
962 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
963 s->context_reinit = 0;
964 s->height = s1->height;
965 s->width = s1->width;
966 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
970 s->avctx->coded_height = s1->avctx->coded_height;
971 s->avctx->coded_width = s1->avctx->coded_width;
972 s->avctx->width = s1->avctx->width;
973 s->avctx->height = s1->avctx->height;
975 s->coded_picture_number = s1->coded_picture_number;
976 s->picture_number = s1->picture_number;
978 av_assert0(!s->picture || s->picture != s1->picture);
980 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
981 ff_mpeg_unref_picture(s, &s->picture[i]);
982 if (s1->picture[i].f->buf[0] &&
983 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
987 #define UPDATE_PICTURE(pic)\
989 ff_mpeg_unref_picture(s, &s->pic);\
990 if (s1->pic.f && s1->pic.f->buf[0])\
991 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
993 ret = update_picture_tables(&s->pic, &s1->pic);\
998 UPDATE_PICTURE(current_picture);
999 UPDATE_PICTURE(last_picture);
1000 UPDATE_PICTURE(next_picture);
/* rebase the *_ptr members from s1's picture array into s's */
1002 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1003 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1004 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1006 // Error/bug resilience
1007 s->next_p_frame_damaged = s1->next_p_frame_damaged;
1008 s->workaround_bugs = s1->workaround_bugs;
1009 s->padding_bug_score = s1->padding_bug_score;
1011 // MPEG4 timing info
1012 memcpy(&s->last_time_base, &s1->last_time_base,
1013 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1014 (char *) &s1->last_time_base);
1017 s->max_b_frames = s1->max_b_frames;
1018 s->low_delay = s1->low_delay;
1019 s->droppable = s1->droppable;
1021 // DivX handling (doesn't work)
1022 s->divx_packed = s1->divx_packed;
1024 if (s1->bitstream_buffer) {
1025 if (s1->bitstream_buffer_size +
1026 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
1027 av_fast_malloc(&s->bitstream_buffer,
1028 &s->allocated_bitstream_buffer_size,
1029 s1->allocated_bitstream_buffer_size);
1030 s->bitstream_buffer_size = s1->bitstream_buffer_size;
1031 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1032 s1->bitstream_buffer_size);
1033 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1034 FF_INPUT_BUFFER_PADDING_SIZE);
1037 // linesize dependend scratch buffer allocation
1038 if (!s->edge_emu_buffer)
1040 if (frame_size_alloc(s, s1->linesize) < 0) {
1041 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1042 "scratch buffers.\n");
1043 return AVERROR(ENOMEM);
1046 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1047 "be allocated due to unknown size.\n");
1050 // MPEG2/interlacing info
1051 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1052 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1054 if (!s1->first_field) {
1055 s->last_pict_type = s1->pict_type;
1056 if (s1->current_picture_ptr)
1057 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1064 * Set the given MpegEncContext to common defaults
1065 * (same for encoding and decoding).
1066 * The changed fields will not depend upon the
1067 * prior state of the MpegEncContext.
1069 void ff_MPV_common_defaults(MpegEncContext *s)
/* MPEG-1 DC scale and identity chroma-qscale tables are the shared defaults */
1071 s->y_dc_scale_table =
1072 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1073 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1074 s->progressive_frame = 1;
1075 s->progressive_sequence = 1;
1076 s->picture_structure = PICT_FRAME;
1078 s->coded_picture_number = 0;
1079 s->picture_number = 0;
1084 s->slice_context_count = 1;
1088 * Set the given MpegEncContext to defaults for decoding.
1089 * the changed fields will not depend upon
1090 * the prior state of the MpegEncContext.
1092 void ff_MPV_decode_defaults(MpegEncContext *s)
/* currently identical to the common defaults; decoder-specific defaults
 * would go here */
1094 ff_MPV_common_defaults(s);
/* Initializes the error-resilience context from the MpegEncContext: copies
 * the geometry, allocates the temp/status tables, shares the skip/intra/dc
 * tables and installs mpeg_er_decode_mb as the concealment callback. Frees
 * both allocations and returns AVERROR(ENOMEM) on failure.
 * NOTE(review): the success return and goto label are missing from this
 * extract. */
1097 static int init_er(MpegEncContext *s)
1099 ERContext *er = &s->er;
1100 int mb_array_size = s->mb_height * s->mb_stride;
1103 er->avctx = s->avctx;
1106 er->mb_index2xy = s->mb_index2xy;
1107 er->mb_num = s->mb_num;
1108 er->mb_width = s->mb_width;
1109 er->mb_height = s->mb_height;
1110 er->mb_stride = s->mb_stride;
1111 er->b8_stride = s->b8_stride;
1113 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1114 er->error_status_table = av_mallocz(mb_array_size);
1115 if (!er->er_temp_buffer || !er->error_status_table)
1118 er->mbskip_table = s->mbskip_table;
1119 er->mbintra_table = s->mbintra_table;
1121 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1122 er->dc_val[i] = s->dc_val[i];
1124 er->decode_mb = mpeg_er_decode_mb;
1129 av_freep(&er->er_temp_buffer);
1130 av_freep(&er->error_status_table);
1131 return AVERROR(ENOMEM);
1135 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Computes the macroblock geometry (mb_width/stride, b8_stride, edge
 * positions), builds mb_index2xy, and allocates every resolution-dependent
 * table: encoder MV tables, mb-type/lambda/complexity tables, interlaced
 * direct-mode tables (MPEG-4 / interlaced ME), H.263 coded-block and
 * cbp/pred tables, DC prediction values (reset to 1024), intra and skip
 * tables. All failures funnel to a label returning AVERROR(ENOMEM).
 * NOTE(review): several encoder-only guards, braces and the success return
 * are missing from this extract — do not edit from here. */
1137 static int init_context_frame(MpegEncContext *s)
1139 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1141 s->mb_width = (s->width + 15) / 16;
1142 s->mb_stride = s->mb_width + 1;
1143 s->b8_stride = s->mb_width * 2 + 1;
1144 mb_array_size = s->mb_height * s->mb_stride;
1145 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1147 /* set default edge pos, will be overridden
1148 * in decode_header if needed */
1149 s->h_edge_pos = s->mb_width * 16;
1150 s->v_edge_pos = s->mb_height * 16;
1152 s->mb_num = s->mb_width * s->mb_height;
1157 s->block_wrap[3] = s->b8_stride;
1159 s->block_wrap[5] = s->mb_stride;
1161 y_size = s->b8_stride * (2 * s->mb_height + 1);
1162 c_size = s->mb_stride * (s->mb_height + 1);
1163 yc_size = y_size + 2 * c_size;
1165 if (s->mb_height & 1)
1166 yc_size += 2*s->b8_stride + 2*s->mb_stride;
1168 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
1169 for (y = 0; y < s->mb_height; y++)
1170 for (x = 0; x < s->mb_width; x++)
1171 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1173 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1176 /* Allocate MV tables */
1177 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1178 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1179 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1180 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1181 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1182 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
/* user-facing MV table pointers are offset so row/column -1 exist */
1183 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1184 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1185 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1186 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1187 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1188 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1190 /* Allocate MB type table */
1191 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1193 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1195 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1196 mb_array_size * sizeof(float), fail);
1197 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1198 mb_array_size * sizeof(float), fail);
1202 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1203 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1204 /* interlaced direct mode decoding tables */
1205 for (i = 0; i < 2; i++) {
1207 for (j = 0; j < 2; j++) {
1208 for (k = 0; k < 2; k++) {
1209 FF_ALLOCZ_OR_GOTO(s->avctx,
1210 s->b_field_mv_table_base[i][j][k],
1211 mv_table_size * 2 * sizeof(int16_t),
1213 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1216 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1217 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1218 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1220 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1223 if (s->out_format == FMT_H263) {
1225 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1226 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1228 /* cbp, ac_pred, pred_dir */
1229 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1230 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1233 if (s->h263_pred || s->h263_plus || !s->encoding) {
1235 // MN: we need these for error resilience of intra-frames
1236 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1237 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1238 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1239 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 = neutral DC predictor (2048 / 2 for 8-bit) */
1240 for (i = 0; i < yc_size; i++)
1241 s->dc_val_base[i] = 1024;
1244 /* which mb is a intra block */
1245 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1246 memset(s->mbintra_table, 1, mb_array_size);
1248 /* init macroblock skip table */
1249 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1250 // Note the + 1 is for a quicker mpeg4 slice_end detection
1254 return AVERROR(ENOMEM);
 * Initialize the common structure shared by both the encoder and the decoder.
 * This assumes that some variables, such as width/height, are already set.
1261 av_cold int ff_MPV_common_init(MpegEncContext *s)
1264 int nb_slices = (HAVE_THREADS &&
1265 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1266 s->avctx->thread_count : 1;
1268 if (s->encoding && s->avctx->slices)
1269 nb_slices = s->avctx->slices;
1271 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1272 s->mb_height = (s->height + 31) / 32 * 2;
1274 s->mb_height = (s->height + 15) / 16;
1276 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1277 av_log(s->avctx, AV_LOG_ERROR,
1278 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1282 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1285 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1287 max_slices = MAX_THREADS;
1288 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1289 " reducing to %d\n", nb_slices, max_slices);
1290 nb_slices = max_slices;
1293 if ((s->width || s->height) &&
1294 av_image_check_size(s->width, s->height, 0, s->avctx))
1297 ff_dct_common_init(s);
1299 s->flags = s->avctx->flags;
1300 s->flags2 = s->avctx->flags2;
1302 /* set chroma shifts */
1303 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1305 &s->chroma_y_shift);
1307 /* convert fourcc to upper case */
1308 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1310 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1312 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1313 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1314 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1315 s->picture[i].f = av_frame_alloc();
1316 if (!s->picture[i].f)
1319 memset(&s->next_picture, 0, sizeof(s->next_picture));
1320 memset(&s->last_picture, 0, sizeof(s->last_picture));
1321 memset(&s->current_picture, 0, sizeof(s->current_picture));
1322 memset(&s->new_picture, 0, sizeof(s->new_picture));
1323 s->next_picture.f = av_frame_alloc();
1324 if (!s->next_picture.f)
1326 s->last_picture.f = av_frame_alloc();
1327 if (!s->last_picture.f)
1329 s->current_picture.f = av_frame_alloc();
1330 if (!s->current_picture.f)
1332 s->new_picture.f = av_frame_alloc();
1333 if (!s->new_picture.f)
1336 if (init_context_frame(s))
1339 s->parse_context.state = -1;
1341 s->context_initialized = 1;
1342 s->thread_context[0] = s;
1344 // if (s->width && s->height) {
1345 if (nb_slices > 1) {
1346 for (i = 1; i < nb_slices; i++) {
1347 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1348 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1351 for (i = 0; i < nb_slices; i++) {
1352 if (init_duplicate_context(s->thread_context[i]) < 0)
1354 s->thread_context[i]->start_mb_y =
1355 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1356 s->thread_context[i]->end_mb_y =
1357 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1360 if (init_duplicate_context(s) < 0)
1363 s->end_mb_y = s->mb_height;
1365 s->slice_context_count = nb_slices;
1370 ff_MPV_common_end(s);
 * Frees and resets the MpegEncContext fields that depend on the resolution.
 * Is used during resolution changes to avoid a full reinitialization of the
 * codec.
1379 static int free_context_frame(MpegEncContext *s)
1383 av_freep(&s->mb_type);
1384 av_freep(&s->p_mv_table_base);
1385 av_freep(&s->b_forw_mv_table_base);
1386 av_freep(&s->b_back_mv_table_base);
1387 av_freep(&s->b_bidir_forw_mv_table_base);
1388 av_freep(&s->b_bidir_back_mv_table_base);
1389 av_freep(&s->b_direct_mv_table_base);
1390 s->p_mv_table = NULL;
1391 s->b_forw_mv_table = NULL;
1392 s->b_back_mv_table = NULL;
1393 s->b_bidir_forw_mv_table = NULL;
1394 s->b_bidir_back_mv_table = NULL;
1395 s->b_direct_mv_table = NULL;
1396 for (i = 0; i < 2; i++) {
1397 for (j = 0; j < 2; j++) {
1398 for (k = 0; k < 2; k++) {
1399 av_freep(&s->b_field_mv_table_base[i][j][k]);
1400 s->b_field_mv_table[i][j][k] = NULL;
1402 av_freep(&s->b_field_select_table[i][j]);
1403 av_freep(&s->p_field_mv_table_base[i][j]);
1404 s->p_field_mv_table[i][j] = NULL;
1406 av_freep(&s->p_field_select_table[i]);
1409 av_freep(&s->dc_val_base);
1410 av_freep(&s->coded_block_base);
1411 av_freep(&s->mbintra_table);
1412 av_freep(&s->cbp_table);
1413 av_freep(&s->pred_dir_table);
1415 av_freep(&s->mbskip_table);
1417 av_freep(&s->er.error_status_table);
1418 av_freep(&s->er.er_temp_buffer);
1419 av_freep(&s->mb_index2xy);
1420 av_freep(&s->lambda_table);
1422 av_freep(&s->cplx_tab);
1423 av_freep(&s->bits_tab);
1425 s->linesize = s->uvlinesize = 0;
1430 int ff_MPV_common_frame_size_change(MpegEncContext *s)
1434 if (s->slice_context_count > 1) {
1435 for (i = 0; i < s->slice_context_count; i++) {
1436 free_duplicate_context(s->thread_context[i]);
1438 for (i = 1; i < s->slice_context_count; i++) {
1439 av_freep(&s->thread_context[i]);
1442 free_duplicate_context(s);
1444 if ((err = free_context_frame(s)) < 0)
1448 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1449 s->picture[i].needs_realloc = 1;
1452 s->last_picture_ptr =
1453 s->next_picture_ptr =
1454 s->current_picture_ptr = NULL;
1457 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1458 s->mb_height = (s->height + 31) / 32 * 2;
1460 s->mb_height = (s->height + 15) / 16;
1462 if ((s->width || s->height) &&
1463 av_image_check_size(s->width, s->height, 0, s->avctx))
1464 return AVERROR_INVALIDDATA;
1466 if ((err = init_context_frame(s)))
1469 s->thread_context[0] = s;
1471 if (s->width && s->height) {
1472 int nb_slices = s->slice_context_count;
1473 if (nb_slices > 1) {
1474 for (i = 1; i < nb_slices; i++) {
1475 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1476 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1479 for (i = 0; i < nb_slices; i++) {
1480 if (init_duplicate_context(s->thread_context[i]) < 0)
1482 s->thread_context[i]->start_mb_y =
1483 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1484 s->thread_context[i]->end_mb_y =
1485 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1488 err = init_duplicate_context(s);
1492 s->end_mb_y = s->mb_height;
1494 s->slice_context_count = nb_slices;
1499 ff_MPV_common_end(s);
/* free the common structure used by both encoder and decoder */
1504 void ff_MPV_common_end(MpegEncContext *s)
1508 if (s->slice_context_count > 1) {
1509 for (i = 0; i < s->slice_context_count; i++) {
1510 free_duplicate_context(s->thread_context[i]);
1512 for (i = 1; i < s->slice_context_count; i++) {
1513 av_freep(&s->thread_context[i]);
1515 s->slice_context_count = 1;
1516 } else free_duplicate_context(s);
1518 av_freep(&s->parse_context.buffer);
1519 s->parse_context.buffer_size = 0;
1521 av_freep(&s->bitstream_buffer);
1522 s->allocated_bitstream_buffer_size = 0;
1525 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1526 ff_free_picture_tables(&s->picture[i]);
1527 ff_mpeg_unref_picture(s, &s->picture[i]);
1528 av_frame_free(&s->picture[i].f);
1531 av_freep(&s->picture);
1532 ff_free_picture_tables(&s->last_picture);
1533 ff_mpeg_unref_picture(s, &s->last_picture);
1534 av_frame_free(&s->last_picture.f);
1535 ff_free_picture_tables(&s->current_picture);
1536 ff_mpeg_unref_picture(s, &s->current_picture);
1537 av_frame_free(&s->current_picture.f);
1538 ff_free_picture_tables(&s->next_picture);
1539 ff_mpeg_unref_picture(s, &s->next_picture);
1540 av_frame_free(&s->next_picture.f);
1541 ff_free_picture_tables(&s->new_picture);
1542 ff_mpeg_unref_picture(s, &s->new_picture);
1543 av_frame_free(&s->new_picture.f);
1545 free_context_frame(s);
1547 s->context_initialized = 0;
1548 s->last_picture_ptr =
1549 s->next_picture_ptr =
1550 s->current_picture_ptr = NULL;
1551 s->linesize = s->uvlinesize = 0;
1554 av_cold void ff_init_rl(RLTable *rl,
1555 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1557 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1558 uint8_t index_run[MAX_RUN + 1];
1559 int last, run, level, start, end, i;
1561 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1562 if (static_store && rl->max_level[0])
1565 /* compute max_level[], max_run[] and index_run[] */
1566 for (last = 0; last < 2; last++) {
1575 memset(max_level, 0, MAX_RUN + 1);
1576 memset(max_run, 0, MAX_LEVEL + 1);
1577 memset(index_run, rl->n, MAX_RUN + 1);
1578 for (i = start; i < end; i++) {
1579 run = rl->table_run[i];
1580 level = rl->table_level[i];
1581 if (index_run[run] == rl->n)
1583 if (level > max_level[run])
1584 max_level[run] = level;
1585 if (run > max_run[level])
1586 max_run[level] = run;
1589 rl->max_level[last] = static_store[last];
1591 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1592 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1594 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1596 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1597 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1599 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1601 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1602 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1606 av_cold void ff_init_vlc_rl(RLTable *rl)
1610 for (q = 0; q < 32; q++) {
1612 int qadd = (q - 1) | 1;
1618 for (i = 0; i < rl->vlc.table_size; i++) {
1619 int code = rl->vlc.table[i][0];
1620 int len = rl->vlc.table[i][1];
1623 if (len == 0) { // illegal code
1626 } else if (len < 0) { // more bits needed
1630 if (code == rl->n) { // esc
1634 run = rl->table_run[code] + 1;
1635 level = rl->table_level[code] * qmul + qadd;
1636 if (code >= rl->last) run += 192;
1639 rl->rl_vlc[q][i].len = len;
1640 rl->rl_vlc[q][i].level = level;
1641 rl->rl_vlc[q][i].run = run;
1646 static void release_unused_pictures(MpegEncContext *s)
1650 /* release non reference frames */
1651 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1652 if (!s->picture[i].reference)
1653 ff_mpeg_unref_picture(s, &s->picture[i]);
1657 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1659 if (pic == s->last_picture_ptr)
1661 if (pic->f->buf[0] == NULL)
1663 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1668 static int find_unused_picture(MpegEncContext *s, int shared)
1673 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1674 if (s->picture[i].f->buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1678 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1679 if (pic_is_unused(s, &s->picture[i]))
1684 av_log(s->avctx, AV_LOG_FATAL,
1685 "Internal error, picture buffer overflow\n");
1686 /* We could return -1, but the codec would crash trying to draw into a
1687 * non-existing frame anyway. This is safer than waiting for a random crash.
1688 * Also the return of this is never useful, an encoder must only allocate
1689 * as much as allowed in the specification. This has no relationship to how
1690 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1691 * enough for such valid streams).
1692 * Plus, a decoder has to check stream validity and remove frames if too
1693 * many reference frames are around. Waiting for "OOM" is not correct at
1694 * all. Similarly, missing reference frames have to be replaced by
1695 * interpolated/MC frames, anything else is a bug in the codec ...
1701 int ff_find_unused_picture(MpegEncContext *s, int shared)
1703 int ret = find_unused_picture(s, shared);
1705 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1706 if (s->picture[ret].needs_realloc) {
1707 s->picture[ret].needs_realloc = 0;
1708 ff_free_picture_tables(&s->picture[ret]);
1709 ff_mpeg_unref_picture(s, &s->picture[ret]);
1715 static void gray_frame(AVFrame *frame)
1717 int i, h_chroma_shift, v_chroma_shift;
1719 av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1721 for(i=0; i<frame->height; i++)
1722 memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1723 for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1724 memset(frame->data[1] + frame->linesize[1]*i,
1725 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1726 memset(frame->data[2] + frame->linesize[2]*i,
1727 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1732 * generic function called after decoding
1733 * the header and before a frame is decoded.
1735 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1741 if (!ff_thread_can_start_frame(avctx)) {
1742 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1746 /* mark & release old frames */
1747 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1748 s->last_picture_ptr != s->next_picture_ptr &&
1749 s->last_picture_ptr->f->buf[0]) {
1750 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1753 /* release forgotten pictures */
1754 /* if (mpeg124/h263) */
1755 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1756 if (&s->picture[i] != s->last_picture_ptr &&
1757 &s->picture[i] != s->next_picture_ptr &&
1758 s->picture[i].reference && !s->picture[i].needs_realloc) {
1759 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1760 av_log(avctx, AV_LOG_ERROR,
1761 "releasing zombie picture\n");
1762 ff_mpeg_unref_picture(s, &s->picture[i]);
1766 ff_mpeg_unref_picture(s, &s->current_picture);
1768 release_unused_pictures(s);
1770 if (s->current_picture_ptr &&
1771 s->current_picture_ptr->f->buf[0] == NULL) {
1772 // we already have a unused image
1773 // (maybe it was set before reading the header)
1774 pic = s->current_picture_ptr;
1776 i = ff_find_unused_picture(s, 0);
1778 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1781 pic = &s->picture[i];
1785 if (!s->droppable) {
1786 if (s->pict_type != AV_PICTURE_TYPE_B)
1790 pic->f->coded_picture_number = s->coded_picture_number++;
1792 if (ff_alloc_picture(s, pic, 0) < 0)
1795 s->current_picture_ptr = pic;
1796 // FIXME use only the vars from current_pic
1797 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1798 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1799 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1800 if (s->picture_structure != PICT_FRAME)
1801 s->current_picture_ptr->f->top_field_first =
1802 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1804 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1805 !s->progressive_sequence;
1806 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1808 s->current_picture_ptr->f->pict_type = s->pict_type;
1809 // if (s->flags && CODEC_FLAG_QSCALE)
1810 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1811 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1813 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1814 s->current_picture_ptr)) < 0)
1817 if (s->pict_type != AV_PICTURE_TYPE_B) {
1818 s->last_picture_ptr = s->next_picture_ptr;
1820 s->next_picture_ptr = s->current_picture_ptr;
1822 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1823 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1824 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1825 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1826 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1827 s->pict_type, s->droppable);
1829 if ((s->last_picture_ptr == NULL ||
1830 s->last_picture_ptr->f->buf[0] == NULL) &&
1831 (s->pict_type != AV_PICTURE_TYPE_I ||
1832 s->picture_structure != PICT_FRAME)) {
1833 int h_chroma_shift, v_chroma_shift;
1834 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1835 &h_chroma_shift, &v_chroma_shift);
1836 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1837 av_log(avctx, AV_LOG_DEBUG,
1838 "allocating dummy last picture for B frame\n");
1839 else if (s->pict_type != AV_PICTURE_TYPE_I)
1840 av_log(avctx, AV_LOG_ERROR,
1841 "warning: first frame is no keyframe\n");
1842 else if (s->picture_structure != PICT_FRAME)
1843 av_log(avctx, AV_LOG_DEBUG,
1844 "allocate dummy last picture for field based first keyframe\n");
1846 /* Allocate a dummy frame */
1847 i = ff_find_unused_picture(s, 0);
1849 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1852 s->last_picture_ptr = &s->picture[i];
1854 s->last_picture_ptr->reference = 3;
1855 s->last_picture_ptr->f->key_frame = 0;
1856 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1858 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1859 s->last_picture_ptr = NULL;
1863 if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1864 for(i=0; i<avctx->height; i++)
1865 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1866 0x80, avctx->width);
1867 for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1868 memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1869 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1870 memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1871 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1874 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1875 for(i=0; i<avctx->height; i++)
1876 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
1880 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1881 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1883 if ((s->next_picture_ptr == NULL ||
1884 s->next_picture_ptr->f->buf[0] == NULL) &&
1885 s->pict_type == AV_PICTURE_TYPE_B) {
1886 /* Allocate a dummy frame */
1887 i = ff_find_unused_picture(s, 0);
1889 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1892 s->next_picture_ptr = &s->picture[i];
1894 s->next_picture_ptr->reference = 3;
1895 s->next_picture_ptr->f->key_frame = 0;
1896 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1898 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1899 s->next_picture_ptr = NULL;
1902 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1903 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1906 #if 0 // BUFREF-FIXME
1907 memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1908 memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
1910 if (s->last_picture_ptr) {
1911 ff_mpeg_unref_picture(s, &s->last_picture);
1912 if (s->last_picture_ptr->f->buf[0] &&
1913 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1914 s->last_picture_ptr)) < 0)
1917 if (s->next_picture_ptr) {
1918 ff_mpeg_unref_picture(s, &s->next_picture);
1919 if (s->next_picture_ptr->f->buf[0] &&
1920 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1921 s->next_picture_ptr)) < 0)
1925 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1926 s->last_picture_ptr->f->buf[0]));
1928 if (s->picture_structure!= PICT_FRAME) {
1930 for (i = 0; i < 4; i++) {
1931 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1932 s->current_picture.f->data[i] +=
1933 s->current_picture.f->linesize[i];
1935 s->current_picture.f->linesize[i] *= 2;
1936 s->last_picture.f->linesize[i] *= 2;
1937 s->next_picture.f->linesize[i] *= 2;
1941 s->err_recognition = avctx->err_recognition;
1943 /* set dequantizer, we can't do it during init as
1944 * it might change for mpeg4 and we can't do it in the header
1945 * decode as init is not called for mpeg4 there yet */
1946 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1947 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1948 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1949 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1950 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1951 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1953 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1954 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1957 if (s->avctx->debug & FF_DEBUG_NOMC) {
1958 gray_frame(s->current_picture_ptr->f);
1964 /* called after a frame has been decoded. */
1965 void ff_MPV_frame_end(MpegEncContext *s)
1969 if (s->current_picture.reference)
1970 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1974 * Draw a line from (ex, ey) -> (sx, sy).
1975 * @param w width of the image
1976 * @param h height of the image
1977 * @param stride stride/linesize of the image
 * @param color color of the line
1980 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1981 int w, int h, int stride, int color)
1985 sx = av_clip(sx, 0, w - 1);
1986 sy = av_clip(sy, 0, h - 1);
1987 ex = av_clip(ex, 0, w - 1);
1988 ey = av_clip(ey, 0, h - 1);
1990 buf[sy * stride + sx] += color;
1992 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1994 FFSWAP(int, sx, ex);
1995 FFSWAP(int, sy, ey);
1997 buf += sx + sy * stride;
1999 f = ((ey - sy) << 16) / ex;
2000 for (x = 0; x <= ex; x++) {
2002 fr = (x * f) & 0xFFFF;
2003 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2004 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
2008 FFSWAP(int, sx, ex);
2009 FFSWAP(int, sy, ey);
2011 buf += sx + sy * stride;
2014 f = ((ex - sx) << 16) / ey;
2017 for(y= 0; y <= ey; y++){
2019 fr = (y*f) & 0xFFFF;
2020 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2021 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
2027 * Draw an arrow from (ex, ey) -> (sx, sy).
2028 * @param w width of the image
2029 * @param h height of the image
2030 * @param stride stride/linesize of the image
2031 * @param color color of the arrow
2033 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2034 int ey, int w, int h, int stride, int color)
2038 sx = av_clip(sx, -100, w + 100);
2039 sy = av_clip(sy, -100, h + 100);
2040 ex = av_clip(ex, -100, w + 100);
2041 ey = av_clip(ey, -100, h + 100);
2046 if (dx * dx + dy * dy > 3 * 3) {
2049 int length = ff_sqrt((rx * rx + ry * ry) << 8);
2051 // FIXME subpixel accuracy
2052 rx = ROUNDED_DIV(rx * 3 << 4, length);
2053 ry = ROUNDED_DIV(ry * 3 << 4, length);
2055 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2056 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2058 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
2062 * Print debugging info for the given picture.
2064 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2065 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2067 int mb_width, int mb_height, int mb_stride, int quarter_sample)
2069 if (avctx->hwaccel || !mbtype_table
2070 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
2074 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2077 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2078 av_get_picture_type_char(pict->pict_type));
2079 for (y = 0; y < mb_height; y++) {
2080 for (x = 0; x < mb_width; x++) {
2081 if (avctx->debug & FF_DEBUG_SKIP) {
2082 int count = mbskip_table[x + y * mb_stride];
2085 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2087 if (avctx->debug & FF_DEBUG_QP) {
2088 av_log(avctx, AV_LOG_DEBUG, "%2d",
2089 qscale_table[x + y * mb_stride]);
2091 if (avctx->debug & FF_DEBUG_MB_TYPE) {
2092 int mb_type = mbtype_table[x + y * mb_stride];
2093 // Type & MV direction
2094 if (IS_PCM(mb_type))
2095 av_log(avctx, AV_LOG_DEBUG, "P");
2096 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2097 av_log(avctx, AV_LOG_DEBUG, "A");
2098 else if (IS_INTRA4x4(mb_type))
2099 av_log(avctx, AV_LOG_DEBUG, "i");
2100 else if (IS_INTRA16x16(mb_type))
2101 av_log(avctx, AV_LOG_DEBUG, "I");
2102 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2103 av_log(avctx, AV_LOG_DEBUG, "d");
2104 else if (IS_DIRECT(mb_type))
2105 av_log(avctx, AV_LOG_DEBUG, "D");
2106 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2107 av_log(avctx, AV_LOG_DEBUG, "g");
2108 else if (IS_GMC(mb_type))
2109 av_log(avctx, AV_LOG_DEBUG, "G");
2110 else if (IS_SKIP(mb_type))
2111 av_log(avctx, AV_LOG_DEBUG, "S");
2112 else if (!USES_LIST(mb_type, 1))
2113 av_log(avctx, AV_LOG_DEBUG, ">");
2114 else if (!USES_LIST(mb_type, 0))
2115 av_log(avctx, AV_LOG_DEBUG, "<");
2117 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2118 av_log(avctx, AV_LOG_DEBUG, "X");
2122 if (IS_8X8(mb_type))
2123 av_log(avctx, AV_LOG_DEBUG, "+");
2124 else if (IS_16X8(mb_type))
2125 av_log(avctx, AV_LOG_DEBUG, "-");
2126 else if (IS_8X16(mb_type))
2127 av_log(avctx, AV_LOG_DEBUG, "|");
2128 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2129 av_log(avctx, AV_LOG_DEBUG, " ");
2131 av_log(avctx, AV_LOG_DEBUG, "?");
2134 if (IS_INTERLACED(mb_type))
2135 av_log(avctx, AV_LOG_DEBUG, "=");
2137 av_log(avctx, AV_LOG_DEBUG, " ");
2140 av_log(avctx, AV_LOG_DEBUG, "\n");
2144 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2145 (avctx->debug_mv)) {
2146 const int shift = 1 + quarter_sample;
2150 int h_chroma_shift, v_chroma_shift, block_height;
2151 const int width = avctx->width;
2152 const int height = avctx->height;
2153 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2154 const int mv_stride = (mb_width << mv_sample_log2) +
2155 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2157 *low_delay = 0; // needed to see the vectors without trashing the buffers
2159 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2161 av_frame_make_writable(pict);
2163 pict->opaque = NULL;
2164 ptr = pict->data[0];
2165 block_height = 16 >> v_chroma_shift;
2167 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2169 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2170 const int mb_index = mb_x + mb_y * mb_stride;
2171 if ((avctx->debug_mv) && motion_val[0]) {
2173 for (type = 0; type < 3; type++) {
2177 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2178 (pict->pict_type!= AV_PICTURE_TYPE_P))
2183 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2184 (pict->pict_type!= AV_PICTURE_TYPE_B))
2189 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2190 (pict->pict_type!= AV_PICTURE_TYPE_B))
2195 if (!USES_LIST(mbtype_table[mb_index], direction))
2198 if (IS_8X8(mbtype_table[mb_index])) {
2200 for (i = 0; i < 4; i++) {
2201 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2202 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2203 int xy = (mb_x * 2 + (i & 1) +
2204 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2205 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2206 int my = (motion_val[direction][xy][1] >> shift) + sy;
2207 draw_arrow(ptr, sx, sy, mx, my, width,
2208 height, pict->linesize[0], 100);
2210 } else if (IS_16X8(mbtype_table[mb_index])) {
2212 for (i = 0; i < 2; i++) {
2213 int sx = mb_x * 16 + 8;
2214 int sy = mb_y * 16 + 4 + 8 * i;
2215 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2216 int mx = (motion_val[direction][xy][0] >> shift);
2217 int my = (motion_val[direction][xy][1] >> shift);
2219 if (IS_INTERLACED(mbtype_table[mb_index]))
2222 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2223 height, pict->linesize[0], 100);
2225 } else if (IS_8X16(mbtype_table[mb_index])) {
2227 for (i = 0; i < 2; i++) {
2228 int sx = mb_x * 16 + 4 + 8 * i;
2229 int sy = mb_y * 16 + 8;
2230 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2231 int mx = motion_val[direction][xy][0] >> shift;
2232 int my = motion_val[direction][xy][1] >> shift;
2234 if (IS_INTERLACED(mbtype_table[mb_index]))
2237 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2238 height, pict->linesize[0], 100);
2241 int sx= mb_x * 16 + 8;
2242 int sy= mb_y * 16 + 8;
2243 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2244 int mx= (motion_val[direction][xy][0]>>shift) + sx;
2245 int my= (motion_val[direction][xy][1]>>shift) + sy;
2246 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
2250 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2251 uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2252 0x0101010101010101ULL;
2254 for (y = 0; y < block_height; y++) {
2255 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2256 (block_height * mb_y + y) *
2257 pict->linesize[1]) = c;
2258 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2259 (block_height * mb_y + y) *
2260 pict->linesize[2]) = c;
2263 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2265 int mb_type = mbtype_table[mb_index];
2268 #define COLOR(theta, r) \
2269 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2270 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2274 if (IS_PCM(mb_type)) {
2276 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2277 IS_INTRA16x16(mb_type)) {
2279 } else if (IS_INTRA4x4(mb_type)) {
2281 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2283 } else if (IS_DIRECT(mb_type)) {
2285 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2287 } else if (IS_GMC(mb_type)) {
2289 } else if (IS_SKIP(mb_type)) {
2291 } else if (!USES_LIST(mb_type, 1)) {
2293 } else if (!USES_LIST(mb_type, 0)) {
2296 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2300 u *= 0x0101010101010101ULL;
2301 v *= 0x0101010101010101ULL;
2302 for (y = 0; y < block_height; y++) {
2303 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2304 (block_height * mb_y + y) * pict->linesize[1]) = u;
2305 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2306 (block_height * mb_y + y) * pict->linesize[2]) = v;
2310 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2311 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2312 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2313 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2314 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2316 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2317 for (y = 0; y < 16; y++)
2318 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2319 pict->linesize[0]] ^= 0x80;
2321 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2322 int dm = 1 << (mv_sample_log2 - 2);
2323 for (i = 0; i < 4; i++) {
2324 int sx = mb_x * 16 + 8 * (i & 1);
2325 int sy = mb_y * 16 + 8 * (i >> 1);
2326 int xy = (mb_x * 2 + (i & 1) +
2327 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2329 int32_t *mv = (int32_t *) &motion_val[0][xy];
2330 if (mv[0] != mv[dm] ||
2331 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2332 for (y = 0; y < 8; y++)
2333 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2334 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2335 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2336 pict->linesize[0]) ^= 0x8080808080808080ULL;
2340 if (IS_INTERLACED(mb_type) &&
2341 avctx->codec->id == AV_CODEC_ID_H264) {
2345 mbskip_table[mb_index] = 0;
2351 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2353 ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
2354 p->qscale_table, p->motion_val, &s->low_delay,
2355 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2358 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2360 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2361 int offset = 2*s->mb_stride + 1;
2363 return AVERROR(ENOMEM);
2364 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2365 ref->size -= offset;
2366 ref->data += offset;
2367 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/* Half-pel motion compensation of one block at reduced (lowres) resolution.
 * dest/src are the destination and reference planes; w/h the block size in
 * lowres pixels; pix_op the h264-chroma-style interpolation table indexed by
 * op_index. Falls back to emulated_edge_mc when the motion vector reaches
 * outside the padded reference area.
 * NOTE(review): several lines (declarations of sx/sy, some braces, the
 * quarter_sample body) are elided from this extract. */
2370 static inline int hpel_motion_lowres(MpegEncContext *s,
2371 uint8_t *dest, uint8_t *src,
2372 int field_based, int field_select,
2373 int src_x, int src_y,
2374 int width, int height, ptrdiff_t stride,
2375 int h_edge_pos, int v_edge_pos,
2376 int w, int h, h264_chroma_mc_func *pix_op,
2377 int motion_x, int motion_y)
2379 const int lowres = s->avctx->lowres;
/* interpolation tables only exist up to index 3 */
2380 const int op_index = FFMIN(lowres, 3);
/* mask extracting the sub-pel fraction at this lowres level */
2381 const int s_mask = (2 << lowres) - 1;
2385 if (s->quarter_sample) {
2390 sx = motion_x & s_mask;
2391 sy = motion_y & s_mask;
/* ">>" binds looser than "+": this is motion_x >> (lowres + 1) — intentional */
2392 src_x += motion_x >> lowres + 1;
2393 src_y += motion_y >> lowres + 1;
2395 src += src_y * stride + src_x;
/* out-of-picture reference → interpolate from the edge-emulation buffer */
2397 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2398 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2399 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2400 s->linesize, s->linesize,
2401 w + 1, (h + 1) << field_based,
2402 src_x, src_y << field_based,
2403 h_edge_pos, v_edge_pos);
2404 src = s->edge_emu_buffer;
/* rescale the sub-pel fraction into the 0..7 range the mc funcs expect */
2408 sx = (sx << 2) >> lowres;
2409 sy = (sy << 2) >> lowres;
2412 pix_op[op_index](dest, src, stride, h, sx, sy);
2416 /* apply one mpeg motion vector to the three components */
/* Lowres motion compensation for one MB: computes luma and chroma source
 * positions and sub-pel fractions per output format (H.263 / H.261 / 420 /
 * 422 / 444 chroma layouts), handles edge emulation and field addressing,
 * then runs the interpolation functions on Y, Cb and Cr.
 * NOTE(review): a number of parameter lines, brace lines and intermediate
 * statements are elided from this extract — read against the full file. */
2417 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2424 uint8_t **ref_picture,
2425 h264_chroma_mc_func *pix_op,
2426 int motion_x, int motion_y,
2429 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2430 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2431 ptrdiff_t uvlinesize, linesize;
2432 const int lowres = s->avctx->lowres;
/* chroma op table index depends on horizontal subsampling */
2433 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2434 const int block_s = 8>>lowres;
2435 const int s_mask = (2 << lowres) - 1;
2436 const int h_edge_pos = s->h_edge_pos >> lowres;
2437 const int v_edge_pos = s->v_edge_pos >> lowres;
/* doubled linesize for field-based access (every other line) */
2438 linesize = s->current_picture.f->linesize[0] << field_based;
2439 uvlinesize = s->current_picture.f->linesize[1] << field_based;
2441 // FIXME obviously not perfect but qpel will not work in lowres anyway
2442 if (s->quarter_sample) {
2448 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2451 sx = motion_x & s_mask;
2452 sy = motion_y & s_mask;
/* ">> lowres + 1" parses as ">> (lowres + 1)" — intentional */
2453 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2454 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2456 if (s->out_format == FMT_H263) {
2457 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2458 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2459 uvsrc_x = src_x >> 1;
2460 uvsrc_y = src_y >> 1;
2461 } else if (s->out_format == FMT_H261) {
2462 // even chroma mv's are full pel in H261
2465 uvsx = (2 * mx) & s_mask;
2466 uvsy = (2 * my) & s_mask;
2467 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2468 uvsrc_y = mb_y * block_s + (my >> lowres);
2470 if(s->chroma_y_shift){
2475 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2476 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2478 if(s->chroma_x_shift){
2482 uvsy = motion_y & s_mask;
2484 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2487 uvsx = motion_x & s_mask;
2488 uvsy = motion_y & s_mask;
2495 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2496 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2497 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* reference block leaves the padded picture → use edge emulation */
2499 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2500 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2501 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2502 linesize >> field_based, linesize >> field_based,
2503 17, 17 + field_based,
2504 src_x, src_y << field_based, h_edge_pos,
2506 ptr_y = s->edge_emu_buffer;
/* chroma only needs emulation when it is actually rendered */
2507 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2508 uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2509 uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2510 s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2511 uvlinesize >> field_based, uvlinesize >> field_based,
2513 uvsrc_x, uvsrc_y << field_based,
2514 h_edge_pos >> 1, v_edge_pos >> 1);
2515 s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2516 uvlinesize >> field_based,uvlinesize >> field_based,
2518 uvsrc_x, uvsrc_y << field_based,
2519 h_edge_pos >> 1, v_edge_pos >> 1);
2525 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
/* bottom field: start one line down in both dest and source */
2527 dest_y += s->linesize;
2528 dest_cb += s->uvlinesize;
2529 dest_cr += s->uvlinesize;
2533 ptr_y += s->linesize;
2534 ptr_cb += s->uvlinesize;
2535 ptr_cr += s->uvlinesize;
/* map sub-pel fraction into the interpolator's expected range */
2538 sx = (sx << 2) >> lowres;
2539 sy = (sy << 2) >> lowres;
2540 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2542 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
/* halve chroma height for 4:2:0; adjust for bottom field rounding */
2543 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2544 uvsx = (uvsx << 2) >> lowres;
2545 uvsy = (uvsy << 2) >> lowres;
2547 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2548 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2551 // FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV macroblocks at lowres: the four luma
 * vectors are averaged (with H.263 rounding) into a single chroma vector,
 * then Cb and Cr are interpolated, with edge emulation as needed.
 * NOTE(review): mx/my/offset/ptr declarations and several braces are on
 * lines elided from this extract. */
2554 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2555 uint8_t *dest_cb, uint8_t *dest_cr,
2556 uint8_t **ref_picture,
2557 h264_chroma_mc_func * pix_op,
2560 const int lowres = s->avctx->lowres;
2561 const int op_index = FFMIN(lowres, 3);
2562 const int block_s = 8 >> lowres;
2563 const int s_mask = (2 << lowres) - 1;
/* ">> lowres + 1" parses as ">> (lowres + 1)": chroma is half resolution */
2564 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2565 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2566 int emu = 0, src_x, src_y, sx, sy;
2570 if (s->quarter_sample) {
2575 /* In case of 8X8, we construct a single chroma motion vector
2576 with a special rounding */
2577 mx = ff_h263_round_chroma(mx);
2578 my = ff_h263_round_chroma(my);
2582 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2583 src_y = s->mb_y * block_s + (my >> lowres + 1);
2585 offset = src_y * s->uvlinesize + src_x;
2586 ptr = ref_picture[1] + offset;
/* reference outside padded chroma plane → edge emulation for Cb */
2587 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2588 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2589 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2590 s->uvlinesize, s->uvlinesize,
2592 src_x, src_y, h_edge_pos, v_edge_pos);
2593 ptr = s->edge_emu_buffer;
2596 sx = (sx << 2) >> lowres;
2597 sy = (sy << 2) >> lowres;
2598 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr uses the same offset; re-run emulation if Cb needed it */
2600 ptr = ref_picture[2] + offset;
2602 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2603 s->uvlinesize, s->uvlinesize,
2605 src_x, src_y, h_edge_pos, v_edge_pos);
2606 ptr = s->edge_emu_buffer;
2608 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2612 * motion compensation of a single macroblock
2614 * @param dest_y luma destination pointer
2615 * @param dest_cb chroma cb/u destination pointer
2616 * @param dest_cr chroma cr/v destination pointer
2617 * @param dir direction (0->forward, 1->backward)
2618 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2619 * @param pix_op halfpel motion compensation function (average or put normally)
2620 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Dispatches on s->mv_type: 16x16, 8x8 (4MV), field, 16x8 and dual-prime
 * style cases, delegating to mpeg_motion_lowres()/hpel_motion_lowres()/
 * chroma_4mv_motion_lowres().
 * NOTE(review): switch-case labels, some braces and closing statements are
 * on lines elided from this extract. */
2622 static inline void MPV_motion_lowres(MpegEncContext *s,
2623 uint8_t *dest_y, uint8_t *dest_cb,
2625 int dir, uint8_t **ref_picture,
2626 h264_chroma_mc_func *pix_op)
2630 const int lowres = s->avctx->lowres;
2631 const int block_s = 8 >>lowres;
2636 switch (s->mv_type) {
/* 16x16: single vector covers the whole MB */
2638 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2640 ref_picture, pix_op,
2641 s->mv[dir][0][0], s->mv[dir][0][1],
/* 8x8 (4MV): one vector per luma quadrant, chroma handled separately */
2647 for (i = 0; i < 4; i++) {
2648 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2649 s->linesize) * block_s,
2650 ref_picture[0], 0, 0,
2651 (2 * mb_x + (i & 1)) * block_s,
2652 (2 * mb_y + (i >> 1)) * block_s,
2653 s->width, s->height, s->linesize,
2654 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2655 block_s, block_s, pix_op,
2656 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate luma vectors for the averaged chroma vector */
2658 mx += s->mv[dir][i][0];
2659 my += s->mv[dir][i][1];
2662 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2663 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field-MV in a frame picture: compensate top and bottom fields */
2667 if (s->picture_structure == PICT_FRAME) {
2669 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2670 1, 0, s->field_select[dir][0],
2671 ref_picture, pix_op,
2672 s->mv[dir][0][0], s->mv[dir][0][1],
2675 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2676 1, 1, s->field_select[dir][1],
2677 ref_picture, pix_op,
2678 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture: same-parity reference may live in the current picture */
2681 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2682 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2683 ref_picture = s->current_picture_ptr->f->data;
2686 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2687 0, 0, s->field_select[dir][0],
2688 ref_picture, pix_op,
2690 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two vectors, upper and lower half of the MB */
2694 for (i = 0; i < 2; i++) {
2695 uint8_t **ref2picture;
2697 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2698 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2699 ref2picture = ref_picture;
2701 ref2picture = s->current_picture_ptr->f->data;
2704 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2705 0, 0, s->field_select[dir][i],
2706 ref2picture, pix_op,
2707 s->mv[dir][i][0], s->mv[dir][i][1] +
2708 2 * block_s * i, block_s, mb_y >> 1);
2710 dest_y += 2 * block_s * s->linesize;
2711 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2712 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* dual-prime style: four vectors in frame pictures, two in field pictures */
2716 if (s->picture_structure == PICT_FRAME) {
2717 for (i = 0; i < 2; i++) {
2719 for (j = 0; j < 2; j++) {
2720 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2722 ref_picture, pix_op,
2723 s->mv[dir][2 * i + j][0],
2724 s->mv[dir][2 * i + j][1],
/* after the first put, subsequent passes average into the block */
2727 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2730 for (i = 0; i < 2; i++) {
2731 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2732 0, 0, s->picture_structure != i + 1,
2733 ref_picture, pix_op,
2734 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2735 2 * block_s, mb_y >> 1);
2737 // after put we make avg of the same block
2738 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2740 // opposite parity is always in the same
2741 // frame if this is second field
2742 if (!s->first_field) {
2743 ref_picture = s->current_picture_ptr->f->data;
2754 * find the lowest MB row referenced in the MVs
/* Used by frame-threading to know how far the reference frame must have
 * been decoded before this MB can be motion-compensated.
 * NOTE(review): the switch-case bodies that set `mvs`/`off` are on lines
 * elided from this extract. */
2756 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* half-pel MVs get one extra shift to normalize against quarter-pel */
2758 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2759 int my, off, i, mvs;
/* field pictures / GMC: be conservative (fall through to mb_height-1) */
2761 if (s->picture_structure != PICT_FRAME || s->mcsel)
2764 switch (s->mv_type) {
2778 for (i = 0; i < mvs; i++) {
2779 my = s->mv[dir][i][1]<<qpel_shift;
2780 my_max = FFMAX(my_max, my);
2781 my_min = FFMIN(my_min, my);
/* convert the widest vertical excursion (quarter-pel) into MB rows */
2784 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2786 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* fallback: whole reference frame must be ready */
2788 return s->mb_height-1;
2791 /* put block[] to dest[] */
/* Dequantize an intra block and write (overwrite) its IDCT into dest. */
2792 static inline void put_dct(MpegEncContext *s,
2793 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2795 s->dct_unquantize_intra(s, block, i, qscale);
2796 s->dsp.idct_put (dest, line_size, block);
2799 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block to dest; a negative
 * block_last_index means the block has no coefficients and is skipped. */
2800 static inline void add_dct(MpegEncContext *s,
2801 int16_t *block, int i, uint8_t *dest, int line_size)
2803 if (s->block_last_index[i] >= 0) {
2804 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block and add its IDCT to dest; skipped when the
 * block has no coded coefficients (block_last_index < 0). */
2808 static inline void add_dequant_dct(MpegEncContext *s,
2809 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2811 if (s->block_last_index[i] >= 0) {
2812 s->dct_unquantize_inter(s, block, i, qscale);
2814 s->dsp.idct_add (dest, line_size, block);
2819 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra prediction state (luma + chroma DC to the neutral 1024,
 * AC coefficients to zero, MSMPEG4 coded_block flags) so a following intra
 * MB does not predict from stale values.
 * NOTE(review): some brace lines and intermediate statements are elided
 * from this extract. */
2821 void ff_clean_intra_table_entries(MpegEncContext *s)
2823 int wrap = s->b8_stride;
2824 int xy = s->block_index[0];
/* luma DC predictors for the 4 8x8 blocks of this MB */
2827 s->dc_val[0][xy + 1 ] =
2828 s->dc_val[0][xy + wrap] =
2829 s->dc_val[0][xy + 1 + wrap] = 1024;
/* luma AC coefficients (two block rows) */
2831 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2832 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2833 if (s->msmpeg4_version>=3) {
2834 s->coded_block[xy ] =
2835 s->coded_block[xy + 1 ] =
2836 s->coded_block[xy + wrap] =
2837 s->coded_block[xy + 1 + wrap] = 0;
/* chroma uses mb-granularity indexing */
2840 wrap = s->mb_stride;
2841 xy = s->mb_x + s->mb_y * wrap;
2843 s->dc_val[2][xy] = 1024;
2845 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2846 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2848 s->mbintra_table[xy]= 0;
2851 /* generic function called after a macroblock has been parsed by the
2852 decoder or after it has been encoded by the encoder.
2854 Important variables used:
2855 s->mb_intra : true if intra macroblock
2856 s->mv_dir : motion vector direction
2857 s->mv_type : motion vector type
2858 s->mv : motion vector
2859 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Renders one macroblock into the current picture: hwaccel short-circuit,
 * optional DCT-coefficient debug dump, intra-predictor maintenance, motion
 * compensation (lowres or full-res path), then residual dequant+IDCT (add
 * for inter, put for intra), and finally a copy-out for non-readable
 * destinations. lowres_flag/is_mpeg12 are compile-time specializations.
 * NOTE(review): many brace/else/intermediate lines are elided from this
 * extract; the annotations below describe only what is visible. */
2861 static av_always_inline
2862 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2863 int lowres_flag, int is_mpeg12)
2865 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* hardware acceleration does the whole MB itself */
2868 s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
2869 s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
2873 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2874 /* print DCT coefficients */
2876 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2878 for(j=0; j<64; j++){
2879 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2881 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2885 s->current_picture.qscale_table[mb_xy] = s->qscale;
2887 /* update DC predictors for P macroblocks */
2889 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2890 if(s->mbintra_table[mb_xy])
2891 ff_clean_intra_table_entries(s);
2895 s->last_dc[2] = 128 << s->intra_dc_precision;
2898 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2899 s->mbintra_table[mb_xy]=1;
/* only render when the pixels are actually needed (PSNR, frame skipping,
 * or any non-RD decoding/encoding path) */
2901 if ( (s->flags&CODEC_FLAG_PSNR)
2902 || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
2903 || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2904 uint8_t *dest_y, *dest_cb, *dest_cr;
2905 int dct_linesize, dct_offset;
2906 op_pixels_func (*op_pix)[4];
2907 qpel_mc_func (*op_qpix)[16];
2908 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2909 const int uvlinesize = s->current_picture.f->linesize[1];
2910 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2911 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2913 /* avoid copy if macroblock skipped in last frame too */
2914 /* skip only during decoding as we might trash the buffers during encoding a bit */
2916 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2918 if (s->mb_skipped) {
2920 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2922 } else if(!s->current_picture.reference) {
2925 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: double the stride, offset selects the other field */
2929 dct_linesize = linesize << s->interlaced_dct;
2930 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2934 dest_cb= s->dest[1];
2935 dest_cr= s->dest[2];
/* non-readable B-frame path: render into scratchpad, copy out later */
2937 dest_y = s->b_scratchpad;
2938 dest_cb= s->b_scratchpad+16*linesize;
2939 dest_cr= s->b_scratchpad+32*linesize;
2943 /* motion handling */
2944 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the referenced rows of the reference
 * pictures have been decoded */
2947 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2948 if (s->mv_dir & MV_DIR_FORWARD) {
2949 ff_thread_await_progress(&s->last_picture_ptr->tf,
2950 ff_MPV_lowest_referenced_row(s, 0),
2953 if (s->mv_dir & MV_DIR_BACKWARD) {
2954 ff_thread_await_progress(&s->next_picture_ptr->tf,
2955 ff_MPV_lowest_referenced_row(s, 1),
/* lowres MC path: forward put, backward averages on top */
2961 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2963 if (s->mv_dir & MV_DIR_FORWARD) {
2964 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
2965 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2967 if (s->mv_dir & MV_DIR_BACKWARD) {
2968 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
/* full-resolution MC path */
2971 op_qpix = s->me.qpel_put;
2972 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2973 op_pix = s->hdsp.put_pixels_tab;
2975 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2977 if (s->mv_dir & MV_DIR_FORWARD) {
2978 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2979 op_pix = s->hdsp.avg_pixels_tab;
2980 op_qpix= s->me.qpel_avg;
2982 if (s->mv_dir & MV_DIR_BACKWARD) {
2983 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2988 /* skip dequant / idct if we are really late ;) */
2989 if(s->avctx->skip_idct){
2990 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2991 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2992 || s->avctx->skip_idct >= AVDISCARD_ALL)
2996 /* add dct residue */
/* inter: codecs that left coefficients quantized take the dequant path */
2997 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2998 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2999 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3000 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3001 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3002 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3004 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3005 if (s->chroma_y_shift){
3006 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3007 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3011 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3012 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3013 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3014 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* inter, already dequantized (MPEG-1/2, MSMPEG4, MPEG-4 H.263-quant) */
3017 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3018 add_dct(s, block[0], 0, dest_y , dct_linesize);
3019 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3020 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3021 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3023 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3024 if(s->chroma_y_shift){//Chroma420
3025 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3026 add_dct(s, block[5], 5, dest_cr, uvlinesize);
3029 dct_linesize = uvlinesize << s->interlaced_dct;
3030 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3032 add_dct(s, block[4], 4, dest_cb, dct_linesize);
3033 add_dct(s, block[5], 5, dest_cr, dct_linesize);
3034 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3035 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3036 if(!s->chroma_x_shift){//Chroma444
3037 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3038 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3039 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3040 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
/* WMV2 has its own residual-add routine */
3045 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3046 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3049 /* dct only in intra block */
/* intra: non-MPEG-1/2 still needs dequantization before the IDCT */
3050 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
3051 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3052 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3053 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3054 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3056 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3057 if(s->chroma_y_shift){
3058 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3059 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3063 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3064 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3065 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3066 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* intra, already dequantized: straight idct_put */
3070 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
3071 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3072 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
3073 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3075 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3076 if(s->chroma_y_shift){
3077 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
3078 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
3081 dct_linesize = uvlinesize << s->interlaced_dct;
3082 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3084 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
3085 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
3086 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3087 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3088 if(!s->chroma_x_shift){//Chroma444
3089 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3090 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3091 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3092 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* scratchpad was used: copy the finished MB into the real destination */
3100 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3101 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3102 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public MB decode entry point: selects the compile-time specialization of
 * MPV_decode_mb_internal (lowres on/off x mpeg12 on/off) so the hot path
 * has the flags constant-folded. */
3107 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
3109 if(s->out_format == FMT_MPEG1) {
3110 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
3111 else MPV_decode_mb_internal(s, block, 0, 1);
3114 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
3115 else MPV_decode_mb_internal(s, block, 0, 0);
/* Forward a finished band of rows [y, y+h) to the generic draw_horiz_band
 * helper, passing the current and (possibly NULL) last picture. */
3118 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3120 ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3121 s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3122 s->first_field, s->low_delay);
/* Set up per-MB block indices and destination plane pointers for the MB at
 * (s->mb_x, s->mb_y). Indices 0-3 are the luma 8x8 blocks, 4-5 chroma.
 * mb_size = 4 - lowres is the log2 MB width at the current resolution.
 * NOTE(review): some conditional/brace lines are elided from this extract. */
3125 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3126 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3127 const int uvlinesize = s->current_picture.f->linesize[1];
3128 const int mb_size= 4 - s->avctx->lowres;
3130 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3131 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3132 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3133 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3134 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3135 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3136 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* (mb_x - 1): pointers are pre-advanced one MB; callers step them forward */
3138 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
3139 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3140 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3142 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3144 if(s->picture_structure==PICT_FRAME){
3145 s->dest[0] += s->mb_y * linesize << mb_size;
3146 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3147 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: rows are interleaved, so halve the MB row */
3149 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3150 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3151 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3152 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3158 * Permute an 8x8 block.
3159 * @param block the block which will be permuted according to the given permutation vector
3160 * @param permutation the permutation vector
3161 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3162 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3163 * (inverse) permutated to scantable order!
/* Two passes: first copy the non-zero coefficients (in scan order) into a
 * temp array, then write them back at their permuted positions.
 * NOTE(review): the temp-array declaration, the early-exit check and some
 * brace lines are elided from this extract. */
3165 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3171 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* pass 1: gather coefficients up to `last` in scantable order */
3173 for(i=0; i<=last; i++){
3174 const int j= scantable[i];
/* pass 2: scatter them to the permuted layout */
3179 for(i=0; i<=last; i++){
3180 const int j= scantable[i];
3181 const int perm_j= permutation[j];
3182 block[perm_j]= temp[j];
/* Flush callback: drop every picture reference and reset parser/bitstream
 * state so decoding can restart cleanly (e.g. after a seek). */
3186 void ff_mpeg_flush(AVCodecContext *avctx){
3188 MpegEncContext *s = avctx->priv_data;
/* nothing to flush before the context is initialized */
3190 if(s==NULL || s->picture==NULL)
3193 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3194 ff_mpeg_unref_picture(s, &s->picture[i]);
3195 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3197 ff_mpeg_unref_picture(s, &s->current_picture);
3198 ff_mpeg_unref_picture(s, &s->last_picture);
3199 ff_mpeg_unref_picture(s, &s->next_picture);
3201 s->mb_x= s->mb_y= 0;
/* reset the start-code parser so stale partial data is discarded */
3204 s->parse_context.state= -1;
3205 s->parse_context.frame_start_found= 0;
3206 s->parse_context.overread= 0;
3207 s->parse_context.overread_index= 0;
3208 s->parse_context.index= 0;
3209 s->parse_context.last_index= 0;
3210 s->bitstream_buffer_size=0;
3215 * set qscale and update qscale dependent variables.
/* Clamps qscale to the valid 1..31 range (clamp statements partly elided
 * here), then derives the chroma qscale and the luma/chroma DC scales from
 * the per-codec lookup tables. */
3217 void ff_set_qscale(MpegEncContext * s, int qscale)
3221 else if (qscale > 31)
3225 s->chroma_qscale= s->chroma_qscale_table[qscale];
3227 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3228 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Frame-threading: report the current MB row as decoded so consumer threads
 * waiting in ff_thread_await_progress can proceed. Not reported for B/
 * partitioned frames or after an ER-detected error. */
3231 void ff_MPV_report_decode_progress(MpegEncContext *s)
3233 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3234 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3237 #if CONFIG_ERROR_RESILIENCE
/* Populate an error-resilience picture descriptor from a Picture, sharing
 * (not copying) its motion/ref/type tables. A NULL src leaves dst zeroed
 * (src check is on a line elided from this extract). */
3238 void ff_mpeg_set_erpic(ERPicture *dst, Picture *src)
3242 memset(dst, 0, sizeof(*dst));
3252 for (i = 0; i < 2; i++) {
3253 dst->motion_val[i] = src->motion_val[i];
3254 dst->ref_index[i] = src->ref_index[i];
3257 dst->mb_type = src->mb_type;
3258 dst->field_picture = src->field_picture;
/* Hand the current/next/last pictures and timing/prediction parameters to
 * the error-resilience context at the start of a frame, then kick off ER. */
3261 void ff_mpeg_er_frame_start(MpegEncContext *s)
3263 ERContext *er = &s->er;
3265 ff_mpeg_set_erpic(&er->cur_pic, s->current_picture_ptr);
3266 ff_mpeg_set_erpic(&er->next_pic, s->next_picture_ptr);
3267 ff_mpeg_set_erpic(&er->last_pic, s->last_picture_ptr);
3269 er->pp_time = s->pp_time;
3270 er->pb_time = s->pb_time;
3271 er->quarter_sample = s->quarter_sample;
3272 er->partitioned_frame = s->partitioned_frame;
3274 ff_er_frame_start(er);
3276 #endif /* CONFIG_ERROR_RESILIENCE */