2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
37 #include "h264chroma.h"
40 #include "mpegutils.h"
41 #include "mpegvideo.h"
/* Default luma-qscale -> chroma-qscale mapping: the identity
 * (chroma uses the same quantiser scale as luma). */
48 static const uint8_t ff_default_chroma_qscale_table[32] = {
49 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
50 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
51 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* DC coefficient scale tables, indexed by qscale (0..127).
 * MPEG-1 uses a constant DC scale of 8 for every qscale. */
54 const uint8_t ff_mpeg1_dc_scale_table[128] = {
55 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
56 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
57 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* Constant DC scale of 4 (MPEG-2). */
66 static const uint8_t mpeg2_dc_scale_table1[128] = {
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
68 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
69 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* Constant DC scale of 2 (MPEG-2). */
78 static const uint8_t mpeg2_dc_scale_table2[128] = {
79 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
80 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* Constant DC scale of 1 (MPEG-2, lossless DC). */
90 static const uint8_t mpeg2_dc_scale_table3[128] = {
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
92 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
93 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Table of the four DC scale tables above; presumably indexed by the
 * MPEG-2 intra_dc_precision field (0..3) -- confirm against callers. */
102 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
103 ff_mpeg1_dc_scale_table,
104 mpeg2_dc_scale_table1,
105 mpeg2_dc_scale_table2,
106 mpeg2_dc_scale_table3,
/* Alternate 8x8 coefficient scan orders (64 entries each), used instead of
 * the zigzag scan; see their selection in ff_dct_common_init() below. */
109 const uint8_t ff_alternate_horizontal_scan[64] = {
110 0, 1, 2, 3, 8, 9, 16, 17,
111 10, 11, 4, 5, 6, 7, 15, 14,
112 13, 12, 19, 18, 24, 25, 32, 33,
113 26, 27, 20, 21, 22, 23, 28, 29,
114 30, 31, 34, 35, 40, 41, 48, 49,
115 42, 43, 36, 37, 38, 39, 44, 45,
116 46, 47, 50, 51, 56, 57, 58, 59,
117 52, 53, 54, 55, 60, 61, 62, 63,
/* Vertical variant: chosen when s->alternate_scan is set (MPEG-2 interlace). */
120 const uint8_t ff_alternate_vertical_scan[64] = {
121 0, 8, 16, 24, 1, 9, 2, 10,
122 17, 25, 32, 40, 48, 56, 57, 49,
123 41, 33, 26, 18, 3, 11, 4, 12,
124 19, 27, 34, 42, 50, 58, 35, 43,
125 51, 59, 20, 28, 5, 13, 6, 14,
126 21, 29, 36, 44, 52, 60, 37, 45,
127 53, 61, 22, 30, 7, 15, 23, 31,
128 38, 46, 54, 62, 39, 47, 55, 63,
/* MPEG-1 intra-block inverse quantisation (C reference).
 * block holds coefficients in permuted scan order; n is the block index
 * (n < 4 selects the luma DC scale, otherwise chroma); qscale is the
 * macroblock quantiser scale.
 * NOTE(review): the sign-handling branches around the two level
 * computations appear elided in this excerpt -- confirm against upstream. */
131 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
132 int16_t *block, int n, int qscale)
134 int i, level, nCoeffs;
135 const uint16_t *quant_matrix;
137 nCoeffs= s->block_last_index[n];
/* DC is scaled separately from the AC coefficients. */
139 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
140 /* XXX: only mpeg1 */
141 quant_matrix = s->intra_matrix;
142 for(i=1;i<=nCoeffs;i++) {
143 int j= s->intra_scantable.permutated[i];
148 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* (level - 1) | 1 forces the result odd: MPEG-1 mismatch control. */
149 level = (level - 1) | 1;
152 level = (int)(level * qscale * quant_matrix[j]) >> 3;
153 level = (level - 1) | 1;
/* MPEG-1 inter-block inverse quantisation (C reference).
 * Unlike the intra variant there is no separate DC handling: the loop
 * starts at i=0 and uses the inter quantisation matrix.
 * NOTE(review): the sign branches around the level computations appear
 * elided in this excerpt. */
160 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
161 int16_t *block, int n, int qscale)
163 int i, level, nCoeffs;
164 const uint16_t *quant_matrix;
166 nCoeffs= s->block_last_index[n];
168 quant_matrix = s->inter_matrix;
169 for(i=0; i<=nCoeffs; i++) {
170 int j= s->intra_scantable.permutated[i];
/* (2*level + 1) * qscale * matrix / 16, then force odd (mismatch control). */
175 level = (((level << 1) + 1) * qscale *
176 ((int) (quant_matrix[j]))) >> 4;
177 level = (level - 1) | 1;
180 level = (((level << 1) + 1) * qscale *
181 ((int) (quant_matrix[j]))) >> 4;
182 level = (level - 1) | 1;
/* MPEG-2 intra-block inverse quantisation (C reference).
 * With alternate_scan all 64 coefficients are processed regardless of
 * block_last_index. No oddification here -- MPEG-2 mismatch control is
 * handled differently from MPEG-1 (see the bitexact variant below). */
189 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
190 int16_t *block, int n, int qscale)
192 int i, level, nCoeffs;
193 const uint16_t *quant_matrix;
195 if(s->alternate_scan) nCoeffs= 63;
196 else nCoeffs= s->block_last_index[n];
/* DC is scaled separately (luma vs chroma scale). */
198 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
199 quant_matrix = s->intra_matrix;
200 for(i=1;i<=nCoeffs;i++) {
201 int j= s->intra_scantable.permutated[i];
206 level = (int)(level * qscale * quant_matrix[j]) >> 3;
209 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of dct_unquantize_mpeg2_intra_c, selected when
 * CODEC_FLAG_BITEXACT is set (see ff_dct_common_init below).
 * NOTE(review): the mismatch-control accumulation usually present in this
 * variant appears elided in this excerpt -- confirm against upstream. */
216 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
217 int16_t *block, int n, int qscale)
219 int i, level, nCoeffs;
220 const uint16_t *quant_matrix;
223 if(s->alternate_scan) nCoeffs= 63;
224 else nCoeffs= s->block_last_index[n];
226 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
228 quant_matrix = s->intra_matrix;
229 for(i=1;i<=nCoeffs;i++) {
230 int j= s->intra_scantable.permutated[i];
235 level = (int)(level * qscale * quant_matrix[j]) >> 3;
238 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter-block inverse quantisation (C reference).
 * Loop starts at i=0 (DC is quantised like AC for inter blocks) and uses
 * the inter matrix; alternate_scan forces all 64 coefficients. */
247 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
248 int16_t *block, int n, int qscale)
250 int i, level, nCoeffs;
251 const uint16_t *quant_matrix;
254 if(s->alternate_scan) nCoeffs= 63;
255 else nCoeffs= s->block_last_index[n];
257 quant_matrix = s->inter_matrix;
258 for(i=0; i<=nCoeffs; i++) {
259 int j= s->intra_scantable.permutated[i];
/* (2*level + 1) * qscale * matrix / 16 */
264 level = (((level << 1) + 1) * qscale *
265 ((int) (quant_matrix[j]))) >> 4;
268 level = (((level << 1) + 1) * qscale *
269 ((int) (quant_matrix[j]))) >> 4;
/* H.263 intra-block inverse quantisation: level' = level*qmul +/- qadd,
 * with qadd forced odd. The qmul assignment and sign branches appear
 * elided in this excerpt. */
278 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
279 int16_t *block, int n, int qscale)
281 int i, level, qmul, qadd;
/* A negative last index is only legal with H.263 advanced intra coding. */
284 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
289 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
290 qadd = (qscale - 1) | 1;
/* raster_end maps the scan-order last index to a raster-order bound. */
297 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
299 for(i=1; i<=nCoeffs; i++) {
303 level = level * qmul - qadd;
305 level = level * qmul + qadd;
/* H.263 inter-block inverse quantisation; same level'=level*qmul +/- qadd
 * scheme as the intra variant but with no DC special case (loop from 0). */
312 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
313 int16_t *block, int n, int qscale)
315 int i, level, qmul, qadd;
318 av_assert2(s->block_last_index[n]>=0);
/* Force qadd odd. */
320 qadd = (qscale - 1) | 1;
323 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
325 for(i=0; i<=nCoeffs; i++) {
329 level = level * qmul - qadd;
331 level = level * qmul + qadd;
/* Error-resilience callback (installed as er->decode_mb in init_er below):
 * re-decodes/conceals one macroblock at (mb_x, mb_y). opaque is the
 * MpegEncContext; the motion parameters are copied into the context and
 * ff_MPV_decode_mb() is invoked on it. */
338 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
340 int mb_x, int mb_y, int mb_intra, int mb_skipped)
342 MpegEncContext *s = opaque;
345 s->mv_type = mv_type;
346 s->mb_intra = mb_intra;
347 s->mb_skipped = mb_skipped;
350 memcpy(s->mv, mv, sizeof(*mv));
352 ff_init_block_index(s);
353 ff_update_block_index(s);
355 s->dsp.clear_blocks(s->block[0]);
/* Point dest[] at the current macroblock in the current picture; chroma
 * offsets are scaled by the chroma shift factors. */
357 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
358 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
359 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
362 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
363 ff_MPV_decode_mb(s, s->block);
/* Debug stubs used when FF_DEBUG_NOMC is set (see ff_dct_common_init):
 * instead of motion compensation they fill the 16- or 8-pixel-wide block
 * row with mid-gray (128). src is intentionally unused. */
366 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
369 memset(dst + h*linesize, 128, 16)
372 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
375 memset(dst + h*linesize, 128, 8);
378 /* init common dct for both encoder and decoder */
379 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Initialise the DSP helper contexts. */
381 ff_dsputil_init(&s->dsp, s->avctx);
382 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
383 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
384 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* With FF_DEBUG_NOMC, replace motion compensation with gray fills. */
386 if (s->avctx->debug & FF_DEBUG_NOMC) {
388 for (i=0; i<4; i++) {
389 s->hdsp.avg_pixels_tab[0][i] = gray16;
390 s->hdsp.put_pixels_tab[0][i] = gray16;
391 s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
393 s->hdsp.avg_pixels_tab[1][i] = gray8;
394 s->hdsp.put_pixels_tab[1][i] = gray8;
395 s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
/* Install the C reference dequantisers; arch-specific init below may
 * override them with optimised versions. */
399 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
400 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
401 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
402 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
403 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
404 if (s->flags & CODEC_FLAG_BITEXACT)
405 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
406 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture initialisation (guards elided in this excerpt). */
409 ff_MPV_common_init_axp(s);
411 ff_MPV_common_init_arm(s);
413 ff_MPV_common_init_ppc(s);
415 ff_MPV_common_init_x86(s);
417 /* load & permutate scantables
418 * note: only wmv uses different ones
420 if (s->alternate_scan) {
421 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
422 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
424 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
425 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
427 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
428 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate linesize-dependent scratch buffers (edge emulation and motion
 * estimation scratchpads). Returns 0 on success, a negative AVERROR on
 * failure; on ENOMEM the edge_emu_buffer is freed again. */
433 static int frame_size_alloc(MpegEncContext *s, int linesize)
435 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
438 av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
439 return AVERROR_PATCHWELCOME;
442 // edge emu needs blocksize + filter length - 1
443 // (= 17x17 for halfpel / 21x21 for h264)
444 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
445 // at uvlinesize. It supports only YUV420 so 24x24 is enough
446 // linesize * interlaced * MBsize
447 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
450 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
/* All scratchpads share the single me.scratchpad allocation. */
452 s->me.temp = s->me.scratchpad;
453 s->rd_scratchpad = s->me.scratchpad;
454 s->b_scratchpad = s->me.scratchpad;
455 s->obmc_scratchpad = s->me.scratchpad + 16;
459 av_freep(&s->edge_emu_buffer);
460 return AVERROR(ENOMEM);
464 * Allocate a frame buffer
/* Allocates pixel data for pic via the thread-aware buffer getter (decoder)
 * or the default get_buffer2 (encoder, with EDGE_WIDTH padding), validates
 * strides, and lazily allocates the context scratch buffers. */
466 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
468 int edges_needed = av_codec_is_encoder(s->avctx->codec);
/* WM image / screen codecs manage their own buffers; skip the
 * user-supplied get_buffer path for them (see ff_mpeg_unref_picture). */
472 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
473 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
474 s->codec_id != AV_CODEC_ID_MSS2) {
/* Encoder path: over-allocate so edges can be drawn around the frame. */
476 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
477 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
480 r = ff_thread_get_buffer(s->avctx, &pic->tf,
481 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
483 pic->f->width = s->avctx->width;
484 pic->f->height = s->avctx->height;
485 pic->f->format = s->avctx->pix_fmt;
486 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
489 if (r < 0 || !pic->f->buf[0]) {
490 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Shift data[] pointers inside the padded buffer so the visible frame
 * starts after the edge, then restore the nominal dimensions. */
497 for (i = 0; pic->f->data[i]; i++) {
498 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
499 pic->f->linesize[i] +
500 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
501 pic->f->data[i] += offset;
503 pic->f->width = s->avctx->width;
504 pic->f->height = s->avctx->height;
507 if (s->avctx->hwaccel) {
508 assert(!pic->hwaccel_picture_private);
509 if (s->avctx->hwaccel->frame_priv_data_size) {
510 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
511 if (!pic->hwaccel_priv_buf) {
512 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
515 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* The context caches linesize/uvlinesize; a stride change mid-stream is
 * unsupported and rejected here. */
519 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
520 s->uvlinesize != pic->f->linesize[1])) {
521 av_log(s->avctx, AV_LOG_ERROR,
522 "get_buffer() failed (stride changed)\n");
523 ff_mpeg_unref_picture(s, pic);
527 if (pic->f->linesize[1] != pic->f->linesize[2]) {
528 av_log(s->avctx, AV_LOG_ERROR,
529 "get_buffer() failed (uv stride mismatch)\n");
530 ff_mpeg_unref_picture(s, pic);
/* Lazily allocate the linesize-dependent scratch buffers. */
534 if (!s->edge_emu_buffer &&
535 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
536 av_log(s->avctx, AV_LOG_ERROR,
537 "get_buffer() failed to allocate context scratch buffers.\n");
538 ff_mpeg_unref_picture(s, pic);
/* Release every per-picture side-data buffer (macroblock statistics, skip /
 * qscale / type tables, motion vectors and reference indices) and reset the
 * recorded allocation dimensions. Safe on partially-allocated pictures:
 * av_buffer_unref() tolerates NULL. */
545 void ff_free_picture_tables(Picture *pic)
549 pic->alloc_mb_width =
550 pic->alloc_mb_height = 0;
552 av_buffer_unref(&pic->mb_var_buf);
553 av_buffer_unref(&pic->mc_mb_var_buf);
554 av_buffer_unref(&pic->mb_mean_buf);
555 av_buffer_unref(&pic->mbskip_table_buf);
556 av_buffer_unref(&pic->qscale_table_buf);
557 av_buffer_unref(&pic->mb_type_buf);
559 for (i = 0; i < 2; i++) {
560 av_buffer_unref(&pic->motion_val_buf[i]);
561 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data buffers sized from the current
 * macroblock geometry. Returns 0 or AVERROR(ENOMEM); the caller
 * (ff_alloc_picture) frees partial allocations on failure. */
565 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
567 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
568 const int mb_array_size = s->mb_stride * s->mb_height;
569 const int b8_array_size = s->b8_stride * s->mb_height * 2;
573 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
574 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
575 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
577 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
578 return AVERROR(ENOMEM);
/* Encoder-side macroblock statistics (variance / MC variance / mean). */
581 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
582 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
583 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
584 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
585 return AVERROR(ENOMEM);
/* Motion vectors / reference indices: needed for H.263-family output,
 * when encoding, or for motion-vector visualisation. */
588 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
589 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
590 int ref_index_size = 4 * mb_array_size;
592 for (i = 0; mv_size && i < 2; i++) {
593 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
594 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
595 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
596 return AVERROR(ENOMEM);
/* Remember the geometry these tables were sized for (checked in
 * ff_alloc_picture before reuse). */
600 pic->alloc_mb_width = s->mb_width;
601 pic->alloc_mb_height = s->mb_height;
/* Ensure every per-picture table buffer is writable (copy-on-write via
 * av_buffer_make_writable), so a picture shared between threads can be
 * modified safely. The MAKE_WRITABLE macro body is partially elided here. */
606 static int make_tables_writable(Picture *pic)
609 #define MAKE_WRITABLE(table) \
612 (ret = av_buffer_make_writable(&pic->table)) < 0)\
616 MAKE_WRITABLE(mb_var_buf);
617 MAKE_WRITABLE(mc_mb_var_buf);
618 MAKE_WRITABLE(mb_mean_buf);
619 MAKE_WRITABLE(mbskip_table_buf);
620 MAKE_WRITABLE(qscale_table_buf);
621 MAKE_WRITABLE(mb_type_buf);
623 for (i = 0; i < 2; i++) {
624 MAKE_WRITABLE(motion_val_buf[i]);
625 MAKE_WRITABLE(ref_index_buf[i]);
632 * Allocate a Picture.
633 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Also (re)allocates the per-picture tables when the macroblock geometry
 * changed, makes them writable, and publishes convenience pointers into
 * the table buffers. On any failure everything is unreferenced/freed and
 * AVERROR(ENOMEM) is returned. */
635 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* Geometry changed since the tables were allocated -> drop and redo. */
639 if (pic->qscale_table_buf)
640 if ( pic->alloc_mb_width != s->mb_width
641 || pic->alloc_mb_height != s->mb_height)
642 ff_free_picture_tables(pic);
645 av_assert0(pic->f->data[0]);
648 av_assert0(!pic->f->buf[0]);
650 if (alloc_frame_buffer(s, pic) < 0)
/* Cache the strides for later consistency checks. */
653 s->linesize = pic->f->linesize[0];
654 s->uvlinesize = pic->f->linesize[1];
657 if (!pic->qscale_table_buf)
658 ret = alloc_picture_tables(s, pic);
660 ret = make_tables_writable(pic);
665 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
666 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
667 pic->mb_mean = pic->mb_mean_buf->data;
/* qscale_table/mb_type skip the first two strides plus one entry so that
 * index 0 maps to the first real macroblock (edge rows above/left). */
670 pic->mbskip_table = pic->mbskip_table_buf->data;
671 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
672 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
674 if (pic->motion_val_buf[0]) {
675 for (i = 0; i < 2; i++) {
676 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
677 pic->ref_index[i] = pic->ref_index_buf[i]->data;
683 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
684 ff_mpeg_unref_picture(s, pic);
685 ff_free_picture_tables(pic);
686 return AVERROR(ENOMEM);
690 * Deallocate a picture.
/* Releases the frame buffer and hwaccel data, optionally drops the tables
 * (when a realloc is pending), then zeroes every Picture field that lies
 * after the table/pointer section, preserving the buffer references that
 * precede `off`. */
692 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
/* Byte offset of the first field to clear; everything up to and including
 * mb_mean is kept (buffer refs + table pointers). */
694 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
697 /* WM Image / Screen codecs allocate internal buffers with different
698 * dimensions / colorspaces; ignore user-defined callbacks for these. */
699 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
700 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
701 s->codec_id != AV_CODEC_ID_MSS2)
702 ff_thread_release_buffer(s->avctx, &pic->tf);
704 av_frame_unref(pic->f);
706 av_buffer_unref(&pic->hwaccel_priv_buf);
708 if (pic->needs_realloc)
709 ff_free_picture_tables(pic);
711 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's per-picture tables reference the same underlying buffers as
 * src's (re-referencing only where the buffers differ), then copy the
 * derived data pointers and allocation dimensions. On a failed ref the
 * UPDATE_TABLE macro frees dst's tables and returns AVERROR(ENOMEM). */
714 static int update_picture_tables(Picture *dst, Picture *src)
718 #define UPDATE_TABLE(table)\
721 (!dst->table || dst->table->buffer != src->table->buffer)) {\
722 av_buffer_unref(&dst->table);\
723 dst->table = av_buffer_ref(src->table);\
725 ff_free_picture_tables(dst);\
726 return AVERROR(ENOMEM);\
731 UPDATE_TABLE(mb_var_buf);
732 UPDATE_TABLE(mc_mb_var_buf);
733 UPDATE_TABLE(mb_mean_buf);
734 UPDATE_TABLE(mbskip_table_buf);
735 UPDATE_TABLE(qscale_table_buf);
736 UPDATE_TABLE(mb_type_buf);
737 for (i = 0; i < 2; i++) {
738 UPDATE_TABLE(motion_val_buf[i]);
739 UPDATE_TABLE(ref_index_buf[i]);
/* The convenience pointers point into the shared buffers, so a plain
 * pointer copy is correct here. */
742 dst->mb_var = src->mb_var;
743 dst->mc_mb_var = src->mc_mb_var;
744 dst->mb_mean = src->mb_mean;
745 dst->mbskip_table = src->mbskip_table;
746 dst->qscale_table = src->qscale_table;
747 dst->mb_type = src->mb_type;
748 for (i = 0; i < 2; i++) {
749 dst->motion_val[i] = src->motion_val[i];
750 dst->ref_index[i] = src->ref_index[i];
753 dst->alloc_mb_width = src->alloc_mb_width;
754 dst->alloc_mb_height = src->alloc_mb_height;
/* Make dst a new reference to src: frame buffer (thread-aware), tables,
 * hwaccel private data, and the scalar bookkeeping fields. dst must be
 * empty on entry; on failure dst is unreferenced again. */
759 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
763 av_assert0(!dst->f->buf[0]);
764 av_assert0(src->f->buf[0]);
768 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
772 ret = update_picture_tables(dst, src);
776 if (src->hwaccel_picture_private) {
777 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
778 if (!dst->hwaccel_priv_buf)
780 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
783 dst->field_picture = src->field_picture;
784 dst->mb_var_sum = src->mb_var_sum;
785 dst->mc_mb_var_sum = src->mc_mb_var_sum;
786 dst->b_frame_score = src->b_frame_score;
787 dst->needs_realloc = src->needs_realloc;
788 dst->reference = src->reference;
789 dst->shared = src->shared;
/* Error path (label elided in this excerpt): roll back the partial ref. */
793 ff_mpeg_unref_picture(s, dst);
/* Swap the U and V block pointers (used for the VCR2 codec tag, see
 * init_duplicate_context / ff_update_duplicate_context). The swap's other
 * half is elided in this excerpt. */
797 static void exchange_uv(MpegEncContext *s)
802 s->pblocks[4] = s->pblocks[5];
/* Allocate the per-slice-thread state: motion-estimation maps, optional
 * noise-reduction error sums, the DCT block array, and (for H.263-family
 * output) the AC prediction values. Returns 0 on success, -1 on failure;
 * partially-allocated buffers are released later by ff_MPV_common_end(). */
806 static int init_duplicate_context(MpegEncContext *s)
808 int y_size = s->b8_stride * (2 * s->mb_height + 1);
809 int c_size = s->mb_stride * (s->mb_height + 1);
810 int yc_size = y_size + 2 * c_size;
/* Odd macroblock heights need one extra row in each plane. */
813 if (s->mb_height & 1)
814 yc_size += 2*s->b8_stride + 2*s->mb_stride;
821 s->obmc_scratchpad = NULL;
824 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
825 ME_MAP_SIZE * sizeof(uint32_t), fail)
826 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
827 ME_MAP_SIZE * sizeof(uint32_t), fail)
828 if (s->avctx->noise_reduction) {
829 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
830 2 * 64 * sizeof(int), fail)
833 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
834 s->block = s->blocks[0];
836 for (i = 0; i < 12; i++) {
837 s->pblocks[i] = &s->block[i];
/* VCR2 stores chroma planes swapped; exchange_uv() call elided here. */
839 if (s->avctx->codec_tag == AV_RL32("VCR2"))
842 if (s->out_format == FMT_H263) {
844 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
845 yc_size * sizeof(int16_t) * 16, fail);
846 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
847 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
848 s->ac_val[2] = s->ac_val[1] + c_size;
853 return -1; // free() through ff_MPV_common_end()
/* Free the per-slice-thread buffers allocated by init_duplicate_context()
 * and frame_size_alloc(). obmc_scratchpad points into me.scratchpad, so it
 * is only NULLed, not freed separately. */
856 static void free_duplicate_context(MpegEncContext *s)
861 av_freep(&s->edge_emu_buffer);
862 av_freep(&s->me.scratchpad);
866 s->obmc_scratchpad = NULL;
868 av_freep(&s->dct_error_sum);
869 av_freep(&s->me.map);
870 av_freep(&s->me.score_map);
871 av_freep(&s->blocks);
872 av_freep(&s->ac_val_base);
/* Copy the thread-local fields from src into bak so they survive the
 * whole-struct memcpy in ff_update_duplicate_context(). Most COPY() lines
 * are elided in this excerpt. */
876 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
878 #define COPY(a) bak->a = src->a
879 COPY(edge_emu_buffer);
884 COPY(obmc_scratchpad);
891 COPY(me.map_generation);
/* Refresh a slice-thread context from the master: copy the whole struct
 * while preserving dst's thread-local buffers (saved/restored through
 * backup_duplicate_context), re-point pblocks, and lazily allocate the
 * scratch buffers if missing. */
903 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
907 // FIXME copy only needed parts
909 backup_duplicate_context(&bak, dst);
910 memcpy(dst, src, sizeof(MpegEncContext));
911 backup_duplicate_context(dst, &bak);
/* pblocks must point into dst's own block array, not src's. */
912 for (i = 0; i < 12; i++) {
913 dst->pblocks[i] = &dst->block[i];
915 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
917 if (!dst->edge_emu_buffer &&
918 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
919 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
920 "scratch buffers.\n");
923 // STOP_TIMER("update_duplicate_context")
924 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading context update: synchronise decoder state from the
 * previous thread's context (s1 / src) into this thread's context
 * (s / dst): picture references, timing, bitstream buffer, scratch
 * buffers and assorted per-frame fields. */
928 int ff_mpeg_update_thread_context(AVCodecContext *dst,
929 const AVCodecContext *src)
932 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
939 // FIXME can parameters change on I-frames?
940 // in that case dst may need a reinit
941 if (!s->context_initialized) {
/* First update: clone the whole context, then re-create the pieces that
 * must not be shared between threads. */
942 memcpy(s, s1, sizeof(MpegEncContext));
945 s->bitstream_buffer = NULL;
946 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
948 if (s1->context_initialized){
949 // s->picture_range_start += MAX_PICTURE_COUNT;
950 // s->picture_range_end += MAX_PICTURE_COUNT;
951 if((ret = ff_MPV_common_init(s)) < 0){
952 memset(s, 0, sizeof(MpegEncContext));
/* Dimensions changed (or an explicit reinit was requested): resize. */
959 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
960 s->context_reinit = 0;
961 s->height = s1->height;
962 s->width = s1->width;
963 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
967 s->avctx->coded_height = s1->avctx->coded_height;
968 s->avctx->coded_width = s1->avctx->coded_width;
969 s->avctx->width = s1->avctx->width;
970 s->avctx->height = s1->avctx->height;
972 s->coded_picture_number = s1->coded_picture_number;
973 s->picture_number = s1->picture_number;
975 av_assert0(!s->picture || s->picture != s1->picture);
/* Re-reference every picture from the source context. */
977 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
978 ff_mpeg_unref_picture(s, &s->picture[i]);
979 if (s1->picture[i].f->buf[0] &&
980 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
984 #define UPDATE_PICTURE(pic)\
986 ff_mpeg_unref_picture(s, &s->pic);\
987 if (s1->pic.f && s1->pic.f->buf[0])\
988 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
990 ret = update_picture_tables(&s->pic, &s1->pic);\
995 UPDATE_PICTURE(current_picture);
996 UPDATE_PICTURE(last_picture);
997 UPDATE_PICTURE(next_picture);
/* Rebase the picture pointers from s1's picture array into s's. */
999 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1000 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1001 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1003 // Error/bug resilience
1004 s->next_p_frame_damaged = s1->next_p_frame_damaged;
1005 s->workaround_bugs = s1->workaround_bugs;
1006 s->padding_bug_score = s1->padding_bug_score;
1008 // MPEG4 timing info
/* Bulk-copy the field range [last_time_base, pb_field_time]. */
1009 memcpy(&s->last_time_base, &s1->last_time_base,
1010 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1011 (char *) &s1->last_time_base);
1014 s->max_b_frames = s1->max_b_frames;
1015 s->low_delay = s1->low_delay;
1016 s->droppable = s1->droppable;
1018 // DivX handling (doesn't work)
1019 s->divx_packed = s1->divx_packed;
1021 if (s1->bitstream_buffer) {
1022 if (s1->bitstream_buffer_size +
1023 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
1024 av_fast_malloc(&s->bitstream_buffer,
1025 &s->allocated_bitstream_buffer_size,
1026 s1->allocated_bitstream_buffer_size);
1027 s->bitstream_buffer_size = s1->bitstream_buffer_size;
1028 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1029 s1->bitstream_buffer_size);
1030 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1031 FF_INPUT_BUFFER_PADDING_SIZE);
1034 // linesize dependend scratch buffer allocation
1035 if (!s->edge_emu_buffer)
1037 if (frame_size_alloc(s, s1->linesize) < 0) {
1038 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1039 "scratch buffers.\n");
1040 return AVERROR(ENOMEM);
1043 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1044 "be allocated due to unknown size.\n");
1047 // MPEG2/interlacing info
/* Bulk-copy [progressive_sequence, rtp_mode). */
1048 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1049 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1051 if (!s1->first_field) {
1052 s->last_pict_type = s1->pict_type;
1053 if (s1->current_picture_ptr)
1054 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1061 * Set the given MpegEncContext to common defaults
1062 * (same for encoding and decoding).
1063 * The changed fields will not depend upon the
1064 * prior state of the MpegEncContext.
1066 void ff_MPV_common_defaults(MpegEncContext *s)
/* Default scale tables: MPEG-1 DC scaling and identity chroma qscale. */
1068 s->y_dc_scale_table =
1069 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1070 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1071 s->progressive_frame = 1;
1072 s->progressive_sequence = 1;
1073 s->picture_structure = PICT_FRAME;
1075 s->coded_picture_number = 0;
1076 s->picture_number = 0;
1081 s->slice_context_count = 1;
1085 * Set the given MpegEncContext to defaults for decoding.
1086 * the changed fields will not depend upon
1087 * the prior state of the MpegEncContext.
1089 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Currently identical to the common defaults; decoder-specific defaults
 * would go here. */
1091 ff_MPV_common_defaults(s);
/* Initialise the error-resilience context: mirror the macroblock geometry
 * and shared tables from the MpegEncContext, allocate the ER work buffers,
 * and install mpeg_er_decode_mb as the concealment callback. Returns 0 or
 * AVERROR(ENOMEM) (buffers freed on the elided failure path). */
1094 static int init_er(MpegEncContext *s)
1096 ERContext *er = &s->er;
1097 int mb_array_size = s->mb_height * s->mb_stride;
1100 er->avctx = s->avctx;
1103 er->mb_index2xy = s->mb_index2xy;
1104 er->mb_num = s->mb_num;
1105 er->mb_width = s->mb_width;
1106 er->mb_height = s->mb_height;
1107 er->mb_stride = s->mb_stride;
1108 er->b8_stride = s->b8_stride;
1110 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1111 er->error_status_table = av_mallocz(mb_array_size);
1112 if (!er->er_temp_buffer || !er->error_status_table)
/* These tables are shared with (owned by) the MpegEncContext. */
1115 er->mbskip_table = s->mbskip_table;
1116 er->mbintra_table = s->mbintra_table;
1118 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1119 er->dc_val[i] = s->dc_val[i];
1121 er->decode_mb = mpeg_er_decode_mb;
1126 av_freep(&er->er_temp_buffer);
1127 av_freep(&er->error_status_table);
1128 return AVERROR(ENOMEM);
1132 * Initialize and allocates MpegEncContext fields dependent on the resolution.
1134 static int init_context_frame(MpegEncContext *s)
1136 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1138 s->mb_width = (s->width + 15) / 16;
1139 s->mb_stride = s->mb_width + 1;
1140 s->b8_stride = s->mb_width * 2 + 1;
1141 mb_array_size = s->mb_height * s->mb_stride;
1142 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1144 /* set default edge pos, will be overridden
1145 * in decode_header if needed */
1146 s->h_edge_pos = s->mb_width * 16;
1147 s->v_edge_pos = s->mb_height * 16;
1149 s->mb_num = s->mb_width * s->mb_height;
1154 s->block_wrap[3] = s->b8_stride;
1156 s->block_wrap[5] = s->mb_stride;
1158 y_size = s->b8_stride * (2 * s->mb_height + 1);
1159 c_size = s->mb_stride * (s->mb_height + 1);
1160 yc_size = y_size + 2 * c_size;
1162 if (s->mb_height & 1)
1163 yc_size += 2*s->b8_stride + 2*s->mb_stride;
1165 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
1166 for (y = 0; y < s->mb_height; y++)
1167 for (x = 0; x < s->mb_width; x++)
1168 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1170 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1173 /* Allocate MV tables */
1174 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1175 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1176 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1177 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1178 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1179 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1180 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1181 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1182 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1183 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1184 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1185 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1187 /* Allocate MB type table */
1188 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1190 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1192 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1193 mb_array_size * sizeof(float), fail);
1194 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1195 mb_array_size * sizeof(float), fail);
1199 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1200 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1201 /* interlaced direct mode decoding tables */
1202 for (i = 0; i < 2; i++) {
1204 for (j = 0; j < 2; j++) {
1205 for (k = 0; k < 2; k++) {
1206 FF_ALLOCZ_OR_GOTO(s->avctx,
1207 s->b_field_mv_table_base[i][j][k],
1208 mv_table_size * 2 * sizeof(int16_t),
1210 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1213 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1214 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1215 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1217 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1220 if (s->out_format == FMT_H263) {
1222 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1223 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1225 /* cbp, ac_pred, pred_dir */
1226 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1227 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1230 if (s->h263_pred || s->h263_plus || !s->encoding) {
1232 // MN: we need these for error resilience of intra-frames
1233 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1234 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1235 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1236 s->dc_val[2] = s->dc_val[1] + c_size;
1237 for (i = 0; i < yc_size; i++)
1238 s->dc_val_base[i] = 1024;
1241 /* which mb is an intra block */
1242 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1243 memset(s->mbintra_table, 1, mb_array_size);
1245 /* init macroblock skip table */
1246 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1247 // Note the + 1 is for a quicker mpeg4 slice_end detection
1251 return AVERROR(ENOMEM);
1255 * init common structure for both encoder and decoder.
1256 * this assumes that some variables like width/height are already set
1258 av_cold int ff_MPV_common_init(MpegEncContext *s)
1261 int nb_slices = (HAVE_THREADS &&
1262 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1263 s->avctx->thread_count : 1;
1265 if (s->encoding && s->avctx->slices)
1266 nb_slices = s->avctx->slices;
1268 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1269 s->mb_height = (s->height + 31) / 32 * 2;
1271 s->mb_height = (s->height + 15) / 16;
1273 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1274 av_log(s->avctx, AV_LOG_ERROR,
1275 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1279 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1282 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1284 max_slices = MAX_THREADS;
1285 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1286 " reducing to %d\n", nb_slices, max_slices);
1287 nb_slices = max_slices;
1290 if ((s->width || s->height) &&
1291 av_image_check_size(s->width, s->height, 0, s->avctx))
1294 ff_dct_common_init(s);
1296 s->flags = s->avctx->flags;
1297 s->flags2 = s->avctx->flags2;
1299 /* set chroma shifts */
1300 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1302 &s->chroma_y_shift);
1304 /* convert fourcc to upper case */
1305 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1307 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1309 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1310 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1311 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1312 s->picture[i].f = av_frame_alloc();
1313 if (!s->picture[i].f)
1316 memset(&s->next_picture, 0, sizeof(s->next_picture));
1317 memset(&s->last_picture, 0, sizeof(s->last_picture));
1318 memset(&s->current_picture, 0, sizeof(s->current_picture));
1319 memset(&s->new_picture, 0, sizeof(s->new_picture));
1320 s->next_picture.f = av_frame_alloc();
1321 if (!s->next_picture.f)
1323 s->last_picture.f = av_frame_alloc();
1324 if (!s->last_picture.f)
1326 s->current_picture.f = av_frame_alloc();
1327 if (!s->current_picture.f)
1329 s->new_picture.f = av_frame_alloc();
1330 if (!s->new_picture.f)
1333 if (init_context_frame(s))
1336 s->parse_context.state = -1;
1338 s->context_initialized = 1;
1339 s->thread_context[0] = s;
1341 // if (s->width && s->height) {
1342 if (nb_slices > 1) {
1343 for (i = 1; i < nb_slices; i++) {
1344 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1345 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1348 for (i = 0; i < nb_slices; i++) {
1349 if (init_duplicate_context(s->thread_context[i]) < 0)
1351 s->thread_context[i]->start_mb_y =
1352 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1353 s->thread_context[i]->end_mb_y =
1354 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1357 if (init_duplicate_context(s) < 0)
1360 s->end_mb_y = s->mb_height;
1362 s->slice_context_count = nb_slices;
1367 ff_MPV_common_end(s);
1372 * Frees and resets those MpegEncContext fields that depend on the resolution.
1373 * Used during resolution changes to avoid a full reinitialization of the
1376 static int free_context_frame(MpegEncContext *s)
1380 av_freep(&s->mb_type);
1381 av_freep(&s->p_mv_table_base);
1382 av_freep(&s->b_forw_mv_table_base);
1383 av_freep(&s->b_back_mv_table_base);
1384 av_freep(&s->b_bidir_forw_mv_table_base);
1385 av_freep(&s->b_bidir_back_mv_table_base);
1386 av_freep(&s->b_direct_mv_table_base);
1387 s->p_mv_table = NULL;
1388 s->b_forw_mv_table = NULL;
1389 s->b_back_mv_table = NULL;
1390 s->b_bidir_forw_mv_table = NULL;
1391 s->b_bidir_back_mv_table = NULL;
1392 s->b_direct_mv_table = NULL;
1393 for (i = 0; i < 2; i++) {
1394 for (j = 0; j < 2; j++) {
1395 for (k = 0; k < 2; k++) {
1396 av_freep(&s->b_field_mv_table_base[i][j][k]);
1397 s->b_field_mv_table[i][j][k] = NULL;
1399 av_freep(&s->b_field_select_table[i][j]);
1400 av_freep(&s->p_field_mv_table_base[i][j]);
1401 s->p_field_mv_table[i][j] = NULL;
1403 av_freep(&s->p_field_select_table[i]);
1406 av_freep(&s->dc_val_base);
1407 av_freep(&s->coded_block_base);
1408 av_freep(&s->mbintra_table);
1409 av_freep(&s->cbp_table);
1410 av_freep(&s->pred_dir_table);
1412 av_freep(&s->mbskip_table);
1414 av_freep(&s->er.error_status_table);
1415 av_freep(&s->er.er_temp_buffer);
1416 av_freep(&s->mb_index2xy);
1417 av_freep(&s->lambda_table);
1419 av_freep(&s->cplx_tab);
1420 av_freep(&s->bits_tab);
1422 s->linesize = s->uvlinesize = 0;
1427 int ff_MPV_common_frame_size_change(MpegEncContext *s)
1431 if (s->slice_context_count > 1) {
1432 for (i = 0; i < s->slice_context_count; i++) {
1433 free_duplicate_context(s->thread_context[i]);
1435 for (i = 1; i < s->slice_context_count; i++) {
1436 av_freep(&s->thread_context[i]);
1439 free_duplicate_context(s);
1441 if ((err = free_context_frame(s)) < 0)
1445 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1446 s->picture[i].needs_realloc = 1;
1449 s->last_picture_ptr =
1450 s->next_picture_ptr =
1451 s->current_picture_ptr = NULL;
1454 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1455 s->mb_height = (s->height + 31) / 32 * 2;
1457 s->mb_height = (s->height + 15) / 16;
1459 if ((s->width || s->height) &&
1460 av_image_check_size(s->width, s->height, 0, s->avctx))
1461 return AVERROR_INVALIDDATA;
1463 if ((err = init_context_frame(s)))
1466 s->thread_context[0] = s;
1468 if (s->width && s->height) {
1469 int nb_slices = s->slice_context_count;
1470 if (nb_slices > 1) {
1471 for (i = 1; i < nb_slices; i++) {
1472 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1473 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1476 for (i = 0; i < nb_slices; i++) {
1477 if (init_duplicate_context(s->thread_context[i]) < 0)
1479 s->thread_context[i]->start_mb_y =
1480 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1481 s->thread_context[i]->end_mb_y =
1482 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1485 err = init_duplicate_context(s);
1489 s->end_mb_y = s->mb_height;
1491 s->slice_context_count = nb_slices;
1496 ff_MPV_common_end(s);
1500 /* init common structure for both encoder and decoder */
1501 void ff_MPV_common_end(MpegEncContext *s)
1505 if (s->slice_context_count > 1) {
1506 for (i = 0; i < s->slice_context_count; i++) {
1507 free_duplicate_context(s->thread_context[i]);
1509 for (i = 1; i < s->slice_context_count; i++) {
1510 av_freep(&s->thread_context[i]);
1512 s->slice_context_count = 1;
1513 } else free_duplicate_context(s);
1515 av_freep(&s->parse_context.buffer);
1516 s->parse_context.buffer_size = 0;
1518 av_freep(&s->bitstream_buffer);
1519 s->allocated_bitstream_buffer_size = 0;
1522 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1523 ff_free_picture_tables(&s->picture[i]);
1524 ff_mpeg_unref_picture(s, &s->picture[i]);
1525 av_frame_free(&s->picture[i].f);
1528 av_freep(&s->picture);
1529 ff_free_picture_tables(&s->last_picture);
1530 ff_mpeg_unref_picture(s, &s->last_picture);
1531 av_frame_free(&s->last_picture.f);
1532 ff_free_picture_tables(&s->current_picture);
1533 ff_mpeg_unref_picture(s, &s->current_picture);
1534 av_frame_free(&s->current_picture.f);
1535 ff_free_picture_tables(&s->next_picture);
1536 ff_mpeg_unref_picture(s, &s->next_picture);
1537 av_frame_free(&s->next_picture.f);
1538 ff_free_picture_tables(&s->new_picture);
1539 ff_mpeg_unref_picture(s, &s->new_picture);
1540 av_frame_free(&s->new_picture.f);
1542 free_context_frame(s);
1544 s->context_initialized = 0;
1545 s->last_picture_ptr =
1546 s->next_picture_ptr =
1547 s->current_picture_ptr = NULL;
1548 s->linesize = s->uvlinesize = 0;
1551 av_cold void ff_init_rl(RLTable *rl,
1552 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1554 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1555 uint8_t index_run[MAX_RUN + 1];
1556 int last, run, level, start, end, i;
1558 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1559 if (static_store && rl->max_level[0])
1562 /* compute max_level[], max_run[] and index_run[] */
1563 for (last = 0; last < 2; last++) {
1572 memset(max_level, 0, MAX_RUN + 1);
1573 memset(max_run, 0, MAX_LEVEL + 1);
1574 memset(index_run, rl->n, MAX_RUN + 1);
1575 for (i = start; i < end; i++) {
1576 run = rl->table_run[i];
1577 level = rl->table_level[i];
1578 if (index_run[run] == rl->n)
1580 if (level > max_level[run])
1581 max_level[run] = level;
1582 if (run > max_run[level])
1583 max_run[level] = run;
1586 rl->max_level[last] = static_store[last];
1588 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1589 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1591 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1593 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1594 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1596 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1598 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1599 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1603 av_cold void ff_init_vlc_rl(RLTable *rl)
1607 for (q = 0; q < 32; q++) {
1609 int qadd = (q - 1) | 1;
1615 for (i = 0; i < rl->vlc.table_size; i++) {
1616 int code = rl->vlc.table[i][0];
1617 int len = rl->vlc.table[i][1];
1620 if (len == 0) { // illegal code
1623 } else if (len < 0) { // more bits needed
1627 if (code == rl->n) { // esc
1631 run = rl->table_run[code] + 1;
1632 level = rl->table_level[code] * qmul + qadd;
1633 if (code >= rl->last) run += 192;
1636 rl->rl_vlc[q][i].len = len;
1637 rl->rl_vlc[q][i].level = level;
1638 rl->rl_vlc[q][i].run = run;
1643 static void release_unused_pictures(MpegEncContext *s)
1647 /* release non reference frames */
1648 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1649 if (!s->picture[i].reference)
1650 ff_mpeg_unref_picture(s, &s->picture[i]);
1654 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1656 if (pic == s->last_picture_ptr)
1658 if (pic->f->buf[0] == NULL)
1660 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1665 static int find_unused_picture(MpegEncContext *s, int shared)
1670 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1671 if (s->picture[i].f->buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1675 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1676 if (pic_is_unused(s, &s->picture[i]))
1681 av_log(s->avctx, AV_LOG_FATAL,
1682 "Internal error, picture buffer overflow\n");
1683 /* We could return -1, but the codec would crash trying to draw into a
1684 * non-existing frame anyway. This is safer than waiting for a random crash.
1685 * Also the return of this is never useful, an encoder must only allocate
1686 * as much as allowed in the specification. This has no relationship to how
1687 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1688 * enough for such valid streams).
1689 * Plus, a decoder has to check stream validity and remove frames if too
1690 * many reference frames are around. Waiting for "OOM" is not correct at
1691 * all. Similarly, missing reference frames have to be replaced by
1692 * interpolated/MC frames, anything else is a bug in the codec ...
1698 int ff_find_unused_picture(MpegEncContext *s, int shared)
1700 int ret = find_unused_picture(s, shared);
1702 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1703 if (s->picture[ret].needs_realloc) {
1704 s->picture[ret].needs_realloc = 0;
1705 ff_free_picture_tables(&s->picture[ret]);
1706 ff_mpeg_unref_picture(s, &s->picture[ret]);
1712 static void gray_frame(AVFrame *frame)
1714 int i, h_chroma_shift, v_chroma_shift;
1716 av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1718 for(i=0; i<frame->height; i++)
1719 memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1720 for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1721 memset(frame->data[1] + frame->linesize[1]*i,
1722 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1723 memset(frame->data[2] + frame->linesize[2]*i,
1724 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1729 * generic function called after decoding
1730 * the header and before a frame is decoded.
1732 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1738 if (!ff_thread_can_start_frame(avctx)) {
1739 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1743 /* mark & release old frames */
1744 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1745 s->last_picture_ptr != s->next_picture_ptr &&
1746 s->last_picture_ptr->f->buf[0]) {
1747 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1750 /* release forgotten pictures */
1751 /* if (mpeg124/h263) */
1752 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1753 if (&s->picture[i] != s->last_picture_ptr &&
1754 &s->picture[i] != s->next_picture_ptr &&
1755 s->picture[i].reference && !s->picture[i].needs_realloc) {
1756 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1757 av_log(avctx, AV_LOG_ERROR,
1758 "releasing zombie picture\n");
1759 ff_mpeg_unref_picture(s, &s->picture[i]);
1763 ff_mpeg_unref_picture(s, &s->current_picture);
1765 release_unused_pictures(s);
1767 if (s->current_picture_ptr &&
1768 s->current_picture_ptr->f->buf[0] == NULL) {
1769 // we already have a unused image
1770 // (maybe it was set before reading the header)
1771 pic = s->current_picture_ptr;
1773 i = ff_find_unused_picture(s, 0);
1775 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1778 pic = &s->picture[i];
1782 if (!s->droppable) {
1783 if (s->pict_type != AV_PICTURE_TYPE_B)
1787 pic->f->coded_picture_number = s->coded_picture_number++;
1789 if (ff_alloc_picture(s, pic, 0) < 0)
1792 s->current_picture_ptr = pic;
1793 // FIXME use only the vars from current_pic
1794 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1795 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1796 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1797 if (s->picture_structure != PICT_FRAME)
1798 s->current_picture_ptr->f->top_field_first =
1799 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1801 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1802 !s->progressive_sequence;
1803 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1805 s->current_picture_ptr->f->pict_type = s->pict_type;
1806 // if (s->flags && CODEC_FLAG_QSCALE)
1807 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1808 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1810 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1811 s->current_picture_ptr)) < 0)
1814 if (s->pict_type != AV_PICTURE_TYPE_B) {
1815 s->last_picture_ptr = s->next_picture_ptr;
1817 s->next_picture_ptr = s->current_picture_ptr;
1819 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1820 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1821 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1822 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1823 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1824 s->pict_type, s->droppable);
1826 if ((s->last_picture_ptr == NULL ||
1827 s->last_picture_ptr->f->buf[0] == NULL) &&
1828 (s->pict_type != AV_PICTURE_TYPE_I ||
1829 s->picture_structure != PICT_FRAME)) {
1830 int h_chroma_shift, v_chroma_shift;
1831 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1832 &h_chroma_shift, &v_chroma_shift);
1833 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1834 av_log(avctx, AV_LOG_DEBUG,
1835 "allocating dummy last picture for B frame\n");
1836 else if (s->pict_type != AV_PICTURE_TYPE_I)
1837 av_log(avctx, AV_LOG_ERROR,
1838 "warning: first frame is no keyframe\n");
1839 else if (s->picture_structure != PICT_FRAME)
1840 av_log(avctx, AV_LOG_DEBUG,
1841 "allocate dummy last picture for field based first keyframe\n");
1843 /* Allocate a dummy frame */
1844 i = ff_find_unused_picture(s, 0);
1846 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1849 s->last_picture_ptr = &s->picture[i];
1851 s->last_picture_ptr->reference = 3;
1852 s->last_picture_ptr->f->key_frame = 0;
1853 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1855 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1856 s->last_picture_ptr = NULL;
1860 if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1861 for(i=0; i<avctx->height; i++)
1862 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1863 0x80, avctx->width);
1864 for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1865 memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1866 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1867 memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1868 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1871 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1872 for(i=0; i<avctx->height; i++)
1873 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
1877 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1878 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1880 if ((s->next_picture_ptr == NULL ||
1881 s->next_picture_ptr->f->buf[0] == NULL) &&
1882 s->pict_type == AV_PICTURE_TYPE_B) {
1883 /* Allocate a dummy frame */
1884 i = ff_find_unused_picture(s, 0);
1886 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1889 s->next_picture_ptr = &s->picture[i];
1891 s->next_picture_ptr->reference = 3;
1892 s->next_picture_ptr->f->key_frame = 0;
1893 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1895 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1896 s->next_picture_ptr = NULL;
1899 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1900 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1903 #if 0 // BUFREF-FIXME
1904 memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1905 memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
1907 if (s->last_picture_ptr) {
1908 ff_mpeg_unref_picture(s, &s->last_picture);
1909 if (s->last_picture_ptr->f->buf[0] &&
1910 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1911 s->last_picture_ptr)) < 0)
1914 if (s->next_picture_ptr) {
1915 ff_mpeg_unref_picture(s, &s->next_picture);
1916 if (s->next_picture_ptr->f->buf[0] &&
1917 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1918 s->next_picture_ptr)) < 0)
1922 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1923 s->last_picture_ptr->f->buf[0]));
1925 if (s->picture_structure!= PICT_FRAME) {
1927 for (i = 0; i < 4; i++) {
1928 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1929 s->current_picture.f->data[i] +=
1930 s->current_picture.f->linesize[i];
1932 s->current_picture.f->linesize[i] *= 2;
1933 s->last_picture.f->linesize[i] *= 2;
1934 s->next_picture.f->linesize[i] *= 2;
1938 s->err_recognition = avctx->err_recognition;
1940 /* set dequantizer, we can't do it during init as
1941 * it might change for mpeg4 and we can't do it in the header
1942 * decode as init is not called for mpeg4 there yet */
1943 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1944 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1945 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1946 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1947 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1948 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1950 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1951 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1954 if (s->avctx->debug & FF_DEBUG_NOMC) {
1955 gray_frame(s->current_picture_ptr->f);
1961 /* called after a frame has been decoded. */
1962 void ff_MPV_frame_end(MpegEncContext *s)
1966 if (s->current_picture.reference)
1967 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1971 * Draw a line between (sx, sy) and (ex, ey) with anti-aliased blending.
1972 * @param w width of the image
1973 * @param h height of the image
1974 * @param stride stride/linesize of the image
1975 * @param color color of the line
1977 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1978 int w, int h, int stride, int color)
1982 sx = av_clip(sx, 0, w - 1);
1983 sy = av_clip(sy, 0, h - 1);
1984 ex = av_clip(ex, 0, w - 1);
1985 ey = av_clip(ey, 0, h - 1);
1987 buf[sy * stride + sx] += color;
1989 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1991 FFSWAP(int, sx, ex);
1992 FFSWAP(int, sy, ey);
1994 buf += sx + sy * stride;
1996 f = ((ey - sy) << 16) / ex;
1997 for (x = 0; x <= ex; x++) {
1999 fr = (x * f) & 0xFFFF;
2000 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2001 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
2005 FFSWAP(int, sx, ex);
2006 FFSWAP(int, sy, ey);
2008 buf += sx + sy * stride;
2011 f = ((ex - sx) << 16) / ey;
2014 for(y= 0; y <= ey; y++){
2016 fr = (y*f) & 0xFFFF;
2017 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2018 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
2024 * Draw an arrow from (ex, ey) -> (sx, sy).
2025 * @param w width of the image
2026 * @param h height of the image
2027 * @param stride stride/linesize of the image
2028 * @param color color of the arrow
2030 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2031 int ey, int w, int h, int stride, int color)
2035 sx = av_clip(sx, -100, w + 100);
2036 sy = av_clip(sy, -100, h + 100);
2037 ex = av_clip(ex, -100, w + 100);
2038 ey = av_clip(ey, -100, h + 100);
2043 if (dx * dx + dy * dy > 3 * 3) {
2046 int length = ff_sqrt((rx * rx + ry * ry) << 8);
2048 // FIXME subpixel accuracy
2049 rx = ROUNDED_DIV(rx * 3 << 4, length);
2050 ry = ROUNDED_DIV(ry * 3 << 4, length);
2052 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2053 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2055 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
2059 * Print debugging info for the given picture.
2061 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2062 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2064 int mb_width, int mb_height, int mb_stride, int quarter_sample)
2066 if (avctx->hwaccel || !mbtype_table
2067 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
2071 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2074 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2075 av_get_picture_type_char(pict->pict_type));
2076 for (y = 0; y < mb_height; y++) {
2077 for (x = 0; x < mb_width; x++) {
2078 if (avctx->debug & FF_DEBUG_SKIP) {
2079 int count = mbskip_table[x + y * mb_stride];
2082 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2084 if (avctx->debug & FF_DEBUG_QP) {
2085 av_log(avctx, AV_LOG_DEBUG, "%2d",
2086 qscale_table[x + y * mb_stride]);
2088 if (avctx->debug & FF_DEBUG_MB_TYPE) {
2089 int mb_type = mbtype_table[x + y * mb_stride];
2090 // Type & MV direction
2091 if (IS_PCM(mb_type))
2092 av_log(avctx, AV_LOG_DEBUG, "P");
2093 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2094 av_log(avctx, AV_LOG_DEBUG, "A");
2095 else if (IS_INTRA4x4(mb_type))
2096 av_log(avctx, AV_LOG_DEBUG, "i");
2097 else if (IS_INTRA16x16(mb_type))
2098 av_log(avctx, AV_LOG_DEBUG, "I");
2099 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2100 av_log(avctx, AV_LOG_DEBUG, "d");
2101 else if (IS_DIRECT(mb_type))
2102 av_log(avctx, AV_LOG_DEBUG, "D");
2103 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2104 av_log(avctx, AV_LOG_DEBUG, "g");
2105 else if (IS_GMC(mb_type))
2106 av_log(avctx, AV_LOG_DEBUG, "G");
2107 else if (IS_SKIP(mb_type))
2108 av_log(avctx, AV_LOG_DEBUG, "S");
2109 else if (!USES_LIST(mb_type, 1))
2110 av_log(avctx, AV_LOG_DEBUG, ">");
2111 else if (!USES_LIST(mb_type, 0))
2112 av_log(avctx, AV_LOG_DEBUG, "<");
2114 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2115 av_log(avctx, AV_LOG_DEBUG, "X");
2119 if (IS_8X8(mb_type))
2120 av_log(avctx, AV_LOG_DEBUG, "+");
2121 else if (IS_16X8(mb_type))
2122 av_log(avctx, AV_LOG_DEBUG, "-");
2123 else if (IS_8X16(mb_type))
2124 av_log(avctx, AV_LOG_DEBUG, "|");
2125 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2126 av_log(avctx, AV_LOG_DEBUG, " ");
2128 av_log(avctx, AV_LOG_DEBUG, "?");
2131 if (IS_INTERLACED(mb_type))
2132 av_log(avctx, AV_LOG_DEBUG, "=");
2134 av_log(avctx, AV_LOG_DEBUG, " ");
2137 av_log(avctx, AV_LOG_DEBUG, "\n");
2141 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2142 (avctx->debug_mv)) {
2143 const int shift = 1 + quarter_sample;
2147 int h_chroma_shift, v_chroma_shift, block_height;
2148 const int width = avctx->width;
2149 const int height = avctx->height;
2150 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2151 const int mv_stride = (mb_width << mv_sample_log2) +
2152 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2154 *low_delay = 0; // needed to see the vectors without trashing the buffers
2156 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2158 av_frame_make_writable(pict);
2160 pict->opaque = NULL;
2161 ptr = pict->data[0];
2162 block_height = 16 >> v_chroma_shift;
2164 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2166 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2167 const int mb_index = mb_x + mb_y * mb_stride;
2168 if ((avctx->debug_mv) && motion_val[0]) {
2170 for (type = 0; type < 3; type++) {
2174 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2175 (pict->pict_type!= AV_PICTURE_TYPE_P))
2180 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2181 (pict->pict_type!= AV_PICTURE_TYPE_B))
2186 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2187 (pict->pict_type!= AV_PICTURE_TYPE_B))
2192 if (!USES_LIST(mbtype_table[mb_index], direction))
2195 if (IS_8X8(mbtype_table[mb_index])) {
2197 for (i = 0; i < 4; i++) {
2198 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2199 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2200 int xy = (mb_x * 2 + (i & 1) +
2201 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2202 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2203 int my = (motion_val[direction][xy][1] >> shift) + sy;
2204 draw_arrow(ptr, sx, sy, mx, my, width,
2205 height, pict->linesize[0], 100);
2207 } else if (IS_16X8(mbtype_table[mb_index])) {
2209 for (i = 0; i < 2; i++) {
2210 int sx = mb_x * 16 + 8;
2211 int sy = mb_y * 16 + 4 + 8 * i;
2212 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2213 int mx = (motion_val[direction][xy][0] >> shift);
2214 int my = (motion_val[direction][xy][1] >> shift);
2216 if (IS_INTERLACED(mbtype_table[mb_index]))
2219 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2220 height, pict->linesize[0], 100);
2222 } else if (IS_8X16(mbtype_table[mb_index])) {
2224 for (i = 0; i < 2; i++) {
2225 int sx = mb_x * 16 + 4 + 8 * i;
2226 int sy = mb_y * 16 + 8;
2227 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2228 int mx = motion_val[direction][xy][0] >> shift;
2229 int my = motion_val[direction][xy][1] >> shift;
2231 if (IS_INTERLACED(mbtype_table[mb_index]))
2234 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2235 height, pict->linesize[0], 100);
2238 int sx= mb_x * 16 + 8;
2239 int sy= mb_y * 16 + 8;
2240 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2241 int mx= (motion_val[direction][xy][0]>>shift) + sx;
2242 int my= (motion_val[direction][xy][1]>>shift) + sy;
2243 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
2247 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2248 uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2249 0x0101010101010101ULL;
2251 for (y = 0; y < block_height; y++) {
2252 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2253 (block_height * mb_y + y) *
2254 pict->linesize[1]) = c;
2255 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2256 (block_height * mb_y + y) *
2257 pict->linesize[2]) = c;
2260 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2262 int mb_type = mbtype_table[mb_index];
2265 #define COLOR(theta, r) \
2266 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2267 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2271 if (IS_PCM(mb_type)) {
2273 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2274 IS_INTRA16x16(mb_type)) {
2276 } else if (IS_INTRA4x4(mb_type)) {
2278 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2280 } else if (IS_DIRECT(mb_type)) {
2282 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2284 } else if (IS_GMC(mb_type)) {
2286 } else if (IS_SKIP(mb_type)) {
2288 } else if (!USES_LIST(mb_type, 1)) {
2290 } else if (!USES_LIST(mb_type, 0)) {
2293 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2297 u *= 0x0101010101010101ULL;
2298 v *= 0x0101010101010101ULL;
2299 for (y = 0; y < block_height; y++) {
2300 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2301 (block_height * mb_y + y) * pict->linesize[1]) = u;
2302 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2303 (block_height * mb_y + y) * pict->linesize[2]) = v;
2307 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2308 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2309 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2310 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2311 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2313 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2314 for (y = 0; y < 16; y++)
2315 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2316 pict->linesize[0]] ^= 0x80;
2318 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2319 int dm = 1 << (mv_sample_log2 - 2);
2320 for (i = 0; i < 4; i++) {
2321 int sx = mb_x * 16 + 8 * (i & 1);
2322 int sy = mb_y * 16 + 8 * (i >> 1);
2323 int xy = (mb_x * 2 + (i & 1) +
2324 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2326 int32_t *mv = (int32_t *) &motion_val[0][xy];
2327 if (mv[0] != mv[dm] ||
2328 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2329 for (y = 0; y < 8; y++)
2330 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2331 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2332 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2333 pict->linesize[0]) ^= 0x8080808080808080ULL;
2337 if (IS_INTERLACED(mb_type) &&
2338 avctx->codec->id == AV_CODEC_ID_H264) {
2342 mbskip_table[mb_index] = 0;
2348 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2350 ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
2351 p->qscale_table, p->motion_val, &s->low_delay,
2352 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2355 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2357 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2358 int offset = 2*s->mb_stride + 1;
2360 return AVERROR(ENOMEM);
2361 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2362 ref->size -= offset;
2363 ref->data += offset;
2364 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/**
 * Half-pel motion compensation for one block in lowres mode.
 *
 * Computes the integer source position and sub-pel fraction from the
 * motion vector at the current lowres shift, falls back to the edge
 * emulation buffer when the reference block would read outside the
 * picture, and applies the h264 chroma MC function (reused here as a
 * generic bilinear interpolator) to produce the w x h destination block.
 */
2367 static inline int hpel_motion_lowres(MpegEncContext *s,
2368 uint8_t *dest, uint8_t *src,
2369 int field_based, int field_select,
2370 int src_x, int src_y,
2371 int width, int height, ptrdiff_t stride,
2372 int h_edge_pos, int v_edge_pos,
2373 int w, int h, h264_chroma_mc_func *pix_op,
2374 int motion_x, int motion_y)
2376 const int lowres = s->avctx->lowres;
2377 const int op_index = FFMIN(lowres, 3);
/* s_mask selects the sub-pel fraction bits kept at this lowres level. */
2378 const int s_mask = (2 << lowres) - 1;
2382 if (s->quarter_sample) {
2387 sx = motion_x & s_mask;
2388 sy = motion_y & s_mask;
/* NOTE: ">> lowres + 1" parses as ">> (lowres + 1)" — this is the
 * intended half-pel-to-integer conversion, not a precedence bug. */
2389 src_x += motion_x >> lowres + 1;
2390 src_y += motion_y >> lowres + 1;
2392 src += src_y * stride + src_x;
/* Out-of-picture reads (including sub-pel taps, hence the !!sx/!!sy
 * terms) go through the edge emulation buffer. */
2394 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2395 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2396 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2397 s->linesize, s->linesize,
2398 w + 1, (h + 1) << field_based,
2399 src_x, src_y << field_based,
2400 h_edge_pos, v_edge_pos);
2401 src = s->edge_emu_buffer;
/* Rescale the sub-pel fraction to the 1/8-pel domain the MC asm expects. */
2405 sx = (sx << 2) >> lowres;
2406 sy = (sy << 2) >> lowres;
2409 pix_op[op_index](dest, src, stride, h, sx, sy);
2413 /* apply one mpeg motion vector to the three components */
/**
 * Apply one MPEG motion vector to luma and both chroma planes in lowres
 * mode.  Handles frame and field based prediction, the per-format chroma
 * MV derivation (H.263 vs H.261 vs MPEG 420/422/444), edge emulation for
 * out-of-picture references, and finally runs the (bilinear) chroma MC
 * functions on all three planes.
 */
2414 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2421 uint8_t **ref_picture,
2422 h264_chroma_mc_func *pix_op,
2423 int motion_x, int motion_y,
2426 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2427 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2428 ptrdiff_t uvlinesize, linesize;
2429 const int lowres = s->avctx->lowres;
2430 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2431 const int block_s = 8>>lowres;
2432 const int s_mask = (2 << lowres) - 1;
2433 const int h_edge_pos = s->h_edge_pos >> lowres;
2434 const int v_edge_pos = s->v_edge_pos >> lowres;
/* Field pictures double the stride so a "line" steps over the other field. */
2435 linesize = s->current_picture.f->linesize[0] << field_based;
2436 uvlinesize = s->current_picture.f->linesize[1] << field_based;
2438 // FIXME obviously not perfect but qpel will not work in lowres anyway
2439 if (s->quarter_sample) {
/* Compensate the vertical MV for the field offset at this lowres level. */
2445 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2448 sx = motion_x & s_mask;
2449 sy = motion_y & s_mask;
/* ">> lowres + 1" is ">> (lowres + 1)" — intentional (half-pel units). */
2450 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2451 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* Chroma source position depends on the bitstream format's chroma MV rules. */
2453 if (s->out_format == FMT_H263) {
2454 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2455 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2456 uvsrc_x = src_x >> 1;
2457 uvsrc_y = src_y >> 1;
2458 } else if (s->out_format == FMT_H261) {
2459 // even chroma mv's are full pel in H261
2462 uvsx = (2 * mx) & s_mask;
2463 uvsy = (2 * my) & s_mask;
2464 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2465 uvsrc_y = mb_y * block_s + (my >> lowres);
2467 if(s->chroma_y_shift){
2472 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2473 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2475 if(s->chroma_x_shift){
2479 uvsy = motion_y & s_mask;
2481 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2484 uvsx = motion_x & s_mask;
2485 uvsy = motion_y & s_mask;
2492 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2493 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2494 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* Edge emulation when the 16x16 luma read (plus sub-pel taps) leaves the
 * picture; chroma is emulated alongside unless GRAY-only decoding. */
2496 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2497 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2498 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2499 linesize >> field_based, linesize >> field_based,
2500 17, 17 + field_based,
2501 src_x, src_y << field_based, h_edge_pos,
2503 ptr_y = s->edge_emu_buffer;
2504 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2505 uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2506 uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2507 s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2508 uvlinesize >> field_based, uvlinesize >> field_based,
2510 uvsrc_x, uvsrc_y << field_based,
2511 h_edge_pos >> 1, v_edge_pos >> 1);
2512 s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2513 uvlinesize >> field_based,uvlinesize >> field_based,
2515 uvsrc_x, uvsrc_y << field_based,
2516 h_edge_pos >> 1, v_edge_pos >> 1);
2522 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
/* Bottom field: advance destination (and source, below) by one line. */
2524 dest_y += s->linesize;
2525 dest_cb += s->uvlinesize;
2526 dest_cr += s->uvlinesize;
2530 ptr_y += s->linesize;
2531 ptr_cb += s->uvlinesize;
2532 ptr_cr += s->uvlinesize;
/* Rescale sub-pel fractions to the MC function's 1/8-pel domain. */
2535 sx = (sx << 2) >> lowres;
2536 sy = (sy << 2) >> lowres;
2537 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2539 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2540 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2541 uvsx = (uvsx << 2) >> lowres;
2542 uvsy = (uvsy << 2) >> lowres;
2544 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2545 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2548 // FIXME h261 lowres loop filter
/**
 * Chroma motion compensation for 4MV (four luma vectors) macroblocks in
 * lowres mode.  The four luma MVs are combined into one chroma MV with
 * the H.263 rounding rule, then both chroma planes are motion compensated
 * (with edge emulation when the reference block leaves the picture).
 */
2551 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2552 uint8_t *dest_cb, uint8_t *dest_cr,
2553 uint8_t **ref_picture,
2554 h264_chroma_mc_func * pix_op,
2557 const int lowres = s->avctx->lowres;
2558 const int op_index = FFMIN(lowres, 3);
2559 const int block_s = 8 >> lowres;
2560 const int s_mask = (2 << lowres) - 1;
/* ">> lowres + 1" parses as ">> (lowres + 1)" — intentional, chroma is
 * subsampled by 2 relative to luma here. */
2561 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2562 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2563 int emu = 0, src_x, src_y, sx, sy;
2567 if (s->quarter_sample) {
2572 /* In case of 8X8, we construct a single chroma motion vector
2573 with a special rounding */
2574 mx = ff_h263_round_chroma(mx);
2575 my = ff_h263_round_chroma(my);
2579 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2580 src_y = s->mb_y * block_s + (my >> lowres + 1);
2582 offset = src_y * s->uvlinesize + src_x;
2583 ptr = ref_picture[1] + offset;
2584 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2585 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2586 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2587 s->uvlinesize, s->uvlinesize,
2589 src_x, src_y, h_edge_pos, v_edge_pos);
2590 ptr = s->edge_emu_buffer;
2593 sx = (sx << 2) >> lowres;
2594 sy = (sy << 2) >> lowres;
2595 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr plane: same offset and (if set above) same emulation path as Cb. */
2597 ptr = ref_picture[2] + offset;
2599 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2600 s->uvlinesize, s->uvlinesize,
2602 src_x, src_y, h_edge_pos, v_edge_pos);
2603 ptr = s->edge_emu_buffer;
2605 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2609 * motion compensation of a single macroblock
2611 * @param dest_y luma destination pointer
2612 * @param dest_cb chroma cb/u destination pointer
2613 * @param dest_cr chroma cr/v destination pointer
2614 * @param dir direction (0->forward, 1->backward)
2615 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2616 * @param pix_op halfpel motion compensation function (average or put normally)
2617 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Dispatches on s->mv_type: 16x16, 4x8x8 (4MV), field, 16x8 and dual-prime
 * style prediction, all in the lowres decode path. */
2619 static inline void MPV_motion_lowres(MpegEncContext *s,
2620 uint8_t *dest_y, uint8_t *dest_cb,
2622 int dir, uint8_t **ref_picture,
2623 h264_chroma_mc_func *pix_op)
2627 const int lowres = s->avctx->lowres;
2628 const int block_s = 8 >>lowres;
2633 switch (s->mv_type) {
/* MV_TYPE_16X16: one vector for the whole macroblock. */
2635 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2637 ref_picture, pix_op,
2638 s->mv[dir][0][0], s->mv[dir][0][1],
/* MV_TYPE_8X8: four luma vectors; chroma handled once afterwards with the
 * averaged vector (chroma_4mv_motion_lowres). */
2644 for (i = 0; i < 4; i++) {
2645 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2646 s->linesize) * block_s,
2647 ref_picture[0], 0, 0,
2648 (2 * mb_x + (i & 1)) * block_s,
2649 (2 * mb_y + (i >> 1)) * block_s,
2650 s->width, s->height, s->linesize,
2651 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2652 block_s, block_s, pix_op,
2653 s->mv[dir][i][0], s->mv[dir][i][1]);
2655 mx += s->mv[dir][i][0];
2656 my += s->mv[dir][i][1];
2659 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2660 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* MV_TYPE_FIELD: two vectors, one per field. */
2664 if (s->picture_structure == PICT_FRAME) {
2666 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2667 1, 0, s->field_select[dir][0],
2668 ref_picture, pix_op,
2669 s->mv[dir][0][0], s->mv[dir][0][1],
2672 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2673 1, 1, s->field_select[dir][1],
2674 ref_picture, pix_op,
2675 s->mv[dir][1][0], s->mv[dir][1][1],
/* Field picture referencing the opposite parity of the current frame:
 * the data lives in the current picture, not the reference. */
2678 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2679 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2680 ref_picture = s->current_picture_ptr->f->data;
2683 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2684 0, 0, s->field_select[dir][0],
2685 ref_picture, pix_op,
2687 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* MV_TYPE_16X8: two vectors, upper and lower half of the macroblock. */
2691 for (i = 0; i < 2; i++) {
2692 uint8_t **ref2picture;
2694 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2695 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2696 ref2picture = ref_picture;
2698 ref2picture = s->current_picture_ptr->f->data;
2701 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2702 0, 0, s->field_select[dir][i],
2703 ref2picture, pix_op,
2704 s->mv[dir][i][0], s->mv[dir][i][1] +
2705 2 * block_s * i, block_s, mb_y >> 1);
2707 dest_y += 2 * block_s * s->linesize;
2708 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2709 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* MV_TYPE_DMV (dual prime): put first prediction, then average in the
 * opposite-parity prediction. */
2713 if (s->picture_structure == PICT_FRAME) {
2714 for (i = 0; i < 2; i++) {
2716 for (j = 0; j < 2; j++) {
2717 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2719 ref_picture, pix_op,
2720 s->mv[dir][2 * i + j][0],
2721 s->mv[dir][2 * i + j][1],
2724 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2727 for (i = 0; i < 2; i++) {
2728 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2729 0, 0, s->picture_structure != i + 1,
2730 ref_picture, pix_op,
2731 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2732 2 * block_s, mb_y >> 1);
2734 // after put we make avg of the same block
2735 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2737 // opposite parity is always in the same
2738 // frame if this is second field
2739 if (!s->first_field) {
2740 ref_picture = s->current_picture_ptr->f->data;
2751 * find the lowest MB row referenced in the MVs
/**
 * Used by frame-threaded decoding to know how far the reference frame
 * must be decoded before the current macroblock can be motion compensated.
 * Returns the clamped MB row, or mb_height-1 (i.e. "wait for everything")
 * for field pictures / GMC where the computation doesn't apply.
 */
2753 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2755 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2756 int my, off, i, mvs;
2758 if (s->picture_structure != PICT_FRAME || s->mcsel)
/* NOTE(review): the switch cases setting 'mvs' per mv_type are elided in
 * this excerpt. */
2761 switch (s->mv_type) {
2775 for (i = 0; i < mvs; i++) {
2776 my = s->mv[dir][i][1]<<qpel_shift;
2777 my_max = FFMAX(my_max, my);
2778 my_min = FFMIN(my_min, my);
/* +63 >> 6: convert the extreme qpel MV to a MB-row offset, rounding up. */
2781 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2783 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2785 return s->mb_height-1;
2788 /* put block[] to dest[] */
/* Intra path: dequantize block i at the given qscale, then IDCT and
 * overwrite (put) the result into dest. */
2789 static inline void put_dct(MpegEncContext *s,
2790 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2792 s->dct_unquantize_intra(s, block, i, qscale);
2793 s->dsp.idct_put (dest, line_size, block);
2796 /* add block[] to dest[] */
/* Inter path (already dequantized): IDCT block i and add the residual to
 * dest, but only if the block has any coded coefficients. */
2797 static inline void add_dct(MpegEncContext *s,
2798 int16_t *block, int i, uint8_t *dest, int line_size)
2800 if (s->block_last_index[i] >= 0) {
2801 s->dsp.idct_add (dest, line_size, block);
/* Inter path with dequantization: if block i is coded, inter-dequantize
 * it at qscale, then IDCT-add the residual onto dest. */
2805 static inline void add_dequant_dct(MpegEncContext *s,
2806 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2808 if (s->block_last_index[i] >= 0) {
2809 s->dct_unquantize_inter(s, block, i, qscale);
2811 s->dsp.idct_add (dest, line_size, block);
2816 * Clean dc, ac, coded_block for the current non-intra MB.
/**
 * Reset the intra prediction state (DC predictors to 1024, AC predictors
 * to 0, coded_block flags) for the current macroblock so a later intra MB
 * does not predict from stale inter data.  Luma uses b8_stride indexing
 * (4 8x8 blocks), chroma uses mb_stride (1 block per plane).
 */
2818 void ff_clean_intra_table_entries(MpegEncContext *s)
2820 int wrap = s->b8_stride;
2821 int xy = s->block_index[0];
/* 1024 is the neutral DC predictor (128 << 3). */
2824 s->dc_val[0][xy + 1 ] =
2825 s->dc_val[0][xy + wrap] =
2826 s->dc_val[0][xy + 1 + wrap] = 1024;
2828 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2829 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2830 if (s->msmpeg4_version>=3) {
2831 s->coded_block[xy ] =
2832 s->coded_block[xy + 1 ] =
2833 s->coded_block[xy + wrap] =
2834 s->coded_block[xy + 1 + wrap] = 0;
/* Switch to chroma indexing (one entry per MB). */
2837 wrap = s->mb_stride;
2838 xy = s->mb_x + s->mb_y * wrap;
2840 s->dc_val[2][xy] = 1024;
2842 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2843 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2845 s->mbintra_table[xy]= 0;
2848 /* generic function called after a macroblock has been parsed by the
2849 decoder or after it has been encoded by the encoder.
2851 Important variables used:
2852 s->mb_intra : true if intra macroblock
2853 s->mv_dir : motion vector direction
2854 s->mv_type : motion vector type
2855 s->mv : motion vector
2856 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Template function: lowres_flag and is_mpeg12 are compile-time constants
 * at each instantiation (see ff_MPV_decode_mb), so dead branches are
 * removed by the compiler. */
2858 static av_always_inline
2859 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2860 int lowres_flag, int is_mpeg12)
2862 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* Hardware acceleration takes over the whole MB decode. */
2865 s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
2866 s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
2870 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2871 /* print DCT coefficients */
2873 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2875 for(j=0; j<64; j++){
2876 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2878 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2882 s->current_picture.qscale_table[mb_xy] = s->qscale;
2884 /* update DC predictors for P macroblocks */
2886 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2887 if(s->mbintra_table[mb_xy])
2888 ff_clean_intra_table_entries(s);
2892 s->last_dc[2] = 128 << s->intra_dc_precision;
2895 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2896 s->mbintra_table[mb_xy]=1;
/* Reconstruction can be skipped while encoding when the pixels are never
 * read back (no PSNR, no frame skipping heuristics, RD off). */
2898 if ( (s->flags&CODEC_FLAG_PSNR)
2899 || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
2900 || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2901 uint8_t *dest_y, *dest_cb, *dest_cr;
2902 int dct_linesize, dct_offset;
2903 op_pixels_func (*op_pix)[4];
2904 qpel_mc_func (*op_qpix)[16];
2905 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2906 const int uvlinesize = s->current_picture.f->linesize[1];
2907 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2908 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2910 /* avoid copy if macroblock skipped in last frame too */
2911 /* skip only during decoding as we might trash the buffers during encoding a bit */
2913 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2915 if (s->mb_skipped) {
2917 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2919 } else if(!s->current_picture.reference) {
2922 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT doubles the line stride and offsets the second field. */
2926 dct_linesize = linesize << s->interlaced_dct;
2927 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2931 dest_cb= s->dest[1];
2932 dest_cr= s->dest[2];
/* Not readable (B frames w/o draw_horiz_band): reconstruct into a
 * scratchpad and copy out at the end. */
2934 dest_y = s->b_scratchpad;
2935 dest_cb= s->b_scratchpad+16*linesize;
2936 dest_cr= s->b_scratchpad+32*linesize;
2940 /* motion handling */
2941 /* decoding or more than one mb_type (MC was already done otherwise) */
2944 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2945 if (s->mv_dir & MV_DIR_FORWARD) {
2946 ff_thread_await_progress(&s->last_picture_ptr->tf,
2947 ff_MPV_lowest_referenced_row(s, 0),
2950 if (s->mv_dir & MV_DIR_BACKWARD) {
2951 ff_thread_await_progress(&s->next_picture_ptr->tf,
2952 ff_MPV_lowest_referenced_row(s, 1),
/* lowres path: use the h264 chroma MC functions as interpolators. */
2958 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2960 if (s->mv_dir & MV_DIR_FORWARD) {
2961 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
2962 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2964 if (s->mv_dir & MV_DIR_BACKWARD) {
2965 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
/* full-res path: hpel/qpel MC; second direction averages over the first. */
2968 op_qpix = s->me.qpel_put;
2969 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2970 op_pix = s->hdsp.put_pixels_tab;
2972 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2974 if (s->mv_dir & MV_DIR_FORWARD) {
2975 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2976 op_pix = s->hdsp.avg_pixels_tab;
2977 op_qpix= s->me.qpel_avg;
2979 if (s->mv_dir & MV_DIR_BACKWARD) {
2980 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2985 /* skip dequant / idct if we are really late ;) */
2986 if(s->avctx->skip_idct){
2987 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2988 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2989 || s->avctx->skip_idct >= AVDISCARD_ALL)
2993 /* add dct residue */
/* Codecs whose bitstream stores quantized coefficients (most H.263-family)
 * dequantize here; MPEG-1/2 and MSMPEG4 dequantize during parsing. */
2994 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2995 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2996 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2997 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2998 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2999 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3001 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3002 if (s->chroma_y_shift){
3003 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3004 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3008 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3009 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3010 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3011 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
3014 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3015 add_dct(s, block[0], 0, dest_y , dct_linesize);
3016 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3017 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3018 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3020 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3021 if(s->chroma_y_shift){//Chroma420
3022 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3023 add_dct(s, block[5], 5, dest_cr, uvlinesize);
3026 dct_linesize = uvlinesize << s->interlaced_dct;
3027 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3029 add_dct(s, block[4], 4, dest_cb, dct_linesize);
3030 add_dct(s, block[5], 5, dest_cr, dct_linesize);
3031 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3032 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3033 if(!s->chroma_x_shift){//Chroma444
3034 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3035 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3036 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3037 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3042 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3043 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3046 /* dct only in intra block */
3047 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
3048 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3049 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3050 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3051 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3053 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3054 if(s->chroma_y_shift){
3055 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3056 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3060 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3061 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3062 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3063 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra blocks are already dequantized: plain IDCT put. */
3067 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
3068 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3069 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
3070 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3072 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3073 if(s->chroma_y_shift){
3074 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
3075 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
3078 dct_linesize = uvlinesize << s->interlaced_dct;
3079 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3081 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
3082 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
3083 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3084 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3085 if(!s->chroma_x_shift){//Chroma444
3086 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3087 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3088 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3089 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* Scratchpad path: copy the reconstructed MB into the real destination. */
3097 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3098 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3099 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: select the right compile-time instantiation of
 * MPV_decode_mb_internal (lowres on/off x mpeg12 on/off). */
3104 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
3106 if(s->out_format == FMT_MPEG1) {
3107 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
3108 else MPV_decode_mb_internal(s, block, 0, 1);
3111 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
3112 else MPV_decode_mb_internal(s, block, 0, 0);
/* Notify the application that rows [y, y+h) of the current picture are
 * decoded, via the generic ff_draw_horiz_band helper. */
3115 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3117 ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3118 s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3119 s->first_field, s->low_delay);
/**
 * Recompute the per-MB block indices (into the dc/ac prediction arrays)
 * and the s->dest[] plane pointers for the current (mb_x, mb_y).
 * Indices 0-3 are the four luma 8x8 blocks, 4 and 5 the chroma blocks.
 */
3122 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3123 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3124 const int uvlinesize = s->current_picture.f->linesize[1];
/* mb_size: log2 of the MB edge in pixels at this lowres level (16 >> lowres). */
3125 const int mb_size= 4 - s->avctx->lowres;
3127 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3128 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3129 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3130 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3131 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3132 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3133 //block_index is not used by mpeg2, so it is not affected by chroma_format
3135 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
3136 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3137 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3139 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3141 if(s->picture_structure==PICT_FRAME){
3142 s->dest[0] += s->mb_y * linesize << mb_size;
3143 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3144 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* Field picture: only every other MB row belongs to this field. */
3146 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3147 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3148 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3149 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3155 * Permute an 8x8 block.
3156 * @param block the block which will be permuted according to the given permutation vector
3157 * @param permutation the permutation vector
3158 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3159 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3160 * (inverse) permutated to scantable order!
/* Two-pass: first copy the (at most last+1) live coefficients into a
 * temporary (pass elided in this excerpt), then scatter them back through
 * the permutation vector. */
3162 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3168 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
3170 for(i=0; i<=last; i++){
3171 const int j= scantable[i];
3176 for(i=0; i<=last; i++){
3177 const int j= scantable[i];
3178 const int perm_j= permutation[j];
3179 block[perm_j]= temp[j];
/**
 * Flush/seek handler: drop all picture references and reset the parse
 * context and bitstream buffer so decoding can restart cleanly from the
 * next keyframe.
 */
3183 void ff_mpeg_flush(AVCodecContext *avctx){
3185 MpegEncContext *s = avctx->priv_data;
3187 if(s==NULL || s->picture==NULL)
3190 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3191 ff_mpeg_unref_picture(s, &s->picture[i]);
3192 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3194 ff_mpeg_unref_picture(s, &s->current_picture);
3195 ff_mpeg_unref_picture(s, &s->last_picture);
3196 ff_mpeg_unref_picture(s, &s->next_picture);
3198 s->mb_x= s->mb_y= 0;
3201 s->parse_context.state= -1;
3202 s->parse_context.frame_start_found= 0;
3203 s->parse_context.overread= 0;
3204 s->parse_context.overread_index= 0;
3205 s->parse_context.index= 0;
3206 s->parse_context.last_index= 0;
3207 s->bitstream_buffer_size=0;
3212 * set qscale and update qscale dependent variables.
/* Clamps qscale to [1,31] (lower-bound clamp elided in this excerpt) and
 * refreshes the derived chroma qscale and DC scale factors. */
3214 void ff_set_qscale(MpegEncContext * s, int qscale)
3218 else if (qscale > 31)
3222 s->chroma_qscale= s->chroma_qscale_table[qscale];
3224 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3225 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report to waiting frame threads that decoding of the current picture
 * has reached MB row s->mb_y (skipped for B frames / partitioned frames /
 * after errors, where rows may still change). */
3228 void ff_MPV_report_decode_progress(MpegEncContext *s)
3230 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3231 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3234 #if CONFIG_ERROR_RESILIENCE
/**
 * Fill an error-resilience picture descriptor from a Picture.
 * Zeroes dst first so a NULL src (guard elided in this excerpt) leaves a
 * cleanly empty descriptor; otherwise copies the motion/ref/mb_type
 * pointers the ER code needs.
 */
3235 void ff_mpeg_set_erpic(ERPicture *dst, Picture *src)
3239 memset(dst, 0, sizeof(*dst));
3249 for (i = 0; i < 2; i++) {
3250 dst->motion_val[i] = src->motion_val[i];
3251 dst->ref_index[i] = src->ref_index[i];
3254 dst->mb_type = src->mb_type;
3255 dst->field_picture = src->field_picture;
/* Prepare the error-resilience context for a new frame: mirror the three
 * picture pointers and the timing/prediction parameters into ERContext,
 * then start ER tracking. */
3258 void ff_mpeg_er_frame_start(MpegEncContext *s)
3260 ERContext *er = &s->er;
3262 ff_mpeg_set_erpic(&er->cur_pic, s->current_picture_ptr);
3263 ff_mpeg_set_erpic(&er->next_pic, s->next_picture_ptr);
3264 ff_mpeg_set_erpic(&er->last_pic, s->last_picture_ptr);
3266 er->pp_time = s->pp_time;
3267 er->pb_time = s->pb_time;
3268 er->quarter_sample = s->quarter_sample;
3269 er->partitioned_frame = s->partitioned_frame;
3271 ff_er_frame_start(er);
3273 #endif /* CONFIG_ERROR_RESILIENCE */