2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
37 #include "h264chroma.h"
40 #include "mpegutils.h"
41 #include "mpegvideo.h"
/* Default chroma qscale table: identity mapping, i.e. chroma uses the same
 * qscale as luma for all 32 possible qscale indices. */
48 static const uint8_t ff_default_chroma_qscale_table[32] = {
49 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
50 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
51 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale: constant 8 for every qscale (DC quantizer is fixed). */
54 const uint8_t ff_mpeg1_dc_scale_table[128] = {
55 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
56 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
57 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table for intra_dc_precision == 1 (divisor 4). */
66 static const uint8_t mpeg2_dc_scale_table1[128] = {
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
68 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
69 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table for intra_dc_precision == 2 (divisor 2). */
78 static const uint8_t mpeg2_dc_scale_table2[128] = {
79 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
80 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table for intra_dc_precision == 3 (divisor 1). */
90 static const uint8_t mpeg2_dc_scale_table3[128] = {
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
92 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
93 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Indexed by intra_dc_precision (0..3); entry 0 reuses the MPEG-1 table. */
102 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
103 ff_mpeg1_dc_scale_table,
104 mpeg2_dc_scale_table1,
105 mpeg2_dc_scale_table2,
106 mpeg2_dc_scale_table3,
/* Alternate horizontal zigzag scan order (64 coefficient positions). */
109 const uint8_t ff_alternate_horizontal_scan[64] = {
110 0, 1, 2, 3, 8, 9, 16, 17,
111 10, 11, 4, 5, 6, 7, 15, 14,
112 13, 12, 19, 18, 24, 25, 32, 33,
113 26, 27, 20, 21, 22, 23, 28, 29,
114 30, 31, 34, 35, 40, 41, 48, 49,
115 42, 43, 36, 37, 38, 39, 44, 45,
116 46, 47, 50, 51, 56, 57, 58, 59,
117 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical scan order, used e.g. with MPEG-2 alternate_scan
 * (see its use under s->alternate_scan in ff_dct_common_init below). */
120 const uint8_t ff_alternate_vertical_scan[64] = {
121 0, 8, 16, 24, 1, 9, 2, 10,
122 17, 25, 32, 40, 48, 56, 57, 49,
123 41, 33, 26, 18, 3, 11, 4, 12,
124 19, 27, 34, 42, 50, 58, 35, 43,
125 51, 59, 20, 28, 5, 13, 6, 14,
126 21, 29, 36, 44, 52, 60, 37, 45,
127 53, 61, 22, 30, 7, 15, 23, 31,
128 38, 46, 54, 62, 39, 47, 55, 63,
/* Dequantize an intra block (MPEG-1 rules): the DC coefficient is scaled
 * by the per-plane DC scale, AC coefficients by qscale * intra matrix,
 * with the MPEG-1 "oddification" step ((level - 1) | 1).
 * n selects the block (n < 4 => luma, else chroma); qscale is the
 * quantizer for this macroblock.
 * NOTE(review): this dump is missing lines (the level-sign branches and
 * the block[j] store are not visible); confirm against the full source. */
131 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
132 int16_t *block, int n, int qscale)
134 int i, level, nCoeffs;
135 const uint16_t *quant_matrix;
137 nCoeffs= s->block_last_index[n];
/* DC: luma and chroma use different DC scale factors. */
139 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
140 /* XXX: only mpeg1 */
141 quant_matrix = s->intra_matrix;
142 for(i=1;i<=nCoeffs;i++) {
/* Walk coefficients in (IDCT-permuted) scan order. */
143 int j= s->intra_scantable.permutated[i];
148 level = (int)(level * qscale * quant_matrix[j]) >> 3;
149 level = (level - 1) | 1;
152 level = (int)(level * qscale * quant_matrix[j]) >> 3;
153 level = (level - 1) | 1;
/* Dequantize an inter block (MPEG-1 rules): reconstruction is
 * ((2*level + 1) * qscale * matrix) >> 4 followed by oddification;
 * no DC special case (loop starts at i = 0).
 * NOTE(review): sign branches and the block[j] store are elided in this
 * dump; verify against the full source. */
160 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
161 int16_t *block, int n, int qscale)
163 int i, level, nCoeffs;
164 const uint16_t *quant_matrix;
166 nCoeffs= s->block_last_index[n];
168 quant_matrix = s->inter_matrix;
169 for(i=0; i<=nCoeffs; i++) {
170 int j= s->intra_scantable.permutated[i];
175 level = (((level << 1) + 1) * qscale *
176 ((int) (quant_matrix[j]))) >> 4;
177 level = (level - 1) | 1;
180 level = (((level << 1) + 1) * qscale *
181 ((int) (quant_matrix[j]))) >> 4;
182 level = (level - 1) | 1;
/* Dequantize an intra block (MPEG-2 rules): like the MPEG-1 variant but
 * without the oddification step, and with alternate_scan forcing a full
 * 64-coefficient pass (the scan order makes block_last_index unreliable). */
189 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
190 int16_t *block, int n, int qscale)
192 int i, level, nCoeffs;
193 const uint16_t *quant_matrix;
195 if(s->alternate_scan) nCoeffs= 63;
196 else nCoeffs= s->block_last_index[n];
198 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
199 quant_matrix = s->intra_matrix;
200 for(i=1;i<=nCoeffs;i++) {
201 int j= s->intra_scantable.permutated[i];
206 level = (int)(level * qscale * quant_matrix[j]) >> 3;
209 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of dct_unquantize_mpeg2_intra_c; installed instead of
 * the plain version when CODEC_FLAG_BITEXACT is set (see
 * ff_dct_common_init). The visible arithmetic is identical; the
 * bit-exactness details (e.g. mismatch control) are in lines elided from
 * this dump — confirm against the full source. */
216 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
217 int16_t *block, int n, int qscale)
219 int i, level, nCoeffs;
220 const uint16_t *quant_matrix;
223 if(s->alternate_scan) nCoeffs= 63;
224 else nCoeffs= s->block_last_index[n];
226 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
228 quant_matrix = s->intra_matrix;
229 for(i=1;i<=nCoeffs;i++) {
230 int j= s->intra_scantable.permutated[i];
235 level = (int)(level * qscale * quant_matrix[j]) >> 3;
238 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Dequantize an inter block (MPEG-2 rules): ((2*level + 1) * qscale *
 * matrix) >> 4, no oddification; alternate_scan forces all 64 coeffs. */
247 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
248 int16_t *block, int n, int qscale)
250 int i, level, nCoeffs;
251 const uint16_t *quant_matrix;
254 if(s->alternate_scan) nCoeffs= 63;
255 else nCoeffs= s->block_last_index[n];
257 quant_matrix = s->inter_matrix;
258 for(i=0; i<=nCoeffs; i++) {
259 int j= s->intra_scantable.permutated[i];
264 level = (((level << 1) + 1) * qscale *
265 ((int) (quant_matrix[j]))) >> 4;
268 level = (((level << 1) + 1) * qscale *
269 ((int) (quant_matrix[j]))) >> 4;
/* Dequantize an intra block (H.263 rules): uniform quantizer
 * level*qmul +/- qadd depending on sign, qadd = (qscale - 1) | 1.
 * NOTE(review): qmul's assignment and the level-sign tests are elided in
 * this dump; confirm against the full source. */
278 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
279 int16_t *block, int n, int qscale)
281 int i, level, qmul, qadd;
/* h263_aic (advanced intra coding) allows an empty block here. */
284 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
289 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
290 qadd = (qscale - 1) | 1;
/* H.263 coefficients are in raster order; map last index accordingly. */
297 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
299 for(i=1; i<=nCoeffs; i++) {
303 level = level * qmul - qadd;
305 level = level * qmul + qadd;
/* Dequantize an inter block (H.263 rules): same uniform quantizer as the
 * intra variant but without the DC special case (loop starts at i = 0). */
312 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
313 int16_t *block, int n, int qscale)
315 int i, level, qmul, qadd;
318 av_assert2(s->block_last_index[n]>=0);
320 qadd = (qscale - 1) | 1;
323 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
325 for(i=0; i<=nCoeffs; i++) {
329 level = level * qmul - qadd;
331 level = level * qmul + qadd;
/* Error-resilience callback: reconstructs one macroblock during error
 * concealment. opaque is the MpegEncContext; the MV/intra/skip state is
 * copied into the context, destination pointers are recomputed from the
 * current picture, and the normal MB decode path is invoked. */
338 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
340 int mb_x, int mb_y, int mb_intra, int mb_skipped)
342 MpegEncContext *s = opaque;
345 s->mv_type = mv_type;
346 s->mb_intra = mb_intra;
347 s->mb_skipped = mb_skipped;
350 memcpy(s->mv, mv, sizeof(*mv));
352 ff_init_block_index(s);
353 ff_update_block_index(s);
355 s->dsp.clear_blocks(s->block[0]);
/* Recompute dest pointers for this MB; chroma uses the subsampling shifts. */
357 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
358 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
359 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
362 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
363 ff_MPV_decode_mb(s, s->block);
/* Debug helper (FF_DEBUG_NOMC): fills a 16-wide row per line with mid-gray
 * (128) instead of performing motion compensation; src is ignored. */
366 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
369 memset(dst + h*linesize, 128, 16);
/* 8-wide variant of gray16, used for the halved (chroma) pixel tables. */
372 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
375 memset(dst + h*linesize, 128, 8);
378 /* init common dct for both encoder and decoder */
379 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Init the shared DSP/chroma/half-pel/video DSP helper contexts. */
381 ff_dsputil_init(&s->dsp, s->avctx);
382 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
383 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
384 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* With FF_DEBUG_NOMC, replace all pixel copy/average ops with gray fills
 * so motion compensation output is visibly disabled. */
386 if (s->avctx->debug & FF_DEBUG_NOMC) {
388 for (i=0; i<4; i++) {
389 s->hdsp.avg_pixels_tab[0][i] = gray16;
390 s->hdsp.put_pixels_tab[0][i] = gray16;
391 s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
393 s->hdsp.avg_pixels_tab[1][i] = gray8;
394 s->hdsp.put_pixels_tab[1][i] = gray8;
395 s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
/* Install the C dequantizers; arch-specific init below may override. */
399 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
400 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
401 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
402 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
403 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
404 if (s->flags & CODEC_FLAG_BITEXACT)
405 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
406 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture overrides (guarded by ARCH_* in the full source). */
409 ff_MPV_common_init_axp(s);
411 ff_MPV_common_init_arm(s);
413 ff_MPV_common_init_ppc(s);
415 ff_MPV_common_init_x86(s);
417 /* load & permutate scantables
418 * note: only wmv uses different ones
420 if (s->alternate_scan) {
421 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
422 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
424 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
425 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
427 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
428 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge emulation and the
 * shared ME/RD/OBMC scratchpad). Returns 0 on success, AVERROR(ENOMEM)
 * on failure (after freeing the partially allocated edge buffer). */
433 static int frame_size_alloc(MpegEncContext *s, int linesize)
435 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
437 // edge emu needs blocksize + filter length - 1
438 // (= 17x17 for halfpel / 21x21 for h264)
439 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
440 // at uvlinesize. It supports only YUV420 so 24x24 is enough
441 // linesize * interlaced * MBsize
442 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
/* One allocation aliased by several scratch pointers; they are never
 * used simultaneously. */
445 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
447 s->me.temp = s->me.scratchpad;
448 s->rd_scratchpad = s->me.scratchpad;
449 s->b_scratchpad = s->me.scratchpad;
450 s->obmc_scratchpad = s->me.scratchpad + 16;
454 av_freep(&s->edge_emu_buffer);
455 return AVERROR(ENOMEM);
459 * Allocate a frame buffer
/* Get pixel storage for pic: encoders over-allocate by EDGE_WIDTH on each
 * side and then offset data[] back to the visible area; decoders go
 * through ff_thread_get_buffer (except the WM image/screen codecs, which
 * bypass user get_buffer callbacks). Also validates strides and lazily
 * allocates the context scratch buffers. Returns 0 or a negative error. */
461 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
463 int edges_needed = av_codec_is_encoder(s->avctx->codec);
467 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
468 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
469 s->codec_id != AV_CODEC_ID_MSS2) {
/* Encoder path: request edge-padded dimensions. */
471 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
472 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
475 r = ff_thread_get_buffer(s->avctx, &pic->tf,
476 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* WM image/screen codecs: default allocator, no user callback. */
478 pic->f->width = s->avctx->width;
479 pic->f->height = s->avctx->height;
480 pic->f->format = s->avctx->pix_fmt;
481 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
484 if (r < 0 || !pic->f->buf[0]) {
485 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Shift data pointers past the allocated edges and restore the visible
 * dimensions (encoder edge-padding path). */
492 for (i = 0; pic->f->data[i]; i++) {
493 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
494 pic->f->linesize[i] +
495 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
496 pic->f->data[i] += offset;
498 pic->f->width = s->avctx->width;
499 pic->f->height = s->avctx->height;
502 if (s->avctx->hwaccel) {
503 assert(!pic->hwaccel_picture_private);
504 if (s->avctx->hwaccel->frame_priv_data_size) {
505 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
506 if (!pic->hwaccel_priv_buf) {
507 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
510 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* Strides must stay constant once the context has recorded them. */
514 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
515 s->uvlinesize != pic->f->linesize[1])) {
516 av_log(s->avctx, AV_LOG_ERROR,
517 "get_buffer() failed (stride changed)\n");
518 ff_mpeg_unref_picture(s, pic);
522 if (pic->f->linesize[1] != pic->f->linesize[2]) {
523 av_log(s->avctx, AV_LOG_ERROR,
524 "get_buffer() failed (uv stride mismatch)\n");
525 ff_mpeg_unref_picture(s, pic);
529 if (!s->edge_emu_buffer &&
530 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
531 av_log(s->avctx, AV_LOG_ERROR,
532 "get_buffer() failed to allocate context scratch buffers.\n");
533 ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data buffers (variance, skip, qscale,
 * mb type, motion vectors, ref indices) and reset the recorded
 * allocation dimensions. Safe on already-freed tables (av_buffer_unref
 * tolerates NULL). */
540 void ff_free_picture_tables(Picture *pic)
544 pic->alloc_mb_width =
545 pic->alloc_mb_height = 0;
547 av_buffer_unref(&pic->mb_var_buf);
548 av_buffer_unref(&pic->mc_mb_var_buf);
549 av_buffer_unref(&pic->mb_mean_buf);
550 av_buffer_unref(&pic->mbskip_table_buf);
551 av_buffer_unref(&pic->qscale_table_buf);
552 av_buffer_unref(&pic->mb_type_buf);
554 for (i = 0; i < 2; i++) {
555 av_buffer_unref(&pic->motion_val_buf[i]);
556 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data buffers sized from the current
 * mb/b8 geometry; records the geometry in alloc_mb_width/height so a
 * later resolution change can detect stale tables. Motion-val and
 * ref-index tables are only needed for FMT_H263, encoding, or motion
 * visualization. Returns 0 or AVERROR(ENOMEM) (caller frees). */
560 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
562 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
563 const int mb_array_size = s->mb_stride * s->mb_height;
564 const int b8_array_size = s->b8_stride * s->mb_height * 2;
/* +2 / +stride padding: neighboring-MB accesses read one row past. */
568 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
569 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
570 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
572 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
573 return AVERROR(ENOMEM);
/* Encoder-side statistics tables (variance / mean per MB). */
576 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
577 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
578 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
579 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
580 return AVERROR(ENOMEM);
583 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
584 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
585 int ref_index_size = 4 * mb_array_size;
587 for (i = 0; mv_size && i < 2; i++) {
588 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
589 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
590 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
591 return AVERROR(ENOMEM);
595 pic->alloc_mb_width = s->mb_width;
596 pic->alloc_mb_height = s->mb_height;
/* Ensure every present per-picture table buffer is writable (performs a
 * copy-on-write via av_buffer_make_writable if the ref is shared).
 * The macro skips absent (NULL) buffers; the elided lines presumably
 * return ret on failure — confirm against the full source. */
601 static int make_tables_writable(Picture *pic)
604 #define MAKE_WRITABLE(table) \
607 (ret = av_buffer_make_writable(&pic->table)) < 0)\
611 MAKE_WRITABLE(mb_var_buf);
612 MAKE_WRITABLE(mc_mb_var_buf);
613 MAKE_WRITABLE(mb_mean_buf);
614 MAKE_WRITABLE(mbskip_table_buf);
615 MAKE_WRITABLE(qscale_table_buf);
616 MAKE_WRITABLE(mb_type_buf);
618 for (i = 0; i < 2; i++) {
619 MAKE_WRITABLE(motion_val_buf[i]);
620 MAKE_WRITABLE(ref_index_buf[i]);
627 * Allocate a Picture.
628 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Frees stale side tables if the mb geometry changed, obtains pixel
 * storage (unless shared, where data[0] must already be set), records
 * the strides in the context, (re)allocates and makes writable the side
 * tables, and wires the convenience pointers (qscale_table etc., offset
 * by 2*mb_stride + 1 to allow negative-neighbor addressing). On any
 * failure everything is unreferenced and AVERROR(ENOMEM) is returned. */
630 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
634 if (pic->qscale_table_buf)
635 if ( pic->alloc_mb_width != s->mb_width
636 || pic->alloc_mb_height != s->mb_height)
637 ff_free_picture_tables(pic);
640 av_assert0(pic->f->data[0]);
643 av_assert0(!pic->f->buf[0]);
645 if (alloc_frame_buffer(s, pic) < 0)
/* First successful allocation fixes the context strides. */
648 s->linesize = pic->f->linesize[0];
649 s->uvlinesize = pic->f->linesize[1];
652 if (!pic->qscale_table_buf)
653 ret = alloc_picture_tables(s, pic);
655 ret = make_tables_writable(pic);
660 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
661 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
662 pic->mb_mean = pic->mb_mean_buf->data;
665 pic->mbskip_table = pic->mbskip_table_buf->data;
666 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
667 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
669 if (pic->motion_val_buf[0]) {
670 for (i = 0; i < 2; i++) {
671 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
672 pic->ref_index[i] = pic->ref_index_buf[i]->data;
678 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
679 ff_mpeg_unref_picture(s, pic);
680 ff_free_picture_tables(pic);
681 return AVERROR(ENOMEM);
685 * Deallocate a picture.
/* Releases the frame buffer and hwaccel data, optionally drops the side
 * tables (needs_realloc), then zeroes every Picture field located after
 * mb_mean — fields up to and including mb_mean (the table pointers/bufs)
 * are deliberately preserved so they can be reused. */
687 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
689 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
692 /* WM Image / Screen codecs allocate internal buffers with different
693 * dimensions / colorspaces; ignore user-defined callbacks for these. */
694 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
695 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
696 s->codec_id != AV_CODEC_ID_MSS2)
697 ff_thread_release_buffer(s->avctx, &pic->tf);
699 av_frame_unref(pic->f);
701 av_buffer_unref(&pic->hwaccel_priv_buf);
703 if (pic->needs_realloc)
704 ff_free_picture_tables(pic);
706 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's side-table buffer refs point at the same underlying buffers
 * as src's (re-referencing only when they differ), then copy the derived
 * data pointers and the recorded allocation geometry. On a failed
 * av_buffer_ref, all of dst's tables are freed and ENOMEM is returned. */
709 static int update_picture_tables(Picture *dst, Picture *src)
713 #define UPDATE_TABLE(table)\
716 (!dst->table || dst->table->buffer != src->table->buffer)) {\
717 av_buffer_unref(&dst->table);\
718 dst->table = av_buffer_ref(src->table);\
720 ff_free_picture_tables(dst);\
721 return AVERROR(ENOMEM);\
726 UPDATE_TABLE(mb_var_buf);
727 UPDATE_TABLE(mc_mb_var_buf);
728 UPDATE_TABLE(mb_mean_buf);
729 UPDATE_TABLE(mbskip_table_buf);
730 UPDATE_TABLE(qscale_table_buf);
731 UPDATE_TABLE(mb_type_buf);
732 for (i = 0; i < 2; i++) {
733 UPDATE_TABLE(motion_val_buf[i]);
734 UPDATE_TABLE(ref_index_buf[i]);
/* The derived pointers alias the buffers just referenced above. */
737 dst->mb_var = src->mb_var;
738 dst->mc_mb_var = src->mc_mb_var;
739 dst->mb_mean = src->mb_mean;
740 dst->mbskip_table = src->mbskip_table;
741 dst->qscale_table = src->qscale_table;
742 dst->mb_type = src->mb_type;
743 for (i = 0; i < 2; i++) {
744 dst->motion_val[i] = src->motion_val[i];
745 dst->ref_index[i] = src->ref_index[i];
748 dst->alloc_mb_width = src->alloc_mb_width;
749 dst->alloc_mb_height = src->alloc_mb_height;
/* Make dst a new reference to src: frame buffer (via ff_thread_ref_frame),
 * side tables (via update_picture_tables), hwaccel private data, and the
 * scalar bookkeeping fields. dst must be empty on entry. On failure dst
 * is unreferenced (see the fail path at the end). */
754 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
758 av_assert0(!dst->f->buf[0]);
759 av_assert0(src->f->buf[0]);
763 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
767 ret = update_picture_tables(dst, src);
771 if (src->hwaccel_picture_private) {
772 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
773 if (!dst->hwaccel_priv_buf)
775 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
778 dst->field_picture = src->field_picture;
779 dst->mb_var_sum = src->mb_var_sum;
780 dst->mc_mb_var_sum = src->mc_mb_var_sum;
781 dst->b_frame_score = src->b_frame_score;
782 dst->needs_realloc = src->needs_realloc;
783 dst->reference = src->reference;
784 dst->shared = src->shared;
788 ff_mpeg_unref_picture(s, dst);
/* Swap the U/V plane block pointers (used for VCR2, which stores chroma
 * in the opposite order). NOTE(review): the swap's temp and the
 * pblocks[5] assignment are elided from this dump — only one half of the
 * exchange is visible; confirm against the full source. */
792 static void exchange_uv(MpegEncContext *s)
797 s->pblocks[4] = s->pblocks[5];
/* Allocate the per-slice-context state: ME maps, optional noise-reduction
 * error sums, the 12-block DCT coefficient array (with pblocks[] pointer
 * table, UV-swapped for the VCR2 fourcc), and the H.263 AC prediction
 * values. Scratch buffers are allocated lazily elsewhere
 * (frame_size_alloc) since they depend on linesize. Returns 0 or -1;
 * partially allocated state is released later by ff_MPV_common_end(). */
801 static int init_duplicate_context(MpegEncContext *s)
803 int y_size = s->b8_stride * (2 * s->mb_height + 1);
804 int c_size = s->mb_stride * (s->mb_height + 1);
805 int yc_size = y_size + 2 * c_size;
/* Odd mb_height needs one extra padding row per plane. */
808 if (s->mb_height & 1)
809 yc_size += 2*s->b8_stride + 2*s->mb_stride;
816 s->obmc_scratchpad = NULL;
819 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
820 ME_MAP_SIZE * sizeof(uint32_t), fail)
821 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
822 ME_MAP_SIZE * sizeof(uint32_t), fail)
823 if (s->avctx->noise_reduction) {
824 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
825 2 * 64 * sizeof(int), fail)
828 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
829 s->block = s->blocks[0];
831 for (i = 0; i < 12; i++) {
832 s->pblocks[i] = &s->block[i];
/* VCR2 stores chroma planes swapped; mirror that in pblocks[]. */
834 if (s->avctx->codec_tag == AV_RL32("VCR2"))
837 if (s->out_format == FMT_H263) {
839 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
840 yc_size * sizeof(int16_t) * 16, fail);
/* ac_val[0] = luma, [1]/[2] = chroma; offsets skip the padding border. */
841 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
842 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
843 s->ac_val[2] = s->ac_val[1] + c_size;
848 return -1; // free() through ff_MPV_common_end()
/* Free everything init_duplicate_context / frame_size_alloc allocated for
 * one slice context. obmc_scratchpad aliases me.scratchpad, so it is only
 * NULLed, not freed separately. */
851 static void free_duplicate_context(MpegEncContext *s)
856 av_freep(&s->edge_emu_buffer);
857 av_freep(&s->me.scratchpad);
861 s->obmc_scratchpad = NULL;
863 av_freep(&s->dct_error_sum);
864 av_freep(&s->me.map);
865 av_freep(&s->me.score_map);
866 av_freep(&s->blocks);
867 av_freep(&s->ac_val_base);
/* Copy the per-thread-private pointer/state fields from src into bak;
 * used by ff_update_duplicate_context to preserve them across the bulk
 * memcpy of the whole context. (Most COPY() lines are elided from this
 * dump.) */
871 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
873 #define COPY(a) bak->a = src->a
874 COPY(edge_emu_buffer);
879 COPY(obmc_scratchpad);
886 COPY(me.map_generation);
/* Sync a slice-thread context dst from the master src: bulk-copy the whole
 * struct while preserving dst's private buffers via
 * backup_duplicate_context, re-point pblocks[] at dst's own block array
 * (with the VCR2 UV swap), and lazily allocate dst's scratch buffers for
 * the master's linesize. */
898 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
902 // FIXME copy only needed parts
904 backup_duplicate_context(&bak, dst);
905 memcpy(dst, src, sizeof(MpegEncContext));
906 backup_duplicate_context(dst, &bak);
907 for (i = 0; i < 12; i++) {
908 dst->pblocks[i] = &dst->block[i];
910 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
912 if (!dst->edge_emu_buffer &&
913 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
914 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
915 "scratch buffers.\n");
918 // STOP_TIMER("update_duplicate_context")
919 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading sync: copy decoding state from the source thread's
 * context (s1) into this thread's context (s) between frames. Covers
 * first-time init, resolution changes, the full picture pool and the
 * current/last/next picture slots (rebased to this context), error
 * resilience and MPEG-4 timing state, a deep copy of the pending
 * bitstream buffer, scratch buffers, and MPEG-2 interlacing fields. */
923 int ff_mpeg_update_thread_context(AVCodecContext *dst,
924 const AVCodecContext *src)
927 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
934 // FIXME can parameters change on I-frames?
935 // in that case dst may need a reinit
936 if (!s->context_initialized) {
/* First sync: clone the whole context, then drop pointers that must
 * stay per-thread (the bitstream buffer is deep-copied below). */
937 memcpy(s, s1, sizeof(MpegEncContext));
940 s->bitstream_buffer = NULL;
941 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
943 if (s1->context_initialized){
944 // s->picture_range_start += MAX_PICTURE_COUNT;
945 // s->picture_range_end += MAX_PICTURE_COUNT;
946 if((ret = ff_MPV_common_init(s)) < 0){
947 memset(s, 0, sizeof(MpegEncContext));
954 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
955 s->context_reinit = 0;
956 s->height = s1->height;
957 s->width = s1->width;
958 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
962 s->avctx->coded_height = s1->avctx->coded_height;
963 s->avctx->coded_width = s1->avctx->coded_width;
964 s->avctx->width = s1->avctx->width;
965 s->avctx->height = s1->avctx->height;
967 s->coded_picture_number = s1->coded_picture_number;
968 s->picture_number = s1->picture_number;
970 av_assert0(!s->picture || s->picture != s1->picture);
/* Re-reference the whole picture pool from the source thread. */
972 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
973 ff_mpeg_unref_picture(s, &s->picture[i]);
974 if (s1->picture[i].f->buf[0] &&
975 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
979 #define UPDATE_PICTURE(pic)\
981 ff_mpeg_unref_picture(s, &s->pic);\
982 if (s1->pic.f && s1->pic.f->buf[0])\
983 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
985 ret = update_picture_tables(&s->pic, &s1->pic);\
990 UPDATE_PICTURE(current_picture);
991 UPDATE_PICTURE(last_picture);
992 UPDATE_PICTURE(next_picture);
/* Translate s1's picture pointers into the equivalent slots in s. */
994 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
995 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
996 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
998 // Error/bug resilience
999 s->next_p_frame_damaged = s1->next_p_frame_damaged;
1000 s->workaround_bugs = s1->workaround_bugs;
1001 s->padding_bug_score = s1->padding_bug_score;
1003 // MPEG4 timing info
1004 memcpy(&s->last_time_base, &s1->last_time_base,
1005 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1006 (char *) &s1->last_time_base);
1009 s->max_b_frames = s1->max_b_frames;
1010 s->low_delay = s1->low_delay;
1011 s->droppable = s1->droppable;
1013 // DivX handling (doesn't work)
1014 s->divx_packed = s1->divx_packed;
/* Deep-copy the pending bitstream buffer, padded for the bit reader. */
1016 if (s1->bitstream_buffer) {
1017 if (s1->bitstream_buffer_size +
1018 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
1019 av_fast_malloc(&s->bitstream_buffer,
1020 &s->allocated_bitstream_buffer_size,
1021 s1->allocated_bitstream_buffer_size);
1022 s->bitstream_buffer_size = s1->bitstream_buffer_size;
1023 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1024 s1->bitstream_buffer_size);
1025 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1026 FF_INPUT_BUFFER_PADDING_SIZE);
1029 // linesize dependend scratch buffer allocation
1030 if (!s->edge_emu_buffer)
1032 if (frame_size_alloc(s, s1->linesize) < 0) {
1033 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1034 "scratch buffers.\n");
1035 return AVERROR(ENOMEM);
1038 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1039 "be allocated due to unknown size.\n");
1042 // MPEG2/interlacing info
1043 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1044 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1046 if (!s1->first_field) {
1047 s->last_pict_type = s1->pict_type;
1048 if (s1->current_picture_ptr)
1049 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1056 * Set the given MpegEncContext to common defaults
1057 * (same for encoding and decoding).
1058 * The changed fields will not depend upon the
1059 * prior state of the MpegEncContext.
1061 void ff_MPV_common_defaults(MpegEncContext *s)
/* DC/chroma scale tables default to the MPEG-1 / identity tables. */
1063 s->y_dc_scale_table =
1064 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1065 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1066 s->progressive_frame = 1;
1067 s->progressive_sequence = 1;
1068 s->picture_structure = PICT_FRAME;
1070 s->coded_picture_number = 0;
1071 s->picture_number = 0;
1076 s->slice_context_count = 1;
1080 * Set the given MpegEncContext to defaults for decoding.
1081 * the changed fields will not depend upon
1082 * the prior state of the MpegEncContext.
1084 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Currently just the common defaults; decoder-specific fields (if any)
 * are set in lines elided from this dump. */
1086 ff_MPV_common_defaults(s);
/* Initialize the error-resilience context from the MpegEncContext
 * geometry, allocate its temp/status tables, and point it at the shared
 * skip/intra/DC tables plus the mpeg_er_decode_mb callback. Returns 0 or
 * AVERROR(ENOMEM) after freeing what was allocated. */
1089 static int init_er(MpegEncContext *s)
1091 ERContext *er = &s->er;
1092 int mb_array_size = s->mb_height * s->mb_stride;
1095 er->avctx = s->avctx;
1098 er->mb_index2xy = s->mb_index2xy;
1099 er->mb_num = s->mb_num;
1100 er->mb_width = s->mb_width;
1101 er->mb_height = s->mb_height;
1102 er->mb_stride = s->mb_stride;
1103 er->b8_stride = s->b8_stride;
1105 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1106 er->error_status_table = av_mallocz(mb_array_size);
1107 if (!er->er_temp_buffer || !er->error_status_table)
1110 er->mbskip_table = s->mbskip_table;
1111 er->mbintra_table = s->mbintra_table;
1113 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1114 er->dc_val[i] = s->dc_val[i];
1116 er->decode_mb = mpeg_er_decode_mb;
1121 av_freep(&er->er_temp_buffer);
1122 av_freep(&er->error_status_table);
1123 return AVERROR(ENOMEM);
1127 * Initialize and allocates MpegEncContext fields dependent on the resolution.
1129 static int init_context_frame(MpegEncContext *s)
1131 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Derive macroblock geometry from the pixel dimensions; the +1 strides
 * leave a guard column for neighbor accesses. */
1133 s->mb_width = (s->width + 15) / 16;
1134 s->mb_stride = s->mb_width + 1;
1135 s->b8_stride = s->mb_width * 2 + 1;
1136 mb_array_size = s->mb_height * s->mb_stride;
1137 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1139 /* set default edge pos, will be overridden
1140 * in decode_header if needed */
1141 s->h_edge_pos = s->mb_width * 16;
1142 s->v_edge_pos = s->mb_height * 16;
1144 s->mb_num = s->mb_width * s->mb_height;
1149 s->block_wrap[3] = s->b8_stride;
1151 s->block_wrap[5] = s->mb_stride;
1153 y_size = s->b8_stride * (2 * s->mb_height + 1);
1154 c_size = s->mb_stride * (s->mb_height + 1);
1155 yc_size = y_size + 2 * c_size;
1157 if (s->mb_height & 1)
1158 yc_size += 2*s->b8_stride + 2*s->mb_stride;
1160 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
1161 for (y = 0; y < s->mb_height; y++)
1162 for (x = 0; x < s->mb_width; x++)
1163 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1165 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1168 /* Allocate MV tables */
1169 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1170 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1171 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1172 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1173 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1174 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
/* Usable table pointers skip the one-MB border of the *_base arrays. */
1175 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1176 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1177 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1178 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1179 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1180 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1182 /* Allocate MB type table */
1183 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1185 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1187 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1188 mb_array_size * sizeof(float), fail);
1189 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1190 mb_array_size * sizeof(float), fail);
1194 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1195 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1196 /* interlaced direct mode decoding tables */
1197 for (i = 0; i < 2; i++) {
1199 for (j = 0; j < 2; j++) {
1200 for (k = 0; k < 2; k++) {
1201 FF_ALLOCZ_OR_GOTO(s->avctx,
1202 s->b_field_mv_table_base[i][j][k],
1203 mv_table_size * 2 * sizeof(int16_t),
1205 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1208 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1209 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1210 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1212 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1215 if (s->out_format == FMT_H263) {
1217 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1218 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1220 /* cbp, ac_pred, pred_dir */
1221 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1222 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1225 if (s->h263_pred || s->h263_plus || !s->encoding) {
1227 // MN: we need these for error resilience of intra-frames
1228 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1229 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1230 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1231 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 = the "no prediction yet" DC reset value (2^10). */
1232 for (i = 0; i < yc_size; i++)
1233 s->dc_val_base[i] = 1024;
1236 /* which mb is a intra block */
1237 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1238 memset(s->mbintra_table, 1, mb_array_size);
1240 /* init macroblock skip table */
1241 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1242 // Note the + 1 is for a quicker mpeg4 slice_end detection
1246 return AVERROR(ENOMEM);
1250 * init common structure for both encoder and decoder.
1251 * this assumes that some variables like width/height are already set
/**
 * Init common structure for both encoder and decoder.
 * Assumes width/height, codec_id and avctx are already set on the context.
 * Allocates the picture array, the per-picture AVFrames and the
 * frame-size-dependent tables, then sets up one context per slice thread.
 * NOTE(review): this listing omits interleaved source lines (error returns,
 * some braces) present in the full file — do not read it as complete.
 */
1253 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* one slice context per thread only when slice threading is active */
1256 int nb_slices = (HAVE_THREADS &&
1257 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1258 s->avctx->thread_count : 1;
1260 if (s->encoding && s->avctx->slices)
1261 nb_slices = s->avctx->slices;
/* interlaced MPEG-2 needs mb_height rounded to a multiple of 2 MB rows */
1263 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1264 s->mb_height = (s->height + 31) / 32 * 2;
1266 s->mb_height = (s->height + 15) / 16;
1268 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1269 av_log(s->avctx, AV_LOG_ERROR,
1270 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* clamp slice count: never more than MAX_THREADS or one per MB row */
1274 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1277 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1279 max_slices = MAX_THREADS;
1280 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1281 " reducing to %d\n", nb_slices, max_slices);
1282 nb_slices = max_slices;
1285 if ((s->width || s->height) &&
1286 av_image_check_size(s->width, s->height, 0, s->avctx))
1289 ff_dct_common_init(s);
1291 s->flags = s->avctx->flags;
1292 s->flags2 = s->avctx->flags2;
1294 /* set chroma shifts */
1295 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1297 &s->chroma_y_shift);
1299 /* convert fourcc to upper case */
1300 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1302 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
/* allocate the picture pool and one AVFrame per slot */
1304 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1305 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1306 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1307 s->picture[i].f = av_frame_alloc();
1308 if (!s->picture[i].f)
1311 memset(&s->next_picture, 0, sizeof(s->next_picture));
1312 memset(&s->last_picture, 0, sizeof(s->last_picture));
1313 memset(&s->current_picture, 0, sizeof(s->current_picture));
1314 memset(&s->new_picture, 0, sizeof(s->new_picture));
1315 s->next_picture.f = av_frame_alloc();
1316 if (!s->next_picture.f)
1318 s->last_picture.f = av_frame_alloc();
1319 if (!s->last_picture.f)
1321 s->current_picture.f = av_frame_alloc();
1322 if (!s->current_picture.f)
1324 s->new_picture.f = av_frame_alloc();
1325 if (!s->new_picture.f)
/* resolution-dependent tables */
1328 if (init_context_frame(s))
1331 s->parse_context.state = -1;
1333 s->context_initialized = 1;
1334 s->thread_context[0] = s;
1336 // if (s->width && s->height) {
/* duplicate the context for each additional slice thread and split the
 * MB rows evenly (rounded) between start_mb_y and end_mb_y */
1337 if (nb_slices > 1) {
1338 for (i = 1; i < nb_slices; i++) {
1339 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1340 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1343 for (i = 0; i < nb_slices; i++) {
1344 if (init_duplicate_context(s->thread_context[i]) < 0)
1346 s->thread_context[i]->start_mb_y =
1347 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1348 s->thread_context[i]->end_mb_y =
1349 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1352 if (init_duplicate_context(s) < 0)
1355 s->end_mb_y = s->mb_height;
1357 s->slice_context_count = nb_slices;
/* failure path: tear down everything allocated so far */
1362 ff_MPV_common_end(s);
1367 * Frees and resets MpegEncContext fields depending on the resolution.
1368 * Is used during resolution changes to avoid a full reinitialization of the
/**
 * Free all fields of MpegEncContext that depend on the current resolution,
 * and NULL the derived pointers into the freed base tables.
 * Counterpart of init_context_frame(); used for resolution changes to avoid
 * a full context reinitialization.
 */
1371 static int free_context_frame(MpegEncContext *s)
1375 av_freep(&s->mb_type);
/* motion-vector base tables (the *_mv_table pointers below point into them) */
1376 av_freep(&s->p_mv_table_base);
1377 av_freep(&s->b_forw_mv_table_base);
1378 av_freep(&s->b_back_mv_table_base);
1379 av_freep(&s->b_bidir_forw_mv_table_base);
1380 av_freep(&s->b_bidir_back_mv_table_base);
1381 av_freep(&s->b_direct_mv_table_base);
1382 s->p_mv_table = NULL;
1383 s->b_forw_mv_table = NULL;
1384 s->b_back_mv_table = NULL;
1385 s->b_bidir_forw_mv_table = NULL;
1386 s->b_bidir_back_mv_table = NULL;
1387 s->b_direct_mv_table = NULL;
/* interlaced (field) MV tables: [field][list][dir] */
1388 for (i = 0; i < 2; i++) {
1389 for (j = 0; j < 2; j++) {
1390 for (k = 0; k < 2; k++) {
1391 av_freep(&s->b_field_mv_table_base[i][j][k]);
1392 s->b_field_mv_table[i][j][k] = NULL;
1394 av_freep(&s->b_field_select_table[i][j]);
1395 av_freep(&s->p_field_mv_table_base[i][j]);
1396 s->p_field_mv_table[i][j] = NULL;
1398 av_freep(&s->p_field_select_table[i]);
/* per-block prediction state and misc per-MB tables */
1401 av_freep(&s->dc_val_base);
1402 av_freep(&s->coded_block_base);
1403 av_freep(&s->mbintra_table);
1404 av_freep(&s->cbp_table);
1405 av_freep(&s->pred_dir_table);
1407 av_freep(&s->mbskip_table);
1409 av_freep(&s->er.error_status_table);
1410 av_freep(&s->er.er_temp_buffer);
1411 av_freep(&s->mb_index2xy);
1412 av_freep(&s->lambda_table);
1414 av_freep(&s->cplx_tab);
1415 av_freep(&s->bits_tab);
/* force re-derivation of line sizes on the next frame allocation */
1417 s->linesize = s->uvlinesize = 0;
/**
 * Re-initialize the resolution-dependent parts of the context after a
 * frame size change: tear down slice thread contexts and frame tables,
 * mark all pool pictures for reallocation, recompute mb_height, then
 * rebuild the tables and slice contexts.
 * NOTE(review): listing omits some error-return lines of the full source.
 */
1422 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* free per-slice duplicate contexts (index 0 is the main context) */
1426 if (s->slice_context_count > 1) {
1427 for (i = 0; i < s->slice_context_count; i++) {
1428 free_duplicate_context(s->thread_context[i]);
1430 for (i = 1; i < s->slice_context_count; i++) {
1431 av_freep(&s->thread_context[i]);
1434 free_duplicate_context(s);
1436 if ((err = free_context_frame(s)) < 0)
/* buffers keep their refs; just flag them for lazy reallocation */
1440 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1441 s->picture[i].needs_realloc = 1;
1444 s->last_picture_ptr =
1445 s->next_picture_ptr =
1446 s->current_picture_ptr = NULL;
/* same mb_height rounding rule as ff_MPV_common_init() */
1449 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1450 s->mb_height = (s->height + 31) / 32 * 2;
1452 s->mb_height = (s->height + 15) / 16;
1454 if ((s->width || s->height) &&
1455 av_image_check_size(s->width, s->height, 0, s->avctx))
1456 return AVERROR_INVALIDDATA;
1458 if ((err = init_context_frame(s)))
1461 s->thread_context[0] = s;
1463 if (s->width && s->height) {
1464 int nb_slices = s->slice_context_count;
/* rebuild the slice thread contexts with the new MB-row split */
1465 if (nb_slices > 1) {
1466 for (i = 1; i < nb_slices; i++) {
1467 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1468 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1471 for (i = 0; i < nb_slices; i++) {
1472 if (init_duplicate_context(s->thread_context[i]) < 0)
1474 s->thread_context[i]->start_mb_y =
1475 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1476 s->thread_context[i]->end_mb_y =
1477 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1480 err = init_duplicate_context(s);
1484 s->end_mb_y = s->mb_height;
1486 s->slice_context_count = nb_slices;
/* failure path: release everything */
1491 ff_MPV_common_end(s);
1495 /* init common structure for both encoder and decoder */
/**
 * Free the common structure for both encoder and decoder: slice thread
 * contexts, parser/bitstream buffers, the whole picture pool and the four
 * standalone picture slots, plus all frame-size-dependent tables.
 * Safe to call on a partially initialized context (used as failure path).
 */
1496 void ff_MPV_common_end(MpegEncContext *s)
1500 if (s->slice_context_count > 1) {
1501 for (i = 0; i < s->slice_context_count; i++) {
1502 free_duplicate_context(s->thread_context[i]);
1504 for (i = 1; i < s->slice_context_count; i++) {
1505 av_freep(&s->thread_context[i]);
1507 s->slice_context_count = 1;
1508 } else free_duplicate_context(s);
1510 av_freep(&s->parse_context.buffer);
1511 s->parse_context.buffer_size = 0;
1513 av_freep(&s->bitstream_buffer);
1514 s->allocated_bitstream_buffer_size = 0;
/* release every pooled picture: tables, buffer refs, then the AVFrame */
1517 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1518 ff_free_picture_tables(&s->picture[i]);
1519 ff_mpeg_unref_picture(s, &s->picture[i]);
1520 av_frame_free(&s->picture[i].f);
1523 av_freep(&s->picture);
/* the four non-pool picture slots get the same three-step teardown */
1524 ff_free_picture_tables(&s->last_picture);
1525 ff_mpeg_unref_picture(s, &s->last_picture);
1526 av_frame_free(&s->last_picture.f);
1527 ff_free_picture_tables(&s->current_picture);
1528 ff_mpeg_unref_picture(s, &s->current_picture);
1529 av_frame_free(&s->current_picture.f);
1530 ff_free_picture_tables(&s->next_picture);
1531 ff_mpeg_unref_picture(s, &s->next_picture);
1532 av_frame_free(&s->next_picture.f);
1533 ff_free_picture_tables(&s->new_picture);
1534 ff_mpeg_unref_picture(s, &s->new_picture);
1535 av_frame_free(&s->new_picture.f);
1537 free_context_frame(s);
1539 s->context_initialized = 0;
1540 s->last_picture_ptr =
1541 s->next_picture_ptr =
1542 s->current_picture_ptr = NULL;
1543 s->linesize = s->uvlinesize = 0;
/**
 * Initialize an RLTable's derived lookup arrays (max_level[], max_run[],
 * index_run[]) for both the "not last" and "last" coefficient groups.
 * @param static_store if non-NULL, the arrays are carved out of this static
 *                     buffer instead of being heap-allocated (and the call
 *                     becomes a no-op when already initialized).
 */
1546 av_cold void ff_init_rl(RLTable *rl,
1547 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1549 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1550 uint8_t index_run[MAX_RUN + 1];
1551 int last, run, level, start, end, i;
1553 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1554 if (static_store && rl->max_level[0])
1557 /* compute max_level[], max_run[] and index_run[] */
1558 for (last = 0; last < 2; last++) {
/* index_run entries default to rl->n, meaning "no code with this run" */
1567 memset(max_level, 0, MAX_RUN + 1);
1568 memset(max_run, 0, MAX_LEVEL + 1);
1569 memset(index_run, rl->n, MAX_RUN + 1);
1570 for (i = start; i < end; i++) {
1571 run = rl->table_run[i];
1572 level = rl->table_level[i];
1573 if (index_run[run] == rl->n)
1575 if (level > max_level[run])
1576 max_level[run] = level;
1577 if (run > max_run[level])
1578 max_run[level] = run;
/* copy the scratch arrays into static storage or freshly malloc'ed memory;
 * the three arrays are packed consecutively inside static_store[last] */
1581 rl->max_level[last] = static_store[last];
1583 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1584 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1586 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1588 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1589 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1591 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1593 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1594 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Build the per-qscale RL-VLC tables: for each of the 32 quantizer values,
 * pre-apply dequantization (level * qmul + qadd) to every VLC table entry so
 * the decoder can read run/level/len in a single lookup.
 */
1598 av_cold void ff_init_vlc_rl(RLTable *rl)
1602 for (q = 0; q < 32; q++) {
/* qadd = (q - 1) | 1 gives the standard H.263 dequant rounding offset */
1604 int qadd = (q - 1) | 1;
1610 for (i = 0; i < rl->vlc.table_size; i++) {
1611 int code = rl->vlc.table[i][0];
1612 int len = rl->vlc.table[i][1];
1615 if (len == 0) { // illegal code
1618 } else if (len < 0) { // more bits needed
1622 if (code == rl->n) { // esc
/* normal code: precompute dequantized level; +192 marks "last" runs */
1626 run = rl->table_run[code] + 1;
1627 level = rl->table_level[code] * qmul + qadd;
1628 if (code >= rl->last) run += 192;
1631 rl->rl_vlc[q][i].len = len;
1632 rl->rl_vlc[q][i].level = level;
1633 rl->rl_vlc[q][i].run = run;
/** Unref every picture in the pool that is not referenced. */
1638 static void release_unused_pictures(MpegEncContext *s)
1642 /* release non reference frames */
1643 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1644 if (!s->picture[i].reference)
1645 ff_mpeg_unref_picture(s, &s->picture[i]);
/**
 * Tell whether a picture slot may be (re)used for a new frame: never the
 * current last picture; otherwise free slots or slots flagged for realloc
 * that are not held as delayed references.
 * (The per-condition return statements are omitted in this listing.)
 */
1649 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1651 if (pic == s->last_picture_ptr)
1653 if (pic->f->buf[0] == NULL)
1655 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/**
 * Return the index of a reusable slot in the picture pool.
 * First pass prefers completely empty slots (no buffer, not the last
 * picture); second pass accepts anything pic_is_unused() allows.
 * Exhaustion is a hard internal error (see the comment below).
 */
1660 static int find_unused_picture(MpegEncContext *s, int shared)
1665 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1666 if (s->picture[i].f->buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1670 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1671 if (pic_is_unused(s, &s->picture[i]))
1676 av_log(s->avctx, AV_LOG_FATAL,
1677 "Internal error, picture buffer overflow\n");
1678 /* We could return -1, but the codec would crash trying to draw into a
1679 * non-existing frame anyway. This is safer than waiting for a random crash.
1680 * Also the return of this is never useful, an encoder must only allocate
1681 * as much as allowed in the specification. This has no relationship to how
1682 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1683 * enough for such valid streams).
1684 * Plus, a decoder has to check stream validity and remove frames if too
1685 * many reference frames are around. Waiting for "OOM" is not correct at
1686 * all. Similarly, missing reference frames have to be replaced by
1687 * interpolated/MC frames, anything else is a bug in the codec ...
/**
 * Public wrapper around find_unused_picture(): if the chosen slot was
 * flagged needs_realloc, drop its old tables and buffer refs before
 * handing the index back to the caller.
 */
1693 int ff_find_unused_picture(MpegEncContext *s, int shared)
1695 int ret = find_unused_picture(s, shared);
1697 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1698 if (s->picture[ret].needs_realloc) {
1699 s->picture[ret].needs_realloc = 0;
1700 ff_free_picture_tables(&s->picture[ret]);
1701 ff_mpeg_unref_picture(s, &s->picture[ret]);
/**
 * Fill a frame with mid-gray (0x80 in all three planes), used by the
 * FF_DEBUG_NOMC debug mode to blank the picture before visualization.
 * Assumes a planar YUV layout with data[0..2] — TODO confirm callers
 * never pass other pixel formats.
 */
1707 static void gray_frame(AVFrame *frame)
1709 int i, h_chroma_shift, v_chroma_shift;
1711 av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1713 for(i=0; i<frame->height; i++)
1714 memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
/* chroma planes are scaled down by the per-format subsampling shifts */
1715 for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1716 memset(frame->data[1] + frame->linesize[1]*i,
1717 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1718 memset(frame->data[2] + frame->linesize[2]*i,
1719 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1724 * generic function called after decoding
1725 * the header and before a frame is decoded.
/**
 * Generic per-frame setup, called after the header has been decoded and
 * before the frame itself is: releases stale pictures, acquires/allocates
 * the current picture, fabricates dummy last/next reference pictures when
 * the stream lacks them, applies field-picture line-size adjustments and
 * selects the dequantizer functions.
 * NOTE(review): listing omits some error-return/brace lines of the full file.
 */
1727 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1733 if (!ff_thread_can_start_frame(avctx)) {
1734 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1738 /* mark & release old frames */
1739 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1740 s->last_picture_ptr != s->next_picture_ptr &&
1741 s->last_picture_ptr->f->buf[0]) {
1742 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1745 /* release forgotten pictures */
1746 /* if (mpeg124/h263) */
1747 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1748 if (&s->picture[i] != s->last_picture_ptr &&
1749 &s->picture[i] != s->next_picture_ptr &&
1750 s->picture[i].reference && !s->picture[i].needs_realloc) {
1751 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1752 av_log(avctx, AV_LOG_ERROR,
1753 "releasing zombie picture\n");
1754 ff_mpeg_unref_picture(s, &s->picture[i]);
1758 ff_mpeg_unref_picture(s, &s->current_picture);
1760 release_unused_pictures(s);
1762 if (s->current_picture_ptr &&
1763 s->current_picture_ptr->f->buf[0] == NULL) {
1764 // we already have an unused image
1765 // (maybe it was set before reading the header)
1766 pic = s->current_picture_ptr;
1768 i = ff_find_unused_picture(s, 0);
1770 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1773 pic = &s->picture[i];
/* droppable frames are never references */
1777 if (!s->droppable) {
1778 if (s->pict_type != AV_PICTURE_TYPE_B)
1782 pic->f->coded_picture_number = s->coded_picture_number++;
1784 if (ff_alloc_picture(s, pic, 0) < 0)
1787 s->current_picture_ptr = pic;
1788 // FIXME use only the vars from current_pic
1789 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1790 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1791 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1792 if (s->picture_structure != PICT_FRAME)
1793 s->current_picture_ptr->f->top_field_first =
1794 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1796 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1797 !s->progressive_sequence;
1798 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1800 s->current_picture_ptr->f->pict_type = s->pict_type;
1801 // if (s->flags && CODEC_FLAG_QSCALE)
1802 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1803 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1805 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1806 s->current_picture_ptr)) < 0)
/* reference-frame bookkeeping: non-B frames shift next -> last */
1809 if (s->pict_type != AV_PICTURE_TYPE_B) {
1810 s->last_picture_ptr = s->next_picture_ptr;
1812 s->next_picture_ptr = s->current_picture_ptr;
1814 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1815 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1816 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1817 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1818 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1819 s->pict_type, s->droppable);
/* missing last reference (e.g. stream starts on a non-keyframe):
 * fabricate a gray dummy picture so motion compensation has a source */
1821 if ((s->last_picture_ptr == NULL ||
1822 s->last_picture_ptr->f->buf[0] == NULL) &&
1823 (s->pict_type != AV_PICTURE_TYPE_I ||
1824 s->picture_structure != PICT_FRAME)) {
1825 int h_chroma_shift, v_chroma_shift;
1826 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1827 &h_chroma_shift, &v_chroma_shift);
1828 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1829 av_log(avctx, AV_LOG_DEBUG,
1830 "allocating dummy last picture for B frame\n");
1831 else if (s->pict_type != AV_PICTURE_TYPE_I)
1832 av_log(avctx, AV_LOG_ERROR,
1833 "warning: first frame is no keyframe\n");
1834 else if (s->picture_structure != PICT_FRAME)
1835 av_log(avctx, AV_LOG_DEBUG,
1836 "allocate dummy last picture for field based first keyframe\n");
1838 /* Allocate a dummy frame */
1839 i = ff_find_unused_picture(s, 0);
1841 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1844 s->last_picture_ptr = &s->picture[i];
1846 s->last_picture_ptr->reference = 3;
1847 s->last_picture_ptr->f->key_frame = 0;
1848 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1850 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1851 s->last_picture_ptr = NULL;
/* paint the dummy picture mid-gray (skip for hwaccel surfaces) */
1855 if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1856 for(i=0; i<avctx->height; i++)
1857 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1858 0x80, avctx->width);
1859 for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1860 memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1861 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1862 memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1863 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
/* FLV1/H263 use luma 16 ("black") instead of mid-gray for the dummy */
1866 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1867 for(i=0; i<avctx->height; i++)
1868 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
/* mark both fields of the dummy as fully decoded for frame threading */
1872 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1873 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* likewise fabricate a dummy next reference for orphaned B frames */
1875 if ((s->next_picture_ptr == NULL ||
1876 s->next_picture_ptr->f->buf[0] == NULL) &&
1877 s->pict_type == AV_PICTURE_TYPE_B) {
1878 /* Allocate a dummy frame */
1879 i = ff_find_unused_picture(s, 0);
1881 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1884 s->next_picture_ptr = &s->picture[i];
1886 s->next_picture_ptr->reference = 3;
1887 s->next_picture_ptr->f->key_frame = 0;
1888 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1890 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1891 s->next_picture_ptr = NULL;
1894 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1895 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1898 #if 0 // BUFREF-FIXME
1899 memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1900 memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
/* refresh the working last/next picture copies from their pointers */
1902 if (s->last_picture_ptr) {
1903 ff_mpeg_unref_picture(s, &s->last_picture);
1904 if (s->last_picture_ptr->f->buf[0] &&
1905 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1906 s->last_picture_ptr)) < 0)
1909 if (s->next_picture_ptr) {
1910 ff_mpeg_unref_picture(s, &s->next_picture);
1911 if (s->next_picture_ptr->f->buf[0] &&
1912 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1913 s->next_picture_ptr)) < 0)
1917 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1918 s->last_picture_ptr->f->buf[0]));
/* field pictures: point data at the right field and double the strides */
1920 if (s->picture_structure!= PICT_FRAME) {
1922 for (i = 0; i < 4; i++) {
1923 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1924 s->current_picture.f->data[i] +=
1925 s->current_picture.f->linesize[i];
1927 s->current_picture.f->linesize[i] *= 2;
1928 s->last_picture.f->linesize[i] *= 2;
1929 s->next_picture.f->linesize[i] *= 2;
1933 s->err_recognition = avctx->err_recognition;
1935 /* set dequantizer, we can't do it during init as
1936 * it might change for mpeg4 and we can't do it in the header
1937 * decode as init is not called for mpeg4 there yet */
1938 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1939 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1940 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1941 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1942 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1943 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1945 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1946 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1949 if (s->avctx->debug & FF_DEBUG_NOMC) {
1950 gray_frame(s->current_picture_ptr->f);
1956 /* called after a frame has been decoded. */
/**
 * Called after a frame has been decoded: report the current picture as
 * fully decoded so frame-threaded consumers can proceed.
 */
1957 void ff_MPV_frame_end(MpegEncContext *s)
1961 if (s->current_picture.reference)
1962 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1966 * Draw a line from (ex, ey) -> (sx, sy).
1967 * @param w width of the image
1968 * @param h height of the image
1969 * @param stride stride/linesize of the image
1970 * @param color color of the arrow
/**
 * Additively draw an anti-aliased line between (sx,sy) and (ex,ey) into a
 * single 8-bit plane, clipping the endpoints to the image. Intensity is
 * split between the two nearest pixels using 16.16 fixed-point fractions.
 * Used only by the motion-vector debug visualization.
 */
1972 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1973 int w, int h, int stride, int color)
1977 sx = av_clip(sx, 0, w - 1);
1978 sy = av_clip(sy, 0, h - 1);
1979 ex = av_clip(ex, 0, w - 1);
1980 ey = av_clip(ey, 0, h - 1);
1982 buf[sy * stride + sx] += color;
/* mostly-horizontal case: step in x, distribute over two y rows */
1984 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1986 FFSWAP(int, sx, ex);
1987 FFSWAP(int, sy, ey);
1989 buf += sx + sy * stride;
/* f is the slope in 16.16 fixed point */
1991 f = ((ey - sy) << 16) / ex;
1992 for (x = 0; x <= ex; x++) {
1994 fr = (x * f) & 0xFFFF;
1995 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1996 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* mostly-vertical case: step in y, distribute over two x columns */
2000 FFSWAP(int, sx, ex);
2001 FFSWAP(int, sy, ey);
2003 buf += sx + sy * stride;
2006 f = ((ex - sx) << 16) / ey;
2009 for(y= 0; y <= ey; y++){
2011 fr = (y*f) & 0xFFFF;
2012 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2013 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
2019 * Draw an arrow from (ex, ey) -> (sx, sy).
2020 * @param w width of the image
2021 * @param h height of the image
2022 * @param stride stride/linesize of the image
2023 * @param color color of the arrow
/**
 * Draw an arrow between (sx,sy) and (ex,ey) for motion-vector debugging:
 * the shaft via draw_line() plus two short head strokes when the vector
 * is long enough. Endpoints are clipped loosely (±100 px outside the
 * image) so off-screen vectors still render partially.
 */
2025 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2026 int ey, int w, int h, int stride, int color)
2030 sx = av_clip(sx, -100, w + 100);
2031 sy = av_clip(sy, -100, h + 100);
2032 ex = av_clip(ex, -100, w + 100);
2033 ey = av_clip(ey, -100, h + 100);
/* only draw a head when the vector length exceeds 3 pixels */
2038 if (dx * dx + dy * dy > 3 * 3) {
2041 int length = ff_sqrt((rx * rx + ry * ry) << 8);
2043 // FIXME subpixel accuracy
/* normalize the head strokes to a fixed on-screen size */
2044 rx = ROUNDED_DIV(rx * 3 << 4, length);
2045 ry = ROUNDED_DIV(ry * 3 << 4, length);
2047 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2048 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2050 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
2054 * Print debugging info for the given picture.
/**
 * Print (and optionally draw into the picture) debugging info for a frame:
 * a text map of skip/QP/MB-type per macroblock when the corresponding
 * FF_DEBUG_* flags are set, and visual overlays (motion vectors as arrows,
 * QP / MB-type as chroma coloring) when the VIS flags or debug_mv are set.
 * No-op for hwaccel output where the pixel data is not CPU-accessible.
 * NOTE(review): listing omits some brace/else lines of the full source.
 */
2056 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2057 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2059 int mb_width, int mb_height, int mb_stride, int quarter_sample)
2061 if (avctx->hwaccel || !mbtype_table
2062 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
/* ---- textual per-MB dump ---- */
2066 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2069 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2070 av_get_picture_type_char(pict->pict_type));
2071 for (y = 0; y < mb_height; y++) {
2072 for (x = 0; x < mb_width; x++) {
2073 if (avctx->debug & FF_DEBUG_SKIP) {
2074 int count = mbskip_table[x + y * mb_stride];
2077 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2079 if (avctx->debug & FF_DEBUG_QP) {
2080 av_log(avctx, AV_LOG_DEBUG, "%2d",
2081 qscale_table[x + y * mb_stride]);
2083 if (avctx->debug & FF_DEBUG_MB_TYPE) {
2084 int mb_type = mbtype_table[x + y * mb_stride];
2085 // Type & MV direction
2086 if (IS_PCM(mb_type))
2087 av_log(avctx, AV_LOG_DEBUG, "P");
2088 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2089 av_log(avctx, AV_LOG_DEBUG, "A");
2090 else if (IS_INTRA4x4(mb_type))
2091 av_log(avctx, AV_LOG_DEBUG, "i");
2092 else if (IS_INTRA16x16(mb_type))
2093 av_log(avctx, AV_LOG_DEBUG, "I");
2094 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2095 av_log(avctx, AV_LOG_DEBUG, "d");
2096 else if (IS_DIRECT(mb_type))
2097 av_log(avctx, AV_LOG_DEBUG, "D");
2098 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2099 av_log(avctx, AV_LOG_DEBUG, "g");
2100 else if (IS_GMC(mb_type))
2101 av_log(avctx, AV_LOG_DEBUG, "G");
2102 else if (IS_SKIP(mb_type))
2103 av_log(avctx, AV_LOG_DEBUG, "S");
2104 else if (!USES_LIST(mb_type, 1))
2105 av_log(avctx, AV_LOG_DEBUG, ">");
2106 else if (!USES_LIST(mb_type, 0))
2107 av_log(avctx, AV_LOG_DEBUG, "<");
2109 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2110 av_log(avctx, AV_LOG_DEBUG, "X");
/* second character: partition segmentation */
2114 if (IS_8X8(mb_type))
2115 av_log(avctx, AV_LOG_DEBUG, "+");
2116 else if (IS_16X8(mb_type))
2117 av_log(avctx, AV_LOG_DEBUG, "-");
2118 else if (IS_8X16(mb_type))
2119 av_log(avctx, AV_LOG_DEBUG, "|");
2120 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2121 av_log(avctx, AV_LOG_DEBUG, " ");
2123 av_log(avctx, AV_LOG_DEBUG, "?");
/* third character: interlacing marker */
2126 if (IS_INTERLACED(mb_type))
2127 av_log(avctx, AV_LOG_DEBUG, "=");
2129 av_log(avctx, AV_LOG_DEBUG, " ");
2132 av_log(avctx, AV_LOG_DEBUG, "\n");
/* ---- visual overlays drawn directly into the output frame ---- */
2136 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2137 (avctx->debug_mv)) {
2138 const int shift = 1 + quarter_sample;
2142 int h_chroma_shift, v_chroma_shift, block_height;
2143 const int width = avctx->width;
2144 const int height = avctx->height;
/* H.264/SVQ3 store MVs at 4x4 granularity, others at 8x8 */
2145 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2146 const int mv_stride = (mb_width << mv_sample_log2) +
2147 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2149 *low_delay = 0; // needed to see the vectors without trashing the buffers
2151 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2153 av_frame_make_writable(pict);
2155 pict->opaque = NULL;
2156 ptr = pict->data[0];
2157 block_height = 16 >> v_chroma_shift;
2159 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2161 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2162 const int mb_index = mb_x + mb_y * mb_stride;
/* motion vectors: type 0 = P forward, 1 = B forward, 2 = B backward */
2163 if ((avctx->debug_mv) && motion_val[0]) {
2165 for (type = 0; type < 3; type++) {
2169 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2170 (pict->pict_type!= AV_PICTURE_TYPE_P))
2175 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2176 (pict->pict_type!= AV_PICTURE_TYPE_B))
2181 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2182 (pict->pict_type!= AV_PICTURE_TYPE_B))
2187 if (!USES_LIST(mbtype_table[mb_index], direction))
/* one arrow per partition, anchored at each block center */
2190 if (IS_8X8(mbtype_table[mb_index])) {
2192 for (i = 0; i < 4; i++) {
2193 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2194 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2195 int xy = (mb_x * 2 + (i & 1) +
2196 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2197 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2198 int my = (motion_val[direction][xy][1] >> shift) + sy;
2199 draw_arrow(ptr, sx, sy, mx, my, width,
2200 height, pict->linesize[0], 100);
2202 } else if (IS_16X8(mbtype_table[mb_index])) {
2204 for (i = 0; i < 2; i++) {
2205 int sx = mb_x * 16 + 8;
2206 int sy = mb_y * 16 + 4 + 8 * i;
2207 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2208 int mx = (motion_val[direction][xy][0] >> shift);
2209 int my = (motion_val[direction][xy][1] >> shift);
2211 if (IS_INTERLACED(mbtype_table[mb_index]))
2214 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2215 height, pict->linesize[0], 100);
2217 } else if (IS_8X16(mbtype_table[mb_index])) {
2219 for (i = 0; i < 2; i++) {
2220 int sx = mb_x * 16 + 4 + 8 * i;
2221 int sy = mb_y * 16 + 8;
2222 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2223 int mx = motion_val[direction][xy][0] >> shift;
2224 int my = motion_val[direction][xy][1] >> shift;
2226 if (IS_INTERLACED(mbtype_table[mb_index]))
2229 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2230 height, pict->linesize[0], 100);
/* default: single 16x16 vector from the MB center */
2233 int sx= mb_x * 16 + 8;
2234 int sy= mb_y * 16 + 8;
2235 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2236 int mx= (motion_val[direction][xy][0]>>shift) + sx;
2237 int my= (motion_val[direction][xy][1]>>shift) + sy;
2238 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
/* QP visualization: write the scaled QP as gray level into chroma */
2242 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2243 uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2244 0x0101010101010101ULL;
2246 for (y = 0; y < block_height; y++) {
2247 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2248 (block_height * mb_y + y) *
2249 pict->linesize[1]) = c;
2250 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2251 (block_height * mb_y + y) *
2252 pict->linesize[2]) = c;
/* MB-type visualization: map each type to a hue via COLOR() */
2255 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2257 int mb_type = mbtype_table[mb_index];
2260 #define COLOR(theta, r) \
2261 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2262 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2266 if (IS_PCM(mb_type)) {
2268 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2269 IS_INTRA16x16(mb_type)) {
2271 } else if (IS_INTRA4x4(mb_type)) {
2273 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2275 } else if (IS_DIRECT(mb_type)) {
2277 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2279 } else if (IS_GMC(mb_type)) {
2281 } else if (IS_SKIP(mb_type)) {
2283 } else if (!USES_LIST(mb_type, 1)) {
2285 } else if (!USES_LIST(mb_type, 0)) {
2288 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2292 u *= 0x0101010101010101ULL;
2293 v *= 0x0101010101010101ULL;
2294 for (y = 0; y < block_height; y++) {
2295 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2296 (block_height * mb_y + y) * pict->linesize[1]) = u;
2297 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2298 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* segmentation: XOR luma to outline partition boundaries */
2302 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2303 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2304 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2305 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2306 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2308 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2309 for (y = 0; y < 16; y++)
2310 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2311 pict->linesize[0]] ^= 0x80;
/* for 4x4 MV grids, mark sub-8x8 splits when neighbor MVs differ */
2313 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2314 int dm = 1 << (mv_sample_log2 - 2);
2315 for (i = 0; i < 4; i++) {
2316 int sx = mb_x * 16 + 8 * (i & 1);
2317 int sy = mb_y * 16 + 8 * (i >> 1);
2318 int xy = (mb_x * 2 + (i & 1) +
2319 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2321 int32_t *mv = (int32_t *) &motion_val[0][xy];
2322 if (mv[0] != mv[dm] ||
2323 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2324 for (y = 0; y < 8; y++)
2325 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2326 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2327 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2328 pict->linesize[0]) ^= 0x8080808080808080ULL;
2332 if (IS_INTERLACED(mb_type) &&
2333 avctx->codec->id == AV_CODEC_ID_H264) {
/* consume the skip count so it is printed only once */
2337 mbskip_table[mb_index] = 0;
/** Convenience wrapper feeding the context's tables to ff_print_debug_info2(). */
2343 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2345 ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
2346 p->qscale_table, p->motion_val, &s->low_delay,
2347 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/**
 * Export the picture's qscale table to the output frame as frame side data.
 * Takes a new reference on the table buffer and advances it past the edge
 * padding (2 * mb_stride + 1) so the frame sees only the visible MBs.
 * @return 0 on success, AVERROR(ENOMEM) if the buffer ref fails.
 */
2350 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2352 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2353 int offset = 2*s->mb_stride + 1;
2355 return AVERROR(ENOMEM);
2356 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2357 ref->size -= offset;
2358 ref->data += offset;
2359 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/**
 * Half-pel motion compensation for one block in lowres decoding mode.
 * Splits the motion vector into an integer source offset plus sub-pel
 * fractions (sx/sy), falls back to edge emulation when the source block
 * would read outside the valid picture area, and applies the chroma MC
 * function selected by the lowres level.
 */
2362 static inline int hpel_motion_lowres(MpegEncContext *s,
2363 uint8_t *dest, uint8_t *src,
2364 int field_based, int field_select,
2365 int src_x, int src_y,
2366 int width, int height, ptrdiff_t stride,
2367 int h_edge_pos, int v_edge_pos,
2368 int w, int h, h264_chroma_mc_func *pix_op,
2369 int motion_x, int motion_y)
2371 const int lowres = s->avctx->lowres;
2372 const int op_index = FFMIN(lowres, 3);
/* mask selecting the sub-pel fraction bits at this lowres level */
2373 const int s_mask = (2 << lowres) - 1;
2377 if (s->quarter_sample) {
/* split MV into fraction (sx/sy) and integer pel offset;
 * note ">> lowres + 1" parses as ">> (lowres + 1)" */
2382 sx = motion_x & s_mask;
2383 sy = motion_y & s_mask;
2384 src_x += motion_x >> lowres + 1;
2385 src_y += motion_y >> lowres + 1;
2387 src += src_y * stride + src_x;
/* out-of-picture reads go through the edge-emulation buffer */
2389 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2390 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2391 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2392 s->linesize, s->linesize,
2393 w + 1, (h + 1) << field_based,
2394 src_x, src_y << field_based,
2395 h_edge_pos, v_edge_pos);
2396 src = s->edge_emu_buffer;
/* rescale the fractions to the 1/8-pel range expected by pix_op */
2400 sx = (sx << 2) >> lowres;
2401 sy = (sy << 2) >> lowres;
2404 pix_op[op_index](dest, src, stride, h, sx, sy);
2408 /* apply one mpeg motion vector to the three components */
/* Lowres variant of mpeg_motion(): motion-compensates the Y, Cb and Cr
 * planes of one macroblock from ref_picture using a single MV, handling
 * field-based prediction, per-format chroma MV derivation (H.263 / H.261 /
 * MPEG chroma subsampling variants) and picture-edge emulation. */
2409 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2416 uint8_t **ref_picture,
2417 h264_chroma_mc_func *pix_op,
2418 int motion_x, int motion_y,
2421 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2422 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2423 ptrdiff_t uvlinesize, linesize;
2424 const int lowres = s->avctx->lowres;
2425 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2426 const int block_s = 8>>lowres;
2427 const int s_mask = (2 << lowres) - 1;
2428 const int h_edge_pos = s->h_edge_pos >> lowres;
2429 const int v_edge_pos = s->v_edge_pos >> lowres;
/* doubled strides select every other line for field-based prediction */
2430 linesize = s->current_picture.f->linesize[0] << field_based;
2431 uvlinesize = s->current_picture.f->linesize[1] << field_based;
2433 // FIXME obviously not perfect but qpel will not work in lowres anyway
2434 if (s->quarter_sample) {
2440 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2443 sx = motion_x & s_mask;
2444 sy = motion_y & s_mask;
/* NB: '>> lowres + 1' parses as '>> (lowres + 1)' — intended */
2445 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2446 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* chroma source position/phase depends on the bitstream format */
2448 if (s->out_format == FMT_H263) {
2449 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2450 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2451 uvsrc_x = src_x >> 1;
2452 uvsrc_y = src_y >> 1;
2453 } else if (s->out_format == FMT_H261) {
2454 // even chroma mv's are full pel in H261
2457 uvsx = (2 * mx) & s_mask;
2458 uvsy = (2 * my) & s_mask;
2459 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2460 uvsrc_y = mb_y * block_s + (my >> lowres);
2462 if(s->chroma_y_shift){
2467 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2468 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2470 if(s->chroma_x_shift){
2474 uvsy = motion_y & s_mask;
2476 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2479 uvsx = motion_x & s_mask;
2480 uvsy = motion_y & s_mask;
2487 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2488 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2489 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* out-of-picture reads go through the edge emulation buffer */
2491 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2492 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2493 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2494 linesize >> field_based, linesize >> field_based,
2495 17, 17 + field_based,
2496 src_x, src_y << field_based, h_edge_pos,
2498 ptr_y = s->edge_emu_buffer;
2499 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2500 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2501 s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
2502 uvlinesize >> field_based, uvlinesize >> field_based,
2504 uvsrc_x, uvsrc_y << field_based,
2505 h_edge_pos >> 1, v_edge_pos >> 1);
2506 s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
2507 uvlinesize >> field_based,uvlinesize >> field_based,
2509 uvsrc_x, uvsrc_y << field_based,
2510 h_edge_pos >> 1, v_edge_pos >> 1);
2512 ptr_cr = uvbuf + 16;
2516 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
/* bottom field: step destinations/sources down one frame line */
2518 dest_y += s->linesize;
2519 dest_cb += s->uvlinesize;
2520 dest_cr += s->uvlinesize;
2524 ptr_y += s->linesize;
2525 ptr_cb += s->uvlinesize;
2526 ptr_cr += s->uvlinesize;
2529 sx = (sx << 2) >> lowres;
2530 sy = (sy << 2) >> lowres;
2531 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2533 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2534 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2535 uvsx = (uvsx << 2) >> lowres;
2536 uvsy = (uvsy << 2) >> lowres;
2538 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2539 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2542 // FIXME h261 lowres loop filter
/* Chroma motion compensation for a 4MV (four 8x8 luma vectors) macroblock
 * at reduced resolution: the four luma MVs are collapsed into a single
 * chroma MV with H.263-style rounding, then one block_s x block_s block is
 * compensated per chroma plane, with edge emulation as needed. */
2545 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2546 uint8_t *dest_cb, uint8_t *dest_cr,
2547 uint8_t **ref_picture,
2548 h264_chroma_mc_func * pix_op,
2551 const int lowres = s->avctx->lowres;
2552 const int op_index = FFMIN(lowres, 3);
2553 const int block_s = 8 >> lowres;
2554 const int s_mask = (2 << lowres) - 1;
/* NB: '>> lowres + 1' parses as '>> (lowres + 1)' — intended */
2555 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2556 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2557 int emu = 0, src_x, src_y, sx, sy;
2561 if (s->quarter_sample) {
2566 /* In case of 8X8, we construct a single chroma motion vector
2567 with a special rounding */
2568 mx = ff_h263_round_chroma(mx);
2569 my = ff_h263_round_chroma(my);
2573 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2574 src_y = s->mb_y * block_s + (my >> lowres + 1);
2576 offset = src_y * s->uvlinesize + src_x;
2577 ptr = ref_picture[1] + offset;
2578 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2579 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2580 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2581 s->uvlinesize, s->uvlinesize,
2583 src_x, src_y, h_edge_pos, v_edge_pos);
2584 ptr = s->edge_emu_buffer;
2587 sx = (sx << 2) >> lowres;
2588 sy = (sy << 2) >> lowres;
2589 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr plane: same offset and phase as Cb, re-run edge emulation if needed */
2591 ptr = ref_picture[2] + offset;
2593 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2594 s->uvlinesize, s->uvlinesize,
2596 src_x, src_y, h_edge_pos, v_edge_pos);
2597 ptr = s->edge_emu_buffer;
2599 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2603 * motion compensation of a single macroblock
2605 * @param dest_y luma destination pointer
2606 * @param dest_cb chroma cb/u destination pointer
2607 * @param dest_cr chroma cr/v destination pointer
2608 * @param dir direction (0->forward, 1->backward)
2609 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2610 * @param pix_op halfpel motion compensation function (average or put normally)
2611 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Lowres top-level MC dispatcher: selects the per-MV-type strategy
 * (16x16, 4x8x8, field, 16x8, dual-prime) and delegates to
 * mpeg_motion_lowres()/hpel_motion_lowres()/chroma_4mv_motion_lowres(). */
2613 static inline void MPV_motion_lowres(MpegEncContext *s,
2614 uint8_t *dest_y, uint8_t *dest_cb,
2616 int dir, uint8_t **ref_picture,
2617 h264_chroma_mc_func *pix_op)
2621 const int lowres = s->avctx->lowres;
2622 const int block_s = 8 >>lowres;
2627 switch (s->mv_type) {
2629 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2631 ref_picture, pix_op,
2632 s->mv[dir][0][0], s->mv[dir][0][1],
/* 4MV: four independent 8x8 luma blocks, chroma from the averaged MV */
2638 for (i = 0; i < 4; i++) {
2639 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2640 s->linesize) * block_s,
2641 ref_picture[0], 0, 0,
2642 (2 * mb_x + (i & 1)) * block_s,
2643 (2 * mb_y + (i >> 1)) * block_s,
2644 s->width, s->height, s->linesize,
2645 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2646 block_s, block_s, pix_op,
2647 s->mv[dir][i][0], s->mv[dir][i][1]);
2649 mx += s->mv[dir][i][0];
2650 my += s->mv[dir][i][1];
2653 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2654 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field MVs inside a frame picture: one call per field */
2658 if (s->picture_structure == PICT_FRAME) {
2660 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2661 1, 0, s->field_select[dir][0],
2662 ref_picture, pix_op,
2663 s->mv[dir][0][0], s->mv[dir][0][1],
2666 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2667 1, 1, s->field_select[dir][1],
2668 ref_picture, pix_op,
2669 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture referencing the opposite parity of the current frame */
2672 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2673 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2674 ref_picture = s->current_picture_ptr->f->data;
2677 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2678 0, 0, s->field_select[dir][0],
2679 ref_picture, pix_op,
2681 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two MVs per field picture, each covering half the MB height */
2685 for (i = 0; i < 2; i++) {
2686 uint8_t **ref2picture;
2688 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2689 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2690 ref2picture = ref_picture;
2692 ref2picture = s->current_picture_ptr->f->data;
2695 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2696 0, 0, s->field_select[dir][i],
2697 ref2picture, pix_op,
2698 s->mv[dir][i][0], s->mv[dir][i][1] +
2699 2 * block_s * i, block_s, mb_y >> 1);
2701 dest_y += 2 * block_s * s->linesize;
2702 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2703 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* dual prime: predict from both parities and average the results */
2707 if (s->picture_structure == PICT_FRAME) {
2708 for (i = 0; i < 2; i++) {
2710 for (j = 0; j < 2; j++) {
2711 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2713 ref_picture, pix_op,
2714 s->mv[dir][2 * i + j][0],
2715 s->mv[dir][2 * i + j][1],
2718 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2721 for (i = 0; i < 2; i++) {
2722 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2723 0, 0, s->picture_structure != i + 1,
2724 ref_picture, pix_op,
2725 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2726 2 * block_s, mb_y >> 1);
2728 // after put we make avg of the same block
2729 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2731 // opposite parity is always in the same
2732 // frame if this is second field
2733 if (!s->first_field) {
2734 ref_picture = s->current_picture_ptr->f->data;
2745 * find the lowest MB row referenced in the MVs
/* Used for frame-threading: returns the bottom-most MB row of the
 * reference picture that this MB's MVs (direction dir) can touch, clamped
 * to [0, mb_height-1], so the decoder can wait only as far as needed. */
2747 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2749 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2750 int my, off, i, mvs;
2752 if (s->picture_structure != PICT_FRAME || s->mcsel)
2755 switch (s->mv_type) {
2769 for (i = 0; i < mvs; i++) {
/* normalize half-pel MVs to quarter-pel units before taking the extremes */
2770 my = s->mv[dir][i][1]<<qpel_shift;
2771 my_max = FFMAX(my_max, my);
2772 my_min = FFMIN(my_min, my);
/* convert the largest quarter-pel displacement to whole MB rows (÷64) */
2775 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2777 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* conservative fallback: the whole reference picture may be needed */
2779 return s->mb_height-1;
2782 /* put block[] to dest[] */
/* Intra path: dequantize the coefficient block, then IDCT and store
 * (overwrite) the result into dest. */
2783 static inline void put_dct(MpegEncContext *s,
2784 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2786 s->dct_unquantize_intra(s, block, i, qscale);
2787 s->dsp.idct_put (dest, line_size, block);
2790 /* add block[] to dest[] */
/* Inter path (already dequantized): IDCT the block and add the residue
 * onto dest, but only if the block has any nonzero coefficients. */
2791 static inline void add_dct(MpegEncContext *s,
2792 int16_t *block, int i, uint8_t *dest, int line_size)
2794 if (s->block_last_index[i] >= 0) {
2795 s->dsp.idct_add (dest, line_size, block);
/* Inter path: dequantize the coefficient block, then IDCT and add the
 * residue onto dest — skipped entirely when the block is all-zero. */
2799 static inline void add_dequant_dct(MpegEncContext *s,
2800 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2802 if (s->block_last_index[i] >= 0) {
2803 s->dct_unquantize_inter(s, block, i, qscale);
2805 s->dsp.idct_add (dest, line_size, block);
2810 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra prediction state (DC predictors to 1024, AC predictor
 * arrays to zero, coded-block flags) for the current macroblock so that a
 * following intra MB does not predict from stale values. */
2812 void ff_clean_intra_table_entries(MpegEncContext *s)
2814 int wrap = s->b8_stride;
2815 int xy = s->block_index[0];
/* luma: four 8x8 blocks of the MB share these DC/AC predictor slots */
2818 s->dc_val[0][xy + 1 ] =
2819 s->dc_val[0][xy + wrap] =
2820 s->dc_val[0][xy + 1 + wrap] = 1024;
2822 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2823 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2824 if (s->msmpeg4_version>=3) {
2825 s->coded_block[xy ] =
2826 s->coded_block[xy + 1 ] =
2827 s->coded_block[xy + wrap] =
2828 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one entry per MB, indexed on the MB grid */
2831 wrap = s->mb_stride;
2832 xy = s->mb_x + s->mb_y * wrap;
2834 s->dc_val[2][xy] = 1024;
2836 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2837 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2839 s->mbintra_table[xy]= 0;
2842 /* generic function called after a macroblock has been parsed by the
2843 decoder or after it has been encoded by the encoder.
2845 Important variables used:
2846 s->mb_intra : true if intra macroblock
2847 s->mv_dir : motion vector direction
2848 s->mv_type : motion vector type
2849 s->mv : motion vector
2850 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstructs one macroblock into the current picture: hwaccel bypass,
 * optional DCT-coefficient debug dump, intra-predictor bookkeeping,
 * motion compensation (normal or lowres path), then dequant/IDCT of the
 * residue per codec family. lowres_flag/is_mpeg12 are compile-time-style
 * specialization switches for the av_always_inline instantiations. */
2852 static av_always_inline
2853 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2854 int lowres_flag, int is_mpeg12)
2856 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* hardware acceleration handles the whole MB; nothing to do in software */
2859 s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
2860 s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
2864 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2865 /* print DCT coefficients */
2867 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2869 for(j=0; j<64; j++){
2870 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2872 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2876 s->current_picture.qscale_table[mb_xy] = s->qscale;
2878 /* update DC predictors for P macroblocks */
2880 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2881 if(s->mbintra_table[mb_xy])
2882 ff_clean_intra_table_entries(s);
2886 s->last_dc[2] = 128 << s->intra_dc_precision;
2889 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2890 s->mbintra_table[mb_xy]=1;
/* reconstruction can be skipped entirely while encoding unless PSNR,
 * frame-skip heuristics or RD mode decision need the decoded picture */
2892 if ( (s->flags&CODEC_FLAG_PSNR)
2893 || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
2894 || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2895 uint8_t *dest_y, *dest_cb, *dest_cr;
2896 int dct_linesize, dct_offset;
2897 op_pixels_func (*op_pix)[4];
2898 qpel_mc_func (*op_qpix)[16];
2899 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2900 const int uvlinesize = s->current_picture.f->linesize[1];
2901 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2902 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2904 /* avoid copy if macroblock skipped in last frame too */
2905 /* skip only during decoding as we might trash the buffers during encoding a bit */
2907 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2909 if (s->mb_skipped) {
2911 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2913 } else if(!s->current_picture.reference) {
2916 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT interleaves the two fields: double stride, offset 1 line */
2920 dct_linesize = linesize << s->interlaced_dct;
2921 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2925 dest_cb= s->dest[1];
2926 dest_cr= s->dest[2];
/* non-readable B-frame case: reconstruct into scratch, copy out later */
2928 dest_y = s->b_scratchpad;
2929 dest_cb= s->b_scratchpad+16*linesize;
2930 dest_cr= s->b_scratchpad+32*linesize;
2934 /* motion handling */
2935 /* decoding or more than one mb_type (MC was already done otherwise) */
2938 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
/* frame threading: wait until the referenced rows are decoded */
2939 if (s->mv_dir & MV_DIR_FORWARD) {
2940 ff_thread_await_progress(&s->last_picture_ptr->tf,
2941 ff_MPV_lowest_referenced_row(s, 0),
2944 if (s->mv_dir & MV_DIR_BACKWARD) {
2945 ff_thread_await_progress(&s->next_picture_ptr->tf,
2946 ff_MPV_lowest_referenced_row(s, 1),
2952 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
/* B-frames: forward MC 'puts', backward MC then averages on top */
2954 if (s->mv_dir & MV_DIR_FORWARD) {
2955 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
2956 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2958 if (s->mv_dir & MV_DIR_BACKWARD) {
2959 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
2962 op_qpix = s->me.qpel_put;
2963 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2964 op_pix = s->hdsp.put_pixels_tab;
2966 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2968 if (s->mv_dir & MV_DIR_FORWARD) {
2969 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2970 op_pix = s->hdsp.avg_pixels_tab;
2971 op_qpix= s->me.qpel_avg;
2973 if (s->mv_dir & MV_DIR_BACKWARD) {
2974 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2979 /* skip dequant / idct if we are really late ;) */
2980 if(s->avctx->skip_idct){
2981 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2982 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2983 || s->avctx->skip_idct >= AVDISCARD_ALL)
2987 /* add dct residue */
/* codecs that keep quantized coeffs until now: dequantize while adding */
2988 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2989 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2990 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2991 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2992 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2993 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2995 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2996 if (s->chroma_y_shift){
2997 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2998 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3002 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3003 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3004 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3005 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* codecs whose coeffs were already dequantized during parsing */
3008 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3009 add_dct(s, block[0], 0, dest_y , dct_linesize);
3010 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3011 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3012 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3014 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3015 if(s->chroma_y_shift){//Chroma420
3016 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3017 add_dct(s, block[5], 5, dest_cr, uvlinesize);
3020 dct_linesize = uvlinesize << s->interlaced_dct;
3021 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3023 add_dct(s, block[4], 4, dest_cb, dct_linesize);
3024 add_dct(s, block[5], 5, dest_cr, dct_linesize);
3025 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3026 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3027 if(!s->chroma_x_shift){//Chroma444
3028 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3029 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3030 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3031 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3036 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3037 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3040 /* dct only in intra block */
3041 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
3042 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3043 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3044 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3045 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3047 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3048 if(s->chroma_y_shift){
3049 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3050 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3054 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3055 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3056 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3057 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra while decoding: coefficients are already dequantized */
3061 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
3062 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3063 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
3064 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3066 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3067 if(s->chroma_y_shift){
3068 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
3069 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
3072 dct_linesize = uvlinesize << s->interlaced_dct;
3073 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3075 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
3076 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
3077 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3078 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3079 if(!s->chroma_x_shift){//Chroma444
3080 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3081 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3082 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3083 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* non-readable destination: copy the scratchpad MB into the real picture */
3091 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3092 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3093 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public MB decode entry point: dispatches to the four av_always_inline
 * specializations of MPV_decode_mb_internal() selected by lowres and
 * MPEG-1/2 vs other codecs. */
3098 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
3100 if(s->out_format == FMT_MPEG1) {
3101 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
3102 else MPV_decode_mb_internal(s, block, 0, 1);
3105 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
3106 else MPV_decode_mb_internal(s, block, 0, 0);
/* Notify the user's draw_horiz_band callback that rows [y, y+h) of the
 * current picture are ready, forwarding field/low-delay state. */
3109 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3111 ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3112 s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3113 s->first_field, s->low_delay);
/* Set up per-MB-row state: the six block_index entries (4 luma 8x8 blocks
 * + 2 chroma blocks) into the prediction tables, and the s->dest[] plane
 * pointers for the current MB position, accounting for lowres scaling and
 * field/frame picture structure. */
3116 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3117 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3118 const int uvlinesize = s->current_picture.f->linesize[1];
/* MB edge length in pixels is 16 >> lowres, expressed here as a shift */
3119 const int mb_size= 4 - s->avctx->lowres;
3121 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3122 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3123 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3124 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3125 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3126 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3127 //block_index is not used by mpeg2, so it is not affected by chroma_format
3129 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
3130 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3131 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3133 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3135 if(s->picture_structure==PICT_FRAME){
3136 s->dest[0] += s->mb_y * linesize << mb_size;
3137 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3138 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field pictures: only every second MB row belongs to this field */
3140 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3141 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3142 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3143 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3149 * Permute an 8x8 block.
3150 * @param block the block which will be permuted according to the given permutation vector
3151 * @param permutation the permutation vector
3152 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3153 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3154 * (inverse) permutated to scantable order!
3156 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3162 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* first pass: gather the coefficients touched up to 'last' (copy out) */
3164 for(i=0; i<=last; i++){
3165 const int j= scantable[i];
/* second pass: scatter them back through the permutation vector */
3170 for(i=0; i<=last; i++){
3171 const int j= scantable[i];
3172 const int perm_j= permutation[j];
3173 block[perm_j]= temp[j];
/* avcodec flush callback: drop all picture references, reset MB position,
 * parser state and the bitstream buffer so decoding can restart cleanly
 * (e.g. after a seek). Safe to call before full initialization. */
3177 void ff_mpeg_flush(AVCodecContext *avctx){
3179 MpegEncContext *s = avctx->priv_data;
3181 if(s==NULL || s->picture==NULL)
3184 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3185 ff_mpeg_unref_picture(s, &s->picture[i]);
3186 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3188 ff_mpeg_unref_picture(s, &s->current_picture);
3189 ff_mpeg_unref_picture(s, &s->last_picture);
3190 ff_mpeg_unref_picture(s, &s->next_picture);
3192 s->mb_x= s->mb_y= 0;
3195 s->parse_context.state= -1;
3196 s->parse_context.frame_start_found= 0;
3197 s->parse_context.overread= 0;
3198 s->parse_context.overread_index= 0;
3199 s->parse_context.index= 0;
3200 s->parse_context.last_index= 0;
3201 s->bitstream_buffer_size=0;
3206 * set qscale and update qscale dependent variables.
/* Clamps qscale to the valid range (upper bound 31 visible below), then
 * refreshes the derived chroma qscale and luma/chroma DC scale factors. */
3208 void ff_set_qscale(MpegEncContext * s, int qscale)
3212 else if (qscale > 31)
3216 s->chroma_qscale= s->chroma_qscale_table[qscale];
3218 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3219 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Frame-threading: report that all MB rows up to s->mb_y of the current
 * picture are decoded — skipped for B-frames, partitioned frames, and
 * after errors, where the progress guarantee would not hold. */
3222 void ff_MPV_report_decode_progress(MpegEncContext *s)
3224 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3225 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
#if CONFIG_ERROR_RESILIENCE
/* Populate an ERPicture view (used by the error-resilience code) from a
 * Picture: zero-initialize, then alias its MV/ref-index/MB-type tables.
 * No references are taken; dst borrows src's storage. */
3229 void ff_mpeg_set_erpic(ERPicture *dst, Picture *src)
3233 memset(dst, 0, sizeof(*dst));
3240 for (i = 0; i < 2; i++) {
3241 dst->motion_val[i] = src->motion_val[i];
3242 dst->ref_index[i] = src->ref_index[i];
3245 dst->mb_type = src->mb_type;
3246 dst->field_picture = src->field_picture;
/* Hand the current/next/last pictures and relevant timing/prediction
 * parameters to the error-resilience context, then start its per-frame
 * bookkeeping via ff_er_frame_start(). */
3249 void ff_mpeg_er_frame_start(MpegEncContext *s)
3251 ERContext *er = &s->er;
3253 ff_mpeg_set_erpic(&er->cur_pic, s->current_picture_ptr);
3254 ff_mpeg_set_erpic(&er->next_pic, s->next_picture_ptr);
3255 ff_mpeg_set_erpic(&er->last_pic, s->last_picture_ptr);
3257 er->pp_time = s->pp_time;
3258 er->pb_time = s->pb_time;
3259 er->quarter_sample = s->quarter_sample;
3260 er->partitioned_frame = s->partitioned_frame;
3262 ff_er_frame_start(er);
#endif /* CONFIG_ERROR_RESILIENCE */