2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
40 #include "mpegutils.h"
41 #include "mpegvideo.h"
45 #include "xvmc_internal.h"
/* Default chroma qscale mapping: identity (chroma qscale == luma qscale). */
49 static const uint8_t ff_default_chroma_qscale_table[32] = {
50 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
51 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
52 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale table, indexed by qscale: constant 8 for all entries. */
55 const uint8_t ff_mpeg1_dc_scale_table[128] = {
56 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
57 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table for intra_dc_precision 1: constant 4. */
67 static const uint8_t mpeg2_dc_scale_table1[128] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table for intra_dc_precision 2: constant 2. */
79 static const uint8_t mpeg2_dc_scale_table2[128] = {
80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table for intra_dc_precision 3: constant 1 (no scaling). */
91 static const uint8_t mpeg2_dc_scale_table3[128] = {
92 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
93 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Lookup: one DC scale table per intra_dc_precision value (0..3). */
103 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
104 ff_mpeg1_dc_scale_table,
105 mpeg2_dc_scale_table1,
106 mpeg2_dc_scale_table2,
107 mpeg2_dc_scale_table3,
/* Alternate horizontal coefficient scan order (64-entry zigzag variant). */
110 const uint8_t ff_alternate_horizontal_scan[64] = {
111 0, 1, 2, 3, 8, 9, 16, 17,
112 10, 11, 4, 5, 6, 7, 15, 14,
113 13, 12, 19, 18, 24, 25, 32, 33,
114 26, 27, 20, 21, 22, 23, 28, 29,
115 30, 31, 34, 35, 40, 41, 48, 49,
116 42, 43, 36, 37, 38, 39, 44, 45,
117 46, 47, 50, 51, 56, 57, 58, 59,
118 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical coefficient scan order, used when alternate_scan is set. */
121 const uint8_t ff_alternate_vertical_scan[64] = {
122 0, 8, 16, 24, 1, 9, 2, 10,
123 17, 25, 32, 40, 48, 56, 57, 49,
124 41, 33, 26, 18, 3, 11, 4, 12,
125 19, 27, 34, 42, 50, 58, 35, 43,
126 51, 59, 20, 28, 5, 13, 6, 14,
127 21, 29, 36, 44, 52, 60, 37, 45,
128 53, 61, 22, 30, 7, 15, 23, 31,
129 38, 46, 54, 62, 39, 47, 55, 63,
/* Dequantize an MPEG-1 intra block in place.
 * block:  coefficients in permuted scan order; n selects luma/chroma DC scale;
 * the DC coefficient (block[0]) is scaled separately from the AC terms. */
132 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
133 int16_t *block, int n, int qscale)
135 int i, level, nCoeffs;
136 const uint16_t *quant_matrix;
138 nCoeffs= s->block_last_index[n];
/* DC: multiplied by the luma or chroma DC scale, not by the quant matrix. */
141 block[0] = block[0] * s->y_dc_scale;
143 block[0] = block[0] * s->c_dc_scale;
144 /* XXX: only mpeg1 */
145 quant_matrix = s->intra_matrix;
/* AC coefficients start at i == 1; j maps scan position to matrix index. */
146 for(i=1;i<=nCoeffs;i++) {
147 int j= s->intra_scantable.permutated[i];
152 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* (level - 1) | 1 rounds the magnitude down to an odd value (MPEG-1
 * mismatch control). */
153 level = (level - 1) | 1;
156 level = (int)(level * qscale * quant_matrix[j]) >> 3;
157 level = (level - 1) | 1;
/* Dequantize an MPEG-1 inter (non-intra) block in place.
 * Unlike the intra path the loop starts at i == 0: inter DC is quantized
 * like any AC coefficient. */
164 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
165 int16_t *block, int n, int qscale)
167 int i, level, nCoeffs;
168 const uint16_t *quant_matrix;
170 nCoeffs= s->block_last_index[n];
172 quant_matrix = s->inter_matrix;
173 for(i=0; i<=nCoeffs; i++) {
174 int j= s->intra_scantable.permutated[i];
/* Inter reconstruction: (2*level + 1) * qscale * matrix / 16, then the
 * result is forced odd via (level - 1) | 1 (mismatch control). */
179 level = (((level << 1) + 1) * qscale *
180 ((int) (quant_matrix[j]))) >> 4;
181 level = (level - 1) | 1;
184 level = (((level << 1) + 1) * qscale *
185 ((int) (quant_matrix[j]))) >> 4;
186 level = (level - 1) | 1;
/* Dequantize an MPEG-2 intra block in place.
 * With alternate_scan all 63 AC positions must be processed because the
 * block_last_index does not bound the permuted order. */
193 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
194 int16_t *block, int n, int qscale)
196 int i, level, nCoeffs;
197 const uint16_t *quant_matrix;
199 if(s->alternate_scan) nCoeffs= 63;
200 else nCoeffs= s->block_last_index[n];
/* DC uses the separate luma/chroma DC scale, not the quant matrix. */
203 block[0] = block[0] * s->y_dc_scale;
205 block[0] = block[0] * s->c_dc_scale;
206 quant_matrix = s->intra_matrix;
207 for(i=1;i<=nCoeffs;i++) {
208 int j= s->intra_scantable.permutated[i];
213 level = (int)(level * qscale * quant_matrix[j]) >> 3;
216 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of the MPEG-2 intra dequantizer, selected when
 * CODEC_FLAG_BITEXACT is set (see ff_dct_common_init). Same structure as
 * dct_unquantize_mpeg2_intra_c; elided lines presumably add the spec's
 * mismatch-control accumulation — confirm against the full source. */
223 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
224 int16_t *block, int n, int qscale)
226 int i, level, nCoeffs;
227 const uint16_t *quant_matrix;
230 if(s->alternate_scan) nCoeffs= 63;
231 else nCoeffs= s->block_last_index[n];
234 block[0] = block[0] * s->y_dc_scale;
236 block[0] = block[0] * s->c_dc_scale;
237 quant_matrix = s->intra_matrix;
238 for(i=1;i<=nCoeffs;i++) {
239 int j= s->intra_scantable.permutated[i];
244 level = (int)(level * qscale * quant_matrix[j]) >> 3;
247 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Dequantize an MPEG-2 inter block in place.
 * Loop starts at i == 0 (inter DC is quantized like AC); all 63 positions
 * are processed when alternate_scan is active. */
256 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
257 int16_t *block, int n, int qscale)
259 int i, level, nCoeffs;
260 const uint16_t *quant_matrix;
263 if(s->alternate_scan) nCoeffs= 63;
264 else nCoeffs= s->block_last_index[n];
266 quant_matrix = s->inter_matrix;
267 for(i=0; i<=nCoeffs; i++) {
268 int j= s->intra_scantable.permutated[i];
/* Inter reconstruction: (2*level + 1) * qscale * matrix / 16. */
273 level = (((level << 1) + 1) * qscale *
274 ((int) (quant_matrix[j]))) >> 4;
277 level = (((level << 1) + 1) * qscale *
278 ((int) (quant_matrix[j]))) >> 4;
/* Dequantize an H.263 intra block in place: level' = level*qmul +/- qadd,
 * where qadd = (qscale - 1) | 1 is always odd. DC (block[0]) is scaled by
 * the luma/chroma DC scale instead. */
287 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
288 int16_t *block, int n, int qscale)
290 int i, level, qmul, qadd;
293 assert(s->block_last_index[n]>=0);
299 block[0] = block[0] * s->y_dc_scale;
301 block[0] = block[0] * s->c_dc_scale;
302 qadd = (qscale - 1) | 1;
/* raster_end maps the last scan index back to a raster-order bound. */
309 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
311 for(i=1; i<=nCoeffs; i++) {
/* Sign-dependent offset: subtract qadd for negative, add for positive. */
315 level = level * qmul - qadd;
317 level = level * qmul + qadd;
/* Dequantize an H.263 inter block in place. Same qmul/qadd scheme as the
 * intra variant, but DC gets no special scaling and the loop starts at 0. */
324 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
325 int16_t *block, int n, int qscale)
327 int i, level, qmul, qadd;
330 assert(s->block_last_index[n]>=0);
332 qadd = (qscale - 1) | 1;
335 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
337 for(i=0; i<=nCoeffs; i++) {
341 level = level * qmul - qadd;
343 level = level * qmul + qadd;
/* Error-resilience callback (installed as er->decode_mb in init_er):
 * re-decodes one macroblock with the given motion parameters. Sets up the
 * MB state on the context, computes the destination pointers into the
 * current picture, then hands off to ff_MPV_decode_mb(). */
350 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
352 int mb_x, int mb_y, int mb_intra, int mb_skipped)
354 MpegEncContext *s = opaque;
357 s->mv_type = mv_type;
358 s->mb_intra = mb_intra;
359 s->mb_skipped = mb_skipped;
362 memcpy(s->mv, mv, sizeof(*mv));
364 ff_init_block_index(s);
365 ff_update_block_index(s);
367 s->bdsp.clear_blocks(s->block[0]);
/* Luma dest advances in 16-pixel MBs; chroma strides are shifted by the
 * subsampling factors. */
369 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
370 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
371 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
374 ff_MPV_decode_mb(s, s->block);
377 /* init common dct for both encoder and decoder */
378 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Initialize all DSP helper contexts used by both code paths. */
380 ff_blockdsp_init(&s->bdsp, s->avctx);
381 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
382 ff_idctdsp_init(&s->idsp, s->avctx);
383 ff_me_cmp_init(&s->mecc, s->avctx);
384 ff_mpegvideodsp_init(&s->mdsp);
385 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* Install the C dequantizers; arch-specific init below may override them. */
387 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
388 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
389 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
390 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
391 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
392 if (s->flags & CODEC_FLAG_BITEXACT)
393 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
394 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
397 ff_MPV_common_init_arm(s);
399 ff_MPV_common_init_ppc(s);
401 ff_MPV_common_init_x86(s);
403 /* load & permute scantables
404 * note: only wmv uses different ones
406 if (s->alternate_scan) {
407 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
408 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
410 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
411 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
413 ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
414 ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge emulation buffer
 * plus ME/RD/OBMC scratchpads). Returns 0 or AVERROR(ENOMEM); on failure
 * the edge_emu_buffer is freed again so the caller sees a clean state. */
419 static int frame_size_alloc(MpegEncContext *s, int linesize)
421 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
423 // edge emu needs blocksize + filter length - 1
424 // (= 17x17 for halfpel / 21x21 for h264)
425 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
426 // at uvlinesize. It supports only YUV420 so 24x24 is enough
427 // linesize * interlaced * MBsize
428 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
431 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
/* All four scratch pointers alias the single me.scratchpad allocation;
 * obmc_scratchpad is offset 16 bytes into it. */
433 s->me.temp = s->me.scratchpad;
434 s->rd_scratchpad = s->me.scratchpad;
435 s->b_scratchpad = s->me.scratchpad;
436 s->obmc_scratchpad = s->me.scratchpad + 16;
440 av_freep(&s->edge_emu_buffer);
441 return AVERROR(ENOMEM);
445 * Allocate a frame buffer
/* Gets picture pixels either via the (possibly threaded) get_buffer path
 * with EDGE_WIDTH padding for encoding, or via the default allocator for
 * the WMV3/VC1/MSS2 image codecs which manage their own buffers. Also
 * validates stride consistency and allocates hwaccel private data and the
 * context scratch buffers. */
447 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
449 int edges_needed = av_codec_is_encoder(s->avctx->codec);
453 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
454 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
455 s->codec_id != AV_CODEC_ID_MSS2) {
/* Encoder path: over-allocate by EDGE_WIDTH on every side. */
457 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
458 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
461 r = ff_thread_get_buffer(s->avctx, &pic->tf,
462 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
464 pic->f->width = s->avctx->width;
465 pic->f->height = s->avctx->height;
466 pic->f->format = s->avctx->pix_fmt;
467 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
470 if (r < 0 || !pic->f->buf[0]) {
471 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Shift the data pointers past the allocated edges and restore the
 * externally visible dimensions. */
478 for (i = 0; pic->f->data[i]; i++) {
479 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
480 pic->f->linesize[i] +
481 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
482 pic->f->data[i] += offset;
484 pic->f->width = s->avctx->width;
485 pic->f->height = s->avctx->height;
488 if (s->avctx->hwaccel) {
489 assert(!pic->hwaccel_picture_private);
490 if (s->avctx->hwaccel->frame_priv_data_size) {
491 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
492 if (!pic->hwaccel_priv_buf) {
493 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
496 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* Strides must not change between frames; bail out if they did. */
500 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
501 s->uvlinesize != pic->f->linesize[1])) {
502 av_log(s->avctx, AV_LOG_ERROR,
503 "get_buffer() failed (stride changed)\n");
504 ff_mpeg_unref_picture(s, pic);
508 if (pic->f->linesize[1] != pic->f->linesize[2]) {
509 av_log(s->avctx, AV_LOG_ERROR,
510 "get_buffer() failed (uv stride mismatch)\n");
511 ff_mpeg_unref_picture(s, pic);
/* Lazily allocate the stride-dependent scratch buffers on first use. */
515 if (!s->edge_emu_buffer &&
516 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
517 av_log(s->avctx, AV_LOG_ERROR,
518 "get_buffer() failed to allocate context scratch buffers.\n");
519 ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data buffers (the AVBufferRefs backing the
 * mb/qscale/motion tables). Safe on already-freed entries since
 * av_buffer_unref() tolerates NULL. */
526 void ff_free_picture_tables(Picture *pic)
530 av_buffer_unref(&pic->mb_var_buf);
531 av_buffer_unref(&pic->mc_mb_var_buf);
532 av_buffer_unref(&pic->mb_mean_buf);
533 av_buffer_unref(&pic->mbskip_table_buf);
534 av_buffer_unref(&pic->qscale_table_buf);
535 av_buffer_unref(&pic->mb_type_buf);
537 for (i = 0; i < 2; i++) {
538 av_buffer_unref(&pic->motion_val_buf[i]);
539 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data buffers sized from the current MB
 * geometry. Returns 0 or AVERROR(ENOMEM); partially allocated buffers are
 * left for the caller to release via ff_free_picture_tables(). */
543 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
545 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
546 const int mb_array_size = s->mb_stride * s->mb_height;
547 const int b8_array_size = s->b8_stride * s->mb_height * 2;
551 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
552 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
553 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
555 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
556 return AVERROR(ENOMEM);
/* Encoder-only statistics tables (variance / mean per MB). */
559 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
560 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
561 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
562 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
563 return AVERROR(ENOMEM);
/* Motion vectors and reference indices, needed for H.263 family decoding
 * and for any encoding. */
566 if (s->out_format == FMT_H263 || s->encoding) {
567 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
568 int ref_index_size = 4 * mb_array_size;
570 for (i = 0; mv_size && i < 2; i++) {
571 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
572 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
573 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
574 return AVERROR(ENOMEM);
/* Ensure every per-picture table buffer is writable (copy-on-write for
 * refcounted AVBuffers shared with another Picture). */
581 static int make_tables_writable(Picture *pic)
584 #define MAKE_WRITABLE(table) \
587 (ret = av_buffer_make_writable(&pic->table)) < 0)\
591 MAKE_WRITABLE(mb_var_buf);
592 MAKE_WRITABLE(mc_mb_var_buf);
593 MAKE_WRITABLE(mb_mean_buf);
594 MAKE_WRITABLE(mbskip_table_buf);
595 MAKE_WRITABLE(qscale_table_buf);
596 MAKE_WRITABLE(mb_type_buf);
598 for (i = 0; i < 2; i++) {
599 MAKE_WRITABLE(motion_val_buf[i]);
600 MAKE_WRITABLE(ref_index_buf[i]);
607 * Allocate a Picture.
608 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Also (re)allocates the side-data tables and derives the table pointers
 * (offset past the stride padding) from the freshly allocated buffers. */
610 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
615 assert(pic->f->data[0]);
618 assert(!pic->f->buf[0]);
620 if (alloc_frame_buffer(s, pic) < 0)
/* Cache the strides of the first allocated picture for later validation. */
623 s->linesize = pic->f->linesize[0];
624 s->uvlinesize = pic->f->linesize[1];
627 if (!pic->qscale_table_buf)
628 ret = alloc_picture_tables(s, pic);
630 ret = make_tables_writable(pic);
635 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
636 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
637 pic->mb_mean = pic->mb_mean_buf->data;
640 pic->mbskip_table = pic->mbskip_table_buf->data;
/* + 2*mb_stride + 1 skips the table's guard rows/column. */
641 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
642 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
644 if (pic->motion_val_buf[0]) {
645 for (i = 0; i < 2; i++) {
646 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
647 pic->ref_index[i] = pic->ref_index_buf[i]->data;
653 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
654 ff_mpeg_unref_picture(s, pic);
655 ff_free_picture_tables(pic);
656 return AVERROR(ENOMEM);
660 * Deallocate a picture.
/* Releases the frame buffer and hwaccel data, then zeroes every field of
 * the Picture past mb_mean (the memset below) so the entry can be reused;
 * the table AVBufferRefs are kept unless a realloc was requested. */
662 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
664 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
667 /* WM Image / Screen codecs allocate internal buffers with different
668 * dimensions / colorspaces; ignore user-defined callbacks for these. */
669 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
670 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
671 s->codec_id != AV_CODEC_ID_MSS2)
672 ff_thread_release_buffer(s->avctx, &pic->tf);
674 av_frame_unref(pic->f);
676 av_buffer_unref(&pic->hwaccel_priv_buf);
678 if (pic->needs_realloc)
679 ff_free_picture_tables(pic);
/* Wipe everything after the buffer-ref fields; refs above off survive. */
681 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's table buffers reference the same underlying AVBuffers as
 * src's (re-ref only when they differ), then copy the derived pointers.
 * On allocation failure all of dst's tables are released. */
684 static int update_picture_tables(Picture *dst, Picture *src)
688 #define UPDATE_TABLE(table)\
691 (!dst->table || dst->table->buffer != src->table->buffer)) {\
692 av_buffer_unref(&dst->table);\
693 dst->table = av_buffer_ref(src->table);\
695 ff_free_picture_tables(dst);\
696 return AVERROR(ENOMEM);\
701 UPDATE_TABLE(mb_var_buf);
702 UPDATE_TABLE(mc_mb_var_buf);
703 UPDATE_TABLE(mb_mean_buf);
704 UPDATE_TABLE(mbskip_table_buf);
705 UPDATE_TABLE(qscale_table_buf);
706 UPDATE_TABLE(mb_type_buf);
707 for (i = 0; i < 2; i++) {
708 UPDATE_TABLE(motion_val_buf[i]);
709 UPDATE_TABLE(ref_index_buf[i]);
/* The raw pointers are shared as-is; they point into the shared buffers. */
712 dst->mb_var = src->mb_var;
713 dst->mc_mb_var = src->mc_mb_var;
714 dst->mb_mean = src->mb_mean;
715 dst->mbskip_table = src->mbskip_table;
716 dst->qscale_table = src->qscale_table;
717 dst->mb_type = src->mb_type;
718 for (i = 0; i < 2; i++) {
719 dst->motion_val[i] = src->motion_val[i];
720 dst->ref_index[i] = src->ref_index[i];
/* Create a new reference in dst to the picture held by src (frame,
 * side-data tables, hwaccel buffer, and scalar metadata). dst must be
 * empty on entry; on any failure dst is unreferenced again. */
726 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
730 av_assert0(!dst->f->buf[0]);
731 av_assert0(src->f->buf[0]);
735 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
739 ret = update_picture_tables(dst, src);
743 if (src->hwaccel_picture_private) {
744 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
745 if (!dst->hwaccel_priv_buf)
747 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
/* Plain value copies of the per-picture statistics and flags. */
750 dst->field_picture = src->field_picture;
751 dst->mb_var_sum = src->mb_var_sum;
752 dst->mc_mb_var_sum = src->mc_mb_var_sum;
753 dst->b_frame_score = src->b_frame_score;
754 dst->needs_realloc = src->needs_realloc;
755 dst->reference = src->reference;
756 dst->shared = src->shared;
760 ff_mpeg_unref_picture(s, dst);
/* Swap the U and V block pointers (pblocks[4] <-> pblocks[5]); used for
 * the "VCR2" codec tag, which stores chroma in the opposite order.
 * NOTE(review): only one assignment of the swap is visible here — the
 * temp-variable half is in elided lines; confirm against full source. */
764 static void exchange_uv(MpegEncContext *s)
769 s->pblocks[4] = s->pblocks[5];
/* Per-slice-thread context initialization: motion-estimation maps, DCT
 * error-sum (noise reduction), the 12 coefficient blocks, and for H.263
 * the AC prediction values. Returns 0 or -1; on failure the partially
 * allocated fields are released later via ff_MPV_common_end(). */
773 static int init_duplicate_context(MpegEncContext *s)
775 int y_size = s->b8_stride * (2 * s->mb_height + 1);
776 int c_size = s->mb_stride * (s->mb_height + 1);
777 int yc_size = y_size + 2 * c_size;
785 s->obmc_scratchpad = NULL;
788 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
789 ME_MAP_SIZE * sizeof(uint32_t), fail)
790 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
791 ME_MAP_SIZE * sizeof(uint32_t), fail)
792 if (s->avctx->noise_reduction) {
793 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
794 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 int16 coefficients, double-buffered (the * 2). */
797 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
798 s->block = s->blocks[0];
800 for (i = 0; i < 12; i++) {
801 s->pblocks[i] = &s->block[i];
/* VCR2 stores chroma planes swapped; see exchange_uv(). */
803 if (s->avctx->codec_tag == AV_RL32("VCR2"))
806 if (s->out_format == FMT_H263) {
808 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
809 yc_size * sizeof(int16_t) * 16, fail);
810 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
811 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
812 s->ac_val[2] = s->ac_val[1] + c_size;
817 return -1; // free() through ff_MPV_common_end()
/* Free everything allocated by init_duplicate_context() / frame_size_alloc()
 * for one slice-thread context. obmc_scratchpad only aliases me.scratchpad,
 * so it is cleared rather than freed. */
820 static void free_duplicate_context(MpegEncContext *s)
825 av_freep(&s->edge_emu_buffer);
826 av_freep(&s->me.scratchpad);
830 s->obmc_scratchpad = NULL;
832 av_freep(&s->dct_error_sum);
833 av_freep(&s->me.map);
834 av_freep(&s->me.score_map);
835 av_freep(&s->blocks);
836 av_freep(&s->ac_val_base);
/* Save the per-thread pointer fields of src into bak so that
 * ff_update_duplicate_context() can restore them after a wholesale
 * memcpy of the context (shallow copy of selected members only). */
840 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
842 #define COPY(a) bak->a = src->a
843 COPY(edge_emu_buffer);
848 COPY(obmc_scratchpad);
855 COPY(me.map_generation);
/* Sync a slice-thread context (dst) from the master (src): memcpy the
 * whole struct, then restore dst's own per-thread buffers saved in bak,
 * rebuild the pblocks pointers, and lazily allocate scratch buffers. */
867 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
871 // FIXME copy only needed parts
873 backup_duplicate_context(&bak, dst);
874 memcpy(dst, src, sizeof(MpegEncContext));
875 backup_duplicate_context(dst, &bak);
876 for (i = 0; i < 12; i++) {
877 dst->pblocks[i] = &dst->block[i];
879 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
881 if (!dst->edge_emu_buffer &&
882 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
883 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
884 "scratch buffers.\n");
887 // STOP_TIMER("update_duplicate_context")
888 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading callback: copy decoding state from the source thread's
 * context (s1) into this thread's context (s) — picture references,
 * timing fields, bitstream buffer, and MPEG-2/interlacing state. */
892 int ff_mpeg_update_thread_context(AVCodecContext *dst,
893 const AVCodecContext *src)
896 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
898 if (dst == src || !s1->context_initialized)
901 // FIXME can parameters change on I-frames?
902 // in that case dst may need a reinit
903 if (!s->context_initialized) {
/* First sync: clone the whole context, then re-init our own buffers. */
904 memcpy(s, s1, sizeof(MpegEncContext));
907 s->bitstream_buffer = NULL;
908 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
910 ff_MPV_common_init(s);
913 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
915 s->context_reinit = 0;
916 s->height = s1->height;
917 s->width = s1->width;
918 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
922 s->avctx->coded_height = s1->avctx->coded_height;
923 s->avctx->coded_width = s1->avctx->coded_width;
924 s->avctx->width = s1->avctx->width;
925 s->avctx->height = s1->avctx->height;
927 s->coded_picture_number = s1->coded_picture_number;
928 s->picture_number = s1->picture_number;
/* Re-reference every picture slot that is live in the source context. */
930 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
931 ff_mpeg_unref_picture(s, &s->picture[i]);
932 if (s1->picture[i].f->buf[0] &&
933 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
937 #define UPDATE_PICTURE(pic)\
939 ff_mpeg_unref_picture(s, &s->pic);\
940 if (s1->pic.f->buf[0])\
941 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
943 ret = update_picture_tables(&s->pic, &s1->pic);\
948 UPDATE_PICTURE(current_picture);
949 UPDATE_PICTURE(last_picture);
950 UPDATE_PICTURE(next_picture);
/* The *_ptr fields point into s1->picture[]; rebase them into s->picture[]. */
952 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
953 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
954 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
956 // Error/bug resilience
957 s->next_p_frame_damaged = s1->next_p_frame_damaged;
958 s->workaround_bugs = s1->workaround_bugs;
/* Bulk-copy the run of fields between last_time_base and pb_field_time
 * (relies on their struct layout being contiguous). */
961 memcpy(&s->last_time_base, &s1->last_time_base,
962 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
963 (char *) &s1->last_time_base);
966 s->max_b_frames = s1->max_b_frames;
967 s->low_delay = s1->low_delay;
968 s->droppable = s1->droppable;
970 // DivX handling (doesn't work)
971 s->divx_packed = s1->divx_packed;
973 if (s1->bitstream_buffer) {
974 if (s1->bitstream_buffer_size +
975 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
/* NOTE(review): av_fast_malloc() may fail and leave s->bitstream_buffer
 * NULL, but the memcpy below is not guarded in the visible lines —
 * confirm against the full source whether a check exists. */
976 av_fast_malloc(&s->bitstream_buffer,
977 &s->allocated_bitstream_buffer_size,
978 s1->allocated_bitstream_buffer_size);
979 s->bitstream_buffer_size = s1->bitstream_buffer_size;
980 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
981 s1->bitstream_buffer_size);
982 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
983 FF_INPUT_BUFFER_PADDING_SIZE);
986 // linesize dependent scratch buffer allocation
987 if (!s->edge_emu_buffer)
989 if (frame_size_alloc(s, s1->linesize) < 0) {
990 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
991 "scratch buffers.\n");
992 return AVERROR(ENOMEM);
995 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
996 "be allocated due to unknown size.\n");
1000 // MPEG2/interlacing info
1001 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1002 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1004 if (!s1->first_field) {
1005 s->last_pict_type = s1->pict_type;
1006 if (s1->current_picture_ptr)
1007 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1014 * Set the given MpegEncContext to common defaults
1015 * (same for encoding and decoding).
1016 * The changed fields will not depend upon the
1017 * prior state of the MpegEncContext.
1019 void ff_MPV_common_defaults(MpegEncContext *s)
/* MPEG-1 DC scale and identity chroma-qscale tables are the baseline. */
1021 s->y_dc_scale_table =
1022 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1023 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1024 s->progressive_frame = 1;
1025 s->progressive_sequence = 1;
1026 s->picture_structure = PICT_FRAME;
1028 s->coded_picture_number = 0;
1029 s->picture_number = 0;
1034 s->slice_context_count = 1;
1038 * Set the given MpegEncContext to defaults for decoding.
1039 * the changed fields will not depend upon
1040 * the prior state of the MpegEncContext.
1042 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Decoder defaults are currently identical to the common defaults. */
1044 ff_MPV_common_defaults(s);
/* Initialize the error-resilience context (ERContext) from the MB geometry
 * and shared tables of s, allocating its temp/status buffers and
 * installing mpeg_er_decode_mb as the re-decode callback.
 * Returns 0 or AVERROR(ENOMEM) (buffers freed on failure). */
1047 static int init_er(MpegEncContext *s)
1049 ERContext *er = &s->er;
1050 int mb_array_size = s->mb_height * s->mb_stride;
1053 er->avctx = s->avctx;
1054 er->mecc = &s->mecc;
1056 er->mb_index2xy = s->mb_index2xy;
1057 er->mb_num = s->mb_num;
1058 er->mb_width = s->mb_width;
1059 er->mb_height = s->mb_height;
1060 er->mb_stride = s->mb_stride;
1061 er->b8_stride = s->b8_stride;
1063 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1064 er->error_status_table = av_mallocz(mb_array_size);
1065 if (!er->er_temp_buffer || !er->error_status_table)
/* The skip/intra tables are shared with (owned by) the main context. */
1068 er->mbskip_table = s->mbskip_table;
1069 er->mbintra_table = s->mbintra_table;
1071 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1072 er->dc_val[i] = s->dc_val[i];
1074 er->decode_mb = mpeg_er_decode_mb;
1079 av_freep(&er->er_temp_buffer);
1080 av_freep(&er->error_status_table);
1081 return AVERROR(ENOMEM);
1085 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Computes the MB/b8 geometry from width/height and allocates every
 * resolution-dependent table: MV tables, MB type, field MV tables,
 * coded-block/cbp/pred tables (H.263), DC prediction values, intra and
 * skip tables. Returns 0 or AVERROR(ENOMEM) via the fail label. */
1087 static int init_context_frame(MpegEncContext *s)
1089 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1091 s->mb_width = (s->width + 15) / 16;
/* +1 columns of padding so neighbor accesses at the edges stay in bounds. */
1092 s->mb_stride = s->mb_width + 1;
1093 s->b8_stride = s->mb_width * 2 + 1;
1094 mb_array_size = s->mb_height * s->mb_stride;
1095 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1097 /* set default edge pos, will be overridden
1098 * in decode_header if needed */
1099 s->h_edge_pos = s->mb_width * 16;
1100 s->v_edge_pos = s->mb_height * 16;
1102 s->mb_num = s->mb_width * s->mb_height;
1107 s->block_wrap[3] = s->b8_stride;
1109 s->block_wrap[5] = s->mb_stride;
1111 y_size = s->b8_stride * (2 * s->mb_height + 1);
1112 c_size = s->mb_stride * (s->mb_height + 1);
1113 yc_size = y_size + 2 * c_size;
1115 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
1116 fail); // error resilience code looks cleaner with this
1117 for (y = 0; y < s->mb_height; y++)
1118 for (x = 0; x < s->mb_width; x++)
1119 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1121 s->mb_index2xy[s->mb_height * s->mb_width] =
1122 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1125 /* Allocate MV tables */
1126 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
1127 mv_table_size * 2 * sizeof(int16_t), fail);
1128 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
1129 mv_table_size * 2 * sizeof(int16_t), fail);
1130 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
1131 mv_table_size * 2 * sizeof(int16_t), fail);
1132 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
1133 mv_table_size * 2 * sizeof(int16_t), fail);
1134 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
1135 mv_table_size * 2 * sizeof(int16_t), fail);
1136 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
1137 mv_table_size * 2 * sizeof(int16_t), fail);
/* Working pointers skip the first padding row/column of each base table. */
1138 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1139 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1140 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1141 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
1143 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
1145 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1147 /* Allocate MB type table */
1148 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
1149 sizeof(uint16_t), fail); // needed for encoding
1151 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
1154 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1155 mb_array_size * sizeof(float), fail);
1156 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1157 mb_array_size * sizeof(float), fail);
1161 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1162 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1163 /* interlaced direct mode decoding tables */
1164 for (i = 0; i < 2; i++) {
1166 for (j = 0; j < 2; j++) {
1167 for (k = 0; k < 2; k++) {
1168 FF_ALLOCZ_OR_GOTO(s->avctx,
1169 s->b_field_mv_table_base[i][j][k],
1170 mv_table_size * 2 * sizeof(int16_t),
1172 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1175 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
1176 mb_array_size * 2 * sizeof(uint8_t), fail);
1177 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
1178 mv_table_size * 2 * sizeof(int16_t), fail);
1179 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
1182 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
1183 mb_array_size * 2 * sizeof(uint8_t), fail);
1186 if (s->out_format == FMT_H263) {
1188 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
1189 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1191 /* cbp, ac_pred, pred_dir */
1192 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
1193 mb_array_size * sizeof(uint8_t), fail);
1194 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
1195 mb_array_size * sizeof(uint8_t), fail);
1198 if (s->h263_pred || s->h263_plus || !s->encoding) {
1200 // MN: we need these for error resilience of intra-frames
1201 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
1202 yc_size * sizeof(int16_t), fail);
1203 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1204 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1205 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the reset value for DC prediction. */
1206 for (i = 0; i < yc_size; i++)
1207 s->dc_val_base[i] = 1024;
1210 /* which mb is a intra block */
1211 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1212 memset(s->mbintra_table, 1, mb_array_size);
1214 /* init macroblock skip table */
1215 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1216 // Note the + 1 is for a quicker mpeg4 slice_end detection
1220 return AVERROR(ENOMEM);
1224 * init common structure for both encoder and decoder.
1225 * this assumes that some variables like width/height are already set
/**
 * Initialize the state shared by the MPEG-family encoder and decoder.
 * Assumes the caller already set basic fields (width/height, codec_id,
 * avctx) on *s.  On success the context is marked initialized and one
 * thread context per slice is set up.
 * NOTE(review): this listing has dropped lines (opening brace, the
 * declarations of i/err/max_slices, error branches and the fail: label
 * are not visible); comments describe only the code that is present.
 */
1227 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* one slice context per slice thread, otherwise a single context */
1230 int nb_slices = (HAVE_THREADS &&
1231 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1232 s->avctx->thread_count : 1;
/* encoder: an explicit slice count requested by the user takes priority */
1234 if (s->encoding && s->avctx->slices)
1235 nb_slices = s->avctx->slices;
/* interlaced MPEG-2: mb_height is computed from 32-line field-pair units */
1237 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1238 s->mb_height = (s->height + 31) / 32 * 2;
1240 s->mb_height = (s->height + 15) / 16;
1242 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1243 av_log(s->avctx, AV_LOG_ERROR,
1244 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* clamp the slice count to the thread limit and the number of MB rows */
1248 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1251 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1253 max_slices = MAX_THREADS;
1254 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1255 " reducing to %d\n", nb_slices, max_slices);
1256 nb_slices = max_slices;
/* reject invalid / overflowing picture dimensions early */
1259 if ((s->width || s->height) &&
1260 av_image_check_size(s->width, s->height, 0, s->avctx))
1263 ff_dct_common_init(s);
1265 s->flags = s->avctx->flags;
1266 s->flags2 = s->avctx->flags2;
1268 /* set chroma shifts */
1269 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1271 &s->chroma_y_shift);
1273 /* convert fourcc to upper case */
1274 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1276 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
/* allocate the picture pool and an AVFrame for every slot */
1278 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1279 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1280 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1281 s->picture[i].f = av_frame_alloc();
1282 if (!s->picture[i].f)
/* the four working pictures are zeroed and get their own AVFrames */
1285 memset(&s->next_picture, 0, sizeof(s->next_picture));
1286 memset(&s->last_picture, 0, sizeof(s->last_picture));
1287 memset(&s->current_picture, 0, sizeof(s->current_picture));
1288 memset(&s->new_picture, 0, sizeof(s->new_picture));
1289 s->next_picture.f = av_frame_alloc();
1290 if (!s->next_picture.f)
1292 s->last_picture.f = av_frame_alloc();
1293 if (!s->last_picture.f)
1295 s->current_picture.f = av_frame_alloc();
1296 if (!s->current_picture.f)
1298 s->new_picture.f = av_frame_alloc();
1299 if (!s->new_picture.f)
/* resolution-dependent tables only once dimensions are known */
1302 if (s->width && s->height) {
1303 if (init_context_frame(s))
1306 s->parse_context.state = -1;
1309 s->context_initialized = 1;
1310 s->thread_context[0] = s;
1312 if (s->width && s->height) {
1313 if (nb_slices > 1) {
/* clone the context for every additional slice thread */
1314 for (i = 1; i < nb_slices; i++) {
1315 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1316 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1319 for (i = 0; i < nb_slices; i++) {
1320 if (init_duplicate_context(s->thread_context[i]) < 0)
/* divide the MB rows between slice contexts, rounding to nearest */
1322 s->thread_context[i]->start_mb_y =
1323 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1324 s->thread_context[i]->end_mb_y =
1325 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1328 if (init_duplicate_context(s) < 0)
1331 s->end_mb_y = s->mb_height;
1333 s->slice_context_count = nb_slices;
/* error-cleanup path: release everything allocated so far */
1338 ff_MPV_common_end(s);
1343 * Frees and resets MpegEncContext fields depending on the resolution.
1344 * Is used during resolution changes to avoid a full reinitialization of the
/**
 * Free the tables whose size depends on the frame resolution
 * (motion-vector tables, per-MB tables, error-resilience buffers).
 * Counterpart of init_context_frame(); used during resolution changes so
 * a full context teardown can be avoided.
 */
1347 static int free_context_frame(MpegEncContext *s)
1351 av_freep(&s->mb_type);
/* MV tables: free the _base allocations, then clear the derived pointers
 * that pointed into them */
1352 av_freep(&s->p_mv_table_base);
1353 av_freep(&s->b_forw_mv_table_base);
1354 av_freep(&s->b_back_mv_table_base);
1355 av_freep(&s->b_bidir_forw_mv_table_base);
1356 av_freep(&s->b_bidir_back_mv_table_base);
1357 av_freep(&s->b_direct_mv_table_base);
1358 s->p_mv_table = NULL;
1359 s->b_forw_mv_table = NULL;
1360 s->b_back_mv_table = NULL;
1361 s->b_bidir_forw_mv_table = NULL;
1362 s->b_bidir_back_mv_table = NULL;
1363 s->b_direct_mv_table = NULL;
/* field-based MV and field-select tables */
1364 for (i = 0; i < 2; i++) {
1365 for (j = 0; j < 2; j++) {
1366 for (k = 0; k < 2; k++) {
1367 av_freep(&s->b_field_mv_table_base[i][j][k]);
1368 s->b_field_mv_table[i][j][k] = NULL;
1370 av_freep(&s->b_field_select_table[i][j]);
1371 av_freep(&s->p_field_mv_table_base[i][j]);
1372 s->p_field_mv_table[i][j] = NULL;
1374 av_freep(&s->p_field_select_table[i]);
/* per-block / per-MB coding and bookkeeping tables */
1377 av_freep(&s->dc_val_base);
1378 av_freep(&s->coded_block_base);
1379 av_freep(&s->mbintra_table);
1380 av_freep(&s->cbp_table);
1381 av_freep(&s->pred_dir_table);
1383 av_freep(&s->mbskip_table);
1385 av_freep(&s->er.error_status_table);
1386 av_freep(&s->er.er_temp_buffer);
1387 av_freep(&s->mb_index2xy);
1388 av_freep(&s->lambda_table);
1389 av_freep(&s->cplx_tab);
1390 av_freep(&s->bits_tab);
/* force re-derivation of line sizes on the next frame allocation */
1392 s->linesize = s->uvlinesize = 0;
/**
 * Re-initialize the resolution-dependent parts of an already-initialized
 * context after a frame size change, avoiding a full
 * ff_MPV_common_end()/ff_MPV_common_init() cycle.
 * NOTE(review): lines are elided in this listing (declarations of i/err,
 * error branches and the fail: label are not visible).
 */
1397 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* tear down the per-slice duplicate contexts first */
1401 if (s->slice_context_count > 1) {
1402 for (i = 0; i < s->slice_context_count; i++) {
1403 free_duplicate_context(s->thread_context[i]);
1405 for (i = 1; i < s->slice_context_count; i++) {
1406 av_freep(&s->thread_context[i]);
1409 free_duplicate_context(s);
1411 if ((err = free_context_frame(s)) < 0)
/* the existing picture buffers have the wrong size now */
1415 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1416 s->picture[i].needs_realloc = 1;
1419 s->last_picture_ptr =
1420 s->next_picture_ptr =
1421 s->current_picture_ptr = NULL;
/* recompute mb_height; interlaced MPEG-2 rounds to 32-line units */
1424 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1425 s->mb_height = (s->height + 31) / 32 * 2;
1427 s->mb_height = (s->height + 15) / 16;
1429 if ((s->width || s->height) &&
1430 av_image_check_size(s->width, s->height, 0, s->avctx))
1431 return AVERROR_INVALIDDATA;
1433 if ((err = init_context_frame(s)))
1436 s->thread_context[0] = s;
/* rebuild the slice thread contexts with the new geometry */
1438 if (s->width && s->height) {
1439 int nb_slices = s->slice_context_count;
1440 if (nb_slices > 1) {
1441 for (i = 1; i < nb_slices; i++) {
1442 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1443 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1446 for (i = 0; i < nb_slices; i++) {
1447 if (init_duplicate_context(s->thread_context[i]) < 0)
1449 s->thread_context[i]->start_mb_y =
1450 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1451 s->thread_context[i]->end_mb_y =
1452 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1455 if (init_duplicate_context(s) < 0)
1458 s->end_mb_y = s->mb_height;
1460 s->slice_context_count = nb_slices;
/* error path: full teardown */
1465 ff_MPV_common_end(s);
1469 /* init common structure for both encoder and decoder */
/**
 * Free everything allocated by ff_MPV_common_init() and reset the context
 * to an uninitialized state.  Also used as the error-cleanup path of the
 * init functions, so it must tolerate partially-initialized state.
 */
1470 void ff_MPV_common_end(MpegEncContext *s)
/* free per-slice duplicate contexts; index 0 is *s itself and only its
 * duplicated buffers are freed, not the context */
1474 if (s->slice_context_count > 1) {
1475 for (i = 0; i < s->slice_context_count; i++) {
1476 free_duplicate_context(s->thread_context[i]);
1478 for (i = 1; i < s->slice_context_count; i++) {
1479 av_freep(&s->thread_context[i]);
1481 s->slice_context_count = 1;
1482 } else free_duplicate_context(s);
1484 av_freep(&s->parse_context.buffer);
1485 s->parse_context.buffer_size = 0;
1487 av_freep(&s->bitstream_buffer);
1488 s->allocated_bitstream_buffer_size = 0;
/* release every slot of the picture pool, then the pool itself,
 * then the four working pictures */
1491 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1492 ff_free_picture_tables(&s->picture[i]);
1493 ff_mpeg_unref_picture(s, &s->picture[i]);
1494 av_frame_free(&s->picture[i].f);
1497 av_freep(&s->picture);
1498 ff_free_picture_tables(&s->last_picture);
1499 ff_mpeg_unref_picture(s, &s->last_picture);
1500 av_frame_free(&s->last_picture.f);
1501 ff_free_picture_tables(&s->current_picture);
1502 ff_mpeg_unref_picture(s, &s->current_picture);
1503 av_frame_free(&s->current_picture.f);
1504 ff_free_picture_tables(&s->next_picture);
1505 ff_mpeg_unref_picture(s, &s->next_picture);
1506 av_frame_free(&s->next_picture.f);
1507 ff_free_picture_tables(&s->new_picture);
1508 ff_mpeg_unref_picture(s, &s->new_picture);
1509 av_frame_free(&s->new_picture.f);
1511 free_context_frame(s);
/* leave the context in a state where init can be called again */
1513 s->context_initialized = 0;
1514 s->last_picture_ptr =
1515 s->next_picture_ptr =
1516 s->current_picture_ptr = NULL;
1517 s->linesize = s->uvlinesize = 0;
/**
 * Build the max_level[], max_run[] and index_run[] lookup tables of an
 * RLTable from its table_run[]/table_level[] arrays.
 * @param static_store if non-NULL, the tables live in this static buffer
 *        (and the call is a no-op once already initialized); otherwise
 *        they are allocated with av_malloc().
 * NOTE(review): the computation of 'start'/'end' for each half of the
 * table (last = 0 / 1) is elided in this listing.
 */
1520 av_cold void ff_init_rl(RLTable *rl,
1521 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1523 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1524 uint8_t index_run[MAX_RUN + 1];
1525 int last, run, level, start, end, i;
1527 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1528 if (static_store && rl->max_level[0])
1531 /* compute max_level[], max_run[] and index_run[] */
1532 for (last = 0; last < 2; last++) {
/* rl->n is used as the "unset" sentinel value in index_run[] */
1541 memset(max_level, 0, MAX_RUN + 1);
1542 memset(max_run, 0, MAX_LEVEL + 1);
1543 memset(index_run, rl->n, MAX_RUN + 1);
1544 for (i = start; i < end; i++) {
1545 run = rl->table_run[i];
1546 level = rl->table_level[i];
1547 if (index_run[run] == rl->n)
1549 if (level > max_level[run])
1550 max_level[run] = level;
1551 if (run > max_run[level])
1552 max_run[level] = run;
/* publish the tables: either into the caller's static storage (laid out
 * back to back) or into freshly allocated buffers */
1555 rl->max_level[last] = static_store[last];
1557 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1558 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1560 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1562 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1563 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1565 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1567 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1568 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Pre-expand an RLTable's VLC into per-qscale RL_VLC_ELEM tables so the
 * decoder can fetch run/level/length with a single table lookup.
 * NOTE(review): the computation of qmul, the handling of the illegal /
 * escape / multi-level cases, and several closing braces are elided in
 * this listing.
 */
1572 av_cold void ff_init_vlc_rl(RLTable *rl)
/* one expanded table per quantizer value 0..31 */
1576 for (q = 0; q < 32; q++) {
1578 int qadd = (q - 1) | 1;
1584 for (i = 0; i < rl->vlc.table_size; i++) {
1585 int code = rl->vlc.table[i][0];
1586 int len = rl->vlc.table[i][1];
1589 if (len == 0) { // illegal code
1592 } else if (len < 0) { // more bits needed
1596 if (code == rl->n) { // esc
/* regular code: bake in the dequantized level and the run (stored +1;
 * codes past rl->last additionally flag end-of-block via run += 192) */
1600 run = rl->table_run[code] + 1;
1601 level = rl->table_level[code] * qmul + qadd;
1602 if (code >= rl->last) run += 192;
1605 rl->rl_vlc[q][i].len = len;
1606 rl->rl_vlc[q][i].level = level;
1607 rl->rl_vlc[q][i].run = run;
1612 static void release_unused_pictures(MpegEncContext *s)
1616 /* release non reference frames */
1617 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1618 if (!s->picture[i].reference)
1619 ff_mpeg_unref_picture(s, &s->picture[i]);
1623 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1625 if (pic->f->buf[0] == NULL)
1627 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1632 static int find_unused_picture(MpegEncContext *s, int shared)
1637 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1638 if (s->picture[i].f->buf[0] == NULL)
1642 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1643 if (pic_is_unused(s, &s->picture[i]))
1648 return AVERROR_INVALIDDATA;
1651 int ff_find_unused_picture(MpegEncContext *s, int shared)
1653 int ret = find_unused_picture(s, shared);
1655 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1656 if (s->picture[ret].needs_realloc) {
1657 s->picture[ret].needs_realloc = 0;
1658 ff_free_picture_tables(&s->picture[ret]);
1659 ff_mpeg_unref_picture(s, &s->picture[ret]);
1666 * generic function called after decoding
1667 * the header and before a frame is decoded.
/**
 * Generic per-frame setup called after the header is decoded and before
 * the first macroblock: rotates last/next/current picture pointers,
 * (re)allocates the current picture, synthesizes dummy reference frames
 * when a stream starts on a non-keyframe, and selects the dequantizer.
 * NOTE(review): this listing has many elided lines (declarations,
 * error returns, closing braces, #if guards); comments describe only the
 * visible code.
 */
1669 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1675 /* mark & release old frames */
1676 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1677 s->last_picture_ptr != s->next_picture_ptr &&
1678 s->last_picture_ptr->f->buf[0]) {
1679 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1682 /* release forgotten pictures */
1683 /* if (mpeg124/h263) */
1684 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1685 if (&s->picture[i] != s->last_picture_ptr &&
1686 &s->picture[i] != s->next_picture_ptr &&
1687 s->picture[i].reference && !s->picture[i].needs_realloc) {
1688 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1689 av_log(avctx, AV_LOG_ERROR,
1690 "releasing zombie picture\n");
1691 ff_mpeg_unref_picture(s, &s->picture[i]);
1695 ff_mpeg_unref_picture(s, &s->current_picture);
1697 release_unused_pictures(s);
/* pick (or reuse) the slot for the frame being decoded */
1699 if (s->current_picture_ptr &&
1700 s->current_picture_ptr->f->buf[0] == NULL) {
1701 // we already have an unused image
1702 // (maybe it was set before reading the header)
1703 pic = s->current_picture_ptr;
1705 i = ff_find_unused_picture(s, 0);
1707 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1710 pic = &s->picture[i];
1714 if (!s->droppable) {
1715 if (s->pict_type != AV_PICTURE_TYPE_B)
1719 pic->f->coded_picture_number = s->coded_picture_number++;
1721 if (ff_alloc_picture(s, pic, 0) < 0)
1724 s->current_picture_ptr = pic;
1725 // FIXME use only the vars from current_pic
/* propagate interlacing metadata onto the output frame */
1726 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1727 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1728 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1729 if (s->picture_structure != PICT_FRAME)
1730 s->current_picture_ptr->f->top_field_first =
1731 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1733 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1734 !s->progressive_sequence;
1735 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1737 s->current_picture_ptr->f->pict_type = s->pict_type;
1738 // if (s->flags && CODEC_FLAG_QSCALE)
1739 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1740 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1742 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1743 s->current_picture_ptr)) < 0)
/* non-B frames rotate the reference pointers */
1746 if (s->pict_type != AV_PICTURE_TYPE_B) {
1747 s->last_picture_ptr = s->next_picture_ptr;
1749 s->next_picture_ptr = s->current_picture_ptr;
1751 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1752 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1753 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1754 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1755 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1756 s->pict_type, s->droppable);
/* no usable last picture (stream starts on a P/B frame, or on a field
 * keyframe): fabricate a gray dummy reference so decoding can proceed */
1758 if ((s->last_picture_ptr == NULL ||
1759 s->last_picture_ptr->f->buf[0] == NULL) &&
1760 (s->pict_type != AV_PICTURE_TYPE_I ||
1761 s->picture_structure != PICT_FRAME)) {
1762 int h_chroma_shift, v_chroma_shift;
1763 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1764 &h_chroma_shift, &v_chroma_shift);
1765 if (s->pict_type != AV_PICTURE_TYPE_I)
1766 av_log(avctx, AV_LOG_ERROR,
1767 "warning: first frame is no keyframe\n");
1768 else if (s->picture_structure != PICT_FRAME)
1769 av_log(avctx, AV_LOG_INFO,
1770 "allocate dummy last picture for field based first keyframe\n");
1772 /* Allocate a dummy frame */
1773 i = ff_find_unused_picture(s, 0);
1775 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1778 s->last_picture_ptr = &s->picture[i];
1780 s->last_picture_ptr->reference = 3;
1781 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1783 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1784 s->last_picture_ptr = NULL;
/* fill with black (Y=0) / neutral chroma (0x80) */
1788 memset(s->last_picture_ptr->f->data[0], 0,
1789 avctx->height * s->last_picture_ptr->f->linesize[0]);
1790 memset(s->last_picture_ptr->f->data[1], 0x80,
1791 (avctx->height >> v_chroma_shift) *
1792 s->last_picture_ptr->f->linesize[1]);
1793 memset(s->last_picture_ptr->f->data[2], 0x80,
1794 (avctx->height >> v_chroma_shift) *
1795 s->last_picture_ptr->f->linesize[2]);
/* mark both fields complete so frame threads never wait on the dummy */
1797 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1798 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* B frame without a next (forward) reference: same dummy-frame trick */
1800 if ((s->next_picture_ptr == NULL ||
1801 s->next_picture_ptr->f->buf[0] == NULL) &&
1802 s->pict_type == AV_PICTURE_TYPE_B) {
1803 /* Allocate a dummy frame */
1804 i = ff_find_unused_picture(s, 0);
1806 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1809 s->next_picture_ptr = &s->picture[i];
1811 s->next_picture_ptr->reference = 3;
1812 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1814 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1815 s->next_picture_ptr = NULL;
1818 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1819 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
/* take working references on the last/next pictures */
1822 if (s->last_picture_ptr) {
1823 ff_mpeg_unref_picture(s, &s->last_picture);
1824 if (s->last_picture_ptr->f->buf[0] &&
1825 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1826 s->last_picture_ptr)) < 0)
1829 if (s->next_picture_ptr) {
1830 ff_mpeg_unref_picture(s, &s->next_picture);
1831 if (s->next_picture_ptr->f->buf[0] &&
1832 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1833 s->next_picture_ptr)) < 0)
1837 if (s->pict_type != AV_PICTURE_TYPE_I &&
1838 !(s->last_picture_ptr && s->last_picture_ptr->f->buf[0])) {
1839 av_log(s, AV_LOG_ERROR,
1840 "Non-reference picture received and no reference available\n");
1841 return AVERROR_INVALIDDATA;
/* field pictures: point data[] at the right field and double linesize
 * so one "row" step skips the other field */
1844 if (s->picture_structure!= PICT_FRAME) {
1846 for (i = 0; i < 4; i++) {
1847 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1848 s->current_picture.f->data[i] +=
1849 s->current_picture.f->linesize[i];
1851 s->current_picture.f->linesize[i] *= 2;
1852 s->last_picture.f->linesize[i] *= 2;
1853 s->next_picture.f->linesize[i] *= 2;
1857 s->err_recognition = avctx->err_recognition;
1859 /* set dequantizer, we can't do it during init as
1860 * it might change for mpeg4 and we can't do it in the header
1861 * decode as init is not called for mpeg4 there yet */
1862 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1863 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1864 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1865 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1866 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1867 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1869 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1870 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
/* deprecated XvMC path takes over from here when enabled */
1874 FF_DISABLE_DEPRECATION_WARNINGS
1875 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1876 return ff_xvmc_field_start(s, avctx);
1877 FF_ENABLE_DEPRECATION_WARNINGS
1878 #endif /* FF_API_XVMC */
1883 /* called after a frame has been decoded. */
/* called after a frame has been decoded: finish the deprecated XvMC field
 * and, for reference frames, report full decoding progress so waiting
 * frame threads are released. */
1884 void ff_MPV_frame_end(MpegEncContext *s)
1887 FF_DISABLE_DEPRECATION_WARNINGS
1888 /* redraw edges for the frame if decoding didn't complete */
1889 // just to make sure that all data is rendered.
1890 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1891 ff_xvmc_field_end(s);
1893 FF_ENABLE_DEPRECATION_WARNINGS
1894 #endif /* FF_API_XVMC */
1898 if (s->current_picture.reference)
1899 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1903 * Print debugging info for the given picture.
/**
 * Print per-frame / per-macroblock debug info (skip counts, qscale,
 * macroblock type map) for the given picture, gated on avctx->debug flags.
 * NOTE(review): 'pict' is used below but its declaration/assignment is
 * elided in this listing — presumably pict = p->f; verify against the
 * missing lines.
 */
1905 void ff_print_debug_info(MpegEncContext *s, Picture *p)
/* nothing useful to print for hwaccel or when MB metadata is absent */
1908 if (s->avctx->hwaccel || !p || !p->mb_type)
1912 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1915 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1916 switch (pict->pict_type) {
1917 case AV_PICTURE_TYPE_I:
1918 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1920 case AV_PICTURE_TYPE_P:
1921 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1923 case AV_PICTURE_TYPE_B:
1924 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1926 case AV_PICTURE_TYPE_S:
1927 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1929 case AV_PICTURE_TYPE_SI:
1930 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1932 case AV_PICTURE_TYPE_SP:
1933 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
/* one character (or digit) per macroblock, one text row per MB row */
1936 for (y = 0; y < s->mb_height; y++) {
1937 for (x = 0; x < s->mb_width; x++) {
1938 if (s->avctx->debug & FF_DEBUG_SKIP) {
1939 int count = s->mbskip_table[x + y * s->mb_stride];
1942 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1944 if (s->avctx->debug & FF_DEBUG_QP) {
1945 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1946 p->qscale_table[x + y * s->mb_stride]);
1948 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1949 int mb_type = p->mb_type[x + y * s->mb_stride];
1950 // Type & MV direction
1951 if (IS_PCM(mb_type))
1952 av_log(s->avctx, AV_LOG_DEBUG, "P");
1953 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1954 av_log(s->avctx, AV_LOG_DEBUG, "A");
1955 else if (IS_INTRA4x4(mb_type))
1956 av_log(s->avctx, AV_LOG_DEBUG, "i");
1957 else if (IS_INTRA16x16(mb_type))
1958 av_log(s->avctx, AV_LOG_DEBUG, "I");
1959 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1960 av_log(s->avctx, AV_LOG_DEBUG, "d");
1961 else if (IS_DIRECT(mb_type))
1962 av_log(s->avctx, AV_LOG_DEBUG, "D");
1963 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1964 av_log(s->avctx, AV_LOG_DEBUG, "g");
1965 else if (IS_GMC(mb_type))
1966 av_log(s->avctx, AV_LOG_DEBUG, "G");
1967 else if (IS_SKIP(mb_type))
1968 av_log(s->avctx, AV_LOG_DEBUG, "S");
1969 else if (!USES_LIST(mb_type, 1))
1970 av_log(s->avctx, AV_LOG_DEBUG, ">");
1971 else if (!USES_LIST(mb_type, 0))
1972 av_log(s->avctx, AV_LOG_DEBUG, "<");
/* uses both lists: bidirectional prediction */
1974 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1975 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second character: partitioning */
1979 if (IS_8X8(mb_type))
1980 av_log(s->avctx, AV_LOG_DEBUG, "+");
1981 else if (IS_16X8(mb_type))
1982 av_log(s->avctx, AV_LOG_DEBUG, "-");
1983 else if (IS_8X16(mb_type))
1984 av_log(s->avctx, AV_LOG_DEBUG, "|");
1985 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1986 av_log(s->avctx, AV_LOG_DEBUG, " ");
1988 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* third character: interlaced flag */
1991 if (IS_INTERLACED(mb_type))
1992 av_log(s->avctx, AV_LOG_DEBUG, "=");
1994 av_log(s->avctx, AV_LOG_DEBUG, " ");
1997 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2003 * find the lowest MB row referenced in the MVs
/**
 * Find the lowest MB row of the reference picture touched by the current
 * macroblock's motion vectors in direction 'dir' — used by frame threading
 * to wait only until that row of the reference is decoded.
 * NOTE(review): the switch cases that set 'mvs'/'off' per mv_type are
 * elided in this listing.
 */
2005 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* with half-pel MVs, shift once more so 'my' is always in quarter-pel */
2007 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2008 int my, off, i, mvs;
/* field pictures and GMC: be conservative, fall through to the bottom row */
2010 if (s->picture_structure != PICT_FRAME || s->mcsel)
2013 switch (s->mv_type) {
2027 for (i = 0; i < mvs; i++) {
2028 my = s->mv[dir][i][1]<<qpel_shift;
2029 my_max = FFMAX(my_max, my);
2030 my_min = FFMIN(my_min, my);
/* convert the worst-case quarter-pel displacement into whole MB rows */
2033 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2035 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* conservative fallback: the whole reference frame */
2037 return s->mb_height-1;
2040 /* put block[] to dest[] */
2041 static inline void put_dct(MpegEncContext *s,
2042 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2044 s->dct_unquantize_intra(s, block, i, qscale);
2045 s->idsp.idct_put(dest, line_size, block);
2048 /* add block[] to dest[] */
2049 static inline void add_dct(MpegEncContext *s,
2050 int16_t *block, int i, uint8_t *dest, int line_size)
2052 if (s->block_last_index[i] >= 0) {
2053 s->idsp.idct_add(dest, line_size, block);
2057 static inline void add_dequant_dct(MpegEncContext *s,
2058 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2060 if (s->block_last_index[i] >= 0) {
2061 s->dct_unquantize_inter(s, block, i, qscale);
2063 s->idsp.idct_add(dest, line_size, block);
2068 * Clean dc, ac, coded_block for the current non-intra MB.
2070 void ff_clean_intra_table_entries(MpegEncContext *s)
2072 int wrap = s->b8_stride;
2073 int xy = s->block_index[0];
2076 s->dc_val[0][xy + 1 ] =
2077 s->dc_val[0][xy + wrap] =
2078 s->dc_val[0][xy + 1 + wrap] = 1024;
2080 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2081 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2082 if (s->msmpeg4_version>=3) {
2083 s->coded_block[xy ] =
2084 s->coded_block[xy + 1 ] =
2085 s->coded_block[xy + wrap] =
2086 s->coded_block[xy + 1 + wrap] = 0;
2089 wrap = s->mb_stride;
2090 xy = s->mb_x + s->mb_y * wrap;
2092 s->dc_val[2][xy] = 1024;
2094 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2095 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2097 s->mbintra_table[xy]= 0;
2100 /* generic function called after a macroblock has been parsed by the
2101 decoder or after it has been encoded by the encoder.
2103 Important variables used:
2104 s->mb_intra : true if intra macroblock
2105 s->mv_dir : motion vector direction
2106 s->mv_type : motion vector type
2107 s->mv : motion vector
2108 s->interlaced_dct : true if interlaced dct used (mpeg2)
/**
 * Reconstruct one macroblock: motion compensation from the reference
 * frame(s) plus dequantization / inverse DCT of the residual (or a pure
 * intra IDCT), writing the result into the current picture.  Inlined with
 * is_mpeg12 as a compile-time constant to specialize the MPEG-1/2 path.
 * NOTE(review): many lines are elided in this listing (parameter list
 * tail, several else branches, closing braces); comments describe only
 * the visible code.
 */
2110 static av_always_inline
2111 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2114 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* deprecated XvMC short-circuits the whole software reconstruction */
2117 FF_DISABLE_DEPRECATION_WARNINGS
2118 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2119 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2122 FF_ENABLE_DEPRECATION_WARNINGS
2123 #endif /* FF_API_XVMC */
2125 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2126 /* print DCT coefficients */
2128 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2130 for(j=0; j<64; j++){
2131 av_log(s->avctx, AV_LOG_DEBUG, "%5d",
2132 block[i][s->idsp.idct_permutation[j]]);
2134 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2138 s->current_picture.qscale_table[mb_xy] = s->qscale;
2140 /* update DC predictors for P macroblocks */
2142 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2143 if(s->mbintra_table[mb_xy])
2144 ff_clean_intra_table_entries(s);
2148 s->last_dc[2] = 128 << s->intra_dc_precision;
/* remember intra MBs so their predictors get cleaned later */
2151 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2152 s->mbintra_table[mb_xy]=1;
2154 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2155 uint8_t *dest_y, *dest_cb, *dest_cr;
2156 int dct_linesize, dct_offset;
2157 op_pixels_func (*op_pix)[4];
2158 qpel_mc_func (*op_qpix)[16];
2159 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2160 const int uvlinesize = s->current_picture.f->linesize[1];
2161 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2162 const int block_size = 8;
2164 /* avoid copy if macroblock skipped in last frame too */
2165 /* skip only during decoding as we might trash the buffers during encoding a bit */
2167 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2169 if (s->mb_skipped) {
2171 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2173 } else if(!s->current_picture.reference) {
2176 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: rows of one field, so double the stride */
2180 dct_linesize = linesize << s->interlaced_dct;
2181 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2185 dest_cb= s->dest[1];
2186 dest_cr= s->dest[2];
/* non-readable destination: reconstruct into the scratchpad and copy
 * the visible part out at the end */
2188 dest_y = s->b_scratchpad;
2189 dest_cb= s->b_scratchpad+16*linesize;
2190 dest_cr= s->b_scratchpad+32*linesize;
2194 /* motion handling */
2195 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the referenced rows of last/next are done */
2198 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2199 if (s->mv_dir & MV_DIR_FORWARD) {
2200 ff_thread_await_progress(&s->last_picture_ptr->tf,
2201 ff_MPV_lowest_referenced_row(s, 0),
2204 if (s->mv_dir & MV_DIR_BACKWARD) {
2205 ff_thread_await_progress(&s->next_picture_ptr->tf,
2206 ff_MPV_lowest_referenced_row(s, 1),
/* first direction uses put (overwrite), second uses avg (bidir blend) */
2211 op_qpix= s->me.qpel_put;
2212 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2213 op_pix = s->hdsp.put_pixels_tab;
2215 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2217 if (s->mv_dir & MV_DIR_FORWARD) {
2218 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2219 op_pix = s->hdsp.avg_pixels_tab;
2220 op_qpix= s->me.qpel_avg;
2222 if (s->mv_dir & MV_DIR_BACKWARD) {
2223 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2227 /* skip dequant / idct if we are really late ;) */
2228 if(s->avctx->skip_idct){
2229 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2230 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2231 || s->avctx->skip_idct >= AVDISCARD_ALL)
2235 /* add dct residue */
/* path 1: codecs whose blocks still need dequantization here */
2236 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2237 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2238 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2239 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2240 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2241 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2243 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2244 if (s->chroma_y_shift){
2245 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2246 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2250 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2251 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2252 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2253 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* path 2: blocks were already dequantized during bitstream decode */
2256 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2257 add_dct(s, block[0], 0, dest_y , dct_linesize);
2258 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2259 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2260 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2262 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2263 if(s->chroma_y_shift){//Chroma420
2264 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2265 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2268 dct_linesize = uvlinesize << s->interlaced_dct;
2269 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2271 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2272 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2273 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2274 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2275 if(!s->chroma_x_shift){//Chroma444
2276 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2277 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2278 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2279 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
/* path 3: WMV2 has its own residual add */
2284 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2285 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2288 /* dct only in intra block */
2289 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2290 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2291 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2292 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2293 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2295 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2296 if(s->chroma_y_shift){
2297 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2298 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2302 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2303 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2304 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2305 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* intra MPEG-1/2: blocks are already dequantized, plain IDCT put */
2309 s->idsp.idct_put(dest_y, dct_linesize, block[0]);
2310 s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2311 s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
2312 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2314 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2315 if(s->chroma_y_shift){
2316 s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
2317 s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
2320 dct_linesize = uvlinesize << s->interlaced_dct;
2321 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2323 s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
2324 s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
2325 s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2326 s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2327 if(!s->chroma_x_shift){//Chroma444
2328 s->idsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2329 s->idsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2330 s->idsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2331 s->idsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* non-readable destination: copy the scratchpad result into the frame */
2339 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2340 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2341 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatch to the MPV_decode_mb_internal() variant
 * specialized for MPEG-1/2 (is_mpeg12 = 1) or the generic one.
 * NOTE(review): the preprocessor guard and else branch between the two
 * calls are elided in this listing. */
2346 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2348 if(s->out_format == FMT_MPEG1) {
2349 MPV_decode_mb_internal(s, block, 1);
2352 MPV_decode_mb_internal(s, block, 0);
2355 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2357 ff_draw_horiz_band(s->avctx, s->current_picture.f,
2358 s->last_picture.f, y, h, s->picture_structure,
2359 s->first_field, s->low_delay);
/**
 * Set up s->block_index[] (positions of the MB's luma/chroma blocks in
 * the per-block tables) and s->dest[] (pixel pointers for the MB at
 * (mb_x, mb_y)) before decoding/encoding a macroblock row.
 * NOTE(review): some lines (closing braces, an else) are elided in this
 * listing.
 */
2362 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2363 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2364 const int uvlinesize = s->current_picture.f->linesize[1];
2365 const int mb_size= 4;
/* four luma 8x8 blocks laid out on the b8 grid ... */
2367 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2368 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2369 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2370 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* ... and the two chroma blocks in their per-MB areas after the luma part */
2371 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2372 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2373 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* mb_size is log2(16): (mb_x - 1) << 4 is the x offset in pixels */
2375 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
2376 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2377 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2379 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2381 if(s->picture_structure==PICT_FRAME){
2382 s->dest[0] += s->mb_y * linesize << mb_size;
2383 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2384 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: linesizes are doubled, so use mb_y/2 rows */
2386 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2387 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2388 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2389 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 *                  (inverse) permutated to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int i;
    int16_t temp[64];

    if(last<=0) return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* Save the nonzero coefficients (in scantable order) and clear their
     * old positions so stale values cannot survive the permutation. */
    for(i=0; i<=last; i++){
        const int j= scantable[i];
        temp[j]= block[j];
        block[j]=0;
    }

    /* Write each saved coefficient back at its permuted position. */
    for(i=0; i<=last; i++){
        const int j= scantable[i];
        const int perm_j= permutation[j];
        block[perm_j]= temp[j];
    }
}
2423 void ff_mpeg_flush(AVCodecContext *avctx){
2425 MpegEncContext *s = avctx->priv_data;
2427 if(s==NULL || s->picture==NULL)
2430 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2431 ff_mpeg_unref_picture(s, &s->picture[i]);
2432 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2434 ff_mpeg_unref_picture(s, &s->current_picture);
2435 ff_mpeg_unref_picture(s, &s->last_picture);
2436 ff_mpeg_unref_picture(s, &s->next_picture);
2438 s->mb_x= s->mb_y= 0;
2440 s->parse_context.state= -1;
2441 s->parse_context.frame_start_found= 0;
2442 s->parse_context.overread= 0;
2443 s->parse_context.overread_index= 0;
2444 s->parse_context.index= 0;
2445 s->parse_context.last_index= 0;
2446 s->bitstream_buffer_size=0;
2451 * set qscale and update qscale dependent variables.
2453 void ff_set_qscale(MpegEncContext * s, int qscale)
2457 else if (qscale > 31)
2461 s->chroma_qscale= s->chroma_qscale_table[qscale];
2463 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2464 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2467 void ff_MPV_report_decode_progress(MpegEncContext *s)
2469 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2470 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);