/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * The simplest mpeg encoder (well, it was the simplest!).
 */
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
40 #include "mpegutils.h"
41 #include "mpegvideo.h"
45 #include "xvmc_internal.h"
/* Default chroma qscale mapping: identity (chroma qscale == luma qscale). */
static const uint8_t ff_default_chroma_qscale_table[32] = {
//   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
/* MPEG-1 DC scale: constant 8 for every qscale (qscale-independent DC step). */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
/* MPEG-2 DC scale for intra_dc_precision == 1 (9-bit DC): divisor 4. */
static const uint8_t mpeg2_dc_scale_table1[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
/* MPEG-2 DC scale for intra_dc_precision == 2 (10-bit DC): divisor 2. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
/* MPEG-2 DC scale for intra_dc_precision == 3 (11-bit DC): divisor 1. */
static const uint8_t mpeg2_dc_scale_table3[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
103 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
104 ff_mpeg1_dc_scale_table,
105 mpeg2_dc_scale_table1,
106 mpeg2_dc_scale_table2,
107 mpeg2_dc_scale_table3,
/* Alternate horizontal scan order (raster index per scan position). */
const uint8_t ff_alternate_horizontal_scan[64] = {
     0,  1,  2,  3,  8,  9, 16, 17,
    10, 11,  4,  5,  6,  7, 15, 14,
    13, 12, 19, 18, 24, 25, 32, 33,
    26, 27, 20, 21, 22, 23, 28, 29,
    30, 31, 34, 35, 40, 41, 48, 49,
    42, 43, 36, 37, 38, 39, 44, 45,
    46, 47, 50, 51, 56, 57, 58, 59,
    52, 53, 54, 55, 60, 61, 62, 63,
};
/* Alternate vertical scan order (MPEG-2 alternate_scan; used for
 * interlaced material, raster index per scan position). */
const uint8_t ff_alternate_vertical_scan[64] = {
     0,  8, 16, 24,  1,  9,  2, 10,
    17, 25, 32, 40, 48, 56, 57, 49,
    41, 33, 26, 18,  3, 11,  4, 12,
    19, 27, 34, 42, 50, 58, 35, 43,
    51, 59, 20, 28,  5, 13,  6, 14,
    21, 29, 36, 44, 52, 60, 37, 45,
    53, 61, 22, 30,  7, 15, 23, 31,
    38, 46, 54, 62, 39, 47, 55, 63,
};
132 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
133 int16_t *block, int n, int qscale)
135 int i, level, nCoeffs;
136 const uint16_t *quant_matrix;
138 nCoeffs= s->block_last_index[n];
141 block[0] = block[0] * s->y_dc_scale;
143 block[0] = block[0] * s->c_dc_scale;
144 /* XXX: only mpeg1 */
145 quant_matrix = s->intra_matrix;
146 for(i=1;i<=nCoeffs;i++) {
147 int j= s->intra_scantable.permutated[i];
152 level = (int)(level * qscale * quant_matrix[j]) >> 3;
153 level = (level - 1) | 1;
156 level = (int)(level * qscale * quant_matrix[j]) >> 3;
157 level = (level - 1) | 1;
164 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
165 int16_t *block, int n, int qscale)
167 int i, level, nCoeffs;
168 const uint16_t *quant_matrix;
170 nCoeffs= s->block_last_index[n];
172 quant_matrix = s->inter_matrix;
173 for(i=0; i<=nCoeffs; i++) {
174 int j= s->intra_scantable.permutated[i];
179 level = (((level << 1) + 1) * qscale *
180 ((int) (quant_matrix[j]))) >> 4;
181 level = (level - 1) | 1;
184 level = (((level << 1) + 1) * qscale *
185 ((int) (quant_matrix[j]))) >> 4;
186 level = (level - 1) | 1;
193 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
194 int16_t *block, int n, int qscale)
196 int i, level, nCoeffs;
197 const uint16_t *quant_matrix;
199 if(s->alternate_scan) nCoeffs= 63;
200 else nCoeffs= s->block_last_index[n];
203 block[0] = block[0] * s->y_dc_scale;
205 block[0] = block[0] * s->c_dc_scale;
206 quant_matrix = s->intra_matrix;
207 for(i=1;i<=nCoeffs;i++) {
208 int j= s->intra_scantable.permutated[i];
213 level = (int)(level * qscale * quant_matrix[j]) >> 3;
216 level = (int)(level * qscale * quant_matrix[j]) >> 3;
223 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
224 int16_t *block, int n, int qscale)
226 int i, level, nCoeffs;
227 const uint16_t *quant_matrix;
230 if(s->alternate_scan) nCoeffs= 63;
231 else nCoeffs= s->block_last_index[n];
234 block[0] = block[0] * s->y_dc_scale;
236 block[0] = block[0] * s->c_dc_scale;
237 quant_matrix = s->intra_matrix;
238 for(i=1;i<=nCoeffs;i++) {
239 int j= s->intra_scantable.permutated[i];
244 level = (int)(level * qscale * quant_matrix[j]) >> 3;
247 level = (int)(level * qscale * quant_matrix[j]) >> 3;
256 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
257 int16_t *block, int n, int qscale)
259 int i, level, nCoeffs;
260 const uint16_t *quant_matrix;
263 if(s->alternate_scan) nCoeffs= 63;
264 else nCoeffs= s->block_last_index[n];
266 quant_matrix = s->inter_matrix;
267 for(i=0; i<=nCoeffs; i++) {
268 int j= s->intra_scantable.permutated[i];
273 level = (((level << 1) + 1) * qscale *
274 ((int) (quant_matrix[j]))) >> 4;
277 level = (((level << 1) + 1) * qscale *
278 ((int) (quant_matrix[j]))) >> 4;
287 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
288 int16_t *block, int n, int qscale)
290 int i, level, qmul, qadd;
293 assert(s->block_last_index[n]>=0);
299 block[0] = block[0] * s->y_dc_scale;
301 block[0] = block[0] * s->c_dc_scale;
302 qadd = (qscale - 1) | 1;
309 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
311 for(i=1; i<=nCoeffs; i++) {
315 level = level * qmul - qadd;
317 level = level * qmul + qadd;
324 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
325 int16_t *block, int n, int qscale)
327 int i, level, qmul, qadd;
330 assert(s->block_last_index[n]>=0);
332 qadd = (qscale - 1) | 1;
335 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
337 for(i=0; i<=nCoeffs; i++) {
341 level = level * qmul - qadd;
343 level = level * qmul + qadd;
350 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
352 int mb_x, int mb_y, int mb_intra, int mb_skipped)
354 MpegEncContext *s = opaque;
357 s->mv_type = mv_type;
358 s->mb_intra = mb_intra;
359 s->mb_skipped = mb_skipped;
362 memcpy(s->mv, mv, sizeof(*mv));
364 ff_init_block_index(s);
365 ff_update_block_index(s);
367 s->bdsp.clear_blocks(s->block[0]);
369 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
370 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
371 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
374 ff_MPV_decode_mb(s, s->block);
377 /* init common dct for both encoder and decoder */
378 av_cold int ff_dct_common_init(MpegEncContext *s)
380 ff_blockdsp_init(&s->bdsp, s->avctx);
381 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
382 ff_idctdsp_init(&s->idsp, s->avctx);
383 ff_me_cmp_init(&s->mecc, s->avctx);
384 ff_mpegvideodsp_init(&s->mdsp);
385 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
387 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
388 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
389 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
390 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
391 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
392 if (s->flags & CODEC_FLAG_BITEXACT)
393 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
394 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
396 if (HAVE_INTRINSICS_NEON)
397 ff_MPV_common_init_neon(s);
400 ff_MPV_common_init_arm(s);
402 ff_MPV_common_init_ppc(s);
404 ff_MPV_common_init_x86(s);
406 /* load & permutate scantables
407 * note: only wmv uses different ones
409 if (s->alternate_scan) {
410 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
411 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
413 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
414 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
416 ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
417 ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
422 static int frame_size_alloc(MpegEncContext *s, int linesize)
424 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
426 // edge emu needs blocksize + filter length - 1
427 // (= 17x17 for halfpel / 21x21 for h264)
428 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
429 // at uvlinesize. It supports only YUV420 so 24x24 is enough
430 // linesize * interlaced * MBsize
431 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
434 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
436 s->me.temp = s->me.scratchpad;
437 s->rd_scratchpad = s->me.scratchpad;
438 s->b_scratchpad = s->me.scratchpad;
439 s->obmc_scratchpad = s->me.scratchpad + 16;
443 av_freep(&s->edge_emu_buffer);
444 return AVERROR(ENOMEM);
448 * Allocate a frame buffer
450 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
452 int edges_needed = av_codec_is_encoder(s->avctx->codec);
456 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
457 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
458 s->codec_id != AV_CODEC_ID_MSS2) {
460 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
461 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
464 r = ff_thread_get_buffer(s->avctx, &pic->tf,
465 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
467 pic->f->width = s->avctx->width;
468 pic->f->height = s->avctx->height;
469 pic->f->format = s->avctx->pix_fmt;
470 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
473 if (r < 0 || !pic->f->buf[0]) {
474 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
481 for (i = 0; pic->f->data[i]; i++) {
482 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
483 pic->f->linesize[i] +
484 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
485 pic->f->data[i] += offset;
487 pic->f->width = s->avctx->width;
488 pic->f->height = s->avctx->height;
491 if (s->avctx->hwaccel) {
492 assert(!pic->hwaccel_picture_private);
493 if (s->avctx->hwaccel->frame_priv_data_size) {
494 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
495 if (!pic->hwaccel_priv_buf) {
496 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
499 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
503 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
504 s->uvlinesize != pic->f->linesize[1])) {
505 av_log(s->avctx, AV_LOG_ERROR,
506 "get_buffer() failed (stride changed)\n");
507 ff_mpeg_unref_picture(s, pic);
511 if (pic->f->linesize[1] != pic->f->linesize[2]) {
512 av_log(s->avctx, AV_LOG_ERROR,
513 "get_buffer() failed (uv stride mismatch)\n");
514 ff_mpeg_unref_picture(s, pic);
518 if (!s->edge_emu_buffer &&
519 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
520 av_log(s->avctx, AV_LOG_ERROR,
521 "get_buffer() failed to allocate context scratch buffers.\n");
522 ff_mpeg_unref_picture(s, pic);
529 void ff_free_picture_tables(Picture *pic)
533 av_buffer_unref(&pic->mb_var_buf);
534 av_buffer_unref(&pic->mc_mb_var_buf);
535 av_buffer_unref(&pic->mb_mean_buf);
536 av_buffer_unref(&pic->mbskip_table_buf);
537 av_buffer_unref(&pic->qscale_table_buf);
538 av_buffer_unref(&pic->mb_type_buf);
540 for (i = 0; i < 2; i++) {
541 av_buffer_unref(&pic->motion_val_buf[i]);
542 av_buffer_unref(&pic->ref_index_buf[i]);
546 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
548 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
549 const int mb_array_size = s->mb_stride * s->mb_height;
550 const int b8_array_size = s->b8_stride * s->mb_height * 2;
554 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
555 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
556 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
558 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
559 return AVERROR(ENOMEM);
562 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
563 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
564 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
565 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
566 return AVERROR(ENOMEM);
569 if (s->out_format == FMT_H263 || s->encoding) {
570 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
571 int ref_index_size = 4 * mb_array_size;
573 for (i = 0; mv_size && i < 2; i++) {
574 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
575 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
576 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
577 return AVERROR(ENOMEM);
584 static int make_tables_writable(Picture *pic)
587 #define MAKE_WRITABLE(table) \
590 (ret = av_buffer_make_writable(&pic->table)) < 0)\
594 MAKE_WRITABLE(mb_var_buf);
595 MAKE_WRITABLE(mc_mb_var_buf);
596 MAKE_WRITABLE(mb_mean_buf);
597 MAKE_WRITABLE(mbskip_table_buf);
598 MAKE_WRITABLE(qscale_table_buf);
599 MAKE_WRITABLE(mb_type_buf);
601 for (i = 0; i < 2; i++) {
602 MAKE_WRITABLE(motion_val_buf[i]);
603 MAKE_WRITABLE(ref_index_buf[i]);
610 * Allocate a Picture.
611 * The pixels are allocated/set by calling get_buffer() if shared = 0
613 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
618 assert(pic->f->data[0]);
621 assert(!pic->f->buf[0]);
623 if (alloc_frame_buffer(s, pic) < 0)
626 s->linesize = pic->f->linesize[0];
627 s->uvlinesize = pic->f->linesize[1];
630 if (!pic->qscale_table_buf)
631 ret = alloc_picture_tables(s, pic);
633 ret = make_tables_writable(pic);
638 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
639 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
640 pic->mb_mean = pic->mb_mean_buf->data;
643 pic->mbskip_table = pic->mbskip_table_buf->data;
644 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
645 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
647 if (pic->motion_val_buf[0]) {
648 for (i = 0; i < 2; i++) {
649 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
650 pic->ref_index[i] = pic->ref_index_buf[i]->data;
656 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
657 ff_mpeg_unref_picture(s, pic);
658 ff_free_picture_tables(pic);
659 return AVERROR(ENOMEM);
663 * Deallocate a picture.
665 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
667 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
670 /* WM Image / Screen codecs allocate internal buffers with different
671 * dimensions / colorspaces; ignore user-defined callbacks for these. */
672 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
673 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
674 s->codec_id != AV_CODEC_ID_MSS2)
675 ff_thread_release_buffer(s->avctx, &pic->tf);
677 av_frame_unref(pic->f);
679 av_buffer_unref(&pic->hwaccel_priv_buf);
681 if (pic->needs_realloc)
682 ff_free_picture_tables(pic);
684 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
687 static int update_picture_tables(Picture *dst, Picture *src)
691 #define UPDATE_TABLE(table)\
694 (!dst->table || dst->table->buffer != src->table->buffer)) {\
695 av_buffer_unref(&dst->table);\
696 dst->table = av_buffer_ref(src->table);\
698 ff_free_picture_tables(dst);\
699 return AVERROR(ENOMEM);\
704 UPDATE_TABLE(mb_var_buf);
705 UPDATE_TABLE(mc_mb_var_buf);
706 UPDATE_TABLE(mb_mean_buf);
707 UPDATE_TABLE(mbskip_table_buf);
708 UPDATE_TABLE(qscale_table_buf);
709 UPDATE_TABLE(mb_type_buf);
710 for (i = 0; i < 2; i++) {
711 UPDATE_TABLE(motion_val_buf[i]);
712 UPDATE_TABLE(ref_index_buf[i]);
715 dst->mb_var = src->mb_var;
716 dst->mc_mb_var = src->mc_mb_var;
717 dst->mb_mean = src->mb_mean;
718 dst->mbskip_table = src->mbskip_table;
719 dst->qscale_table = src->qscale_table;
720 dst->mb_type = src->mb_type;
721 for (i = 0; i < 2; i++) {
722 dst->motion_val[i] = src->motion_val[i];
723 dst->ref_index[i] = src->ref_index[i];
729 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
733 av_assert0(!dst->f->buf[0]);
734 av_assert0(src->f->buf[0]);
738 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
742 ret = update_picture_tables(dst, src);
746 if (src->hwaccel_picture_private) {
747 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
748 if (!dst->hwaccel_priv_buf)
750 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
753 dst->field_picture = src->field_picture;
754 dst->mb_var_sum = src->mb_var_sum;
755 dst->mc_mb_var_sum = src->mc_mb_var_sum;
756 dst->b_frame_score = src->b_frame_score;
757 dst->needs_realloc = src->needs_realloc;
758 dst->reference = src->reference;
759 dst->shared = src->shared;
763 ff_mpeg_unref_picture(s, dst);
767 static void exchange_uv(MpegEncContext *s)
772 s->pblocks[4] = s->pblocks[5];
776 static int init_duplicate_context(MpegEncContext *s)
778 int y_size = s->b8_stride * (2 * s->mb_height + 1);
779 int c_size = s->mb_stride * (s->mb_height + 1);
780 int yc_size = y_size + 2 * c_size;
788 s->obmc_scratchpad = NULL;
791 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
792 ME_MAP_SIZE * sizeof(uint32_t), fail)
793 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
794 ME_MAP_SIZE * sizeof(uint32_t), fail)
795 if (s->avctx->noise_reduction) {
796 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
797 2 * 64 * sizeof(int), fail)
800 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
801 s->block = s->blocks[0];
803 for (i = 0; i < 12; i++) {
804 s->pblocks[i] = &s->block[i];
806 if (s->avctx->codec_tag == AV_RL32("VCR2"))
809 if (s->out_format == FMT_H263) {
811 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
812 yc_size * sizeof(int16_t) * 16, fail);
813 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
814 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
815 s->ac_val[2] = s->ac_val[1] + c_size;
820 return -1; // free() through ff_MPV_common_end()
823 static void free_duplicate_context(MpegEncContext *s)
828 av_freep(&s->edge_emu_buffer);
829 av_freep(&s->me.scratchpad);
833 s->obmc_scratchpad = NULL;
835 av_freep(&s->dct_error_sum);
836 av_freep(&s->me.map);
837 av_freep(&s->me.score_map);
838 av_freep(&s->blocks);
839 av_freep(&s->ac_val_base);
843 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
845 #define COPY(a) bak->a = src->a
846 COPY(edge_emu_buffer);
851 COPY(obmc_scratchpad);
858 COPY(me.map_generation);
870 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
874 // FIXME copy only needed parts
876 backup_duplicate_context(&bak, dst);
877 memcpy(dst, src, sizeof(MpegEncContext));
878 backup_duplicate_context(dst, &bak);
879 for (i = 0; i < 12; i++) {
880 dst->pblocks[i] = &dst->block[i];
882 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
884 if (!dst->edge_emu_buffer &&
885 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
886 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
887 "scratch buffers.\n");
890 // STOP_TIMER("update_duplicate_context")
891 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
895 int ff_mpeg_update_thread_context(AVCodecContext *dst,
896 const AVCodecContext *src)
899 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
901 if (dst == src || !s1->context_initialized)
904 // FIXME can parameters change on I-frames?
905 // in that case dst may need a reinit
906 if (!s->context_initialized) {
907 memcpy(s, s1, sizeof(MpegEncContext));
910 s->bitstream_buffer = NULL;
911 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
913 ff_MPV_common_init(s);
916 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
918 s->context_reinit = 0;
919 s->height = s1->height;
920 s->width = s1->width;
921 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
925 s->avctx->coded_height = s1->avctx->coded_height;
926 s->avctx->coded_width = s1->avctx->coded_width;
927 s->avctx->width = s1->avctx->width;
928 s->avctx->height = s1->avctx->height;
930 s->coded_picture_number = s1->coded_picture_number;
931 s->picture_number = s1->picture_number;
933 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
934 ff_mpeg_unref_picture(s, &s->picture[i]);
935 if (s1->picture[i].f->buf[0] &&
936 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
940 #define UPDATE_PICTURE(pic)\
942 ff_mpeg_unref_picture(s, &s->pic);\
943 if (s1->pic.f->buf[0])\
944 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
946 ret = update_picture_tables(&s->pic, &s1->pic);\
951 UPDATE_PICTURE(current_picture);
952 UPDATE_PICTURE(last_picture);
953 UPDATE_PICTURE(next_picture);
955 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
956 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
957 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
959 // Error/bug resilience
960 s->next_p_frame_damaged = s1->next_p_frame_damaged;
961 s->workaround_bugs = s1->workaround_bugs;
964 memcpy(&s->last_time_base, &s1->last_time_base,
965 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
966 (char *) &s1->last_time_base);
969 s->max_b_frames = s1->max_b_frames;
970 s->low_delay = s1->low_delay;
971 s->droppable = s1->droppable;
973 // DivX handling (doesn't work)
974 s->divx_packed = s1->divx_packed;
976 if (s1->bitstream_buffer) {
977 if (s1->bitstream_buffer_size +
978 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
979 av_fast_malloc(&s->bitstream_buffer,
980 &s->allocated_bitstream_buffer_size,
981 s1->allocated_bitstream_buffer_size);
982 s->bitstream_buffer_size = s1->bitstream_buffer_size;
983 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
984 s1->bitstream_buffer_size);
985 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
986 FF_INPUT_BUFFER_PADDING_SIZE);
989 // linesize dependend scratch buffer allocation
990 if (!s->edge_emu_buffer)
992 if (frame_size_alloc(s, s1->linesize) < 0) {
993 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
994 "scratch buffers.\n");
995 return AVERROR(ENOMEM);
998 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
999 "be allocated due to unknown size.\n");
1003 // MPEG2/interlacing info
1004 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1005 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1007 if (!s1->first_field) {
1008 s->last_pict_type = s1->pict_type;
1009 if (s1->current_picture_ptr)
1010 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1017 * Set the given MpegEncContext to common defaults
1018 * (same for encoding and decoding).
1019 * The changed fields will not depend upon the
1020 * prior state of the MpegEncContext.
1022 void ff_MPV_common_defaults(MpegEncContext *s)
1024 s->y_dc_scale_table =
1025 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1026 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1027 s->progressive_frame = 1;
1028 s->progressive_sequence = 1;
1029 s->picture_structure = PICT_FRAME;
1031 s->coded_picture_number = 0;
1032 s->picture_number = 0;
1037 s->slice_context_count = 1;
1041 * Set the given MpegEncContext to defaults for decoding.
1042 * the changed fields will not depend upon
1043 * the prior state of the MpegEncContext.
1045 void ff_MPV_decode_defaults(MpegEncContext *s)
1047 ff_MPV_common_defaults(s);
1050 static int init_er(MpegEncContext *s)
1052 ERContext *er = &s->er;
1053 int mb_array_size = s->mb_height * s->mb_stride;
1056 er->avctx = s->avctx;
1057 er->mecc = &s->mecc;
1059 er->mb_index2xy = s->mb_index2xy;
1060 er->mb_num = s->mb_num;
1061 er->mb_width = s->mb_width;
1062 er->mb_height = s->mb_height;
1063 er->mb_stride = s->mb_stride;
1064 er->b8_stride = s->b8_stride;
1066 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1067 er->error_status_table = av_mallocz(mb_array_size);
1068 if (!er->er_temp_buffer || !er->error_status_table)
1071 er->mbskip_table = s->mbskip_table;
1072 er->mbintra_table = s->mbintra_table;
1074 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1075 er->dc_val[i] = s->dc_val[i];
1077 er->decode_mb = mpeg_er_decode_mb;
1082 av_freep(&er->er_temp_buffer);
1083 av_freep(&er->error_status_table);
1084 return AVERROR(ENOMEM);
1088 * Initialize and allocates MpegEncContext fields dependent on the resolution.
1090 static int init_context_frame(MpegEncContext *s)
1092 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1094 s->mb_width = (s->width + 15) / 16;
1095 s->mb_stride = s->mb_width + 1;
1096 s->b8_stride = s->mb_width * 2 + 1;
1097 mb_array_size = s->mb_height * s->mb_stride;
1098 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1100 /* set default edge pos, will be overriden
1101 * in decode_header if needed */
1102 s->h_edge_pos = s->mb_width * 16;
1103 s->v_edge_pos = s->mb_height * 16;
1105 s->mb_num = s->mb_width * s->mb_height;
1110 s->block_wrap[3] = s->b8_stride;
1112 s->block_wrap[5] = s->mb_stride;
1114 y_size = s->b8_stride * (2 * s->mb_height + 1);
1115 c_size = s->mb_stride * (s->mb_height + 1);
1116 yc_size = y_size + 2 * c_size;
1118 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
1119 fail); // error ressilience code looks cleaner with this
1120 for (y = 0; y < s->mb_height; y++)
1121 for (x = 0; x < s->mb_width; x++)
1122 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1124 s->mb_index2xy[s->mb_height * s->mb_width] =
1125 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1128 /* Allocate MV tables */
1129 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
1130 mv_table_size * 2 * sizeof(int16_t), fail);
1131 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
1132 mv_table_size * 2 * sizeof(int16_t), fail);
1133 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
1134 mv_table_size * 2 * sizeof(int16_t), fail);
1135 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
1136 mv_table_size * 2 * sizeof(int16_t), fail);
1137 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
1138 mv_table_size * 2 * sizeof(int16_t), fail);
1139 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
1140 mv_table_size * 2 * sizeof(int16_t), fail);
1141 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1142 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1143 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1144 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
1146 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
1148 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1150 /* Allocate MB type table */
1151 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
1152 sizeof(uint16_t), fail); // needed for encoding
1154 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
1157 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1158 mb_array_size * sizeof(float), fail);
1159 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1160 mb_array_size * sizeof(float), fail);
1164 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1165 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1166 /* interlaced direct mode decoding tables */
1167 for (i = 0; i < 2; i++) {
1169 for (j = 0; j < 2; j++) {
1170 for (k = 0; k < 2; k++) {
1171 FF_ALLOCZ_OR_GOTO(s->avctx,
1172 s->b_field_mv_table_base[i][j][k],
1173 mv_table_size * 2 * sizeof(int16_t),
1175 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1178 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
1179 mb_array_size * 2 * sizeof(uint8_t), fail);
1180 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
1181 mv_table_size * 2 * sizeof(int16_t), fail);
1182 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
1185 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
1186 mb_array_size * 2 * sizeof(uint8_t), fail);
1189 if (s->out_format == FMT_H263) {
1191 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
1192 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1194 /* cbp, ac_pred, pred_dir */
1195 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
1196 mb_array_size * sizeof(uint8_t), fail);
1197 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
1198 mb_array_size * sizeof(uint8_t), fail);
1201 if (s->h263_pred || s->h263_plus || !s->encoding) {
1203 // MN: we need these for error resilience of intra-frames
1204 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
1205 yc_size * sizeof(int16_t), fail);
1206 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1207 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1208 s->dc_val[2] = s->dc_val[1] + c_size;
1209 for (i = 0; i < yc_size; i++)
1210 s->dc_val_base[i] = 1024;
1213 /* which mb is a intra block */
1214 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1215 memset(s->mbintra_table, 1, mb_array_size);
1217 /* init macroblock skip table */
1218 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1219 // Note the + 1 is for a quicker mpeg4 slice_end detection
1223 return AVERROR(ENOMEM);
1227 * init common structure for both encoder and decoder.
1228 * this assumes that some variables like width/height are already set
1230 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* One slice context per thread when slice threading is active, else one. */
1233 int nb_slices = (HAVE_THREADS &&
1234 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1235 s->avctx->thread_count : 1;
/* An explicit encoder slice count overrides the thread-based default. */
1237 if (s->encoding && s->avctx->slices)
1238 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2: round mb_height up to an even number of MB rows
 * (32-pixel units) so each field gets whole macroblock rows. */
1240 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1241 s->mb_height = (s->height + 31) / 32 * 2;
1243 s->mb_height = (s->height + 15) / 16;
1245 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1246 av_log(s->avctx, AV_LOG_ERROR,
1247 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp slices: never more than MAX_THREADS and never more slices than
 * macroblock rows (each slice needs at least one row). */
1251 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1254 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1256 max_slices = MAX_THREADS;
1257 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1258 " reducing to %d\n", nb_slices, max_slices);
1259 nb_slices = max_slices;
1262 if ((s->width || s->height) &&
1263 av_image_check_size(s->width, s->height, 0, s->avctx))
1266 ff_dct_common_init(s);
1268 s->flags = s->avctx->flags;
1269 s->flags2 = s->avctx->flags2;
1271 /* set chroma shifts */
1272 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1274 &s->chroma_y_shift);
1276 /* convert fourcc to upper case */
1277 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1279 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
/* Allocate the picture pool plus an AVFrame for every slot and for the
 * four special pictures (next/last/current/new). */
1281 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1282 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1283 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1284 s->picture[i].f = av_frame_alloc();
1285 if (!s->picture[i].f)
1288 memset(&s->next_picture, 0, sizeof(s->next_picture));
1289 memset(&s->last_picture, 0, sizeof(s->last_picture));
1290 memset(&s->current_picture, 0, sizeof(s->current_picture));
1291 memset(&s->new_picture, 0, sizeof(s->new_picture));
1292 s->next_picture.f = av_frame_alloc();
1293 if (!s->next_picture.f)
1295 s->last_picture.f = av_frame_alloc();
1296 if (!s->last_picture.f)
1298 s->current_picture.f = av_frame_alloc();
1299 if (!s->current_picture.f)
1301 s->new_picture.f = av_frame_alloc();
1302 if (!s->new_picture.f)
/* Resolution-dependent tables are only built once dimensions are known;
 * otherwise they are deferred (see ff_MPV_common_frame_size_change). */
1305 if (s->width && s->height) {
1306 if (init_context_frame(s))
1309 s->parse_context.state = -1;
1312 s->context_initialized = 1;
1313 s->thread_context[0] = s;
1315 if (s->width && s->height) {
1316 if (nb_slices > 1) {
/* Slice threading: clone the main context for each additional slice,
 * then assign each context its [start_mb_y, end_mb_y) row range. */
1317 for (i = 1; i < nb_slices; i++) {
1318 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1319 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1322 for (i = 0; i < nb_slices; i++) {
1323 if (init_duplicate_context(s->thread_context[i]) < 0)
1325 s->thread_context[i]->start_mb_y =
1326 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1327 s->thread_context[i]->end_mb_y =
1328 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1331 if (init_duplicate_context(s) < 0)
1334 s->end_mb_y = s->mb_height;
1336 s->slice_context_count = nb_slices;
/* Error path: tear down everything allocated so far. */
1341 ff_MPV_common_end(s);
1346 * Frees and resets MpegEncContext fields depending on the resolution.
1347 * Is used during resolution changes to avoid a full reinitialization of the
1350 static int free_context_frame(MpegEncContext *s)
/* Free all motion-vector tables and NULL the derived pointers that alias
 * into the freed _base buffers, so stale pointers cannot be dereferenced. */
1354 av_freep(&s->mb_type);
1355 av_freep(&s->p_mv_table_base);
1356 av_freep(&s->b_forw_mv_table_base);
1357 av_freep(&s->b_back_mv_table_base);
1358 av_freep(&s->b_bidir_forw_mv_table_base);
1359 av_freep(&s->b_bidir_back_mv_table_base);
1360 av_freep(&s->b_direct_mv_table_base);
1361 s->p_mv_table = NULL;
1362 s->b_forw_mv_table = NULL;
1363 s->b_back_mv_table = NULL;
1364 s->b_bidir_forw_mv_table = NULL;
1365 s->b_bidir_back_mv_table = NULL;
1366 s->b_direct_mv_table = NULL;
/* Field-MV tables are indexed [field][list][dir]; free every combination. */
1367 for (i = 0; i < 2; i++) {
1368 for (j = 0; j < 2; j++) {
1369 for (k = 0; k < 2; k++) {
1370 av_freep(&s->b_field_mv_table_base[i][j][k]);
1371 s->b_field_mv_table[i][j][k] = NULL;
1373 av_freep(&s->b_field_select_table[i][j]);
1374 av_freep(&s->p_field_mv_table_base[i][j]);
1375 s->p_field_mv_table[i][j] = NULL;
1377 av_freep(&s->p_field_select_table[i]);
/* Per-block prediction state and per-MB side tables. */
1380 av_freep(&s->dc_val_base);
1381 av_freep(&s->coded_block_base);
1382 av_freep(&s->mbintra_table);
1383 av_freep(&s->cbp_table);
1384 av_freep(&s->pred_dir_table);
1386 av_freep(&s->mbskip_table);
1388 av_freep(&s->er.error_status_table);
1389 av_freep(&s->er.er_temp_buffer);
1390 av_freep(&s->mb_index2xy);
1391 av_freep(&s->lambda_table);
1392 av_freep(&s->cplx_tab);
1393 av_freep(&s->bits_tab);
/* Reset so the next init_context_frame() re-derives the strides. */
1395 s->linesize = s->uvlinesize = 0;
/**
 * Reinitialize the resolution-dependent parts of the context after a
 * frame-size change, without a full ff_MPV_common_init() teardown.
 * Slice contexts are destroyed and rebuilt for the new mb_height.
 */
1400 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Tear down per-slice duplicate contexts (index 0 is the main context
 * and is only "duplicate-freed", not av_freep'd). */
1404 if (s->slice_context_count > 1) {
1405 for (i = 0; i < s->slice_context_count; i++) {
1406 free_duplicate_context(s->thread_context[i]);
1408 for (i = 1; i < s->slice_context_count; i++) {
1409 av_freep(&s->thread_context[i]);
1412 free_duplicate_context(s);
1414 if ((err = free_context_frame(s)) < 0)
/* Existing pool pictures keep their slots but must be reallocated at the
 * new size before reuse. */
1418 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1419 s->picture[i].needs_realloc = 1;
1422 s->last_picture_ptr =
1423 s->next_picture_ptr =
1424 s->current_picture_ptr = NULL;
/* Recompute mb_height; interlaced MPEG-2 needs an even row count
 * (same rule as in ff_MPV_common_init). */
1427 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1428 s->mb_height = (s->height + 31) / 32 * 2;
1430 s->mb_height = (s->height + 15) / 16;
1432 if ((s->width || s->height) &&
1433 av_image_check_size(s->width, s->height, 0, s->avctx))
1434 return AVERROR_INVALIDDATA;
1436 if ((err = init_context_frame(s)))
1439 s->thread_context[0] = s;
1441 if (s->width && s->height) {
1442 int nb_slices = s->slice_context_count;
1443 if (nb_slices > 1) {
/* Rebuild the cloned slice contexts and their MB-row partitions. */
1444 for (i = 1; i < nb_slices; i++) {
1445 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1446 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1449 for (i = 0; i < nb_slices; i++) {
1450 if (init_duplicate_context(s->thread_context[i]) < 0)
1452 s->thread_context[i]->start_mb_y =
1453 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1454 s->thread_context[i]->end_mb_y =
1455 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1458 if (init_duplicate_context(s) < 0)
1461 s->end_mb_y = s->mb_height;
1463 s->slice_context_count = nb_slices;
/* Error path: full teardown; caller must not reuse the context. */
1468 ff_MPV_common_end(s);
1472 /* Free everything allocated by ff_MPV_common_init(), for both encoder and decoder. */
1473 void ff_MPV_common_end(MpegEncContext *s)
/* Free per-slice duplicate contexts; slot 0 is the main context and is
 * only duplicate-freed, never av_freep'd. */
1477 if (s->slice_context_count > 1) {
1478 for (i = 0; i < s->slice_context_count; i++) {
1479 free_duplicate_context(s->thread_context[i]);
1481 for (i = 1; i < s->slice_context_count; i++) {
1482 av_freep(&s->thread_context[i]);
1484 s->slice_context_count = 1;
1485 } else free_duplicate_context(s);
1487 av_freep(&s->parse_context.buffer);
1488 s->parse_context.buffer_size = 0;
1490 av_freep(&s->bitstream_buffer);
1491 s->allocated_bitstream_buffer_size = 0;
/* Release the picture pool: per-picture tables, buffer refs, then the
 * AVFrame itself, for every slot and the four special pictures. */
1494 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1495 ff_free_picture_tables(&s->picture[i]);
1496 ff_mpeg_unref_picture(s, &s->picture[i]);
1497 av_frame_free(&s->picture[i].f);
1500 av_freep(&s->picture);
1501 ff_free_picture_tables(&s->last_picture);
1502 ff_mpeg_unref_picture(s, &s->last_picture);
1503 av_frame_free(&s->last_picture.f);
1504 ff_free_picture_tables(&s->current_picture);
1505 ff_mpeg_unref_picture(s, &s->current_picture);
1506 av_frame_free(&s->current_picture.f);
1507 ff_free_picture_tables(&s->next_picture);
1508 ff_mpeg_unref_picture(s, &s->next_picture);
1509 av_frame_free(&s->next_picture.f);
1510 ff_free_picture_tables(&s->new_picture);
1511 ff_mpeg_unref_picture(s, &s->new_picture);
1512 av_frame_free(&s->new_picture.f);
1514 free_context_frame(s);
/* Reset state so a subsequent ff_MPV_common_init() starts clean. */
1516 s->context_initialized = 0;
1517 s->last_picture_ptr =
1518 s->next_picture_ptr =
1519 s->current_picture_ptr = NULL;
1520 s->linesize = s->uvlinesize = 0;
/**
 * Initialize an RLTable's max_level[], max_run[] and index_run[] arrays
 * from its table_run[]/table_level[] data.
 * @param static_store if non-NULL, per-'last' static storage of size
 *        2*MAX_RUN + MAX_LEVEL + 3 is used instead of av_malloc.
 */
1523 av_cold void ff_init_rl(RLTable *rl,
1524 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1526 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1527 uint8_t index_run[MAX_RUN + 1];
1528 int last, run, level, start, end, i;
1530 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1531 if (static_store && rl->max_level[0])
1534 /* compute max_level[], max_run[] and index_run[] */
1535 for (last = 0; last < 2; last++) {
/* rl->n is used as the "unset" sentinel in index_run[]. */
1544 memset(max_level, 0, MAX_RUN + 1);
1545 memset(max_run, 0, MAX_LEVEL + 1);
1546 memset(index_run, rl->n, MAX_RUN + 1);
1547 for (i = start; i < end; i++) {
1548 run = rl->table_run[i];
1549 level = rl->table_level[i];
/* Record the first table index for each run value. */
1550 if (index_run[run] == rl->n)
1552 if (level > max_level[run])
1553 max_level[run] = level;
1554 if (run > max_run[level])
1555 max_run[level] = run;
/* Static path: carve the three arrays out of static_store[last];
 * dynamic path: av_malloc each one.
 * NOTE(review): the av_malloc return values are not checked here. */
1558 rl->max_level[last] = static_store[last];
1560 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1561 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1563 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1565 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1566 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1568 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1570 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1571 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Build the per-qscale RL_VLC tables (rl_vlc[q]) from the plain VLC table,
 * pre-applying dequantization (level * qmul + qadd) for q = 0..31.
 */
1575 av_cold void ff_init_vlc_rl(RLTable *rl)
1579 for (q = 0; q < 32; q++) {
/* qadd is the standard H.263-style rounding term: (q - 1) | 1. */
1581 int qadd = (q - 1) | 1;
1587 for (i = 0; i < rl->vlc.table_size; i++) {
1588 int code = rl->vlc.table[i][0];
1589 int len = rl->vlc.table[i][1];
1592 if (len == 0) { // illegal code
1595 } else if (len < 0) { // more bits needed
1599 if (code == rl->n) { // esc
/* Regular code: decode run/level; codes past rl->last mark the
 * last coefficient and get run offset by 192 (64 * 3). */
1603 run = rl->table_run[code] + 1;
1604 level = rl->table_level[code] * qmul + qadd;
1605 if (code >= rl->last) run += 192;
1608 rl->rl_vlc[q][i].len = len;
1609 rl->rl_vlc[q][i].level = level;
1610 rl->rl_vlc[q][i].run = run;
/* Unreference every picture in the pool that is not a reference frame. */
1615 static void release_unused_pictures(MpegEncContext *s)
1619 /* release non reference frames */
1620 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1621 if (!s->picture[i].reference)
1622 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Return whether a pool slot may be (re)used: it has no data buffer, or it
 * is flagged for reallocation and not held as a delayed reference. */
1626 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1628 if (pic->f->buf[0] == NULL)
1630 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Find a free slot in the picture pool. Slots with no buffer are preferred
 * over merely-reusable ones; returns the index or AVERROR_INVALIDDATA if
 * the pool is exhausted. */
1635 static int find_unused_picture(MpegEncContext *s, int shared)
/* First pass: completely empty slots. */
1640 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1641 if (s->picture[i].f->buf[0] == NULL)
/* Second pass: slots that can be reclaimed (see pic_is_unused). */
1645 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1646 if (pic_is_unused(s, &s->picture[i]))
1651 return AVERROR_INVALIDDATA;
/* Public wrapper around find_unused_picture(): additionally resets and
 * unrefs a slot that was flagged needs_realloc before handing it out. */
1654 int ff_find_unused_picture(MpegEncContext *s, int shared)
1656 int ret = find_unused_picture(s, shared);
1658 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1659 if (s->picture[ret].needs_realloc) {
1660 s->picture[ret].needs_realloc = 0;
1661 ff_free_picture_tables(&s->picture[ret]);
1662 ff_mpeg_unref_picture(s, &s->picture[ret]);
1669 * generic function called after decoding
1670 * the header and before a frame is decoded.
1672 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1678 /* mark & release old frames */
1679 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1680 s->last_picture_ptr != s->next_picture_ptr &&
1681 s->last_picture_ptr->f->buf[0]) {
1682 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1685 /* release forgotten pictures */
1686 /* if (mpeg124/h263) */
1687 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1688 if (&s->picture[i] != s->last_picture_ptr &&
1689 &s->picture[i] != s->next_picture_ptr &&
1690 s->picture[i].reference && !s->picture[i].needs_realloc) {
/* Under frame threading a still-referenced stray picture is expected;
 * otherwise it indicates a leak, so log it. */
1691 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1692 av_log(avctx, AV_LOG_ERROR,
1693 "releasing zombie picture\n");
1694 ff_mpeg_unref_picture(s, &s->picture[i]);
1698 ff_mpeg_unref_picture(s, &s->current_picture);
1700 release_unused_pictures(s);
/* Choose the Picture that will hold the frame being decoded. */
1702 if (s->current_picture_ptr &&
1703 s->current_picture_ptr->f->buf[0] == NULL) {
1704 // we already have an unused image
1705 // (maybe it was set before reading the header)
1706 pic = s->current_picture_ptr;
1708 i = ff_find_unused_picture(s, 0);
1710 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1713 pic = &s->picture[i];
1717 if (!s->droppable) {
1718 if (s->pict_type != AV_PICTURE_TYPE_B)
1722 pic->f->coded_picture_number = s->coded_picture_number++;
1724 if (ff_alloc_picture(s, pic, 0) < 0)
1727 s->current_picture_ptr = pic;
1728 // FIXME use only the vars from current_pic
1729 s->current_picture_ptr->f->top_field_first = s->top_field_first;
/* MPEG-1/2 field pictures: derive top_field_first from the structure of
 * the first field rather than from the header flag. */
1730 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1731 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1732 if (s->picture_structure != PICT_FRAME)
1733 s->current_picture_ptr->f->top_field_first =
1734 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1736 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1737 !s->progressive_sequence;
1738 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1740 s->current_picture_ptr->f->pict_type = s->pict_type;
1741 // if (s->flags && CODEC_FLAG_QSCALE)
1742 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1743 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1745 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1746 s->current_picture_ptr)) < 0)
/* Non-B frames advance the reference chain: last <- next <- current. */
1749 if (s->pict_type != AV_PICTURE_TYPE_B) {
1750 s->last_picture_ptr = s->next_picture_ptr;
1752 s->next_picture_ptr = s->current_picture_ptr;
1754 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1755 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1756 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1757 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1758 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1759 s->pict_type, s->droppable);
/* No usable last picture (e.g. stream starts on a non-keyframe or a field
 * keyframe): synthesize a gray dummy so motion compensation has a source. */
1761 if ((s->last_picture_ptr == NULL ||
1762 s->last_picture_ptr->f->buf[0] == NULL) &&
1763 (s->pict_type != AV_PICTURE_TYPE_I ||
1764 s->picture_structure != PICT_FRAME)) {
1765 int h_chroma_shift, v_chroma_shift;
1766 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1767 &h_chroma_shift, &v_chroma_shift);
1768 if (s->pict_type != AV_PICTURE_TYPE_I)
1769 av_log(avctx, AV_LOG_ERROR,
1770 "warning: first frame is no keyframe\n");
1771 else if (s->picture_structure != PICT_FRAME)
1772 av_log(avctx, AV_LOG_INFO,
1773 "allocate dummy last picture for field based first keyframe\n");
1775 /* Allocate a dummy frame */
1776 i = ff_find_unused_picture(s, 0);
1778 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1781 s->last_picture_ptr = &s->picture[i];
1783 s->last_picture_ptr->reference = 3;
1784 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1786 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1787 s->last_picture_ptr = NULL;
/* Fill the dummy: black luma (0), neutral chroma (0x80). */
1791 memset(s->last_picture_ptr->f->data[0], 0,
1792 avctx->height * s->last_picture_ptr->f->linesize[0]);
1793 memset(s->last_picture_ptr->f->data[1], 0x80,
1794 (avctx->height >> v_chroma_shift) *
1795 s->last_picture_ptr->f->linesize[1]);
1796 memset(s->last_picture_ptr->f->data[2], 0x80,
1797 (avctx->height >> v_chroma_shift) *
1798 s->last_picture_ptr->f->linesize[2]);
/* Mark both fields fully decoded so frame threads never wait on it. */
1800 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1801 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same trick for a missing backward reference of a B frame. */
1803 if ((s->next_picture_ptr == NULL ||
1804 s->next_picture_ptr->f->buf[0] == NULL) &&
1805 s->pict_type == AV_PICTURE_TYPE_B) {
1806 /* Allocate a dummy frame */
1807 i = ff_find_unused_picture(s, 0);
1809 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1812 s->next_picture_ptr = &s->picture[i];
1814 s->next_picture_ptr->reference = 3;
1815 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1817 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1818 s->next_picture_ptr = NULL;
1821 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1822 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
/* Take owned references to last/next for the duration of this frame. */
1825 if (s->last_picture_ptr) {
1826 ff_mpeg_unref_picture(s, &s->last_picture);
1827 if (s->last_picture_ptr->f->buf[0] &&
1828 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1829 s->last_picture_ptr)) < 0)
1832 if (s->next_picture_ptr) {
1833 ff_mpeg_unref_picture(s, &s->next_picture);
1834 if (s->next_picture_ptr->f->buf[0] &&
1835 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1836 s->next_picture_ptr)) < 0)
1840 if (s->pict_type != AV_PICTURE_TYPE_I &&
1841 !(s->last_picture_ptr && s->last_picture_ptr->f->buf[0])) {
1842 av_log(s, AV_LOG_ERROR,
1843 "Non-reference picture received and no reference available\n");
1844 return AVERROR_INVALIDDATA;
/* Field pictures: offset the bottom field by one line and double the
 * strides so decoding addresses only the lines of the current field. */
1847 if (s->picture_structure!= PICT_FRAME) {
1849 for (i = 0; i < 4; i++) {
1850 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1851 s->current_picture.f->data[i] +=
1852 s->current_picture.f->linesize[i];
1854 s->current_picture.f->linesize[i] *= 2;
1855 s->last_picture.f->linesize[i] *= 2;
1856 s->next_picture.f->linesize[i] *= 2;
1860 s->err_recognition = avctx->err_recognition;
1862 /* set dequantizer, we can't do it during init as
1863 * it might change for mpeg4 and we can't do it in the header
1864 * decode as init is not called for mpeg4 there yet */
1865 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1866 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1867 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1868 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1869 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1870 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1872 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1873 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
/* Deprecated XvMC acceleration hook (compiled only under FF_API_XVMC). */
1877 FF_DISABLE_DEPRECATION_WARNINGS
1878 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1879 return ff_xvmc_field_start(s, avctx);
1880 FF_ENABLE_DEPRECATION_WARNINGS
1881 #endif /* FF_API_XVMC */
1886 /* called after a frame has been decoded. */
1887 void ff_MPV_frame_end(MpegEncContext *s)
/* Deprecated XvMC hook (compiled only under FF_API_XVMC). */
1890 FF_DISABLE_DEPRECATION_WARNINGS
1891 /* redraw edges for the frame if decoding didn't complete */
1892 // just to make sure that all data is rendered.
1893 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1894 ff_xvmc_field_end(s);
1896 FF_ENABLE_DEPRECATION_WARNINGS
1897 #endif /* FF_API_XVMC */
/* Signal frame-threading waiters that this reference frame is complete. */
1901 if (s->current_picture.reference)
1902 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1906 * Print debugging info for the given picture.
1908 void ff_print_debug_info(MpegEncContext *s, Picture *p)
/* Nothing useful to print for hwaccel frames or pictures without MB data. */
1911 if (s->avctx->hwaccel || !p || !p->mb_type)
1915 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1918 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
/* NOTE(review): this switches on 'pict' while the parameter is 'p' —
 * presumably a local alias declared in an elided line; verify. */
1919 switch (pict->pict_type) {
1920 case AV_PICTURE_TYPE_I:
1921 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1923 case AV_PICTURE_TYPE_P:
1924 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1926 case AV_PICTURE_TYPE_B:
1927 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1929 case AV_PICTURE_TYPE_S:
1930 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1932 case AV_PICTURE_TYPE_SI:
1933 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1935 case AV_PICTURE_TYPE_SP:
1936 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
/* Emit one debug character (or digit) per macroblock, row by row. */
1939 for (y = 0; y < s->mb_height; y++) {
1940 for (x = 0; x < s->mb_width; x++) {
1941 if (s->avctx->debug & FF_DEBUG_SKIP) {
1942 int count = s->mbskip_table[x + y * s->mb_stride];
1945 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1947 if (s->avctx->debug & FF_DEBUG_QP) {
1948 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1949 p->qscale_table[x + y * s->mb_stride]);
1951 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1952 int mb_type = p->mb_type[x + y * s->mb_stride];
1953 // Type & MV direction
1954 if (IS_PCM(mb_type))
1955 av_log(s->avctx, AV_LOG_DEBUG, "P");
1956 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1957 av_log(s->avctx, AV_LOG_DEBUG, "A");
1958 else if (IS_INTRA4x4(mb_type))
1959 av_log(s->avctx, AV_LOG_DEBUG, "i");
1960 else if (IS_INTRA16x16(mb_type))
1961 av_log(s->avctx, AV_LOG_DEBUG, "I");
1962 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1963 av_log(s->avctx, AV_LOG_DEBUG, "d");
1964 else if (IS_DIRECT(mb_type))
1965 av_log(s->avctx, AV_LOG_DEBUG, "D");
1966 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1967 av_log(s->avctx, AV_LOG_DEBUG, "g");
1968 else if (IS_GMC(mb_type))
1969 av_log(s->avctx, AV_LOG_DEBUG, "G");
1970 else if (IS_SKIP(mb_type))
1971 av_log(s->avctx, AV_LOG_DEBUG, "S");
1972 else if (!USES_LIST(mb_type, 1))
1973 av_log(s->avctx, AV_LOG_DEBUG, ">");
1974 else if (!USES_LIST(mb_type, 0))
1975 av_log(s->avctx, AV_LOG_DEBUG, "<");
1977 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1978 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second character: partitioning (8x8 / 16x8 / 8x16 / 16x16). */
1982 if (IS_8X8(mb_type))
1983 av_log(s->avctx, AV_LOG_DEBUG, "+");
1984 else if (IS_16X8(mb_type))
1985 av_log(s->avctx, AV_LOG_DEBUG, "-");
1986 else if (IS_8X16(mb_type))
1987 av_log(s->avctx, AV_LOG_DEBUG, "|");
1988 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1989 av_log(s->avctx, AV_LOG_DEBUG, " ");
1991 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* Third character: '=' marks an interlaced macroblock. */
1994 if (IS_INTERLACED(mb_type))
1995 av_log(s->avctx, AV_LOG_DEBUG, "=");
1997 av_log(s->avctx, AV_LOG_DEBUG, " ");
2000 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2006 * find the lowest MB row referenced in the MVs
2008 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* Half-pel MVs need one extra shift to reach the same 1/64-pel scale as
 * quarter-pel MVs (hence qpel_shift = !quarter_sample). */
2010 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2011 int my, off, i, mvs;
/* Field pictures and GMC are handled conservatively (elided fast-exit). */
2013 if (s->picture_structure != PICT_FRAME || s->mcsel)
2016 switch (s->mv_type) {
/* Track the extreme vertical MV components over all MVs of this MB. */
2030 for (i = 0; i < mvs; i++) {
2031 my = s->mv[dir][i][1]<<qpel_shift;
2032 my_max = FFMAX(my_max, my);
2033 my_min = FFMIN(my_min, my);
/* Convert the worst-case 1/64-pel displacement to whole MB rows
 * (round up), then clamp the result into [0, mb_height-1]. */
2036 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2038 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* Fallback: assume the whole reference frame may be needed. */
2040 return s->mb_height-1;
2043 /* put block[] to dest[]: dequantize with the intra unquantizer, then
 * write (not add) the IDCT result into dest. */
2044 static inline void put_dct(MpegEncContext *s,
2045 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2047 s->dct_unquantize_intra(s, block, i, qscale);
2048 s->idsp.idct_put(dest, line_size, block);
2051 /* add block[] to dest[]: IDCT-add the (already dequantized) residual,
 * skipping blocks with no coded coefficients (block_last_index < 0). */
2052 static inline void add_dct(MpegEncContext *s,
2053 int16_t *block, int i, uint8_t *dest, int line_size)
2055 if (s->block_last_index[i] >= 0) {
2056 s->idsp.idct_add(dest, line_size, block);
/* Like add_dct() but dequantizes first with the inter unquantizer;
 * empty blocks (block_last_index < 0) are skipped entirely. */
2060 static inline void add_dequant_dct(MpegEncContext *s,
2061 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2063 if (s->block_last_index[i] >= 0) {
2064 s->dct_unquantize_inter(s, block, i, qscale);
2066 s->idsp.idct_add(dest, line_size, block);
2071 * Clean dc, ac, coded_block for the current non-intra MB.
2073 void ff_clean_intra_table_entries(MpegEncContext *s)
2075 int wrap = s->b8_stride;
2076 int xy = s->block_index[0];
/* Reset luma DC predictors of the four 8x8 blocks to the neutral 1024. */
2079 s->dc_val[0][xy + 1 ] =
2080 s->dc_val[0][xy + wrap] =
2081 s->dc_val[0][xy + 1 + wrap] = 1024;
/* Clear the luma AC prediction values (two rows of 8x8 blocks). */
2083 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2084 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2085 if (s->msmpeg4_version>=3) {
2086 s->coded_block[xy ] =
2087 s->coded_block[xy + 1 ] =
2088 s->coded_block[xy + wrap] =
2089 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma uses macroblock resolution (mb_stride), not 8x8 resolution. */
2092 wrap = s->mb_stride;
2093 xy = s->mb_x + s->mb_y * wrap;
2095 s->dc_val[2][xy] = 1024;
2097 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2098 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
/* Mark this MB as no longer intra-coded. */
2100 s->mbintra_table[xy]= 0;
2103 /* generic function called after a macroblock has been parsed by the
2104 decoder or after it has been encoded by the encoder.
2106 Important variables used:
2107 s->mb_intra : true if intra macroblock
2108 s->mv_dir : motion vector direction
2109 s->mv_type : motion vector type
2110 s->mv : motion vector
2111 s->interlaced_dct : true if interlaced dct used (mpeg2)
2113 static av_always_inline
2114 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2117 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* Deprecated XvMC path: XvMC consumes the pblocks itself. */
2120 FF_DISABLE_DEPRECATION_WARNINGS
2121 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2122 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2125 FF_ENABLE_DEPRECATION_WARNINGS
2126 #endif /* FF_API_XVMC */
2128 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2129 /* print DCT coefficients */
2131 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2133 for(j=0; j<64; j++){
2134 av_log(s->avctx, AV_LOG_DEBUG, "%5d",
2135 block[i][s->idsp.idct_permutation[j]]);
2137 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2141 s->current_picture.qscale_table[mb_xy] = s->qscale;
2143 /* update DC predictors for P macroblocks */
2145 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2146 if(s->mbintra_table[mb_xy])
2147 ff_clean_intra_table_entries(s);
2151 s->last_dc[2] = 128 << s->intra_dc_precision;
2154 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2155 s->mbintra_table[mb_xy]=1;
2157 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2158 uint8_t *dest_y, *dest_cb, *dest_cr;
2159 int dct_linesize, dct_offset;
2160 op_pixels_func (*op_pix)[4];
2161 qpel_mc_func (*op_qpix)[16];
2162 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2163 const int uvlinesize = s->current_picture.f->linesize[1];
2164 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2165 const int block_size = 8;
2167 /* avoid copy if macroblock skipped in last frame too */
2168 /* skip only during decoding as we might trash the buffers during encoding a bit */
2170 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2172 if (s->mb_skipped) {
2174 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2176 } else if(!s->current_picture.reference) {
2179 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT: double the stride and make the "lower half" offset a
 * single line, so the two fields interleave correctly. */
2183 dct_linesize = linesize << s->interlaced_dct;
2184 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
/* Destination: the picture itself when readable, otherwise a scratchpad
 * (B frames without draw_horiz_band need not be written back yet). */
2188 dest_cb= s->dest[1];
2189 dest_cr= s->dest[2];
2191 dest_y = s->b_scratchpad;
2192 dest_cb= s->b_scratchpad+16*linesize;
2193 dest_cr= s->b_scratchpad+32*linesize;
2197 /* motion handling */
2198 /* decoding or more than one mb_type (MC was already done otherwise) */
/* Frame threading: wait until the reference rows this MB's MVs touch
 * have actually been decoded by the other thread. */
2201 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2202 if (s->mv_dir & MV_DIR_FORWARD) {
2203 ff_thread_await_progress(&s->last_picture_ptr->tf,
2204 ff_MPV_lowest_referenced_row(s, 0),
2207 if (s->mv_dir & MV_DIR_BACKWARD) {
2208 ff_thread_await_progress(&s->next_picture_ptr->tf,
2209 ff_MPV_lowest_referenced_row(s, 1),
/* Forward MC uses put (or put_no_rnd); a following backward pass then
 * averages on top for bidirectional prediction. */
2214 op_qpix= s->me.qpel_put;
2215 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2216 op_pix = s->hdsp.put_pixels_tab;
2218 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2220 if (s->mv_dir & MV_DIR_FORWARD) {
2221 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2222 op_pix = s->hdsp.avg_pixels_tab;
2223 op_qpix= s->me.qpel_avg;
2225 if (s->mv_dir & MV_DIR_BACKWARD) {
2226 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2230 /* skip dequant / idct if we are really late ;) */
2231 if(s->avctx->skip_idct){
2232 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2233 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2234 || s->avctx->skip_idct >= AVDISCARD_ALL)
2238 /* add dct residue */
/* Path 1: codecs whose residual still needs dequantization here. */
2239 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2240 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2241 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2242 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2243 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2244 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2246 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2247 if (s->chroma_y_shift){
2248 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2249 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2253 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2254 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2255 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2256 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* Path 2: residual already dequantized (MPEG-1/2, msmpeg4, ...). */
2259 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2260 add_dct(s, block[0], 0, dest_y , dct_linesize);
2261 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2262 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2263 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2265 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2266 if(s->chroma_y_shift){//Chroma420
2267 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2268 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2271 dct_linesize = uvlinesize << s->interlaced_dct;
2272 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2274 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2275 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2276 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2277 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2278 if(!s->chroma_x_shift){//Chroma444
2279 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2280 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2281 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2282 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
/* Path 3: WMV2 has its own residual-add routine. */
2287 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2288 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2291 /* dct only in intra block */
/* Intra MBs: write (put) the IDCT output instead of adding a residual. */
2292 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2293 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2294 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2295 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2296 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2298 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2299 if(s->chroma_y_shift){
2300 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2301 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2305 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2306 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2307 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2308 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* Intra MPEG-1/2: blocks are already dequantized, plain idct_put. */
2312 s->idsp.idct_put(dest_y, dct_linesize, block[0]);
2313 s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2314 s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
2315 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2317 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2318 if(s->chroma_y_shift){
2319 s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
2320 s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
2323 dct_linesize = uvlinesize << s->interlaced_dct;
2324 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2326 s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
2327 s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
2328 s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2329 s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2330 if(!s->chroma_x_shift){//Chroma444
2331 s->idsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2332 s->idsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2333 s->idsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2334 s->idsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* Scratchpad path: copy the finished MB back into the picture. */
2342 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2343 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2344 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Thin dispatcher: instantiate MPV_decode_mb_internal() with the
 * compile-time is_mpeg12 flag so each path is optimized separately. */
2349 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2351 if(s->out_format == FMT_MPEG1) {
2352 MPV_decode_mb_internal(s, block, 1);
2355 MPV_decode_mb_internal(s, block, 0);
/* Forward a completed band of rows [y, y+h) to the generic
 * draw_horiz_band helper with this context's field/delay settings. */
2358 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2360 ff_draw_horiz_band(s->avctx, s->current_picture.f,
2361 s->last_picture.f, y, h, s->picture_structure,
2362 s->first_field, s->low_delay);
/* Set up block_index[0..5] and the dest[] pointers for the current MB
 * position (s->mb_x/s->mb_y). Indices 0-3 are the four luma 8x8 blocks,
 * 4 and 5 the chroma blocks. */
2365 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2366 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2367 const int uvlinesize = s->current_picture.f->linesize[1];
2368 const int mb_size= 4;
2370 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2371 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2372 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2373 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2374 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2375 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2376 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* dest[] starts one MB to the left; mb_size = 4 is log2 of the 16-pixel
 * macroblock width, chroma shifted by the subsampling factors. */
2378 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
2379 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2380 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2382 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
/* Field pictures advance by half the MB rows (mb_y >> 1). */
2384 if(s->picture_structure==PICT_FRAME){
2385 s->dest[0] += s->mb_y * linesize << mb_size;
2386 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2387 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2389 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2390 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2391 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2392 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2398 * Permute an 8x8 block.
2399 * @param block the block which will be permuted according to the given permutation vector
2400 * @param permutation the permutation vector
2401 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
2402 * @param scantable the used scantable; this is only used to speed the permutation up, the block is not
2403 * (inverse) permuted to scantable order!
// Permute the coefficients of an 8x8 block in place according to
// `permutation`, visiting only the coefficients up to `last` (in scantable
// order) for speed.
//
// NOTE(review): this excerpt is an elided listing; the local declarations
// and the body of the first loop (the copy into a temp buffer that the
// second loop reads back from) are not visible here.
2405 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
2411 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
// First pass: walk the nonzero coefficients in scan order (body elided).
2413     for(i=0; i<=last; i++){
2414         const int j= scantable[i];
// Second pass: scatter each saved coefficient to its permuted position.
2419     for(i=0; i<=last; i++){
2420         const int j= scantable[i];
2421         const int perm_j= permutation[j];
2422         block[perm_j]= temp[j];
/**
 * Flush the decoder state: release every picture buffer, reset the
 * reference-picture pointers, the macroblock position, the parser state
 * and the bitstream buffer. Called e.g. on seek.
 *
 * NOTE(review): this excerpt is an elided listing; the `int i;`
 * declaration, the early `return;` and the closing brace are not visible.
 */
2426 void ff_mpeg_flush(AVCodecContext *avctx){
2428     MpegEncContext *s = avctx->priv_data;
// Nothing to flush if the context/picture pool was never allocated.
2430     if(s==NULL || s->picture==NULL)
// Drop every picture in the pool and the three working picture copies.
2433     for (i = 0; i < MAX_PICTURE_COUNT; i++)
2434         ff_mpeg_unref_picture(s, &s->picture[i]);
2435     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2437     ff_mpeg_unref_picture(s, &s->current_picture);
2438     ff_mpeg_unref_picture(s, &s->last_picture);
2439     ff_mpeg_unref_picture(s, &s->next_picture);
2441     s->mb_x= s->mb_y= 0;
// Reset the stream parser so the next packet is treated as a fresh start.
2443     s->parse_context.state= -1;
2444     s->parse_context.frame_start_found= 0;
2445     s->parse_context.overread= 0;
2446     s->parse_context.overread_index= 0;
2447     s->parse_context.index= 0;
2448     s->parse_context.last_index= 0;
2449     s->bitstream_buffer_size=0;
2454 * set qscale and update qscale dependent variables.
/**
 * Set the quantizer scale and update everything derived from it: the
 * chroma quantizer and the luma/chroma DC scale factors.
 *
 * NOTE(review): this excerpt is an elided listing; the lower clamp
 * (`qscale < 1`), both clamp assignments, the `s->qscale = qscale;` store
 * and the closing brace are not visible here.
 */
2456 void ff_set_qscale(MpegEncContext * s, int qscale)
// Upper clamp of the valid 1..31 qscale range (assignment elided).
2460     else if (qscale > 31)
// Derive the chroma quantizer and the DC scale factors from their tables,
// keeping them consistent with the (clamped) qscale.
2464     s->chroma_qscale= s->chroma_qscale_table[qscale];
2466     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2467     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/**
 * Report per-row decode progress (s->mb_y) on the current picture for
 * frame-threading consumers waiting on this frame.
 *
 * Progress is only reported for non-B, non-partitioned frames with no
 * decoding error so far — NOTE(review): rationale inferred from the
 * condition (mb_y-based progress would be unreliable otherwise); confirm.
 */
2470 void ff_MPV_report_decode_progress(MpegEncContext *s)
2472     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2473         ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);