2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
40 #include "mpegutils.h"
41 #include "mpegvideo.h"
45 #include "xvmc_internal.h"
/* Shared lookup tables for the MPEG-family codecs. */
/* Default chroma qscale mapping: identity — chroma uses the same qscale as luma. */
49 static const uint8_t ff_default_chroma_qscale_table[32] = {
50 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
51 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
52 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1: the intra DC coefficient scale is a constant 8 for every qscale index. */
55 const uint8_t ff_mpeg1_dc_scale_table[128] = {
56 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
57 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale, entry 1 of ff_mpeg2_dc_scale_table: constant 4
 * (presumably intra_dc_precision == 1 — see the index table below). */
67 static const uint8_t mpeg2_dc_scale_table1[128] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale, entry 2: constant 2. */
79 static const uint8_t mpeg2_dc_scale_table2[128] = {
80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale, entry 3: constant 1 (no DC scaling). */
91 static const uint8_t mpeg2_dc_scale_table3[128] = {
92 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
93 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale tables selectable per stream; entry 0 reuses the MPEG-1 table
 * (index is presumably intra_dc_precision, 0..3 — confirm against callers). */
103 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
104 ff_mpeg1_dc_scale_table,
105 mpeg2_dc_scale_table1,
106 mpeg2_dc_scale_table2,
107 mpeg2_dc_scale_table3,
/* Alternate horizontal coefficient scan order (see ff_mpv_idct_init, which
 * permutes it into intra_h_scantable). */
110 const uint8_t ff_alternate_horizontal_scan[64] = {
111 0, 1, 2, 3, 8, 9, 16, 17,
112 10, 11, 4, 5, 6, 7, 15, 14,
113 13, 12, 19, 18, 24, 25, 32, 33,
114 26, 27, 20, 21, 22, 23, 28, 29,
115 30, 31, 34, 35, 40, 41, 48, 49,
116 42, 43, 36, 37, 38, 39, 44, 45,
117 46, 47, 50, 51, 56, 57, 58, 59,
118 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical scan order; used instead of zigzag when alternate_scan
 * is set (see ff_mpv_idct_init). */
121 const uint8_t ff_alternate_vertical_scan[64] = {
122 0, 8, 16, 24, 1, 9, 2, 10,
123 17, 25, 32, 40, 48, 56, 57, 49,
124 41, 33, 26, 18, 3, 11, 4, 12,
125 19, 27, 34, 42, 50, 58, 35, 43,
126 51, 59, 20, 28, 5, 13, 6, 14,
127 21, 29, 36, 44, 52, 60, 37, 45,
128 53, 61, 22, 30, 7, 15, 23, 31,
129 38, 46, 54, 62, 39, 47, 55, 63,
/* C reference MPEG-1 intra-block dequantization.
 * n selects the block (and thus luma vs. chroma DC scale); qscale is the
 * quantizer for this macroblock. Operates in place on the 64-coeff block. */
132 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
133 int16_t *block, int n, int qscale)
135 int i, level, nCoeffs;
136 const uint16_t *quant_matrix;
/* Only coefficients up to the last nonzero index need rescaling. */
138 nCoeffs= s->block_last_index[n];
/* DC is rescaled separately: luma by y_dc_scale, chroma by c_dc_scale
 * (the selecting condition on n is not visible here). */
141 block[0] = block[0] * s->y_dc_scale;
143 block[0] = block[0] * s->c_dc_scale;
144 /* XXX: only mpeg1 */
145 quant_matrix = s->intra_matrix;
146 for(i=1;i<=nCoeffs;i++) {
/* Walk AC coefficients in IDCT-permuted scan order. */
147 int j= s->intra_scantable.permutated[i];
/* Rescale by quantizer * matrix entry, then force the result odd
 * ((level - 1) | 1) — MPEG-1 oddification / mismatch control.
 * The two copies presumably cover the negative/positive sign branches. */
152 level = (int)(level * qscale * quant_matrix[j]) >> 3;
153 level = (level - 1) | 1;
156 level = (int)(level * qscale * quant_matrix[j]) >> 3;
157 level = (level - 1) | 1;
/* C reference MPEG-1 inter-block dequantization.
 * Unlike the intra path there is no special DC handling: all coefficients,
 * including index 0, use the inter matrix and the (2*level + 1) reconstruction. */
164 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
165 int16_t *block, int n, int qscale)
167 int i, level, nCoeffs;
168 const uint16_t *quant_matrix;
170 nCoeffs= s->block_last_index[n];
172 quant_matrix = s->inter_matrix;
173 for(i=0; i<=nCoeffs; i++) {
/* intra_scantable is used here too; its permutation is what matters. */
174 int j= s->intra_scantable.permutated[i];
/* ((2*|level| + 1) * qscale * matrix) >> 4, then force odd (mismatch
 * control). The two copies presumably cover the two sign branches. */
179 level = (((level << 1) + 1) * qscale *
180 ((int) (quant_matrix[j]))) >> 4;
181 level = (level - 1) | 1;
184 level = (((level << 1) + 1) * qscale *
185 ((int) (quant_matrix[j]))) >> 4;
186 level = (level - 1) | 1;
/* C reference MPEG-2 intra-block dequantization.
 * Same rescaling as MPEG-1 intra but without the per-coefficient
 * oddification; MPEG-2 handles mismatch control differently. */
193 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
194 int16_t *block, int n, int qscale)
196 int i, level, nCoeffs;
197 const uint16_t *quant_matrix;
/* With alternate scan the last-index shortcut is unreliable, so always
 * process all 63 AC coefficients. */
199 if(s->alternate_scan) nCoeffs= 63;
200 else nCoeffs= s->block_last_index[n];
/* DC: luma uses y_dc_scale, chroma c_dc_scale (selector not visible here). */
203 block[0] = block[0] * s->y_dc_scale;
205 block[0] = block[0] * s->c_dc_scale;
206 quant_matrix = s->intra_matrix;
207 for(i=1;i<=nCoeffs;i++) {
208 int j= s->intra_scantable.permutated[i];
/* (level * qscale * matrix) >> 3; the two copies presumably cover
 * the negative/positive sign branches. */
213 level = (int)(level * qscale * quant_matrix[j]) >> 3;
216 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bitexact variant of MPEG-2 intra dequantization; installed instead of
 * dct_unquantize_mpeg2_intra_c when CODEC_FLAG_BITEXACT is set (see dct_init).
 * The visible body mirrors the non-bitexact version; the differing details
 * (e.g. the final mismatch/sum handling) are not visible in this excerpt. */
223 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
224 int16_t *block, int n, int qscale)
226 int i, level, nCoeffs;
227 const uint16_t *quant_matrix;
/* Alternate scan defeats the last-index shortcut: process all 63 ACs. */
230 if(s->alternate_scan) nCoeffs= 63;
231 else nCoeffs= s->block_last_index[n];
/* DC: luma vs. chroma scale (selector not visible here). */
234 block[0] = block[0] * s->y_dc_scale;
236 block[0] = block[0] * s->c_dc_scale;
237 quant_matrix = s->intra_matrix;
238 for(i=1;i<=nCoeffs;i++) {
239 int j= s->intra_scantable.permutated[i];
244 level = (int)(level * qscale * quant_matrix[j]) >> 3;
247 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* C reference MPEG-2 inter-block dequantization.
 * (2*level + 1) reconstruction with the inter matrix; no per-coefficient
 * oddification (contrast with the MPEG-1 inter path). */
256 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
257 int16_t *block, int n, int qscale)
259 int i, level, nCoeffs;
260 const uint16_t *quant_matrix;
/* Alternate scan defeats the last-index shortcut: process all coefficients. */
263 if(s->alternate_scan) nCoeffs= 63;
264 else nCoeffs= s->block_last_index[n];
266 quant_matrix = s->inter_matrix;
267 for(i=0; i<=nCoeffs; i++) {
268 int j= s->intra_scantable.permutated[i];
/* ((2*|level| + 1) * qscale * matrix) >> 4; the two copies presumably
 * cover the two sign branches. */
273 level = (((level << 1) + 1) * qscale *
274 ((int) (quant_matrix[j]))) >> 4;
277 level = (((level << 1) + 1) * qscale *
278 ((int) (quant_matrix[j]))) >> 4;
/* C reference H.263-style intra dequantization: level' = level*qmul ± qadd
 * (no quant matrix). qmul is presumably 2*qscale (assignment not visible). */
287 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
288 int16_t *block, int n, int qscale)
290 int i, level, qmul, qadd;
293 assert(s->block_last_index[n]>=0);
/* DC: luma vs. chroma scale (selector not visible here). */
299 block[0] = block[0] * s->y_dc_scale;
301 block[0] = block[0] * s->c_dc_scale;
/* qadd = 2*qscale - 2 rounded to odd — the standard H.263 reconstruction
 * offset; forced odd by the | 1. */
302 qadd = (qscale - 1) | 1;
/* raster_end converts the scan-order last index into a raster-order bound,
 * so the loop below can walk the block linearly. */
309 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
311 for(i=1; i<=nCoeffs; i++) {
/* Subtract qadd for negative levels, add it for positive ones
 * (sign conditions not visible in this excerpt). */
315 level = level * qmul - qadd;
317 level = level * qmul + qadd;
/* C reference H.263-style inter dequantization; same level*qmul ± qadd
 * reconstruction as the intra variant but with no special DC handling. */
324 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
325 int16_t *block, int n, int qscale)
327 int i, level, qmul, qadd;
330 assert(s->block_last_index[n]>=0);
/* Odd reconstruction offset, as in the intra path. */
332 qadd = (qscale - 1) | 1;
/* Raster-order bound derived from the scan-order last index. */
335 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
337 for(i=0; i<=nCoeffs; i++) {
/* Negative/positive sign branches (conditions not visible here). */
341 level = level * qmul - qadd;
343 level = level * qmul + qadd;
/* Error-resilience callback (installed as er->decode_mb in init_er):
 * reconstructs one macroblock during concealment. opaque is the
 * MpegEncContext; mv/mv_dir/mv_type describe the guessed motion. */
350 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
352 int mb_x, int mb_y, int mb_intra, int mb_skipped)
354 MpegEncContext *s = opaque;
/* Stage the MB parameters in the context so ff_mpv_decode_mb can use them. */
357 s->mv_type = mv_type;
358 s->mb_intra = mb_intra;
359 s->mb_skipped = mb_skipped;
362 memcpy(s->mv, mv, sizeof(*mv));
364 ff_init_block_index(s);
365 ff_update_block_index(s);
/* Concealment supplies no residual, so clear the coefficient blocks. */
367 s->bdsp.clear_blocks(s->block[0]);
/* Point dest[] at this MB's pixels in the current picture; chroma planes
 * are scaled down by the chroma shifts. */
369 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
370 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
371 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
/* Known limitation, surfaced at debug level only. */
374 av_log(s->avctx, AV_LOG_DEBUG,
375 "Interlaced error concealment is not fully implemented\n");
376 ff_mpv_decode_mb(s, s->block);
379 /* init common dct for both encoder and decoder */
/* Initializes DSP helpers and installs the C dequantization functions,
 * then lets per-architecture init hooks override them with SIMD versions. */
380 static av_cold int dct_init(MpegEncContext *s)
382 ff_blockdsp_init(&s->bdsp, s->avctx);
383 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
384 ff_mpegvideodsp_init(&s->mdsp);
385 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* Default to the C reference implementations defined above. */
387 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
388 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
389 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
390 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
391 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* Bitexact mode swaps in the reproducible MPEG-2 intra variant. */
392 if (s->avctx->flags & CODEC_FLAG_BITEXACT)
393 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
394 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Architecture-specific overrides (guards for ARM/PPC/x86 elided here). */
396 if (HAVE_INTRINSICS_NEON)
397 ff_mpv_common_init_neon(s);
400 ff_mpv_common_init_arm(s);
402 ff_mpv_common_init_ppc(s);
404 ff_mpv_common_init_x86(s);
/* Initializes the IDCT DSP context and builds the scan tables, permuting
 * each raster-order scan by the IDCT's coefficient permutation. */
409 av_cold void ff_mpv_idct_init(MpegEncContext *s)
411 ff_idctdsp_init(&s->idsp, s->avctx);
413 /* load & permutate scantables
414 * note: only wmv uses different ones
/* alternate_scan switches both intra and inter tables to the vertical scan;
 * otherwise plain zigzag is used. */
416 if (s->alternate_scan) {
417 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
418 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
420 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
421 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
/* The explicit horizontal/vertical tables are always available. */
423 ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
424 ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocates the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads). Returns 0 on success, AVERROR(ENOMEM)
 * on failure (partially allocated buffers are freed). */
427 static int frame_size_alloc(MpegEncContext *s, int linesize)
428 429 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
431 // edge emu needs blocksize + filter length - 1
432 // (= 17x17 for halfpel / 21x21 for h264)
433 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
434 // at uvlinesize. It supports only YUV420 so 24x24 is enough
435 // linesize * interlaced * MBsize
436 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
439 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
/* All scratchpads alias the single me.scratchpad allocation; only the
 * OBMC pad is offset (by 16 bytes). */
441 s->me.temp = s->me.scratchpad;
442 s->rd_scratchpad = s->me.scratchpad;
443 s->b_scratchpad = s->me.scratchpad;
444 s->obmc_scratchpad = s->me.scratchpad + 16;
/* Failure path: release whatever was allocated before reporting OOM. */
448 av_freep(&s->edge_emu_buffer);
449 return AVERROR(ENOMEM);
453 * Allocate a frame buffer
/* Gets pixel storage for pic: encoders over-allocate by EDGE_WIDTH on every
 * side and then advance the data pointers past the edges; decoders go through
 * the (possibly threaded) get_buffer path. Also validates strides and ensures
 * the linesize-dependent scratch buffers exist. */
455 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* Encoders need edge padding; the WM image/screen codecs are excluded
 * because they manage their own internal buffers. */
457 int edges_needed = av_codec_is_encoder(s->avctx->codec);
461 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
462 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
463 s->codec_id != AV_CODEC_ID_MSS2) {
/* Edge case: request a frame enlarged by EDGE_WIDTH on all four sides. */
465 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
466 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
/* Thread-aware buffer request; reference frames are flagged so the
 * allocator keeps them alive across frames. */
469 r = ff_thread_get_buffer(s->avctx, &pic->tf,
470 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* Fallback path (for the excluded codecs): default allocator, exact size. */
472 pic->f->width = s->avctx->width;
473 pic->f->height = s->avctx->height;
474 pic->f->format = s->avctx->pix_fmt;
475 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
478 if (r < 0 || !pic->f->buf[0]) {
479 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* When edges were requested: move each plane pointer inside the padded
 * area (chroma offsets scaled by the chroma shifts) and restore the
 * nominal dimensions. */
486 for (i = 0; pic->f->data[i]; i++) {
487 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
488 pic->f->linesize[i] +
489 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
490 pic->f->data[i] += offset;
492 pic->f->width = s->avctx->width;
493 pic->f->height = s->avctx->height;
/* Hardware acceleration: allocate per-frame private data if the hwaccel
 * declares a size for it. */
496 if (s->avctx->hwaccel) {
497 assert(!pic->hwaccel_picture_private);
498 if (s->avctx->hwaccel->frame_priv_data_size) {
499 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
500 if (!pic->hwaccel_priv_buf) {
501 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
504 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* The context caches linesizes; a buffer with different strides than the
 * cached ones cannot be used. */
508 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
509 s->uvlinesize != pic->f->linesize[1])) {
510 av_log(s->avctx, AV_LOG_ERROR,
511 "get_buffer() failed (stride changed)\n");
512 ff_mpeg_unref_picture(s->avctx, pic);
/* Both chroma planes must share one stride. */
516 if (pic->f->linesize[1] != pic->f->linesize[2]) {
517 av_log(s->avctx, AV_LOG_ERROR,
518 "get_buffer() failed (uv stride mismatch)\n");
519 ff_mpeg_unref_picture(s->avctx, pic);
/* First successful allocation also sizes the shared scratch buffers. */
523 if (!s->edge_emu_buffer &&
524 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
525 av_log(s->avctx, AV_LOG_ERROR,
526 "get_buffer() failed to allocate context scratch buffers.\n");
527 ff_mpeg_unref_picture(s->avctx, pic);
/* Releases every per-picture side-data buffer (variance, skip, qscale,
 * mb_type, motion vectors, reference indices). Safe to call on a picture
 * whose buffers are already NULL — av_buffer_unref tolerates that. */
534 void ff_free_picture_tables(Picture *pic)
538 av_buffer_unref(&pic->mb_var_buf);
539 av_buffer_unref(&pic->mc_mb_var_buf);
540 av_buffer_unref(&pic->mb_mean_buf);
541 av_buffer_unref(&pic->mbskip_table_buf);
542 av_buffer_unref(&pic->qscale_table_buf);
543 av_buffer_unref(&pic->mb_type_buf);
/* Two entries: forward/backward motion and reference-index planes. */
545 for (i = 0; i < 2; i++) {
546 av_buffer_unref(&pic->motion_val_buf[i]);
547 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocates the per-picture side-data buffers sized from the current
 * macroblock geometry. Returns 0 or AVERROR(ENOMEM); on failure, already
 * allocated buffers are left for the caller to release
 * (ff_free_picture_tables). */
551 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
553 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
554 const int mb_array_size = s->mb_stride * s->mb_height;
555 const int b8_array_size = s->b8_stride * s->mb_height * 2;
/* Tables needed by both encoder and decoder. */
559 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
560 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
561 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
563 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
564 return AVERROR(ENOMEM);
/* Encoder-side statistics tables (guard condition elided in this view). */
567 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
568 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
569 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
570 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
571 return AVERROR(ENOMEM);
/* Motion vectors / reference indices: needed for H.263-family output or
 * whenever we are encoding. */
574 if (s->out_format == FMT_H263 || s->encoding) {
575 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
576 int ref_index_size = 4 * mb_array_size;
578 for (i = 0; mv_size && i < 2; i++) {
579 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
580 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
581 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
582 return AVERROR(ENOMEM);
/* Ensures each per-picture side-data AVBuffer is writable (copy-on-write:
 * shared buffers are duplicated by av_buffer_make_writable). */
589 static int make_tables_writable(Picture *pic)
592 #define MAKE_WRITABLE(table) \
595 (ret = av_buffer_make_writable(&pic->table)) < 0)\
599 MAKE_WRITABLE(mb_var_buf);
600 MAKE_WRITABLE(mc_mb_var_buf);
601 MAKE_WRITABLE(mb_mean_buf);
602 MAKE_WRITABLE(mbskip_table_buf);
603 MAKE_WRITABLE(qscale_table_buf);
604 MAKE_WRITABLE(mb_type_buf);
606 for (i = 0; i < 2; i++) {
607 MAKE_WRITABLE(motion_val_buf[i]);
608 MAKE_WRITABLE(ref_index_buf[i]);
615 * Allocate a Picture.
616 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Fully sets up pic: pixel buffer (unless shared), per-picture tables, and
 * the convenience pointers into those tables. On any failure the picture is
 * unreferenced and its tables freed before AVERROR(ENOMEM) is returned. */
618 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* shared pictures must already have pixels; non-shared must not. */
623 assert(pic->f->data[0]);
626 assert(!pic->f->buf[0]);
628 if (alloc_frame_buffer(s, pic) < 0)
/* Cache the strides; later allocations are validated against these. */
631 s->linesize = pic->f->linesize[0];
632 s->uvlinesize = pic->f->linesize[1];
/* Allocate tables on first use, otherwise just un-share them. */
635 if (!pic->qscale_table_buf)
636 ret = alloc_picture_tables(s, pic);
638 ret = make_tables_writable(pic);
/* Encoder statistics pointers (guard condition elided in this view). */
643 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
644 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
645 pic->mb_mean = pic->mb_mean_buf->data;
648 pic->mbskip_table = pic->mbskip_table_buf->data;
/* qscale/mb_type skip the first two stride rows plus one — matching the
 * big_mb_num padding added in alloc_picture_tables. */
649 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
650 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
652 if (pic->motion_val_buf[0]) {
653 for (i = 0; i < 2; i++) {
/* +4 skips the padding motion vectors allocated in front of the array. */
654 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
655 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* Common failure path: undo everything done so far. */
661 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
662 ff_mpeg_unref_picture(s->avctx, pic);
663 ff_free_picture_tables(pic);
664 return AVERROR(ENOMEM);
668 * Deallocate a picture.
/* Releases pic's pixel buffer (via the threading layer where applicable)
 * and its hwaccel private data; side-data tables are only dropped when the
 * picture is flagged for reallocation. */
670 void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
673 /* WM Image / Screen codecs allocate internal buffers with different
674 * dimensions / colorspaces; ignore user-defined callbacks for these. */
675 if (avctx->codec->id != AV_CODEC_ID_WMV3IMAGE &&
676 avctx->codec->id != AV_CODEC_ID_VC1IMAGE &&
677 avctx->codec->id != AV_CODEC_ID_MSS2)
678 ff_thread_release_buffer(avctx, &pic->tf);
680 av_frame_unref(pic->f);
682 av_buffer_unref(&pic->hwaccel_priv_buf);
/* Geometry changed since allocation: the tables must be rebuilt next time. */
684 if (pic->needs_realloc)
685 ff_free_picture_tables(pic);
/* Makes dst's side-data table buffers reference src's (used by frame
 * threading and picture refs). Each buffer is re-referenced only if it
 * differs from what dst already holds; on reference failure all of dst's
 * tables are freed and AVERROR(ENOMEM) is returned. Finally the raw data
 * pointers are copied over. */
688 static int update_picture_tables(Picture *dst, Picture *src)
692 #define UPDATE_TABLE(table)\
695 (!dst->table || dst->table->buffer != src->table->buffer)) {\
696 av_buffer_unref(&dst->table);\
697 dst->table = av_buffer_ref(src->table);\
699 ff_free_picture_tables(dst);\
700 return AVERROR(ENOMEM);\
705 UPDATE_TABLE(mb_var_buf);
706 UPDATE_TABLE(mc_mb_var_buf);
707 UPDATE_TABLE(mb_mean_buf);
708 UPDATE_TABLE(mbskip_table_buf);
709 UPDATE_TABLE(qscale_table_buf);
710 UPDATE_TABLE(mb_type_buf);
711 for (i = 0; i < 2; i++) {
712 UPDATE_TABLE(motion_val_buf[i]);
713 UPDATE_TABLE(ref_index_buf[i]);
/* The convenience pointers can simply be copied — they point into the
 * buffers shared above. */
716 dst->mb_var = src->mb_var;
717 dst->mc_mb_var = src->mc_mb_var;
718 dst->mb_mean = src->mb_mean;
719 dst->mbskip_table = src->mbskip_table;
720 dst->qscale_table = src->qscale_table;
721 dst->mb_type = src->mb_type;
722 for (i = 0; i < 2; i++) {
723 dst->motion_val[i] = src->motion_val[i];
724 dst->ref_index[i] = src->ref_index[i];
/* Makes dst a new reference to src: frame buffer, side-data tables, hwaccel
 * private data, and the scalar bookkeeping fields. dst must be empty on
 * entry. On any failure dst is unreferenced again. */
730 int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
734 av_assert0(!dst->f->buf[0]);
735 av_assert0(src->f->buf[0]);
739 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
743 ret = update_picture_tables(dst, src);
/* hwaccel private data is reference-counted alongside the frame. */
747 if (src->hwaccel_picture_private) {
748 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
749 if (!dst->hwaccel_priv_buf)
751 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
/* Plain value fields. */
754 dst->field_picture = src->field_picture;
755 dst->mb_var_sum = src->mb_var_sum;
756 dst->mc_mb_var_sum = src->mc_mb_var_sum;
757 dst->b_frame_score = src->b_frame_score;
758 dst->needs_realloc = src->needs_realloc;
759 dst->reference = src->reference;
760 dst->shared = src->shared;
/* Failure path: leave dst empty again. */
764 ff_mpeg_unref_picture(avctx, dst);
/* Allocates the per-slice-thread state of an MpegEncContext: motion-estimation
 * maps, optional noise-reduction accumulator, coefficient blocks, and (for
 * H.263-family output) the AC prediction values. Returns 0 or -1; partial
 * allocations are released later through ff_mpv_common_end(). */
768 static int init_duplicate_context(MpegEncContext *s)
770 int y_size = s->b8_stride * (2 * s->mb_height + 1);
771 int c_size = s->mb_stride * (s->mb_height + 1);
772 int yc_size = y_size + 2 * c_size;
/* Reset; frame_size_alloc() sets this when the scratchpads exist. */
780 s->obmc_scratchpad = NULL;
/* Motion-estimation hash map and its score map (encoder side). */
783 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
784 ME_MAP_SIZE * sizeof(uint32_t), fail)
785 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
786 ME_MAP_SIZE * sizeof(uint32_t), fail)
/* Noise reduction needs a 2x64 error accumulator. */
787 if (s->avctx->noise_reduction) {
788 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
789 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients (double-buffered). */
792 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
793 s->block = s->blocks[0];
795 for (i = 0; i < 12; i++) {
796 s->pblocks[i] = &s->block[i];
/* VCR2 streams use a swapped U/V block order (the matching swap of
 * pblocks[5] is not visible in this excerpt). */
798 if (s->avctx->codec_tag == AV_RL32("VCR2")) {
802 s->pblocks[4] = s->pblocks[5];
806 if (s->out_format == FMT_H263) {
/* AC prediction values: one base allocation, carved into Y / U / V
 * regions with a one-row, one-column border. */
808 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
809 yc_size * sizeof(int16_t) * 16, fail);
810 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
811 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
812 s->ac_val[2] = s->ac_val[1] + c_size;
817 return -1; // free() through ff_mpv_common_end()
/* Frees everything allocated by init_duplicate_context() and
 * frame_size_alloc() for one slice-thread context. */
820 static void free_duplicate_context(MpegEncContext *s)
825 av_freep(&s->edge_emu_buffer);
826 av_freep(&s->me.scratchpad);
/* The other scratchpads alias me.scratchpad (see frame_size_alloc), so
 * only the pointers are cleared — no separate free. */
830 s->obmc_scratchpad = NULL;
832 av_freep(&s->dct_error_sum);
833 av_freep(&s->me.map);
834 av_freep(&s->me.score_map);
835 av_freep(&s->blocks);
836 av_freep(&s->ac_val_base);
/* Copies the per-thread pointer/state fields from src into bak; used by
 * ff_update_duplicate_context to preserve them across a whole-struct memcpy.
 * Only a subset of the COPY() list is visible in this excerpt. */
840 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
842 #define COPY(a) bak->a = src->a
843 COPY(edge_emu_buffer);
848 COPY(obmc_scratchpad);
855 COPY(me.map_generation);
/* Refreshes a slice-thread context from the master: the whole struct is
 * memcpy'd over, but the thread-local pointers saved by
 * backup_duplicate_context are restored afterwards, and the block pointers
 * are rebased onto dst's own storage. */
867 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
871 // FIXME copy only needed parts
/* Save thread-local fields, bulk-copy, then restore them. */
873 backup_duplicate_context(&bak, dst);
874 memcpy(dst, src, sizeof(MpegEncContext));
875 backup_duplicate_context(dst, &bak);
/* pblocks must point into dst's block array, not src's. */
876 for (i = 0; i < 12; i++) {
877 dst->pblocks[i] = &dst->block[i];
/* Re-apply the VCR2 U/V block swap for this context. */
879 if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
882 tmp = dst->pblocks[4];
883 dst->pblocks[4] = dst->pblocks[5];
884 dst->pblocks[5] = tmp;
/* Lazily (re)allocate the linesize-dependent scratch buffers. */
886 if (!dst->edge_emu_buffer &&
887 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
888 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
889 "scratch buffers.\n");
892 // STOP_TIMER("update_duplicate_context")
893 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: synchronizes the next worker's MpegEncContext (dst) from
 * the one that just finished a frame (src). Copies/rebases picture state,
 * headers-derived parameters, and the pending bitstream buffer. */
897 int ff_mpeg_update_thread_context(AVCodecContext *dst,
898 const AVCodecContext *src)
901 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
/* Nothing to do when updating from self or from an uninitialized source. */
903 if (dst == src || !s1->context_initialized)
906 // FIXME can parameters change on I-frames?
907 // in that case dst may need a reinit
/* First update: clone the source wholesale, then reset the pointers that
 * must be private to this context and run a fresh common init. */
908 if (!s->context_initialized) {
910 memcpy(s, s1, sizeof(MpegEncContext));
913 s->bitstream_buffer = NULL;
914 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
917 if ((err = ff_mpv_common_init(s)) < 0)
/* Resolution change (or explicit reinit request): resize frame-dependent
 * tables and mirror the codec context dimensions. */
921 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
923 s->context_reinit = 0;
924 s->height = s1->height;
925 s->width = s1->width;
926 if ((err = ff_mpv_common_frame_size_change(s)) < 0)
930 s->avctx->coded_height = s1->avctx->coded_height;
931 s->avctx->coded_width = s1->avctx->coded_width;
932 s->avctx->width = s1->avctx->width;
933 s->avctx->height = s1->avctx->height;
935 s->coded_picture_number = s1->coded_picture_number;
936 s->picture_number = s1->picture_number;
/* Re-reference the whole picture pool from the source context. */
938 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
939 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
940 if (s1->picture[i].f->buf[0] &&
941 (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
945 #define UPDATE_PICTURE(pic)\
947 ff_mpeg_unref_picture(s->avctx, &s->pic);\
948 if (s1->pic.f->buf[0])\
949 ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
951 ret = update_picture_tables(&s->pic, &s1->pic);\
956 UPDATE_PICTURE(current_picture);
957 UPDATE_PICTURE(last_picture);
958 UPDATE_PICTURE(next_picture);
/* Translate a Picture* into the equivalent slot of the other context's
 * pool; NULL when it does not point into the pool at all. */
960 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
961 ((pic && pic >= old_ctx->picture && \
962 pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
963 &new_ctx->picture[pic - old_ctx->picture] : NULL)
965 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
966 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
967 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
969 // Error/bug resilience
970 s->next_p_frame_damaged = s1->next_p_frame_damaged;
971 s->workaround_bugs = s1->workaround_bugs;
/* Bulk-copy the contiguous field range last_time_base..pb_field_time. */
974 memcpy(&s->last_time_base, &s1->last_time_base,
975 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
976 (char *) &s1->last_time_base);
979 s->max_b_frames = s1->max_b_frames;
980 s->low_delay = s1->low_delay;
981 s->droppable = s1->droppable;
983 // DivX handling (doesn't work)
984 s->divx_packed = s1->divx_packed;
/* Carry over any buffered bitstream data, growing our buffer if needed
 * and zeroing the padding that the bitstream reader may overread. */
986 if (s1->bitstream_buffer) {
987 if (s1->bitstream_buffer_size +
988 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
989 av_fast_malloc(&s->bitstream_buffer,
990 &s->allocated_bitstream_buffer_size,
991 s1->allocated_bitstream_buffer_size);
992 s->bitstream_buffer_size = s1->bitstream_buffer_size;
993 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
994 s1->bitstream_buffer_size);
995 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
996 FF_INPUT_BUFFER_PADDING_SIZE);
999 // linesize-dependent scratch buffer allocation
1000 if (!s->edge_emu_buffer)
1002 if (frame_size_alloc(s, s1->linesize) < 0) {
1003 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1004 "scratch buffers.\n");
1005 return AVERROR(ENOMEM);
1008 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1009 "be allocated due to unknown size.\n");
1013 // MPEG2/interlacing info
/* Bulk-copy the contiguous field range progressive_sequence..rtp_mode. */
1014 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1015 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
/* Only update last-picture statistics once a full frame (not just the
 * first field) has completed. */
1017 if (!s1->first_field) {
1018 s->last_pict_type = s1->pict_type;
1019 if (s1->current_picture_ptr)
1020 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1027 * Set the given MpegEncContext to common defaults
1028 * (same for encoding and decoding).
1029 * The changed fields will not depend upon the
1030 * prior state of the MpegEncContext.
1032 void ff_mpv_common_defaults(MpegEncContext *s)
/* Default scale tables: MPEG-1 DC scaling and identity chroma qscale. */
1034 s->y_dc_scale_table =
1035 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1036 s->chroma_qscale_table = ff_default_chroma_qscale_table;
/* Assume progressive full-frame coding until headers say otherwise. */
1037 s->progressive_frame = 1;
1038 s->progressive_sequence = 1;
1039 s->picture_structure = PICT_FRAME;
1041 s->coded_picture_number = 0;
1042 s->picture_number = 0;
/* Single slice context until threading configures more. */
1047 s->slice_context_count = 1;
1051 * Set the given MpegEncContext to defaults for decoding.
1052 * the changed fields will not depend upon
1053 * the prior state of the MpegEncContext.
1055 void ff_mpv_decode_defaults(MpegEncContext *s)
/* Currently identical to the common defaults; kept as a separate entry
 * point for decoder-specific additions. */
1057 ff_mpv_common_defaults(s);
/* Initializes the error-resilience context from the current macroblock
 * geometry and wires it to this file's concealment callback. Returns 0 or
 * AVERROR(ENOMEM) (its own buffers are freed on failure). */
1060 static int init_er(MpegEncContext *s)
1062 ERContext *er = &s->er;
1063 int mb_array_size = s->mb_height * s->mb_stride;
/* Mirror the geometry the ER code needs. */
1066 er->avctx = s->avctx;
1068 er->mb_index2xy = s->mb_index2xy;
1069 er->mb_num = s->mb_num;
1070 er->mb_width = s->mb_width;
1071 er->mb_height = s->mb_height;
1072 er->mb_stride = s->mb_stride;
1073 er->b8_stride = s->b8_stride;
1075 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1076 er->error_status_table = av_mallocz(mb_array_size);
1077 if (!er->er_temp_buffer || !er->error_status_table)
/* Share the context's skip/intra/DC tables rather than copying them. */
1080 er->mbskip_table = s->mbskip_table;
1081 er->mbintra_table = s->mbintra_table;
1083 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1084 er->dc_val[i] = s->dc_val[i];
/* Concealment reconstructs MBs through mpeg_er_decode_mb above. */
1086 er->decode_mb = mpeg_er_decode_mb;
/* Failure path. */
1091 av_freep(&er->er_temp_buffer);
1092 av_freep(&er->error_status_table);
1093 return AVERROR(ENOMEM);
1097 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Derives the macroblock geometry from width/height and allocates every
 * resolution-dependent table (MV tables, MB type, encoder statistics,
 * H.263 prediction state, skip/intra maps). Returns 0 or AVERROR(ENOMEM). */
1099 static int init_context_frame(MpegEncContext *s)
1101 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Geometry: strides carry one extra column for border handling. */
1103 s->mb_width = (s->width + 15) / 16;
1104 s->mb_stride = s->mb_width + 1;
1105 s->b8_stride = s->mb_width * 2 + 1;
1106 mb_array_size = s->mb_height * s->mb_stride;
1107 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1109 /* set default edge pos, will be overridden
1110 * in decode_header if needed */
1111 s->h_edge_pos = s->mb_width * 16;
1112 s->v_edge_pos = s->mb_height * 16;
1114 s->mb_num = s->mb_width * s->mb_height;
/* Wrap values for prediction: 8x8 (b8) vs. 16x16 (mb) granularity. */
1119 s->block_wrap[3] = s->b8_stride;
1121 s->block_wrap[5] = s->mb_stride;
1123 y_size = s->b8_stride * (2 * s->mb_height + 1);
1124 c_size = s->mb_stride * (s->mb_height + 1);
1125 yc_size = y_size + 2 * c_size;
1127 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
1128 fail); // error resilience code looks cleaner with this
/* Map linear MB index -> (x, y) position expressed with the padded stride. */
1129 for (y = 0; y < s->mb_height; y++)
1130 for (x = 0; x < s->mb_width; x++)
1131 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1133 s->mb_index2xy[s->mb_height * s->mb_width] =
1134 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1137 /* Allocate MV tables */
1138 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
1139 mv_table_size * 2 * sizeof(int16_t), fail);
1140 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
1141 mv_table_size * 2 * sizeof(int16_t), fail);
1142 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
1143 mv_table_size * 2 * sizeof(int16_t), fail);
1144 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
1145 mv_table_size * 2 * sizeof(int16_t), fail);
1146 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
1147 mv_table_size * 2 * sizeof(int16_t), fail);
1148 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
1149 mv_table_size * 2 * sizeof(int16_t), fail);
/* Working pointers skip one border row plus one column of each table. */
1150 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1151 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1152 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1153 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
1155 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
1157 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1159 /* Allocate MB type table */
1160 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
1161 sizeof(uint16_t), fail); // needed for encoding
1163 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
1166 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1167 mb_array_size * sizeof(float), fail);
1168 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1169 mb_array_size * sizeof(float), fail);
/* MPEG-4 or interlaced ME: per-field MV/select tables, indexed by
 * [forward/backward][field of current pic][field of reference]. */
1173 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1174 (s->avctx->flags & CODEC_FLAG_INTERLACED_ME)) {
1175 /* interlaced direct mode decoding tables */
1176 for (i = 0; i < 2; i++) {
1178 for (j = 0; j < 2; j++) {
1179 for (k = 0; k < 2; k++) {
1180 FF_ALLOCZ_OR_GOTO(s->avctx,
1181 s->b_field_mv_table_base[i][j][k],
1182 mv_table_size * 2 * sizeof(int16_t),
1184 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1187 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
1188 mb_array_size * 2 * sizeof(uint8_t), fail);
1189 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
1190 mv_table_size * 2 * sizeof(int16_t), fail);
1191 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
1194 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
1195 mb_array_size * 2 * sizeof(uint8_t), fail);
1198 if (s->out_format == FMT_H263) {
/* Coded-block pattern state for H.263-family syntax. */
1200 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
1201 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1203 /* cbp, ac_pred, pred_dir */
1204 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
1205 mb_array_size * sizeof(uint8_t), fail);
1206 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
1207 mb_array_size * sizeof(uint8_t), fail);
1210 if (s->h263_pred || s->h263_plus || !s->encoding) {
1212 // MN: we need these for error resilience of intra-frames
/* DC prediction values, one plane each for Y/U/V, initialized to the
 * standard reset value 1024. */
1213 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
1214 yc_size * sizeof(int16_t), fail);
1215 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1216 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1217 s->dc_val[2] = s->dc_val[1] + c_size;
1218 for (i = 0; i < yc_size; i++)
1219 s->dc_val_base[i] = 1024;
1222 /* which MBs are intra blocks */
1223 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1224 memset(s->mbintra_table, 1, mb_array_size);
1226 /* init macroblock skip table */
1227 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1228 // Note the + 1 is for a quicker mpeg4 slice_end detection
1232 return AVERROR(ENOMEM);
1236 * init common structure for both encoder and decoder.
1237 * this assumes that some variables like width/height are already set
1239 av_cold int ff_mpv_common_init(MpegEncContext *s)
/* Default to one slice context; with slice threading, one per thread. */
1242 int nb_slices = (HAVE_THREADS &&
1243 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1244 s->avctx->thread_count : 1;
/* An explicit slice count requested by the caller overrides the default
 * when encoding. */
1246 if (s->encoding && s->avctx->slices)
1247 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 codes macroblock rows in field pairs, so round the
 * height up to an even number of macroblock rows there. */
1249 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1250 s->mb_height = (s->height + 31) / 32 * 2;
1252 s->mb_height = (s->height + 15) / 16;
1254 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1255 av_log(s->avctx, AV_LOG_ERROR,
1256 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp the slice count to MAX_THREADS and (when the height is already
 * known) to at most one slice per macroblock row. */
1260 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1263 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1265 max_slices = MAX_THREADS;
1266 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1267 " reducing to %d\n", nb_slices, max_slices);
1268 nb_slices = max_slices;
1271 if ((s->width || s->height) &&
1272 av_image_check_size(s->width, s->height, 0, s->avctx))
1277 /* set chroma shifts */
1278 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1280 &s->chroma_y_shift);
1282 /* convert fourcc to upper case */
1283 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
/* Allocate the picture pool plus an AVFrame for every pool entry and
 * for the four special picture slots below. */
1285 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1286 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1287 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1288 s->picture[i].f = av_frame_alloc();
1289 if (!s->picture[i].f)
1292 memset(&s->next_picture, 0, sizeof(s->next_picture));
1293 memset(&s->last_picture, 0, sizeof(s->last_picture));
1294 memset(&s->current_picture, 0, sizeof(s->current_picture));
1295 memset(&s->new_picture, 0, sizeof(s->new_picture));
1296 s->next_picture.f = av_frame_alloc();
1297 if (!s->next_picture.f)
1299 s->last_picture.f = av_frame_alloc();
1300 if (!s->last_picture.f)
1302 s->current_picture.f = av_frame_alloc();
1303 if (!s->current_picture.f)
1305 s->new_picture.f = av_frame_alloc();
1306 if (!s->new_picture.f)
/* Resolution-dependent tables can only be set up once the size is
 * known; otherwise they are allocated later (e.g. on a size change). */
1309 if (s->width && s->height) {
1310 if (init_context_frame(s))
1313 s->parse_context.state = -1;
1316 s->context_initialized = 1;
1317 s->thread_context[0] = s;
1319 if (s->width && s->height) {
1320 if (nb_slices > 1) {
/* Each extra slice context starts as a byte copy of the master
 * context.
 * NOTE(review): the av_malloc() result is memcpy'd into without a
 * NULL check — an OOM here would crash; confirm whether this is
 * handled elsewhere. */
1321 for (i = 1; i < nb_slices; i++) {
1322 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1323 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
/* Divide the macroblock rows evenly between the slice contexts. */
1326 for (i = 0; i < nb_slices; i++) {
1327 if (init_duplicate_context(s->thread_context[i]) < 0)
1329 s->thread_context[i]->start_mb_y =
1330 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1331 s->thread_context[i]->end_mb_y =
1332 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1335 if (init_duplicate_context(s) < 0)
1338 s->end_mb_y = s->mb_height;
1340 s->slice_context_count = nb_slices;
/* Error path: undo everything that was set up above. */
1345 ff_mpv_common_end(s);
1350 * Frees and resets MpegEncContext fields depending on the resolution.
1351 * Is used during resolution changes to avoid a full reinitialization of the
1354 static void free_context_frame(MpegEncContext *s)
/* Free the motion-vector tables. The non-_base pointers are offset views
 * into the *_base allocations, so they must be cleared alongside. */
1358 av_freep(&s->mb_type);
1359 av_freep(&s->p_mv_table_base);
1360 av_freep(&s->b_forw_mv_table_base);
1361 av_freep(&s->b_back_mv_table_base);
1362 av_freep(&s->b_bidir_forw_mv_table_base);
1363 av_freep(&s->b_bidir_back_mv_table_base);
1364 av_freep(&s->b_direct_mv_table_base);
1365 s->p_mv_table = NULL;
1366 s->b_forw_mv_table = NULL;
1367 s->b_back_mv_table = NULL;
1368 s->b_bidir_forw_mv_table = NULL;
1369 s->b_bidir_back_mv_table = NULL;
1370 s->b_direct_mv_table = NULL;
/* Field-based MV / select tables are indexed per [i][j][k]; presumably
 * direction / field / list — TODO confirm against the header. */
1371 for (i = 0; i < 2; i++) {
1372 for (j = 0; j < 2; j++) {
1373 for (k = 0; k < 2; k++) {
1374 av_freep(&s->b_field_mv_table_base[i][j][k]);
1375 s->b_field_mv_table[i][j][k] = NULL;
1377 av_freep(&s->b_field_select_table[i][j]);
1378 av_freep(&s->p_field_mv_table_base[i][j]);
1379 s->p_field_mv_table[i][j] = NULL;
1381 av_freep(&s->p_field_select_table[i]);
/* Per-macroblock side tables (DC prediction, CBP, error resilience,
 * rate-control scratch). */
1384 av_freep(&s->dc_val_base);
1385 av_freep(&s->coded_block_base);
1386 av_freep(&s->mbintra_table);
1387 av_freep(&s->cbp_table);
1388 av_freep(&s->pred_dir_table);
1390 av_freep(&s->mbskip_table);
1392 av_freep(&s->er.error_status_table);
1393 av_freep(&s->er.er_temp_buffer);
1394 av_freep(&s->mb_index2xy);
1395 av_freep(&s->lambda_table);
1396 av_freep(&s->cplx_tab);
1397 av_freep(&s->bits_tab);
/* Zero the strides so the next (re)initialization recomputes them. */
1399 s->linesize = s->uvlinesize = 0;
/* Rebuild all resolution-dependent state after a mid-stream frame size
 * change: tear down the slice contexts and per-resolution tables, then
 * recompute mb_height and reinitialize, mirroring ff_mpv_common_init(). */
1402 int ff_mpv_common_frame_size_change(MpegEncContext *s)
1406 if (s->slice_context_count > 1) {
1407 for (i = 0; i < s->slice_context_count; i++) {
1408 free_duplicate_context(s->thread_context[i]);
/* Context 0 is the master context (s itself) — only free the copies. */
1410 for (i = 1; i < s->slice_context_count; i++) {
1411 av_freep(&s->thread_context[i]);
1414 free_duplicate_context(s);
1416 free_context_frame(s);
/* Force every pooled picture to be reallocated at the new size. */
1419 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1420 s->picture[i].needs_realloc = 1;
1423 s->last_picture_ptr =
1424 s->next_picture_ptr =
1425 s->current_picture_ptr = NULL;
/* Same mb_height rounding as in ff_mpv_common_init(): interlaced MPEG-2
 * needs an even number of macroblock rows. */
1428 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1429 s->mb_height = (s->height + 31) / 32 * 2;
1431 s->mb_height = (s->height + 15) / 16;
1433 if ((s->width || s->height) &&
1434 (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
1437 if ((err = init_context_frame(s)))
1440 s->thread_context[0] = s;
1442 if (s->width && s->height) {
1443 int nb_slices = s->slice_context_count;
1444 if (nb_slices > 1) {
/* NOTE(review): as in ff_mpv_common_init(), the av_malloc() result
 * is memcpy'd into without a NULL check — verify OOM handling. */
1445 for (i = 1; i < nb_slices; i++) {
1446 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1447 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
/* Redistribute the macroblock rows across the slice contexts. */
1450 for (i = 0; i < nb_slices; i++) {
1451 if ((err = init_duplicate_context(s->thread_context[i])) < 0)
1453 s->thread_context[i]->start_mb_y =
1454 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1455 s->thread_context[i]->end_mb_y =
1456 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1459 if (init_duplicate_context(s) < 0)
1462 s->end_mb_y = s->mb_height;
1464 s->slice_context_count = nb_slices;
/* Error path: release everything again. */
1469 ff_mpv_common_end(s);
1473 /* Free the common structure for both encoder and decoder
 * (everything set up by ff_mpv_common_init()). */
1474 void ff_mpv_common_end(MpegEncContext *s)
1478 if (s->slice_context_count > 1) {
1479 for (i = 0; i < s->slice_context_count; i++) {
1480 free_duplicate_context(s->thread_context[i]);
/* Context 0 is s itself and must not be freed. */
1482 for (i = 1; i < s->slice_context_count; i++) {
1483 av_freep(&s->thread_context[i]);
1485 s->slice_context_count = 1;
1486 } else free_duplicate_context(s);
1488 av_freep(&s->parse_context.buffer);
1489 s->parse_context.buffer_size = 0;
1491 av_freep(&s->bitstream_buffer);
1492 s->allocated_bitstream_buffer_size = 0;
/* Release the picture pool: per-picture tables, buffer references and
 * the AVFrame wrappers themselves. */
1495 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1496 ff_free_picture_tables(&s->picture[i]);
1497 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1498 av_frame_free(&s->picture[i].f);
1501 av_freep(&s->picture);
/* Same treatment for the four special picture slots. */
1502 ff_free_picture_tables(&s->last_picture);
1503 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1504 av_frame_free(&s->last_picture.f);
1505 ff_free_picture_tables(&s->current_picture);
1506 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1507 av_frame_free(&s->current_picture.f);
1508 ff_free_picture_tables(&s->next_picture);
1509 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1510 av_frame_free(&s->next_picture.f);
1511 ff_free_picture_tables(&s->new_picture);
1512 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1513 av_frame_free(&s->new_picture.f);
1515 free_context_frame(s);
/* Clear the state flags/pointers so a later init starts clean. */
1517 s->context_initialized = 0;
1518 s->last_picture_ptr =
1519 s->next_picture_ptr =
1520 s->current_picture_ptr = NULL;
1521 s->linesize = s->uvlinesize = 0;
/* Build the derived lookup tables (max_level, max_run, index_run) of an
 * RLTable, storing them either in the caller-provided static buffer or
 * in freshly allocated memory. */
1524 av_cold void ff_init_rl(RLTable *rl,
1525 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1527 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1528 uint8_t index_run[MAX_RUN + 1];
1529 int last, run, level, start, end, i;
1531 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1532 if (static_store && rl->max_level[0])
1535 /* compute max_level[], max_run[] and index_run[] */
/* One pass for "not last" coefficients, one for "last" coefficients. */
1536 for (last = 0; last < 2; last++) {
/* rl->n is used as the "no entry" sentinel in index_run[]. */
1545 memset(max_level, 0, MAX_RUN + 1);
1546 memset(max_run, 0, MAX_LEVEL + 1);
1547 memset(index_run, rl->n, MAX_RUN + 1);
1548 for (i = start; i < end; i++) {
1549 run = rl->table_run[i];
1550 level = rl->table_level[i];
/* Record only the first table index seen for each run. */
1551 if (index_run[run] == rl->n)
1553 if (level > max_level[run])
1554 max_level[run] = level;
1555 if (run > max_run[level])
1556 max_run[level] = run;
/* Static storage: carve the three tables out of static_store[last];
 * otherwise allocate each table separately. */
1559 rl->max_level[last] = static_store[last];
1561 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1562 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1564 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1566 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1567 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1569 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1571 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1572 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute per-qscale RL-VLC tables: for each of the 32 quantizer
 * values, bake the dequantized level and the run into rl_vlc[q] so the
 * decoder can skip the per-symbol dequantization. */
1576 av_cold void ff_init_vlc_rl(RLTable *rl)
1580 for (q = 0; q < 32; q++) {
/* (q - 1) | 1 forces qadd odd; presumably the H.263-style
 * dequant offset — TODO confirm against the dequant code. */
1582 int qadd = (q - 1) | 1;
1588 for (i = 0; i < rl->vlc.table_size; i++) {
1589 int code = rl->vlc.table[i][0];
1590 int len = rl->vlc.table[i][1];
1593 if (len == 0) { // illegal code
1596 } else if (len < 0) { // more bits needed
1600 if (code == rl->n) { // esc
/* Regular coded symbol: look up run/level from the RL table. */
1604 run = rl->table_run[code] + 1;
1605 level = rl->table_level[code] * qmul + qadd;
/* "last" symbols are distinguished by a +192 run offset. */
1606 if (code >= rl->last) run += 192;
1609 rl->rl_vlc[q][i].len = len;
1610 rl->rl_vlc[q][i].level = level;
1611 rl->rl_vlc[q][i].run = run;
/* Unreference every picture in the pool that is not currently marked as
 * a reference frame. */
1616 static void release_unused_pictures(AVCodecContext *avctx, Picture *picture)
1620 /* release non reference frames */
1621 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1622 if (!picture[i].reference)
1623 ff_mpeg_unref_picture(avctx, &picture[i]);
/* Return whether this pool slot may be reused: either it has no buffer
 * at all, or it is flagged for reallocation and not delayed-referenced. */
1627 static inline int pic_is_unused(Picture *pic)
1629 if (!pic->f->buf[0])
1631 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Find a free slot in the picture pool and return its index, or
 * AVERROR_INVALIDDATA if every slot is in use. The two loops implement
 * two strictness levels: completely empty first, then merely reusable. */
1636 static int find_unused_picture(Picture *picture, int shared)
1641 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1642 if (!picture[i].f->buf[0])
1646 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1647 if (pic_is_unused(&picture[i]))
/* Pool exhausted. */
1652 return AVERROR_INVALIDDATA;
/* Public wrapper around find_unused_picture(): additionally resets a
 * slot that was flagged needs_realloc (e.g. after a size change) by
 * dropping its tables and buffer references before handing it out. */
1655 int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
1657 int ret = find_unused_picture(picture, shared);
1659 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1660 if (picture[ret].needs_realloc) {
1661 picture[ret].needs_realloc = 0;
1662 ff_free_picture_tables(&picture[ret]);
1663 ff_mpeg_unref_picture(avctx, &picture[ret]);
1670 * generic function called after decoding
1671 * the header and before a frame is decoded.
 * Sets up current/last/next picture pointers, allocates dummy reference
 * frames when references are missing, and selects the dequantizers.
 * Returns 0 on success, a negative AVERROR on failure.
1673 int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1679 /* mark & release old frames */
1680 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1681 s->last_picture_ptr != s->next_picture_ptr &&
1682 s->last_picture_ptr->f->buf[0]) {
1683 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1686 /* release forgotten pictures */
1687 /* if (mpeg124/h263) */
/* A referenced picture that is neither last nor next should not exist;
 * with frame threading it can legitimately linger, otherwise warn. */
1688 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1689 if (&s->picture[i] != s->last_picture_ptr &&
1690 &s->picture[i] != s->next_picture_ptr &&
1691 s->picture[i].reference && !s->picture[i].needs_realloc) {
1692 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1693 av_log(avctx, AV_LOG_ERROR,
1694 "releasing zombie picture\n");
1695 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1699 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1701 release_unused_pictures(s->avctx, s->picture);
/* Reuse a pre-set current picture if present, otherwise grab a free
 * pool slot. */
1703 if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1704 // we already have a unused image
1705 // (maybe it was set before reading the header)
1706 pic = s->current_picture_ptr;
1708 i = ff_find_unused_picture(s->avctx, s->picture, 0);
1710 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1713 pic = &s->picture[i];
1717 if (!s->droppable) {
1718 if (s->pict_type != AV_PICTURE_TYPE_B)
1722 pic->f->coded_picture_number = s->coded_picture_number++;
1724 if (ff_alloc_picture(s, pic, 0) < 0)
1727 s->current_picture_ptr = pic;
1728 // FIXME use only the vars from current_pic
/* Propagate interlacing metadata onto the output frame. */
1729 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1730 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1731 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1732 if (s->picture_structure != PICT_FRAME)
1733 s->current_picture_ptr->f->top_field_first =
1734 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1736 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1737 !s->progressive_sequence;
1738 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1740 s->current_picture_ptr->f->pict_type = s->pict_type;
1741 // if (s->avctx->flags && CODEC_FLAG_QSCALE)
1742 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1743 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1745 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1746 s->current_picture_ptr)) < 0)
/* Non-B pictures shift the reference chain: next becomes last, and the
 * new current picture becomes next. */
1749 if (s->pict_type != AV_PICTURE_TYPE_B) {
1750 s->last_picture_ptr = s->next_picture_ptr;
1752 s->next_picture_ptr = s->current_picture_ptr;
1754 ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1755 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1756 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1757 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1758 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1759 s->pict_type, s->droppable);
/* Missing last reference (stream starts on a non-keyframe, or a field
 * based first keyframe): fabricate a gray dummy reference so motion
 * compensation has something to read from. */
1761 if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1762 (s->pict_type != AV_PICTURE_TYPE_I ||
1763 s->picture_structure != PICT_FRAME)) {
1764 int h_chroma_shift, v_chroma_shift;
1765 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1766 &h_chroma_shift, &v_chroma_shift);
1767 if (s->pict_type != AV_PICTURE_TYPE_I)
1768 av_log(avctx, AV_LOG_ERROR,
1769 "warning: first frame is no keyframe\n");
1770 else if (s->picture_structure != PICT_FRAME)
1771 av_log(avctx, AV_LOG_INFO,
1772 "allocate dummy last picture for field based first keyframe\n");
1774 /* Allocate a dummy frame */
1775 i = ff_find_unused_picture(s->avctx, s->picture, 0);
1777 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1780 s->last_picture_ptr = &s->picture[i];
1782 s->last_picture_ptr->reference = 3;
1783 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1785 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1786 s->last_picture_ptr = NULL;
/* Fill luma with black (0) and chroma with neutral gray (0x80). */
1790 memset(s->last_picture_ptr->f->data[0], 0,
1791 avctx->height * s->last_picture_ptr->f->linesize[0]);
1792 memset(s->last_picture_ptr->f->data[1], 0x80,
1793 (avctx->height >> v_chroma_shift) *
1794 s->last_picture_ptr->f->linesize[1]);
1795 memset(s->last_picture_ptr->f->data[2], 0x80,
1796 (avctx->height >> v_chroma_shift) *
1797 s->last_picture_ptr->f->linesize[2]);
/* Mark both fields as fully decoded for frame threading. */
1799 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1800 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same fallback for a missing next (backward) reference of B frames. */
1802 if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1803 s->pict_type == AV_PICTURE_TYPE_B) {
1804 /* Allocate a dummy frame */
1805 i = ff_find_unused_picture(s->avctx, s->picture, 0);
1807 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1810 s->next_picture_ptr = &s->picture[i];
1812 s->next_picture_ptr->reference = 3;
1813 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1815 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1816 s->next_picture_ptr = NULL;
1819 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1820 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
/* Take local references to the reference pictures for this frame. */
1823 if (s->last_picture_ptr) {
1824 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1825 if (s->last_picture_ptr->f->buf[0] &&
1826 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1827 s->last_picture_ptr)) < 0)
1830 if (s->next_picture_ptr) {
1831 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1832 if (s->next_picture_ptr->f->buf[0] &&
1833 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1834 s->next_picture_ptr)) < 0)
1838 if (s->pict_type != AV_PICTURE_TYPE_I &&
1839 !(s->last_picture_ptr && s->last_picture_ptr->f->buf[0])) {
1840 av_log(s, AV_LOG_ERROR,
1841 "Non-reference picture received and no reference available\n");
1842 return AVERROR_INVALIDDATA;
/* Field pictures: offset data to the selected field and double the
 * strides so the field is addressed like a half-height frame. */
1845 if (s->picture_structure!= PICT_FRAME) {
1847 for (i = 0; i < 4; i++) {
1848 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1849 s->current_picture.f->data[i] +=
1850 s->current_picture.f->linesize[i];
1852 s->current_picture.f->linesize[i] *= 2;
1853 s->last_picture.f->linesize[i] *= 2;
1854 s->next_picture.f->linesize[i] *= 2;
1858 /* set dequantizer, we can't do it during init as
1859 * it might change for mpeg4 and we can't do it in the header
1860 * decode as init is not called for mpeg4 there yet */
1861 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1862 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1863 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1864 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1865 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1866 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1868 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1869 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
/* Deprecated XvMC hardware acceleration hand-off. */
1873 FF_DISABLE_DEPRECATION_WARNINGS
1874 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1875 return ff_xvmc_field_start(s, avctx);
1876 FF_ENABLE_DEPRECATION_WARNINGS
1877 #endif /* FF_API_XVMC */
1882 /* called after a frame has been decoded. */
1883 void ff_mpv_frame_end(MpegEncContext *s)
/* Deprecated XvMC path: let the acceleration layer finish the field. */
1886 FF_DISABLE_DEPRECATION_WARNINGS
1887 /* redraw edges for the frame if decoding didn't complete */
1888 // just to make sure that all data is rendered.
1889 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1890 ff_xvmc_field_end(s);
1892 FF_ENABLE_DEPRECATION_WARNINGS
1893 #endif /* FF_API_XVMC */
/* Signal frame-threading consumers that this reference is complete. */
1897 if (s->current_picture.reference)
1898 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1902 * Print debugging info for the given picture.
 * Emits an ASCII map of the macroblocks (skip count, qscale and/or
 * macroblock type, depending on the debug flags) to the log.
1904 void ff_print_debug_info(MpegEncContext *s, Picture *p)
/* Nothing useful to print for hwaccel frames or without mb_type data. */
1907 if (s->avctx->hwaccel || !p || !p->mb_type)
1911 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1914 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
/* NOTE(review): `pict` is not declared in the visible lines —
 * presumably set to p->f above; confirm against the full source. */
1915 switch (pict->pict_type) {
1916 case AV_PICTURE_TYPE_I:
1917 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1919 case AV_PICTURE_TYPE_P:
1920 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1922 case AV_PICTURE_TYPE_B:
1923 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1925 case AV_PICTURE_TYPE_S:
1926 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1928 case AV_PICTURE_TYPE_SI:
1929 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1931 case AV_PICTURE_TYPE_SP:
1932 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
/* One log character (or more) per macroblock, row by row. */
1935 for (y = 0; y < s->mb_height; y++) {
1936 for (x = 0; x < s->mb_width; x++) {
1937 if (s->avctx->debug & FF_DEBUG_SKIP) {
1938 int count = s->mbskip_table[x + y * s->mb_stride];
1941 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1943 if (s->avctx->debug & FF_DEBUG_QP) {
1944 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1945 p->qscale_table[x + y * s->mb_stride]);
1947 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1948 int mb_type = p->mb_type[x + y * s->mb_stride];
1949 // Type & MV direction
1950 if (IS_PCM(mb_type))
1951 av_log(s->avctx, AV_LOG_DEBUG, "P");
1952 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1953 av_log(s->avctx, AV_LOG_DEBUG, "A");
1954 else if (IS_INTRA4x4(mb_type))
1955 av_log(s->avctx, AV_LOG_DEBUG, "i");
1956 else if (IS_INTRA16x16(mb_type))
1957 av_log(s->avctx, AV_LOG_DEBUG, "I");
1958 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1959 av_log(s->avctx, AV_LOG_DEBUG, "d");
1960 else if (IS_DIRECT(mb_type))
1961 av_log(s->avctx, AV_LOG_DEBUG, "D");
1962 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1963 av_log(s->avctx, AV_LOG_DEBUG, "g");
1964 else if (IS_GMC(mb_type))
1965 av_log(s->avctx, AV_LOG_DEBUG, "G");
1966 else if (IS_SKIP(mb_type))
1967 av_log(s->avctx, AV_LOG_DEBUG, "S");
1968 else if (!USES_LIST(mb_type, 1))
1969 av_log(s->avctx, AV_LOG_DEBUG, ">");
1970 else if (!USES_LIST(mb_type, 0))
1971 av_log(s->avctx, AV_LOG_DEBUG, "<");
/* Bidirectional: must use both reference lists. */
1973 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1974 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second character: partitioning of the macroblock. */
1978 if (IS_8X8(mb_type))
1979 av_log(s->avctx, AV_LOG_DEBUG, "+");
1980 else if (IS_16X8(mb_type))
1981 av_log(s->avctx, AV_LOG_DEBUG, "-");
1982 else if (IS_8X16(mb_type))
1983 av_log(s->avctx, AV_LOG_DEBUG, "|");
1984 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1985 av_log(s->avctx, AV_LOG_DEBUG, " ");
1987 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* Third character: interlaced coding marker. */
1990 if (IS_INTERLACED(mb_type))
1991 av_log(s->avctx, AV_LOG_DEBUG, "=");
1993 av_log(s->avctx, AV_LOG_DEBUG, " ");
1996 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2002 * find the lowest MB row referenced in the MVs
 * Used by frame threading to know how far the reference frame must be
 * decoded before this macroblock can be motion compensated.
2004 int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
/* Half-pel MVs are shifted up by one so both half- and quarter-pel
 * vectors end up in the same 1/4-pel-like scale. */
2006 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2007 int my, off, i, mvs;
/* Field pictures and GMC are not handled here — fall through to the
 * conservative "whole frame" answer at the end. */
2009 if (s->picture_structure != PICT_FRAME || s->mcsel)
2012 switch (s->mv_type) {
/* Track the extreme vertical displacements over all MVs of the MB. */
2026 for (i = 0; i < mvs; i++) {
2027 my = s->mv[dir][i][1]<<qpel_shift;
2028 my_max = FFMAX(my_max, my);
2029 my_min = FFMIN(my_min, my);
/* Convert the worst-case displacement to macroblock rows (>> 6: from
 * quarter-pel units to 16-pixel rows), rounding up. */
2032 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2034 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* Fallback: require the whole reference frame. */
2036 return s->mb_height-1;
2039 /* put block[] to dest[] */
/* Dequantize an intra block and write (overwrite) its IDCT into dest. */
2040 static inline void put_dct(MpegEncContext *s,
2041 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2043 s->dct_unquantize_intra(s, block, i, qscale);
2044 s->idsp.idct_put(dest, line_size, block);
2047 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block to dest; a negative
 * block_last_index means the block has no coefficients and is skipped. */
2048 static inline void add_dct(MpegEncContext *s,
2049 int16_t *block, int i, uint8_t *dest, int line_size)
2051 if (s->block_last_index[i] >= 0) {
2052 s->idsp.idct_add(dest, line_size, block);
/* Dequantize an inter block and add its IDCT to dest; skipped when the
 * block has no coded coefficients (block_last_index < 0). */
2056 static inline void add_dequant_dct(MpegEncContext *s,
2057 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2059 if (s->block_last_index[i] >= 0) {
2060 s->dct_unquantize_inter(s, block, i, qscale);
2062 s->idsp.idct_add(dest, line_size, block);
2067 * Clean dc, ac, coded_block for the current non-intra MB.
 * Resets the intra prediction state (DC predictors to 1024, AC
 * predictors to 0, coded_block flags) so a following intra MB does not
 * predict from stale values, then clears the mbintra flag.
2069 void ff_clean_intra_table_entries(MpegEncContext *s)
2071 int wrap = s->b8_stride;
2072 int xy = s->block_index[0];
/* Luma: reset the DC predictors of the four 8x8 blocks of this MB. */
2075 s->dc_val[0][xy + 1 ] =
2076 s->dc_val[0][xy + wrap] =
2077 s->dc_val[0][xy + 1 + wrap] = 1024;
/* Luma AC predictors: two 8x8 rows of two blocks each. */
2079 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2080 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2081 if (s->msmpeg4_version>=3) {
2082 s->coded_block[xy ] =
2083 s->coded_block[xy + 1 ] =
2084 s->coded_block[xy + wrap] =
2085 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma tables are addressed at macroblock resolution. */
2088 wrap = s->mb_stride;
2089 xy = s->mb_x + s->mb_y * wrap;
2091 s->dc_val[2][xy] = 1024;
2093 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2094 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2096 s->mbintra_table[xy]= 0;
2099 /* generic function called after a macroblock has been parsed by the
2100 decoder or after it has been encoded by the encoder.
2102 Important variables used:
2103 s->mb_intra : true if intra macroblock
2104 s->mv_dir : motion vector direction
2105 s->mv_type : motion vector type
2106 s->mv : motion vector
2107 s->interlaced_dct : true if interlaced dct used (mpeg2)
   Performs motion compensation and the dequant/IDCT reconstruction of
   one macroblock; is_mpeg12 is a compile-time template switch. */
2109 static av_always_inline
2110 void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2113 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* Deprecated XvMC acceleration: the hardware does the reconstruction. */
2116 FF_DISABLE_DEPRECATION_WARNINGS
2117 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2118 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2121 FF_ENABLE_DEPRECATION_WARNINGS
2122 #endif /* FF_API_XVMC */
2124 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2125 /* print DCT coefficients */
2127 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2129 for(j=0; j<64; j++){
2130 av_log(s->avctx, AV_LOG_DEBUG, "%5d",
2131 block[i][s->idsp.idct_permutation[j]]);
2133 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2137 s->current_picture.qscale_table[mb_xy] = s->qscale;
2139 /* update DC predictors for P macroblocks */
2141 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2142 if(s->mbintra_table[mb_xy])
2143 ff_clean_intra_table_entries(s);
2147 s->last_dc[2] = 128 << s->intra_dc_precision;
2150 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2151 s->mbintra_table[mb_xy]=1;
/* Skip the pixel reconstruction when encoding unless the pixels are
 * actually needed (PSNR measurement, reference frames, RD decisions). */
2153 if ((s->avctx->flags & CODEC_FLAG_PSNR) ||
2154 !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
2155 s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
2156 uint8_t *dest_y, *dest_cb, *dest_cr;
2157 int dct_linesize, dct_offset;
2158 op_pixels_func (*op_pix)[4];
2159 qpel_mc_func (*op_qpix)[16];
2160 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2161 const int uvlinesize = s->current_picture.f->linesize[1];
2162 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2163 const int block_size = 8;
2165 /* avoid copy if macroblock skipped in last frame too */
2166 /* skip only during decoding as we might trash the buffers during encoding a bit */
2168 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2170 if (s->mb_skipped) {
2172 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2174 } else if(!s->current_picture.reference) {
2177 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT interleaves the two fields: double stride, and the
 * second block row starts one line (not eight lines) down. */
2181 dct_linesize = linesize << s->interlaced_dct;
2182 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2186 dest_cb= s->dest[1];
2187 dest_cr= s->dest[2];
/* Non-readable destination: reconstruct into a scratchpad and copy the
 * finished macroblock out at the end (see put_pixels_tab below). */
2189 dest_y = s->b_scratchpad;
2190 dest_cb= s->b_scratchpad+16*linesize;
2191 dest_cr= s->b_scratchpad+32*linesize;
2195 /* motion handling */
2196 /* decoding or more than one mb_type (MC was already done otherwise) */
/* With frame threading, wait until the referenced rows of the
 * reference pictures have actually been decoded. */
2199 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2200 if (s->mv_dir & MV_DIR_FORWARD) {
2201 ff_thread_await_progress(&s->last_picture_ptr->tf,
2202 ff_mpv_lowest_referenced_row(s, 0),
2205 if (s->mv_dir & MV_DIR_BACKWARD) {
2206 ff_thread_await_progress(&s->next_picture_ptr->tf,
2207 ff_mpv_lowest_referenced_row(s, 1),
2212 op_qpix= s->me.qpel_put;
2213 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2214 op_pix = s->hdsp.put_pixels_tab;
2216 op_pix = s->hdsp.put_no_rnd_pixels_tab;
/* Forward MC writes the prediction; a following backward MC averages
 * into it (bidirectional prediction). */
2218 if (s->mv_dir & MV_DIR_FORWARD) {
2219 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2220 op_pix = s->hdsp.avg_pixels_tab;
2221 op_qpix= s->me.qpel_avg;
2223 if (s->mv_dir & MV_DIR_BACKWARD) {
2224 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2228 /* skip dequant / idct if we are really late ;) */
2229 if(s->avctx->skip_idct){
2230 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2231 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2232 || s->avctx->skip_idct >= AVDISCARD_ALL)
2236 /* add dct residue */
/* Codecs whose inter blocks are not yet dequantized take the
 * dequant+add path; the rest (already dequantized) just add. */
2237 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2238 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2239 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2240 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2241 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2242 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2244 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
2245 if (s->chroma_y_shift){
2246 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2247 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2251 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2252 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2253 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2254 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2257 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2258 add_dct(s, block[0], 0, dest_y , dct_linesize);
2259 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2260 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2261 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2263 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
2264 if(s->chroma_y_shift){//Chroma420
2265 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2266 add_dct(s, block[5], 5, dest_cr, uvlinesize);
/* 4:2:2 / 4:4:4: chroma uses its own interlacing-aware stride. */
2269 dct_linesize = uvlinesize << s->interlaced_dct;
2270 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2272 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2273 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2274 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2275 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2276 if(!s->chroma_x_shift){//Chroma444
2277 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2278 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2279 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2280 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2285 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2286 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2289 /* dct only in intra block */
/* Intra path: codecs other than MPEG-1/2 still need dequantization. */
2290 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2291 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2292 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2293 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2294 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2296 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
2297 if(s->chroma_y_shift){
2298 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2299 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2303 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2304 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2305 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2306 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra blocks arrive already dequantized: plain IDCT put. */
2310 s->idsp.idct_put(dest_y, dct_linesize, block[0]);
2311 s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2312 s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
2313 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2315 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
2316 if(s->chroma_y_shift){
2317 s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
2318 s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
2321 dct_linesize = uvlinesize << s->interlaced_dct;
2322 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2324 s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
2325 s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
2326 s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2327 s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2328 if(!s->chroma_x_shift){//Chroma444
2329 s->idsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2330 s->idsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2331 s->idsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2332 s->idsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* Non-readable case: copy the scratchpad reconstruction into the real
 * destination planes. */
2340 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2341 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2342 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatch to the MPEG-1/2 or generic template
 * instantiation of mpv_decode_mb_internal(). */
2347 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
2350 if(s->out_format == FMT_MPEG1) {
2351 mpv_decode_mb_internal(s, block, 1);
2354 mpv_decode_mb_internal(s, block, 0);
/* Notify the application (draw_horiz_band callback) that rows [y, y+h)
 * of the current picture are ready. */
2357 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2359 ff_draw_horiz_band(s->avctx, s->current_picture.f,
2360 s->last_picture.f, y, h, s->picture_structure,
2361 s->first_field, s->low_delay);
/* Set up the per-macroblock block indices and destination pointers for
 * the current (mb_x, mb_y) position. Called once per macroblock row
 * start; the indices are offset by -1/-2 and advanced as mb_x grows. */
2364 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2365 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2366 const int uvlinesize = s->current_picture.f->linesize[1];
2367 const int mb_size= 4;
/* Indices 0-3: the four luma 8x8 blocks (b8 resolution). */
2369 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2370 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2371 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2372 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* Indices 4-5: the chroma blocks, stored after the luma plane's area. */
2373 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2374 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2375 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* Destination pointers start one macroblock left of mb_x (<< mb_size
 * == *16 for luma, scaled by the chroma shifts for cb/cr). */
2377 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
2378 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2379 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2381 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2383 if(s->picture_structure==PICT_FRAME){
2384 s->dest[0] += s->mb_y * linesize << mb_size;
2385 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2386 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* Field picture: only every other macroblock row belongs to this
 * field, hence mb_y >> 1. */
2388 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2389 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2390 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2391 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 *                  (inverse) permutated to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t scratch[64];
    int idx;

    if (last <= 0)
        return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* Pass 1: stash every coefficient that may move and clear its slot,
     * so pass 2 cannot read an already-overwritten value. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        scratch[pos] = block[pos];
        block[pos]   = 0;
    }

    /* Pass 2: drop each stashed coefficient into its permuted position. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        block[permutation[pos]] = scratch[pos];
    }
}
2425 void ff_mpeg_flush(AVCodecContext *avctx){
2427 MpegEncContext *s = avctx->priv_data;
2429 if (!s || !s->picture)
2432 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2433 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2434 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2436 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
2437 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
2438 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
2440 s->mb_x= s->mb_y= 0;
2442 s->parse_context.state= -1;
2443 s->parse_context.frame_start_found= 0;
2444 s->parse_context.overread= 0;
2445 s->parse_context.overread_index= 0;
2446 s->parse_context.index= 0;
2447 s->parse_context.last_index= 0;
2448 s->bitstream_buffer_size=0;
2453 * set qscale and update qscale dependent variables.
2455 void ff_set_qscale(MpegEncContext * s, int qscale)
2459 else if (qscale > 31)
2463 s->chroma_qscale= s->chroma_qscale_table[qscale];
2465 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2466 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2469 void ff_mpv_report_decode_progress(MpegEncContext *s)
2471 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2472 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);