2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
40 #include "mpegutils.h"
41 #include "mpegvideo.h"
45 #include "xvmc_internal.h"
/* Default chroma qscale table: identity mapping — chroma uses the same
 * quantizer scale as luma for every qscale index 0..31. */
49 static const uint8_t ff_default_chroma_qscale_table[32] = {
50 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
51 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
52 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale (128 entries). */
55 const uint8_t ff_mpeg1_dc_scale_table[128] = {
56 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
57 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale for intra_dc_precision == 1: constant 4 (9-bit DC). */
67 static const uint8_t mpeg2_dc_scale_table1[128] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale for intra_dc_precision == 2: constant 2 (10-bit DC). */
79 static const uint8_t mpeg2_dc_scale_table2[128] = {
80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale for intra_dc_precision == 3: constant 1 (11-bit DC). */
91 static const uint8_t mpeg2_dc_scale_table3[128] = {
92 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
93 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale table selector, indexed by intra_dc_precision (0..3).
 * Index 0 reuses the MPEG-1 table (scale 8); 1..3 use scales 4/2/1. */
103 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
104 ff_mpeg1_dc_scale_table,
105 mpeg2_dc_scale_table1,
106 mpeg2_dc_scale_table2,
107 mpeg2_dc_scale_table3,
/* Alternate horizontal zigzag scan order (64 coefficient positions). */
110 const uint8_t ff_alternate_horizontal_scan[64] = {
111 0, 1, 2, 3, 8, 9, 16, 17,
112 10, 11, 4, 5, 6, 7, 15, 14,
113 13, 12, 19, 18, 24, 25, 32, 33,
114 26, 27, 20, 21, 22, 23, 28, 29,
115 30, 31, 34, 35, 40, 41, 48, 49,
116 42, 43, 36, 37, 38, 39, 44, 45,
117 46, 47, 50, 51, 56, 57, 58, 59,
118 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical scan order (MPEG-2 alternate_scan / interlaced). */
121 const uint8_t ff_alternate_vertical_scan[64] = {
122 0, 8, 16, 24, 1, 9, 2, 10,
123 17, 25, 32, 40, 48, 56, 57, 49,
124 41, 33, 26, 18, 3, 11, 4, 12,
125 19, 27, 34, 42, 50, 58, 35, 43,
126 51, 59, 20, 28, 5, 13, 6, 14,
127 21, 29, 36, 44, 52, 60, 37, 45,
128 53, 61, 22, 30, 7, 15, 23, 31,
129 38, 46, 54, 62, 39, 47, 55, 63,
/* MPEG-1 intra block dequantization (C reference implementation).
 * AC levels (scan order 1..block_last_index[n]) are rescaled as
 * (level * qscale * intra_matrix[j]) >> 3, then forced odd via
 * (level - 1) | 1 (oddification / mismatch control). The DC term
 * block[0] is scaled by y_dc_scale or c_dc_scale instead.
 * NOTE(review): the luma/chroma selector and the per-level sign
 * branches are not visible in this excerpt — presumably n < 4 picks
 * luma and the two identical computations are the +/- branches;
 * confirm against the full source. */
132 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
133 int16_t *block, int n, int qscale)
135 int i, level, nCoeffs;
136 const uint16_t *quant_matrix;
138 nCoeffs= s->block_last_index[n];
141 block[0] = block[0] * s->y_dc_scale;
143 block[0] = block[0] * s->c_dc_scale;
144 /* XXX: only mpeg1 */
145 quant_matrix = s->intra_matrix;
146 for(i=1;i<=nCoeffs;i++) {
147 int j= s->intra_scantable.permutated[i];
152 level = (int)(level * qscale * quant_matrix[j]) >> 3;
153 level = (level - 1) | 1;
156 level = (int)(level * qscale * quant_matrix[j]) >> 3;
157 level = (level - 1) | 1;
/* MPEG-1 inter (non-intra) block dequantization.
 * Every coefficient 0..block_last_index[n] is rescaled as
 * ((2*|level| + 1) * qscale * inter_matrix[j]) >> 4 and forced odd.
 * Note the scan permutation deliberately comes from intra_scantable
 * (both scantables share the same permutation here).
 * NOTE(review): the sign branches around the duplicated computation
 * are elided in this excerpt — confirm against the full source. */
164 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
165 int16_t *block, int n, int qscale)
167 int i, level, nCoeffs;
168 const uint16_t *quant_matrix;
170 nCoeffs= s->block_last_index[n];
172 quant_matrix = s->inter_matrix;
173 for(i=0; i<=nCoeffs; i++) {
174 int j= s->intra_scantable.permutated[i];
179 level = (((level << 1) + 1) * qscale *
180 ((int) (quant_matrix[j]))) >> 4;
181 level = (level - 1) | 1;
184 level = (((level << 1) + 1) * qscale *
185 ((int) (quant_matrix[j]))) >> 4;
186 level = (level - 1) | 1;
/* MPEG-2 intra block dequantization.
 * Same rescale as MPEG-1 intra ((level * qscale * matrix) >> 3) but
 * WITHOUT the oddification step; with alternate_scan all 64
 * coefficients are processed regardless of block_last_index.
 * NOTE(review): luma/chroma DC selector and sign branches are elided
 * in this excerpt. */
193 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
194 int16_t *block, int n, int qscale)
196 int i, level, nCoeffs;
197 const uint16_t *quant_matrix;
199 if(s->alternate_scan) nCoeffs= 63;
200 else nCoeffs= s->block_last_index[n];
203 block[0] = block[0] * s->y_dc_scale;
205 block[0] = block[0] * s->c_dc_scale;
206 quant_matrix = s->intra_matrix;
207 for(i=1;i<=nCoeffs;i++) {
208 int j= s->intra_scantable.permutated[i];
213 level = (int)(level * qscale * quant_matrix[j]) >> 3;
216 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of MPEG-2 intra dequantization, selected when
 * CODEC_FLAG_BITEXACT is set (see dct_init). Spec-conformant version;
 * presumably it also applies the mismatch control on the last
 * coefficient — that part is not visible in this excerpt. */
223 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
224 int16_t *block, int n, int qscale)
226 int i, level, nCoeffs;
227 const uint16_t *quant_matrix;
230 if(s->alternate_scan) nCoeffs= 63;
231 else nCoeffs= s->block_last_index[n];
234 block[0] = block[0] * s->y_dc_scale;
236 block[0] = block[0] * s->c_dc_scale;
237 quant_matrix = s->intra_matrix;
238 for(i=1;i<=nCoeffs;i++) {
239 int j= s->intra_scantable.permutated[i];
244 level = (int)(level * qscale * quant_matrix[j]) >> 3;
247 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter block dequantization:
 * ((2*|level| + 1) * qscale * inter_matrix[j]) >> 4, no oddification.
 * With alternate_scan all 64 coefficients are processed.
 * NOTE(review): sign branches around the duplicated computation are
 * elided in this excerpt. */
256 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
257 int16_t *block, int n, int qscale)
259 int i, level, nCoeffs;
260 const uint16_t *quant_matrix;
263 if(s->alternate_scan) nCoeffs= 63;
264 else nCoeffs= s->block_last_index[n];
266 quant_matrix = s->inter_matrix;
267 for(i=0; i<=nCoeffs; i++) {
268 int j= s->intra_scantable.permutated[i];
273 level = (((level << 1) + 1) * qscale *
274 ((int) (quant_matrix[j]))) >> 4;
277 level = (((level << 1) + 1) * qscale *
278 ((int) (quant_matrix[j]))) >> 4;
/* H.263 intra block dequantization: level' = level*qmul +/- qadd,
 * with qadd = (qscale - 1) | 1 (always odd) and the sign chosen per
 * coefficient. DC is scaled by y_dc_scale / c_dc_scale.
 * NOTE(review): the qmul assignment and several branch conditions are
 * elided in this excerpt — presumably qmul = qscale << 1. */
287 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
288 int16_t *block, int n, int qscale)
290 int i, level, qmul, qadd;
293 assert(s->block_last_index[n]>=0);
299 block[0] = block[0] * s->y_dc_scale;
301 block[0] = block[0] * s->c_dc_scale;
302 qadd = (qscale - 1) | 1;
309 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
311 for(i=1; i<=nCoeffs; i++) {
315 level = level * qmul - qadd;
317 level = level * qmul + qadd;
/* H.263 inter block dequantization — same qmul/qadd scheme as the
 * intra variant but starting at coefficient 0 (no separate DC path).
 * NOTE(review): qmul assignment and branch conditions elided here. */
324 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
325 int16_t *block, int n, int qscale)
327 int i, level, qmul, qadd;
330 assert(s->block_last_index[n]>=0);
332 qadd = (qscale - 1) | 1;
335 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
337 for(i=0; i<=nCoeffs; i++) {
341 level = level * qmul - qadd;
343 level = level * qmul + qadd;
/* Error-resilience callback: reconstruct one macroblock for the error
 * concealment code (registered as er->decode_mb in init_er).
 * Copies the passed MV/MB state into the context, computes the
 * destination pointers for the current MB in the current picture, and
 * hands the block off to ff_mpv_decode_mb. Interlaced concealment is
 * only partially implemented (see warning below). */
350 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
352 int mb_x, int mb_y, int mb_intra, int mb_skipped)
354 MpegEncContext *s = opaque;
357 s->mv_type = mv_type;
358 s->mb_intra = mb_intra;
359 s->mb_skipped = mb_skipped;
362 memcpy(s->mv, mv, sizeof(*mv));
364 ff_init_block_index(s);
365 ff_update_block_index(s);
367 s->bdsp.clear_blocks(s->block[0]);
369 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
370 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
371 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
374 av_log(s->avctx, AV_LOG_DEBUG,
375 "Interlaced error concealment is not fully implemented\n");
376 ff_mpv_decode_mb(s, s->block);
379 /* init common dct for both encoder and decoder */
/* Initialize the DSP helpers and select the dequantization functions.
 * The C reference implementations are installed first; the per-arch
 * init calls below may then override them with optimized versions.
 * The bit-exact MPEG-2 intra variant is chosen when the caller set
 * CODEC_FLAG_BITEXACT. */
380 static av_cold int dct_init(MpegEncContext *s)
382 ff_blockdsp_init(&s->bdsp, s->avctx);
383 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
384 ff_me_cmp_init(&s->mecc, s->avctx);
385 ff_mpegvideodsp_init(&s->mdsp);
386 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
388 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
389 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
390 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
391 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
392 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
393 if (s->flags & CODEC_FLAG_BITEXACT)
394 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
395 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
397 if (HAVE_INTRINSICS_NEON)
398 ff_mpv_common_init_neon(s);
401 ff_mpv_common_init_arm(s);
403 ff_mpv_common_init_ppc(s);
405 ff_mpv_common_init_x86(s);
/* Initialize the IDCT and build the permutated scan tables.
 * Inter/intra scantables use the vertical scan when alternate_scan is
 * set, otherwise the standard zigzag; the horizontal/vertical intra
 * tables are always built so headers can switch scan per picture. */
410 av_cold void ff_mpv_idct_init(MpegEncContext *s)
412 ff_idctdsp_init(&s->idsp, s->avctx);
414 /* load & permutate scantables
415 * note: only wmv uses different ones
417 if (s->alternate_scan) {
418 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
419 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
421 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
422 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
424 ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
425 ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads). The scratchpad pointers all alias
 * into the single me.scratchpad allocation at different offsets.
 * Returns 0 on success or AVERROR(ENOMEM), freeing the partially
 * allocated edge_emu_buffer on failure. */
428 static int frame_size_alloc(MpegEncContext *s, int linesize)
430 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
432 // edge emu needs blocksize + filter length - 1
433 // (= 17x17 for halfpel / 21x21 for h264)
434 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
435 // at uvlinesize. It supports only YUV420 so 24x24 is enough
436 // linesize * interlaced * MBsize
437 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
440 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
442 s->me.temp = s->me.scratchpad;
443 s->rd_scratchpad = s->me.scratchpad;
444 s->b_scratchpad = s->me.scratchpad;
445 s->obmc_scratchpad = s->me.scratchpad + 16;
449 av_freep(&s->edge_emu_buffer);
450 return AVERROR(ENOMEM);
454 * Allocate a frame buffer
/* Get pixel buffers for a Picture.
 * Decoders (except the WMV3/VC1/MSS2 image codecs) go through
 * ff_thread_get_buffer; encoders over-allocate by EDGE_WIDTH on every
 * side and then offset data[] so the visible area is edge-padded.
 * Also validates that strides did not change mid-stream, that u and v
 * strides match, and lazily allocates the context scratch buffers and
 * any hwaccel private data. */
456 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
458 int edges_needed = av_codec_is_encoder(s->avctx->codec);
462 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
463 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
464 s->codec_id != AV_CODEC_ID_MSS2) {
466 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
467 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
470 r = ff_thread_get_buffer(s->avctx, &pic->tf,
471 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
473 pic->f->width = s->avctx->width;
474 pic->f->height = s->avctx->height;
475 pic->f->format = s->avctx->pix_fmt;
476 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
479 if (r < 0 || !pic->f->buf[0]) {
480 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
487 for (i = 0; pic->f->data[i]; i++) {
488 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
489 pic->f->linesize[i] +
490 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
491 pic->f->data[i] += offset;
493 pic->f->width = s->avctx->width;
494 pic->f->height = s->avctx->height;
497 if (s->avctx->hwaccel) {
498 assert(!pic->hwaccel_picture_private);
499 if (s->avctx->hwaccel->frame_priv_data_size) {
500 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
501 if (!pic->hwaccel_priv_buf) {
502 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
505 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
509 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
510 s->uvlinesize != pic->f->linesize[1])) {
511 av_log(s->avctx, AV_LOG_ERROR,
512 "get_buffer() failed (stride changed)\n");
513 ff_mpeg_unref_picture(s, pic);
517 if (pic->f->linesize[1] != pic->f->linesize[2]) {
518 av_log(s->avctx, AV_LOG_ERROR,
519 "get_buffer() failed (uv stride mismatch)\n");
520 ff_mpeg_unref_picture(s, pic);
524 if (!s->edge_emu_buffer &&
525 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
526 av_log(s->avctx, AV_LOG_ERROR,
527 "get_buffer() failed to allocate context scratch buffers.\n");
528 ff_mpeg_unref_picture(s, pic);
/* Release every per-picture side-data buffer (encoder statistics,
 * skip/qscale/mb-type tables and the per-direction motion val /
 * ref index buffers). av_buffer_unref is NULL-safe, so this may be
 * called on a partially allocated Picture. */
535 void ff_free_picture_tables(Picture *pic)
539 av_buffer_unref(&pic->mb_var_buf);
540 av_buffer_unref(&pic->mc_mb_var_buf);
541 av_buffer_unref(&pic->mb_mean_buf);
542 av_buffer_unref(&pic->mbskip_table_buf);
543 av_buffer_unref(&pic->qscale_table_buf);
544 av_buffer_unref(&pic->mb_type_buf);
546 for (i = 0; i < 2; i++) {
547 av_buffer_unref(&pic->motion_val_buf[i]);
548 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data buffers sized from the current
 * macroblock geometry. The mb_var/mc_mb_var/mb_mean trio is only used
 * by the encoder; motion_val/ref_index are needed for H.263-family
 * output or when encoding. Returns 0 or AVERROR(ENOMEM); on failure
 * the caller is expected to free via ff_free_picture_tables. */
552 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
554 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
555 const int mb_array_size = s->mb_stride * s->mb_height;
556 const int b8_array_size = s->b8_stride * s->mb_height * 2;
560 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
561 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
562 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
564 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
565 return AVERROR(ENOMEM);
568 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
569 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
570 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
571 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
572 return AVERROR(ENOMEM);
575 if (s->out_format == FMT_H263 || s->encoding) {
576 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
577 int ref_index_size = 4 * mb_array_size;
579 for (i = 0; mv_size && i < 2; i++) {
580 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
581 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
582 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
583 return AVERROR(ENOMEM);
/* Ensure every per-picture table buffer is writable (copy-on-write):
 * each potentially shared AVBuffer is made writable via
 * av_buffer_make_writable. On failure the macro presumably returns the
 * error (its full expansion is not visible in this excerpt). */
590 static int make_tables_writable(Picture *pic)
593 #define MAKE_WRITABLE(table) \
596 (ret = av_buffer_make_writable(&pic->table)) < 0)\
600 MAKE_WRITABLE(mb_var_buf);
601 MAKE_WRITABLE(mc_mb_var_buf);
602 MAKE_WRITABLE(mb_mean_buf);
603 MAKE_WRITABLE(mbskip_table_buf);
604 MAKE_WRITABLE(qscale_table_buf);
605 MAKE_WRITABLE(mb_type_buf);
607 for (i = 0; i < 2; i++) {
608 MAKE_WRITABLE(motion_val_buf[i]);
609 MAKE_WRITABLE(ref_index_buf[i]);
616 * Allocate a Picture.
617 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Also (re)allocates the picture side-data tables and fixes up the
 * convenience pointers into them. qscale_table/mb_type are offset by
 * 2*mb_stride+1 so negative neighbor indices stay in bounds; the
 * motion_val pointers skip the 4-element guard at the buffer start.
 * On any failure everything is unreferenced/freed and ENOMEM returned. */
619 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
624 assert(pic->f->data[0]);
627 assert(!pic->f->buf[0]);
629 if (alloc_frame_buffer(s, pic) < 0)
632 s->linesize = pic->f->linesize[0];
633 s->uvlinesize = pic->f->linesize[1];
636 if (!pic->qscale_table_buf)
637 ret = alloc_picture_tables(s, pic);
639 ret = make_tables_writable(pic);
644 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
645 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
646 pic->mb_mean = pic->mb_mean_buf->data;
649 pic->mbskip_table = pic->mbskip_table_buf->data;
650 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
651 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
653 if (pic->motion_val_buf[0]) {
654 for (i = 0; i < 2; i++) {
655 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
656 pic->ref_index[i] = pic->ref_index_buf[i]->data;
662 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
663 ff_mpeg_unref_picture(s, pic);
664 ff_free_picture_tables(pic);
665 return AVERROR(ENOMEM);
669 * Deallocate a picture.
/* Release the frame/hwaccel references and zero the Picture fields
 * that follow mb_mean in the struct layout (the memset with offsetof
 * below) — the table pointers before that offset are preserved unless
 * needs_realloc forces the tables to be freed too. */
671 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
673 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
676 /* WM Image / Screen codecs allocate internal buffers with different
677 * dimensions / colorspaces; ignore user-defined callbacks for these. */
678 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
679 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
680 s->codec_id != AV_CODEC_ID_MSS2)
681 ff_thread_release_buffer(s->avctx, &pic->tf);
683 av_frame_unref(pic->f);
685 av_buffer_unref(&pic->hwaccel_priv_buf);
687 if (pic->needs_realloc)
688 ff_free_picture_tables(pic);
690 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's side-data table buffers reference the same underlying
 * AVBuffers as src's (re-referencing only when the buffers differ),
 * then copy the derived data pointers. On a failed ref, dst's tables
 * are freed and ENOMEM is returned. */
693 static int update_picture_tables(Picture *dst, Picture *src)
697 #define UPDATE_TABLE(table)\
700 (!dst->table || dst->table->buffer != src->table->buffer)) {\
701 av_buffer_unref(&dst->table);\
702 dst->table = av_buffer_ref(src->table);\
704 ff_free_picture_tables(dst);\
705 return AVERROR(ENOMEM);\
710 UPDATE_TABLE(mb_var_buf);
711 UPDATE_TABLE(mc_mb_var_buf);
712 UPDATE_TABLE(mb_mean_buf);
713 UPDATE_TABLE(mbskip_table_buf);
714 UPDATE_TABLE(qscale_table_buf);
715 UPDATE_TABLE(mb_type_buf);
716 for (i = 0; i < 2; i++) {
717 UPDATE_TABLE(motion_val_buf[i]);
718 UPDATE_TABLE(ref_index_buf[i]);
721 dst->mb_var = src->mb_var;
722 dst->mc_mb_var = src->mc_mb_var;
723 dst->mb_mean = src->mb_mean;
724 dst->mbskip_table = src->mbskip_table;
725 dst->qscale_table = src->qscale_table;
726 dst->mb_type = src->mb_type;
727 for (i = 0; i < 2; i++) {
728 dst->motion_val[i] = src->motion_val[i];
729 dst->ref_index[i] = src->ref_index[i];
/* Reference src's frame, tables and hwaccel private data into dst
 * (dst must be empty on entry — asserted below), then copy the scalar
 * metadata. On any failure dst is fully unreferenced before the error
 * is propagated (the fail path at the bottom). */
735 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
739 av_assert0(!dst->f->buf[0]);
740 av_assert0(src->f->buf[0]);
744 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
748 ret = update_picture_tables(dst, src);
752 if (src->hwaccel_picture_private) {
753 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
754 if (!dst->hwaccel_priv_buf)
756 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
759 dst->field_picture = src->field_picture;
760 dst->mb_var_sum = src->mb_var_sum;
761 dst->mc_mb_var_sum = src->mc_mb_var_sum;
762 dst->b_frame_score = src->b_frame_score;
763 dst->needs_realloc = src->needs_realloc;
764 dst->reference = src->reference;
765 dst->shared = src->shared;
769 ff_mpeg_unref_picture(s, dst);
/* Swap the U and V block pointers (used for the VCR2 codec tag, see
 * init_duplicate_context / ff_update_duplicate_context).
 * NOTE(review): only one assignment of the swap is visible in this
 * excerpt — the temporary and the second assignment are elided. */
773 static void exchange_uv(MpegEncContext *s)
778 s->pblocks[4] = s->pblocks[5];
/* Allocate the per-slice-thread state: motion-estimation maps, the
 * optional DCT noise-reduction accumulator, the 12 coefficient blocks
 * (with pblocks pointing at them, U/V swapped for VCR2), and for
 * H.263-family the AC prediction values with per-plane base pointers
 * offset past the guard row/column. Returns 0 or -1; partially
 * allocated buffers are freed later through ff_mpv_common_end(). */
782 static int init_duplicate_context(MpegEncContext *s)
784 int y_size = s->b8_stride * (2 * s->mb_height + 1);
785 int c_size = s->mb_stride * (s->mb_height + 1);
786 int yc_size = y_size + 2 * c_size;
794 s->obmc_scratchpad = NULL;
797 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
798 ME_MAP_SIZE * sizeof(uint32_t), fail)
799 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
800 ME_MAP_SIZE * sizeof(uint32_t), fail)
801 if (s->avctx->noise_reduction) {
802 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
803 2 * 64 * sizeof(int), fail)
806 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
807 s->block = s->blocks[0];
809 for (i = 0; i < 12; i++) {
810 s->pblocks[i] = &s->block[i];
812 if (s->avctx->codec_tag == AV_RL32("VCR2"))
815 if (s->out_format == FMT_H263) {
817 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
818 yc_size * sizeof(int16_t) * 16, fail);
819 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
820 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
821 s->ac_val[2] = s->ac_val[1] + c_size;
826 return -1; // free() through ff_mpv_common_end()
/* Free all per-slice-thread buffers allocated by init_duplicate_context
 * and frame_size_alloc. obmc_scratchpad aliases me.scratchpad, so it is
 * only NULLed, never freed separately. */
829 static void free_duplicate_context(MpegEncContext *s)
834 av_freep(&s->edge_emu_buffer);
835 av_freep(&s->me.scratchpad);
839 s->obmc_scratchpad = NULL;
841 av_freep(&s->dct_error_sum);
842 av_freep(&s->me.map);
843 av_freep(&s->me.score_map);
844 av_freep(&s->blocks);
845 av_freep(&s->ac_val_base);
/* Copy the per-thread-owned pointer/state fields from src into bak so
 * they survive the struct-wide memcpy in ff_update_duplicate_context.
 * Only a few of the COPY() lines are visible in this excerpt. */
849 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
851 #define COPY(a) bak->a = src->a
852 COPY(edge_emu_buffer);
857 COPY(obmc_scratchpad);
864 COPY(me.map_generation);
/* Synchronize a slice-thread context with the main one: bulk-copy src
 * over dst while preserving dst's own buffers (saved/restored via
 * backup_duplicate_context), rebuild the pblocks pointers (U/V swapped
 * for VCR2), and lazily allocate dst's scratch buffers for the current
 * linesize. */
876 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
880 // FIXME copy only needed parts
882 backup_duplicate_context(&bak, dst);
883 memcpy(dst, src, sizeof(MpegEncContext));
884 backup_duplicate_context(dst, &bak);
885 for (i = 0; i < 12; i++) {
886 dst->pblocks[i] = &dst->block[i];
888 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
890 if (!dst->edge_emu_buffer &&
891 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
892 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
893 "scratch buffers.\n");
896 // STOP_TIMER("update_duplicate_context")
897 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading state transfer: copy decoding state from the source
 * thread's context (s1) into this thread's context (s).
 * Handles first-time init (struct copy + common init), mid-stream
 * size/reinit changes, re-referencing of all pictures, rebasing of the
 * last/current/next picture pointers into this context's picture
 * array, bulk memcpy of the time-base..pb_field_time and
 * progressive_sequence..rtp_mode field ranges (layout-dependent!), a
 * copy of any pending bitstream buffer, and lazy scratch-buffer
 * allocation. */
901 int ff_mpeg_update_thread_context(AVCodecContext *dst,
902 const AVCodecContext *src)
905 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
907 if (dst == src || !s1->context_initialized)
910 // FIXME can parameters change on I-frames?
911 // in that case dst may need a reinit
912 if (!s->context_initialized) {
914 memcpy(s, s1, sizeof(MpegEncContext));
917 s->bitstream_buffer = NULL;
918 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
921 if ((err = ff_mpv_common_init(s)) < 0)
925 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
927 s->context_reinit = 0;
928 s->height = s1->height;
929 s->width = s1->width;
930 if ((err = ff_mpv_common_frame_size_change(s)) < 0)
934 s->avctx->coded_height = s1->avctx->coded_height;
935 s->avctx->coded_width = s1->avctx->coded_width;
936 s->avctx->width = s1->avctx->width;
937 s->avctx->height = s1->avctx->height;
939 s->coded_picture_number = s1->coded_picture_number;
940 s->picture_number = s1->picture_number;
942 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
943 ff_mpeg_unref_picture(s, &s->picture[i]);
944 if (s1->picture[i].f->buf[0] &&
945 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
949 #define UPDATE_PICTURE(pic)\
951 ff_mpeg_unref_picture(s, &s->pic);\
952 if (s1->pic.f->buf[0])\
953 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
955 ret = update_picture_tables(&s->pic, &s1->pic);\
960 UPDATE_PICTURE(current_picture);
961 UPDATE_PICTURE(last_picture);
962 UPDATE_PICTURE(next_picture);
964 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
965 ((pic && pic >= old_ctx->picture && \
966 pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
967 &new_ctx->picture[pic - old_ctx->picture] : NULL)
969 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
970 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
971 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
973 // Error/bug resilience
974 s->next_p_frame_damaged = s1->next_p_frame_damaged;
975 s->workaround_bugs = s1->workaround_bugs;
978 memcpy(&s->last_time_base, &s1->last_time_base,
979 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
980 (char *) &s1->last_time_base);
983 s->max_b_frames = s1->max_b_frames;
984 s->low_delay = s1->low_delay;
985 s->droppable = s1->droppable;
987 // DivX handling (doesn't work)
988 s->divx_packed = s1->divx_packed;
990 if (s1->bitstream_buffer) {
991 if (s1->bitstream_buffer_size +
992 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
993 av_fast_malloc(&s->bitstream_buffer,
994 &s->allocated_bitstream_buffer_size,
995 s1->allocated_bitstream_buffer_size);
996 s->bitstream_buffer_size = s1->bitstream_buffer_size;
997 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
998 s1->bitstream_buffer_size);
999 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1000 FF_INPUT_BUFFER_PADDING_SIZE);
1003 // linesize dependend scratch buffer allocation
1004 if (!s->edge_emu_buffer)
1006 if (frame_size_alloc(s, s1->linesize) < 0) {
1007 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1008 "scratch buffers.\n");
1009 return AVERROR(ENOMEM);
1012 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1013 "be allocated due to unknown size.\n");
1017 // MPEG2/interlacing info
1018 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1019 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence)
1021 if (!s1->first_field) {
1022 s->last_pict_type = s1->pict_type;
1023 if (s1->current_picture_ptr)
1024 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1031 * Set the given MpegEncContext to common defaults
1032 * (same for encoding and decoding).
1033 * The changed fields will not depend upon the
1034 * prior state of the MpegEncContext.
1036 void ff_mpv_common_defaults(MpegEncContext *s)
1038 s->y_dc_scale_table =
1039 s->c_dc_scale_table = ff_mpeg1_dc_scale_table; /* MPEG-1 default DC scale */
1040 s->chroma_qscale_table = ff_default_chroma_qscale_table; /* identity map */
1041 s->progressive_frame = 1;
1042 s->progressive_sequence = 1;
1043 s->picture_structure = PICT_FRAME;
1045 s->coded_picture_number = 0;
1046 s->picture_number = 0;
1051 s->slice_context_count = 1; /* single slice thread until configured */
1055 * Set the given MpegEncContext to defaults for decoding.
1056 * the changed fields will not depend upon
1057 * the prior state of the MpegEncContext.
1059 void ff_mpv_decode_defaults(MpegEncContext *s)
1061 ff_mpv_common_defaults(s); /* decoder currently adds nothing on top */
/* Initialize the error-resilience context: mirror the macroblock
 * geometry and shared tables from the codec context, allocate the
 * error status/temp buffers, and install mpeg_er_decode_mb as the MB
 * reconstruction callback. Returns 0 or AVERROR(ENOMEM) (both partial
 * buffers freed on failure). */
1064 static int init_er(MpegEncContext *s)
1066 ERContext *er = &s->er;
1067 int mb_array_size = s->mb_height * s->mb_stride;
1070 er->avctx = s->avctx;
1071 er->mecc = &s->mecc;
1073 er->mb_index2xy = s->mb_index2xy;
1074 er->mb_num = s->mb_num;
1075 er->mb_width = s->mb_width;
1076 er->mb_height = s->mb_height;
1077 er->mb_stride = s->mb_stride;
1078 er->b8_stride = s->b8_stride;
1080 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1081 er->error_status_table = av_mallocz(mb_array_size);
1082 if (!er->er_temp_buffer || !er->error_status_table)
1085 er->mbskip_table = s->mbskip_table;
1086 er->mbintra_table = s->mbintra_table;
1088 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1089 er->dc_val[i] = s->dc_val[i];
1091 er->decode_mb = mpeg_er_decode_mb;
1096 av_freep(&er->er_temp_buffer);
1097 av_freep(&er->error_status_table);
1098 return AVERROR(ENOMEM);
1102 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Derives the macroblock geometry (mb_width/stride, b8_stride, edge
 * positions), builds the mb_index2xy map, and allocates every
 * resolution-dependent table: the six MV tables (each pointer offset
 * by mb_stride+1 past the guard border), encoder-only statistics
 * tables, the interlaced/field MV tables for MPEG-4 or interlaced ME,
 * H.263 coded-block/cbp/pred_dir tables, DC prediction values
 * (initialized to 1024 = the reset value), the intra table (all 1 so
 * everything counts as intra until decoded) and the MB skip table.
 * Returns 0 or AVERROR(ENOMEM). */
1104 static int init_context_frame(MpegEncContext *s)
1106 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1108 s->mb_width = (s->width + 15) / 16;
1109 s->mb_stride = s->mb_width + 1;
1110 s->b8_stride = s->mb_width * 2 + 1;
1111 mb_array_size = s->mb_height * s->mb_stride;
1112 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1114 /* set default edge pos, will be overriden
1115 * in decode_header if needed */
1116 s->h_edge_pos = s->mb_width * 16;
1117 s->v_edge_pos = s->mb_height * 16;
1119 s->mb_num = s->mb_width * s->mb_height;
1124 s->block_wrap[3] = s->b8_stride;
1126 s->block_wrap[5] = s->mb_stride;
1128 y_size = s->b8_stride * (2 * s->mb_height + 1);
1129 c_size = s->mb_stride * (s->mb_height + 1);
1130 yc_size = y_size + 2 * c_size;
1132 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
1133 fail); // error ressilience code looks cleaner with this
1134 for (y = 0; y < s->mb_height; y++)
1135 for (x = 0; x < s->mb_width; x++)
1136 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1138 s->mb_index2xy[s->mb_height * s->mb_width] =
1139 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1142 /* Allocate MV tables */
1143 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
1144 mv_table_size * 2 * sizeof(int16_t), fail);
1145 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
1146 mv_table_size * 2 * sizeof(int16_t), fail);
1147 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
1148 mv_table_size * 2 * sizeof(int16_t), fail);
1149 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
1150 mv_table_size * 2 * sizeof(int16_t), fail);
1151 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
1152 mv_table_size * 2 * sizeof(int16_t), fail);
1153 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
1154 mv_table_size * 2 * sizeof(int16_t), fail);
1155 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1156 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1157 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1158 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
1160 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
1162 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1164 /* Allocate MB type table */
1165 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
1166 sizeof(uint16_t), fail); // needed for encoding
1168 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
1171 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1172 mb_array_size * sizeof(float), fail);
1173 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1174 mb_array_size * sizeof(float), fail);
1178 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1179 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1180 /* interlaced direct mode decoding tables */
1181 for (i = 0; i < 2; i++) {
1183 for (j = 0; j < 2; j++) {
1184 for (k = 0; k < 2; k++) {
1185 FF_ALLOCZ_OR_GOTO(s->avctx,
1186 s->b_field_mv_table_base[i][j][k],
1187 mv_table_size * 2 * sizeof(int16_t),
1189 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1192 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
1193 mb_array_size * 2 * sizeof(uint8_t), fail);
1194 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
1195 mv_table_size * 2 * sizeof(int16_t), fail);
1196 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
1199 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
1200 mb_array_size * 2 * sizeof(uint8_t), fail);
1203 if (s->out_format == FMT_H263) {
1205 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
1206 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1208 /* cbp, ac_pred, pred_dir */
1209 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
1210 mb_array_size * sizeof(uint8_t), fail);
1211 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
1212 mb_array_size * sizeof(uint8_t), fail);
1215 if (s->h263_pred || s->h263_plus || !s->encoding) {
1217 // MN: we need these for error resilience of intra-frames
1218 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
1219 yc_size * sizeof(int16_t), fail);
1220 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1221 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1222 s->dc_val[2] = s->dc_val[1] + c_size;
1223 for (i = 0; i < yc_size; i++)
1224 s->dc_val_base[i] = 1024;
1227 /* which mb is a intra block */
1228 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1229 memset(s->mbintra_table, 1, mb_array_size);
1231 /* init macroblock skip table */
1232 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1233 // Note the + 1 is for a quicker mpeg4 slice_end detection
1237 return AVERROR(ENOMEM);
1241 * init common structure for both encoder and decoder.
1242 * this assumes that some variables like width/height are already set
1244 av_cold int ff_mpv_common_init(MpegEncContext *s)
/* Start with one slice context unless slice threading is active. */
1247 int nb_slices = (HAVE_THREADS &&
1248 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1249 s->avctx->thread_count : 1;
1251 if (s->encoding && s->avctx->slices)
1252 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 uses pairs of 16-line field macroblock rows,
 * hence the rounding to 32 before doubling. */
1254 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1255 s->mb_height = (s->height + 31) / 32 * 2;
1257 s->mb_height = (s->height + 15) / 16;
1259 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1260 av_log(s->avctx, AV_LOG_ERROR,
1261 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp the slice count: more slices than MB rows (or than MAX_THREADS)
 * cannot be used. */
1265 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1268 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1270 max_slices = MAX_THREADS;
1271 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1272 " reducing to %d\n", nb_slices, max_slices);
1273 nb_slices = max_slices;
1276 if ((s->width || s->height) &&
1277 av_image_check_size(s->width, s->height, 0, s->avctx))
1282 s->flags = s->avctx->flags;
1283 s->flags2 = s->avctx->flags2;
1285 /* set chroma shifts */
1286 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1288 &s->chroma_y_shift);
1290 /* convert fourcc to upper case */
1291 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1293 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
/* Allocate the picture pool and an AVFrame for each slot, plus the four
 * dedicated picture slots (next/last/current/new). */
1295 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1296 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1297 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1298 s->picture[i].f = av_frame_alloc();
1299 if (!s->picture[i].f)
1302 memset(&s->next_picture, 0, sizeof(s->next_picture));
1303 memset(&s->last_picture, 0, sizeof(s->last_picture));
1304 memset(&s->current_picture, 0, sizeof(s->current_picture));
1305 memset(&s->new_picture, 0, sizeof(s->new_picture));
1306 s->next_picture.f = av_frame_alloc();
1307 if (!s->next_picture.f)
1309 s->last_picture.f = av_frame_alloc();
1310 if (!s->last_picture.f)
1312 s->current_picture.f = av_frame_alloc();
1313 if (!s->current_picture.f)
1315 s->new_picture.f = av_frame_alloc();
1316 if (!s->new_picture.f)
1319 if (s->width && s->height) {
1320 if (init_context_frame(s))
1323 s->parse_context.state = -1;
1326 s->context_initialized = 1;
1327 s->thread_context[0] = s;
/* For slice threading, clone the context once per extra slice and give
 * each clone a contiguous band of macroblock rows.
 * NOTE(review): the av_malloc result is memcpy'd without a NULL check
 * here — confirm against the elided lines whether failure is handled. */
1329 if (s->width && s->height) {
1330 if (nb_slices > 1) {
1331 for (i = 1; i < nb_slices; i++) {
1332 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1333 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1336 for (i = 0; i < nb_slices; i++) {
1337 if (init_duplicate_context(s->thread_context[i]) < 0)
1339 s->thread_context[i]->start_mb_y =
1340 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1341 s->thread_context[i]->end_mb_y =
1342 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1345 if (init_duplicate_context(s) < 0)
1348 s->end_mb_y = s->mb_height;
1350 s->slice_context_count = nb_slices;
/* fail path: tear everything down again. */
1355 ff_mpv_common_end(s);
1360 * Frees and resets MpegEncContext fields depending on the resolution.
1361 * Is used during resolution changes to avoid a full reinitialization of the
1364 static void free_context_frame(MpegEncContext *s)
/* Motion-vector tables: free the *_base allocations and NULL the derived
 * (offset) pointers so they cannot dangle. */
1368 av_freep(&s->mb_type);
1369 av_freep(&s->p_mv_table_base);
1370 av_freep(&s->b_forw_mv_table_base);
1371 av_freep(&s->b_back_mv_table_base);
1372 av_freep(&s->b_bidir_forw_mv_table_base);
1373 av_freep(&s->b_bidir_back_mv_table_base);
1374 av_freep(&s->b_direct_mv_table_base);
1375 s->p_mv_table = NULL;
1376 s->b_forw_mv_table = NULL;
1377 s->b_back_mv_table = NULL;
1378 s->b_bidir_forw_mv_table = NULL;
1379 s->b_bidir_back_mv_table = NULL;
1380 s->b_direct_mv_table = NULL;
/* Field-MV tables are indexed [list][dir][select]; mirror the pattern
 * above for every combination. */
1381 for (i = 0; i < 2; i++) {
1382 for (j = 0; j < 2; j++) {
1383 for (k = 0; k < 2; k++) {
1384 av_freep(&s->b_field_mv_table_base[i][j][k]);
1385 s->b_field_mv_table[i][j][k] = NULL;
1387 av_freep(&s->b_field_select_table[i][j]);
1388 av_freep(&s->p_field_mv_table_base[i][j]);
1389 s->p_field_mv_table[i][j] = NULL;
1391 av_freep(&s->p_field_select_table[i]);
/* Per-macroblock side tables (DC prediction, CBP, error resilience...). */
1394 av_freep(&s->dc_val_base);
1395 av_freep(&s->coded_block_base);
1396 av_freep(&s->mbintra_table);
1397 av_freep(&s->cbp_table);
1398 av_freep(&s->pred_dir_table);
1400 av_freep(&s->mbskip_table);
1402 av_freep(&s->er.error_status_table);
1403 av_freep(&s->er.er_temp_buffer);
1404 av_freep(&s->mb_index2xy);
1405 av_freep(&s->lambda_table);
1406 av_freep(&s->cplx_tab);
1407 av_freep(&s->bits_tab);
/* Force a recomputation of the strides on the next frame setup. */
1409 s->linesize = s->uvlinesize = 0;
/**
 * Reinitialize the resolution-dependent parts of the context after a
 * mid-stream frame size change, without a full codec reinit.
 * Tears down slice contexts and per-frame tables, recomputes mb_height,
 * then rebuilds everything at the new dimensions.
 */
1412 int ff_mpv_common_frame_size_change(MpegEncContext *s)
/* Drop all per-slice duplicate contexts (index 0 is s itself and is
 * not freed, only its duplicates). */
1416 if (s->slice_context_count > 1) {
1417 for (i = 0; i < s->slice_context_count; i++) {
1418 free_duplicate_context(s->thread_context[i]);
1420 for (i = 1; i < s->slice_context_count; i++) {
1421 av_freep(&s->thread_context[i]);
1424 free_duplicate_context(s);
1426 free_context_frame(s);
/* Existing pictures keep their buffers but are flagged for reallocation
 * at the new size when next reused. */
1429 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1430 s->picture[i].needs_realloc = 1;
1433 s->last_picture_ptr =
1434 s->next_picture_ptr =
1435 s->current_picture_ptr = NULL;
/* Same mb_height rule as in ff_mpv_common_init: interlaced MPEG-2
 * rounds to field-pair rows. */
1438 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1439 s->mb_height = (s->height + 31) / 32 * 2;
1441 s->mb_height = (s->height + 15) / 16;
1443 if ((s->width || s->height) &&
1444 (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
1447 if ((err = init_context_frame(s)))
1450 s->thread_context[0] = s;
/* Recreate the slice contexts exactly as the initial init did.
 * NOTE(review): as in ff_mpv_common_init, the visible lines show no NULL
 * check between av_malloc and memcpy — confirm against the elided code. */
1452 if (s->width && s->height) {
1453 int nb_slices = s->slice_context_count;
1454 if (nb_slices > 1) {
1455 for (i = 1; i < nb_slices; i++) {
1456 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1457 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1460 for (i = 0; i < nb_slices; i++) {
1461 if ((err = init_duplicate_context(s->thread_context[i])) < 0)
1463 s->thread_context[i]->start_mb_y =
1464 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1465 s->thread_context[i]->end_mb_y =
1466 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1469 if (init_duplicate_context(s) < 0)
1472 s->end_mb_y = s->mb_height;
1474 s->slice_context_count = nb_slices;
/* fail path: full teardown. */
1479 ff_mpv_common_end(s);
1483 /* init common structure for both encoder and decoder */
/* Despite the (stale) comment above, this is the TEARDOWN counterpart of
 * ff_mpv_common_init: frees slice contexts, bitstream buffers, the whole
 * picture pool and the per-frame tables, leaving s reusable. */
1484 void ff_mpv_common_end(MpegEncContext *s)
1488 if (s->slice_context_count > 1) {
1489 for (i = 0; i < s->slice_context_count; i++) {
1490 free_duplicate_context(s->thread_context[i]);
1492 for (i = 1; i < s->slice_context_count; i++) {
1493 av_freep(&s->thread_context[i]);
1495 s->slice_context_count = 1;
1496 } else free_duplicate_context(s);
1498 av_freep(&s->parse_context.buffer);
1499 s->parse_context.buffer_size = 0;
1501 av_freep(&s->bitstream_buffer);
1502 s->allocated_bitstream_buffer_size = 0;
/* Each pooled picture: free its side tables, drop buffer refs, then
 * free the AVFrame itself. */
1505 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1506 ff_free_picture_tables(&s->picture[i]);
1507 ff_mpeg_unref_picture(s, &s->picture[i]);
1508 av_frame_free(&s->picture[i].f);
1511 av_freep(&s->picture);
/* Same triple for the dedicated last/current/next/new slots. */
1512 ff_free_picture_tables(&s->last_picture);
1513 ff_mpeg_unref_picture(s, &s->last_picture);
1514 av_frame_free(&s->last_picture.f);
1515 ff_free_picture_tables(&s->current_picture);
1516 ff_mpeg_unref_picture(s, &s->current_picture);
1517 av_frame_free(&s->current_picture.f);
1518 ff_free_picture_tables(&s->next_picture);
1519 ff_mpeg_unref_picture(s, &s->next_picture);
1520 av_frame_free(&s->next_picture.f);
1521 ff_free_picture_tables(&s->new_picture);
1522 ff_mpeg_unref_picture(s, &s->new_picture);
1523 av_frame_free(&s->new_picture.f);
1525 free_context_frame(s);
1527 s->context_initialized = 0;
1528 s->last_picture_ptr =
1529 s->next_picture_ptr =
1530 s->current_picture_ptr = NULL;
1531 s->linesize = s->uvlinesize = 0;
/**
 * Initialize the derived lookup tables (max_level, max_run, index_run) of
 * an RLTable from its table_run/table_level arrays, for both the
 * "not last" and "last" coefficient classes.
 * If static_store is given, the results live in that caller-provided
 * storage (and a second call becomes a no-op); otherwise they are
 * av_malloc'ed.
 */
1534 av_cold void ff_init_rl(RLTable *rl,
1535 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1537 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1538 uint8_t index_run[MAX_RUN + 1];
1539 int last, run, level, start, end, i;
1541 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1542 if (static_store && rl->max_level[0])
1545 /* compute max_level[], max_run[] and index_run[] */
1546 for (last = 0; last < 2; last++) {
/* rl->n is used as the "unset" sentinel in index_run. */
1555 memset(max_level, 0, MAX_RUN + 1);
1556 memset(max_run, 0, MAX_LEVEL + 1);
1557 memset(index_run, rl->n, MAX_RUN + 1);
1558 for (i = start; i < end; i++) {
1559 run = rl->table_run[i];
1560 level = rl->table_level[i];
1561 if (index_run[run] == rl->n)
1563 if (level > max_level[run])
1564 max_level[run] = level;
1565 if (run > max_run[level])
1566 max_run[level] = run;
/* Commit: static path carves three sub-arrays out of static_store[last]
 * at offsets 0, MAX_RUN+1 and MAX_RUN+MAX_LEVEL+2; dynamic path
 * allocates each separately. */
1569 rl->max_level[last] = static_store[last];
1571 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1572 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1574 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1576 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1577 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1579 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1581 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1582 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Build the per-qscale RL-VLC decode tables: for each of the 32 quantizer
 * values, translate every VLC table entry into a (len, level, run) triple
 * with the dequantization (level * qmul + qadd) folded in.
 */
1586 av_cold void ff_init_vlc_rl(RLTable *rl)
1590 for (q = 0; q < 32; q++) {
/* (q - 1) | 1 makes qadd the odd rounding term used by the H.263-style
 * dequantizer. */
1592 int qadd = (q - 1) | 1;
1598 for (i = 0; i < rl->vlc.table_size; i++) {
1599 int code = rl->vlc.table[i][0];
1600 int len = rl->vlc.table[i][1];
/* The three special entry classes keep their handling in the elided
 * branches: illegal code, multi-level table continuation, and escape. */
1603 if (len == 0) { // illegal code
1606 } else if (len < 0) { // more bits needed
1610 if (code == rl->n) { // esc
1614 run = rl->table_run[code] + 1;
1615 level = rl->table_level[code] * qmul + qadd;
/* Codes at or past rl->last encode "last coefficient in block";
 * +192 tags the run so the decoder can detect that. */
1616 if (code >= rl->last) run += 192;
1619 rl->rl_vlc[q][i].len = len;
1620 rl->rl_vlc[q][i].level = level;
1621 rl->rl_vlc[q][i].run = run;
/* Drop the buffer references of every pooled picture that is not a
 * reference frame; the Picture slots themselves stay allocated. */
1626 static void release_unused_pictures(MpegEncContext *s)
1630 /* release non reference frames */
1631 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1632 if (!s->picture[i].reference)
1633 ff_mpeg_unref_picture(s, &s->picture[i]);
/* A slot is reusable if it has no buffer at all, or if it is flagged for
 * reallocation and not held as a delayed reference. Returns truthiness
 * per the elided return statements. */
1637 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1639 if (!pic->f->buf[0])
1641 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Find a free slot in the picture pool. Preference order: a slot with no
 * buffer at all, then any slot pic_is_unused() accepts. Returns the slot
 * index, or AVERROR_INVALIDDATA if the pool is exhausted.
 * NOTE(review): the 'shared' parameter's effect is in elided lines —
 * confirm its handling before relying on it. */
1646 static int find_unused_picture(MpegEncContext *s, int shared)
1651 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1652 if (!s->picture[i].f->buf[0])
1656 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1657 if (pic_is_unused(s, &s->picture[i]))
1662 return AVERROR_INVALIDDATA;
/* Public wrapper around find_unused_picture(): additionally resets a
 * needs_realloc slot by freeing its side tables and buffer refs before
 * handing the index back to the caller. */
1665 int ff_find_unused_picture(MpegEncContext *s, int shared)
1667 int ret = find_unused_picture(s, shared);
1669 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1670 if (s->picture[ret].needs_realloc) {
1671 s->picture[ret].needs_realloc = 0;
1672 ff_free_picture_tables(&s->picture[ret]);
1673 ff_mpeg_unref_picture(s, &s->picture[ret]);
1680 * generic function called after decoding
1681 * the header and before a frame is decoded.
1683 int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1689 /* mark & release old frames */
1690 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1691 s->last_picture_ptr != s->next_picture_ptr &&
1692 s->last_picture_ptr->f->buf[0]) {
1693 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1696 /* release forgotten pictures */
1697 /* if (mpeg124/h263) */
1698 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1699 if (&s->picture[i] != s->last_picture_ptr &&
1700 &s->picture[i] != s->next_picture_ptr &&
1701 s->picture[i].reference && !s->picture[i].needs_realloc) {
/* With frame threading such leftover references are expected; only
 * log an error in the single-threaded case. */
1702 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1703 av_log(avctx, AV_LOG_ERROR,
1704 "releasing zombie picture\n");
1705 ff_mpeg_unref_picture(s, &s->picture[i]);
1709 ff_mpeg_unref_picture(s, &s->current_picture);
1711 release_unused_pictures(s);
/* Pick the Picture that will hold the frame being decoded. */
1713 if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1714 // we already have a unused image
1715 // (maybe it was set before reading the header)
1716 pic = s->current_picture_ptr;
1718 i = ff_find_unused_picture(s, 0);
1720 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1723 pic = &s->picture[i];
1727 if (!s->droppable) {
1728 if (s->pict_type != AV_PICTURE_TYPE_B)
1732 pic->f->coded_picture_number = s->coded_picture_number++;
1734 if (ff_alloc_picture(s, pic, 0) < 0)
1737 s->current_picture_ptr = pic;
1738 // FIXME use only the vars from current_pic
/* Propagate interlacing metadata onto the output frame. */
1739 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1740 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1741 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1742 if (s->picture_structure != PICT_FRAME)
1743 s->current_picture_ptr->f->top_field_first =
1744 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1746 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1747 !s->progressive_sequence;
1748 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1750 s->current_picture_ptr->f->pict_type = s->pict_type;
1751 // if (s->flags && CODEC_FLAG_QSCALE)
1752 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1753 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1755 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1756 s->current_picture_ptr)) < 0)
/* Non-B frames rotate the reference chain: current becomes the new
 * "next", the old "next" becomes "last". */
1759 if (s->pict_type != AV_PICTURE_TYPE_B) {
1760 s->last_picture_ptr = s->next_picture_ptr;
1762 s->next_picture_ptr = s->current_picture_ptr;
1764 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1765 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1766 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1767 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1768 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1769 s->pict_type, s->droppable);
/* Missing reference handling: synthesize a gray dummy "last" picture so
 * decoding can proceed when the stream starts on a non-keyframe or a
 * field-based keyframe. */
1771 if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1772 (s->pict_type != AV_PICTURE_TYPE_I ||
1773 s->picture_structure != PICT_FRAME)) {
1774 int h_chroma_shift, v_chroma_shift;
1775 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1776 &h_chroma_shift, &v_chroma_shift);
1777 if (s->pict_type != AV_PICTURE_TYPE_I)
1778 av_log(avctx, AV_LOG_ERROR,
1779 "warning: first frame is no keyframe\n");
1780 else if (s->picture_structure != PICT_FRAME)
1781 av_log(avctx, AV_LOG_INFO,
1782 "allocate dummy last picture for field based first keyframe\n");
1784 /* Allocate a dummy frame */
1785 i = ff_find_unused_picture(s, 0);
1787 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1790 s->last_picture_ptr = &s->picture[i];
1792 s->last_picture_ptr->reference = 3;
1793 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1795 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1796 s->last_picture_ptr = NULL;
/* Fill luma with black (0) and chroma with neutral gray (0x80). */
1800 memset(s->last_picture_ptr->f->data[0], 0,
1801 avctx->height * s->last_picture_ptr->f->linesize[0]);
1802 memset(s->last_picture_ptr->f->data[1], 0x80,
1803 (avctx->height >> v_chroma_shift) *
1804 s->last_picture_ptr->f->linesize[1]);
1805 memset(s->last_picture_ptr->f->data[2], 0x80,
1806 (avctx->height >> v_chroma_shift) *
1807 s->last_picture_ptr->f->linesize[2]);
1809 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1810 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same dummy-frame trick for a missing "next" reference of a B frame. */
1812 if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1813 s->pict_type == AV_PICTURE_TYPE_B) {
1814 /* Allocate a dummy frame */
1815 i = ff_find_unused_picture(s, 0);
1817 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1820 s->next_picture_ptr = &s->picture[i];
1822 s->next_picture_ptr->reference = 3;
1823 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1825 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1826 s->next_picture_ptr = NULL;
1829 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1830 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
/* Refresh the working last/next Picture copies from the pointers. */
1833 if (s->last_picture_ptr) {
1834 ff_mpeg_unref_picture(s, &s->last_picture);
1835 if (s->last_picture_ptr->f->buf[0] &&
1836 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1837 s->last_picture_ptr)) < 0)
1840 if (s->next_picture_ptr) {
1841 ff_mpeg_unref_picture(s, &s->next_picture);
1842 if (s->next_picture_ptr->f->buf[0] &&
1843 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1844 s->next_picture_ptr)) < 0)
1848 if (s->pict_type != AV_PICTURE_TYPE_I &&
1849 !(s->last_picture_ptr && s->last_picture_ptr->f->buf[0])) {
1850 av_log(s, AV_LOG_ERROR,
1851 "Non-reference picture received and no reference available\n");
1852 return AVERROR_INVALIDDATA;
/* Field pictures: offset data to the selected field and double the
 * strides so one field is addressed like a full frame. */
1855 if (s->picture_structure!= PICT_FRAME) {
1857 for (i = 0; i < 4; i++) {
1858 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1859 s->current_picture.f->data[i] +=
1860 s->current_picture.f->linesize[i];
1862 s->current_picture.f->linesize[i] *= 2;
1863 s->last_picture.f->linesize[i] *= 2;
1864 s->next_picture.f->linesize[i] *= 2;
1868 s->err_recognition = avctx->err_recognition;
1870 /* set dequantizer, we can't do it during init as
1871 * it might change for mpeg4 and we can't do it in the header
1872 * decode as init is not called for mpeg4 there yet */
1873 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1874 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1875 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1876 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1877 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1878 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1880 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1881 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
/* Deprecated XvMC acceleration hook (compiled under FF_API_XVMC). */
1885 FF_DISABLE_DEPRECATION_WARNINGS
1886 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1887 return ff_xvmc_field_start(s, avctx);
1888 FF_ENABLE_DEPRECATION_WARNINGS
1889 #endif /* FF_API_XVMC */
1894 /* called after a frame has been decoded. */
1895 void ff_mpv_frame_end(MpegEncContext *s)
/* Deprecated XvMC teardown hook (under FF_API_XVMC). */
1898 FF_DISABLE_DEPRECATION_WARNINGS
1899 /* redraw edges for the frame if decoding didn't complete */
1900 // just to make sure that all data is rendered.
1901 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1902 ff_xvmc_field_end(s);
1904 FF_ENABLE_DEPRECATION_WARNINGS
1905 #endif /* FF_API_XVMC */
/* Unblock frame-threaded consumers waiting on this reference frame. */
1909 if (s->current_picture.reference)
1910 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1914 * Print debugging info for the given picture.
1916 void ff_print_debug_info(MpegEncContext *s, Picture *p)
/* Nothing to print for hwaccel surfaces or pictures without MB metadata. */
1919 if (s->avctx->hwaccel || !p || !p->mb_type)
1923 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1926 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
/* NOTE(review): 'pict' is not a parameter here; its declaration is in the
 * elided lines — presumably an AVFrame derived from p. Confirm. */
1927 switch (pict->pict_type) {
1928 case AV_PICTURE_TYPE_I:
1929 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1931 case AV_PICTURE_TYPE_P:
1932 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1934 case AV_PICTURE_TYPE_B:
1935 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1937 case AV_PICTURE_TYPE_S:
1938 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1940 case AV_PICTURE_TYPE_SI:
1941 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1943 case AV_PICTURE_TYPE_SP:
1944 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
/* Emit a character map of the frame: one cell per macroblock, with skip
 * count, qscale and/or a type glyph depending on the debug flags. */
1947 for (y = 0; y < s->mb_height; y++) {
1948 for (x = 0; x < s->mb_width; x++) {
1949 if (s->avctx->debug & FF_DEBUG_SKIP) {
1950 int count = s->mbskip_table[x + y * s->mb_stride];
1953 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1955 if (s->avctx->debug & FF_DEBUG_QP) {
1956 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1957 p->qscale_table[x + y * s->mb_stride]);
1959 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1960 int mb_type = p->mb_type[x + y * s->mb_stride];
1961 // Type & MV direction
1962 if (IS_PCM(mb_type))
1963 av_log(s->avctx, AV_LOG_DEBUG, "P");
1964 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1965 av_log(s->avctx, AV_LOG_DEBUG, "A");
1966 else if (IS_INTRA4x4(mb_type))
1967 av_log(s->avctx, AV_LOG_DEBUG, "i");
1968 else if (IS_INTRA16x16(mb_type))
1969 av_log(s->avctx, AV_LOG_DEBUG, "I");
1970 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1971 av_log(s->avctx, AV_LOG_DEBUG, "d");
1972 else if (IS_DIRECT(mb_type))
1973 av_log(s->avctx, AV_LOG_DEBUG, "D");
1974 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1975 av_log(s->avctx, AV_LOG_DEBUG, "g");
1976 else if (IS_GMC(mb_type))
1977 av_log(s->avctx, AV_LOG_DEBUG, "G");
1978 else if (IS_SKIP(mb_type))
1979 av_log(s->avctx, AV_LOG_DEBUG, "S");
1980 else if (!USES_LIST(mb_type, 1))
1981 av_log(s->avctx, AV_LOG_DEBUG, ">");
1982 else if (!USES_LIST(mb_type, 0))
1983 av_log(s->avctx, AV_LOG_DEBUG, "<");
1985 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1986 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second glyph: partition segmentation. */
1990 if (IS_8X8(mb_type))
1991 av_log(s->avctx, AV_LOG_DEBUG, "+");
1992 else if (IS_16X8(mb_type))
1993 av_log(s->avctx, AV_LOG_DEBUG, "-");
1994 else if (IS_8X16(mb_type))
1995 av_log(s->avctx, AV_LOG_DEBUG, "|");
1996 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1997 av_log(s->avctx, AV_LOG_DEBUG, " ");
1999 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* Third glyph: interlaced DCT marker. */
2002 if (IS_INTERLACED(mb_type))
2003 av_log(s->avctx, AV_LOG_DEBUG, "=");
2005 av_log(s->avctx, AV_LOG_DEBUG, " ");
2008 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2014 * find the lowest MB row referenced in the MVs
2016 int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
/* MVs are in half-pel units unless quarter_sample, hence the extra shift
 * for the half-pel case. */
2018 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2019 int my, off, i, mvs;
/* Field pictures / GMC fall through to the conservative answer below. */
2021 if (s->picture_structure != PICT_FRAME || s->mcsel)
/* mvs is set per motion type in the elided switch cases. */
2024 switch (s->mv_type) {
2038 for (i = 0; i < mvs; i++) {
2039 my = s->mv[dir][i][1]<<qpel_shift;
2040 my_max = FFMAX(my_max, my);
2041 my_min = FFMIN(my_min, my);
/* Convert the largest vertical displacement (quarter-pel) into a row
 * offset: +63 >> 6 rounds up to whole 16-pixel MB rows. */
2044 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2046 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* Conservative fallback: assume the whole reference frame is needed. */
2048 return s->mb_height-1;
2051 /* put block[] to dest[] */
/* Intra path: dequantize the coefficients, then write (not add) the IDCT
 * result into dest. */
2052 static inline void put_dct(MpegEncContext *s,
2053 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2055 s->dct_unquantize_intra(s, block, i, qscale);
2056 s->idsp.idct_put(dest, line_size, block);
2059 /* add block[] to dest[] */
/* Inter path, already-dequantized coefficients: accumulate the IDCT
 * result onto the prediction in dest, but only if the block has any
 * nonzero coefficients (block_last_index >= 0). */
2060 static inline void add_dct(MpegEncContext *s,
2061 int16_t *block, int i, uint8_t *dest, int line_size)
2063 if (s->block_last_index[i] >= 0) {
2064 s->idsp.idct_add(dest, line_size, block);
/* Inter path with dequantization: like add_dct() but runs the inter
 * dequantizer first. Skipped entirely for empty blocks. */
2068 static inline void add_dequant_dct(MpegEncContext *s,
2069 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2071 if (s->block_last_index[i] >= 0) {
2072 s->dct_unquantize_inter(s, block, i, qscale);
2074 s->idsp.idct_add(dest, line_size, block);
2079 * Clean dc, ac, coded_block for the current non-intra MB.
2081 void ff_clean_intra_table_entries(MpegEncContext *s)
2083 int wrap = s->b8_stride;
2084 int xy = s->block_index[0];
/* Reset luma DC predictors of the current MB's four 8x8 blocks to the
 * neutral value 1024 (matches the init in init_context_frame). */
2087 s->dc_val[0][xy + 1 ] =
2088 s->dc_val[0][xy + wrap] =
2089 s->dc_val[0][xy + 1 + wrap] = 1024;
/* Clear the AC prediction rows for the two luma block lines. */
2091 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2092 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2093 if (s->msmpeg4_version>=3) {
2094 s->coded_block[xy ] =
2095 s->coded_block[xy + 1 ] =
2096 s->coded_block[xy + wrap] =
2097 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma uses mb_stride-based indexing (one entry per MB). */
2100 wrap = s->mb_stride;
2101 xy = s->mb_x + s->mb_y * wrap;
2103 s->dc_val[2][xy] = 1024;
2105 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2106 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2108 s->mbintra_table[xy]= 0;
2111 /* generic function called after a macroblock has been parsed by the
2112 decoder or after it has been encoded by the encoder.
2114 Important variables used:
2115 s->mb_intra : true if intra macroblock
2116 s->mv_dir : motion vector direction
2117 s->mv_type : motion vector type
2118 s->mv : motion vector
2119 s->interlaced_dct : true if interlaced dct used (mpeg2)
2121 static av_always_inline
2122 void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2125 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* Deprecated XvMC path: hands the whole MB off and skips normal
 * reconstruction. */
2128 FF_DISABLE_DEPRECATION_WARNINGS
2129 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2130 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2133 FF_ENABLE_DEPRECATION_WARNINGS
2134 #endif /* FF_API_XVMC */
2136 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2137 /* print DCT coefficients */
2139 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2141 for(j=0; j<64; j++){
2142 av_log(s->avctx, AV_LOG_DEBUG, "%5d",
2143 block[i][s->idsp.idct_permutation[j]]);
2145 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2149 s->current_picture.qscale_table[mb_xy] = s->qscale;
2151 /* update DC predictors for P macroblocks */
2153 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2154 if(s->mbintra_table[mb_xy])
2155 ff_clean_intra_table_entries(s);
2159 s->last_dc[2] = 128 << s->intra_dc_precision;
2162 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2163 s->mbintra_table[mb_xy]=1;
/* Reconstruction proper — skipped only when encoding B/intra-only frames
 * without PSNR, where the reconstructed pixels are never needed. */
2165 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2166 uint8_t *dest_y, *dest_cb, *dest_cr;
2167 int dct_linesize, dct_offset;
2168 op_pixels_func (*op_pix)[4];
2169 qpel_mc_func (*op_qpix)[16];
2170 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2171 const int uvlinesize = s->current_picture.f->linesize[1];
2172 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2173 const int block_size = 8;
2175 /* avoid copy if macroblock skipped in last frame too */
2176 /* skip only during decoding as we might trash the buffers during encoding a bit */
2178 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2180 if (s->mb_skipped) {
2182 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2184 } else if(!s->current_picture.reference) {
2187 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT interleaves the two fields: double stride, and the
 * "lower" blocks start one line (not 8 lines) down. */
2191 dct_linesize = linesize << s->interlaced_dct;
2192 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
/* Unreadable destinations (B frames mid-decode) render into the
 * scratchpad and are copied out at the end. */
2196 dest_cb= s->dest[1];
2197 dest_cr= s->dest[2];
2199 dest_y = s->b_scratchpad;
2200 dest_cb= s->b_scratchpad+16*linesize;
2201 dest_cr= s->b_scratchpad+32*linesize;
2205 /* motion handling */
2206 /* decoding or more than one mb_type (MC was already done otherwise) */
/* With frame threading, wait until the reference frames have decoded
 * every row our MVs can reach. */
2209 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2210 if (s->mv_dir & MV_DIR_FORWARD) {
2211 ff_thread_await_progress(&s->last_picture_ptr->tf,
2212 ff_mpv_lowest_referenced_row(s, 0),
2215 if (s->mv_dir & MV_DIR_BACKWARD) {
2216 ff_thread_await_progress(&s->next_picture_ptr->tf,
2217 ff_mpv_lowest_referenced_row(s, 1),
/* First prediction direction uses put (overwrite); a second direction
 * switches to avg for bidirectional averaging. */
2222 op_qpix= s->me.qpel_put;
2223 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2224 op_pix = s->hdsp.put_pixels_tab;
2226 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2228 if (s->mv_dir & MV_DIR_FORWARD) {
2229 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2230 op_pix = s->hdsp.avg_pixels_tab;
2231 op_qpix= s->me.qpel_avg;
2233 if (s->mv_dir & MV_DIR_BACKWARD) {
2234 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2238 /* skip dequant / idct if we are really late ;) */
2239 if(s->avctx->skip_idct){
2240 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2241 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2242 || s->avctx->skip_idct >= AVDISCARD_ALL)
2246 /* add dct residue */
/* Codecs whose dequant was not already folded into parsing go through
 * add_dequant_dct; the rest use plain add_dct below. */
2247 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2248 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2249 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2250 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2251 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2252 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2254 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2255 if (s->chroma_y_shift){
2256 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2257 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2261 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2262 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2263 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2264 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2267 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2268 add_dct(s, block[0], 0, dest_y , dct_linesize);
2269 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2270 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2271 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2273 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2274 if(s->chroma_y_shift){//Chroma420
2275 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2276 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2279 dct_linesize = uvlinesize << s->interlaced_dct;
2280 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2282 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2283 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2284 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2285 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2286 if(!s->chroma_x_shift){//Chroma444
2287 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2288 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2289 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2290 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2295 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2296 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2299 /* dct only in intra block */
/* Intra MB: no prediction to add onto; either dequant+put (codecs not
 * dequantized at parse time) or a direct idct_put. */
2300 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2301 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2302 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2303 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2304 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2306 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2307 if(s->chroma_y_shift){
2308 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2309 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2313 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2314 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2315 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2316 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2320 s->idsp.idct_put(dest_y, dct_linesize, block[0]);
2321 s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2322 s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
2323 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2325 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2326 if(s->chroma_y_shift){
2327 s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
2328 s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
2331 dct_linesize = uvlinesize << s->interlaced_dct;
2332 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2334 s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
2335 s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
2336 s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2337 s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2338 if(!s->chroma_x_shift){//Chroma444
2339 s->idsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2340 s->idsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2341 s->idsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2342 s->idsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* Scratchpad case: copy the reconstructed MB back into the real frame. */
2350 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2351 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2352 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Thin dispatcher: instantiate the always-inline MB reconstruction with a
 * compile-time is_mpeg12 flag so each path is specialized. */
2357 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
2360 if(s->out_format == FMT_MPEG1) {
2361 mpv_decode_mb_internal(s, block, 1);
2364 mpv_decode_mb_internal(s, block, 0);
/* Forward a finished band of rows to the user's draw_horiz_band callback
 * via the generic helper, passing current/last picture and field state. */
2367 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2369 ff_draw_horiz_band(s->avctx, s->current_picture.f,
2370 s->last_picture.f, y, h, s->picture_structure,
2371 s->first_field, s->low_delay);
/* Compute the per-MB block indices (4 luma + 2 chroma positions in the
 * DC/AC prediction arrays) and the dest[] pixel pointers for the current
 * macroblock column; called once per MB row advance. */
2374 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2375 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2376 const int uvlinesize = s->current_picture.f->linesize[1];
2377 const int mb_size= 4;
/* Luma blocks use b8_stride (two per MB row); note the -2/-1 bias so the
 * indices point one MB left of mb_x before the per-MB increment. */
2379 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2380 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2381 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2382 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2383 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2384 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2385 //block_index is not used by mpeg2, so it is not affected by chroma_format
2387 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
2388 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2389 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2391 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
/* Field pictures address every other line, hence mb_y>>1 below. */
2393 if(s->picture_structure==PICT_FRAME){
2394 s->dest[0] += s->mb_y * linesize << mb_size;
2395 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2396 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2398 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2399 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2400 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2401 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 *                  (inverse) permutated to scantable order!
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int i;
    int16_t temp[64];

    if (last <= 0)
        return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* First pass: gather the nonzero coefficients (in scantable order) into
     * temp and clear their original slots, so the scatter below cannot
     * overwrite a value that has not been saved yet. */
    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
        temp[j]  = block[j];
        block[j] = 0;
    }
    /* Second pass: scatter each saved coefficient to its permuted position. */
    for (i = 0; i <= last; i++) {
        const int j      = scantable[i];
        const int perm_j = permutation[j];
        block[perm_j] = temp[j];
    }
}
2435 void ff_mpeg_flush(AVCodecContext *avctx){
2437 MpegEncContext *s = avctx->priv_data;
2439 if (!s || !s->picture)
2442 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2443 ff_mpeg_unref_picture(s, &s->picture[i]);
2444 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2446 ff_mpeg_unref_picture(s, &s->current_picture);
2447 ff_mpeg_unref_picture(s, &s->last_picture);
2448 ff_mpeg_unref_picture(s, &s->next_picture);
2450 s->mb_x= s->mb_y= 0;
2452 s->parse_context.state= -1;
2453 s->parse_context.frame_start_found= 0;
2454 s->parse_context.overread= 0;
2455 s->parse_context.overread_index= 0;
2456 s->parse_context.index= 0;
2457 s->parse_context.last_index= 0;
2458 s->bitstream_buffer_size=0;
2463 * set qscale and update qscale dependent variables.
2465 void ff_set_qscale(MpegEncContext * s, int qscale)
2469 else if (qscale > 31)
2473 s->chroma_qscale= s->chroma_qscale_table[qscale];
2475 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2476 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2479 void ff_mpv_report_decode_progress(MpegEncContext *s)
2481 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2482 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);