2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
40 #include "mpegutils.h"
41 #include "mpegvideo.h"
45 #include "xvmc_internal.h"
/* Default luma->chroma qscale mapping: the identity (chroma quantizer equals
 * luma quantizer for all 32 possible qscale values). Codecs with a nonlinear
 * chroma quantizer install their own table instead. */
49 static const uint8_t ff_default_chroma_qscale_table[32] = {
50 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
51 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
52 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 intra DC scale: constant 8 for every qscale (MPEG-1 has no
 * intra_dc_precision; DC coefficients are always quantized with step 8).
 * Indexed by qscale; 128 entries so out-of-spec qscale values stay in range. */
55 const uint8_t ff_mpeg1_dc_scale_table[128] = {
56 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
57 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 intra DC scale for intra_dc_precision == 1 (9-bit DC): constant 4.
 * Selected via ff_mpeg2_dc_scale_table[] below. */
67 static const uint8_t mpeg2_dc_scale_table1[128] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 intra DC scale for intra_dc_precision == 2 (10-bit DC): constant 2. */
79 static const uint8_t mpeg2_dc_scale_table2[128] = {
80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 intra DC scale for intra_dc_precision == 3 (11-bit DC): constant 1. */
91 static const uint8_t mpeg2_dc_scale_table3[128] = {
92 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
93 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Intra DC scale tables indexed by MPEG-2 intra_dc_precision (0..3);
 * index 0 (8-bit DC, scale 8) reuses the MPEG-1 table. */
103 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
104 ff_mpeg1_dc_scale_table,
105 mpeg2_dc_scale_table1,
106 mpeg2_dc_scale_table2,
107 mpeg2_dc_scale_table3,
/* Alternate horizontal coefficient scan order (raster index per scan
 * position); used as intra_h_scantable in ff_mpv_idct_init(). */
110 const uint8_t ff_alternate_horizontal_scan[64] = {
111 0, 1, 2, 3, 8, 9, 16, 17,
112 10, 11, 4, 5, 6, 7, 15, 14,
113 13, 12, 19, 18, 24, 25, 32, 33,
114 26, 27, 20, 21, 22, 23, 28, 29,
115 30, 31, 34, 35, 40, 41, 48, 49,
116 42, 43, 36, 37, 38, 39, 44, 45,
117 46, 47, 50, 51, 56, 57, 58, 59,
118 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical coefficient scan order; selected for both inter and
 * intra scantables when s->alternate_scan is set (interlaced MPEG-2). */
121 const uint8_t ff_alternate_vertical_scan[64] = {
122 0, 8, 16, 24, 1, 9, 2, 10,
123 17, 25, 32, 40, 48, 56, 57, 49,
124 41, 33, 26, 18, 3, 11, 4, 12,
125 19, 27, 34, 42, 50, 58, 35, 43,
126 51, 59, 20, 28, 5, 13, 6, 14,
127 21, 29, 36, 44, 52, 60, 37, 45,
128 53, 61, 22, 30, 7, 15, 23, 31,
129 38, 46, 54, 62, 39, 47, 55, 63,
/* Inverse quantization for an MPEG-1 intra block (C reference version).
 * block: DCT coefficients in permuted order; n: block index (selects
 * luma vs chroma DC scale); qscale: quantizer scale.
 * DC (block[0]) is multiplied by y_dc_scale/c_dc_scale; AC levels are
 * scaled by qscale * intra matrix, >>3, then forced odd ((level-1)|1).
 * NOTE(review): this listing is elided — the luma/chroma branch, sign
 * handling and the store back into block[] are not visible here. */
132 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
133 int16_t *block, int n, int qscale)
135 int i, level, nCoeffs;
136 const uint16_t *quant_matrix;
138 nCoeffs= s->block_last_index[n];
141 block[0] = block[0] * s->y_dc_scale;
143 block[0] = block[0] * s->c_dc_scale;
144 /* XXX: only mpeg1 */
145 quant_matrix = s->intra_matrix;
146 for(i=1;i<=nCoeffs;i++) {
147 int j= s->intra_scantable.permutated[i];
152 level = (int)(level * qscale * quant_matrix[j]) >> 3;
153 level = (level - 1) | 1;
156 level = (int)(level * qscale * quant_matrix[j]) >> 3;
157 level = (level - 1) | 1;
/* Inverse quantization for an MPEG-1 inter block. Unlike intra, the loop
 * starts at i = 0 (no special DC path) and each level is reconstructed as
 * ((2*level + 1) * qscale * inter matrix) >> 4, forced odd.
 * NOTE(review): elided listing — sign branches and stores are missing. */
164 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
165 int16_t *block, int n, int qscale)
167 int i, level, nCoeffs;
168 const uint16_t *quant_matrix;
170 nCoeffs= s->block_last_index[n];
172 quant_matrix = s->inter_matrix;
173 for(i=0; i<=nCoeffs; i++) {
174 int j= s->intra_scantable.permutated[i];
179 level = (((level << 1) + 1) * qscale *
180 ((int) (quant_matrix[j]))) >> 4;
181 level = (level - 1) | 1;
184 level = (((level << 1) + 1) * qscale *
185 ((int) (quant_matrix[j]))) >> 4;
186 level = (level - 1) | 1;
/* Inverse quantization for an MPEG-2 intra block. With alternate_scan all
 * 63 AC coefficients are processed regardless of block_last_index (the scan
 * order makes the last-index shortcut invalid). No (level-1)|1 here: MPEG-2
 * intra coefficients are not forced odd (mismatch control differs from
 * MPEG-1). NOTE(review): elided listing — sign branches/stores missing. */
193 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
194 int16_t *block, int n, int qscale)
196 int i, level, nCoeffs;
197 const uint16_t *quant_matrix;
199 if(s->alternate_scan) nCoeffs= 63;
200 else nCoeffs= s->block_last_index[n];
203 block[0] = block[0] * s->y_dc_scale;
205 block[0] = block[0] * s->c_dc_scale;
206 quant_matrix = s->intra_matrix;
207 for(i=1;i<=nCoeffs;i++) {
208 int j= s->intra_scantable.permutated[i];
213 level = (int)(level * qscale * quant_matrix[j]) >> 3;
216 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of MPEG-2 intra inverse quantization, installed by
 * dct_init() when CODEC_FLAG_BITEXACT is set. Visible arithmetic matches
 * dct_unquantize_mpeg2_intra_c(); the difference is presumably the
 * spec-mandated mismatch-control accumulation in the elided lines —
 * TODO(review): confirm against the full source. */
223 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
224 int16_t *block, int n, int qscale)
226 int i, level, nCoeffs;
227 const uint16_t *quant_matrix;
230 if(s->alternate_scan) nCoeffs= 63;
231 else nCoeffs= s->block_last_index[n];
234 block[0] = block[0] * s->y_dc_scale;
236 block[0] = block[0] * s->c_dc_scale;
237 quant_matrix = s->intra_matrix;
238 for(i=1;i<=nCoeffs;i++) {
239 int j= s->intra_scantable.permutated[i];
244 level = (int)(level * qscale * quant_matrix[j]) >> 3;
247 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Inverse quantization for an MPEG-2 inter block: loop from i = 0, level
 * reconstructed as ((2*level + 1) * qscale * inter matrix) >> 4; no
 * odd-forcing (MPEG-2 mismatch control is handled differently).
 * NOTE(review): elided listing — sign branches and stores are missing. */
256 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
257 int16_t *block, int n, int qscale)
259 int i, level, nCoeffs;
260 const uint16_t *quant_matrix;
263 if(s->alternate_scan) nCoeffs= 63;
264 else nCoeffs= s->block_last_index[n];
266 quant_matrix = s->inter_matrix;
267 for(i=0; i<=nCoeffs; i++) {
268 int j= s->intra_scantable.permutated[i];
273 level = (((level << 1) + 1) * qscale *
274 ((int) (quant_matrix[j]))) >> 4;
277 level = (((level << 1) + 1) * qscale *
278 ((int) (quant_matrix[j]))) >> 4;
/* Inverse quantization for an H.263-family intra block: uniform quantizer,
 * level' = level*qmul +/- qadd by sign, where qadd = (qscale-1)|1.
 * nCoeffs comes from raster_end[] (raster-order last coefficient).
 * NOTE(review): elided listing — qmul assignment, the luma/chroma branch
 * and the nCoeffs declaration are not visible here. */
287 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
288 int16_t *block, int n, int qscale)
290 int i, level, qmul, qadd;
293 assert(s->block_last_index[n]>=0);
299 block[0] = block[0] * s->y_dc_scale;
301 block[0] = block[0] * s->c_dc_scale;
302 qadd = (qscale - 1) | 1;
309 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
311 for(i=1; i<=nCoeffs; i++) {
315 level = level * qmul - qadd;
317 level = level * qmul + qadd;
/* Inverse quantization for an H.263-family inter block: same uniform
 * quantizer as the intra version but no DC special case, loop from i = 0.
 * NOTE(review): elided listing — qmul assignment and sign/store logic
 * are not visible here. */
324 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
325 int16_t *block, int n, int qscale)
327 int i, level, qmul, qadd;
330 assert(s->block_last_index[n]>=0);
332 qadd = (qscale - 1) | 1;
335 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
337 for(i=0; i<=nCoeffs; i++) {
341 level = level * qmul - qadd;
343 level = level * qmul + qadd;
/* Error-resilience callback (installed as er->decode_mb in init_er()):
 * re-decodes one macroblock with concealment parameters. Sets up MB state
 * and destination pointers into current_picture, then calls
 * ff_mpv_decode_mb(). Chroma offsets use chroma_x/y_shift, so subsampled
 * formats are handled. NOTE(review): elided listing — assignments of
 * mb_x/mb_y/mv_dir and the interlaced warning's condition are missing. */
350 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
352 int mb_x, int mb_y, int mb_intra, int mb_skipped)
354 MpegEncContext *s = opaque;
357 s->mv_type = mv_type;
358 s->mb_intra = mb_intra;
359 s->mb_skipped = mb_skipped;
362 memcpy(s->mv, mv, sizeof(*mv));
364 ff_init_block_index(s);
365 ff_update_block_index(s);
367 s->bdsp.clear_blocks(s->block[0]);
369 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
370 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
371 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
374 av_log(s->avctx, AV_LOG_DEBUG,
375 "Interlaced error concealment is not fully implemented\n");
376 ff_mpv_decode_mb(s, s->block);
379 /* init common dct for both encoder and decoder */
/* Initializes the block/hpel/mpegvideo/video DSP sub-contexts and installs
 * the C dct_unquantize_* implementations (bit-exact MPEG-2 intra variant
 * when CODEC_FLAG_BITEXACT is set), then lets per-arch init functions
 * override them. NOTE(review): elided listing — the ARCH_* guards around
 * the per-arch calls and the return statement are not visible. */
380 static av_cold int dct_init(MpegEncContext *s)
382 ff_blockdsp_init(&s->bdsp, s->avctx);
383 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
384 ff_mpegvideodsp_init(&s->mdsp);
385 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
387 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
388 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
389 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
390 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
391 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
392 if (s->flags & CODEC_FLAG_BITEXACT)
393 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
394 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
396 if (HAVE_INTRINSICS_NEON)
397 ff_mpv_common_init_neon(s);
400 ff_mpv_common_init_arm(s);
402 ff_mpv_common_init_ppc(s);
404 ff_mpv_common_init_x86(s);
/* Initializes the IDCT DSP context and builds the permuted scantables:
 * inter/intra use the vertical scan when alternate_scan is set, zigzag
 * otherwise; the dedicated h/v intra tables are always built. The raw scan
 * orders are permuted through idsp.idct_permutation so coefficients land
 * where the selected IDCT expects them. */
409 av_cold void ff_mpv_idct_init(MpegEncContext *s)
411 ff_idctdsp_init(&s->idsp, s->avctx);
413 /* load & permutate scantables
414 * note: only wmv uses different ones
416 if (s->alternate_scan) {
417 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
418 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
420 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
421 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
423 ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
424 ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocates the linesize-dependent scratch buffers (edge emulation buffer
 * and the shared ME/RD/OBMC scratchpad). Returns 0 on success or
 * AVERROR(ENOMEM) (via the elided fail label), freeing edge_emu_buffer on
 * the error path. Note the scratchpads deliberately alias one region:
 * they are never used simultaneously. */
427 static int frame_size_alloc(MpegEncContext *s, int linesize)
429 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
431 // edge emu needs blocksize + filter length - 1
432 // (= 17x17 for halfpel / 21x21 for h264)
433 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
434 // at uvlinesize. It supports only YUV420 so 24x24 is enough
435 // linesize * interlaced * MBsize
436 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
439 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
441 s->me.temp = s->me.scratchpad;
442 s->rd_scratchpad = s->me.scratchpad;
443 s->b_scratchpad = s->me.scratchpad;
444 s->obmc_scratchpad = s->me.scratchpad + 16;
448 av_freep(&s->edge_emu_buffer);
449 return AVERROR(ENOMEM);
453 * Allocate a frame buffer
/* Allocates pic->f's pixel buffers. Encoders get EDGE_WIDTH padding on all
 * sides (data pointers are then advanced past the edges and width/height
 * restored), except for the WMV3IMAGE/VC1IMAGE/MSS2 codecs, which allocate
 * through the default get_buffer2 with user dimensions. Also allocates
 * hwaccel private data when needed, validates that linesizes did not
 * change mid-stream and that U/V strides match, and lazily allocates the
 * linesize-dependent scratch buffers via frame_size_alloc().
 * NOTE(review): elided listing — several error returns and closing braces
 * are not visible here. */
455 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
457 int edges_needed = av_codec_is_encoder(s->avctx->codec);
461 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
462 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
463 s->codec_id != AV_CODEC_ID_MSS2) {
465 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
466 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
469 r = ff_thread_get_buffer(s->avctx, &pic->tf,
470 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
472 pic->f->width = s->avctx->width;
473 pic->f->height = s->avctx->height;
474 pic->f->format = s->avctx->pix_fmt;
475 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
478 if (r < 0 || !pic->f->buf[0]) {
479 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
486 for (i = 0; pic->f->data[i]; i++) {
487 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
488 pic->f->linesize[i] +
489 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
490 pic->f->data[i] += offset;
492 pic->f->width = s->avctx->width;
493 pic->f->height = s->avctx->height;
496 if (s->avctx->hwaccel) {
497 assert(!pic->hwaccel_picture_private);
498 if (s->avctx->hwaccel->frame_priv_data_size) {
499 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
500 if (!pic->hwaccel_priv_buf) {
501 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
504 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
508 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
509 s->uvlinesize != pic->f->linesize[1])) {
510 av_log(s->avctx, AV_LOG_ERROR,
511 "get_buffer() failed (stride changed)\n");
512 ff_mpeg_unref_picture(s, pic);
516 if (pic->f->linesize[1] != pic->f->linesize[2]) {
517 av_log(s->avctx, AV_LOG_ERROR,
518 "get_buffer() failed (uv stride mismatch)\n");
519 ff_mpeg_unref_picture(s, pic);
523 if (!s->edge_emu_buffer &&
524 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
525 av_log(s->avctx, AV_LOG_ERROR,
526 "get_buffer() failed to allocate context scratch buffers.\n");
527 ff_mpeg_unref_picture(s, pic);
/* Releases all per-picture side-data buffer references (encoder stats
 * tables, skip/qscale/mb_type tables, and per-direction motion vector and
 * reference index buffers). Safe on already-unref'd buffers since
 * av_buffer_unref() tolerates NULL. */
534 void ff_free_picture_tables(Picture *pic)
538 av_buffer_unref(&pic->mb_var_buf);
539 av_buffer_unref(&pic->mc_mb_var_buf);
540 av_buffer_unref(&pic->mb_mean_buf);
541 av_buffer_unref(&pic->mbskip_table_buf);
542 av_buffer_unref(&pic->qscale_table_buf);
543 av_buffer_unref(&pic->mb_type_buf);
545 for (i = 0; i < 2; i++) {
546 av_buffer_unref(&pic->motion_val_buf[i]);
547 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocates the per-picture side-data buffers sized from the current
 * mb/b8 strides: always the skip/qscale/mb_type tables; encoder-only
 * variance/mean stat tables (the guarding condition is elided here); and,
 * for FMT_H263 or when encoding, per-direction motion vectors and
 * reference indices. Returns 0 or AVERROR(ENOMEM) — partially allocated
 * buffers are left for the caller to free via ff_free_picture_tables(). */
551 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
553 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
554 const int mb_array_size = s->mb_stride * s->mb_height;
555 const int b8_array_size = s->b8_stride * s->mb_height * 2;
559 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
560 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
561 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
563 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
564 return AVERROR(ENOMEM);
567 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
568 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
569 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
570 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
571 return AVERROR(ENOMEM);
574 if (s->out_format == FMT_H263 || s->encoding) {
575 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
576 int ref_index_size = 4 * mb_array_size;
578 for (i = 0; mv_size && i < 2; i++) {
579 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
580 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
581 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
582 return AVERROR(ENOMEM);
/* Ensures every per-picture side-data buffer is exclusively owned (not
 * shared with another Picture) by calling av_buffer_make_writable() on
 * each, so the decoder/encoder can modify them in place. The MAKE_WRITABLE
 * macro's full condition and the function's return are elided here. */
589 static int make_tables_writable(Picture *pic)
592 #define MAKE_WRITABLE(table) \
595 (ret = av_buffer_make_writable(&pic->table)) < 0)\
599 MAKE_WRITABLE(mb_var_buf);
600 MAKE_WRITABLE(mc_mb_var_buf);
601 MAKE_WRITABLE(mb_mean_buf);
602 MAKE_WRITABLE(mbskip_table_buf);
603 MAKE_WRITABLE(qscale_table_buf);
604 MAKE_WRITABLE(mb_type_buf);
606 for (i = 0; i < 2; i++) {
607 MAKE_WRITABLE(motion_val_buf[i]);
608 MAKE_WRITABLE(ref_index_buf[i]);
615 * Allocate a Picture.
616 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* For non-shared pictures this allocates the frame buffer (recording the
 * resulting linesizes in the context), then (re)allocates the side-data
 * tables if missing, makes them writable, and publishes the convenience
 * pointers into the buffers. qscale_table/mb_type are offset by
 * 2*mb_stride + 1 and motion_val by 4 so out-of-frame accesses at the
 * borders stay inside the allocation. On failure everything is unref'd
 * and AVERROR(ENOMEM) returned. NOTE(review): elided listing — the
 * shared-path handling and several guards are not visible. */
618 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
623 assert(pic->f->data[0]);
626 assert(!pic->f->buf[0]);
628 if (alloc_frame_buffer(s, pic) < 0)
631 s->linesize = pic->f->linesize[0];
632 s->uvlinesize = pic->f->linesize[1];
635 if (!pic->qscale_table_buf)
636 ret = alloc_picture_tables(s, pic);
638 ret = make_tables_writable(pic);
643 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
644 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
645 pic->mb_mean = pic->mb_mean_buf->data;
648 pic->mbskip_table = pic->mbskip_table_buf->data;
649 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
650 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
652 if (pic->motion_val_buf[0]) {
653 for (i = 0; i < 2; i++) {
654 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
655 pic->ref_index[i] = pic->ref_index_buf[i]->data;
661 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
662 ff_mpeg_unref_picture(s, pic);
663 ff_free_picture_tables(pic);
664 return AVERROR(ENOMEM);
668 * Deallocate a picture.
/* Releases the picture's frame buffer (through the thread-aware release
 * except for the WM image/screen codecs, which bypass user callbacks),
 * drops the hwaccel buffer, frees the side-data tables when a realloc is
 * pending, and zeroes every Picture field past mb_mean — the offsetof
 * trick preserves the table pointers/buffers that live before it. */
670 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
672 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
675 /* WM Image / Screen codecs allocate internal buffers with different
676 * dimensions / colorspaces; ignore user-defined callbacks for these. */
677 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
678 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
679 s->codec_id != AV_CODEC_ID_MSS2)
680 ff_thread_release_buffer(s->avctx, &pic->tf);
682 av_frame_unref(pic->f);
684 av_buffer_unref(&pic->hwaccel_priv_buf);
686 if (pic->needs_realloc)
687 ff_free_picture_tables(pic);
689 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Makes dst share src's side-data buffers: for each table, if dst does not
 * already reference the same underlying buffer, the old reference is
 * dropped and a new one taken (UPDATE_TABLE's guard is partially elided).
 * On a failed av_buffer_ref() all of dst's tables are freed and
 * AVERROR(ENOMEM) returned; otherwise the raw data pointers are copied so
 * dst points into the shared buffers. */
692 static int update_picture_tables(Picture *dst, Picture *src)
696 #define UPDATE_TABLE(table)\
699 (!dst->table || dst->table->buffer != src->table->buffer)) {\
700 av_buffer_unref(&dst->table);\
701 dst->table = av_buffer_ref(src->table);\
703 ff_free_picture_tables(dst);\
704 return AVERROR(ENOMEM);\
709 UPDATE_TABLE(mb_var_buf);
710 UPDATE_TABLE(mc_mb_var_buf);
711 UPDATE_TABLE(mb_mean_buf);
712 UPDATE_TABLE(mbskip_table_buf);
713 UPDATE_TABLE(qscale_table_buf);
714 UPDATE_TABLE(mb_type_buf);
715 for (i = 0; i < 2; i++) {
716 UPDATE_TABLE(motion_val_buf[i]);
717 UPDATE_TABLE(ref_index_buf[i]);
720 dst->mb_var = src->mb_var;
721 dst->mc_mb_var = src->mc_mb_var;
722 dst->mb_mean = src->mb_mean;
723 dst->mbskip_table = src->mbskip_table;
724 dst->qscale_table = src->qscale_table;
725 dst->mb_type = src->mb_type;
726 for (i = 0; i < 2; i++) {
727 dst->motion_val[i] = src->motion_val[i];
728 dst->ref_index[i] = src->ref_index[i];
/* Creates a new reference from src into dst (dst must be empty): refs the
 * frame via ff_thread_ref_frame(), shares the side-data tables via
 * update_picture_tables(), refs the hwaccel private buffer if present,
 * then copies the scalar bookkeeping fields. The 'fail' path (reached on
 * any error — labels/gotos are elided here) unrefs dst and returns the
 * error code. */
734 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
738 av_assert0(!dst->f->buf[0]);
739 av_assert0(src->f->buf[0]);
743 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
747 ret = update_picture_tables(dst, src);
751 if (src->hwaccel_picture_private) {
752 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
753 if (!dst->hwaccel_priv_buf)
755 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
758 dst->field_picture = src->field_picture;
759 dst->mb_var_sum = src->mb_var_sum;
760 dst->mc_mb_var_sum = src->mc_mb_var_sum;
761 dst->b_frame_score = src->b_frame_score;
762 dst->needs_realloc = src->needs_realloc;
763 dst->reference = src->reference;
764 dst->shared = src->shared;
768 ff_mpeg_unref_picture(s, dst);
/* Swaps the U and V block pointers (pblocks[4] <-> pblocks[5]); called for
 * the "VCR2" codec tag, which stores chroma planes swapped — see the
 * codec_tag checks in init_duplicate_context()/ff_update_duplicate_context().
 * NOTE(review): only one half of the swap is visible in this elided
 * listing; the temporary and the pblocks[5] assignment are missing. */
772 static void exchange_uv(MpegEncContext *s)
777 s->pblocks[4] = s->pblocks[5];
/* Allocates the per-slice-thread state: motion-estimation maps, optional
 * noise-reduction error sums, the 12 transform blocks (with pblocks[]
 * pointing into them, U/V swapped for the VCR2 tag), and for H.263-family
 * codecs the AC prediction values (ac_val[0]=luma, [1]/[2]=chroma planes,
 * offset so border accesses stay in bounds). Scratch buffers themselves
 * are allocated later, once linesize is known (frame_size_alloc). Returns
 * -1 on allocation failure; cleanup happens in ff_mpv_common_end(). */
781 static int init_duplicate_context(MpegEncContext *s)
783 int y_size = s->b8_stride * (2 * s->mb_height + 1);
784 int c_size = s->mb_stride * (s->mb_height + 1);
785 int yc_size = y_size + 2 * c_size;
793 s->obmc_scratchpad = NULL;
796 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
797 ME_MAP_SIZE * sizeof(uint32_t), fail)
798 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
799 ME_MAP_SIZE * sizeof(uint32_t), fail)
800 if (s->avctx->noise_reduction) {
801 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
802 2 * 64 * sizeof(int), fail)
805 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
806 s->block = s->blocks[0];
808 for (i = 0; i < 12; i++) {
809 s->pblocks[i] = &s->block[i];
811 if (s->avctx->codec_tag == AV_RL32("VCR2"))
814 if (s->out_format == FMT_H263) {
816 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
817 yc_size * sizeof(int16_t) * 16, fail);
818 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
819 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
820 s->ac_val[2] = s->ac_val[1] + c_size;
825 return -1; // free() through ff_mpv_common_end()
/* Frees everything init_duplicate_context()/frame_size_alloc() allocated
 * for one slice-thread context. Only edge_emu_buffer and me.scratchpad are
 * freed of the aliased scratchpads — rd/b/obmc point into me.scratchpad,
 * so they are reset to NULL, not freed (avoids double free). */
828 static void free_duplicate_context(MpegEncContext *s)
833 av_freep(&s->edge_emu_buffer);
834 av_freep(&s->me.scratchpad);
838 s->obmc_scratchpad = NULL;
840 av_freep(&s->dct_error_sum);
841 av_freep(&s->me.map);
842 av_freep(&s->me.score_map);
843 av_freep(&s->blocks);
844 av_freep(&s->ac_val_base);
/* Copies the per-thread-owned pointer/state fields from src into bak so
 * ff_update_duplicate_context() can restore them after the wholesale
 * memcpy of the context. NOTE(review): heavily elided — most COPY()
 * invocations from the original file are not visible here. */
848 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
850 #define COPY(a) bak->a = src->a
851 COPY(edge_emu_buffer);
856 COPY(obmc_scratchpad);
863 COPY(me.map_generation);
/* Synchronizes a slice-thread context with the master: backs up dst's
 * thread-local fields, memcpy's the whole MpegEncContext from src, restores
 * the backed-up fields, then re-derives pblocks[] (swapping U/V for VCR2)
 * and allocates scratch buffers if dst does not have them yet. Returns the
 * frame_size_alloc() error on failure. */
875 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
879 // FIXME copy only needed parts
881 backup_duplicate_context(&bak, dst);
882 memcpy(dst, src, sizeof(MpegEncContext));
883 backup_duplicate_context(dst, &bak);
884 for (i = 0; i < 12; i++) {
885 dst->pblocks[i] = &dst->block[i];
887 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
889 if (!dst->edge_emu_buffer &&
890 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
891 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
892 "scratch buffers.\n");
895 // STOP_TIMER("update_duplicate_context")
896 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading state transfer: copies decoding state from the previous
 * thread's context (s1) into this thread's context (s). First-time init
 * memcpy's the whole context and re-runs ff_mpv_common_init(); dimension
 * changes trigger ff_mpv_common_frame_size_change(). Then all Picture
 * references, pointers (rebased into this context's picture array via
 * REBASE_PICTURE), timing/B-frame state, the leftover bitstream buffer,
 * and the MPEG-2/interlacing field range are copied. The two memcpy-range
 * tricks ((char*)&field_b - (char*)&field_a) rely on MpegEncContext field
 * ordering — do not reorder those struct members.
 * NOTE(review): elided listing — several error checks, the UPDATE_PICTURE
 * macro's full body and the final return are not visible here. */
900 int ff_mpeg_update_thread_context(AVCodecContext *dst,
901 const AVCodecContext *src)
904 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
906 if (dst == src || !s1->context_initialized)
909 // FIXME can parameters change on I-frames?
910 // in that case dst may need a reinit
911 if (!s->context_initialized) {
913 memcpy(s, s1, sizeof(MpegEncContext));
916 s->bitstream_buffer = NULL;
917 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
920 if ((err = ff_mpv_common_init(s)) < 0)
924 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
926 s->context_reinit = 0;
927 s->height = s1->height;
928 s->width = s1->width;
929 if ((err = ff_mpv_common_frame_size_change(s)) < 0)
933 s->avctx->coded_height = s1->avctx->coded_height;
934 s->avctx->coded_width = s1->avctx->coded_width;
935 s->avctx->width = s1->avctx->width;
936 s->avctx->height = s1->avctx->height;
938 s->coded_picture_number = s1->coded_picture_number;
939 s->picture_number = s1->picture_number;
941 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
942 ff_mpeg_unref_picture(s, &s->picture[i]);
943 if (s1->picture[i].f->buf[0] &&
944 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
948 #define UPDATE_PICTURE(pic)\
950 ff_mpeg_unref_picture(s, &s->pic);\
951 if (s1->pic.f->buf[0])\
952 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
954 ret = update_picture_tables(&s->pic, &s1->pic);\
959 UPDATE_PICTURE(current_picture);
960 UPDATE_PICTURE(last_picture);
961 UPDATE_PICTURE(next_picture);
963 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
964 ((pic && pic >= old_ctx->picture && \
965 pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
966 &new_ctx->picture[pic - old_ctx->picture] : NULL)
968 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
969 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
970 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
972 // Error/bug resilience
973 s->next_p_frame_damaged = s1->next_p_frame_damaged;
974 s->workaround_bugs = s1->workaround_bugs;
977 memcpy(&s->last_time_base, &s1->last_time_base,
978 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
979 (char *) &s1->last_time_base);
982 s->max_b_frames = s1->max_b_frames;
983 s->low_delay = s1->low_delay;
984 s->droppable = s1->droppable;
986 // DivX handling (doesn't work)
987 s->divx_packed = s1->divx_packed;
989 if (s1->bitstream_buffer) {
990 if (s1->bitstream_buffer_size +
991 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
992 av_fast_malloc(&s->bitstream_buffer,
993 &s->allocated_bitstream_buffer_size,
994 s1->allocated_bitstream_buffer_size);
995 s->bitstream_buffer_size = s1->bitstream_buffer_size;
996 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
997 s1->bitstream_buffer_size);
998 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
999 FF_INPUT_BUFFER_PADDING_SIZE);
1002 // linesize dependend scratch buffer allocation
1003 if (!s->edge_emu_buffer)
1005 if (frame_size_alloc(s, s1->linesize) < 0) {
1006 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1007 "scratch buffers.\n");
1008 return AVERROR(ENOMEM);
1011 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1012 "be allocated due to unknown size.\n");
1016 // MPEG2/interlacing info
1017 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1018 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1020 if (!s1->first_field) {
1021 s->last_pict_type = s1->pict_type;
1022 if (s1->current_picture_ptr)
1023 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1030 * Set the given MpegEncContext to common defaults
1031 * (same for encoding and decoding).
1032 * The changed fields will not depend upon the
1033 * prior state of the MpegEncContext.
/* Defaults: MPEG-1 DC scale tables, identity chroma qscale table,
 * progressive frame-coded picture, counters reset, one slice context. */
1035 void ff_mpv_common_defaults(MpegEncContext *s)
1037 s->y_dc_scale_table =
1038 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1039 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1040 s->progressive_frame = 1;
1041 s->progressive_sequence = 1;
1042 s->picture_structure = PICT_FRAME;
1044 s->coded_picture_number = 0;
1045 s->picture_number = 0;
1050 s->slice_context_count = 1;
1054 * Set the given MpegEncContext to defaults for decoding.
1055 * the changed fields will not depend upon
1056 * the prior state of the MpegEncContext.
/* Decoder defaults are currently just the common defaults; any
 * decoder-specific additions (if present in the original) are elided. */
1058 void ff_mpv_decode_defaults(MpegEncContext *s)
1060 ff_mpv_common_defaults(s);
/* Sets up the error-resilience context: mirrors the geometry fields from
 * the MpegEncContext, allocates the temp and status tables (freed again on
 * the ENOMEM path), shares the skip/intra tables and dc_val planes, and
 * installs mpeg_er_decode_mb as the concealment callback. */
1063 static int init_er(MpegEncContext *s)
1065 ERContext *er = &s->er;
1066 int mb_array_size = s->mb_height * s->mb_stride;
1069 er->avctx = s->avctx;
1071 er->mb_index2xy = s->mb_index2xy;
1072 er->mb_num = s->mb_num;
1073 er->mb_width = s->mb_width;
1074 er->mb_height = s->mb_height;
1075 er->mb_stride = s->mb_stride;
1076 er->b8_stride = s->b8_stride;
1078 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1079 er->error_status_table = av_mallocz(mb_array_size);
1080 if (!er->er_temp_buffer || !er->error_status_table)
1083 er->mbskip_table = s->mbskip_table;
1084 er->mbintra_table = s->mbintra_table;
1086 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1087 er->dc_val[i] = s->dc_val[i];
1089 er->decode_mb = mpeg_er_decode_mb;
1094 av_freep(&er->er_temp_buffer);
1095 av_freep(&er->error_status_table);
1096 return AVERROR(ENOMEM);
1100 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Derives the macroblock geometry (mb_width/stride, b8_stride — the +1 on
 * strides leaves a guard column for prediction at the right edge), builds
 * the mb_index2xy remap used by error resilience, and allocates every
 * resolution-dependent table: encoder MV tables (each base offset by
 * mb_stride+1 so row/col -1 are addressable), mb_type/lambda/complexity
 * tables, interlaced direct-mode field MV tables (MPEG-4 or interlaced
 * ME), H.263 coded-block/cbp/pred_dir tables, DC prediction values
 * (initialized to 1024, the H.263/MPEG-4 DC predictor reset value), the
 * intra table (all-1 initially) and the skip table. Any allocation
 * failure lands on the elided 'fail' label returning AVERROR(ENOMEM).
 * NOTE(review): elided listing — encoder-only guards around several
 * allocations and some block_wrap assignments are not visible. */
1102 static int init_context_frame(MpegEncContext *s)
1104 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1106 s->mb_width = (s->width + 15) / 16;
1107 s->mb_stride = s->mb_width + 1;
1108 s->b8_stride = s->mb_width * 2 + 1;
1109 mb_array_size = s->mb_height * s->mb_stride;
1110 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1112 /* set default edge pos, will be overriden
1113 * in decode_header if needed */
1114 s->h_edge_pos = s->mb_width * 16;
1115 s->v_edge_pos = s->mb_height * 16;
1117 s->mb_num = s->mb_width * s->mb_height;
1122 s->block_wrap[3] = s->b8_stride;
1124 s->block_wrap[5] = s->mb_stride;
1126 y_size = s->b8_stride * (2 * s->mb_height + 1);
1127 c_size = s->mb_stride * (s->mb_height + 1);
1128 yc_size = y_size + 2 * c_size;
1130 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
1131 fail); // error ressilience code looks cleaner with this
1132 for (y = 0; y < s->mb_height; y++)
1133 for (x = 0; x < s->mb_width; x++)
1134 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1136 s->mb_index2xy[s->mb_height * s->mb_width] =
1137 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1140 /* Allocate MV tables */
1141 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
1142 mv_table_size * 2 * sizeof(int16_t), fail);
1143 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
1144 mv_table_size * 2 * sizeof(int16_t), fail);
1145 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
1146 mv_table_size * 2 * sizeof(int16_t), fail);
1147 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
1148 mv_table_size * 2 * sizeof(int16_t), fail);
1149 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
1150 mv_table_size * 2 * sizeof(int16_t), fail);
1151 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
1152 mv_table_size * 2 * sizeof(int16_t), fail);
1153 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1154 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1155 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1156 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
1158 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
1160 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1162 /* Allocate MB type table */
1163 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
1164 sizeof(uint16_t), fail); // needed for encoding
1166 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
1169 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1170 mb_array_size * sizeof(float), fail);
1171 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1172 mb_array_size * sizeof(float), fail);
1176 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1177 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1178 /* interlaced direct mode decoding tables */
1179 for (i = 0; i < 2; i++) {
1181 for (j = 0; j < 2; j++) {
1182 for (k = 0; k < 2; k++) {
1183 FF_ALLOCZ_OR_GOTO(s->avctx,
1184 s->b_field_mv_table_base[i][j][k],
1185 mv_table_size * 2 * sizeof(int16_t),
1187 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1190 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
1191 mb_array_size * 2 * sizeof(uint8_t), fail);
1192 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
1193 mv_table_size * 2 * sizeof(int16_t), fail);
1194 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
1197 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
1198 mb_array_size * 2 * sizeof(uint8_t), fail);
1201 if (s->out_format == FMT_H263) {
1203 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
1204 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1206 /* cbp, ac_pred, pred_dir */
1207 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
1208 mb_array_size * sizeof(uint8_t), fail);
1209 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
1210 mb_array_size * sizeof(uint8_t), fail);
1213 if (s->h263_pred || s->h263_plus || !s->encoding) {
1215 // MN: we need these for error resilience of intra-frames
1216 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
1217 yc_size * sizeof(int16_t), fail);
1218 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1219 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1220 s->dc_val[2] = s->dc_val[1] + c_size;
1221 for (i = 0; i < yc_size; i++)
1222 s->dc_val_base[i] = 1024;
1225 /* which mb is a intra block */
1226 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1227 memset(s->mbintra_table, 1, mb_array_size);
1229 /* init macroblock skip table */
1230 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1231 // Note the + 1 is for a quicker mpeg4 slice_end detection
1235 return AVERROR(ENOMEM);
/**
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
 */
av_cold int ff_mpv_common_init(MpegEncContext *s)
    /* one slice context per slice thread, unless slice threading is off */
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    /* an explicit slice count requested by the user wins over thread count */
    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    /* interlaced MPEG-2 rounds mb_height up to a macroblock-pair unit */
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");

    /* clamp the slice count to what MAX_THREADS and the MB height allow */
    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        max_slices = FFMIN(MAX_THREADS, s->mb_height);
        max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))

    s->flags  = s->avctx->flags;
    s->flags2 = s->avctx->flags2;

    /* set chroma shifts */
    av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                     &s->chroma_y_shift);

    /* convert fourcc to upper case */
    s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);

    /* picture pool; FF_ALLOCZ_OR_GOTO jumps to the fail label on OOM */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      MAX_PICTURE_COUNT * sizeof(Picture), fail);
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        s->picture[i].f = av_frame_alloc();
        if (!s->picture[i].f)
    memset(&s->next_picture, 0, sizeof(s->next_picture));
    memset(&s->last_picture, 0, sizeof(s->last_picture));
    memset(&s->current_picture, 0, sizeof(s->current_picture));
    memset(&s->new_picture, 0, sizeof(s->new_picture));
    s->next_picture.f = av_frame_alloc();
    if (!s->next_picture.f)
    s->last_picture.f = av_frame_alloc();
    if (!s->last_picture.f)
    s->current_picture.f = av_frame_alloc();
    if (!s->current_picture.f)
    s->new_picture.f = av_frame_alloc();
    if (!s->new_picture.f)

    /* resolution-dependent tables are only built once dimensions are known */
    if (s->width && s->height) {
        if (init_context_frame(s))

    s->parse_context.state = -1;

    s->context_initialized = 1;
    s->thread_context[0]   = s;

    if (s->width && s->height) {
        if (nb_slices > 1) {
            for (i = 1; i < nb_slices; i++) {
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));

            /* give each slice context a contiguous band of MB rows */
            for (i = 0; i < nb_slices; i++) {
                if (init_duplicate_context(s->thread_context[i]) < 0)
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        if (init_duplicate_context(s) < 0)
        s->end_mb_y = s->mb_height;

    s->slice_context_count = nb_slices;

    /* error path: tear down everything allocated so far */
    ff_mpv_common_end(s);
/**
 * Frees and resets MpegEncContext fields depending on the resolution.
 * Is used during resolution changes to avoid a full reinitialization of the
 * codec context.
 */
static void free_context_frame(MpegEncContext *s)
    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    /* the non-_base pointers are offsets into the buffers just freed;
     * clear them so no dangling pointer survives */
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        av_freep(&s->p_field_select_table[i]);

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    av_freep(&s->er.error_status_table);
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    /* strides are resolution-dependent; force recomputation */
    s->linesize = s->uvlinesize = 0;
/* Rebuild all resolution-dependent state after the frame size changed:
 * slice contexts are torn down and re-created around the new geometry. */
int ff_mpv_common_frame_size_change(MpegEncContext *s)
    /* drop per-slice duplicate contexts (index 0 is s itself, not freed) */
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        free_duplicate_context(s);
    free_context_frame(s);

    /* pooled pictures keep their buffers but must be reallocated on next use */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        s->picture[i].needs_realloc = 1;

    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;

    /* interlaced MPEG-2 rounds mb_height up to a macroblock-pair unit */
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
        s->mb_height = (s->height + 15) / 16;

    if ((s->width || s->height) &&
        (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)

    if ((err = init_context_frame(s)))

    s->thread_context[0] = s;

    if (s->width && s->height) {
        int nb_slices = s->slice_context_count;
        if (nb_slices > 1) {
            for (i = 1; i < nb_slices; i++) {
                s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
                memcpy(s->thread_context[i], s, sizeof(MpegEncContext));

            /* give each slice context a contiguous band of MB rows */
            for (i = 0; i < nb_slices; i++) {
                if ((err = init_duplicate_context(s->thread_context[i])) < 0)
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        if (init_duplicate_context(s) < 0)
        s->end_mb_y = s->mb_height;

    s->slice_context_count = nb_slices;

    /* error path: full teardown */
    ff_mpv_common_end(s);
/* free the common structure for both encoder and decoder
 * (teardown counterpart of ff_mpv_common_init()) */
void ff_mpv_common_end(MpegEncContext *s)
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        /* thread_context[0] is s itself; only 1..count-1 were malloced */
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        ff_free_picture_tables(&s->picture[i]);
        ff_mpeg_unref_picture(s, &s->picture[i]);
        av_frame_free(&s->picture[i].f);
    av_freep(&s->picture);
    ff_free_picture_tables(&s->last_picture);
    ff_mpeg_unref_picture(s, &s->last_picture);
    av_frame_free(&s->last_picture.f);
    ff_free_picture_tables(&s->current_picture);
    ff_mpeg_unref_picture(s, &s->current_picture);
    av_frame_free(&s->current_picture.f);
    ff_free_picture_tables(&s->next_picture);
    ff_mpeg_unref_picture(s, &s->next_picture);
    av_frame_free(&s->next_picture.f);
    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s, &s->new_picture);
    av_frame_free(&s->new_picture.f);

    free_context_frame(s);

    s->context_initialized = 0;
    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;
    s->linesize = s->uvlinesize = 0;
/* Build the max_level[]/max_run[]/index_run[] lookup tables of an RLTable.
 * If static_store is non-NULL the three tables are packed back to back
 * inside it; otherwise they are allocated with av_malloc(). */
av_cold void ff_init_rl(RLTable *rl,
                        uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
    int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if (static_store && rl->max_level[0])

    /* compute max_level[], max_run[] and index_run[] */
    for (last = 0; last < 2; last++) {
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1); /* rl->n marks "unseen run" */
        for (i = start; i < end; i++) {
            run   = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n) /* first entry with this run value */
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        rl->max_level[last] = static_store[last];
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        rl->max_run[last] = static_store[last] + MAX_RUN + 1;
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute the per-qscale RL VLC tables (run/level/len) so the decoder
 * can fold VLC lookup and dequantization into a single table read. */
av_cold void ff_init_vlc_rl(RLTable *rl)
    for (q = 0; q < 32; q++) {
        int qadd = (q - 1) | 1; /* odd rounding term of H.263-style dequant */

        for (i = 0; i < rl->vlc.table_size; i++) {
            int code = rl->vlc.table[i][0];
            int len  = rl->vlc.table[i][1];

            if (len == 0) { // illegal code
            } else if (len < 0) { // more bits needed
                if (code == rl->n) { // esc
                    run   = rl->table_run[code] + 1;
                    level = rl->table_level[code] * qmul + qadd;
                    if (code >= rl->last) run += 192; /* "last" flag encoded in run */
                rl->rl_vlc[q][i].len   = len;
                rl->rl_vlc[q][i].level = level;
                rl->rl_vlc[q][i].run   = run;
1622 static void release_unused_pictures(MpegEncContext *s)
1626 /* release non reference frames */
1627 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1628 if (!s->picture[i].reference)
1629 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Decide whether a pool slot may be recycled: either its frame buffer is
 * entirely unallocated, or it is flagged for reallocation and not held as
 * a delayed reference. */
static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
    if (!pic->f->buf[0])
    if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Pick a picture-pool index for a new frame: the first pass prefers a slot
 * whose frame buffer is completely unallocated, the second pass accepts any
 * slot that pic_is_unused() considers recyclable. */
static int find_unused_picture(MpegEncContext *s, int shared)
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].f->buf[0])
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (pic_is_unused(s, &s->picture[i]))
    /* every slot is in use */
    return AVERROR_INVALIDDATA;
/* Public wrapper around find_unused_picture(): additionally resets a slot
 * that was marked needs_realloc before handing its index back. */
int ff_find_unused_picture(MpegEncContext *s, int shared)
    int ret = find_unused_picture(s, shared);

    if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
        if (s->picture[ret].needs_realloc) {
            s->picture[ret].needs_realloc = 0;
            ff_free_picture_tables(&s->picture[ret]);
            ff_mpeg_unref_picture(s, &s->picture[ret]);
/**
 * generic function called after decoding
 * the header and before a frame is decoded.
 */
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s, s->last_picture_ptr);

    /* release forgotten pictures */
    /* if (mpeg124/h263) */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (&s->picture[i] != s->last_picture_ptr &&
            &s->picture[i] != s->next_picture_ptr &&
            s->picture[i].reference && !s->picture[i].needs_realloc) {
            if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                av_log(avctx, AV_LOG_ERROR,
                       "releasing zombie picture\n");
            ff_mpeg_unref_picture(s, &s->picture[i]);

    ff_mpeg_unref_picture(s, &s->current_picture);

    release_unused_pictures(s);

    if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
        // we already have an unused image
        // (maybe it was set before reading the header)
        pic = s->current_picture_ptr;
        i = ff_find_unused_picture(s, 0);
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
        pic = &s->picture[i];

    if (!s->droppable) {
        if (s->pict_type != AV_PICTURE_TYPE_B)

    pic->f->coded_picture_number = s->coded_picture_number++;

    if (ff_alloc_picture(s, pic, 0) < 0)

    s->current_picture_ptr = pic;
    // FIXME use only the vars from current_pic
    s->current_picture_ptr->f->top_field_first = s->top_field_first;
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        if (s->picture_structure != PICT_FRAME)
            s->current_picture_ptr->f->top_field_first =
                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
    s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
                                                  !s->progressive_sequence;
    s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;

    s->current_picture_ptr->f->pict_type = s->pict_type;
    // if (s->flags && CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
                                   s->current_picture_ptr)) < 0)

    /* non-B frames shift the reference chain: next becomes last,
     * current becomes next */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
            s->next_picture_ptr = s->current_picture_ptr;

    av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
            s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
            s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
            s->pict_type, s->droppable);

    /* no usable last reference: synthesize a dummy one so decoding can
     * continue (missing keyframe, or field-based first keyframe) */
    if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
        (s->pict_type != AV_PICTURE_TYPE_I ||
         s->picture_structure != PICT_FRAME)) {
        int h_chroma_shift, v_chroma_shift;
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &h_chroma_shift, &v_chroma_shift);
        if (s->pict_type != AV_PICTURE_TYPE_I)
            av_log(avctx, AV_LOG_ERROR,
                   "warning: first frame is no keyframe\n");
        else if (s->picture_structure != PICT_FRAME)
            av_log(avctx, AV_LOG_INFO,
                   "allocate dummy last picture for field based first keyframe\n");

        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s, 0);
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
        s->last_picture_ptr = &s->picture[i];

        s->last_picture_ptr->reference    = 3;
        s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;

        if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
            s->last_picture_ptr = NULL;

        /* Y=0, chroma=0x80: a plain black frame */
        memset(s->last_picture_ptr->f->data[0], 0,
               avctx->height * s->last_picture_ptr->f->linesize[0]);
        memset(s->last_picture_ptr->f->data[1], 0x80,
               (avctx->height >> v_chroma_shift) *
               s->last_picture_ptr->f->linesize[1]);
        memset(s->last_picture_ptr->f->data[2], 0x80,
               (avctx->height >> v_chroma_shift) *
               s->last_picture_ptr->f->linesize[2]);

        /* mark both fields as fully decoded for frame threading */
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
    if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
        s->pict_type == AV_PICTURE_TYPE_B) {
        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s, 0);
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
        s->next_picture_ptr = &s->picture[i];

        s->next_picture_ptr->reference    = 3;
        s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;

        if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
            s->next_picture_ptr = NULL;
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);

    if (s->last_picture_ptr) {
        ff_mpeg_unref_picture(s, &s->last_picture);
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
    if (s->next_picture_ptr) {
        ff_mpeg_unref_picture(s, &s->next_picture);
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->next_picture,
                                       s->next_picture_ptr)) < 0)

    if (s->pict_type != AV_PICTURE_TYPE_I &&
        !(s->last_picture_ptr && s->last_picture_ptr->f->buf[0])) {
        av_log(s, AV_LOG_ERROR,
               "Non-reference picture received and no reference available\n");
        return AVERROR_INVALIDDATA;

    /* field pictures: offset data[] to the wanted field and double the
     * strides so the code can address the frame as a half-height image */
    if (s->picture_structure!= PICT_FRAME) {
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] +=
                    s->current_picture.f->linesize[i];
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i]    *= 2;
            s->next_picture.f->linesize[i]    *= 2;

    s->err_recognition = avctx->err_recognition;

    /* set dequantizer, we can't do it during init as
     * it might change for mpeg4 and we can't do it in the header
     * decode as init is not called for mpeg4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;

FF_DISABLE_DEPRECATION_WARNINGS
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */
/* called after a frame has been decoded. */
void ff_mpv_frame_end(MpegEncContext *s)
FF_DISABLE_DEPRECATION_WARNINGS
    /* redraw edges for the frame if decoding didn't complete */
    // just to make sure that all data is rendered.
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        ff_xvmc_field_end(s);
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */

    /* tell frame-threading consumers this reference frame is complete */
    if (s->current_picture.reference)
        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
/**
 * Print debugging info for the given picture.
 */
void ff_print_debug_info(MpegEncContext *s, Picture *p)
    /* nothing meaningful to dump for hwaccel or table-less pictures */
    if (s->avctx->hwaccel || !p || !p->mb_type)

    if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
        av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: ");
        switch (pict->pict_type) {
        case AV_PICTURE_TYPE_I:
            av_log(s->avctx, AV_LOG_DEBUG, "I\n");
        case AV_PICTURE_TYPE_P:
            av_log(s->avctx, AV_LOG_DEBUG, "P\n");
        case AV_PICTURE_TYPE_B:
            av_log(s->avctx, AV_LOG_DEBUG, "B\n");
        case AV_PICTURE_TYPE_S:
            av_log(s->avctx, AV_LOG_DEBUG, "S\n");
        case AV_PICTURE_TYPE_SI:
            av_log(s->avctx, AV_LOG_DEBUG, "SI\n");
        case AV_PICTURE_TYPE_SP:
            av_log(s->avctx, AV_LOG_DEBUG, "SP\n");
        /* one text row per MB row; each MB prints skip count, qscale and a
         * compact type/direction/partition/interlace glyph sequence */
        for (y = 0; y < s->mb_height; y++) {
            for (x = 0; x < s->mb_width; x++) {
                if (s->avctx->debug & FF_DEBUG_SKIP) {
                    int count = s->mbskip_table[x + y * s->mb_stride];
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                if (s->avctx->debug & FF_DEBUG_QP) {
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d",
                           p->qscale_table[x + y * s->mb_stride]);
                if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
                    int mb_type = p->mb_type[x + y * s->mb_stride];
                    // Type & MV direction
                    if (IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if (IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if (IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if (IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if (IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if (IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if (!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if (!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                        /* bidirectional: must use both reference lists */
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");

                    /* partition shape */
                    if (IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if (IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if (IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                        av_log(s->avctx, AV_LOG_DEBUG, "?");

                    /* interlaced marker */
                    if (IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
/**
 * find the lowest MB row referenced in the MVs
 */
int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int my, off, i, mvs;

    /* field pictures / GMC: be conservative, report the last row */
    if (s->picture_structure != PICT_FRAME || s->mcsel)

    switch (s->mv_type) {
    /* track the extreme vertical displacements; <<qpel_shift normalizes
     * half-pel vectors to quarter-pel units */
    for (i = 0; i < mvs; i++) {
        my = s->mv[dir][i][1]<<qpel_shift;
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    /* >>6: 64 quarter-pel units per 16-pixel MB row, rounded up */
    off = (FFMAX(-my_min, my_max) + 63) >> 6;

    return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
    return s->mb_height-1;
2047 /* put block[] to dest[] */
2048 static inline void put_dct(MpegEncContext *s,
2049 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2051 s->dct_unquantize_intra(s, block, i, qscale);
2052 s->idsp.idct_put(dest, line_size, block);
2055 /* add block[] to dest[] */
2056 static inline void add_dct(MpegEncContext *s,
2057 int16_t *block, int i, uint8_t *dest, int line_size)
2059 if (s->block_last_index[i] >= 0) {
2060 s->idsp.idct_add(dest, line_size, block);
2064 static inline void add_dequant_dct(MpegEncContext *s,
2065 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2067 if (s->block_last_index[i] >= 0) {
2068 s->dct_unquantize_inter(s, block, i, qscale);
2070 s->idsp.idct_add(dest, line_size, block);
/**
 * Clean dc, ac, coded_block for the current non-intra MB.
 */
void ff_clean_intra_table_entries(MpegEncContext *s)
    int wrap = s->b8_stride;
    int xy = s->block_index[0];

    /* reset the luma DC predictors to the "no prediction" value 1024 */
    s->dc_val[0][xy + 1       ] =
    s->dc_val[0][xy + wrap    ] =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* clear the luma AC prediction buffers (two 8x8 rows of the MB) */
    memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
    if (s->msmpeg4_version>=3) {
        s->coded_block[xy           ] =
        s->coded_block[xy + 1       ] =
        s->coded_block[xy + wrap    ] =
        s->coded_block[xy + 1 + wrap] = 0;
    /* chroma predictors are indexed per MB, not per 8x8 block */
    wrap = s->mb_stride;
    xy = s->mb_x + s->mb_y * wrap;
    s->dc_val[2][xy] = 1024;
    /* clear the chroma AC prediction buffers */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    s->mbintra_table[xy]= 0;
/* generic function called after a macroblock has been parsed by the
   decoder or after it has been encoded by the encoder.

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir : motion vector direction
   s->mv_type : motion vector type
   s->mv : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)
 */
static av_always_inline
void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

FF_DISABLE_DEPRECATION_WARNINGS
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_XVMC */

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
            for(j=0; j<64; j++){
                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->idsp.idct_permutation[j]]);
            av_log(s->avctx, AV_LOG_DEBUG, "\n");

    s->current_picture.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
        if(s->mbintra_table[mb_xy])
            ff_clean_intra_table_entries(s);
        s->last_dc[2] = 128 << s->intra_dc_precision;
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f->linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
        const int block_size = 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                assert(s->pict_type!=AV_PICTURE_TYPE_I);
            } else if(!s->current_picture.reference) {
                *mbskip_ptr = 0; /* not skipped */

        /* interlaced DCT writes alternate lines (double stride) */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
            /* not readable: reconstruct into the scratchpad, copy later */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;

        /* motion handling */
        /* decoding or more than one mb_type (MC was already done otherwise) */
            if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                /* wait until the reference rows we motion-compensate from
                 * have been decoded by the other frame thread */
                if (s->mv_dir & MV_DIR_FORWARD) {
                    ff_thread_await_progress(&s->last_picture_ptr->tf,
                                             ff_mpv_lowest_referenced_row(s, 0),
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    ff_thread_await_progress(&s->next_picture_ptr->tf,
                                             ff_mpv_lowest_referenced_row(s, 1),

                op_qpix= s->me.qpel_put;
                if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                    op_pix = s->hdsp.put_pixels_tab;
                    op_pix = s->hdsp.put_no_rnd_pixels_tab;
                if (s->mv_dir & MV_DIR_FORWARD) {
                    ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
                    /* a second prediction is averaged on top of the first */
                    op_pix = s->hdsp.avg_pixels_tab;
                    op_qpix= s->me.qpel_avg;
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)

            /* add dct residue */
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
                /* residue is already dequantized: plain idct_add */
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            /* dct only in intra block */
            if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                /* intra block already dequantized by the caller: idct_put */
                s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
                s->idsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, block[2]);
                s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize * 8;

                        s->idsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->idsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->idsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
                            s->idsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
                            s->idsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->idsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
            /* copy the scratchpad reconstruction into the real frame */
            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y ,  linesize,16);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatches to mpv_decode_mb_internal() with the
 * is_mpeg12 flag as a compile-time constant, so the av_always_inline body
 * is specialized per output format. */
void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
    if(s->out_format == FMT_MPEG1) {
        mpv_decode_mb_internal(s, block, 1);
        mpv_decode_mb_internal(s, block, 0);
/* Hand a completed horizontal band of rows to ff_draw_horiz_band(). */
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
    ff_draw_horiz_band(s->avctx, s->current_picture.f,
                       s->last_picture.f, y, h, s->picture_structure,
                       s->first_field, s->low_delay);
/* Set up the per-MB 8x8 block indices and the dest[] pointers for the
 * current macroblock position (s->mb_x/s->mb_y). */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f->linesize[1];
    const int mb_size= 4;  /* log2 of the 16-pixel macroblock size */

    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* indices 4/5 address the region past the luma 8x8 grid */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
    s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y *   linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            /* field picture: strides are doubled, so halve the row index */
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non-zero coefficient in scantable order, used to speed up the permutation
 * @param scantable the used scantable; this is only used to speed up the permutation,
 *        the block is not (inverse-)permuted to scantable order!
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t scratch[64];
    int idx;

    /* Nothing to move if there are no coefficients past position 0. */
    if (last <= 0)
        return;

    /* Lift every touched coefficient out of the block and clear its slot,
     * so the in-place scatter below cannot overwrite an unread value. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        scratch[pos]  = block[pos];
        block[pos]    = 0;
    }

    /* Scatter the saved coefficients back at their permuted positions. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        block[permutation[pos]] = scratch[pos];
    }
}
2431 void ff_mpeg_flush(AVCodecContext *avctx){
2433 MpegEncContext *s = avctx->priv_data;
2435 if (!s || !s->picture)
2438 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2439 ff_mpeg_unref_picture(s, &s->picture[i]);
2440 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2442 ff_mpeg_unref_picture(s, &s->current_picture);
2443 ff_mpeg_unref_picture(s, &s->last_picture);
2444 ff_mpeg_unref_picture(s, &s->next_picture);
2446 s->mb_x= s->mb_y= 0;
2448 s->parse_context.state= -1;
2449 s->parse_context.frame_start_found= 0;
2450 s->parse_context.overread= 0;
2451 s->parse_context.overread_index= 0;
2452 s->parse_context.index= 0;
2453 s->parse_context.last_index= 0;
2454 s->bitstream_buffer_size=0;
 * Set qscale and update the qscale-dependent variables.
2461 void ff_set_qscale(MpegEncContext * s, int qscale)
2465 else if (qscale > 31)
2469 s->chroma_qscale= s->chroma_qscale_table[qscale];
2471 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2472 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2475 void ff_mpv_report_decode_progress(MpegEncContext *s)
2477 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2478 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);