2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
40 #include "mpegutils.h"
41 #include "mpegvideo.h"
45 #include "xvmc_internal.h"
/* Default luma->chroma qscale mapping: the identity (chroma uses the same
 * quantizer as luma).
 * NOTE(review): the closing "};" is not visible in this excerpt. */
49 static const uint8_t ff_default_chroma_qscale_table[32] = {
50 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
51 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
52 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale table: a constant 8 for all 128 qscale indices (MPEG-1
 * always codes intra DC with scale 8).
 * NOTE(review): the closing "};" is not visible in this excerpt. */
55 const uint8_t ff_mpeg1_dc_scale_table[128] = {
56 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
57 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* Constant-4 DC scale table; entry [1] of ff_mpeg2_dc_scale_table below.
 * NOTE(review): the closing "};" is not visible in this excerpt. */
67 static const uint8_t mpeg2_dc_scale_table1[128] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* Constant-2 DC scale table; entry [2] of ff_mpeg2_dc_scale_table below.
 * NOTE(review): the closing "};" is not visible in this excerpt. */
79 static const uint8_t mpeg2_dc_scale_table2[128] = {
80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* Constant-1 DC scale table; entry [3] of ff_mpeg2_dc_scale_table below.
 * NOTE(review): the closing "};" is not visible in this excerpt. */
91 static const uint8_t mpeg2_dc_scale_table3[128] = {
92 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
93 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale tables halving from 8 down to 1; presumably indexed by the MPEG-2
 * intra_dc_precision field (0..3) — confirm against callers (not visible
 * here). NOTE(review): the closing "};" is not visible in this excerpt. */
103 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
104 ff_mpeg1_dc_scale_table,
105 mpeg2_dc_scale_table1,
106 mpeg2_dc_scale_table2,
107 mpeg2_dc_scale_table3,
/* Alternate horizontal zigzag scan order: 64 coefficient positions in an
 * 8x8 block, biased toward horizontal frequencies.
 * NOTE(review): the closing "};" is not visible in this excerpt. */
110 const uint8_t ff_alternate_horizontal_scan[64] = {
111 0, 1, 2, 3, 8, 9, 16, 17,
112 10, 11, 4, 5, 6, 7, 15, 14,
113 13, 12, 19, 18, 24, 25, 32, 33,
114 26, 27, 20, 21, 22, 23, 28, 29,
115 30, 31, 34, 35, 40, 41, 48, 49,
116 42, 43, 36, 37, 38, 39, 44, 45,
117 46, 47, 50, 51, 56, 57, 58, 59,
118 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical scan order (MPEG-2 alternate scan / interlaced
 * content): 64 coefficient positions biased toward vertical frequencies.
 * Used by ff_mpv_idct_init below when s->alternate_scan is set.
 * NOTE(review): the closing "};" is not visible in this excerpt. */
121 const uint8_t ff_alternate_vertical_scan[64] = {
122 0, 8, 16, 24, 1, 9, 2, 10,
123 17, 25, 32, 40, 48, 56, 57, 49,
124 41, 33, 26, 18, 3, 11, 4, 12,
125 19, 27, 34, 42, 50, 58, 35, 43,
126 51, 59, 20, 28, 5, 13, 6, 14,
127 21, 29, 36, 44, 52, 60, 37, 45,
128 53, 61, 22, 30, 7, 15, 23, 31,
129 38, 46, 54, 62, 39, 47, 55, 63,
/* Inverse-quantize an intra block per MPEG-1 rules: DC is scaled by
 * y_dc_scale/c_dc_scale, AC by qscale * intra matrix (>>3), then made odd
 * via (level-1)|1 ("oddification" mismatch control).
 * NOTE(review): this excerpt is missing lines (function braces, the n<4
 * luma/chroma selector, and the sign handling around the two level
 * computations) — do not modify without the full file. */
132 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
133 int16_t *block, int n, int qscale)
135 int i, level, nCoeffs;
136 const uint16_t *quant_matrix;
138 nCoeffs= s->block_last_index[n];
/* DC term: luma branch (chroma branch follows; selecting if/else not visible) */
141 block[0] = block[0] * s->y_dc_scale;
143 block[0] = block[0] * s->c_dc_scale;
144 /* XXX: only mpeg1 */
145 quant_matrix = s->intra_matrix;
/* AC terms start at i=1; j is the IDCT-permuted scan position */
146 for(i=1;i<=nCoeffs;i++) {
147 int j= s->intra_scantable.permutated[i];
/* two copies presumably cover the negative/positive level branches — confirm */
152 level = (int)(level * qscale * quant_matrix[j]) >> 3;
153 level = (level - 1) | 1;
156 level = (int)(level * qscale * quant_matrix[j]) >> 3;
157 level = (level - 1) | 1;
/* Inverse-quantize an inter block per MPEG-1 rules: loop starts at i=0 (DC is
 * quantized like AC for inter blocks), level is (2*|level|+1) * qscale *
 * inter matrix (>>4), then oddified with (level-1)|1.
 * NOTE(review): excerpt has gaps (braces, sign handling). The scan
 * permutation comes from intra_scantable here, consistent with the other
 * unquantizers in this file. */
164 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
165 int16_t *block, int n, int qscale)
167 int i, level, nCoeffs;
168 const uint16_t *quant_matrix;
170 nCoeffs= s->block_last_index[n];
172 quant_matrix = s->inter_matrix;
173 for(i=0; i<=nCoeffs; i++) {
174 int j= s->intra_scantable.permutated[i];
/* two copies presumably cover the negative/positive level branches — confirm */
179 level = (((level << 1) + 1) * qscale *
180 ((int) (quant_matrix[j]))) >> 4;
181 level = (level - 1) | 1;
184 level = (((level << 1) + 1) * qscale *
185 ((int) (quant_matrix[j]))) >> 4;
186 level = (level - 1) | 1;
/* Inverse-quantize an intra block per MPEG-2 rules. Unlike the MPEG-1
 * variant above there is no (level-1)|1 oddification, and with
 * alternate_scan all 63 AC coefficients are processed regardless of
 * block_last_index.
 * NOTE(review): excerpt has gaps (braces, luma/chroma selector, sign
 * handling) — do not modify without the full file. */
193 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
194 int16_t *block, int n, int qscale)
196 int i, level, nCoeffs;
197 const uint16_t *quant_matrix;
199 if(s->alternate_scan) nCoeffs= 63;
200 else nCoeffs= s->block_last_index[n];
/* DC term: luma branch, then chroma branch (selector not visible) */
203 block[0] = block[0] * s->y_dc_scale;
205 block[0] = block[0] * s->c_dc_scale;
206 quant_matrix = s->intra_matrix;
207 for(i=1;i<=nCoeffs;i++) {
208 int j= s->intra_scantable.permutated[i];
213 level = (int)(level * qscale * quant_matrix[j]) >> 3;
216 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bitexact variant of dct_unquantize_mpeg2_intra_c, installed by dct_init
 * below when CODEC_FLAG_BITEXACT is set. The visible body mirrors the
 * non-bitexact version; the distinguishing logic (presumably the MPEG-2
 * mismatch-control/sum handling) falls in lines missing from this excerpt —
 * confirm against the full file. */
223 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
224 int16_t *block, int n, int qscale)
226 int i, level, nCoeffs;
227 const uint16_t *quant_matrix;
230 if(s->alternate_scan) nCoeffs= 63;
231 else nCoeffs= s->block_last_index[n];
/* DC term: luma branch, then chroma branch (selector not visible) */
234 block[0] = block[0] * s->y_dc_scale;
236 block[0] = block[0] * s->c_dc_scale;
237 quant_matrix = s->intra_matrix;
238 for(i=1;i<=nCoeffs;i++) {
239 int j= s->intra_scantable.permutated[i];
244 level = (int)(level * qscale * quant_matrix[j]) >> 3;
247 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Inverse-quantize an inter block per MPEG-2 rules: (2*|level|+1) * qscale *
 * inter matrix (>>4), no oddification; with alternate_scan all 63 AC
 * coefficients are processed.
 * NOTE(review): excerpt has gaps (braces, sign handling, and any trailing
 * mismatch-control code) — do not modify without the full file. */
256 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
257 int16_t *block, int n, int qscale)
259 int i, level, nCoeffs;
260 const uint16_t *quant_matrix;
263 if(s->alternate_scan) nCoeffs= 63;
264 else nCoeffs= s->block_last_index[n];
266 quant_matrix = s->inter_matrix;
267 for(i=0; i<=nCoeffs; i++) {
268 int j= s->intra_scantable.permutated[i];
/* two copies presumably cover the negative/positive level branches — confirm */
273 level = (((level << 1) + 1) * qscale *
274 ((int) (quant_matrix[j]))) >> 4;
277 level = (((level << 1) + 1) * qscale *
278 ((int) (quant_matrix[j]))) >> 4;
/* Inverse-quantize an intra block per H.263 rules: uniform reconstruction
 * level*qmul +/- qadd, where qadd = (qscale-1)|1 keeps the offset odd.
 * NOTE(review): excerpt has gaps — the declaration/assignment of nCoeffs and
 * qmul, the luma/chroma DC selector, and the sign branches are not visible.
 * Coefficient count comes from raster_end (raster-order last index), not the
 * scan index directly. */
287 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
288 int16_t *block, int n, int qscale)
290 int i, level, qmul, qadd;
293 assert(s->block_last_index[n]>=0);
/* DC term: luma branch, then chroma branch (selector not visible) */
299 block[0] = block[0] * s->y_dc_scale;
301 block[0] = block[0] * s->c_dc_scale;
302 qadd = (qscale - 1) | 1;
309 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
311 for(i=1; i<=nCoeffs; i++) {
/* negative levels subtract qadd, positive levels add it */
315 level = level * qmul - qadd;
317 level = level * qmul + qadd;
/* Inverse-quantize an inter block per H.263 rules: like the intra variant
 * above but with no special DC handling and the loop starting at i=0.
 * NOTE(review): excerpt has gaps — nCoeffs/qmul setup and the sign branches
 * are not visible. */
324 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
325 int16_t *block, int n, int qscale)
327 int i, level, qmul, qadd;
330 assert(s->block_last_index[n]>=0);
332 qadd = (qscale - 1) | 1;
335 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
337 for(i=0; i<=nCoeffs; i++) {
/* negative levels subtract qadd, positive levels add it */
341 level = level * qmul - qadd;
343 level = level * qmul + qadd;
/* Error-resilience callback (installed as er->decode_mb in init_er below):
 * reconstructs one macroblock from the given motion data. Sets up the MB
 * state on the context, computes the destination pointers into the current
 * frame, and calls ff_mpv_decode_mb.
 * NOTE(review): excerpt has gaps (parameter list is split across a missing
 * line; mb_x/mb_y assignment lines are not visible). */
350 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
352 int mb_x, int mb_y, int mb_intra, int mb_skipped)
354 MpegEncContext *s = opaque;
357 s->mv_type = mv_type;
358 s->mb_intra = mb_intra;
359 s->mb_skipped = mb_skipped;
/* copy the caller's motion vectors wholesale */
362 memcpy(s->mv, mv, sizeof(*mv));
364 ff_init_block_index(s);
365 ff_update_block_index(s);
367 s->bdsp.clear_blocks(s->block[0]);
/* dest: luma at 16x16 granularity, chroma scaled by the chroma shifts */
369 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
370 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
371 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
374 ff_mpv_decode_mb(s, s->block);
377 /* init common dct for both encoder and decoder */
/* Initializes the DSP sub-contexts and installs the C dct_unquantize
 * implementations defined above; bitexact MPEG-2 intra replaces the default
 * when CODEC_FLAG_BITEXACT is set. Per-arch overrides follow.
 * NOTE(review): excerpt has gaps — the #if ARCH_* guards around the per-arch
 * init calls and the return statement are not visible. */
378 static av_cold int dct_init(MpegEncContext *s)
380 ff_blockdsp_init(&s->bdsp, s->avctx);
381 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
382 ff_me_cmp_init(&s->mecc, s->avctx);
383 ff_mpegvideodsp_init(&s->mdsp);
384 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
386 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
387 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
388 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
389 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
390 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
391 if (s->flags & CODEC_FLAG_BITEXACT)
392 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
393 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* architecture-specific overrides (guarding #if lines not visible here) */
395 if (HAVE_INTRINSICS_NEON)
396 ff_mpv_common_init_neon(s);
399 ff_mpv_common_init_arm(s);
401 ff_mpv_common_init_ppc(s);
403 ff_mpv_common_init_x86(s);
/* Initializes the IDCT context and builds the permuted scan tables:
 * inter/intra scantables use the alternate vertical scan when
 * alternate_scan is set, the plain zigzag otherwise; the horizontal/vertical
 * intra scantables are always built.
 * NOTE(review): the "else" between the two scantable pairs is on a line not
 * visible in this excerpt. */
408 av_cold void ff_mpv_idct_init(MpegEncContext *s)
410 ff_idctdsp_init(&s->idsp, s->avctx);
412 /* load & permutate scantables
413 * note: only wmv uses different ones
415 if (s->alternate_scan) {
416 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
417 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
419 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
420 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
422 ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
423 ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocates the linesize-dependent scratch buffers (edge emulation buffer
 * and the shared ME/RD/B/OBMC scratchpad). Returns 0 on success,
 * AVERROR(ENOMEM) on failure (the edge_emu_buffer is freed again on the
 * failure path).
 * NOTE(review): excerpt has gaps — the success return and parts of the
 * scratchpad pointer setup are not visible; note the four scratchpads
 * deliberately alias one allocation. */
426 static int frame_size_alloc(MpegEncContext *s, int linesize)
428 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
430 // edge emu needs blocksize + filter length - 1
431 // (= 17x17 for halfpel / 21x21 for h264)
432 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
433 // at uvlinesize. It supports only YUV420 so 24x24 is enough
434 // linesize * interlaced * MBsize
435 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
438 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
/* all four scratchpads share the same backing allocation */
440 s->me.temp = s->me.scratchpad;
441 s->rd_scratchpad = s->me.scratchpad;
442 s->b_scratchpad = s->me.scratchpad;
443 s->obmc_scratchpad = s->me.scratchpad + 16;
447 av_freep(&s->edge_emu_buffer);
448 return AVERROR(ENOMEM);
452 * Allocate a frame buffer
/* Gets a frame buffer for a Picture. Decoders (non-WMV3IMAGE/VC1IMAGE/MSS2)
 * go through ff_thread_get_buffer; encoders allocate with extra EDGE_WIDTH
 * padding on all sides via the default get_buffer2 and then offset the data
 * pointers past the edges. Also validates that strides did not change and
 * that the two chroma planes share a stride, allocates hwaccel private data
 * when needed, and (re)allocates the context scratch buffers.
 * NOTE(review): excerpt has gaps — the encoder/decoder if/else structure,
 * several returns/gotos and closing braces are not visible; do not modify
 * without the full file. */
454 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
456 int edges_needed = av_codec_is_encoder(s->avctx->codec);
/* decoder path: user-supplied/thread-aware buffer allocation */
460 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
461 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
462 s->codec_id != AV_CODEC_ID_MSS2) {
/* encoder path presumably: over-allocate by 2*EDGE_WIDTH per dimension */
464 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
465 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
468 r = ff_thread_get_buffer(s->avctx, &pic->tf,
469 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
471 pic->f->width = s->avctx->width;
472 pic->f->height = s->avctx->height;
473 pic->f->format = s->avctx->pix_fmt;
474 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
477 if (r < 0 || !pic->f->buf[0]) {
478 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* advance plane pointers past the allocated edges (encoder path) */
485 for (i = 0; pic->f->data[i]; i++) {
486 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
487 pic->f->linesize[i] +
488 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
489 pic->f->data[i] += offset;
491 pic->f->width = s->avctx->width;
492 pic->f->height = s->avctx->height;
495 if (s->avctx->hwaccel) {
496 assert(!pic->hwaccel_picture_private);
497 if (s->avctx->hwaccel->frame_priv_data_size) {
498 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
499 if (!pic->hwaccel_priv_buf) {
500 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
503 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* a stride change mid-stream is unsupported: unref and fail */
507 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
508 s->uvlinesize != pic->f->linesize[1])) {
509 av_log(s->avctx, AV_LOG_ERROR,
510 "get_buffer() failed (stride changed)\n");
511 ff_mpeg_unref_picture(s, pic);
515 if (pic->f->linesize[1] != pic->f->linesize[2]) {
516 av_log(s->avctx, AV_LOG_ERROR,
517 "get_buffer() failed (uv stride mismatch)\n");
518 ff_mpeg_unref_picture(s, pic);
/* lazily allocate the stride-dependent scratch buffers */
522 if (!s->edge_emu_buffer &&
523 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
524 av_log(s->avctx, AV_LOG_ERROR,
525 "get_buffer() failed to allocate context scratch buffers.\n");
526 ff_mpeg_unref_picture(s, pic);
/* Releases all per-Picture side-data buffer references (variance, skip,
 * qscale, mb_type, and the two motion/ref-index pairs). av_buffer_unref is
 * NULL-safe, so this is callable on partially-allocated pictures.
 * NOTE(review): excerpt has gaps — the declaration of i and closing braces
 * are not visible. */
533 void ff_free_picture_tables(Picture *pic)
537 av_buffer_unref(&pic->mb_var_buf);
538 av_buffer_unref(&pic->mc_mb_var_buf);
539 av_buffer_unref(&pic->mb_mean_buf);
540 av_buffer_unref(&pic->mbskip_table_buf);
541 av_buffer_unref(&pic->qscale_table_buf);
542 av_buffer_unref(&pic->mb_type_buf);
544 for (i = 0; i < 2; i++) {
545 av_buffer_unref(&pic->motion_val_buf[i]);
546 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocates the per-Picture side-data buffers: always the skip/qscale/
 * mb_type tables; the encoder-only variance/mean tables (the guarding
 * s->encoding check is on a line not visible here); and, for FMT_H263 or
 * encoding, the motion-vector and reference-index buffers. Returns 0 or
 * AVERROR(ENOMEM); callers are expected to free via ff_free_picture_tables.
 * NOTE(review): excerpt has gaps — several conditionals, the element size of
 * mb_type_buf, and the success return are not visible. */
550 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
552 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
553 const int mb_array_size = s->mb_stride * s->mb_height;
554 const int b8_array_size = s->b8_stride * s->mb_height * 2;
558 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
559 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
560 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
562 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
563 return AVERROR(ENOMEM);
/* encoder statistics tables (guarding condition not visible here) */
566 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
567 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
568 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
569 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
570 return AVERROR(ENOMEM);
573 if (s->out_format == FMT_H263 || s->encoding) {
574 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
575 int ref_index_size = 4 * mb_array_size;
576 
577 for (i = 0; mv_size && i < 2; i++) {
578 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
579 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
580 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
581 return AVERROR(ENOMEM);
/* Ensures every side-data buffer of the Picture is uniquely owned, making
 * each shared reference writable via av_buffer_make_writable. The
 * MAKE_WRITABLE macro body is split across lines missing from this excerpt
 * (presumably it checks the buffer exists and returns on failure — confirm
 * against the full file). */
588 static int make_tables_writable(Picture *pic)
591 #define MAKE_WRITABLE(table) \
594 (ret = av_buffer_make_writable(&pic->table)) < 0)\
598 MAKE_WRITABLE(mb_var_buf);
599 MAKE_WRITABLE(mc_mb_var_buf);
600 MAKE_WRITABLE(mb_mean_buf);
601 MAKE_WRITABLE(mbskip_table_buf);
602 MAKE_WRITABLE(qscale_table_buf);
603 MAKE_WRITABLE(mb_type_buf);
605 for (i = 0; i < 2; i++) {
606 MAKE_WRITABLE(motion_val_buf[i]);
607 MAKE_WRITABLE(ref_index_buf[i]);
614 * Allocate a Picture.
615 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* For shared pictures the pixel data must already be present (first
 * assert); otherwise the frame buffer is obtained via alloc_frame_buffer
 * and the context strides are captured. The side-data tables are allocated
 * on first use or made writable if already present, then the convenience
 * pointers (mb_var, qscale_table, mb_type, motion_val, ...) are derived
 * from the buffers, offset past the table border (2*mb_stride+1 / +4).
 * On any failure everything is unreffed/freed and AVERROR(ENOMEM) returned.
 * NOTE(review): excerpt has gaps — the if(shared)/else structure, the
 * encoder-only guard around the variance pointers, and several gotos are
 * not visible. */
617 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
622 assert(pic->f->data[0]);
625 assert(!pic->f->buf[0]);
627 if (alloc_frame_buffer(s, pic) < 0)
630 s->linesize = pic->f->linesize[0];
631 s->uvlinesize = pic->f->linesize[1];
/* allocate tables on first use, otherwise ensure unique ownership */
634 if (!pic->qscale_table_buf)
635 ret = alloc_picture_tables(s, pic);
637 ret = make_tables_writable(pic);
/* encoder statistics pointers (guarding condition not visible here) */
642 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
643 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
644 pic->mb_mean = pic->mb_mean_buf->data;
647 pic->mbskip_table = pic->mbskip_table_buf->data;
648 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
649 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
651 if (pic->motion_val_buf[0]) {
652 for (i = 0; i < 2; i++) {
653 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
654 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* common failure path */
660 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
661 ff_mpeg_unref_picture(s, pic);
662 ff_free_picture_tables(pic);
663 return AVERROR(ENOMEM);
667 * Deallocate a picture.
/* Releases the frame/hwaccel references of a Picture and zeroes every field
 * past mb_mean (the memset at the end); fields up to and including mb_mean
 * are deliberately preserved across unref. Table buffers are only freed when
 * the picture is flagged needs_realloc. */
669 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
671 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
674 /* WM Image / Screen codecs allocate internal buffers with different
675 * dimensions / colorspaces; ignore user-defined callbacks for these. */
676 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
677 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
678 s->codec_id != AV_CODEC_ID_MSS2)
679 ff_thread_release_buffer(s->avctx, &pic->tf);
/* else-branch for the image codecs presumably falls on a missing line */
681 av_frame_unref(pic->f);
683 av_buffer_unref(&pic->hwaccel_priv_buf);
685 if (pic->needs_realloc)
686 ff_free_picture_tables(pic);
/* wipe everything after the preserved prefix */
688 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Makes dst's side-data buffers reference src's: for every table buffer
 * whose backing store differs, the old reference is dropped and a new
 * reference to src's buffer is taken (the UPDATE_TABLE macro, whose body is
 * split across lines missing from this excerpt, frees everything and
 * returns AVERROR(ENOMEM) when av_buffer_ref fails). The plain data
 * pointers are then copied over verbatim. */
691 static int update_picture_tables(Picture *dst, Picture *src)
695 #define UPDATE_TABLE(table)\
698 (!dst->table || dst->table->buffer != src->table->buffer)) {\
699 av_buffer_unref(&dst->table);\
700 dst->table = av_buffer_ref(src->table);\
702 ff_free_picture_tables(dst);\
703 return AVERROR(ENOMEM);\
708 UPDATE_TABLE(mb_var_buf);
709 UPDATE_TABLE(mc_mb_var_buf);
710 UPDATE_TABLE(mb_mean_buf);
711 UPDATE_TABLE(mbskip_table_buf);
712 UPDATE_TABLE(qscale_table_buf);
713 UPDATE_TABLE(mb_type_buf);
714 for (i = 0; i < 2; i++) {
715 UPDATE_TABLE(motion_val_buf[i]);
716 UPDATE_TABLE(ref_index_buf[i]);
/* mirror the derived pointers after the buffers are shared */
719 dst->mb_var = src->mb_var;
720 dst->mc_mb_var = src->mc_mb_var;
721 dst->mb_mean = src->mb_mean;
722 dst->mbskip_table = src->mbskip_table;
723 dst->qscale_table = src->qscale_table;
724 dst->mb_type = src->mb_type;
725 for (i = 0; i < 2; i++) {
726 dst->motion_val[i] = src->motion_val[i];
727 dst->ref_index[i] = src->ref_index[i];
/* Creates a new reference in dst to the Picture src: frame reference via
 * ff_thread_ref_frame, side-data tables via update_picture_tables, hwaccel
 * private data via av_buffer_ref, then copies the scalar bookkeeping
 * fields. dst must be empty (first assert). On any failure dst is unreffed
 * (the fail label at the end).
 * NOTE(review): excerpt has gaps — the error checks after the two ref calls
 * and the success return are not visible. */
733 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
737 av_assert0(!dst->f->buf[0]);
738 av_assert0(src->f->buf[0]);
742 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
746 ret = update_picture_tables(dst, src);
750 if (src->hwaccel_picture_private) {
751 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
752 if (!dst->hwaccel_priv_buf)
754 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
/* scalar bookkeeping copied by value */
757 dst->field_picture = src->field_picture;
758 dst->mb_var_sum = src->mb_var_sum;
759 dst->mc_mb_var_sum = src->mc_mb_var_sum;
760 dst->b_frame_score = src->b_frame_score;
761 dst->needs_realloc = src->needs_realloc;
762 dst->reference = src->reference;
763 dst->shared = src->shared;
/* fail: roll back any partial reference */
767 ff_mpeg_unref_picture(s, dst);
/* Swaps the U and V pblock pointers (used for the "VCR2" codec tag below).
 * NOTE(review): only one assignment of the swap is visible — the temporary
 * and the other half of the exchange fall on lines missing from this
 * excerpt. */
771 static void exchange_uv(MpegEncContext *s)
776 s->pblocks[4] = s->pblocks[5];
/* Initializes the per-thread (duplicate) parts of an MpegEncContext:
 * motion-estimation maps, optional noise-reduction error sum, the block
 * arrays with their pblocks pointers (U/V swapped for the "VCR2" fourcc),
 * and for FMT_H263 the AC prediction values. Scratch buffers are NOT
 * allocated here (obmc_scratchpad reset to NULL; frame_size_alloc does that
 * lazily once the linesize is known). Returns -1 on allocation failure; the
 * partially-allocated buffers are released later by ff_mpv_common_end.
 * NOTE(review): excerpt has gaps — the encoder-only guard around the ME
 * maps, the exchange_uv call, and the success return are not visible. */
780 static int init_duplicate_context(MpegEncContext *s)
782 int y_size = s->b8_stride * (2 * s->mb_height + 1);
783 int c_size = s->mb_stride * (s->mb_height + 1);
784 int yc_size = y_size + 2 * c_size;
792 s->obmc_scratchpad = NULL;
795 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
796 ME_MAP_SIZE * sizeof(uint32_t), fail)
797 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
798 ME_MAP_SIZE * sizeof(uint32_t), fail)
799 if (s->avctx->noise_reduction) {
800 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
801 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered */
804 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
805 s->block = s->blocks[0];
807 for (i = 0; i < 12; i++) {
808 s->pblocks[i] = &s->block[i];
/* VCR2 stores chroma planes in the opposite order */
810 if (s->avctx->codec_tag == AV_RL32("VCR2"))
813 if (s->out_format == FMT_H263) {
815 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
816 yc_size * sizeof(int16_t) * 16, fail);
817 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
818 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
819 s->ac_val[2] = s->ac_val[1] + c_size;
824 return -1; // free() through ff_mpv_common_end()
/* Frees everything init_duplicate_context and frame_size_alloc allocated
 * for one thread context. obmc_scratchpad is only reset to NULL, not freed:
 * it aliases me.scratchpad (see frame_size_alloc above).
 * NOTE(review): excerpt has gaps — the NULL guard at the top and the resets
 * of the aliasing pointers are partly missing. */
827 static void free_duplicate_context(MpegEncContext *s)
832 av_freep(&s->edge_emu_buffer);
833 av_freep(&s->me.scratchpad);
/* aliases of me.scratchpad — cleared, not freed */
837 s->obmc_scratchpad = NULL;
839 av_freep(&s->dct_error_sum);
840 av_freep(&s->me.map);
841 av_freep(&s->me.score_map);
842 av_freep(&s->blocks);
843 av_freep(&s->ac_val_base);
/* Copies the per-thread fields from src into bak; used by
 * ff_update_duplicate_context below to preserve a thread's private pointers
 * across a whole-struct memcpy.
 * NOTE(review): most COPY() lines of this function are missing from this
 * excerpt — only a few of the copied fields are visible. */
847 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
849 #define COPY(a) bak->a = src->a
850 COPY(edge_emu_buffer);
855 COPY(obmc_scratchpad);
862 COPY(me.map_generation);
/* Refreshes a slice-thread context from the master: backs up dst's private
 * fields, memcpys the whole struct from src, restores the backup, rebuilds
 * the pblocks pointers (with the VCR2 U/V swap), and lazily allocates the
 * scratch buffers if dst does not have them yet.
 * NOTE(review): excerpt has gaps — the declaration of bak/ret, the
 * exchange_uv call after the VCR2 check, and the return statements are not
 * visible. */
874 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
878 // FIXME copy only needed parts
880 backup_duplicate_context(&bak, dst);
881 memcpy(dst, src, sizeof(MpegEncContext));
882 backup_duplicate_context(dst, &bak);
883 for (i = 0; i < 12; i++) {
884 dst->pblocks[i] = &dst->block[i];
886 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
888 if (!dst->edge_emu_buffer &&
889 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
890 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
891 "scratch buffers.\n");
894 // STOP_TIMER("update_duplicate_context")
895 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading state transfer: copies decoding state from the source
 * thread's MpegEncContext into the destination's. Handles first-time
 * initialization (whole-struct memcpy plus ff_mpv_common_init), dimension
 * changes, the full picture array and the current/last/next pictures,
 * error-resilience fields, two bulk memcpys over field ranges delimited by
 * struct member addresses, the bitstream buffer, and the lazy scratch
 * buffer allocation.
 * NOTE(review): excerpt has gaps throughout (error checks, else branches,
 * returns, closing braces). The bulk memcpys depend on struct layout —
 * keep byte-identical. */
899 int ff_mpeg_update_thread_context(AVCodecContext *dst,
900 const AVCodecContext *src)
903 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
/* nothing to do when copying onto self or source is uninitialized */
905 if (dst == src || !s1->context_initialized)
908 // FIXME can parameters change on I-frames?
909 // in that case dst may need a reinit
910 if (!s->context_initialized) {
911 memcpy(s, s1, sizeof(MpegEncContext));
/* fields owned per-thread must not be shared after the memcpy */
914 s->bitstream_buffer = NULL;
915 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
918 ff_mpv_common_init(s);
921 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
923 s->context_reinit = 0;
924 s->height = s1->height;
925 s->width = s1->width;
926 if ((err = ff_mpv_common_frame_size_change(s)) < 0)
930 s->avctx->coded_height = s1->avctx->coded_height;
931 s->avctx->coded_width = s1->avctx->coded_width;
932 s->avctx->width = s1->avctx->width;
933 s->avctx->height = s1->avctx->height;
935 s->coded_picture_number = s1->coded_picture_number;
936 s->picture_number = s1->picture_number;
/* re-reference every picture slot from the source thread */
938 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
939 ff_mpeg_unref_picture(s, &s->picture[i]);
940 if (s1->picture[i].f->buf[0] &&
941 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
945 #define UPDATE_PICTURE(pic)\
947 ff_mpeg_unref_picture(s, &s->pic);\
948 if (s1->pic.f->buf[0])\
949 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
951 ret = update_picture_tables(&s->pic, &s1->pic);\
956 UPDATE_PICTURE(current_picture);
957 UPDATE_PICTURE(last_picture);
958 UPDATE_PICTURE(next_picture);
/* pointers into s1's picture array are rebased into s's array */
960 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
961 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
962 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
964 // Error/bug resilience
965 s->next_p_frame_damaged = s1->next_p_frame_damaged;
966 s->workaround_bugs = s1->workaround_bugs;
/* bulk copy of the field range [last_time_base .. pb_field_time] */
969 memcpy(&s->last_time_base, &s1->last_time_base,
970 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
971 (char *) &s1->last_time_base);
974 s->max_b_frames = s1->max_b_frames;
975 s->low_delay = s1->low_delay;
976 s->droppable = s1->droppable;
978 // DivX handling (doesn't work)
979 s->divx_packed = s1->divx_packed;
981 if (s1->bitstream_buffer) {
982 if (s1->bitstream_buffer_size +
983 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
984 av_fast_malloc(&s->bitstream_buffer,
985 &s->allocated_bitstream_buffer_size,
986 s1->allocated_bitstream_buffer_size);
987 s->bitstream_buffer_size = s1->bitstream_buffer_size;
988 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
989 s1->bitstream_buffer_size);
990 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
991 FF_INPUT_BUFFER_PADDING_SIZE);
994 // linesize dependend scratch buffer allocation
995 if (!s->edge_emu_buffer)
/* guard on s1->linesize presumably sits on a missing line here */
997 if (frame_size_alloc(s, s1->linesize) < 0) {
998 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
999 "scratch buffers.\n");
1000 return AVERROR(ENOMEM);
1003 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1004 "be allocated due to unknown size.\n");
1008 // MPEG2/interlacing info
1009 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1010 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1012 if (!s1->first_field) {
1013 s->last_pict_type = s1->pict_type;
1014 if (s1->current_picture_ptr)
1015 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1022 * Set the given MpegEncContext to common defaults
1023 * (same for encoding and decoding).
1024 * The changed fields will not depend upon the
1025 * prior state of the MpegEncContext.
/* Resets the shared scale/qscale tables, progressive flags, picture
 * counters and slice context count to their defaults. */
1027 void ff_mpv_common_defaults(MpegEncContext *s)
1029 s->y_dc_scale_table =
1030 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1031 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1032 s->progressive_frame = 1;
1033 s->progressive_sequence = 1;
1034 s->picture_structure = PICT_FRAME;
1036 s->coded_picture_number = 0;
1037 s->picture_number = 0;
1042 s->slice_context_count = 1;
1046 * Set the given MpegEncContext to defaults for decoding.
1047 * the changed fields will not depend upon
1048 * the prior state of the MpegEncContext.
/* Currently just the common defaults; any decoder-specific additions fall
 * on lines not visible in this excerpt. */
1050 void ff_mpv_decode_defaults(MpegEncContext *s)
1052 ff_mpv_common_defaults(s);
/* Initializes the error-resilience context from the MpegEncContext: wires
 * the dimensions/strides and shared tables, allocates the temp buffer and
 * per-MB error status table, and installs mpeg_er_decode_mb (defined above)
 * as the reconstruction callback. Returns AVERROR(ENOMEM) via the fail
 * label on allocation failure.
 * NOTE(review): excerpt has gaps — the declaration of i and the success
 * return are not visible. */
1055 static int init_er(MpegEncContext *s)
1057 ERContext *er = &s->er;
1058 int mb_array_size = s->mb_height * s->mb_stride;
1061 er->avctx = s->avctx;
1062 er->mecc = &s->mecc;
1064 er->mb_index2xy = s->mb_index2xy;
1065 er->mb_num = s->mb_num;
1066 er->mb_width = s->mb_width;
1067 er->mb_height = s->mb_height;
1068 er->mb_stride = s->mb_stride;
1069 er->b8_stride = s->b8_stride;
1071 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1072 er->error_status_table = av_mallocz(mb_array_size);
1073 if (!er->er_temp_buffer || !er->error_status_table)
1076 er->mbskip_table = s->mbskip_table;
1077 er->mbintra_table = s->mbintra_table;
1079 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1080 er->dc_val[i] = s->dc_val[i];
1082 er->decode_mb = mpeg_er_decode_mb;
/* fail: release both buffers (av_freep is NULL-safe) */
1087 av_freep(&er->er_temp_buffer);
1088 av_freep(&er->error_status_table);
1089 return AVERROR(ENOMEM);
1093 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Computes the macroblock geometry (mb_width/stride, b8_stride, edge
 * positions, mb_index2xy) and allocates every resolution-dependent table:
 * encoder MV tables, MB type / lambda / complexity tables, the interlaced
 * field MV tables (MPEG-4 or interlaced ME), the H.263 coded-block /
 * cbp / pred_dir tables, DC prediction values (initialized to 1024, the
 * reset value for DC prediction), the mbintra table (all-1: every MB starts
 * intra) and the mbskip table. Failure paths jump to fail, returning
 * AVERROR(ENOMEM).
 * NOTE(review): excerpt has gaps — several encoder-only guards, block_wrap
 * entries 0-2/4, loop variable declarations (j, k), closing braces and the
 * success return are not visible. Keep byte-identical. */
1095 static int init_context_frame(MpegEncContext *s)
1097 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1099 s->mb_width = (s->width + 15) / 16;
1100 s->mb_stride = s->mb_width + 1;
1101 s->b8_stride = s->mb_width * 2 + 1;
1102 mb_array_size = s->mb_height * s->mb_stride;
1103 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1105 /* set default edge pos, will be overriden
1106 * in decode_header if needed */
1107 s->h_edge_pos = s->mb_width * 16;
1108 s->v_edge_pos = s->mb_height * 16;
1110 s->mb_num = s->mb_width * s->mb_height;
1115 s->block_wrap[3] = s->b8_stride;
1117 s->block_wrap[5] = s->mb_stride;
1119 y_size = s->b8_stride * (2 * s->mb_height + 1);
1120 c_size = s->mb_stride * (s->mb_height + 1);
1121 yc_size = y_size + 2 * c_size;
1123 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
1124 fail); // error ressilience code looks cleaner with this
1125 for (y = 0; y < s->mb_height; y++)
1126 for (x = 0; x < s->mb_width; x++)
1127 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1129 s->mb_index2xy[s->mb_height * s->mb_width] =
1130 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1133 /* Allocate MV tables */
1134 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
1135 mv_table_size * 2 * sizeof(int16_t), fail);
1136 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
1137 mv_table_size * 2 * sizeof(int16_t), fail);
1138 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
1139 mv_table_size * 2 * sizeof(int16_t), fail);
1140 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
1141 mv_table_size * 2 * sizeof(int16_t), fail);
1142 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
1143 mv_table_size * 2 * sizeof(int16_t), fail);
1144 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
1145 mv_table_size * 2 * sizeof(int16_t), fail);
/* working pointers offset past the table border */
1146 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1147 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1148 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1149 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
1151 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
1153 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1155 /* Allocate MB type table */
1156 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
1157 sizeof(uint16_t), fail); // needed for encoding
1159 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
1162 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1163 mb_array_size * sizeof(float), fail);
1164 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1165 mb_array_size * sizeof(float), fail);
1169 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1170 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1171 /* interlaced direct mode decoding tables */
1172 for (i = 0; i < 2; i++) {
1174 for (j = 0; j < 2; j++) {
1175 for (k = 0; k < 2; k++) {
1176 FF_ALLOCZ_OR_GOTO(s->avctx,
1177 s->b_field_mv_table_base[i][j][k],
1178 mv_table_size * 2 * sizeof(int16_t),
1180 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1183 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
1184 mb_array_size * 2 * sizeof(uint8_t), fail);
1185 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
1186 mv_table_size * 2 * sizeof(int16_t), fail);
1187 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
1190 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
1191 mb_array_size * 2 * sizeof(uint8_t), fail);
1194 if (s->out_format == FMT_H263) {
1196 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
1197 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1199 /* cbp, ac_pred, pred_dir */
1200 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
1201 mb_array_size * sizeof(uint8_t), fail);
1202 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
1203 mb_array_size * sizeof(uint8_t), fail);
1206 if (s->h263_pred || s->h263_plus || !s->encoding) {
1208 // MN: we need these for error resilience of intra-frames
1209 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
1210 yc_size * sizeof(int16_t), fail);
1211 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1212 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1213 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the DC prediction reset value */
1214 for (i = 0; i < yc_size; i++)
1215 s->dc_val_base[i] = 1024;
1218 /* which mb is a intra block */
1219 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1220 memset(s->mbintra_table, 1, mb_array_size);
1222 /* init macroblock skip table */
1223 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1224 // Note the + 1 is for a quicker mpeg4 slice_end detection
1228 return AVERROR(ENOMEM);
1232 * init common structure for both encoder and decoder.
1233 * this assumes that some variables like width/height are already set
1235 av_cold int ff_mpv_common_init(MpegEncContext *s)
1238 int nb_slices = (HAVE_THREADS &&
1239 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1240 s->avctx->thread_count : 1;
1242 if (s->encoding && s->avctx->slices)
1243 nb_slices = s->avctx->slices;
/* interlaced MPEG-2: round up to 32-line units, two field-MB rows each */
1245 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1246 s->mb_height = (s->height + 31) / 32 * 2;
1248 s->mb_height = (s->height + 15) / 16;
1250 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1251 av_log(s->avctx, AV_LOG_ERROR,
1252 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* cap the slice count: at most MAX_THREADS and one slice per MB row */
1256 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1259 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1261 max_slices = MAX_THREADS;
1262 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1263 " reducing to %d\n", nb_slices, max_slices);
1264 nb_slices = max_slices;
1267 if ((s->width || s->height) &&
1268 av_image_check_size(s->width, s->height, 0, s->avctx))
1273 s->flags = s->avctx->flags;
1274 s->flags2 = s->avctx->flags2;
1276 /* set chroma shifts */
1277 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1279 &s->chroma_y_shift);
1281 /* convert fourcc to upper case */
1282 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1284 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
/* allocate the picture pool and the dedicated last/next/current/new frames */
1286 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1287 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1288 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1289 s->picture[i].f = av_frame_alloc();
1290 if (!s->picture[i].f)
1293 memset(&s->next_picture, 0, sizeof(s->next_picture));
1294 memset(&s->last_picture, 0, sizeof(s->last_picture));
1295 memset(&s->current_picture, 0, sizeof(s->current_picture));
1296 memset(&s->new_picture, 0, sizeof(s->new_picture));
1297 s->next_picture.f = av_frame_alloc();
1298 if (!s->next_picture.f)
1300 s->last_picture.f = av_frame_alloc();
1301 if (!s->last_picture.f)
1303 s->current_picture.f = av_frame_alloc();
1304 if (!s->current_picture.f)
1306 s->new_picture.f = av_frame_alloc();
1307 if (!s->new_picture.f)
1310 if (s->width && s->height) {
1311 if (init_context_frame(s))
1314 s->parse_context.state = -1;
1317 s->context_initialized = 1;
1318 s->thread_context[0] = s;
1320 if (s->width && s->height) {
/* slice threading: each extra context starts as a shallow copy of s */
1321 if (nb_slices > 1) {
1322 for (i = 1; i < nb_slices; i++) {
1323 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1324 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1327 for (i = 0; i < nb_slices; i++) {
1328 if (init_duplicate_context(s->thread_context[i]) < 0)
/* split the MB rows evenly (with rounding) across the slice contexts */
1330 s->thread_context[i]->start_mb_y =
1331 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1332 s->thread_context[i]->end_mb_y =
1333 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1336 if (init_duplicate_context(s) < 0)
1339 s->end_mb_y = s->mb_height;
1341 s->slice_context_count = nb_slices;
/* error path: tear down whatever was set up so far */
1346 ff_mpv_common_end(s);
1351 * Frees and resets MpegEncContext fields depending on the resolution.
1352 * Is used during resolution changes to avoid a full reinitialization of the
1355 static int free_context_frame(MpegEncContext *s)
1359 av_freep(&s->mb_type);
1360 av_freep(&s->p_mv_table_base);
1361 av_freep(&s->b_forw_mv_table_base);
1362 av_freep(&s->b_back_mv_table_base);
1363 av_freep(&s->b_bidir_forw_mv_table_base);
1364 av_freep(&s->b_bidir_back_mv_table_base);
1365 av_freep(&s->b_direct_mv_table_base);
/* the non-_base pointers alias into the buffers just freed; clear them */
1366 s->p_mv_table = NULL;
1367 s->b_forw_mv_table = NULL;
1368 s->b_back_mv_table = NULL;
1369 s->b_bidir_forw_mv_table = NULL;
1370 s->b_bidir_back_mv_table = NULL;
1371 s->b_direct_mv_table = NULL;
/* per-field (interlaced) MV and field-select tables */
1372 for (i = 0; i < 2; i++) {
1373 for (j = 0; j < 2; j++) {
1374 for (k = 0; k < 2; k++) {
1375 av_freep(&s->b_field_mv_table_base[i][j][k]);
1376 s->b_field_mv_table[i][j][k] = NULL;
1378 av_freep(&s->b_field_select_table[i][j]);
1379 av_freep(&s->p_field_mv_table_base[i][j]);
1380 s->p_field_mv_table[i][j] = NULL;
1382 av_freep(&s->p_field_select_table[i]);
1385 av_freep(&s->dc_val_base);
1386 av_freep(&s->coded_block_base);
1387 av_freep(&s->mbintra_table);
1388 av_freep(&s->cbp_table);
1389 av_freep(&s->pred_dir_table);
1391 av_freep(&s->mbskip_table);
1393 av_freep(&s->er.error_status_table);
1394 av_freep(&s->er.er_temp_buffer);
1395 av_freep(&s->mb_index2xy);
1396 av_freep(&s->lambda_table);
1397 av_freep(&s->cplx_tab);
1398 av_freep(&s->bits_tab);
/* reset; recomputed by the next init_context_frame() */
1400 s->linesize = s->uvlinesize = 0;
/**
 * Re-initialize the context after a frame size change without a full
 * ff_mpv_common_init(): tears down the slice contexts and the
 * per-resolution tables, flags pooled pictures for lazy reallocation,
 * then rebuilds everything at the new dimensions.
 */
1405 int ff_mpv_common_frame_size_change(MpegEncContext *s)
1409 if (s->slice_context_count > 1) {
1410 for (i = 0; i < s->slice_context_count; i++) {
1411 free_duplicate_context(s->thread_context[i]);
1413 for (i = 1; i < s->slice_context_count; i++) {
1414 av_freep(&s->thread_context[i]);
1417 free_duplicate_context(s);
1419 if ((err = free_context_frame(s)) < 0)
/* every pooled picture must be reallocated at the new size */
1423 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1424 s->picture[i].needs_realloc = 1;
1427 s->last_picture_ptr =
1428 s->next_picture_ptr =
1429 s->current_picture_ptr = NULL;
/* interlaced MPEG-2: round up to 32-line units, two field-MB rows each */
1432 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1433 s->mb_height = (s->height + 31) / 32 * 2;
1435 s->mb_height = (s->height + 15) / 16;
1437 if ((s->width || s->height) &&
1438 av_image_check_size(s->width, s->height, 0, s->avctx))
1439 return AVERROR_INVALIDDATA;
1441 if ((err = init_context_frame(s)))
1444 s->thread_context[0] = s;
1446 if (s->width && s->height) {
1447 int nb_slices = s->slice_context_count;
/* rebuild the extra slice contexts as shallow copies of s */
1448 if (nb_slices > 1) {
1449 for (i = 1; i < nb_slices; i++) {
1450 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1451 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1454 for (i = 0; i < nb_slices; i++) {
1455 if (init_duplicate_context(s->thread_context[i]) < 0)
/* split the MB rows evenly (with rounding) across the slice contexts */
1457 s->thread_context[i]->start_mb_y =
1458 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1459 s->thread_context[i]->end_mb_y =
1460 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1463 if (init_duplicate_context(s) < 0)
1466 s->end_mb_y = s->mb_height;
1468 s->slice_context_count = nb_slices;
/* error path: free everything */
1473 ff_mpv_common_end(s);
1477 /* free the common structure for both encoder and decoder (counterpart of ff_mpv_common_init) */
1478 void ff_mpv_common_end(MpegEncContext *s)
1482 if (s->slice_context_count > 1) {
1483 for (i = 0; i < s->slice_context_count; i++) {
1484 free_duplicate_context(s->thread_context[i]);
/* thread_context[0] is s itself, so only [1..] were allocated */
1486 for (i = 1; i < s->slice_context_count; i++) {
1487 av_freep(&s->thread_context[i]);
1489 s->slice_context_count = 1;
1490 } else free_duplicate_context(s);
1492 av_freep(&s->parse_context.buffer);
1493 s->parse_context.buffer_size = 0;
1495 av_freep(&s->bitstream_buffer);
1496 s->allocated_bitstream_buffer_size = 0;
/* release the picture pool and the dedicated picture slots */
1499 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1500 ff_free_picture_tables(&s->picture[i]);
1501 ff_mpeg_unref_picture(s, &s->picture[i]);
1502 av_frame_free(&s->picture[i].f);
1505 av_freep(&s->picture);
1506 ff_free_picture_tables(&s->last_picture);
1507 ff_mpeg_unref_picture(s, &s->last_picture);
1508 av_frame_free(&s->last_picture.f);
1509 ff_free_picture_tables(&s->current_picture);
1510 ff_mpeg_unref_picture(s, &s->current_picture);
1511 av_frame_free(&s->current_picture.f);
1512 ff_free_picture_tables(&s->next_picture);
1513 ff_mpeg_unref_picture(s, &s->next_picture);
1514 av_frame_free(&s->next_picture.f);
1515 ff_free_picture_tables(&s->new_picture);
1516 ff_mpeg_unref_picture(s, &s->new_picture);
1517 av_frame_free(&s->new_picture.f);
1519 free_context_frame(s);
1521 s->context_initialized = 0;
1522 s->last_picture_ptr =
1523 s->next_picture_ptr =
1524 s->current_picture_ptr = NULL;
1525 s->linesize = s->uvlinesize = 0;
/**
 * Fill the max_level[], max_run[] and index_run[] tables of an RLTable.
 * If static_store is non-NULL the three tables are laid out back to back
 * inside it (and a second call becomes a no-op); otherwise each table is
 * allocated with av_malloc().
 */
1528 av_cold void ff_init_rl(RLTable *rl,
1529 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1531 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1532 uint8_t index_run[MAX_RUN + 1];
1533 int last, run, level, start, end, i;
1535 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1536 if (static_store && rl->max_level[0])
1539 /* compute max_level[], max_run[] and index_run[] */
1540 for (last = 0; last < 2; last++) {
/* rl->n serves as the 'unset' sentinel in index_run[] */
1549 memset(max_level, 0, MAX_RUN + 1);
1550 memset(max_run, 0, MAX_LEVEL + 1);
1551 memset(index_run, rl->n, MAX_RUN + 1);
1552 for (i = start; i < end; i++) {
1553 run = rl->table_run[i];
1554 level = rl->table_level[i];
1555 if (index_run[run] == rl->n)
1557 if (level > max_level[run])
1558 max_level[run] = level;
1559 if (run > max_run[level])
1560 max_run[level] = run;
/* static tables live back to back in the caller-provided store;
 * otherwise each table gets its own allocation */
1563 rl->max_level[last] = static_store[last];
1565 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1566 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1568 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1570 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1571 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1573 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1575 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1576 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Build the per-qscale RL_VLC decode tables: rl_vlc[q][] stores the run,
 * the code length and the level already dequantized as
 * table_level * qmul + qadd, so the decoder can skip dequantization.
 */
1580 av_cold void ff_init_vlc_rl(RLTable *rl)
1584 for (q = 0; q < 32; q++) {
1586 int qadd = (q - 1) | 1; // always odd; presumably the H.263 dequant rounding term — verify
1592 for (i = 0; i < rl->vlc.table_size; i++) {
1593 int code = rl->vlc.table[i][0];
1594 int len = rl->vlc.table[i][1];
1597 if (len == 0) { // illegal code
1600 } else if (len < 0) { // more bits needed
1604 if (code == rl->n) { // esc
1608 run = rl->table_run[code] + 1;
1609 level = rl->table_level[code] * qmul + qadd;
1610 if (code >= rl->last) run += 192; // flag 'last coefficient' codes by offsetting run
1613 rl->rl_vlc[q][i].len = len;
1614 rl->rl_vlc[q][i].level = level;
1615 rl->rl_vlc[q][i].run = run;
/* Unreference every picture in the pool that is not a reference frame. */
1620 static void release_unused_pictures(MpegEncContext *s)
1624 /* release non-reference frames */
1625 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1626 if (!s->picture[i].reference)
1627 ff_mpeg_unref_picture(s, &s->picture[i]);
/* A picture slot is reusable when it has no data buffer, or when it is
 * flagged for reallocation and not held as a delayed (DELAYED_PIC_REF)
 * reference. */
1631 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1633 if (!pic->f->buf[0])
1635 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Find a free slot in the picture pool: prefer slots with no buffer at
 * all, then any slot pic_is_unused() accepts; returns
 * AVERROR_INVALIDDATA when the pool is exhausted. */
1640 static int find_unused_picture(MpegEncContext *s, int shared)
1645 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1646 if (!s->picture[i].f->buf[0])
1650 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1651 if (pic_is_unused(s, &s->picture[i]))
1656 return AVERROR_INVALIDDATA;
/* Public wrapper around find_unused_picture(): additionally frees the
 * tables/buffers of a slot that was flagged needs_realloc before handing
 * its index back to the caller. */
1659 int ff_find_unused_picture(MpegEncContext *s, int shared)
1661 int ret = find_unused_picture(s, shared);
1663 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1664 if (s->picture[ret].needs_realloc) {
1665 s->picture[ret].needs_realloc = 0;
1666 ff_free_picture_tables(&s->picture[ret]);
1667 ff_mpeg_unref_picture(s, &s->picture[ret]);
1674 * generic function called after decoding
1675 * the header and before a frame is decoded.
1677 int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1683 /* mark & release old frames */
1684 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1685 s->last_picture_ptr != s->next_picture_ptr &&
1686 s->last_picture_ptr->f->buf[0]) {
1687 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1690 /* release forgotten pictures */
1691 /* if (mpeg124/h263) */
1692 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1693 if (&s->picture[i] != s->last_picture_ptr &&
1694 &s->picture[i] != s->next_picture_ptr &&
1695 s->picture[i].reference && !s->picture[i].needs_realloc) {
1696 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1697 av_log(avctx, AV_LOG_ERROR,
1698 "releasing zombie picture\n");
1699 ff_mpeg_unref_picture(s, &s->picture[i]);
1703 ff_mpeg_unref_picture(s, &s->current_picture);
1705 release_unused_pictures(s);
1707 if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1708 // we already have an unused image
1709 // (maybe it was set before reading the header)
1710 pic = s->current_picture_ptr;
1712 i = ff_find_unused_picture(s, 0);
1714 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1717 pic = &s->picture[i];
1721 if (!s->droppable) {
1722 if (s->pict_type != AV_PICTURE_TYPE_B)
1726 pic->f->coded_picture_number = s->coded_picture_number++;
1728 if (ff_alloc_picture(s, pic, 0) < 0)
1731 s->current_picture_ptr = pic;
1732 // FIXME use only the vars from current_pic
1733 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1734 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1735 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* for field pictures the field order is derived from which field came first */
1736 if (s->picture_structure != PICT_FRAME)
1737 s->current_picture_ptr->f->top_field_first =
1738 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1740 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1741 !s->progressive_sequence;
1742 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1744 s->current_picture_ptr->f->pict_type = s->pict_type;
1745 // if (s->flags && CODEC_FLAG_QSCALE)
1746 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1747 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1749 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1750 s->current_picture_ptr)) < 0)
/* rotate references: a non-B frame pushes next -> last and becomes the new next */
1753 if (s->pict_type != AV_PICTURE_TYPE_B) {
1754 s->last_picture_ptr = s->next_picture_ptr;
1756 s->next_picture_ptr = s->current_picture_ptr;
1758 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1759 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1760 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1761 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1762 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1763 s->pict_type, s->droppable);
/* no usable last picture for an inter frame (or a field-based first
 * keyframe): allocate a gray dummy so motion compensation has valid
 * data to read from */
1765 if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1766 (s->pict_type != AV_PICTURE_TYPE_I ||
1767 s->picture_structure != PICT_FRAME)) {
1768 int h_chroma_shift, v_chroma_shift;
1769 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1770 &h_chroma_shift, &v_chroma_shift);
1771 if (s->pict_type != AV_PICTURE_TYPE_I)
1772 av_log(avctx, AV_LOG_ERROR,
1773 "warning: first frame is no keyframe\n");
1774 else if (s->picture_structure != PICT_FRAME)
1775 av_log(avctx, AV_LOG_INFO,
1776 "allocate dummy last picture for field based first keyframe\n");
1778 /* Allocate a dummy frame */
1779 i = ff_find_unused_picture(s, 0);
1781 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1784 s->last_picture_ptr = &s->picture[i];
1786 s->last_picture_ptr->reference = 3;
1787 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1789 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1790 s->last_picture_ptr = NULL;
/* fill with mid-gray: luma 0, chroma 0x80 (neutral) */
1794 memset(s->last_picture_ptr->f->data[0], 0,
1795 avctx->height * s->last_picture_ptr->f->linesize[0]);
1796 memset(s->last_picture_ptr->f->data[1], 0x80,
1797 (avctx->height >> v_chroma_shift) *
1798 s->last_picture_ptr->f->linesize[1]);
1799 memset(s->last_picture_ptr->f->data[2], 0x80,
1800 (avctx->height >> v_chroma_shift) *
1801 s->last_picture_ptr->f->linesize[2]);
/* mark both fields of the dummy as fully decoded for frame threading */
1803 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1804 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1806 if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1807 s->pict_type == AV_PICTURE_TYPE_B) {
1808 /* Allocate a dummy frame */
1809 i = ff_find_unused_picture(s, 0);
1811 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1814 s->next_picture_ptr = &s->picture[i];
1816 s->next_picture_ptr->reference = 3;
1817 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_I;
1819 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1820 s->next_picture_ptr = NULL;
1823 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1824 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
/* take local references on last/next so they survive pool churn */
1827 if (s->last_picture_ptr) {
1828 ff_mpeg_unref_picture(s, &s->last_picture);
1829 if (s->last_picture_ptr->f->buf[0] &&
1830 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1831 s->last_picture_ptr)) < 0)
1834 if (s->next_picture_ptr) {
1835 ff_mpeg_unref_picture(s, &s->next_picture);
1836 if (s->next_picture_ptr->f->buf[0] &&
1837 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1838 s->next_picture_ptr)) < 0)
1842 if (s->pict_type != AV_PICTURE_TYPE_I &&
1843 !(s->last_picture_ptr && s->last_picture_ptr->f->buf[0])) {
1844 av_log(s, AV_LOG_ERROR,
1845 "Non-reference picture received and no reference available\n");
1846 return AVERROR_INVALIDDATA;
/* field pictures: offset data[] to the selected field and double the
 * line sizes so addressing skips the other field's lines */
1849 if (s->picture_structure!= PICT_FRAME) {
1851 for (i = 0; i < 4; i++) {
1852 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1853 s->current_picture.f->data[i] +=
1854 s->current_picture.f->linesize[i];
1856 s->current_picture.f->linesize[i] *= 2;
1857 s->last_picture.f->linesize[i] *= 2;
1858 s->next_picture.f->linesize[i] *= 2;
1862 s->err_recognition = avctx->err_recognition;
1864 /* set dequantizer, we can't do it during init as
1865 * it might change for mpeg4 and we can't do it in the header
1866 * decode as init is not called for mpeg4 there yet */
1867 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1868 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1869 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1870 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1871 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1872 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1874 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1875 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1879 FF_DISABLE_DEPRECATION_WARNINGS
1880 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1881 return ff_xvmc_field_start(s, avctx);
1882 FF_ENABLE_DEPRECATION_WARNINGS
1883 #endif /* FF_API_XVMC */
1888 /* called after a frame has been decoded. */
1889 void ff_mpv_frame_end(MpegEncContext *s)
1892 FF_DISABLE_DEPRECATION_WARNINGS
1893 /* redraw edges for the frame if decoding didn't complete */
1894 // just to make sure that all data is rendered.
1895 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1896 ff_xvmc_field_end(s);
1898 FF_ENABLE_DEPRECATION_WARNINGS
1899 #endif /* FF_API_XVMC */
/* mark the frame fully decoded so frame-threaded consumers stop waiting */
1903 if (s->current_picture.reference)
1904 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1908 * Print debugging info for the given picture.
1910 void ff_print_debug_info(MpegEncContext *s, Picture *p)
1913 if (s->avctx->hwaccel || !p || !p->mb_type)
1917 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1920 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1921 switch (pict->pict_type) {
1922 case AV_PICTURE_TYPE_I:
1923 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1925 case AV_PICTURE_TYPE_P:
1926 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1928 case AV_PICTURE_TYPE_B:
1929 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1931 case AV_PICTURE_TYPE_S:
1932 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1934 case AV_PICTURE_TYPE_SI:
1935 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1937 case AV_PICTURE_TYPE_SP:
1938 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
/* print an ASCII map of the frame: per MB, the skip count, the qscale
 * and/or a set of mb_type glyphs, depending on the debug flags */
1941 for (y = 0; y < s->mb_height; y++) {
1942 for (x = 0; x < s->mb_width; x++) {
1943 if (s->avctx->debug & FF_DEBUG_SKIP) {
1944 int count = s->mbskip_table[x + y * s->mb_stride];
1947 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1949 if (s->avctx->debug & FF_DEBUG_QP) {
1950 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1951 p->qscale_table[x + y * s->mb_stride]);
1953 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1954 int mb_type = p->mb_type[x + y * s->mb_stride];
1955 // Type & MV direction
1956 if (IS_PCM(mb_type))
1957 av_log(s->avctx, AV_LOG_DEBUG, "P");
1958 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1959 av_log(s->avctx, AV_LOG_DEBUG, "A");
1960 else if (IS_INTRA4x4(mb_type))
1961 av_log(s->avctx, AV_LOG_DEBUG, "i");
1962 else if (IS_INTRA16x16(mb_type))
1963 av_log(s->avctx, AV_LOG_DEBUG, "I");
1964 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1965 av_log(s->avctx, AV_LOG_DEBUG, "d");
1966 else if (IS_DIRECT(mb_type))
1967 av_log(s->avctx, AV_LOG_DEBUG, "D");
1968 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1969 av_log(s->avctx, AV_LOG_DEBUG, "g");
1970 else if (IS_GMC(mb_type))
1971 av_log(s->avctx, AV_LOG_DEBUG, "G");
1972 else if (IS_SKIP(mb_type))
1973 av_log(s->avctx, AV_LOG_DEBUG, "S");
1974 else if (!USES_LIST(mb_type, 1))
1975 av_log(s->avctx, AV_LOG_DEBUG, ">");
1976 else if (!USES_LIST(mb_type, 0))
1977 av_log(s->avctx, AV_LOG_DEBUG, "<");
1979 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1980 av_log(s->avctx, AV_LOG_DEBUG, "X");
// MB partition size
1984 if (IS_8X8(mb_type))
1985 av_log(s->avctx, AV_LOG_DEBUG, "+");
1986 else if (IS_16X8(mb_type))
1987 av_log(s->avctx, AV_LOG_DEBUG, "-");
1988 else if (IS_8X16(mb_type))
1989 av_log(s->avctx, AV_LOG_DEBUG, "|");
1990 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1991 av_log(s->avctx, AV_LOG_DEBUG, " ");
1993 av_log(s->avctx, AV_LOG_DEBUG, "?");
// interlacing
1996 if (IS_INTERLACED(mb_type))
1997 av_log(s->avctx, AV_LOG_DEBUG, "=");
1999 av_log(s->avctx, AV_LOG_DEBUG, " ");
2002 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2008 * find the lowest MB row referenced in the MVs
2010 int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
2012 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2013 int my, off, i, mvs;
/* field pictures / GMC: fall through and assume the whole frame is needed */
2015 if (s->picture_structure != PICT_FRAME || s->mcsel)
2018 switch (s->mv_type) {
2032 for (i = 0; i < mvs; i++) {
2033 my = s->mv[dir][i][1]<<qpel_shift; // scale half-pel MVs up to quarter-pel units
2034 my_max = FFMAX(my_max, my);
2035 my_min = FFMIN(my_min, my);
2038 off = (FFMAX(-my_min, my_max) + 63) >> 6; // 64 quarter-pels == one 16-line MB row
2040 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2042 return s->mb_height-1;
2045 /* dequantize an intra block[] and write (overwrite) its IDCT into dest[] */
2046 static inline void put_dct(MpegEncContext *s,
2047 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2049 s->dct_unquantize_intra(s, block, i, qscale);
2050 s->idsp.idct_put(dest, line_size, block);
2053 /* add the IDCT of an already dequantized block[] to dest[]; a negative block_last_index means the block is empty */
2054 static inline void add_dct(MpegEncContext *s,
2055 int16_t *block, int i, uint8_t *dest, int line_size)
2057 if (s->block_last_index[i] >= 0) {
2058 s->idsp.idct_add(dest, line_size, block);
/* dequantize an inter block[] and add its IDCT to dest[]; skipped when empty */
2062 static inline void add_dequant_dct(MpegEncContext *s,
2063 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2065 if (s->block_last_index[i] >= 0) {
2066 s->dct_unquantize_inter(s, block, i, qscale);
2068 s->idsp.idct_add(dest, line_size, block);
2073 * Clean dc, ac, coded_block for the current non-intra MB.
2075 void ff_clean_intra_table_entries(MpegEncContext *s)
2077 int wrap = s->b8_stride;
2078 int xy = s->block_index[0];
/* luma: DC predictors kept per 8x8 block (b8_stride layout);
 * 1024 is the reset value for DC prediction */
2081 s->dc_val[0][xy + 1 ] =
2082 s->dc_val[0][xy + wrap] =
2083 s->dc_val[0][xy + 1 + wrap] = 1024;
2085 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2086 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2087 if (s->msmpeg4_version>=3) {
2088 s->coded_block[xy ] =
2089 s->coded_block[xy + 1 ] =
2090 s->coded_block[xy + wrap] =
2091 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one predictor entry per macroblock (mb_stride layout) */
2094 wrap = s->mb_stride;
2095 xy = s->mb_x + s->mb_y * wrap;
2097 s->dc_val[2][xy] = 1024;
2099 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2100 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2102 s->mbintra_table[xy]= 0; // MB is no longer marked intra
2105 /* generic function called after a macroblock has been parsed by the
2106 decoder or after it has been encoded by the encoder.
2108 Important variables used:
2109 s->mb_intra : true if intra macroblock
2110 s->mv_dir : motion vector direction
2111 s->mv_type : motion vector type
2112 s->mv : motion vector
2113 s->interlaced_dct : true if interlaced dct used (mpeg2)
/**
 * Reconstruct one macroblock: motion compensation from the last/next
 * pictures plus dequant + IDCT of the residual (or IDCT of intra
 * blocks), written into the current picture, or into a scratchpad when
 * the destination is not yet readable. is_mpeg12 is a compile-time
 * constant selecting the MPEG-1/2 specialization.
 */
2115 static av_always_inline
2116 void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2119 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2122 FF_DISABLE_DEPRECATION_WARNINGS
2123 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2124 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2127 FF_ENABLE_DEPRECATION_WARNINGS
2128 #endif /* FF_API_XVMC */
2130 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2131 /* print DCT coefficients */
2133 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2135 for(j=0; j<64; j++){
2136 av_log(s->avctx, AV_LOG_DEBUG, "%5d",
2137 block[i][s->idsp.idct_permutation[j]]);
2139 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2143 s->current_picture.qscale_table[mb_xy] = s->qscale;
2145 /* update DC predictors for P macroblocks */
2147 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2148 if(s->mbintra_table[mb_xy])
2149 ff_clean_intra_table_entries(s);
2153 s->last_dc[2] = 128 << s->intra_dc_precision;
2156 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2157 s->mbintra_table[mb_xy]=1;
2159 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2160 uint8_t *dest_y, *dest_cb, *dest_cr;
2161 int dct_linesize, dct_offset;
2162 op_pixels_func (*op_pix)[4];
2163 qpel_mc_func (*op_qpix)[16];
2164 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2165 const int uvlinesize = s->current_picture.f->linesize[1];
2166 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2167 const int block_size = 8;
2169 /* avoid copy if macroblock skipped in last frame too */
2170 /* skip only during decoding as we might trash the buffers during encoding a bit */
2172 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2174 if (s->mb_skipped) {
2176 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2178 } else if(!s->current_picture.reference) {
2181 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: double the stride; the second 8-line block starts one (field) line down instead of 8 lines down */
2185 dct_linesize = linesize << s->interlaced_dct;
2186 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2190 dest_cb= s->dest[1];
2191 dest_cr= s->dest[2];
/* destination not readable yet: decode into the scratchpad instead */
2193 dest_y = s->b_scratchpad;
2194 dest_cb= s->b_scratchpad+16*linesize;
2195 dest_cr= s->b_scratchpad+32*linesize;
2199 /* motion handling */
2200 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the referenced rows of the reference frames are decoded */
2203 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2204 if (s->mv_dir & MV_DIR_FORWARD) {
2205 ff_thread_await_progress(&s->last_picture_ptr->tf,
2206 ff_mpv_lowest_referenced_row(s, 0),
2209 if (s->mv_dir & MV_DIR_BACKWARD) {
2210 ff_thread_await_progress(&s->next_picture_ptr->tf,
2211 ff_mpv_lowest_referenced_row(s, 1),
/* first prediction direction uses put ops, the second averages on top */
2216 op_qpix= s->me.qpel_put;
2217 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2218 op_pix = s->hdsp.put_pixels_tab;
2220 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2222 if (s->mv_dir & MV_DIR_FORWARD) {
2223 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2224 op_pix = s->hdsp.avg_pixels_tab;
2225 op_qpix= s->me.qpel_avg;
2227 if (s->mv_dir & MV_DIR_BACKWARD) {
2228 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2232 /* skip dequant / idct if we are really late ;) */
2233 if(s->avctx->skip_idct){
2234 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2235 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2236 || s->avctx->skip_idct >= AVDISCARD_ALL)
2240 /* add dct residue */
/* this path dequantizes here; the excluded codecs presumably carry
 * already-dequantized coefficients (see the add_dct branch below) */
2241 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2242 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2243 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2244 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2245 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2246 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2248 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2249 if (s->chroma_y_shift){
2250 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2251 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2255 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2256 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2257 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2258 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2261 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2262 add_dct(s, block[0], 0, dest_y , dct_linesize);
2263 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2264 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2265 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2267 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2268 if(s->chroma_y_shift){//Chroma420
2269 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2270 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2273 dct_linesize = uvlinesize << s->interlaced_dct;
2274 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2276 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2277 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2278 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2279 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2280 if(!s->chroma_x_shift){//Chroma444
2281 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2282 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2283 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2284 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2289 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2290 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2293 /* dct only in intra block */
2294 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2295 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2296 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2297 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2298 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2300 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2301 if(s->chroma_y_shift){
2302 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2303 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2307 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2308 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2309 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2310 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* intra blocks that are already dequantized: plain IDCT */
2314 s->idsp.idct_put(dest_y, dct_linesize, block[0]);
2315 s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2316 s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
2317 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2319 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2320 if(s->chroma_y_shift){
2321 s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
2322 s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
2325 dct_linesize = uvlinesize << s->interlaced_dct;
2326 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2328 s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
2329 s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
2330 s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2331 s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2332 if(!s->chroma_x_shift){//Chroma444
2333 s->idsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2334 s->idsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2335 s->idsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2336 s->idsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* MB was reconstructed into the scratchpad; copy it into the frame */
2344 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2345 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2346 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatch to the MPEG-1/2 specialization or the
 * generic path of mpv_decode_mb_internal(). */
2351 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
2354 if(s->out_format == FMT_MPEG1) {
2355 mpv_decode_mb_internal(s, block, 1);
2358 mpv_decode_mb_internal(s, block, 0);
/* Forward the horizontal-band notification for rows [y, y+h) to
 * ff_draw_horiz_band(), passing both the current and the last picture
 * along with the field/low-delay state. */
2361 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2363 ff_draw_horiz_band(s->avctx, s->current_picture.f,
2364 s->last_picture.f, y, h, s->picture_structure,
2365 s->first_field, s->low_delay);
/* Compute block_index[] (positions of the MB's 8x8 blocks in the
 * b8/mb grids) and dest[] (output pixel pointers) for the current
 * macroblock position. */
2368 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2369 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2370 const int uvlinesize = s->current_picture.f->linesize[1];
2371 const int mb_size= 4;
/* [0..3]: the four luma 8x8 blocks (b8_stride grid); [4],[5]: chroma */
2373 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2374 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2375 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2376 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2377 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2378 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2379 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* mb_size == 4: pixel offsets are MB coordinates << 4 (16 pixels) */
2381 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
2382 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2383 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2385 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2387 if(s->picture_structure==PICT_FRAME){
2388 s->dest[0] += s->mb_y * linesize << mb_size;
2389 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2390 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: only rows of one parity, so advance by mb_y/2 MB rows */
2392 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2393 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2394 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2395 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2401 * Permute an 8x8 block.
2402 * @param block the block which will be permuted according to the given permutation vector
2403 * @param permutation the permutation vector
2404 * @param last the last non-zero coefficient in scantable order, used to speed the permutation up
2405 * @param scantable the used scantable; this is only used to speed the permutation up, the block is not
2406 * (inverse) permuted to scantable order!
2408 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
2414 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
// First pass over coefficients 0..last in scantable order.
// NOTE(review): the loop body is elided in this excerpt; since the
// second pass reads temp[j], this pass presumably copies block[j] into
// temp[j] (and likely clears block[j]) — confirm against the full file.
2416 for(i=0; i<=last; i++){
2417 const int j= scantable[i];
// Second pass: scatter each saved coefficient to its permuted slot.
2422 for(i=0; i<=last; i++){
2423 const int j= scantable[i];
2424 const int perm_j= permutation[j];
2425 block[perm_j]= temp[j];
/* Flush all decoder picture and parser state (e.g. on seek): unref every
 * picture in the pool plus the current/last/next working pictures, reset
 * the MB position, and clear the parse-context / bitstream-buffer
 * bookkeeping so decoding can restart from a clean slate. */
2429 void ff_mpeg_flush(AVCodecContext *avctx){
2431 MpegEncContext *s = avctx->priv_data;
// Nothing to flush if the context or its picture pool was never set up.
2433 if (!s || !s->picture)
2436 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2437 ff_mpeg_unref_picture(s, &s->picture[i]);
2438 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2440 ff_mpeg_unref_picture(s, &s->current_picture);
2441 ff_mpeg_unref_picture(s, &s->last_picture);
2442 ff_mpeg_unref_picture(s, &s->next_picture);
2444 s->mb_x= s->mb_y= 0;
// Reset the start-code parser so stale partial frames are discarded.
2446 s->parse_context.state= -1;
2447 s->parse_context.frame_start_found= 0;
2448 s->parse_context.overread= 0;
2449 s->parse_context.overread_index= 0;
2450 s->parse_context.index= 0;
2451 s->parse_context.last_index= 0;
2452 s->bitstream_buffer_size=0;
2457 * Set qscale and update qscale-dependent variables.
2459 void ff_set_qscale(MpegEncContext * s, int qscale)
// Clamp qscale to the legal upper bound of 31 (the low-side clamp and
// the s->qscale assignment are elided in this excerpt).
2463 else if (qscale > 31)
// Derived values: chroma qscale via the codec's mapping table, then the
// luma and chroma DC scale factors, the latter indexed by the mapped
// chroma qscale.
2467 s->chroma_qscale= s->chroma_qscale_table[qscale];
2469 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2470 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2473 void ff_mpv_report_decode_progress(MpegEncContext *s)
2475 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2476 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);