2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
37 #include "h264chroma.h"
41 #include "mpegutils.h"
42 #include "mpegvideo.h"
/* Default luma-qscale -> chroma-qscale mapping: the identity (index i -> i). */
49 static const uint8_t ff_default_chroma_qscale_table[32] = {
50 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
51 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
52 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale table: constant 8 for all 128 qscale indices.
 * Also reused as entry 0 of ff_mpeg2_dc_scale_table below. */
55 const uint8_t ff_mpeg1_dc_scale_table[128] = {
56 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
57 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table, entry 1 of ff_mpeg2_dc_scale_table: constant 4
 * (presumably indexed by intra_dc_precision -- TODO confirm at the caller). */
67 static const uint8_t mpeg2_dc_scale_table1[128] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table, entry 2 of ff_mpeg2_dc_scale_table: constant 2. */
79 static const uint8_t mpeg2_dc_scale_table2[128] = {
80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table, entry 3 of ff_mpeg2_dc_scale_table: constant 1
 * (i.e. no DC scaling at the highest precision). */
91 static const uint8_t mpeg2_dc_scale_table3[128] = {
92 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
93 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Selects the DC scale table: index 0 reuses the MPEG-1 table (scale 8),
 * followed by the MPEG-2 tables with scales 4, 2 and 1. */
103 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
104 ff_mpeg1_dc_scale_table,
105 mpeg2_dc_scale_table1,
106 mpeg2_dc_scale_table2,
107 mpeg2_dc_scale_table3,
/* Alternate horizontal coefficient scan order (64 positions);
 * installed as intra_h_scantable in ff_dct_common_init(). */
110 const uint8_t ff_alternate_horizontal_scan[64] = {
111 0, 1, 2, 3, 8, 9, 16, 17,
112 10, 11, 4, 5, 6, 7, 15, 14,
113 13, 12, 19, 18, 24, 25, 32, 33,
114 26, 27, 20, 21, 22, 23, 28, 29,
115 30, 31, 34, 35, 40, 41, 48, 49,
116 42, 43, 36, 37, 38, 39, 44, 45,
117 46, 47, 50, 51, 56, 57, 58, 59,
118 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical coefficient scan order (64 positions); used for both
 * scantables when alternate_scan is set -- see ff_dct_common_init(). */
121 const uint8_t ff_alternate_vertical_scan[64] = {
122 0, 8, 16, 24, 1, 9, 2, 10,
123 17, 25, 32, 40, 48, 56, 57, 49,
124 41, 33, 26, 18, 3, 11, 4, 12,
125 19, 27, 34, 42, 50, 58, 35, 43,
126 51, 59, 20, 28, 5, 13, 6, 14,
127 21, 29, 36, 44, 52, 60, 37, 45,
128 53, 61, 22, 30, 7, 15, 23, 31,
129 38, 46, 54, 62, 39, 47, 55, 63,
/**
 * Dequantize an intra block, MPEG-1 style.
 * DC (block[0]) is scaled by the per-plane DC scale (n < 4 selects luma);
 * AC coefficients are scaled by qscale * intra matrix and then forced odd
 * by "(level - 1) | 1" (MPEG-1 mismatch control).
 */
132 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
133 int16_t *block, int n, int qscale)
135 int i, level, nCoeffs;
136 const uint16_t *quant_matrix;
138 nCoeffs= s->block_last_index[n];
/* DC uses the per-plane DC scale rather than the quant matrix. */
140 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
141 /* XXX: only mpeg1 */
142 quant_matrix = s->intra_matrix;
143 for(i=1;i<=nCoeffs;i++) {
144 int j= s->intra_scantable.permutated[i];
149 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* force the dequantized value odd (mismatch control) */
150 level = (level - 1) | 1;
153 level = (int)(level * qscale * quant_matrix[j]) >> 3;
154 level = (level - 1) | 1;
/**
 * Dequantize an inter block, MPEG-1 style.
 * Unlike the intra variant the loop starts at i = 0 (no special DC path)
 * and uses the inter matrix with ((2*level + 1) * qscale * matrix) >> 4,
 * again forcing the result odd via "(level - 1) | 1".
 */
161 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
162 int16_t *block, int n, int qscale)
164 int i, level, nCoeffs;
165 const uint16_t *quant_matrix;
167 nCoeffs= s->block_last_index[n];
169 quant_matrix = s->inter_matrix;
170 for(i=0; i<=nCoeffs; i++) {
171 int j= s->intra_scantable.permutated[i];
176 level = (((level << 1) + 1) * qscale *
177 ((int) (quant_matrix[j]))) >> 4;
/* force the dequantized value odd (mismatch control) */
178 level = (level - 1) | 1;
181 level = (((level << 1) + 1) * qscale *
182 ((int) (quant_matrix[j]))) >> 4;
183 level = (level - 1) | 1;
/**
 * Dequantize an intra block, MPEG-2 style.
 * Same scaling as the MPEG-1 intra path, but without the "(level - 1) | 1"
 * oddification; with alternate_scan all 64 coefficients are processed
 * regardless of block_last_index.
 */
190 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
191 int16_t *block, int n, int qscale)
193 int i, level, nCoeffs;
194 const uint16_t *quant_matrix;
196 if(s->alternate_scan) nCoeffs= 63;
197 else nCoeffs= s->block_last_index[n];
/* DC uses the per-plane DC scale (n < 4 selects luma). */
199 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
200 quant_matrix = s->intra_matrix;
201 for(i=1;i<=nCoeffs;i++) {
202 int j= s->intra_scantable.permutated[i];
207 level = (int)(level * qscale * quant_matrix[j]) >> 3;
210 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Bit-exact variant of dct_unquantize_mpeg2_intra_c; installed instead of
 * the plain version when CODEC_FLAG_BITEXACT is set (see ff_dct_common_init).
 * The visible scaling steps match the non-bitexact version.
 */
217 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
218 int16_t *block, int n, int qscale)
220 int i, level, nCoeffs;
221 const uint16_t *quant_matrix;
224 if(s->alternate_scan) nCoeffs= 63;
225 else nCoeffs= s->block_last_index[n];
227 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
229 quant_matrix = s->intra_matrix;
230 for(i=1;i<=nCoeffs;i++) {
231 int j= s->intra_scantable.permutated[i];
236 level = (int)(level * qscale * quant_matrix[j]) >> 3;
239 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Dequantize an inter block, MPEG-2 style.
 * Uses the inter matrix with ((2*level + 1) * qscale * matrix) >> 4 and no
 * per-coefficient oddification; with alternate_scan all 64 coefficients
 * are processed.
 */
248 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
249 int16_t *block, int n, int qscale)
251 int i, level, nCoeffs;
252 const uint16_t *quant_matrix;
255 if(s->alternate_scan) nCoeffs= 63;
256 else nCoeffs= s->block_last_index[n];
258 quant_matrix = s->inter_matrix;
259 for(i=0; i<=nCoeffs; i++) {
260 int j= s->intra_scantable.permutated[i];
265 level = (((level << 1) + 1) * qscale *
266 ((int) (quant_matrix[j]))) >> 4;
269 level = (((level << 1) + 1) * qscale *
270 ((int) (quant_matrix[j]))) >> 4;
/**
 * Dequantize an intra block, H.263 style: level * qmul +/- qadd
 * depending on sign, with qadd = (qscale - 1) | 1 (always odd).
 * DC is scaled by the per-plane DC scale; the assert allows an empty
 * block only in AIC mode.
 */
279 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
280 int16_t *block, int n, int qscale)
282 int i, level, qmul, qadd;
285 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
290 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
291 qadd = (qscale - 1) | 1;
/* raster_end maps the last scan index to its raster-order bound. */
298 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
300 for(i=1; i<=nCoeffs; i++) {
304 level = level * qmul - qadd;
306 level = level * qmul + qadd;
/**
 * Dequantize an inter block, H.263 style: same qmul/qadd scheme as the
 * intra variant but the loop starts at i = 0 (DC handled like any other
 * coefficient) and an empty block is never expected.
 */
313 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
314 int16_t *block, int n, int qscale)
316 int i, level, qmul, qadd;
319 av_assert2(s->block_last_index[n]>=0);
321 qadd = (qscale - 1) | 1;
324 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
326 for(i=0; i<=nCoeffs; i++) {
330 level = level * qmul - qadd;
332 level = level * qmul + qadd;
/**
 * Error-resilience callback (installed as er->decode_mb in init_er()):
 * loads the macroblock parameters supplied by the error concealer into the
 * context, recomputes the destination pointers for the three planes and
 * decodes the macroblock.
 */
339 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
341 int mb_x, int mb_y, int mb_intra, int mb_skipped)
343 MpegEncContext *s = opaque;
346 s->mv_type = mv_type;
347 s->mb_intra = mb_intra;
348 s->mb_skipped = mb_skipped;
351 memcpy(s->mv, mv, sizeof(*mv));
353 ff_init_block_index(s);
354 ff_update_block_index(s);
356 s->bdsp.clear_blocks(s->block[0]);
/* Destination pointers: 16x16 luma MB, chroma shifted by the subsampling. */
358 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
359 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
360 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
363 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
364 ff_MPV_decode_mb(s, s->block);
/* Debug replacement for the 16-wide pixel ops (FF_DEBUG_NOMC, see
 * ff_dct_common_init): paints rows mid-gray (128) instead of copying src. */
367 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
370 memset(dst + h*linesize, 128, 16);
/* 8-wide counterpart of gray16: mid-gray fill for the debug no-MC mode. */
373 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
376 memset(dst + h*linesize, 128, 8);
379 /* init common dct for both encoder and decoder */
/* Initializes the DSP helper contexts, selects the C dequantizers
 * (arch-specific init calls may override them), and builds the permutated
 * scantables for the chosen IDCT permutation. */
380 av_cold int ff_dct_common_init(MpegEncContext *s)
382 ff_blockdsp_init(&s->bdsp, s->avctx);
383 ff_dsputil_init(&s->dsp, s->avctx);
384 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
385 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
386 ff_idctdsp_init(&s->idsp, s->avctx);
387 ff_mpegvideodsp_init(&s->mdsp);
388 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* Debug mode: replace motion-compensation pixel ops with gray fills. */
390 if (s->avctx->debug & FF_DEBUG_NOMC) {
392 for (i=0; i<4; i++) {
393 s->hdsp.avg_pixels_tab[0][i] = gray16;
394 s->hdsp.put_pixels_tab[0][i] = gray16;
395 s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
397 s->hdsp.avg_pixels_tab[1][i] = gray8;
398 s->hdsp.put_pixels_tab[1][i] = gray8;
399 s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
403 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
404 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
405 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
406 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
407 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
408 if (s->flags & CODEC_FLAG_BITEXACT)
409 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
410 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Architecture-specific overrides (guarded by ARCH_* elsewhere). */
413 ff_MPV_common_init_axp(s);
415 ff_MPV_common_init_arm(s);
417 ff_MPV_common_init_ppc(s);
419 ff_MPV_common_init_x86(s);
421 /* load & permute scantables
422 * note: only wmv uses different ones
424 if (s->alternate_scan) {
425 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
426 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
428 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
429 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
431 ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
432 ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/**
 * Allocate the linesize-dependent scratch buffers: the edge emulation
 * buffer plus one shared scratchpad used for ME, RD, B-frame and OBMC
 * work (they alias the same allocation at different offsets).
 * Skipped for hwaccel/VDPAU since no software MC runs there.
 */
437 static int frame_size_alloc(MpegEncContext *s, int linesize)
439 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
441 if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
445 av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
446 return AVERROR_PATCHWELCOME;
449 // edge emu needs blocksize + filter length - 1
450 // (= 17x17 for halfpel / 21x21 for h264)
451 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
452 // at uvlinesize. It supports only YUV420 so 24x24 is enough
453 // linesize * interlaced * MBsize
454 // we also use this buffer for encoding in encode_mb_internal() needing an additional 32 lines
455 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 68,
458 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
/* All four scratchpads share the single allocation above. */
460 s->me.temp = s->me.scratchpad;
461 s->rd_scratchpad = s->me.scratchpad;
462 s->b_scratchpad = s->me.scratchpad;
463 s->obmc_scratchpad = s->me.scratchpad + 16;
467 av_freep(&s->edge_emu_buffer);
468 return AVERROR(ENOMEM);
472 * Allocate a frame buffer
/* For encoders the frame is over-allocated by EDGE_WIDTH on each side and
 * the data pointers are advanced past the edge; decoders go through
 * ff_thread_get_buffer(). Stride consistency is validated afterwards. */
474 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
476 int edges_needed = av_codec_is_encoder(s->avctx->codec);
/* WM image/screen codecs use internal buffers with other dimensions, so
 * the generic threaded get_buffer path is bypassed for them. */
480 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
481 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
482 s->codec_id != AV_CODEC_ID_MSS2) {
484 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
485 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
488 r = ff_thread_get_buffer(s->avctx, &pic->tf,
489 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
491 pic->f->width = s->avctx->width;
492 pic->f->height = s->avctx->height;
493 pic->f->format = s->avctx->pix_fmt;
494 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
497 if (r < 0 || !pic->f->buf[0]) {
498 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Encoder edge handling: shift data pointers past the allocated border
 * and restore the nominal dimensions. */
505 for (i = 0; pic->f->data[i]; i++) {
506 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
507 pic->f->linesize[i] +
508 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
509 pic->f->data[i] += offset;
511 pic->f->width = s->avctx->width;
512 pic->f->height = s->avctx->height;
/* Optional hwaccel private data attached to the picture. */
515 if (s->avctx->hwaccel) {
516 assert(!pic->hwaccel_picture_private);
517 if (s->avctx->hwaccel->frame_priv_data_size) {
518 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
519 if (!pic->hwaccel_priv_buf) {
520 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
523 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* The context caches linesize/uvlinesize; a stride change would break it. */
527 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
528 s->uvlinesize != pic->f->linesize[1])) {
529 av_log(s->avctx, AV_LOG_ERROR,
530 "get_buffer() failed (stride changed)\n");
531 ff_mpeg_unref_picture(s, pic);
535 if (pic->f->linesize[1] != pic->f->linesize[2]) {
536 av_log(s->avctx, AV_LOG_ERROR,
537 "get_buffer() failed (uv stride mismatch)\n");
538 ff_mpeg_unref_picture(s, pic);
/* Lazily allocate the linesize-dependent scratch buffers. */
542 if (!s->edge_emu_buffer &&
543 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
544 av_log(s->avctx, AV_LOG_ERROR,
545 "get_buffer() failed to allocate context scratch buffers.\n");
546 ff_mpeg_unref_picture(s, pic);
/**
 * Release all per-picture side-data buffers (mb stats, qscale, mb type,
 * motion vectors, ref indices) and reset the recorded allocation size.
 */
553 void ff_free_picture_tables(Picture *pic)
557 pic->alloc_mb_width =
558 pic->alloc_mb_height = 0;
560 av_buffer_unref(&pic->mb_var_buf);
561 av_buffer_unref(&pic->mc_mb_var_buf);
562 av_buffer_unref(&pic->mb_mean_buf);
563 av_buffer_unref(&pic->mbskip_table_buf);
564 av_buffer_unref(&pic->qscale_table_buf);
565 av_buffer_unref(&pic->mb_type_buf);
/* One MV and ref-index buffer per prediction direction. */
567 for (i = 0; i < 2; i++) {
568 av_buffer_unref(&pic->motion_val_buf[i]);
569 av_buffer_unref(&pic->ref_index_buf[i]);
/**
 * Allocate the per-picture side-data buffers sized from the current
 * mb/b8 geometry; encoder-only stats and (conditional) motion tables are
 * allocated in addition to the always-needed skip/qscale/type tables.
 * Returns AVERROR(ENOMEM) on any failed allocation.
 */
573 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
575 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
576 const int mb_array_size = s->mb_stride * s->mb_height;
577 const int b8_array_size = s->b8_stride * s->mb_height * 2;
581 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
582 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
583 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
585 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
586 return AVERROR(ENOMEM);
/* Encoder statistics tables (presumably encoder-only -- guarded by a
 * condition not visible here; TODO confirm). */
589 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
590 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
591 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
592 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
593 return AVERROR(ENOMEM);
/* Motion vector/ref tables for H.263-family output, encoding or MV debug. */
596 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
597 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
598 int ref_index_size = 4 * mb_array_size;
600 for (i = 0; mv_size && i < 2; i++) {
601 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
602 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
603 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
604 return AVERROR(ENOMEM);
/* Record the geometry the tables were sized for (checked on realloc). */
608 pic->alloc_mb_width = s->mb_width;
609 pic->alloc_mb_height = s->mb_height;
/**
 * Make every per-picture side-data buffer writable (copy-on-write via
 * av_buffer_make_writable) so the tables can be modified without
 * affecting other references.
 */
614 static int make_tables_writable(Picture *pic)
617 #define MAKE_WRITABLE(table) \
620 (ret = av_buffer_make_writable(&pic->table)) < 0)\
624 MAKE_WRITABLE(mb_var_buf);
625 MAKE_WRITABLE(mc_mb_var_buf);
626 MAKE_WRITABLE(mb_mean_buf);
627 MAKE_WRITABLE(mbskip_table_buf);
628 MAKE_WRITABLE(qscale_table_buf);
629 MAKE_WRITABLE(mb_type_buf);
631 for (i = 0; i < 2; i++) {
632 MAKE_WRITABLE(motion_val_buf[i]);
633 MAKE_WRITABLE(ref_index_buf[i]);
640 * Allocate a Picture.
641 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Also (re)allocates the side-data tables when the mb geometry changed,
 * makes them writable, and publishes the convenience pointers into the
 * underlying AVBuffers. Frees everything and returns ENOMEM on failure. */
643 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* Drop stale tables if the picture was sized for different dimensions. */
647 if (pic->qscale_table_buf)
648 if ( pic->alloc_mb_width != s->mb_width
649 || pic->alloc_mb_height != s->mb_height)
650 ff_free_picture_tables(pic);
653 av_assert0(pic->f->data[0]);
656 av_assert0(!pic->f->buf[0]);
658 if (alloc_frame_buffer(s, pic) < 0)
/* Cache the strides; alloc_frame_buffer() verifies they never change. */
661 s->linesize = pic->f->linesize[0];
662 s->uvlinesize = pic->f->linesize[1];
665 if (!pic->qscale_table_buf)
666 ret = alloc_picture_tables(s, pic);
668 ret = make_tables_writable(pic);
673 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
674 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
675 pic->mb_mean = pic->mb_mean_buf->data;
/* qscale/mb_type skip the 2*stride+1 border used by the edge rows. */
678 pic->mbskip_table = pic->mbskip_table_buf->data;
679 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
680 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
682 if (pic->motion_val_buf[0]) {
683 for (i = 0; i < 2; i++) {
684 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
685 pic->ref_index[i] = pic->ref_index_buf[i]->data;
691 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
692 ff_mpeg_unref_picture(s, pic);
693 ff_free_picture_tables(pic);
694 return AVERROR(ENOMEM);
698 * Deallocate a picture.
/* Releases the frame buffer and hwaccel data, then zeroes every Picture
 * field located after mb_mean (computed via offsetof) so the table
 * pointers/buffers before that offset survive for reuse. */
700 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
702 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
705 /* WM Image / Screen codecs allocate internal buffers with different
706 * dimensions / colorspaces; ignore user-defined callbacks for these. */
707 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
708 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
709 s->codec_id != AV_CODEC_ID_MSS2)
710 ff_thread_release_buffer(s->avctx, &pic->tf);
712 av_frame_unref(pic->f);
714 av_buffer_unref(&pic->hwaccel_priv_buf);
716 if (pic->needs_realloc)
717 ff_free_picture_tables(pic);
/* Zero everything past the preserved prefix of the struct. */
719 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/**
 * Point dst's side-data buffers at src's via new AVBuffer references
 * (replacing only buffers that differ), then copy the raw table pointers
 * and the recorded allocation geometry. On a failed ref, all of dst's
 * tables are freed and ENOMEM is returned.
 */
722 static int update_picture_tables(Picture *dst, Picture *src)
726 #define UPDATE_TABLE(table)\
729 (!dst->table || dst->table->buffer != src->table->buffer)) {\
730 av_buffer_unref(&dst->table);\
731 dst->table = av_buffer_ref(src->table);\
733 ff_free_picture_tables(dst);\
734 return AVERROR(ENOMEM);\
739 UPDATE_TABLE(mb_var_buf);
740 UPDATE_TABLE(mc_mb_var_buf);
741 UPDATE_TABLE(mb_mean_buf);
742 UPDATE_TABLE(mbskip_table_buf);
743 UPDATE_TABLE(qscale_table_buf);
744 UPDATE_TABLE(mb_type_buf);
745 for (i = 0; i < 2; i++) {
746 UPDATE_TABLE(motion_val_buf[i]);
747 UPDATE_TABLE(ref_index_buf[i]);
/* The plain pointers can be copied directly once the buffers are shared. */
750 dst->mb_var = src->mb_var;
751 dst->mc_mb_var = src->mc_mb_var;
752 dst->mb_mean = src->mb_mean;
753 dst->mbskip_table = src->mbskip_table;
754 dst->qscale_table = src->qscale_table;
755 dst->mb_type = src->mb_type;
756 for (i = 0; i < 2; i++) {
757 dst->motion_val[i] = src->motion_val[i];
758 dst->ref_index[i] = src->ref_index[i];
761 dst->alloc_mb_width = src->alloc_mb_width;
762 dst->alloc_mb_height = src->alloc_mb_height;
/**
 * Reference src into dst: share the frame via ff_thread_ref_frame, share
 * the side-data tables, duplicate the hwaccel private buffer, and copy
 * the scalar bookkeeping fields. dst is fully unreferenced on failure.
 */
767 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
771 av_assert0(!dst->f->buf[0]);
772 av_assert0(src->f->buf[0]);
776 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
780 ret = update_picture_tables(dst, src);
784 if (src->hwaccel_picture_private) {
785 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
786 if (!dst->hwaccel_priv_buf)
788 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
791 dst->field_picture = src->field_picture;
792 dst->mb_var_sum = src->mb_var_sum;
793 dst->mc_mb_var_sum = src->mc_mb_var_sum;
794 dst->b_frame_score = src->b_frame_score;
795 dst->needs_realloc = src->needs_realloc;
796 dst->reference = src->reference;
797 dst->shared = src->shared;
801 ff_mpeg_unref_picture(s, dst);
/* Exchange the U and V block pointers (pblocks[4]/pblocks[5]);
 * presumably invoked for the "VCR2" codec tag seen in
 * init_duplicate_context() -- TODO confirm. */
805 static void exchange_uv(MpegEncContext *s)
810 s->pblocks[4] = s->pblocks[5];
/**
 * Allocate the per-thread (duplicate-context) state: ME maps, optional
 * noise-reduction error accumulator, the 12 coefficient blocks and, for
 * H.263-family codecs, the AC prediction values. On failure returns -1;
 * partially allocated buffers are released later by ff_MPV_common_end().
 */
814 static int init_duplicate_context(MpegEncContext *s)
816 int y_size = s->b8_stride * (2 * s->mb_height + 1);
817 int c_size = s->mb_stride * (s->mb_height + 1);
818 int yc_size = y_size + 2 * c_size;
/* Odd mb_height needs one extra row of b8/mb entries. */
821 if (s->mb_height & 1)
822 yc_size += 2*s->b8_stride + 2*s->mb_stride;
829 s->obmc_scratchpad = NULL;
832 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
833 ME_MAP_SIZE * sizeof(uint32_t), fail)
834 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
835 ME_MAP_SIZE * sizeof(uint32_t), fail)
836 if (s->avctx->noise_reduction) {
837 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
838 2 * 64 * sizeof(int), fail)
841 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
842 s->block = s->blocks[0];
844 for (i = 0; i < 12; i++) {
845 s->pblocks[i] = &s->block[i];
/* VCR2 stores chroma in swapped order (see exchange_uv()). */
847 if (s->avctx->codec_tag == AV_RL32("VCR2"))
850 if (s->out_format == FMT_H263) {
852 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
853 yc_size * sizeof(int16_t) * 16, fail);
854 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
855 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
856 s->ac_val[2] = s->ac_val[1] + c_size;
861 return -1; // free() through ff_MPV_common_end()
/**
 * Free everything allocated by init_duplicate_context() plus the
 * linesize-dependent scratch buffers from frame_size_alloc().
 */
864 static void free_duplicate_context(MpegEncContext *s)
869 av_freep(&s->edge_emu_buffer);
870 av_freep(&s->me.scratchpad);
/* obmc_scratchpad aliases me.scratchpad, so only clear the pointer. */
874 s->obmc_scratchpad = NULL;
876 av_freep(&s->dct_error_sum);
877 av_freep(&s->me.map);
878 av_freep(&s->me.score_map);
879 av_freep(&s->blocks);
880 av_freep(&s->ac_val_base);
/**
 * Copy the per-thread pointer/state fields from src into bak so they
 * survive the wholesale memcpy in ff_update_duplicate_context().
 */
884 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
886 #define COPY(a) bak->a = src->a
887 COPY(edge_emu_buffer);
892 COPY(obmc_scratchpad);
899 COPY(me.map_generation);
/**
 * Synchronize a slice-thread context with the master: copy the whole
 * MpegEncContext, then restore dst's own per-thread buffers via
 * backup_duplicate_context() and rebuild the block pointers. Also lazily
 * allocates the scratch buffers if dst has none yet.
 */
911 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
915 // FIXME copy only needed parts
917 backup_duplicate_context(&bak, dst);
918 memcpy(dst, src, sizeof(MpegEncContext));
919 backup_duplicate_context(dst, &bak);
920 for (i = 0; i < 12; i++) {
921 dst->pblocks[i] = &dst->block[i];
/* VCR2 keeps chroma blocks swapped -- mirror init_duplicate_context(). */
923 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
925 if (!dst->edge_emu_buffer &&
926 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
927 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
928 "scratch buffers.\n");
931 // STOP_TIMER("update_duplicate_context")
932 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/**
 * Frame-threading update callback: copy the decoding state from the source
 * thread's context (s1) into the destination (s). Handles first-time
 * initialization, resolution changes, picture references, MPEG-4 timing,
 * the pending bitstream buffer and the MPEG-2/interlacing fields.
 */
936 int ff_mpeg_update_thread_context(AVCodecContext *dst,
937 const AVCodecContext *src)
940 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
947 // FIXME can parameters change on I-frames?
948 // in that case dst may need a reinit
/* First call for this thread: clone the whole context, then give the
 * clone its own (empty) bitstream buffer and common-init it. */
949 if (!s->context_initialized) {
950 memcpy(s, s1, sizeof(MpegEncContext));
953 s->bitstream_buffer = NULL;
954 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
956 if (s1->context_initialized){
957 // s->picture_range_start += MAX_PICTURE_COUNT;
958 // s->picture_range_end += MAX_PICTURE_COUNT;
959 if((ret = ff_MPV_common_init(s)) < 0){
960 memset(s, 0, sizeof(MpegEncContext));
/* Resolution changed (or explicit reinit requested): resize buffers. */
967 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
968 s->context_reinit = 0;
969 s->height = s1->height;
970 s->width = s1->width;
971 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
975 s->avctx->coded_height = s1->avctx->coded_height;
976 s->avctx->coded_width = s1->avctx->coded_width;
977 s->avctx->width = s1->avctx->width;
978 s->avctx->height = s1->avctx->height;
980 s->coded_picture_number = s1->coded_picture_number;
981 s->picture_number = s1->picture_number;
983 av_assert0(!s->picture || s->picture != s1->picture);
/* Re-reference every picture from the source thread's pool. */
985 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
986 ff_mpeg_unref_picture(s, &s->picture[i]);
987 if (s1->picture[i].f->buf[0] &&
988 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
992 #define UPDATE_PICTURE(pic)\
994 ff_mpeg_unref_picture(s, &s->pic);\
995 if (s1->pic.f && s1->pic.f->buf[0])\
996 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
998 ret = update_picture_tables(&s->pic, &s1->pic);\
1003 UPDATE_PICTURE(current_picture);
1004 UPDATE_PICTURE(last_picture);
1005 UPDATE_PICTURE(next_picture);
/* Translate the source thread's picture pointers into s's picture array. */
1007 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1008 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1009 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1011 // Error/bug resilience
1012 s->next_p_frame_damaged = s1->next_p_frame_damaged;
1013 s->workaround_bugs = s1->workaround_bugs;
1014 s->padding_bug_score = s1->padding_bug_score;
1016 // MPEG4 timing info
/* Bulk-copies the field range [last_time_base, pb_field_time]. */
1017 memcpy(&s->last_time_base, &s1->last_time_base,
1018 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1019 (char *) &s1->last_time_base);
1022 s->max_b_frames = s1->max_b_frames;
1023 s->low_delay = s1->low_delay;
1024 s->droppable = s1->droppable;
1026 // DivX handling (doesn't work)
1027 s->divx_packed = s1->divx_packed;
/* Clone any pending (packed) bitstream data, padded for the parser. */
1029 if (s1->bitstream_buffer) {
1030 if (s1->bitstream_buffer_size +
1031 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
1032 av_fast_malloc(&s->bitstream_buffer,
1033 &s->allocated_bitstream_buffer_size,
1034 s1->allocated_bitstream_buffer_size);
1035 s->bitstream_buffer_size = s1->bitstream_buffer_size;
1036 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1037 s1->bitstream_buffer_size);
1038 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1039 FF_INPUT_BUFFER_PADDING_SIZE);
1042 // linesize dependent scratch buffer allocation
1043 if (!s->edge_emu_buffer)
1045 if (frame_size_alloc(s, s1->linesize) < 0) {
1046 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1047 "scratch buffers.\n");
1048 return AVERROR(ENOMEM);
1051 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1052 "be allocated due to unknown size.\n");
1055 // MPEG2/interlacing info
/* Bulk-copies the field range [progressive_sequence, rtp_mode). */
1056 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1057 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1059 if (!s1->first_field) {
1060 s->last_pict_type = s1->pict_type;
1061 if (s1->current_picture_ptr)
1062 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1069 * Set the given MpegEncContext to common defaults
1070 * (same for encoding and decoding).
1071 * The changed fields will not depend upon the
1072 * prior state of the MpegEncContext.
1074 void ff_MPV_common_defaults(MpegEncContext *s)
/* MPEG-1-style DC scale for both planes; identity chroma qscale mapping. */
1076 s->y_dc_scale_table =
1077 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1078 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1079 s->progressive_frame = 1;
1080 s->progressive_sequence = 1;
1081 s->picture_structure = PICT_FRAME;
1083 s->coded_picture_number = 0;
1084 s->picture_number = 0;
1089 s->slice_context_count = 1;
1093 * Set the given MpegEncContext to defaults for decoding.
1094 * the changed fields will not depend upon
1095 * the prior state of the MpegEncContext.
1097 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Currently just the shared encoder/decoder defaults. */
1099 ff_MPV_common_defaults(s);
/**
 * Initialize the error-resilience context (ERContext): mirror the mb
 * geometry and shared tables from the MpegEncContext, allocate the ER
 * work buffers and install mpeg_er_decode_mb as the decode callback.
 * Returns AVERROR(ENOMEM) if the work buffers cannot be allocated.
 */
1102 static int init_er(MpegEncContext *s)
1104 ERContext *er = &s->er;
1105 int mb_array_size = s->mb_height * s->mb_stride;
1108 er->avctx = s->avctx;
1111 er->mb_index2xy = s->mb_index2xy;
1112 er->mb_num = s->mb_num;
1113 er->mb_width = s->mb_width;
1114 er->mb_height = s->mb_height;
1115 er->mb_stride = s->mb_stride;
1116 er->b8_stride = s->b8_stride;
1118 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1119 er->error_status_table = av_mallocz(mb_array_size);
1120 if (!er->er_temp_buffer || !er->error_status_table)
/* The ER code shares the skip/intra/DC tables with the decoder. */
1123 er->mbskip_table = s->mbskip_table;
1124 er->mbintra_table = s->mbintra_table;
1126 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1127 er->dc_val[i] = s->dc_val[i];
1129 er->decode_mb = mpeg_er_decode_mb;
1134 av_freep(&er->er_temp_buffer);
1135 av_freep(&er->error_status_table);
1136 return AVERROR(ENOMEM);
1140 * Initialize and allocates MpegEncContext fields dependent on the resolution.
1142 static int init_context_frame(MpegEncContext *s)
1144 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Macroblock geometry: 16x16 MBs, strides padded by one column. */
1146 s->mb_width = (s->width + 15) / 16;
1147 s->mb_stride = s->mb_width + 1;
1148 s->b8_stride = s->mb_width * 2 + 1;
1149 mb_array_size = s->mb_height * s->mb_stride;
1150 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1152 /* set default edge pos, will be overridden
1153 * in decode_header if needed */
1154 s->h_edge_pos = s->mb_width * 16;
1155 s->v_edge_pos = s->mb_height * 16;
1157 s->mb_num = s->mb_width * s->mb_height;
1162 s->block_wrap[3] = s->b8_stride;
1164 s->block_wrap[5] = s->mb_stride;
1166 y_size = s->b8_stride * (2 * s->mb_height + 1);
1167 c_size = s->mb_stride * (s->mb_height + 1);
1168 yc_size = y_size + 2 * c_size;
/* Odd mb_height needs one extra row of b8/mb entries. */
1170 if (s->mb_height & 1)
1171 yc_size += 2*s->b8_stride + 2*s->mb_stride;
1173 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error resilience code looks cleaner with this
1174 for (y = 0; y < s->mb_height; y++)
1175 for (x = 0; x < s->mb_width; x++)
1176 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1178 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1181 /* Allocate MV tables */
1182 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1183 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1184 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1185 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1186 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1187 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
/* The usable tables skip the one-row + one-column border of each base. */
1188 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1189 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1190 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1191 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1192 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1193 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1195 /* Allocate MB type table */
1196 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1198 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1200 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1201 mb_array_size * sizeof(float), fail);
1202 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1203 mb_array_size * sizeof(float), fail);
/* Interlaced direct-mode tables: one set per direction/field pairing. */
1207 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1208 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1209 /* interlaced direct mode decoding tables */
1210 for (i = 0; i < 2; i++) {
1212 for (j = 0; j < 2; j++) {
1213 for (k = 0; k < 2; k++) {
1214 FF_ALLOCZ_OR_GOTO(s->avctx,
1215 s->b_field_mv_table_base[i][j][k],
1216 mv_table_size * 2 * sizeof(int16_t),
1218 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1221 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1222 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1223 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1225 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1228 if (s->out_format == FMT_H263) {
1230 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1231 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1233 /* cbp, ac_pred, pred_dir */
1234 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1235 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1238 if (s->h263_pred || s->h263_plus || !s->encoding) {
1240 // MN: we need these for error resilience of intra-frames
1241 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1242 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1243 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1244 s->dc_val[2] = s->dc_val[1] + c_size;
/* DC predictors start at 1024 (the reset value for 8-bit DC prediction). */
1245 for (i = 0; i < yc_size; i++)
1246 s->dc_val_base[i] = 1024;
1249 /* which mb is an intra block */
1250 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1251 memset(s->mbintra_table, 1, mb_array_size);
1253 /* init macroblock skip table */
1254 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1255 // Note the + 1 is for a quicker mpeg4 slice_end detection
1259 return AVERROR(ENOMEM);
1263 * init common structure for both encoder and decoder.
1264 * this assumes that some variables like width/height are already set
/* NOTE(review): this excerpt appears line-sampled — braces, declarations and some
 * statements are missing; comments below describe only the visible code. */
1266 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* Slice threading: one context per slice thread, else a single context. */
1269 int nb_slices = (HAVE_THREADS &&
1270 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1271 s->avctx->thread_count : 1;
/* Encoder may request an explicit slice count that overrides thread_count. */
1273 if (s->encoding && s->avctx->slices)
1274 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2: mb_height is rounded to 32-line field pairs. */
1276 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1277 s->mb_height = (s->height + 31) / 32 * 2;
1279 s->mb_height = (s->height + 15) / 16;
1281 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1282 av_log(s->avctx, AV_LOG_ERROR,
1283 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp the slice count: at most MAX_THREADS and at most one slice per MB row. */
1287 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1290 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1292 max_slices = MAX_THREADS;
1293 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1294 " reducing to %d\n", nb_slices, max_slices);
1295 nb_slices = max_slices;
/* Reject dimensions that overflow internal size checks. */
1298 if ((s->width || s->height) &&
1299 av_image_check_size(s->width, s->height, 0, s->avctx))
1302 ff_dct_common_init(s);
1304 s->flags = s->avctx->flags;
1305 s->flags2 = s->avctx->flags2;
1307 /* set chroma shifts */
1308 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1310 &s->chroma_y_shift);
1312 /* convert fourcc to upper case */
1313 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1315 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
/* Allocate the picture pool and an AVFrame for each slot plus the four
 * special pictures (next/last/current/new). */
1317 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1318 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1319 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1320 s->picture[i].f = av_frame_alloc();
1321 if (!s->picture[i].f)
1324 memset(&s->next_picture, 0, sizeof(s->next_picture));
1325 memset(&s->last_picture, 0, sizeof(s->last_picture));
1326 memset(&s->current_picture, 0, sizeof(s->current_picture));
1327 memset(&s->new_picture, 0, sizeof(s->new_picture));
1328 s->next_picture.f = av_frame_alloc();
1329 if (!s->next_picture.f)
1331 s->last_picture.f = av_frame_alloc();
1332 if (!s->last_picture.f)
1334 s->current_picture.f = av_frame_alloc();
1335 if (!s->current_picture.f)
1337 s->new_picture.f = av_frame_alloc();
1338 if (!s->new_picture.f)
/* Allocate all resolution-dependent tables. */
1341 if (init_context_frame(s))
1344 s->parse_context.state = -1;
1346 s->context_initialized = 1;
1347 s->thread_context[0] = s;
1349 // if (s->width && s->height) {
/* Duplicate the context for each extra slice thread and split the MB rows
 * evenly (rounded) between start_mb_y and end_mb_y per context. */
1350 if (nb_slices > 1) {
1351 for (i = 1; i < nb_slices; i++) {
1352 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1353 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1356 for (i = 0; i < nb_slices; i++) {
1357 if (init_duplicate_context(s->thread_context[i]) < 0)
1359 s->thread_context[i]->start_mb_y =
1360 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1361 s->thread_context[i]->end_mb_y =
1362 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1365 if (init_duplicate_context(s) < 0)
1368 s->end_mb_y = s->mb_height;
1370 s->slice_context_count = nb_slices;
/* Error path: tear down everything allocated so far. */
1375 ff_MPV_common_end(s);
1380 * Frees and resets MpegEncContext fields depending on the resolution.
1381 * Is used during resolution changes to avoid a full reinitialization of the
/* Counterpart of init_context_frame(): frees every resolution-dependent table
 * and NULLs the derived (offset) pointers so a later re-init starts clean. */
1384 static int free_context_frame(MpegEncContext *s)
1388 av_freep(&s->mb_type);
/* Motion-vector tables: only the *_base pointers own memory; the public
 * pointers are offsets into them and are just reset to NULL. */
1389 av_freep(&s->p_mv_table_base);
1390 av_freep(&s->b_forw_mv_table_base);
1391 av_freep(&s->b_back_mv_table_base);
1392 av_freep(&s->b_bidir_forw_mv_table_base);
1393 av_freep(&s->b_bidir_back_mv_table_base);
1394 av_freep(&s->b_direct_mv_table_base);
1395 s->p_mv_table = NULL;
1396 s->b_forw_mv_table = NULL;
1397 s->b_back_mv_table = NULL;
1398 s->b_bidir_forw_mv_table = NULL;
1399 s->b_bidir_back_mv_table = NULL;
1400 s->b_direct_mv_table = NULL;
/* Interlaced/field MV tables, allocated per [field][list][dir]. */
1401 for (i = 0; i < 2; i++) {
1402 for (j = 0; j < 2; j++) {
1403 for (k = 0; k < 2; k++) {
1404 av_freep(&s->b_field_mv_table_base[i][j][k]);
1405 s->b_field_mv_table[i][j][k] = NULL;
1407 av_freep(&s->b_field_select_table[i][j]);
1408 av_freep(&s->p_field_mv_table_base[i][j]);
1409 s->p_field_mv_table[i][j] = NULL;
1411 av_freep(&s->p_field_select_table[i]);
1414 av_freep(&s->dc_val_base);
1415 av_freep(&s->coded_block_base);
1416 av_freep(&s->mbintra_table);
1417 av_freep(&s->cbp_table);
1418 av_freep(&s->pred_dir_table);
1420 av_freep(&s->mbskip_table);
1422 av_freep(&s->er.error_status_table);
1423 av_freep(&s->er.er_temp_buffer);
1424 av_freep(&s->mb_index2xy);
1425 av_freep(&s->lambda_table);
1427 av_freep(&s->cplx_tab);
1428 av_freep(&s->bits_tab);
/* Force reallocation of line buffers on the next frame. */
1430 s->linesize = s->uvlinesize = 0;
/* Reinitialize the resolution-dependent state after a mid-stream frame size
 * change: free per-slice duplicate contexts and frame tables, recompute
 * mb_height, then rebuild both. Pictures are flagged needs_realloc instead of
 * being freed. NOTE(review): excerpt is line-sampled; error paths are partly
 * missing from view. */
1435 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Tear down slice-thread duplicate contexts (index 0 is `s` itself). */
1439 if (s->slice_context_count > 1) {
1440 for (i = 0; i < s->slice_context_count; i++) {
1441 free_duplicate_context(s->thread_context[i]);
1443 for (i = 1; i < s->slice_context_count; i++) {
1444 av_freep(&s->thread_context[i]);
1447 free_duplicate_context(s);
1449 if ((err = free_context_frame(s)) < 0)
/* Keep the picture pool but force buffer reallocation at the new size. */
1453 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1454 s->picture[i].needs_realloc = 1;
1457 s->last_picture_ptr =
1458 s->next_picture_ptr =
1459 s->current_picture_ptr = NULL;
/* Same mb_height rounding rule as ff_MPV_common_init(). */
1462 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1463 s->mb_height = (s->height + 31) / 32 * 2;
1465 s->mb_height = (s->height + 15) / 16;
1467 if ((s->width || s->height) &&
1468 av_image_check_size(s->width, s->height, 0, s->avctx))
1469 return AVERROR_INVALIDDATA;
1471 if ((err = init_context_frame(s)))
1474 s->thread_context[0] = s;
/* Rebuild the slice-thread contexts and re-split the MB rows. */
1476 if (s->width && s->height) {
1477 int nb_slices = s->slice_context_count;
1478 if (nb_slices > 1) {
1479 for (i = 1; i < nb_slices; i++) {
1480 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1481 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1484 for (i = 0; i < nb_slices; i++) {
1485 if (init_duplicate_context(s->thread_context[i]) < 0)
1487 s->thread_context[i]->start_mb_y =
1488 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1489 s->thread_context[i]->end_mb_y =
1490 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1493 err = init_duplicate_context(s);
1497 s->end_mb_y = s->mb_height;
1499 s->slice_context_count = nb_slices;
/* Error path: full teardown. */
1504 ff_MPV_common_end(s);
1508 /* init common structure for both encoder and decoder */
/* Full teardown of an MpegEncContext: slice contexts, bitstream/parse buffers,
 * the whole picture pool and the four special pictures, then the
 * resolution-dependent tables via free_context_frame(). */
1509 void ff_MPV_common_end(MpegEncContext *s)
1513 if (s->slice_context_count > 1) {
1514 for (i = 0; i < s->slice_context_count; i++) {
1515 free_duplicate_context(s->thread_context[i]);
1517 for (i = 1; i < s->slice_context_count; i++) {
1518 av_freep(&s->thread_context[i]);
1520 s->slice_context_count = 1;
1521 } else free_duplicate_context(s);
1523 av_freep(&s->parse_context.buffer);
1524 s->parse_context.buffer_size = 0;
1526 av_freep(&s->bitstream_buffer);
1527 s->allocated_bitstream_buffer_size = 0;
/* For each pooled picture: free side tables, drop buffer refs, free the frame. */
1530 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1531 ff_free_picture_tables(&s->picture[i]);
1532 ff_mpeg_unref_picture(s, &s->picture[i]);
1533 av_frame_free(&s->picture[i].f);
1536 av_freep(&s->picture);
1537 ff_free_picture_tables(&s->last_picture);
1538 ff_mpeg_unref_picture(s, &s->last_picture);
1539 av_frame_free(&s->last_picture.f);
1540 ff_free_picture_tables(&s->current_picture);
1541 ff_mpeg_unref_picture(s, &s->current_picture);
1542 av_frame_free(&s->current_picture.f);
1543 ff_free_picture_tables(&s->next_picture);
1544 ff_mpeg_unref_picture(s, &s->next_picture);
1545 av_frame_free(&s->next_picture.f);
1546 ff_free_picture_tables(&s->new_picture);
1547 ff_mpeg_unref_picture(s, &s->new_picture);
1548 av_frame_free(&s->new_picture.f);
1550 free_context_frame(s);
/* Leave the context in a state safe for re-initialization. */
1552 s->context_initialized = 0;
1553 s->last_picture_ptr =
1554 s->next_picture_ptr =
1555 s->current_picture_ptr = NULL;
1556 s->linesize = s->uvlinesize = 0;
/* Build the run/level lookup tables (max_level[], max_run[], index_run[]) of an
 * RLTable from its table_run/table_level arrays, once per `last` group.
 * If static_store is non-NULL the results live in that static buffer and the
 * function is idempotent; otherwise each table is av_malloc'ed. */
1559 av_cold void ff_init_rl(RLTable *rl,
1560 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1562 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1563 uint8_t index_run[MAX_RUN + 1];
1564 int last, run, level, start, end, i;
1566 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1567 if (static_store && rl->max_level[0])
1570 /* compute max_level[], max_run[] and index_run[] */
1571 for (last = 0; last < 2; last++) {
/* index_run entries default to rl->n, meaning "no code with this run". */
1580 memset(max_level, 0, MAX_RUN + 1);
1581 memset(max_run, 0, MAX_LEVEL + 1);
1582 memset(index_run, rl->n, MAX_RUN + 1);
1583 for (i = start; i < end; i++) {
1584 run = rl->table_run[i];
1585 level = rl->table_level[i];
1586 if (index_run[run] == rl->n)
1588 if (level > max_level[run])
1589 max_level[run] = level;
1590 if (run > max_run[level])
1591 max_run[level] = run;
/* Copy the scratch tables into static storage or fresh allocations.
 * NOTE(review): the av_malloc results are used unchecked in the visible code. */
1594 rl->max_level[last] = static_store[last];
1596 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1597 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1599 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1601 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1602 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1604 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1606 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1607 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute the per-qscale RL-VLC tables: for each of the 32 quantizer values,
 * expand every VLC table entry into (len, level, run) with dequantization
 * (level * qmul + qadd) folded in, so the decoder can skip a multiply per
 * coefficient. */
1611 av_cold void ff_init_vlc_rl(RLTable *rl)
1615 for (q = 0; q < 32; q++) {
/* qadd = (q - 1) | 1 forces an odd rounding offset (H.263-style dequant). */
1617 int qadd = (q - 1) | 1;
1623 for (i = 0; i < rl->vlc.table_size; i++) {
1624 int code = rl->vlc.table[i][0];
1625 int len = rl->vlc.table[i][1];
1628 if (len == 0) { // illegal code
1631 } else if (len < 0) { // more bits needed
1635 if (code == rl->n) { // esc
/* run is stored +1; codes past rl->last get +192 to flag "last" coefficients. */
1639 run = rl->table_run[code] + 1;
1640 level = rl->table_level[code] * qmul + qadd;
1641 if (code >= rl->last) run += 192;
1644 rl->rl_vlc[q][i].len = len;
1645 rl->rl_vlc[q][i].level = level;
1646 rl->rl_vlc[q][i].run = run;
/* Drop the buffer references of every picture in the pool that is not
 * currently marked as a reference frame. */
1651 static void release_unused_pictures(MpegEncContext *s)
1655 /* release non reference frames */
1656 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1657 if (!s->picture[i].reference)
1658 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Return whether `pic` may be recycled: never the last picture, otherwise a
 * picture with no allocated buffer, or one flagged needs_realloc that is not
 * held as a delayed reference. NOTE(review): the return statements themselves
 * are missing from this excerpt. */
1662 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1664 if (pic == s->last_picture_ptr)
1666 if (pic->f->buf[0] == NULL)
1668 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Find a recyclable slot in the picture pool. First pass prefers completely
 * empty slots (no buffer, not the last picture); second pass accepts anything
 * pic_is_unused() allows. Exhaustion is a hard internal error — see the
 * comment below for why it aborts rather than returning failure. */
1673 static int find_unused_picture(MpegEncContext *s, int shared)
1678 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1679 if (s->picture[i].f->buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1683 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1684 if (pic_is_unused(s, &s->picture[i]))
1689 av_log(s->avctx, AV_LOG_FATAL,
1690 "Internal error, picture buffer overflow\n");
1691 /* We could return -1, but the codec would crash trying to draw into a
1692 * non-existing frame anyway. This is safer than waiting for a random crash.
1693 * Also the return of this is never useful, an encoder must only allocate
1694 * as much as allowed in the specification. This has no relationship to how
1695 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1696 * enough for such valid streams).
1697 * Plus, a decoder has to check stream validity and remove frames if too
1698 * many reference frames are around. Waiting for "OOM" is not correct at
1699 * all. Similarly, missing reference frames have to be replaced by
1700 * interpolated/MC frames, anything else is a bug in the codec ...
/* Public wrapper around find_unused_picture(): additionally clears a pending
 * needs_realloc flag on the chosen slot by freeing its side tables and
 * dropping its buffer refs, so the caller gets a truly empty picture. */
1706 int ff_find_unused_picture(MpegEncContext *s, int shared)
1708 int ret = find_unused_picture(s, shared);
1710 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1711 if (s->picture[ret].needs_realloc) {
1712 s->picture[ret].needs_realloc = 0;
1713 ff_free_picture_tables(&s->picture[ret]);
1714 ff_mpeg_unref_picture(s, &s->picture[ret]);
/* Fill a frame with mid-gray (0x80 in all three planes) — used by the
 * FF_DEBUG_NOMC debug mode to visualize motion vectors without real pixels.
 * Assumes a planar YUV layout with data[0..2]. */
1720 static void gray_frame(AVFrame *frame)
1722 int i, h_chroma_shift, v_chroma_shift;
1724 av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1726 for(i=0; i<frame->height; i++)
1727 memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
/* Chroma planes are scaled by the pixel format's subsampling shifts. */
1728 for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1729 memset(frame->data[1] + frame->linesize[1]*i,
1730 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1731 memset(frame->data[2] + frame->linesize[2]*i,
1732 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1737 * generic function called after decoding
1738 * the header and before a frame is decoded.
/* Sets up current/last/next picture pointers and buffers for the frame about
 * to be decoded, allocating gray dummy reference frames where the stream is
 * missing them. NOTE(review): excerpt is line-sampled; several error-return
 * lines are not visible. */
1740 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1746 if (!ff_thread_can_start_frame(avctx)) {
1747 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1751 /* mark & release old frames */
1752 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1753 s->last_picture_ptr != s->next_picture_ptr &&
1754 s->last_picture_ptr->f->buf[0]) {
1755 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1758 /* release forgotten pictures */
1759 /* if (mpeg124/h263) */
1760 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1761 if (&s->picture[i] != s->last_picture_ptr &&
1762 &s->picture[i] != s->next_picture_ptr &&
1763 s->picture[i].reference && !s->picture[i].needs_realloc) {
1764 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1765 av_log(avctx, AV_LOG_ERROR,
1766 "releasing zombie picture\n");
1767 ff_mpeg_unref_picture(s, &s->picture[i]);
1771 ff_mpeg_unref_picture(s, &s->current_picture);
1773 release_unused_pictures(s);
/* Reuse a pre-set current picture if present, else grab a fresh pool slot. */
1775 if (s->current_picture_ptr &&
1776 s->current_picture_ptr->f->buf[0] == NULL) {
1777 // we already have a unused image
1778 // (maybe it was set before reading the header)
1779 pic = s->current_picture_ptr;
1781 i = ff_find_unused_picture(s, 0);
1783 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1786 pic = &s->picture[i];
1790 if (!s->droppable) {
1791 if (s->pict_type != AV_PICTURE_TYPE_B)
1795 pic->f->coded_picture_number = s->coded_picture_number++;
1797 if (ff_alloc_picture(s, pic, 0) < 0)
1800 s->current_picture_ptr = pic;
1801 // FIXME use only the vars from current_pic
/* Propagate interlacing / field-order metadata onto the output frame. */
1802 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1803 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1804 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1805 if (s->picture_structure != PICT_FRAME)
1806 s->current_picture_ptr->f->top_field_first =
1807 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1809 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1810 !s->progressive_sequence;
1811 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1813 s->current_picture_ptr->f->pict_type = s->pict_type;
1814 // if (s->flags && CODEC_FLAG_QSCALE)
1815 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1816 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1818 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1819 s->current_picture_ptr)) < 0)
/* For non-B frames the reference chain advances: next becomes last,
 * current becomes next. */
1822 if (s->pict_type != AV_PICTURE_TYPE_B) {
1823 s->last_picture_ptr = s->next_picture_ptr;
1825 s->next_picture_ptr = s->current_picture_ptr;
1827 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1828 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1829 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1830 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1831 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1832 s->pict_type, s->droppable);
/* Missing last reference (stream starts on a non-keyframe, or field-based
 * keyframe): allocate a dummy reference picture and fill it with gray
 * (or black for FLV/H.263) so motion compensation has something to read. */
1834 if ((s->last_picture_ptr == NULL ||
1835 s->last_picture_ptr->f->buf[0] == NULL) &&
1836 (s->pict_type != AV_PICTURE_TYPE_I ||
1837 s->picture_structure != PICT_FRAME)) {
1838 int h_chroma_shift, v_chroma_shift;
1839 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1840 &h_chroma_shift, &v_chroma_shift);
1841 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1842 av_log(avctx, AV_LOG_DEBUG,
1843 "allocating dummy last picture for B frame\n");
1844 else if (s->pict_type != AV_PICTURE_TYPE_I)
1845 av_log(avctx, AV_LOG_ERROR,
1846 "warning: first frame is no keyframe\n");
1847 else if (s->picture_structure != PICT_FRAME)
1848 av_log(avctx, AV_LOG_DEBUG,
1849 "allocate dummy last picture for field based first keyframe\n");
1851 /* Allocate a dummy frame */
1852 i = ff_find_unused_picture(s, 0);
1854 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1857 s->last_picture_ptr = &s->picture[i];
1859 s->last_picture_ptr->reference = 3;
1860 s->last_picture_ptr->f->key_frame = 0;
1861 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1863 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1864 s->last_picture_ptr = NULL;
/* Only paint the dummy when we own the pixels (no hwaccel/VDPAU surface). */
1868 if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1869 for(i=0; i<avctx->height; i++)
1870 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1871 0x80, avctx->width);
1872 for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1873 memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1874 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1875 memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1876 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1879 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1880 for(i=0; i<avctx->height; i++)
1881 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
/* Mark both fields of the dummy as fully decoded for frame threading. */
1885 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1886 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same treatment for a missing next (future) reference before a B frame. */
1888 if ((s->next_picture_ptr == NULL ||
1889 s->next_picture_ptr->f->buf[0] == NULL) &&
1890 s->pict_type == AV_PICTURE_TYPE_B) {
1891 /* Allocate a dummy frame */
1892 i = ff_find_unused_picture(s, 0);
1894 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1897 s->next_picture_ptr = &s->picture[i];
1899 s->next_picture_ptr->reference = 3;
1900 s->next_picture_ptr->f->key_frame = 0;
1901 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1903 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1904 s->next_picture_ptr = NULL;
1907 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1908 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1911 #if 0 // BUFREF-FIXME
1912 memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1913 memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
/* Re-reference last/next into the local Picture copies for this frame. */
1915 if (s->last_picture_ptr) {
1916 ff_mpeg_unref_picture(s, &s->last_picture);
1917 if (s->last_picture_ptr->f->buf[0] &&
1918 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1919 s->last_picture_ptr)) < 0)
1922 if (s->next_picture_ptr) {
1923 ff_mpeg_unref_picture(s, &s->next_picture);
1924 if (s->next_picture_ptr->f->buf[0] &&
1925 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1926 s->next_picture_ptr)) < 0)
1930 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1931 s->last_picture_ptr->f->buf[0]));
/* Field pictures: offset to the bottom field and double the strides so the
 * rest of the decoder sees a single-field layout. */
1933 if (s->picture_structure!= PICT_FRAME) {
1935 for (i = 0; i < 4; i++) {
1936 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1937 s->current_picture.f->data[i] +=
1938 s->current_picture.f->linesize[i];
1940 s->current_picture.f->linesize[i] *= 2;
1941 s->last_picture.f->linesize[i] *= 2;
1942 s->next_picture.f->linesize[i] *= 2;
1946 s->err_recognition = avctx->err_recognition;
1948 /* set dequantizer, we can't do it during init as
1949 * it might change for mpeg4 and we can't do it in the header
1950 * decode as init is not called for mpeg4 there yet */
1951 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1952 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1953 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1954 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1955 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1956 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1958 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1959 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1962 if (s->avctx->debug & FF_DEBUG_NOMC) {
1963 gray_frame(s->current_picture_ptr->f);
1969 /* called after a frame has been decoded. */
/* Report full decode progress for the current picture when it is a reference,
 * unblocking any frame-threaded consumers waiting on it. */
1970 void ff_MPV_frame_end(MpegEncContext *s)
1974 if (s->current_picture.reference)
1975 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1979 * Draw a line from (ex, ey) -> (sx, sy).
1980 * @param w width of the image
1981 * @param h height of the image
1982 * @param stride stride/linesize of the image
1983 * @param color color of the arrow
/* Anti-aliased line drawing into a single 8-bit plane: the intensity added to
 * each pixel is split between two neighboring rows/columns by the fractional
 * position (fixed-point 16.16). Endpoints are clipped to the image. */
1985 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1986 int w, int h, int stride, int color)
1990 sx = av_clip(sx, 0, w - 1);
1991 sy = av_clip(sy, 0, h - 1);
1992 ex = av_clip(ex, 0, w - 1);
1993 ey = av_clip(ey, 0, h - 1);
1995 buf[sy * stride + sx] += color;
/* Mostly-horizontal case: step in x, distribute between rows y and y+1. */
1997 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1999 FFSWAP(int, sx, ex);
2000 FFSWAP(int, sy, ey);
2002 buf += sx + sy * stride;
2004 f = ((ey - sy) << 16) / ex;
2005 for (x = 0; x <= ex; x++) {
2007 fr = (x * f) & 0xFFFF;
2008 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2009 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* Mostly-vertical case: step in y, distribute between columns x and x+1. */
2013 FFSWAP(int, sx, ex);
2014 FFSWAP(int, sy, ey);
2016 buf += sx + sy * stride;
2019 f = ((ex - sx) << 16) / ey;
2022 for(y= 0; y <= ey; y++){
2024 fr = (y*f) & 0xFFFF;
2025 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2026 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
2032 * Draw an arrow from (ex, ey) -> (sx, sy).
2033 * @param w width of the image
2034 * @param h height of the image
2035 * @param stride stride/linesize of the image
2036 * @param color color of the arrow
/* Draws the shaft via draw_line() plus two short head strokes at (sx, sy)
 * when the vector is longer than 3 pixels. Coordinates are pre-clipped to a
 * 100-pixel margin around the image (draw_line clips the rest). */
2038 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2039 int ey, int w, int h, int stride, int color)
2043 sx = av_clip(sx, -100, w + 100);
2044 sy = av_clip(sy, -100, h + 100);
2045 ex = av_clip(ex, -100, w + 100);
2046 ey = av_clip(ey, -100, h + 100);
2051 if (dx * dx + dy * dy > 3 * 3) {
/* Normalize the direction to a fixed head length (fixed-point). */
2054 int length = ff_sqrt((rx * rx + ry * ry) << 8);
2056 // FIXME subpixel accuracy
2057 rx = ROUNDED_DIV(rx * 3 << 4, length);
2058 ry = ROUNDED_DIV(ry * 3 << 4, length);
2060 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2061 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2063 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
2067 * Print debugging info for the given picture.
/* Two debug facilities in one: (1) textual per-macroblock dumps (skip count,
 * QP, MB type) to the log; (2) visual overlays drawn directly into `pict`
 * (motion vectors as arrows, QP / MB type as chroma tints, partition and
 * interlacing markers). Skipped entirely for hwaccel output where the pixel
 * data is not CPU-accessible. */
2069 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2070 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2072 int mb_width, int mb_height, int mb_stride, int quarter_sample)
2074 if (avctx->hwaccel || !mbtype_table
2075 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
/* --- Part 1: textual per-MB dump ------------------------------------- */
2079 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2082 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2083 av_get_picture_type_char(pict->pict_type));
2084 for (y = 0; y < mb_height; y++) {
2085 for (x = 0; x < mb_width; x++) {
2086 if (avctx->debug & FF_DEBUG_SKIP) {
2087 int count = mbskip_table[x + y * mb_stride];
2090 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2092 if (avctx->debug & FF_DEBUG_QP) {
2093 av_log(avctx, AV_LOG_DEBUG, "%2d",
2094 qscale_table[x + y * mb_stride]);
2096 if (avctx->debug & FF_DEBUG_MB_TYPE) {
2097 int mb_type = mbtype_table[x + y * mb_stride];
2098 // Type & MV direction
2099 if (IS_PCM(mb_type))
2100 av_log(avctx, AV_LOG_DEBUG, "P");
2101 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2102 av_log(avctx, AV_LOG_DEBUG, "A");
2103 else if (IS_INTRA4x4(mb_type))
2104 av_log(avctx, AV_LOG_DEBUG, "i");
2105 else if (IS_INTRA16x16(mb_type))
2106 av_log(avctx, AV_LOG_DEBUG, "I");
2107 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2108 av_log(avctx, AV_LOG_DEBUG, "d");
2109 else if (IS_DIRECT(mb_type))
2110 av_log(avctx, AV_LOG_DEBUG, "D");
2111 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2112 av_log(avctx, AV_LOG_DEBUG, "g");
2113 else if (IS_GMC(mb_type))
2114 av_log(avctx, AV_LOG_DEBUG, "G");
2115 else if (IS_SKIP(mb_type))
2116 av_log(avctx, AV_LOG_DEBUG, "S");
2117 else if (!USES_LIST(mb_type, 1))
2118 av_log(avctx, AV_LOG_DEBUG, ">");
2119 else if (!USES_LIST(mb_type, 0))
2120 av_log(avctx, AV_LOG_DEBUG, "<");
2122 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2123 av_log(avctx, AV_LOG_DEBUG, "X");
/* Second character: partition shape (+ 8x8, - 16x8, | 8x16, ' ' 16x16). */
2127 if (IS_8X8(mb_type))
2128 av_log(avctx, AV_LOG_DEBUG, "+");
2129 else if (IS_16X8(mb_type))
2130 av_log(avctx, AV_LOG_DEBUG, "-");
2131 else if (IS_8X16(mb_type))
2132 av_log(avctx, AV_LOG_DEBUG, "|");
2133 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2134 av_log(avctx, AV_LOG_DEBUG, " ");
2136 av_log(avctx, AV_LOG_DEBUG, "?");
/* Third character: '=' marks interlaced MBs. */
2139 if (IS_INTERLACED(mb_type))
2140 av_log(avctx, AV_LOG_DEBUG, "=");
2142 av_log(avctx, AV_LOG_DEBUG, " ");
2145 av_log(avctx, AV_LOG_DEBUG, "\n");
/* --- Part 2: visual overlays drawn into the picture ------------------ */
2149 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2150 (avctx->debug_mv)) {
2151 const int shift = 1 + quarter_sample;
2155 int h_chroma_shift, v_chroma_shift, block_height;
2156 const int width = avctx->width;
2157 const int height = avctx->height;
/* H.264/SVQ3 store MVs at 4x4 granularity (log2 = 2), others at 8x8. */
2158 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2159 const int mv_stride = (mb_width << mv_sample_log2) +
2160 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2162 *low_delay = 0; // needed to see the vectors without trashing the buffers
2164 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2166 av_frame_make_writable(pict);
2168 pict->opaque = NULL;
2169 ptr = pict->data[0];
2170 block_height = 16 >> v_chroma_shift;
2172 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2174 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2175 const int mb_index = mb_x + mb_y * mb_stride;
/* Motion vector arrows: one pass per requested vector type
 * (P forward, B forward, B backward), skipped when the frame's
 * picture type does not match. */
2176 if ((avctx->debug_mv) && motion_val[0]) {
2178 for (type = 0; type < 3; type++) {
2182 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2183 (pict->pict_type!= AV_PICTURE_TYPE_P))
2188 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2189 (pict->pict_type!= AV_PICTURE_TYPE_B))
2194 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2195 (pict->pict_type!= AV_PICTURE_TYPE_B))
2200 if (!USES_LIST(mbtype_table[mb_index], direction))
/* One arrow per partition, anchored at each partition center. */
2203 if (IS_8X8(mbtype_table[mb_index])) {
2205 for (i = 0; i < 4; i++) {
2206 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2207 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2208 int xy = (mb_x * 2 + (i & 1) +
2209 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2210 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2211 int my = (motion_val[direction][xy][1] >> shift) + sy;
2212 draw_arrow(ptr, sx, sy, mx, my, width,
2213 height, pict->linesize[0], 100);
2215 } else if (IS_16X8(mbtype_table[mb_index])) {
2217 for (i = 0; i < 2; i++) {
2218 int sx = mb_x * 16 + 8;
2219 int sy = mb_y * 16 + 4 + 8 * i;
2220 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2221 int mx = (motion_val[direction][xy][0] >> shift);
2222 int my = (motion_val[direction][xy][1] >> shift);
2224 if (IS_INTERLACED(mbtype_table[mb_index]))
2227 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2228 height, pict->linesize[0], 100);
2230 } else if (IS_8X16(mbtype_table[mb_index])) {
2232 for (i = 0; i < 2; i++) {
2233 int sx = mb_x * 16 + 4 + 8 * i;
2234 int sy = mb_y * 16 + 8;
2235 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2236 int mx = motion_val[direction][xy][0] >> shift;
2237 int my = motion_val[direction][xy][1] >> shift;
2239 if (IS_INTERLACED(mbtype_table[mb_index]))
2242 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2243 height, pict->linesize[0], 100);
2246 int sx= mb_x * 16 + 8;
2247 int sy= mb_y * 16 + 8;
2248 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2249 int mx= (motion_val[direction][xy][0]>>shift) + sx;
2250 int my= (motion_val[direction][xy][1]>>shift) + sy;
2251 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
/* QP visualization: tint both chroma planes with a brightness
 * proportional to the MB quantizer (qscale * 128 / 31). */
2255 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2256 uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2257 0x0101010101010101ULL;
2259 for (y = 0; y < block_height; y++) {
2260 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2261 (block_height * mb_y + y) *
2262 pict->linesize[1]) = c;
2263 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2264 (block_height * mb_y + y) *
2265 pict->linesize[2]) = c;
/* MB type visualization: map each type class to a hue via COLOR(). */
2268 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2270 int mb_type = mbtype_table[mb_index];
2273 #define COLOR(theta, r) \
2274 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2275 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2279 if (IS_PCM(mb_type)) {
2281 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2282 IS_INTRA16x16(mb_type)) {
2284 } else if (IS_INTRA4x4(mb_type)) {
2286 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2288 } else if (IS_DIRECT(mb_type)) {
2290 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2292 } else if (IS_GMC(mb_type)) {
2294 } else if (IS_SKIP(mb_type)) {
2296 } else if (!USES_LIST(mb_type, 1)) {
2298 } else if (!USES_LIST(mb_type, 0)) {
2301 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* Replicate the single-byte U/V values across a 64-bit word
 * so whole 8-pixel chroma rows can be stored at once. */
2305 u *= 0x0101010101010101ULL;
2306 v *= 0x0101010101010101ULL;
2307 for (y = 0; y < block_height; y++) {
2308 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2309 (block_height * mb_y + y) * pict->linesize[1]) = u;
2310 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2311 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* Partition boundaries: XOR-toggle luma so lines stay visible on
 * any background. */
2315 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2316 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2317 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2318 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2319 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2321 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2322 for (y = 0; y < 16; y++)
2323 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2324 pict->linesize[0]] ^= 0x80;
/* Sub-8x8 splits (4x4-granularity codecs): mark only where the
 * four sub-block MVs actually differ. */
2326 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2327 int dm = 1 << (mv_sample_log2 - 2);
2328 for (i = 0; i < 4; i++) {
2329 int sx = mb_x * 16 + 8 * (i & 1);
2330 int sy = mb_y * 16 + 8 * (i >> 1);
2331 int xy = (mb_x * 2 + (i & 1) +
2332 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2334 int32_t *mv = (int32_t *) &motion_val[0][xy];
2335 if (mv[0] != mv[dm] ||
2336 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2337 for (y = 0; y < 8; y++)
2338 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2339 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2340 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2341 pict->linesize[0]) ^= 0x8080808080808080ULL;
2345 if (IS_INTERLACED(mb_type) &&
2346 avctx->codec->id == AV_CODEC_ID_H264) {
2350 mbskip_table[mb_index] = 0;
/* Convenience wrapper: forwards the MpegEncContext's tables and geometry to
 * ff_print_debug_info2() for the given decoded picture. */
2356 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2358 ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
2359 p->qscale_table, p->motion_val, &s->low_delay,
2360 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/* Export the picture's qscale table as AVFrame side data: take a new ref on
 * the table buffer, offset it past the 2*mb_stride+1 edge padding, and attach
 * it via av_frame_set_qp_table(). Returns ENOMEM if the ref fails. */
2363 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2365 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2366 int offset = 2*s->mb_stride + 1;
2368 return AVERROR(ENOMEM);
2369 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2370 ref->size -= offset;
2371 ref->data += offset;
2372 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/*
 * Half-pel motion compensation of one block for lowres decoding.
 * Computes the sub-pel phase and integer source position from the motion
 * vector, emulates picture edges when the source block reaches outside
 * the valid area, and applies the h264 chroma MC routine (which is used
 * as the generic bilinear interpolator in lowres mode).
 */
2375 static inline int hpel_motion_lowres(MpegEncContext *s,
2376 uint8_t *dest, uint8_t *src,
2377 int field_based, int field_select,
2378 int src_x, int src_y,
2379 int width, int height, ptrdiff_t stride,
2380 int h_edge_pos, int v_edge_pos,
2381 int w, int h, h264_chroma_mc_func *pix_op,
2382 int motion_x, int motion_y)
2384 const int lowres = s->avctx->lowres;
2385 const int op_index = FFMIN(lowres, 3);
/* mask extracting the sub-pel fraction at this lowres shift */
2386 const int s_mask = (2 << lowres) - 1;
2390 if (s->quarter_sample) {
/* split MV into sub-pel phase (sx/sy) and integer displacement */
2395 sx = motion_x & s_mask;
2396 sy = motion_y & s_mask;
/* note: ">> lowres + 1" parses as ">> (lowres + 1)" */
2397 src_x += motion_x >> lowres + 1;
2398 src_y += motion_y >> lowres + 1;
2400 src += src_y * stride + src_x;
/* if the (w+1)x(h+1) source footprint crosses the edge, read through
 * the edge-emulation buffer instead */
2402 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2403 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2404 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2405 s->linesize, s->linesize,
2406 w + 1, (h + 1) << field_based,
2407 src_x, src_y << field_based,
2408 h_edge_pos, v_edge_pos);
2409 src = s->edge_emu_buffer;
/* rescale the sub-pel phase to the 1/8-pel range expected by pix_op */
2413 sx = (sx << 2) >> lowres;
2414 sy = (sy << 2) >> lowres;
2417 pix_op[op_index](dest, src, stride, h, sx, sy);
2421 /* apply one mpeg motion vector to the three components */
/*
 * Lowres motion compensation for one macroblock: luma plus both chroma
 * planes.  The chroma source position/phase depends on the output
 * format (H.263 rounding, H.261 full-pel, or generic 420/422/444
 * handling via chroma_x/y_shift).
 */
2422 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2429 uint8_t **ref_picture,
2430 h264_chroma_mc_func *pix_op,
2431 int motion_x, int motion_y,
2434 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2435 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2436 ptrdiff_t uvlinesize, linesize;
2437 const int lowres = s->avctx->lowres;
2438 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2439 const int block_s = 8>>lowres;
2440 const int s_mask = (2 << lowres) - 1;
2441 const int h_edge_pos = s->h_edge_pos >> lowres;
2442 const int v_edge_pos = s->v_edge_pos >> lowres;
/* field pictures use doubled line strides */
2443 linesize = s->current_picture.f->linesize[0] << field_based;
2444 uvlinesize = s->current_picture.f->linesize[1] << field_based;
2446 // FIXME obviously not perfect but qpel will not work in lowres anyway
2447 if (s->quarter_sample) {
2453 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
/* luma: sub-pel phase and integer source position */
2456 sx = motion_x & s_mask;
2457 sy = motion_y & s_mask;
2458 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2459 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* chroma position/phase: per-format rules */
2461 if (s->out_format == FMT_H263) {
2462 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2463 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2464 uvsrc_x = src_x >> 1;
2465 uvsrc_y = src_y >> 1;
2466 } else if (s->out_format == FMT_H261) {
2467 // even chroma mv's are full pel in H261
2470 uvsx = (2 * mx) & s_mask;
2471 uvsy = (2 * my) & s_mask;
2472 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2473 uvsrc_y = mb_y * block_s + (my >> lowres);
/* generic path: 420 vs 422/444 chroma subsampling */
2475 if(s->chroma_y_shift){
2480 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2481 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2483 if(s->chroma_x_shift){
2487 uvsy = motion_y & s_mask;
2489 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2492 uvsx = motion_x & s_mask;
2493 uvsy = motion_y & s_mask;
2500 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2501 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2502 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* edge emulation when the source block pokes outside the picture */
2504 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2505 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2506 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2507 linesize >> field_based, linesize >> field_based,
2508 17, 17 + field_based,
2509 src_x, src_y << field_based, h_edge_pos,
2511 ptr_y = s->edge_emu_buffer;
2512 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2513 uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2514 uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2515 s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2516 uvlinesize >> field_based, uvlinesize >> field_based,
2518 uvsrc_x, uvsrc_y << field_based,
2519 h_edge_pos >> 1, v_edge_pos >> 1);
2520 s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2521 uvlinesize >> field_based,uvlinesize >> field_based,
2523 uvsrc_x, uvsrc_y << field_based,
2524 h_edge_pos >> 1, v_edge_pos >> 1);
2530 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
/* bottom field: start one line down in both dest and source */
2532 dest_y += s->linesize;
2533 dest_cb += s->uvlinesize;
2534 dest_cr += s->uvlinesize;
2538 ptr_y += s->linesize;
2539 ptr_cb += s->uvlinesize;
2540 ptr_cr += s->uvlinesize;
/* rescale sub-pel phases and run the interpolators */
2543 sx = (sx << 2) >> lowres;
2544 sy = (sy << 2) >> lowres;
2545 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2547 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2548 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2549 uvsx = (uvsx << 2) >> lowres;
2550 uvsy = (uvsy << 2) >> lowres;
2552 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2553 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2556 // FIXME h261 lowres loop filter
/*
 * Chroma motion compensation for 4MV macroblocks in lowres mode.
 * The four luma vectors are averaged into a single chroma vector using
 * the H.263 rounding rule, then both chroma planes are interpolated.
 */
2559 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2560 uint8_t *dest_cb, uint8_t *dest_cr,
2561 uint8_t **ref_picture,
2562 h264_chroma_mc_func * pix_op,
2565 const int lowres = s->avctx->lowres;
2566 const int op_index = FFMIN(lowres, 3);
2567 const int block_s = 8 >> lowres;
2568 const int s_mask = (2 << lowres) - 1;
/* chroma plane is half-size: shift edges by one extra bit */
2569 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2570 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2571 int emu = 0, src_x, src_y, sx, sy;
2575 if (s->quarter_sample) {
2580 /* In case of 8X8, we construct a single chroma motion vector
2581 with a special rounding */
2582 mx = ff_h263_round_chroma(mx);
2583 my = ff_h263_round_chroma(my);
2587 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2588 src_y = s->mb_y * block_s + (my >> lowres + 1);
/* Cb and Cr share the same offset into their respective planes */
2590 offset = src_y * s->uvlinesize + src_x;
2591 ptr = ref_picture[1] + offset;
2592 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2593 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2594 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2595 s->uvlinesize, s->uvlinesize,
2597 src_x, src_y, h_edge_pos, v_edge_pos);
2598 ptr = s->edge_emu_buffer;
2601 sx = (sx << 2) >> lowres;
2602 sy = (sy << 2) >> lowres;
2603 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* repeat for the Cr plane, reusing the emulation decision (emu) */
2605 ptr = ref_picture[2] + offset;
2607 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2608 s->uvlinesize, s->uvlinesize,
2610 src_x, src_y, h_edge_pos, v_edge_pos);
2611 ptr = s->edge_emu_buffer;
2613 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2617 * motion compensation of a single macroblock
2619 * @param dest_y luma destination pointer
2620 * @param dest_cb chroma cb/u destination pointer
2621 * @param dest_cr chroma cr/v destination pointer
2622 * @param dir direction (0->forward, 1->backward)
2623 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2624 * @param pix_op halfpel motion compensation function (average or put normally)
2625 * the motion vectors are taken from s->mv and the MV type from s->mv_type
2627 static inline void MPV_motion_lowres(MpegEncContext *s,
2628 uint8_t *dest_y, uint8_t *dest_cb,
2630 int dir, uint8_t **ref_picture,
2631 h264_chroma_mc_func *pix_op)
2635 const int lowres = s->avctx->lowres;
2636 const int block_s = 8 >>lowres;
/* dispatch on MV type: 16x16, 4x8x8, field, 16x8, dual-prime */
2641 switch (s->mv_type) {
2643 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2645 ref_picture, pix_op,
2646 s->mv[dir][0][0], s->mv[dir][0][1],
/* MV_TYPE_8X8: one half-pel MC per 8x8 luma block, chroma done once
 * afterwards with the averaged vector */
2652 for (i = 0; i < 4; i++) {
2653 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2654 s->linesize) * block_s,
2655 ref_picture[0], 0, 0,
2656 (2 * mb_x + (i & 1)) * block_s,
2657 (2 * mb_y + (i >> 1)) * block_s,
2658 s->width, s->height, s->linesize,
2659 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2660 block_s, block_s, pix_op,
2661 s->mv[dir][i][0], s->mv[dir][i][1]);
2663 mx += s->mv[dir][i][0];
2664 my += s->mv[dir][i][1];
2667 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2668 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* MV_TYPE_FIELD in a frame picture: one MC per field */
2672 if (s->picture_structure == PICT_FRAME) {
2674 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2675 1, 0, s->field_select[dir][0],
2676 ref_picture, pix_op,
2677 s->mv[dir][0][0], s->mv[dir][0][1],
2680 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2681 1, 1, s->field_select[dir][1],
2682 ref_picture, pix_op,
2683 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture referencing the opposite parity of the current
 * frame: the data lives in the current picture, not the reference */
2686 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2687 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2688 ref_picture = s->current_picture_ptr->f->data;
2691 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2692 0, 0, s->field_select[dir][0],
2693 ref_picture, pix_op,
2695 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* MV_TYPE_16X8: two vectors, upper/lower half of the MB */
2699 for (i = 0; i < 2; i++) {
2700 uint8_t **ref2picture;
2702 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2703 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2704 ref2picture = ref_picture;
2706 ref2picture = s->current_picture_ptr->f->data;
2709 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2710 0, 0, s->field_select[dir][i],
2711 ref2picture, pix_op,
2712 s->mv[dir][i][0], s->mv[dir][i][1] +
2713 2 * block_s * i, block_s, mb_y >> 1);
2715 dest_y += 2 * block_s * s->linesize;
2716 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2717 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* MV_TYPE_DMV (dual prime): put then avg of the opposite parity */
2721 if (s->picture_structure == PICT_FRAME) {
2722 for (i = 0; i < 2; i++) {
2724 for (j = 0; j < 2; j++) {
2725 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2727 ref_picture, pix_op,
2728 s->mv[dir][2 * i + j][0],
2729 s->mv[dir][2 * i + j][1],
2732 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2735 for (i = 0; i < 2; i++) {
2736 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2737 0, 0, s->picture_structure != i + 1,
2738 ref_picture, pix_op,
2739 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2740 2 * block_s, mb_y >> 1);
2742 // after put we make avg of the same block
2743 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2745 // opposite parity is always in the same
2746 // frame if this is second field
2747 if (!s->first_field) {
2748 ref_picture = s->current_picture_ptr->f->data;
2759 * find the lowest MB row referenced in the MVs
/*
 * Used for frame-threading: returns the bottom-most MB row of the
 * reference picture that the current MB's motion vectors can touch,
 * so the decoder can wait for exactly that much progress.
 */
2761 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2763 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2764 int my, off, i, mvs;
/* field pictures / GMC are handled conservatively (fall-through path) */
2766 if (s->picture_structure != PICT_FRAME || s->mcsel)
2769 switch (s->mv_type) {
/* track the extreme vertical displacements over all MVs of the MB */
2783 for (i = 0; i < mvs; i++) {
2784 my = s->mv[dir][i][1]<<qpel_shift;
2785 my_max = FFMAX(my_max, my);
2786 my_min = FFMIN(my_min, my);
/* +63 >> 6: convert the worst-case qpel displacement to whole MB rows,
 * rounding up */
2789 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2791 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* conservative fallback: the whole picture may be referenced */
2793 return s->mb_height-1;
2796 /* put block[] to dest[] */
/* Dequantize an intra block and write (overwrite) its IDCT into dest. */
2797 static inline void put_dct(MpegEncContext *s,
2798 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2800 s->dct_unquantize_intra(s, block, i, qscale);
2801 s->idsp.idct_put(dest, line_size, block);
2804 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block to dest; skipped when the
 * block has no coefficients (block_last_index < 0). */
2805 static inline void add_dct(MpegEncContext *s,
2806 int16_t *block, int i, uint8_t *dest, int line_size)
2808 if (s->block_last_index[i] >= 0) {
2809 s->idsp.idct_add(dest, line_size, block);
/* Dequantize an inter block and add its IDCT to dest; skipped when the
 * block has no coefficients (block_last_index < 0). */
2813 static inline void add_dequant_dct(MpegEncContext *s,
2814 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2816 if (s->block_last_index[i] >= 0) {
2817 s->dct_unquantize_inter(s, block, i, qscale);
2819 s->idsp.idct_add(dest, line_size, block);
2824 * Clean dc, ac, coded_block for the current non-intra MB.
/*
 * Reset the intra-prediction state (DC predictors to 1024, AC
 * coefficients to 0, coded_block flags) for the current macroblock so a
 * later intra MB does not predict from stale data.
 */
2826 void ff_clean_intra_table_entries(MpegEncContext *s)
2828 int wrap = s->b8_stride;
2829 int xy = s->block_index[0];
/* luma: four 8x8 blocks share the 2x2 region at block_index[0] */
2832 s->dc_val[0][xy + 1 ] =
2833 s->dc_val[0][xy + wrap] =
2834 s->dc_val[0][xy + 1 + wrap] = 1024;
2836 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2837 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2838 if (s->msmpeg4_version>=3) {
2839 s->coded_block[xy ] =
2840 s->coded_block[xy + 1 ] =
2841 s->coded_block[xy + wrap] =
2842 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one entry per MB, indexed via mb_stride */
2845 wrap = s->mb_stride;
2846 xy = s->mb_x + s->mb_y * wrap;
2848 s->dc_val[2][xy] = 1024;
2850 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2851 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
/* mark the MB as no longer intra */
2853 s->mbintra_table[xy]= 0;
2856 /* generic function called after a macroblock has been parsed by the
2857 decoder or after it has been encoded by the encoder.
2859 Important variables used:
2860 s->mb_intra : true if intra macroblock
2861 s->mv_dir : motion vector direction
2862 s->mv_type : motion vector type
2863 s->mv : motion vector
2864 s->interlaced_dct : true if interlaced dct used (mpeg2)
/*
 * Reconstruct the current macroblock: motion compensation (inter) or
 * plain IDCT (intra), plus residual add, skip handling and the
 * scratchpad copy for non-readable pictures.  lowres_flag/is_mpeg12 are
 * compile-time-style flags so the always-inline expansion specializes.
 */
2866 static av_always_inline
2867 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2868 int lowres_flag, int is_mpeg12)
2870 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* hardware acceleration decodes whole MBs itself (e.g. XvMC) */
2873 s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
2874 s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
/* optional debug dump of the (permuted) DCT coefficients */
2878 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2879 /* print DCT coefficients */
2881 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2883 for(j=0; j<64; j++){
2884 av_log(s->avctx, AV_LOG_DEBUG, "%5d",
2885 block[i][s->idsp.idct_permutation[j]]);
2887 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2891 s->current_picture.qscale_table[mb_xy] = s->qscale;
2893 /* update DC predictors for P macroblocks */
2895 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2896 if(s->mbintra_table[mb_xy])
2897 ff_clean_intra_table_entries(s);
2901 s->last_dc[2] = 128 << s->intra_dc_precision;
2904 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2905 s->mbintra_table[mb_xy]=1;
/* pixel reconstruction can be skipped while encoding unless PSNR /
 * frame-skip statistics or RD decisions need the decoded picture */
2907 if ( (s->flags&CODEC_FLAG_PSNR)
2908 || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
2909 || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2910 uint8_t *dest_y, *dest_cb, *dest_cr;
2911 int dct_linesize, dct_offset;
2912 op_pixels_func (*op_pix)[4];
2913 qpel_mc_func (*op_qpix)[16];
2914 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2915 const int uvlinesize = s->current_picture.f->linesize[1];
2916 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2917 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2919 /* avoid copy if macroblock skipped in last frame too */
2920 /* skip only during decoding as we might trash the buffers during encoding a bit */
2922 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2924 if (s->mb_skipped) {
2926 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2928 } else if(!s->current_picture.reference) {
2931 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT interleaves the two fields: double the stride */
2935 dct_linesize = linesize << s->interlaced_dct;
2936 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2940 dest_cb= s->dest[1];
2941 dest_cr= s->dest[2];
/* non-readable pictures are reconstructed into a scratchpad first */
2943 dest_y = s->b_scratchpad;
2944 dest_cb= s->b_scratchpad+16*linesize;
2945 dest_cr= s->b_scratchpad+32*linesize;
2949 /* motion handling */
2950 /* decoding or more than one mb_type (MC was already done otherwise) */
/* with frame threading, wait until the reference rows we need exist */
2953 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2954 if (s->mv_dir & MV_DIR_FORWARD) {
2955 ff_thread_await_progress(&s->last_picture_ptr->tf,
2956 ff_MPV_lowest_referenced_row(s, 0),
2959 if (s->mv_dir & MV_DIR_BACKWARD) {
2960 ff_thread_await_progress(&s->next_picture_ptr->tf,
2961 ff_MPV_lowest_referenced_row(s, 1),
/* lowres path: put forward prediction, then average backward */
2967 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2969 if (s->mv_dir & MV_DIR_FORWARD) {
2970 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
2971 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2973 if (s->mv_dir & MV_DIR_BACKWARD) {
2974 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
/* full-resolution path: pick rounding per picture type */
2977 op_qpix = s->me.qpel_put;
2978 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2979 op_pix = s->hdsp.put_pixels_tab;
2981 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2983 if (s->mv_dir & MV_DIR_FORWARD) {
2984 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2985 op_pix = s->hdsp.avg_pixels_tab;
2986 op_qpix= s->me.qpel_avg;
2988 if (s->mv_dir & MV_DIR_BACKWARD) {
2989 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2994 /* skip dequant / idct if we are really late ;) */
2995 if(s->avctx->skip_idct){
2996 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2997 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2998 || s->avctx->skip_idct >= AVDISCARD_ALL)
3002 /* add dct residue */
/* codecs that keep quantized coefficients need dequant+add here */
3003 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
3004 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
3005 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3006 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3007 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3008 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3010 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3011 if (s->chroma_y_shift){
3012 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3013 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3017 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3018 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3019 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3020 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* codecs whose blocks are already dequantized: just idct_add */
3023 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3024 add_dct(s, block[0], 0, dest_y , dct_linesize);
3025 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3026 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3027 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3029 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3030 if(s->chroma_y_shift){//Chroma420
3031 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3032 add_dct(s, block[5], 5, dest_cr, uvlinesize);
3035 dct_linesize = uvlinesize << s->interlaced_dct;
3036 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3038 add_dct(s, block[4], 4, dest_cb, dct_linesize);
3039 add_dct(s, block[5], 5, dest_cr, dct_linesize);
3040 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3041 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3042 if(!s->chroma_x_shift){//Chroma444
3043 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3044 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3045 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3046 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3051 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3052 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3055 /* dct only in intra block */
3056 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
3057 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3058 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3059 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3060 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3062 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3063 if(s->chroma_y_shift){
3064 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3065 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3069 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3070 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3071 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3072 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* intra blocks that are already dequantized: plain idct_put */
3076 s->idsp.idct_put(dest_y, dct_linesize, block[0]);
3077 s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3078 s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
3079 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3081 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3082 if(s->chroma_y_shift){
3083 s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
3084 s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
3087 dct_linesize = uvlinesize << s->interlaced_dct;
3088 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3090 s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
3091 s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
3092 s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3093 s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3094 if(!s->chroma_x_shift){//Chroma444
3095 s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3096 s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3097 s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3098 s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* if we rendered into the scratchpad (non-readable picture), copy the
 * finished MB back into the real destination */
3106 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3107 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3108 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/*
 * Public MB decode entry point.  Dispatches to the always-inline
 * MPV_decode_mb_internal() with compile-time lowres / mpeg12 flags so
 * each of the four combinations gets a specialized expansion.
 */
3113 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
3115 if(s->out_format == FMT_MPEG1) {
3116 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
3117 else MPV_decode_mb_internal(s, block, 0, 1);
3120 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
3121 else MPV_decode_mb_internal(s, block, 0, 0);
/*
 * Notify the application that rows [y, y+h) of the current picture are
 * ready, via the generic ff_draw_horiz_band() helper.
 */
3124 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3126 ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3127 s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3128 s->first_field, s->low_delay);
/*
 * Set up s->block_index[] (per-8x8-block indices for the current MB row
 * position) and s->dest[] (plane pointers for the current MB),
 * accounting for lowres scaling and field/frame picture structure.
 */
3131 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3132 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3133 const int uvlinesize = s->current_picture.f->linesize[1];
/* mb_size is log2 of the MB edge in pixels (16 >> lowres) */
3134 const int mb_size= 4 - s->avctx->lowres;
/* four luma 8x8 blocks laid out on b8_stride, chroma after them */
3136 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3137 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3138 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3139 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3140 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3141 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3142 //block_index is not used by mpeg2, so it is not affected by chroma_format
3144 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
3145 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3146 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3148 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
/* frame pictures advance by full MB rows, field pictures by half */
3150 if(s->picture_structure==PICT_FRAME){
3151 s->dest[0] += s->mb_y * linesize << mb_size;
3152 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3153 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3155 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3156 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3157 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3158 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3164 * Permute an 8x8 block.
3165 * @param block the block which will be permuted according to the given permutation vector
3166 * @param permutation the permutation vector
3167 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3168 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3169 * (inverse) permutated to scantable order!
3171 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3177 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* first pass: stash the coefficients up to `last` (copy loop partly
 * elided in this extract) */
3179 for(i=0; i<=last; i++){
3180 const int j= scantable[i];
/* second pass: write them back at their permuted positions */
3185 for(i=0; i<=last; i++){
3186 const int j= scantable[i];
3187 const int perm_j= permutation[j];
3188 block[perm_j]= temp[j];
/*
 * Flush the decoder state: release every picture reference, reset MB
 * position, parser state and the bitstream buffer.  Called on seek /
 * avcodec_flush_buffers().
 */
3192 void ff_mpeg_flush(AVCodecContext *avctx){
3194 MpegEncContext *s = avctx->priv_data;
/* nothing to flush before the context is fully initialized */
3196 if(s==NULL || s->picture==NULL)
3199 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3200 ff_mpeg_unref_picture(s, &s->picture[i]);
3201 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3203 ff_mpeg_unref_picture(s, &s->current_picture);
3204 ff_mpeg_unref_picture(s, &s->last_picture);
3205 ff_mpeg_unref_picture(s, &s->next_picture);
3207 s->mb_x= s->mb_y= 0;
/* reset the packet parser so leftover bytes are not reused */
3210 s->parse_context.state= -1;
3211 s->parse_context.frame_start_found= 0;
3212 s->parse_context.overread= 0;
3213 s->parse_context.overread_index= 0;
3214 s->parse_context.index= 0;
3215 s->parse_context.last_index= 0;
3216 s->bitstream_buffer_size=0;
3221 * set qscale and update qscale dependent variables.
/*
 * Clamp qscale to the valid range (the 1..31 clamp is partly elided in
 * this extract) and refresh the derived chroma qscale and the luma /
 * chroma DC scale factors.
 */
3223 void ff_set_qscale(MpegEncContext * s, int qscale)
3227 else if (qscale > 31)
3231 s->chroma_qscale= s->chroma_qscale_table[qscale];
3233 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3234 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
3237 void ff_MPV_report_decode_progress(MpegEncContext *s)
3239 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3240 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);