2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
37 #include "h264chroma.h"
41 #include "mpegutils.h"
42 #include "mpegvideo.h"
/* Identity mapping: MPEG-1/2 use the same qscale for luma and chroma. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
//  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
    0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
   16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
/* MPEG-1 DC coefficients are always quantized with a scale of 8,
 * independent of qscale (table is indexed by qscale). */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8
};
/* MPEG-2 intra_dc_precision == 1: DC scale 4 for all qscales. */
static const uint8_t mpeg2_dc_scale_table1[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
};
/* MPEG-2 intra_dc_precision == 2: DC scale 2 for all qscales. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
};
/* MPEG-2 intra_dc_precision == 3: DC scale 1 for all qscales. */
static const uint8_t mpeg2_dc_scale_table3[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
103 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
104 ff_mpeg1_dc_scale_table,
105 mpeg2_dc_scale_table1,
106 mpeg2_dc_scale_table2,
107 mpeg2_dc_scale_table3,
/* Alternate horizontal scan order (used by e.g. WMV); a permutation of 0..63. */
const uint8_t ff_alternate_horizontal_scan[64] = {
     0,  1,  2,  3,  8,  9, 16, 17,
    10, 11,  4,  5,  6,  7, 15, 14,
    13, 12, 19, 18, 24, 25, 32, 33,
    26, 27, 20, 21, 22, 23, 28, 29,
    30, 31, 34, 35, 40, 41, 48, 49,
    42, 43, 36, 37, 38, 39, 44, 45,
    46, 47, 50, 51, 56, 57, 58, 59,
    52, 53, 54, 55, 60, 61, 62, 63,
};
/* MPEG-2 alternate (vertical) scan order; a permutation of 0..63. */
const uint8_t ff_alternate_vertical_scan[64] = {
     0,  8, 16, 24,  1,  9,  2, 10,
    17, 25, 32, 40, 48, 56, 57, 49,
    41, 33, 26, 18,  3, 11,  4, 12,
    19, 27, 34, 42, 50, 58, 35, 43,
    51, 59, 20, 28,  5, 13,  6, 14,
    21, 29, 36, 44, 52, 60, 37, 45,
    53, 61, 22, 30,  7, 15, 23, 31,
    38, 46, 54, 62, 39, 47, 55, 63,
};
132 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
133 int16_t *block, int n, int qscale)
135 int i, level, nCoeffs;
136 const uint16_t *quant_matrix;
138 nCoeffs= s->block_last_index[n];
140 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
141 /* XXX: only mpeg1 */
142 quant_matrix = s->intra_matrix;
143 for(i=1;i<=nCoeffs;i++) {
144 int j= s->intra_scantable.permutated[i];
149 level = (int)(level * qscale * quant_matrix[j]) >> 3;
150 level = (level - 1) | 1;
153 level = (int)(level * qscale * quant_matrix[j]) >> 3;
154 level = (level - 1) | 1;
161 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
162 int16_t *block, int n, int qscale)
164 int i, level, nCoeffs;
165 const uint16_t *quant_matrix;
167 nCoeffs= s->block_last_index[n];
169 quant_matrix = s->inter_matrix;
170 for(i=0; i<=nCoeffs; i++) {
171 int j= s->intra_scantable.permutated[i];
176 level = (((level << 1) + 1) * qscale *
177 ((int) (quant_matrix[j]))) >> 4;
178 level = (level - 1) | 1;
181 level = (((level << 1) + 1) * qscale *
182 ((int) (quant_matrix[j]))) >> 4;
183 level = (level - 1) | 1;
190 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
191 int16_t *block, int n, int qscale)
193 int i, level, nCoeffs;
194 const uint16_t *quant_matrix;
196 if(s->alternate_scan) nCoeffs= 63;
197 else nCoeffs= s->block_last_index[n];
199 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
200 quant_matrix = s->intra_matrix;
201 for(i=1;i<=nCoeffs;i++) {
202 int j= s->intra_scantable.permutated[i];
207 level = (int)(level * qscale * quant_matrix[j]) >> 3;
210 level = (int)(level * qscale * quant_matrix[j]) >> 3;
217 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
218 int16_t *block, int n, int qscale)
220 int i, level, nCoeffs;
221 const uint16_t *quant_matrix;
224 if(s->alternate_scan) nCoeffs= 63;
225 else nCoeffs= s->block_last_index[n];
227 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
229 quant_matrix = s->intra_matrix;
230 for(i=1;i<=nCoeffs;i++) {
231 int j= s->intra_scantable.permutated[i];
236 level = (int)(level * qscale * quant_matrix[j]) >> 3;
239 level = (int)(level * qscale * quant_matrix[j]) >> 3;
248 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
249 int16_t *block, int n, int qscale)
251 int i, level, nCoeffs;
252 const uint16_t *quant_matrix;
255 if(s->alternate_scan) nCoeffs= 63;
256 else nCoeffs= s->block_last_index[n];
258 quant_matrix = s->inter_matrix;
259 for(i=0; i<=nCoeffs; i++) {
260 int j= s->intra_scantable.permutated[i];
265 level = (((level << 1) + 1) * qscale *
266 ((int) (quant_matrix[j]))) >> 4;
269 level = (((level << 1) + 1) * qscale *
270 ((int) (quant_matrix[j]))) >> 4;
279 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
280 int16_t *block, int n, int qscale)
282 int i, level, qmul, qadd;
285 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
290 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
291 qadd = (qscale - 1) | 1;
298 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
300 for(i=1; i<=nCoeffs; i++) {
304 level = level * qmul - qadd;
306 level = level * qmul + qadd;
313 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
314 int16_t *block, int n, int qscale)
316 int i, level, qmul, qadd;
319 av_assert2(s->block_last_index[n]>=0);
321 qadd = (qscale - 1) | 1;
324 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
326 for(i=0; i<=nCoeffs; i++) {
330 level = level * qmul - qadd;
332 level = level * qmul + qadd;
339 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
341 int mb_x, int mb_y, int mb_intra, int mb_skipped)
343 MpegEncContext *s = opaque;
346 s->mv_type = mv_type;
347 s->mb_intra = mb_intra;
348 s->mb_skipped = mb_skipped;
351 memcpy(s->mv, mv, sizeof(*mv));
353 ff_init_block_index(s);
354 ff_update_block_index(s);
356 s->bdsp.clear_blocks(s->block[0]);
358 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
359 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
360 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
363 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
364 ff_MPV_decode_mb(s, s->block);
/* Debug helper (-debug nomc): fill h rows of a 16-pixel-wide block with
 * mid-gray instead of performing motion compensation; src is ignored. */
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while (h--)
        memset(dst + h*linesize, 128, 16);
}
/* Debug helper (-debug nomc): fill h rows of an 8-pixel-wide block with
 * mid-gray instead of performing motion compensation; src is ignored. */
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while (h--)
        memset(dst + h*linesize, 128, 8);
}
379 /* init common dct for both encoder and decoder */
380 av_cold int ff_dct_common_init(MpegEncContext *s)
382 ff_blockdsp_init(&s->bdsp, s->avctx);
383 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
384 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
385 ff_idctdsp_init(&s->idsp, s->avctx);
386 ff_me_cmp_init(&s->mecc, s->avctx);
387 ff_mpegvideodsp_init(&s->mdsp);
388 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
390 if (s->avctx->debug & FF_DEBUG_NOMC) {
392 for (i=0; i<4; i++) {
393 s->hdsp.avg_pixels_tab[0][i] = gray16;
394 s->hdsp.put_pixels_tab[0][i] = gray16;
395 s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
397 s->hdsp.avg_pixels_tab[1][i] = gray8;
398 s->hdsp.put_pixels_tab[1][i] = gray8;
399 s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
403 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
404 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
405 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
406 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
407 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
408 if (s->flags & CODEC_FLAG_BITEXACT)
409 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
410 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
413 ff_MPV_common_init_axp(s);
415 ff_MPV_common_init_arm(s);
417 ff_MPV_common_init_ppc(s);
419 ff_MPV_common_init_x86(s);
421 /* load & permutate scantables
422 * note: only wmv uses different ones
424 if (s->alternate_scan) {
425 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
426 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
428 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
429 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
431 ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
432 ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
437 static int frame_size_alloc(MpegEncContext *s, int linesize)
439 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
441 if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
445 av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
446 return AVERROR_PATCHWELCOME;
449 // edge emu needs blocksize + filter length - 1
450 // (= 17x17 for halfpel / 21x21 for h264)
451 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
452 // at uvlinesize. It supports only YUV420 so 24x24 is enough
453 // linesize * interlaced * MBsize
454 // we also use this buffer for encoding in encode_mb_internal() needig an additional 32 lines
455 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 68,
458 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
460 s->me.temp = s->me.scratchpad;
461 s->rd_scratchpad = s->me.scratchpad;
462 s->b_scratchpad = s->me.scratchpad;
463 s->obmc_scratchpad = s->me.scratchpad + 16;
467 av_freep(&s->edge_emu_buffer);
468 return AVERROR(ENOMEM);
472 * Allocate a frame buffer
474 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
476 int edges_needed = av_codec_is_encoder(s->avctx->codec);
480 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
481 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
482 s->codec_id != AV_CODEC_ID_MSS2) {
484 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
485 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
488 r = ff_thread_get_buffer(s->avctx, &pic->tf,
489 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
491 pic->f->width = s->avctx->width;
492 pic->f->height = s->avctx->height;
493 pic->f->format = s->avctx->pix_fmt;
494 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
497 if (r < 0 || !pic->f->buf[0]) {
498 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
505 for (i = 0; pic->f->data[i]; i++) {
506 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
507 pic->f->linesize[i] +
508 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
509 pic->f->data[i] += offset;
511 pic->f->width = s->avctx->width;
512 pic->f->height = s->avctx->height;
515 if (s->avctx->hwaccel) {
516 assert(!pic->hwaccel_picture_private);
517 if (s->avctx->hwaccel->frame_priv_data_size) {
518 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
519 if (!pic->hwaccel_priv_buf) {
520 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
523 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
527 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
528 s->uvlinesize != pic->f->linesize[1])) {
529 av_log(s->avctx, AV_LOG_ERROR,
530 "get_buffer() failed (stride changed)\n");
531 ff_mpeg_unref_picture(s, pic);
535 if (pic->f->linesize[1] != pic->f->linesize[2]) {
536 av_log(s->avctx, AV_LOG_ERROR,
537 "get_buffer() failed (uv stride mismatch)\n");
538 ff_mpeg_unref_picture(s, pic);
542 if (!s->edge_emu_buffer &&
543 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
544 av_log(s->avctx, AV_LOG_ERROR,
545 "get_buffer() failed to allocate context scratch buffers.\n");
546 ff_mpeg_unref_picture(s, pic);
553 void ff_free_picture_tables(Picture *pic)
557 pic->alloc_mb_width =
558 pic->alloc_mb_height = 0;
560 av_buffer_unref(&pic->mb_var_buf);
561 av_buffer_unref(&pic->mc_mb_var_buf);
562 av_buffer_unref(&pic->mb_mean_buf);
563 av_buffer_unref(&pic->mbskip_table_buf);
564 av_buffer_unref(&pic->qscale_table_buf);
565 av_buffer_unref(&pic->mb_type_buf);
567 for (i = 0; i < 2; i++) {
568 av_buffer_unref(&pic->motion_val_buf[i]);
569 av_buffer_unref(&pic->ref_index_buf[i]);
573 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
575 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
576 const int mb_array_size = s->mb_stride * s->mb_height;
577 const int b8_array_size = s->b8_stride * s->mb_height * 2;
581 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
582 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
583 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
585 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
586 return AVERROR(ENOMEM);
589 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
590 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
591 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
592 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
593 return AVERROR(ENOMEM);
596 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
597 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
598 int ref_index_size = 4 * mb_array_size;
600 for (i = 0; mv_size && i < 2; i++) {
601 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
602 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
603 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
604 return AVERROR(ENOMEM);
608 pic->alloc_mb_width = s->mb_width;
609 pic->alloc_mb_height = s->mb_height;
614 static int make_tables_writable(Picture *pic)
617 #define MAKE_WRITABLE(table) \
620 (ret = av_buffer_make_writable(&pic->table)) < 0)\
624 MAKE_WRITABLE(mb_var_buf);
625 MAKE_WRITABLE(mc_mb_var_buf);
626 MAKE_WRITABLE(mb_mean_buf);
627 MAKE_WRITABLE(mbskip_table_buf);
628 MAKE_WRITABLE(qscale_table_buf);
629 MAKE_WRITABLE(mb_type_buf);
631 for (i = 0; i < 2; i++) {
632 MAKE_WRITABLE(motion_val_buf[i]);
633 MAKE_WRITABLE(ref_index_buf[i]);
640 * Allocate a Picture.
641 * The pixels are allocated/set by calling get_buffer() if shared = 0
643 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
647 if (pic->qscale_table_buf)
648 if ( pic->alloc_mb_width != s->mb_width
649 || pic->alloc_mb_height != s->mb_height)
650 ff_free_picture_tables(pic);
653 av_assert0(pic->f->data[0]);
656 av_assert0(!pic->f->buf[0]);
658 if (alloc_frame_buffer(s, pic) < 0)
661 s->linesize = pic->f->linesize[0];
662 s->uvlinesize = pic->f->linesize[1];
665 if (!pic->qscale_table_buf)
666 ret = alloc_picture_tables(s, pic);
668 ret = make_tables_writable(pic);
673 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
674 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
675 pic->mb_mean = pic->mb_mean_buf->data;
678 pic->mbskip_table = pic->mbskip_table_buf->data;
679 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
680 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
682 if (pic->motion_val_buf[0]) {
683 for (i = 0; i < 2; i++) {
684 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
685 pic->ref_index[i] = pic->ref_index_buf[i]->data;
691 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
692 ff_mpeg_unref_picture(s, pic);
693 ff_free_picture_tables(pic);
694 return AVERROR(ENOMEM);
698 * Deallocate a picture.
700 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
702 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
705 /* WM Image / Screen codecs allocate internal buffers with different
706 * dimensions / colorspaces; ignore user-defined callbacks for these. */
707 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
708 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
709 s->codec_id != AV_CODEC_ID_MSS2)
710 ff_thread_release_buffer(s->avctx, &pic->tf);
712 av_frame_unref(pic->f);
714 av_buffer_unref(&pic->hwaccel_priv_buf);
716 if (pic->needs_realloc)
717 ff_free_picture_tables(pic);
719 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
722 static int update_picture_tables(Picture *dst, Picture *src)
726 #define UPDATE_TABLE(table)\
729 (!dst->table || dst->table->buffer != src->table->buffer)) {\
730 av_buffer_unref(&dst->table);\
731 dst->table = av_buffer_ref(src->table);\
733 ff_free_picture_tables(dst);\
734 return AVERROR(ENOMEM);\
739 UPDATE_TABLE(mb_var_buf);
740 UPDATE_TABLE(mc_mb_var_buf);
741 UPDATE_TABLE(mb_mean_buf);
742 UPDATE_TABLE(mbskip_table_buf);
743 UPDATE_TABLE(qscale_table_buf);
744 UPDATE_TABLE(mb_type_buf);
745 for (i = 0; i < 2; i++) {
746 UPDATE_TABLE(motion_val_buf[i]);
747 UPDATE_TABLE(ref_index_buf[i]);
750 dst->mb_var = src->mb_var;
751 dst->mc_mb_var = src->mc_mb_var;
752 dst->mb_mean = src->mb_mean;
753 dst->mbskip_table = src->mbskip_table;
754 dst->qscale_table = src->qscale_table;
755 dst->mb_type = src->mb_type;
756 for (i = 0; i < 2; i++) {
757 dst->motion_val[i] = src->motion_val[i];
758 dst->ref_index[i] = src->ref_index[i];
761 dst->alloc_mb_width = src->alloc_mb_width;
762 dst->alloc_mb_height = src->alloc_mb_height;
767 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
771 av_assert0(!dst->f->buf[0]);
772 av_assert0(src->f->buf[0]);
776 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
780 ret = update_picture_tables(dst, src);
784 if (src->hwaccel_picture_private) {
785 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
786 if (!dst->hwaccel_priv_buf)
788 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
791 dst->field_picture = src->field_picture;
792 dst->mb_var_sum = src->mb_var_sum;
793 dst->mc_mb_var_sum = src->mc_mb_var_sum;
794 dst->b_frame_score = src->b_frame_score;
795 dst->needs_realloc = src->needs_realloc;
796 dst->reference = src->reference;
797 dst->shared = src->shared;
801 ff_mpeg_unref_picture(s, dst);
805 static void exchange_uv(MpegEncContext *s)
810 s->pblocks[4] = s->pblocks[5];
814 static int init_duplicate_context(MpegEncContext *s)
816 int y_size = s->b8_stride * (2 * s->mb_height + 1);
817 int c_size = s->mb_stride * (s->mb_height + 1);
818 int yc_size = y_size + 2 * c_size;
821 if (s->mb_height & 1)
822 yc_size += 2*s->b8_stride + 2*s->mb_stride;
829 s->obmc_scratchpad = NULL;
832 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
833 ME_MAP_SIZE * sizeof(uint32_t), fail)
834 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
835 ME_MAP_SIZE * sizeof(uint32_t), fail)
836 if (s->avctx->noise_reduction) {
837 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
838 2 * 64 * sizeof(int), fail)
841 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
842 s->block = s->blocks[0];
844 for (i = 0; i < 12; i++) {
845 s->pblocks[i] = &s->block[i];
847 if (s->avctx->codec_tag == AV_RL32("VCR2"))
850 if (s->out_format == FMT_H263) {
852 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
853 yc_size * sizeof(int16_t) * 16, fail);
854 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
855 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
856 s->ac_val[2] = s->ac_val[1] + c_size;
861 return -1; // free() through ff_MPV_common_end()
864 static void free_duplicate_context(MpegEncContext *s)
869 av_freep(&s->edge_emu_buffer);
870 av_freep(&s->me.scratchpad);
874 s->obmc_scratchpad = NULL;
876 av_freep(&s->dct_error_sum);
877 av_freep(&s->me.map);
878 av_freep(&s->me.score_map);
879 av_freep(&s->blocks);
880 av_freep(&s->ac_val_base);
884 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
886 #define COPY(a) bak->a = src->a
887 COPY(edge_emu_buffer);
892 COPY(obmc_scratchpad);
899 COPY(me.map_generation);
911 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
915 // FIXME copy only needed parts
917 backup_duplicate_context(&bak, dst);
918 memcpy(dst, src, sizeof(MpegEncContext));
919 backup_duplicate_context(dst, &bak);
920 for (i = 0; i < 12; i++) {
921 dst->pblocks[i] = &dst->block[i];
923 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
925 if (!dst->edge_emu_buffer &&
926 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
927 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
928 "scratch buffers.\n");
931 // STOP_TIMER("update_duplicate_context")
932 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
936 int ff_mpeg_update_thread_context(AVCodecContext *dst,
937 const AVCodecContext *src)
940 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
947 // FIXME can parameters change on I-frames?
948 // in that case dst may need a reinit
949 if (!s->context_initialized) {
950 memcpy(s, s1, sizeof(MpegEncContext));
953 s->bitstream_buffer = NULL;
954 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
956 if (s1->context_initialized){
957 // s->picture_range_start += MAX_PICTURE_COUNT;
958 // s->picture_range_end += MAX_PICTURE_COUNT;
959 if((ret = ff_MPV_common_init(s)) < 0){
960 memset(s, 0, sizeof(MpegEncContext));
967 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
968 s->context_reinit = 0;
969 s->height = s1->height;
970 s->width = s1->width;
971 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
975 s->avctx->coded_height = s1->avctx->coded_height;
976 s->avctx->coded_width = s1->avctx->coded_width;
977 s->avctx->width = s1->avctx->width;
978 s->avctx->height = s1->avctx->height;
980 s->coded_picture_number = s1->coded_picture_number;
981 s->picture_number = s1->picture_number;
983 av_assert0(!s->picture || s->picture != s1->picture);
985 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
986 ff_mpeg_unref_picture(s, &s->picture[i]);
987 if (s1->picture[i].f->buf[0] &&
988 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
992 #define UPDATE_PICTURE(pic)\
994 ff_mpeg_unref_picture(s, &s->pic);\
995 if (s1->pic.f && s1->pic.f->buf[0])\
996 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
998 ret = update_picture_tables(&s->pic, &s1->pic);\
1003 UPDATE_PICTURE(current_picture);
1004 UPDATE_PICTURE(last_picture);
1005 UPDATE_PICTURE(next_picture);
1007 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1008 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1009 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1011 // Error/bug resilience
1012 s->next_p_frame_damaged = s1->next_p_frame_damaged;
1013 s->workaround_bugs = s1->workaround_bugs;
1014 s->padding_bug_score = s1->padding_bug_score;
1016 // MPEG4 timing info
1017 memcpy(&s->last_time_base, &s1->last_time_base,
1018 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1019 (char *) &s1->last_time_base);
1022 s->max_b_frames = s1->max_b_frames;
1023 s->low_delay = s1->low_delay;
1024 s->droppable = s1->droppable;
1026 // DivX handling (doesn't work)
1027 s->divx_packed = s1->divx_packed;
1029 if (s1->bitstream_buffer) {
1030 if (s1->bitstream_buffer_size +
1031 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
1032 av_fast_malloc(&s->bitstream_buffer,
1033 &s->allocated_bitstream_buffer_size,
1034 s1->allocated_bitstream_buffer_size);
1035 s->bitstream_buffer_size = s1->bitstream_buffer_size;
1036 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1037 s1->bitstream_buffer_size);
1038 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1039 FF_INPUT_BUFFER_PADDING_SIZE);
1042 // linesize dependend scratch buffer allocation
1043 if (!s->edge_emu_buffer)
1045 if (frame_size_alloc(s, s1->linesize) < 0) {
1046 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1047 "scratch buffers.\n");
1048 return AVERROR(ENOMEM);
1051 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1052 "be allocated due to unknown size.\n");
1055 // MPEG2/interlacing info
1056 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1057 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1059 if (!s1->first_field) {
1060 s->last_pict_type = s1->pict_type;
1061 if (s1->current_picture_ptr)
1062 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1069 * Set the given MpegEncContext to common defaults
1070 * (same for encoding and decoding).
1071 * The changed fields will not depend upon the
1072 * prior state of the MpegEncContext.
1074 void ff_MPV_common_defaults(MpegEncContext *s)
1076 s->y_dc_scale_table =
1077 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1078 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1079 s->progressive_frame = 1;
1080 s->progressive_sequence = 1;
1081 s->picture_structure = PICT_FRAME;
1083 s->coded_picture_number = 0;
1084 s->picture_number = 0;
1089 s->slice_context_count = 1;
1093 * Set the given MpegEncContext to defaults for decoding.
1094 * the changed fields will not depend upon
1095 * the prior state of the MpegEncContext.
1097 void ff_MPV_decode_defaults(MpegEncContext *s)
1099 ff_MPV_common_defaults(s);
1102 static int init_er(MpegEncContext *s)
1104 ERContext *er = &s->er;
1105 int mb_array_size = s->mb_height * s->mb_stride;
1108 er->avctx = s->avctx;
1109 er->mecc = &s->mecc;
1111 er->mb_index2xy = s->mb_index2xy;
1112 er->mb_num = s->mb_num;
1113 er->mb_width = s->mb_width;
1114 er->mb_height = s->mb_height;
1115 er->mb_stride = s->mb_stride;
1116 er->b8_stride = s->b8_stride;
1118 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1119 er->error_status_table = av_mallocz(mb_array_size);
1120 if (!er->er_temp_buffer || !er->error_status_table)
1123 er->mbskip_table = s->mbskip_table;
1124 er->mbintra_table = s->mbintra_table;
1126 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1127 er->dc_val[i] = s->dc_val[i];
1129 er->decode_mb = mpeg_er_decode_mb;
1134 av_freep(&er->er_temp_buffer);
1135 av_freep(&er->error_status_table);
1136 return AVERROR(ENOMEM);
1140 * Initialize and allocates MpegEncContext fields dependent on the resolution.
1142 static int init_context_frame(MpegEncContext *s)
1144 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1146 s->mb_width = (s->width + 15) / 16;
1147 s->mb_stride = s->mb_width + 1;
1148 s->b8_stride = s->mb_width * 2 + 1;
1149 mb_array_size = s->mb_height * s->mb_stride;
1150 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1152 /* set default edge pos, will be overridden
1153 * in decode_header if needed */
1154 s->h_edge_pos = s->mb_width * 16;
1155 s->v_edge_pos = s->mb_height * 16;
1157 s->mb_num = s->mb_width * s->mb_height;
1162 s->block_wrap[3] = s->b8_stride;
1164 s->block_wrap[5] = s->mb_stride;
1166 y_size = s->b8_stride * (2 * s->mb_height + 1);
1167 c_size = s->mb_stride * (s->mb_height + 1);
1168 yc_size = y_size + 2 * c_size;
1170 if (s->mb_height & 1)
1171 yc_size += 2*s->b8_stride + 2*s->mb_stride;
1173 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
1174 for (y = 0; y < s->mb_height; y++)
1175 for (x = 0; x < s->mb_width; x++)
1176 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1178 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1181 /* Allocate MV tables */
1182 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1183 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1184 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1185 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
// NOTE(review): continuation of init_context_frame(); the function head and a
// number of elided lines (closing braces, "fail:" label) are outside this view.
// All "*_base" MV tables are allocated with a border row/column; the public
// pointers skip past it by (mb_stride + 1).
1186 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1187 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1188 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1189 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1190 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1191 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1192 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1193 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1195 /* Allocate MB type table */
1196 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1198 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
// cplx_tab/bits_tab are rate-control scratch arrays; FF_ALLOC (not ALLOCZ) —
// contents are presumably overwritten before being read (TODO confirm).
1200 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1201 mb_array_size * sizeof(float), fail);
1202 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1203 mb_array_size * sizeof(float), fail);
// MPEG-4 and interlaced-ME need per-field MV tables: [i][j][k] indexes
// direction/field combinations (2x2x2 for B, 2x2 for P).
1207 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1208 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1209 /* interlaced direct mode decoding tables */
1210 for (i = 0; i < 2; i++) {
1212 for (j = 0; j < 2; j++) {
1213 for (k = 0; k < 2; k++) {
1214 FF_ALLOCZ_OR_GOTO(s->avctx,
1215 s->b_field_mv_table_base[i][j][k],
1216 mv_table_size * 2 * sizeof(int16_t),
1218 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1221 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1222 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1223 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1225 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1228 if (s->out_format == FMT_H263) {
// extra rows for odd mb_height; coded_block also skips the border
1230 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1231 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1233 /* cbp, ac_pred, pred_dir */
1234 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1235 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1238 if (s->h263_pred || s->h263_plus || !s->encoding) {
1240 // MN: we need these for error resilience of intra-frames
// DC predictors: luma plane at [0], the two chroma planes at [1]/[2];
// all seeded to 1024, the standard reset value for DC prediction.
1241 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1242 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1243 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1244 s->dc_val[2] = s->dc_val[1] + c_size;
1245 for (i = 0; i < yc_size; i++)
1246 s->dc_val_base[i] = 1024;
1249 /* which mb is a intra block */
1250 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1251 memset(s->mbintra_table, 1, mb_array_size);
1253 /* init macroblock skip table */
1254 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1255 // Note the + 1 is for a quicker mpeg4 slice_end detection
// fail: path (label elided from this view)
1259 return AVERROR(ENOMEM);
1263 * init common structure for both encoder and decoder.
1264 * this assumes that some variables like width/height are already set
// NOTE(review): several original lines (opening brace, local declarations,
// some returns/closing braces) are elided from this view.
1266 av_cold int ff_MPV_common_init(MpegEncContext *s)
// One slice context per thread when slice threading is active.
1269 int nb_slices = (HAVE_THREADS &&
1270 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1271 s->avctx->thread_count : 1;
1273 if (s->encoding && s->avctx->slices)
1274 nb_slices = s->avctx->slices;
// Interlaced MPEG-2 rounds mb_height to a multiple of two macroblock rows
// (32 pixels) so each field has a whole number of MB rows.
1276 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1277 s->mb_height = (s->height + 31) / 32 * 2;
1279 s->mb_height = (s->height + 15) / 16;
1281 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1282 av_log(s->avctx, AV_LOG_ERROR,
1283 "decoding to AV_PIX_FMT_NONE is not supported.\n");
// Clamp the slice count to what is supported / useful.
1287 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1290 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1292 max_slices = MAX_THREADS;
1293 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1294 " reducing to %d\n", nb_slices, max_slices);
1295 nb_slices = max_slices;
1298 if ((s->width || s->height) &&
1299 av_image_check_size(s->width, s->height, 0, s->avctx))
1302 ff_dct_common_init(s);
1304 s->flags = s->avctx->flags;
1305 s->flags2 = s->avctx->flags2;
1307 /* set chroma shifts */
1308 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1310 &s->chroma_y_shift);
1312 /* convert fourcc to upper case */
1313 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1315 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
// Allocate the picture pool and one AVFrame per slot, plus the four
// special pictures (next/last/current/new).
1317 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1318 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1319 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1320 s->picture[i].f = av_frame_alloc();
1321 if (!s->picture[i].f)
1324 memset(&s->next_picture, 0, sizeof(s->next_picture));
1325 memset(&s->last_picture, 0, sizeof(s->last_picture));
1326 memset(&s->current_picture, 0, sizeof(s->current_picture));
1327 memset(&s->new_picture, 0, sizeof(s->new_picture));
1328 s->next_picture.f = av_frame_alloc();
1329 if (!s->next_picture.f)
1331 s->last_picture.f = av_frame_alloc();
1332 if (!s->last_picture.f)
1334 s->current_picture.f = av_frame_alloc();
1335 if (!s->current_picture.f)
1337 s->new_picture.f = av_frame_alloc();
1338 if (!s->new_picture.f)
1341 if (init_context_frame(s))
1344 s->parse_context.state = -1;
1346 s->context_initialized = 1;
1347 s->thread_context[0] = s;
1349 //     if (s->width && s->height) {
// Duplicate the context for each extra slice thread and split the MB rows
// evenly (rounded) between start_mb_y/end_mb_y.
1350 if (nb_slices > 1) {
1351 for (i = 1; i < nb_slices; i++) {
1352 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1353 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1356 for (i = 0; i < nb_slices; i++) {
1357 if (init_duplicate_context(s->thread_context[i]) < 0)
1359 s->thread_context[i]->start_mb_y =
1360 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1361 s->thread_context[i]->end_mb_y =
1362 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1365 if (init_duplicate_context(s) < 0)
1368 s->end_mb_y = s->mb_height;
1370 s->slice_context_count = nb_slices;
// fail: path — tear everything down again (label elided from this view)
1375 ff_MPV_common_end(s);
1380 * Frees and resets MpegEncContext fields depending on the resolution.
1381 * Is used during resolution changes to avoid a full reinitialization of the
// NOTE(review): doc-comment tail and some braces elided from this view.
// Mirror of init_context_frame(): frees every per-resolution table and NULLs
// the derived (border-offset) pointers so a later re-init starts clean.
1384 static int free_context_frame(MpegEncContext *s)
1388 av_freep(&s->mb_type);
1389 av_freep(&s->p_mv_table_base);
1390 av_freep(&s->b_forw_mv_table_base);
1391 av_freep(&s->b_back_mv_table_base);
1392 av_freep(&s->b_bidir_forw_mv_table_base);
1393 av_freep(&s->b_bidir_back_mv_table_base);
1394 av_freep(&s->b_direct_mv_table_base);
1395 s->p_mv_table = NULL;
1396 s->b_forw_mv_table = NULL;
1397 s->b_back_mv_table = NULL;
1398 s->b_bidir_forw_mv_table = NULL;
1399 s->b_bidir_back_mv_table = NULL;
1400 s->b_direct_mv_table = NULL;
1401 for (i = 0; i < 2; i++) {
1402 for (j = 0; j < 2; j++) {
1403 for (k = 0; k < 2; k++) {
1404 av_freep(&s->b_field_mv_table_base[i][j][k]);
1405 s->b_field_mv_table[i][j][k] = NULL;
1407 av_freep(&s->b_field_select_table[i][j]);
1408 av_freep(&s->p_field_mv_table_base[i][j]);
1409 s->p_field_mv_table[i][j] = NULL;
1411 av_freep(&s->p_field_select_table[i]);
1414 av_freep(&s->dc_val_base);
1415 av_freep(&s->coded_block_base);
1416 av_freep(&s->mbintra_table);
1417 av_freep(&s->cbp_table);
1418 av_freep(&s->pred_dir_table);
1420 av_freep(&s->mbskip_table);
1422 av_freep(&s->er.error_status_table);
1423 av_freep(&s->er.er_temp_buffer);
1424 av_freep(&s->mb_index2xy);
1425 av_freep(&s->lambda_table);
1427 av_freep(&s->cplx_tab);
1428 av_freep(&s->bits_tab);
// reset so the next init recomputes strides from the new resolution
1430 s->linesize = s->uvlinesize = 0;
// Reinitialize the per-resolution state after a mid-stream frame size change:
// free slice contexts and frame tables, mark pool pictures for realloc,
// recompute mb_height, then rebuild via init_context_frame().
// NOTE(review): braces / some error-path lines elided from this view.
1435 int ff_MPV_common_frame_size_change(MpegEncContext *s)
1439 if (s->slice_context_count > 1) {
1440 for (i = 0; i < s->slice_context_count; i++) {
1441 free_duplicate_context(s->thread_context[i]);
// index starts at 1: thread_context[0] is s itself and must not be freed
1443 for (i = 1; i < s->slice_context_count; i++) {
1444 av_freep(&s->thread_context[i]);
1447 free_duplicate_context(s);
1449 if ((err = free_context_frame(s)) < 0)
// keep pooled pictures but force their buffers to be reallocated lazily
1453 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1454 s->picture[i].needs_realloc = 1;
1457 s->last_picture_ptr =
1458 s->next_picture_ptr =
1459 s->current_picture_ptr = NULL;
// same mb_height formula as ff_MPV_common_init()
1462 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1463 s->mb_height = (s->height + 31) / 32 * 2;
1465 s->mb_height = (s->height + 15) / 16;
1467 if ((s->width || s->height) &&
1468 av_image_check_size(s->width, s->height, 0, s->avctx))
1469 return AVERROR_INVALIDDATA;
1471 if ((err = init_context_frame(s)))
1474 s->thread_context[0] = s;
// rebuild the slice-thread contexts exactly as in ff_MPV_common_init()
1476 if (s->width && s->height) {
1477 int nb_slices = s->slice_context_count;
1478 if (nb_slices > 1) {
1479 for (i = 1; i < nb_slices; i++) {
1480 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1481 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1484 for (i = 0; i < nb_slices; i++) {
1485 if (init_duplicate_context(s->thread_context[i]) < 0)
1487 s->thread_context[i]->start_mb_y =
1488 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1489 s->thread_context[i]->end_mb_y =
1490 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1493 err = init_duplicate_context(s);
1497 s->end_mb_y = s->mb_height;
1499 s->slice_context_count = nb_slices;
// fail: path (label elided from this view)
1504 ff_MPV_common_end(s);
1508 /* init common structure for both encoder and decoder */
// Full teardown: slice contexts, parse/bitstream buffers, the picture pool
// and the four special pictures, then the per-resolution tables.
// NOTE(review): braces and loop closers elided from this view.
1509 void ff_MPV_common_end(MpegEncContext *s)
1513 if (s->slice_context_count > 1) {
1514 for (i = 0; i < s->slice_context_count; i++) {
1515 free_duplicate_context(s->thread_context[i]);
// [0] is s itself — free only the duplicated contexts
1517 for (i = 1; i < s->slice_context_count; i++) {
1518 av_freep(&s->thread_context[i]);
1520 s->slice_context_count = 1;
1521 } else free_duplicate_context(s);
1523 av_freep(&s->parse_context.buffer);
1524 s->parse_context.buffer_size = 0;
1526 av_freep(&s->bitstream_buffer);
1527 s->allocated_bitstream_buffer_size = 0;
1530 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1531 ff_free_picture_tables(&s->picture[i]);
1532 ff_mpeg_unref_picture(s, &s->picture[i]);
1533 av_frame_free(&s->picture[i].f);
1536 av_freep(&s->picture);
1537 ff_free_picture_tables(&s->last_picture);
1538 ff_mpeg_unref_picture(s, &s->last_picture);
1539 av_frame_free(&s->last_picture.f);
1540 ff_free_picture_tables(&s->current_picture);
1541 ff_mpeg_unref_picture(s, &s->current_picture);
1542 av_frame_free(&s->current_picture.f);
1543 ff_free_picture_tables(&s->next_picture);
1544 ff_mpeg_unref_picture(s, &s->next_picture);
1545 av_frame_free(&s->next_picture.f);
1546 ff_free_picture_tables(&s->new_picture);
1547 ff_mpeg_unref_picture(s, &s->new_picture);
1548 av_frame_free(&s->new_picture.f);
1550 free_context_frame(s);
1552 s->context_initialized = 0;
1553 s->last_picture_ptr =
1554 s->next_picture_ptr =
1555 s->current_picture_ptr = NULL;
1556 s->linesize = s->uvlinesize = 0;
// Build the max_level[], max_run[] and index_run[] lookup tables of an
// RLTable from its table_run/table_level arrays, once per (last) group.
// If static_store is given the three tables share one static buffer per
// group; otherwise they are heap-allocated.
// NOTE(review): some lines (loop bounds for start/end, braces) are elided
// from this view.
1559 av_cold void ff_init_rl(RLTable *rl,
1560 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1562 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1563 uint8_t index_run[MAX_RUN + 1];
1564 int last, run, level, start, end, i;
1566 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1567 if (static_store && rl->max_level[0])
1570 /* compute max_level[], max_run[] and index_run[] */
1571 for (last = 0; last < 2; last++) {
// rl->n is used as the "unset" sentinel in index_run[]
1580 memset(max_level, 0, MAX_RUN + 1);
1581 memset(max_run, 0, MAX_LEVEL + 1);
1582 memset(index_run, rl->n, MAX_RUN + 1);
1583 for (i = start; i < end; i++) {
1584 run = rl->table_run[i];
1585 level = rl->table_level[i];
1586 if (index_run[run] == rl->n)
1588 if (level > max_level[run])
1589 max_level[run] = level;
1590 if (run > max_run[level])
1591 max_run[level] = run;
// copy the scratch tables either into the shared static buffer
// (packed back-to-back) or into fresh heap allocations
1594 rl->max_level[last] = static_store[last];
1596 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1597 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1599 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1601 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1602 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1604 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1606 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1607 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
// Precompute rl_vlc[q] for every qscale q in [0,32): for each VLC table
// entry, resolve run/level (already dequantized with qmul/qadd) so the
// decoder can skip a second lookup.
// NOTE(review): qmul setup and several branch bodies are elided from this
// view; illegal/escape/long-code cases fall through to elided assignments.
1611 av_cold void ff_init_vlc_rl(RLTable *rl)
1615 for (q = 0; q < 32; q++) {
1617 int qadd = (q - 1) | 1;
1623 for (i = 0; i < rl->vlc.table_size; i++) {
1624 int code = rl->vlc.table[i][0];
1625 int len = rl->vlc.table[i][1];
1628 if (len == 0) { // illegal code
1631 } else if (len < 0) { // more bits needed
1635 if (code == rl->n) { // esc
// normal code: decode run/level from the RL tables;
// codes past rl->last carry the "last coefficient" flag (+192)
1639 run = rl->table_run[code] + 1;
1640 level = rl->table_level[code] * qmul + qadd;
1641 if (code >= rl->last) run += 192;
1644 rl->rl_vlc[q][i].len = len;
1645 rl->rl_vlc[q][i].level = level;
1646 rl->rl_vlc[q][i].run = run;
// Unref every picture in the pool that is not currently a reference frame.
1651 static void release_unused_pictures(MpegEncContext *s)
1655 /* release non reference frames */
1656 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1657 if (!s->picture[i].reference)
1658 ff_mpeg_unref_picture(s, &s->picture[i]);
// Whether a pool slot can be recycled: never the last picture; free if it
// has no buffer, or if it is flagged for realloc and not a delayed ref.
// NOTE(review): the return statements for each branch are elided from view.
1662 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1664 if (pic == s->last_picture_ptr)
1666 if (pic->f->buf[0] == NULL)
1668 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
// Pick a free slot in s->picture[]. Two passes: first prefer completely
// empty slots (no buffer), then fall back to any slot pic_is_unused()
// accepts. Hitting neither is an internal error — see comment below.
// NOTE(review): the return-index lines and the shared/non-shared split are
// elided from this view.
1673 static int find_unused_picture(MpegEncContext *s, int shared)
1678 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1679 if (s->picture[i].f->buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1683 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1684 if (pic_is_unused(s, &s->picture[i]))
1689 av_log(s->avctx, AV_LOG_FATAL,
1690 "Internal error, picture buffer overflow\n");
1691 /* We could return -1, but the codec would crash trying to draw into a
1692 * non-existing frame anyway. This is safer than waiting for a random crash.
1693 * Also the return of this is never useful, an encoder must only allocate
1694 * as much as allowed in the specification. This has no relationship to how
1695 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1696 * enough for such valid streams).
1697 * Plus, a decoder has to check stream validity and remove frames if too
1698 * many reference frames are around. Waiting for "OOM" is not correct at
1699 * all. Similarly, missing reference frames have to be replaced by
1700 * interpolated/MC frames, anything else is a bug in the codec ...
// Public wrapper around find_unused_picture(): if the chosen slot was
// flagged needs_realloc, drop its tables/buffers so it gets rebuilt.
1706 int ff_find_unused_picture(MpegEncContext *s, int shared)
1708 int ret = find_unused_picture(s, shared);
1710 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1711 if (s->picture[ret].needs_realloc) {
1712 s->picture[ret].needs_realloc = 0;
1713 ff_free_picture_tables(&s->picture[ret]);
1714 ff_mpeg_unref_picture(s, &s->picture[ret]);
// Fill all three planes of a frame with mid-gray (0x80), honoring the
// pixel format's chroma subsampling. Used by the FF_DEBUG_NOMC path to
// blank a frame before motion vectors are visualized.
1720 static void gray_frame(AVFrame *frame)
1722 int i, h_chroma_shift, v_chroma_shift;
1724 av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1726 for(i=0; i<frame->height; i++)
1727 memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1728 for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1729 memset(frame->data[1] + frame->linesize[1]*i,
1730 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1731 memset(frame->data[2] + frame->linesize[2]*i,
1732 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1737 * generic function called after decoding
1738 * the header and before a frame is decoded.
// Steps: release stale references, pick/allocate the current picture, set
// its frame metadata, rotate last/next pointers, synthesize dummy
// last/next pictures when references are missing, take refs into
// s->last/next_picture, adjust pointers for field pictures, and select the
// dequantizers. Statement order matters throughout.
// NOTE(review): many lines (braces, declarations, some error returns) are
// elided from this view.
1740 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1746 if (!ff_thread_can_start_frame(avctx)) {
1747 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1751 /* mark & release old frames */
1752 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1753 s->last_picture_ptr != s->next_picture_ptr &&
1754 s->last_picture_ptr->f->buf[0]) {
1755 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1758 /* release forgotten pictures */
1759 /* if (mpeg124/h263) */
1760 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1761 if (&s->picture[i] != s->last_picture_ptr &&
1762 &s->picture[i] != s->next_picture_ptr &&
1763 s->picture[i].reference && !s->picture[i].needs_realloc) {
// with frame threading such leftovers are expected, so only log otherwise
1764 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1765 av_log(avctx, AV_LOG_ERROR,
1766 "releasing zombie picture\n");
1767 ff_mpeg_unref_picture(s, &s->picture[i]);
1771 ff_mpeg_unref_picture(s, &s->current_picture);
1773 release_unused_pictures(s);
1775 if (s->current_picture_ptr &&
1776 s->current_picture_ptr->f->buf[0] == NULL) {
1777 // we already have a unused image
1778 // (maybe it was set before reading the header)
1779 pic = s->current_picture_ptr;
1781 i = ff_find_unused_picture(s, 0);
1783 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1786 pic = &s->picture[i];
// reference flag setup (branch bodies partly elided from this view)
1790 if (!s->droppable) {
1791 if (s->pict_type != AV_PICTURE_TYPE_B)
1795 pic->f->coded_picture_number = s->coded_picture_number++;
1797 if (ff_alloc_picture(s, pic, 0) < 0)
1800 s->current_picture_ptr = pic;
1801 // FIXME use only the vars from current_pic
1802 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1803 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1804 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
// for field pictures top_field_first is derived from which field comes
// first in the bitstream
1805 if (s->picture_structure != PICT_FRAME)
1806 s->current_picture_ptr->f->top_field_first =
1807 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1809 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1810 !s->progressive_sequence;
1811 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1813 s->current_picture_ptr->f->pict_type = s->pict_type;
1814 // if (s->flags && CODEC_FLAG_QSCALE)
1815 //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1816 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1818 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1819 s->current_picture_ptr)) < 0)
// non-B frames rotate the reference pointers: next becomes last,
// current becomes next
1822 if (s->pict_type != AV_PICTURE_TYPE_B) {
1823 s->last_picture_ptr = s->next_picture_ptr;
1825 s->next_picture_ptr = s->current_picture_ptr;
1827 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1828 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1829 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1830 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1831 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1832 s->pict_type, s->droppable);
// Missing last reference (stream starts with P/B, or field-based first
// keyframe): fabricate a gray dummy so motion compensation has a source.
1834 if ((s->last_picture_ptr == NULL ||
1835 s->last_picture_ptr->f->buf[0] == NULL) &&
1836 (s->pict_type != AV_PICTURE_TYPE_I ||
1837 s->picture_structure != PICT_FRAME)) {
1838 int h_chroma_shift, v_chroma_shift;
1839 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1840 &h_chroma_shift, &v_chroma_shift);
1841 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1842 av_log(avctx, AV_LOG_DEBUG,
1843 "allocating dummy last picture for B frame\n");
1844 else if (s->pict_type != AV_PICTURE_TYPE_I)
1845 av_log(avctx, AV_LOG_ERROR,
1846 "warning: first frame is no keyframe\n");
1847 else if (s->picture_structure != PICT_FRAME)
1848 av_log(avctx, AV_LOG_DEBUG,
1849 "allocate dummy last picture for field based first keyframe\n");
1851 /* Allocate a dummy frame */
1852 i = ff_find_unused_picture(s, 0);
1854 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1857 s->last_picture_ptr = &s->picture[i];
1859 s->last_picture_ptr->reference = 3;
1860 s->last_picture_ptr->f->key_frame = 0;
1861 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1863 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1864 s->last_picture_ptr = NULL;
// paint the dummy mid-gray; with hwaccel/VDPAU the data pointers are
// opaque so the fill must be skipped
1868 if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1869 for(i=0; i<avctx->height; i++)
1870 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1871 0x80, avctx->width);
1872 for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1873 memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1874 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1875 memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1876 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
// FLV1/H263 use luma 16 instead of 0x80 for the dummy frame
1879 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1880 for(i=0; i<avctx->height; i++)
1881 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
// mark both fields of the dummy as fully decoded for frame threading
1885 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1886 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
// same trick for a missing next reference before a B frame
1888 if ((s->next_picture_ptr == NULL ||
1889 s->next_picture_ptr->f->buf[0] == NULL) &&
1890 s->pict_type == AV_PICTURE_TYPE_B) {
1891 /* Allocate a dummy frame */
1892 i = ff_find_unused_picture(s, 0);
1894 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1897 s->next_picture_ptr = &s->picture[i];
1899 s->next_picture_ptr->reference = 3;
1900 s->next_picture_ptr->f->key_frame = 0;
1901 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1903 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1904 s->next_picture_ptr = NULL;
1907 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1908 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1911 #if 0 // BUFREF-FIXME
1912 memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1913 memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
// refresh the working refs from the pointers
1915 if (s->last_picture_ptr) {
1916 ff_mpeg_unref_picture(s, &s->last_picture);
1917 if (s->last_picture_ptr->f->buf[0] &&
1918 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1919 s->last_picture_ptr)) < 0)
1922 if (s->next_picture_ptr) {
1923 ff_mpeg_unref_picture(s, &s->next_picture);
1924 if (s->next_picture_ptr->f->buf[0] &&
1925 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1926 s->next_picture_ptr)) < 0)
1930 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1931 s->last_picture_ptr->f->buf[0]));
// Field pictures: double the linesizes and, for the bottom field, offset
// the data pointers by one line so the planes address only that field.
1933 if (s->picture_structure!= PICT_FRAME) {
1935 for (i = 0; i < 4; i++) {
1936 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1937 s->current_picture.f->data[i] +=
1938 s->current_picture.f->linesize[i];
1940 s->current_picture.f->linesize[i] *= 2;
1941 s->last_picture.f->linesize[i] *= 2;
1942 s->next_picture.f->linesize[i] *= 2;
1946 s->err_recognition = avctx->err_recognition;
1948 /* set dequantizer, we can't do it during init as
1949 * it might change for mpeg4 and we can't do it in the header
1950 * decode as init is not called for mpeg4 there yet */
1951 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1952 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1953 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1954 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1955 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1956 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1958 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1959 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1962 if (s->avctx->debug & FF_DEBUG_NOMC) {
1963 gray_frame(s->current_picture_ptr->f);
1969 /* called after a frame has been decoded. */
// Signal frame-thread consumers that the whole current picture is ready.
1970 void ff_MPV_frame_end(MpegEncContext *s)
1974 if (s->current_picture.reference)
1975 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
// Clip a line segment (sx,sy)-(ex,ey) against the x range [0, maxx].
// Recurses with swapped endpoints so only one ordering needs handling;
// the surrounding conditions are elided from this view.
1979 static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
1982 return clip_line(ex, ey, sx, sy, maxx);
// interpolate y at the clip boundary using 64-bit math to avoid overflow
1987 *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
1994 *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
2002 * Draw a line from (ex, ey) -> (sx, sy).
2003 * @param w     width of the image
2004 * @param h     height of the image
2005 * @param stride stride/linesize of the image
2006 * @param color color of the arrow
// Anti-aliased line draw onto a single 8-bit plane using 16.16 fixed-point
// slope interpolation; x-major and y-major cases are handled separately.
// NOTE(review): some early-return and brace lines are elided from this view.
2008 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
2009 int w, int h, int stride, int color)
// clip against both axes; clip_line is called twice with coords swapped
2013 if (clip_line(&sx, &sy, &ex, &ey, w - 1))
2015 if (clip_line(&sy, &sx, &ey, &ex, h - 1))
2018 sx = av_clip(sx, 0, w - 1);
2019 sy = av_clip(sy, 0, h - 1);
2020 ex = av_clip(ex, 0, w - 1);
2021 ey = av_clip(ey, 0, h - 1);
2023 buf[sy * stride + sx] += color;
// x-major: ensure sx <= ex, then step x and blend two vertical neighbors
// weighted by the fractional part fr
2025 if (FFABS(ex - sx) > FFABS(ey - sy)) {
2027 FFSWAP(int, sx, ex);
2028 FFSWAP(int, sy, ey);
2030 buf += sx + sy * stride;
2032 f = ((ey - sy) << 16) / ex;
2033 for (x = 0; x <= ex; x++) {
2035 fr = (x * f) & 0xFFFF;
2036 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2037 if(fr) buf[(y + 1) * stride + x] += (color * fr       ) >> 16;
// y-major: same scheme with axes swapped
2041 FFSWAP(int, sx, ex);
2042 FFSWAP(int, sy, ey);
2044 buf += sx + sy * stride;
2047 f = ((ex - sx) << 16) / ey;
2050 for(y= 0; y <= ey; y++){
2052 fr = (y*f) & 0xFFFF;
2053 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2054 if(fr) buf[y * stride + x + 1] += (color * fr       ) >> 16;
2060 * Draw an arrow from (ex, ey) -> (sx, sy).
2061 * @param w     width of the image
2062 * @param h     height of the image
2063 * @param stride stride/linesize of the image
2064 * @param color color of the arrow
// Draws the shaft with draw_line() plus two short head strokes when the
// vector is long enough (> 3 px). "direction" decides which end gets the
// head via the initial endpoint swap.
// NOTE(review): a few lines (dx/dy setup, brace closers) are elided.
2066 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2067 int ey, int w, int h, int stride, int color, int tail, int direction)
2072 FFSWAP(int, sx, ex);
2073 FFSWAP(int, sy, ey);
// pre-clip to a generous margin so the fixed-point math below stays sane
2076 sx = av_clip(sx, -100, w + 100);
2077 sy = av_clip(sy, -100, h + 100);
2078 ex = av_clip(ex, -100, w + 100);
2079 ey = av_clip(ey, -100, h + 100);
2084 if (dx * dx + dy * dy > 3 * 3) {
2087 int length = ff_sqrt((rx * rx + ry * ry) << 8);
2089 // FIXME subpixel accuracy
2090 rx = ROUNDED_DIV(rx * 3 << 4, length);
2091 ry = ROUNDED_DIV(ry * 3 << 4, length);
// two head strokes perpendicular-ish to the shaft
2098 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2099 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2101 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
2105 * Print debugging info for the given picture.
// Two independent debug facilities: (1) a textual per-MB dump (skip counts,
// qscale, MB-type glyphs) to the log; (2) in-picture visualization of
// motion vectors, QP and MB types drawn directly into `pict`.
// NOTE(review): numerous lines (declarations, brace closers, `continue`s,
// some branch bodies) are elided from this view.
2107 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2108 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2110 int mb_width, int mb_height, int mb_stride, int quarter_sample)
// nothing to visualize for hwaccel frames (opaque data pointers)
2112 if (avctx->hwaccel || !mbtype_table
2113 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
2117 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2120 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2121 av_get_picture_type_char(pict->pict_type));
2122 for (y = 0; y < mb_height; y++) {
2123 for (x = 0; x < mb_width; x++) {
2124 if (avctx->debug & FF_DEBUG_SKIP) {
2125 int count = mbskip_table[x + y * mb_stride];
2128 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2130 if (avctx->debug & FF_DEBUG_QP) {
2131 av_log(avctx, AV_LOG_DEBUG, "%2d",
2132 qscale_table[x + y * mb_stride]);
2134 if (avctx->debug & FF_DEBUG_MB_TYPE) {
2135 int mb_type = mbtype_table[x + y * mb_stride];
2136 // Type & MV direction
// one glyph per MB: P=PCM, A=intra+acpred, i/I=intra 4x4/16x16,
// d/D=direct(+skip), g/G=GMC(+skip), S=skip, >/<=fwd/bwd only, X=bidir
2137 if (IS_PCM(mb_type))
2138 av_log(avctx, AV_LOG_DEBUG, "P");
2139 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2140 av_log(avctx, AV_LOG_DEBUG, "A");
2141 else if (IS_INTRA4x4(mb_type))
2142 av_log(avctx, AV_LOG_DEBUG, "i");
2143 else if (IS_INTRA16x16(mb_type))
2144 av_log(avctx, AV_LOG_DEBUG, "I");
2145 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2146 av_log(avctx, AV_LOG_DEBUG, "d");
2147 else if (IS_DIRECT(mb_type))
2148 av_log(avctx, AV_LOG_DEBUG, "D");
2149 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2150 av_log(avctx, AV_LOG_DEBUG, "g");
2151 else if (IS_GMC(mb_type))
2152 av_log(avctx, AV_LOG_DEBUG, "G");
2153 else if (IS_SKIP(mb_type))
2154 av_log(avctx, AV_LOG_DEBUG, "S");
2155 else if (!USES_LIST(mb_type, 1))
2156 av_log(avctx, AV_LOG_DEBUG, ">");
2157 else if (!USES_LIST(mb_type, 0))
2158 av_log(avctx, AV_LOG_DEBUG, "<");
2160 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2161 av_log(avctx, AV_LOG_DEBUG, "X");
// second glyph: partitioning (+ - | for 8x8/16x8/8x16)
2165 if (IS_8X8(mb_type))
2166 av_log(avctx, AV_LOG_DEBUG, "+");
2167 else if (IS_16X8(mb_type))
2168 av_log(avctx, AV_LOG_DEBUG, "-");
2169 else if (IS_8X16(mb_type))
2170 av_log(avctx, AV_LOG_DEBUG, "|");
2171 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2172 av_log(avctx, AV_LOG_DEBUG, " ");
2174 av_log(avctx, AV_LOG_DEBUG, "?");
// third glyph: interlaced flag
2177 if (IS_INTERLACED(mb_type))
2178 av_log(avctx, AV_LOG_DEBUG, "=");
2180 av_log(avctx, AV_LOG_DEBUG, " ");
2183 av_log(avctx, AV_LOG_DEBUG, "\n");
// ---- in-picture visualization path ----
2187 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2188 (avctx->debug_mv)) {
2189 const int shift = 1 + quarter_sample;
2193 int h_chroma_shift, v_chroma_shift, block_height;
2194 const int width = avctx->width;
2195 const int height = avctx->height;
// H.264/SVQ3 store MVs at 4x4 granularity (log2=2), others at 8x8
2196 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2197 const int mv_stride = (mb_width << mv_sample_log2) +
2198 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2200 *low_delay = 0; // needed to see the vectors without trashing the buffers
2202 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2204 av_frame_make_writable(pict);
2206 pict->opaque = NULL;
2207 ptr = pict->data[0];
2208 block_height = 16 >> v_chroma_shift;
2210 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2212 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2213 const int mb_index = mb_x + mb_y * mb_stride;
2214 if ((avctx->debug_mv) && motion_val[0]) {
// type 0/1/2 = P-forward / B-forward / B-backward; each is skipped
// unless the matching debug_mv bit and picture type apply
2216 for (type = 0; type < 3; type++) {
2220 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2221 (pict->pict_type!= AV_PICTURE_TYPE_P))
2226 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2227 (pict->pict_type!= AV_PICTURE_TYPE_B))
2232 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2233 (pict->pict_type!= AV_PICTURE_TYPE_B))
2238 if (!USES_LIST(mbtype_table[mb_index], direction))
// arrow origin depends on partition shape; MV is scaled by `shift`
// (half- vs quarter-pel) and drawn from block center
2241 if (IS_8X8(mbtype_table[mb_index])) {
2243 for (i = 0; i < 4; i++) {
2244 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2245 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2246 int xy = (mb_x * 2 + (i & 1) +
2247 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2248 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2249 int my = (motion_val[direction][xy][1] >> shift) + sy;
2250 draw_arrow(ptr, sx, sy, mx, my, width,
2251 height, pict->linesize[0], 100, 0, direction);
2253 } else if (IS_16X8(mbtype_table[mb_index])) {
2255 for (i = 0; i < 2; i++) {
2256 int sx = mb_x * 16 + 8;
2257 int sy = mb_y * 16 + 4 + 8 * i;
2258 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2259 int mx = (motion_val[direction][xy][0] >> shift);
2260 int my = (motion_val[direction][xy][1] >> shift);
2262 if (IS_INTERLACED(mbtype_table[mb_index]))
2265 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2266 height, pict->linesize[0], 100, 0, direction);
2268 } else if (IS_8X16(mbtype_table[mb_index])) {
2270 for (i = 0; i < 2; i++) {
2271 int sx = mb_x * 16 + 4 + 8 * i;
2272 int sy = mb_y * 16 + 8;
2273 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2274 int mx = motion_val[direction][xy][0] >> shift;
2275 int my = motion_val[direction][xy][1] >> shift;
2277 if (IS_INTERLACED(mbtype_table[mb_index]))
2280 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2281 height, pict->linesize[0], 100, 0, direction);
// default: one 16x16 arrow from the MB center
2284 int sx= mb_x * 16 + 8;
2285 int sy= mb_y * 16 + 8;
2286 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2287 int mx= (motion_val[direction][xy][0]>>shift) + sx;
2288 int my= (motion_val[direction][xy][1]>>shift) + sy;
2289 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
// QP visualization: tint chroma planes with the scaled qscale,
// replicated into a uint64 for 8-pixels-at-a-time stores
2293 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2294 uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2295 0x0101010101010101ULL;
2297 for (y = 0; y < block_height; y++) {
2298 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2299 (block_height * mb_y + y) *
2300 pict->linesize[1]) = c;
2301 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2302 (block_height * mb_y + y) *
2303 pict->linesize[2]) = c;
// MB-type visualization: pick a U/V hue per type, paint chroma,
// and overlay partition/interlace markers on luma
2306 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2308 int mb_type = mbtype_table[mb_index];
2311 #define COLOR(theta, r) \
2312 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2313 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2317 if (IS_PCM(mb_type)) {
2319 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2320 IS_INTRA16x16(mb_type)) {
2322 } else if (IS_INTRA4x4(mb_type)) {
2324 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2326 } else if (IS_DIRECT(mb_type)) {
2328 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2330 } else if (IS_GMC(mb_type)) {
2332 } else if (IS_SKIP(mb_type)) {
2334 } else if (!USES_LIST(mb_type, 1)) {
2336 } else if (!USES_LIST(mb_type, 0)) {
2339 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2343 u *= 0x0101010101010101ULL;
2344 v *= 0x0101010101010101ULL;
2345 for (y = 0; y < block_height; y++) {
2346 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2347 (block_height * mb_y + y) * pict->linesize[1]) = u;
2348 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2349 (block_height * mb_y + y) * pict->linesize[2]) = v;
// segmentation lines: XOR 0x80 so markers stay visible on any bg
2353 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2354 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2355 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2356 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2357 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2359 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2360 for (y = 0; y < 16; y++)
2361 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2362 pict->linesize[0]] ^= 0x80;
2364 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2365 int dm = 1 << (mv_sample_log2 - 2);
2366 for (i = 0; i < 4; i++) {
2367 int sx = mb_x * 16 + 8 * (i & 1);
2368 int sy = mb_y * 16 + 8 * (i >> 1);
2369 int xy = (mb_x * 2 + (i & 1) +
2370 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
// split markers only where neighboring sub-block MVs differ
2372 int32_t *mv = (int32_t *) &motion_val[0][xy];
2373 if (mv[0] != mv[dm] ||
2374 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2375 for (y = 0; y < 8; y++)
2376 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2377 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2378 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2379 pict->linesize[0]) ^= 0x8080808080808080ULL;
2383 if (IS_INTERLACED(mb_type) &&
2384 avctx->codec->id == AV_CODEC_ID_H264) {
// reset the skip count once the MB has been displayed
2388 mbskip_table[mb_index] = 0;
// Convenience wrapper: forward the MpegEncContext fields to the generic
// ff_print_debug_info2().
2394 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2396 ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
2397 p->qscale_table, p->motion_val, &s->low_delay,
2398 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/**
 * Export the qscale (QP) table of Picture @p p to AVFrame @p f.
 * Takes a fresh reference to the qscale-table buffer and advances it past
 * the first (edge) macroblock row plus one column so it starts at the first
 * visible MB; ownership of @c ref is passed to av_frame_set_qp_table().
 *
 * @param qp_type codec-specific QP table type forwarded unchanged
 * @return 0 on success, AVERROR(ENOMEM) if the buffer reference failed
 */
2401 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2403 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
/* offset = one full MB row plus one MB: skips the allocated edge padding */
2404 int offset = 2*s->mb_stride + 1;
2406 return AVERROR(ENOMEM);
/* the remaining buffer must still cover every visible MB row */
2407 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2408 ref->size -= offset;
2409 ref->data += offset;
2410 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/**
 * Half-pel motion compensation of one w x h block at reduced ("lowres")
 * resolution.
 *
 * The subpel phase (sx, sy) is taken from the low bits of the motion vector
 * (mask depends on the lowres shift), the integer part is added to the
 * source position, and the block is interpolated with the h264 chroma MC
 * function table @p pix_op, indexed by min(lowres, 3). If the referenced
 * area extends past (h_edge_pos, v_edge_pos), the source is first copied
 * into the edge-emulation buffer via emulated_edge_mc().
 *
 * @param field_based   1 when operating on a single field (halves vertical range)
 * @param field_select  which field of the reference to use
 * @return value flows from the caller's usage — NOTE(review): the visible
 *         excerpt does not show the return statement; confirm in full source.
 */
2413 static inline int hpel_motion_lowres(MpegEncContext *s,
2414 uint8_t *dest, uint8_t *src,
2415 int field_based, int field_select,
2416 int src_x, int src_y,
2417 int width, int height, ptrdiff_t stride,
2418 int h_edge_pos, int v_edge_pos,
2419 int w, int h, h264_chroma_mc_func *pix_op,
2420 int motion_x, int motion_y)
2422 const int lowres = s->avctx->lowres;
2423 const int op_index = FFMIN(lowres, 3);
/* s_mask keeps (lowres+1) fractional MV bits as the subpel phase */
2424 const int s_mask = (2 << lowres) - 1;
2428 if (s->quarter_sample) {
2433 sx = motion_x & s_mask;
2434 sy = motion_y & s_mask;
/* note: ">> lowres + 1" parses as ">> (lowres + 1)" — integer-pel part */
2435 src_x += motion_x >> lowres + 1;
2436 src_y += motion_y >> lowres + 1;
2438 src += src_y * stride + src_x;
/* out-of-picture reference: fall back to edge emulation */
2440 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2441 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2442 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2443 s->linesize, s->linesize,
2444 w + 1, (h + 1) << field_based,
2445 src_x, src_y << field_based,
2446 h_edge_pos, v_edge_pos);
2447 src = s->edge_emu_buffer;
/* rescale subpel phase to the 1/8-pel units expected by pix_op */
2451 sx = (sx << 2) >> lowres;
2452 sy = (sy << 2) >> lowres;
2455 pix_op[op_index](dest, src, stride, h, sx, sy);
2459 /* apply one mpeg motion vector to the three components */
/**
 * Apply one MPEG motion vector to luma and both chroma planes at reduced
 * (lowres) resolution.
 *
 * Computes integer source positions and subpel phases for Y/Cb/Cr according
 * to the container's chroma MV convention (FMT_H263: derived from luma MV;
 * FMT_H261: full-pel chroma; otherwise depends on chroma_x/y_shift), uses
 * emulated_edge_mc() when the reference area leaves the padded picture, and
 * interpolates with the h264 chroma MC tables. Chroma is skipped entirely
 * under CODEC_FLAG_GRAY.
 */
2460 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2467 uint8_t **ref_picture,
2468 h264_chroma_mc_func *pix_op,
2469 int motion_x, int motion_y,
2472 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2473 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2474 ptrdiff_t uvlinesize, linesize;
2475 const int lowres = s->avctx->lowres;
2476 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2477 const int block_s = 8>>lowres;
2478 const int s_mask = (2 << lowres) - 1;
2479 const int h_edge_pos = s->h_edge_pos >> lowres;
2480 const int v_edge_pos = s->v_edge_pos >> lowres;
/* field pictures double the effective stride */
2481 linesize = s->current_picture.f->linesize[0] << field_based;
2482 uvlinesize = s->current_picture.f->linesize[1] << field_based;
2484 // FIXME obviously not perfect but qpel will not work in lowres anyway
2485 if (s->quarter_sample) {
2491 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
/* split luma MV into subpel phase and integer source position */
2494 sx = motion_x & s_mask;
2495 sy = motion_y & s_mask;
2496 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2497 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* chroma MV derivation depends on the output format */
2499 if (s->out_format == FMT_H263) {
2500 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2501 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2502 uvsrc_x = src_x >> 1;
2503 uvsrc_y = src_y >> 1;
2504 } else if (s->out_format == FMT_H261) {
2505 // even chroma mv's are full pel in H261
2508 uvsx = (2 * mx) & s_mask;
2509 uvsy = (2 * my) & s_mask;
2510 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2511 uvsrc_y = mb_y * block_s + (my >> lowres);
2513 if(s->chroma_y_shift){
2518 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2519 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2521 if(s->chroma_x_shift){
2525 uvsy = motion_y & s_mask;
2527 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2530 uvsx = motion_x & s_mask;
2531 uvsy = motion_y & s_mask;
2538 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2539 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2540 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* reference outside padded picture: emulate the edges for all planes */
2542 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2543 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2544 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2545 linesize >> field_based, linesize >> field_based,
2546 17, 17 + field_based,
2547 src_x, src_y << field_based, h_edge_pos,
2549 ptr_y = s->edge_emu_buffer;
2550 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2551 uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2552 uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2553 s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2554 uvlinesize >> field_based, uvlinesize >> field_based,
2556 uvsrc_x, uvsrc_y << field_based,
2557 h_edge_pos >> 1, v_edge_pos >> 1);
2558 s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2559 uvlinesize >> field_based,uvlinesize >> field_based,
2561 uvsrc_x, uvsrc_y << field_based,
2562 h_edge_pos >> 1, v_edge_pos >> 1);
2568 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
2570 dest_y += s->linesize;
2571 dest_cb += s->uvlinesize;
2572 dest_cr += s->uvlinesize;
2576 ptr_y += s->linesize;
2577 ptr_cb += s->uvlinesize;
2578 ptr_cr += s->uvlinesize;
/* convert subpel phases to 1/8-pel units and interpolate */
2581 sx = (sx << 2) >> lowres;
2582 sy = (sy << 2) >> lowres;
2583 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2585 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2586 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2587 uvsx = (uvsx << 2) >> lowres;
2588 uvsy = (uvsy << 2) >> lowres;
2590 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2591 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2594 // FIXME h261 lowres loop filter
/**
 * Chroma motion compensation for a 4MV (8x8) macroblock at lowres.
 *
 * The four luma vectors were combined by the caller; here the single chroma
 * vector is rounded with the H.263 chroma rounding rule, the Cb and Cr
 * blocks are fetched (sharing one source offset) and interpolated with the
 * h264 chroma MC table, using edge emulation when the reference lies
 * outside the padded chroma plane.
 */
2597 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2598 uint8_t *dest_cb, uint8_t *dest_cr,
2599 uint8_t **ref_picture,
2600 h264_chroma_mc_func * pix_op,
2603 const int lowres = s->avctx->lowres;
2604 const int op_index = FFMIN(lowres, 3);
2605 const int block_s = 8 >> lowres;
2606 const int s_mask = (2 << lowres) - 1;
/* chroma planes are half size: one extra shift on the edge positions */
2607 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2608 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2609 int emu = 0, src_x, src_y, sx, sy;
2613 if (s->quarter_sample) {
2618 /* In case of 8X8, we construct a single chroma motion vector
2619 with a special rounding */
2620 mx = ff_h263_round_chroma(mx);
2621 my = ff_h263_round_chroma(my);
2625 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2626 src_y = s->mb_y * block_s + (my >> lowres + 1);
2628 offset = src_y * s->uvlinesize + src_x;
2629 ptr = ref_picture[1] + offset;
2630 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2631 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2632 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2633 s->uvlinesize, s->uvlinesize,
2635 src_x, src_y, h_edge_pos, v_edge_pos);
2636 ptr = s->edge_emu_buffer;
2639 sx = (sx << 2) >> lowres;
2640 sy = (sy << 2) >> lowres;
2641 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr uses the same offset and (if needed) the same edge emulation */
2643 ptr = ref_picture[2] + offset;
2645 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2646 s->uvlinesize, s->uvlinesize,
2648 src_x, src_y, h_edge_pos, v_edge_pos);
2649 ptr = s->edge_emu_buffer;
2651 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2655 * motion compensation of a single macroblock
2657 * @param dest_y luma destination pointer
2658 * @param dest_cb chroma cb/u destination pointer
2659 * @param dest_cr chroma cr/v destination pointer
2660 * @param dir direction (0->forward, 1->backward)
2661 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2662 * @param pix_op halfpel motion compensation function (average or put normally)
2663 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Dispatches on s->mv_type: 16x16 whole-MB, 8x8 four-vector, field-based
 * 16x8, and dual-prime style modes; each case forwards to
 * mpeg_motion_lowres() / hpel_motion_lowres() / chroma_4mv_motion_lowres(). */
2665 static inline void MPV_motion_lowres(MpegEncContext *s,
2666 uint8_t *dest_y, uint8_t *dest_cb,
2668 int dir, uint8_t **ref_picture,
2669 h264_chroma_mc_func *pix_op)
2673 const int lowres = s->avctx->lowres;
2674 const int block_s = 8 >>lowres;
2679 switch (s->mv_type) {
/* MV_TYPE_16X16: one vector for the whole macroblock */
2681 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2683 ref_picture, pix_op,
2684 s->mv[dir][0][0], s->mv[dir][0][1],
/* MV_TYPE_8X8: four luma vectors, accumulated for the chroma MV */
2690 for (i = 0; i < 4; i++) {
2691 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2692 s->linesize) * block_s,
2693 ref_picture[0], 0, 0,
2694 (2 * mb_x + (i & 1)) * block_s,
2695 (2 * mb_y + (i >> 1)) * block_s,
2696 s->width, s->height, s->linesize,
2697 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2698 block_s, block_s, pix_op,
2699 s->mv[dir][i][0], s->mv[dir][i][1]);
2701 mx += s->mv[dir][i][0];
2702 my += s->mv[dir][i][1];
2705 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2706 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* MV_TYPE_FIELD: one vector per field */
2710 if (s->picture_structure == PICT_FRAME) {
2712 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2713 1, 0, s->field_select[dir][0],
2714 ref_picture, pix_op,
2715 s->mv[dir][0][0], s->mv[dir][0][1],
2718 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2719 1, 1, s->field_select[dir][1],
2720 ref_picture, pix_op,
2721 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture referencing the opposite parity of the current frame */
2724 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2725 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2726 ref_picture = s->current_picture_ptr->f->data;
2729 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2730 0, 0, s->field_select[dir][0],
2731 ref_picture, pix_op,
2733 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* MV_TYPE_16X8: two vectors, upper/lower half of the MB */
2737 for (i = 0; i < 2; i++) {
2738 uint8_t **ref2picture;
2740 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2741 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2742 ref2picture = ref_picture;
2744 ref2picture = s->current_picture_ptr->f->data;
2747 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2748 0, 0, s->field_select[dir][i],
2749 ref2picture, pix_op,
2750 s->mv[dir][i][0], s->mv[dir][i][1] +
2751 2 * block_s * i, block_s, mb_y >> 1);
2753 dest_y += 2 * block_s * s->linesize;
2754 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2755 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* MV_TYPE_DMV: dual-prime — put first prediction, then average */
2759 if (s->picture_structure == PICT_FRAME) {
2760 for (i = 0; i < 2; i++) {
2762 for (j = 0; j < 2; j++) {
2763 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2765 ref_picture, pix_op,
2766 s->mv[dir][2 * i + j][0],
2767 s->mv[dir][2 * i + j][1],
2770 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2773 for (i = 0; i < 2; i++) {
2774 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2775 0, 0, s->picture_structure != i + 1,
2776 ref_picture, pix_op,
2777 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2778 2 * block_s, mb_y >> 1);
2780 // after put we make avg of the same block
2781 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2783 // opposite parity is always in the same
2784 // frame if this is second field
2785 if (!s->first_field) {
2786 ref_picture = s->current_picture_ptr->f->data;
2797 * find the lowest MB row referenced in the MVs
/**
 * Scan the current MB's motion vectors (direction @p dir) and return the
 * lowest macroblock row of the reference frame they can touch, clamped to
 * [0, mb_height-1]. Used for frame-threading progress waits.
 * Returns mb_height-1 (the whole frame) for field pictures or GMC (mcsel).
 */
2799 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2801 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2802 int my, off, i, mvs;
2804 if (s->picture_structure != PICT_FRAME || s->mcsel)
2807 switch (s->mv_type) {
2821 for (i = 0; i < mvs; i++) {
/* normalize half-pel MVs to quarter-pel units before taking the extremes */
2822 my = s->mv[dir][i][1]<<qpel_shift;
2823 my_max = FFMAX(my_max, my);
2824 my_min = FFMIN(my_min, my);
/* 63>>6: round the largest vertical excursion up to whole MB rows */
2827 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2829 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2831 return s->mb_height-1;
2834 /* put block[] to dest[] */
/* Dequantize an intra block and write (overwrite) its IDCT to dest. */
2835 static inline void put_dct(MpegEncContext *s,
2836 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2838 s->dct_unquantize_intra(s, block, i, qscale);
2839 s->idsp.idct_put(dest, line_size, block);
2842 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block to dest; no-op when the
 * block has no coded coefficients (block_last_index < 0). */
2843 static inline void add_dct(MpegEncContext *s,
2844 int16_t *block, int i, uint8_t *dest, int line_size)
2846 if (s->block_last_index[i] >= 0) {
2847 s->idsp.idct_add(dest, line_size, block);
/* Dequantize an inter block, then add its IDCT to dest; skipped entirely
 * when the block has no coded coefficients. */
2851 static inline void add_dequant_dct(MpegEncContext *s,
2852 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2854 if (s->block_last_index[i] >= 0) {
2855 s->dct_unquantize_inter(s, block, i, qscale);
2857 s->idsp.idct_add(dest, line_size, block);
2862 * Clean dc, ac, coded_block for the current non-intra MB.
/**
 * Reset the intra-prediction state (DC predictors to 1024, AC coefficient
 * history to 0, and — for MSMPEG4 v3+ — the coded_block flags) for the
 * current macroblock, so a following intra MB does not predict from stale
 * non-intra data. Luma uses the 8x8-block grid (b8_stride), chroma the
 * MB grid (mb_stride).
 */
2864 void ff_clean_intra_table_entries(MpegEncContext *s)
2866 int wrap = s->b8_stride;
2867 int xy = s->block_index[0];
/* luma: reset the DC predictors of all four 8x8 blocks of this MB */
2870 s->dc_val[0][xy + 1 ] =
2871 s->dc_val[0][xy + wrap] =
2872 s->dc_val[0][xy + 1 + wrap] = 1024;
2874 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2875 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2876 if (s->msmpeg4_version>=3) {
2877 s->coded_block[xy ] =
2878 s->coded_block[xy + 1 ] =
2879 s->coded_block[xy + wrap] =
2880 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one entry per MB */
2883 wrap = s->mb_stride;
2884 xy = s->mb_x + s->mb_y * wrap;
2886 s->dc_val[2][xy] = 1024;
2888 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2889 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2891 s->mbintra_table[xy]= 0;
2894 /* generic function called after a macroblock has been parsed by the
2895 decoder or after it has been encoded by the encoder.
2897 Important variables used:
2898 s->mb_intra : true if intra macroblock
2899 s->mv_dir : motion vector direction
2900 s->mv_type : motion vector type
2901 s->mv : motion vector
2902 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstructs one macroblock: hwaccel hand-off, DC-predictor maintenance,
 * skip handling, motion compensation (lowres or full-res path selected by
 * the compile-time-constant lowres_flag), and dequant+IDCT of the residue.
 * is_mpeg12 is also compile-time constant so dead branches fold away. */
2904 static av_always_inline
2905 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2906 int lowres_flag, int is_mpeg12)
2908 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2911 s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
2912 s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
2916 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2917 /* print DCT coefficients */
2919 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2921 for(j=0; j<64; j++){
2922 av_log(s->avctx, AV_LOG_DEBUG, "%5d",
2923 block[i][s->idsp.idct_permutation[j]]);
2925 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2929 s->current_picture.qscale_table[mb_xy] = s->qscale;
2931 /* update DC predictors for P macroblocks */
2933 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2934 if(s->mbintra_table[mb_xy])
2935 ff_clean_intra_table_entries(s);
2939 s->last_dc[2] = 128 << s->intra_dc_precision;
2942 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2943 s->mbintra_table[mb_xy]=1;
/* full reconstruction is skipped for some encoder-only configurations */
2945 if ( (s->flags&CODEC_FLAG_PSNR)
2946 || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
2947 || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2948 uint8_t *dest_y, *dest_cb, *dest_cr;
2949 int dct_linesize, dct_offset;
2950 op_pixels_func (*op_pix)[4];
2951 qpel_mc_func (*op_qpix)[16];
2952 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2953 const int uvlinesize = s->current_picture.f->linesize[1];
2954 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2955 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2957 /* avoid copy if macroblock skipped in last frame too */
2958 /* skip only during decoding as we might trash the buffers during encoding a bit */
2960 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2962 if (s->mb_skipped) {
2964 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2966 } else if(!s->current_picture.reference) {
2969 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT doubles the line stride and offsets to the second field */
2973 dct_linesize = linesize << s->interlaced_dct;
2974 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2978 dest_cb= s->dest[1];
2979 dest_cr= s->dest[2];
/* non-readable B-frame destinations go through a scratchpad first */
2981 dest_y = s->b_scratchpad;
2982 dest_cb= s->b_scratchpad+16*linesize;
2983 dest_cr= s->b_scratchpad+32*linesize;
2987 /* motion handling */
2988 /* decoding or more than one mb_type (MC was already done otherwise) */
2991 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2992 if (s->mv_dir & MV_DIR_FORWARD) {
2993 ff_thread_await_progress(&s->last_picture_ptr->tf,
2994 ff_MPV_lowest_referenced_row(s, 0),
2997 if (s->mv_dir & MV_DIR_BACKWARD) {
2998 ff_thread_await_progress(&s->next_picture_ptr->tf,
2999 ff_MPV_lowest_referenced_row(s, 1),
/* lowres path: put forward prediction, then average the backward one */
3005 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
3007 if (s->mv_dir & MV_DIR_FORWARD) {
3008 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
3009 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
3011 if (s->mv_dir & MV_DIR_BACKWARD) {
3012 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
/* full-res path */
3015 op_qpix = s->me.qpel_put;
3016 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
3017 op_pix = s->hdsp.put_pixels_tab;
3019 op_pix = s->hdsp.put_no_rnd_pixels_tab;
3021 if (s->mv_dir & MV_DIR_FORWARD) {
3022 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
3023 op_pix = s->hdsp.avg_pixels_tab;
3024 op_qpix= s->me.qpel_avg;
3026 if (s->mv_dir & MV_DIR_BACKWARD) {
3027 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
3032 /* skip dequant / idct if we are really late ;) */
3033 if(s->avctx->skip_idct){
3034 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
3035 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
3036 || s->avctx->skip_idct >= AVDISCARD_ALL)
3040 /* add dct residue */
3041 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
3042 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
3043 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3044 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3045 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3046 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3048 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3049 if (s->chroma_y_shift){
3050 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3051 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3055 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3056 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3057 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3058 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* residue already dequantized by the bitstream decoder */
3061 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3062 add_dct(s, block[0], 0, dest_y , dct_linesize);
3063 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3064 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3065 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3067 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3068 if(s->chroma_y_shift){//Chroma420
3069 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3070 add_dct(s, block[5], 5, dest_cr, uvlinesize);
3073 dct_linesize = uvlinesize << s->interlaced_dct;
3074 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3076 add_dct(s, block[4], 4, dest_cb, dct_linesize);
3077 add_dct(s, block[5], 5, dest_cr, dct_linesize);
3078 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3079 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3080 if(!s->chroma_x_shift){//Chroma444
3081 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3082 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3083 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3084 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3089 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3090 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3093 /* dct only in intra block */
3094 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
3095 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3096 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3097 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3098 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3100 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3101 if(s->chroma_y_shift){
3102 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3103 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3107 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3108 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3109 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3110 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra blocks: dequantized already, just IDCT */
3114 s->idsp.idct_put(dest_y, dct_linesize, block[0]);
3115 s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3116 s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
3117 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3119 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3120 if(s->chroma_y_shift){
3121 s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
3122 s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
3125 dct_linesize = uvlinesize << s->interlaced_dct;
3126 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3128 s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
3129 s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
3130 s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3131 s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3132 if(!s->chroma_x_shift){//Chroma444
3133 s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3134 s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3135 s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3136 s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* scratchpad was used: copy the finished MB to the real destination */
3144 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3145 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3146 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry: dispatch to the av_always_inline worker with compile-time
 * constant (lowres_flag, is_mpeg12) arguments so each of the four variants
 * is specialized by the compiler. */
3151 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
3153 if(s->out_format == FMT_MPEG1) {
3154 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
3155 else MPV_decode_mb_internal(s, block, 0, 1);
3158 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
3159 else MPV_decode_mb_internal(s, block, 0, 0);
/**
 * Notify the user's draw_horiz_band callback that rows [y, y+h) of the
 * current picture are ready; forwards context state to ff_draw_horiz_band().
 */
3162 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3164 ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3165 s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3166 s->first_field, s->low_delay);
3169 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
/* Set up the per-MB block_index[] (8x8-block grid indices for the 4 luma
 * and 2 chroma blocks) and the s->dest[] plane pointers for the current
 * (mb_x, mb_y), taking lowres scaling, chroma subsampling, and
 * field/frame picture structure into account. */
3170 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3171 const int uvlinesize = s->current_picture.f->linesize[1];
/* mb_size: log2 of the MB edge in pixels after the lowres shift */
3172 const int mb_size= 4 - s->avctx->lowres;
3174 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3175 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3176 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3177 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3178 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3179 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3180 //block_index is not used by mpeg2, so it is not affected by chroma_format
3182 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
3183 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3184 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3186 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3188 if(s->picture_structure==PICT_FRAME){
3189 s->dest[0] += s->mb_y * linesize << mb_size;
3190 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3191 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: only every other MB row lands in this field */
3193 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3194 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3195 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3196 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3202 * Permute an 8x8 block.
3203 * @param block the block which will be permuted according to the given permutation vector
3204 * @param permutation the permutation vector
3205 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3206 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3207 * (inverse) permutated to scantable order!
3209 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3215 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* First pass gathers the up-to-(last+1) nonzero coefficients into a temp
 * copy (only positions reachable via the scantable need touching)... */
3217 for(i=0; i<=last; i++){
3218 const int j= scantable[i];
/* ...second pass scatters them back through the permutation vector. */
3223 for(i=0; i<=last; i++){
3224 const int j= scantable[i];
3225 const int perm_j= permutation[j];
3226 block[perm_j]= temp[j];
/**
 * Flush decoder state (e.g. after a seek): unreference every picture in the
 * pool and the current/last/next pictures, reset the MB position, the
 * bitstream parse context, and the bitstream buffer. Safe to call before
 * the picture array exists.
 */
3230 void ff_mpeg_flush(AVCodecContext *avctx){
3232 MpegEncContext *s = avctx->priv_data;
3234 if(s==NULL || s->picture==NULL)
3237 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3238 ff_mpeg_unref_picture(s, &s->picture[i]);
3239 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3241 ff_mpeg_unref_picture(s, &s->current_picture);
3242 ff_mpeg_unref_picture(s, &s->last_picture);
3243 ff_mpeg_unref_picture(s, &s->next_picture);
3245 s->mb_x= s->mb_y= 0;
/* parser state: -1 means "no start code found yet" */
3248 s->parse_context.state= -1;
3249 s->parse_context.frame_start_found= 0;
3250 s->parse_context.overread= 0;
3251 s->parse_context.overread_index= 0;
3252 s->parse_context.index= 0;
3253 s->parse_context.last_index= 0;
3254 s->bitstream_buffer_size=0;
3259 * set qscale and update qscale dependent variables.
/* Clamps qscale to the valid range (upper bound 31 visible here), then
 * refreshes the derived chroma qscale and the luma/chroma DC scale factors
 * from their per-codec lookup tables. */
3261 void ff_set_qscale(MpegEncContext * s, int qscale)
3265 else if (qscale > 31)
3269 s->chroma_qscale= s->chroma_qscale_table[qscale];
3271 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3272 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report the current MB row as decoded for frame-threading consumers;
 * suppressed for B frames, partitioned frames, and after decode errors
 * (the row cannot be trusted / is not referenced in those cases). */
3275 void ff_MPV_report_decode_progress(MpegEncContext *s)
3277 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3278 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);