2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/timer.h"
37 #include "h264chroma.h"
41 #include "mpegutils.h"
42 #include "mpegvideo.h"
/* Default chroma quantiser table: identity mapping, i.e. chroma uses the
 * same qscale as luma for every index 0..31. */
49 static const uint8_t ff_default_chroma_qscale_table[32] = {
50 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
51 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
52 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: constant 8 regardless of qscale
 * (table is indexed by qscale; also reused as entry 0 of
 * ff_mpeg2_dc_scale_table below). */
55 const uint8_t ff_mpeg1_dc_scale_table[128] = {
56 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
57 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table, constant 4 for all qscale values.
 * NOTE(review): presumably selected by intra_dc_precision via
 * ff_mpeg2_dc_scale_table[1] — confirm at call sites. */
67 static const uint8_t mpeg2_dc_scale_table1[128] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table, constant 2 for all qscale values
 * (entry 2 of ff_mpeg2_dc_scale_table). */
79 static const uint8_t mpeg2_dc_scale_table2[128] = {
80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
81 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table, constant 1 for all qscale values
 * (entry 3 of ff_mpeg2_dc_scale_table — i.e. no DC scaling). */
91 static const uint8_t mpeg2_dc_scale_table3[128] = {
92 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
93 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale tables giving divisors 8/4/2/1; entry 0 reuses the MPEG-1
 * table. NOTE(review): outer index is presumably intra_dc_precision
 * (0..3) — confirm against callers. */
103 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
104 ff_mpeg1_dc_scale_table,
105 mpeg2_dc_scale_table1,
106 mpeg2_dc_scale_table2,
107 mpeg2_dc_scale_table3,
/* Alternate horizontal scan order for the 64 coefficients of an 8x8
 * block (values are raster positions, listed in scan order). */
110 const uint8_t ff_alternate_horizontal_scan[64] = {
111 0, 1, 2, 3, 8, 9, 16, 17,
112 10, 11, 4, 5, 6, 7, 15, 14,
113 13, 12, 19, 18, 24, 25, 32, 33,
114 26, 27, 20, 21, 22, 23, 28, 29,
115 30, 31, 34, 35, 40, 41, 48, 49,
116 42, 43, 36, 37, 38, 39, 44, 45,
117 46, 47, 50, 51, 56, 57, 58, 59,
118 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical scan order for the 64 coefficients of an 8x8
 * block; installed as inter/intra scantable when alternate_scan is set
 * (see ff_dct_common_init). */
121 const uint8_t ff_alternate_vertical_scan[64] = {
122 0, 8, 16, 24, 1, 9, 2, 10,
123 17, 25, 32, 40, 48, 56, 57, 49,
124 41, 33, 26, 18, 3, 11, 4, 12,
125 19, 27, 34, 42, 50, 58, 35, 43,
126 51, 59, 20, 28, 5, 13, 6, 14,
127 21, 29, 36, 44, 52, 60, 37, 45,
128 53, 61, 22, 30, 7, 15, 23, 31,
129 38, 46, 54, 62, 39, 47, 55, 63,
/* Dequantize an intra block, MPEG-1 style (C reference version).
 * block: coefficients in permuted scan order; n: block index within the
 * macroblock (n < 4 => luma, else chroma); qscale: quantiser scale. */
132 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
133 int16_t *block, int n, int qscale)
135 int i, level, nCoeffs;
136 const uint16_t *quant_matrix;
138 nCoeffs= s->block_last_index[n];
/* DC coefficient uses the per-plane DC scale, not the quant matrix. */
140 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
141 /* XXX: only mpeg1 */
142 quant_matrix = s->intra_matrix;
143 for(i=1;i<=nCoeffs;i++) {
144 int j= s->intra_scantable.permutated[i];
/* AC: scale by qscale * matrix (>>3), then "(level - 1) | 1" forces the
 * magnitude odd (MPEG-1 oddification / mismatch control). */
149 level = (int)(level * qscale * quant_matrix[j]) >> 3;
150 level = (level - 1) | 1;
153 level = (int)(level * qscale * quant_matrix[j]) >> 3;
154 level = (level - 1) | 1;
/* Dequantize an inter block, MPEG-1 style (C reference version).
 * Unlike the intra path there is no special DC handling and the loop
 * starts at coefficient 0; reconstruction is ((2*level+1)*qscale*matrix)>>4
 * followed by oddification. */
161 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
162 int16_t *block, int n, int qscale)
164 int i, level, nCoeffs;
165 const uint16_t *quant_matrix;
167 nCoeffs= s->block_last_index[n];
169 quant_matrix = s->inter_matrix;
170 for(i=0; i<=nCoeffs; i++) {
/* intra_scantable is used for the coefficient permutation here as well;
 * the inter/intra tables share the same permutation for MPEG-1. */
171 int j= s->intra_scantable.permutated[i];
176 level = (((level << 1) + 1) * qscale *
177 ((int) (quant_matrix[j]))) >> 4;
178 level = (level - 1) | 1;
181 level = (((level << 1) + 1) * qscale *
182 ((int) (quant_matrix[j]))) >> 4;
183 level = (level - 1) | 1;
/* Dequantize an intra block, MPEG-2 style (C reference version).
 * With alternate_scan all 64 coefficients must be processed because the
 * scan order no longer guarantees trailing zeros; no oddification here
 * (MPEG-2 uses a different mismatch-control scheme than MPEG-1). */
190 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
191 int16_t *block, int n, int qscale)
193 int i, level, nCoeffs;
194 const uint16_t *quant_matrix;
196 if(s->alternate_scan) nCoeffs= 63;
197 else nCoeffs= s->block_last_index[n];
/* DC coefficient scaled by the per-plane DC scale. */
199 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
200 quant_matrix = s->intra_matrix;
201 for(i=1;i<=nCoeffs;i++) {
202 int j= s->intra_scantable.permutated[i];
207 level = (int)(level * qscale * quant_matrix[j]) >> 3;
210 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of dct_unquantize_mpeg2_intra_c; installed instead
 * of the plain version when CODEC_FLAG_BITEXACT is set (see
 * ff_dct_common_init). NOTE(review): the spec-exact mismatch-control
 * step is not visible in this view — confirm against the full source. */
217 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
218 int16_t *block, int n, int qscale)
220 int i, level, nCoeffs;
221 const uint16_t *quant_matrix;
224 if(s->alternate_scan) nCoeffs= 63;
225 else nCoeffs= s->block_last_index[n];
227 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
229 quant_matrix = s->intra_matrix;
230 for(i=1;i<=nCoeffs;i++) {
231 int j= s->intra_scantable.permutated[i];
236 level = (int)(level * qscale * quant_matrix[j]) >> 3;
239 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Dequantize an inter block, MPEG-2 style (C reference version).
 * Same ((2*level+1)*qscale*matrix)>>4 reconstruction as MPEG-1 inter,
 * but without oddification; alternate_scan forces all 64 coefficients. */
248 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
249 int16_t *block, int n, int qscale)
251 int i, level, nCoeffs;
252 const uint16_t *quant_matrix;
255 if(s->alternate_scan) nCoeffs= 63;
256 else nCoeffs= s->block_last_index[n];
258 quant_matrix = s->inter_matrix;
259 for(i=0; i<=nCoeffs; i++) {
260 int j= s->intra_scantable.permutated[i];
265 level = (((level << 1) + 1) * qscale *
266 ((int) (quant_matrix[j]))) >> 4;
269 level = (((level << 1) + 1) * qscale *
270 ((int) (quant_matrix[j]))) >> 4;
/* Dequantize an intra block, H.263 style: level' = level*qmul +/- qadd
 * depending on sign. No quant matrix is involved. */
279 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
280 int16_t *block, int n, int qscale)
282 int i, level, qmul, qadd;
/* block_last_index may be < 0 only when AIC (advanced intra coding)
 * is in use. */
285 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
290 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
/* "(qscale - 1) | 1" makes qadd odd, as required by H.263. */
291 qadd = (qscale - 1) | 1;
/* raster_end converts the scan-order last index into the highest
 * raster position that can be non-zero. */
298 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
300 for(i=1; i<=nCoeffs; i++) {
304 level = level * qmul - qadd;
306 level = level * qmul + qadd;
/* Dequantize an inter block, H.263 style: same qmul/qadd reconstruction
 * as the intra variant but with no DC special case, so the loop starts
 * at coefficient 0. */
313 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
314 int16_t *block, int n, int qscale)
316 int i, level, qmul, qadd;
319 av_assert2(s->block_last_index[n]>=0);
/* qadd forced odd per H.263. */
321 qadd = (qscale - 1) | 1;
324 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
326 for(i=0; i<=nCoeffs; i++) {
330 level = level * qmul - qadd;
332 level = level * qmul + qadd;
/* Error-resilience callback (installed as er->decode_mb in init_er):
 * reconstructs a single macroblock from the given motion/intra state by
 * setting up the MpegEncContext and calling ff_MPV_decode_mb(). */
339 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
341 int mb_x, int mb_y, int mb_intra, int mb_skipped)
343 MpegEncContext *s = opaque;
346 s->mv_type = mv_type;
347 s->mb_intra = mb_intra;
348 s->mb_skipped = mb_skipped;
351 memcpy(s->mv, mv, sizeof(*mv));
353 ff_init_block_index(s);
354 ff_update_block_index(s);
356 s->bdsp.clear_blocks(s->block[0]);
/* Point dest[] at this MB's position in the current picture; chroma
 * offsets are scaled by the chroma subsampling shifts. */
358 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
359 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
360 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
363 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
364 ff_MPV_decode_mb(s, s->block);
/* Debug stub: paint a 16-pixel-wide block mid-gray (128) instead of
 * copying; installed over the hpel put/avg functions when
 * FF_DEBUG_NOMC is set (see ff_dct_common_init). src is unused. */
367 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
370 memset(dst + h*linesize, 128, 16);
/* Debug stub: 8-pixel-wide counterpart of gray16, used for the chroma
 * (halfpel [1] table) entries under FF_DEBUG_NOMC. src is unused. */
373 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
376 memset(dst + h*linesize, 128, 8);
379 /* init common dct for both encoder and decoder */
/* Initialise the DSP sub-contexts, select the C dct_unquantize
 * implementations (possibly overridden by the per-arch init calls
 * below), and build the permuted scan tables. Shared by encoder and
 * decoder. */
380 av_cold int ff_dct_common_init(MpegEncContext *s)
382 ff_blockdsp_init(&s->bdsp, s->avctx);
383 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
384 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
385 ff_idctdsp_init(&s->idsp, s->avctx);
386 ff_me_cmp_init(&s->mecc, s->avctx);
387 ff_mpegvideodsp_init(&s->mdsp);
388 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* With FF_DEBUG_NOMC, replace motion compensation with gray fills so
 * the residual signal can be inspected in isolation. */
390 if (s->avctx->debug & FF_DEBUG_NOMC) {
392 for (i=0; i<4; i++) {
393 s->hdsp.avg_pixels_tab[0][i] = gray16;
394 s->hdsp.put_pixels_tab[0][i] = gray16;
395 s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
397 s->hdsp.avg_pixels_tab[1][i] = gray8;
398 s->hdsp.put_pixels_tab[1][i] = gray8;
399 s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
403 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
404 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
405 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
406 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
407 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
408 if (s->flags & CODEC_FLAG_BITEXACT)
409 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
410 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture overrides (NEON/Alpha/ARM/PPC/x86). */
412 if (HAVE_INTRINSICS_NEON)
413 ff_MPV_common_init_neon(s);
416 ff_MPV_common_init_axp(s);
418 ff_MPV_common_init_arm(s);
420 ff_MPV_common_init_ppc(s);
422 ff_MPV_common_init_x86(s);
424 /* load & permutate scantables
425 * note: only wmv uses different ones
427 if (s->alternate_scan) {
428 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
429 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
431 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
432 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
434 ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
435 ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads). Returns 0 on success, a negative
 * AVERROR on failure; on ENOMEM the partially allocated edge buffer is
 * freed. Skipped for hwaccel/VDPAU since no software MC runs then. */
440 static int frame_size_alloc(MpegEncContext *s, int linesize)
442 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
444 if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
448 av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
449 return AVERROR_PATCHWELCOME;
452 // edge emu needs blocksize + filter length - 1
453 // (= 17x17 for halfpel / 21x21 for h264)
454 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
455 // at uvlinesize. It supports only YUV420 so 24x24 is enough
456 // linesize * interlaced * MBsize
457 // we also use this buffer for encoding in encode_mb_internal() needing an additional 32 lines
458 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 68,
/* The ME scratchpad is shared by several consumers; temp/rd/b aliases
 * point into the same allocation. */
461 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
463 s->me.temp = s->me.scratchpad;
464 s->rd_scratchpad = s->me.scratchpad;
465 s->b_scratchpad = s->me.scratchpad;
466 s->obmc_scratchpad = s->me.scratchpad + 16;
470 av_freep(&s->edge_emu_buffer);
471 return AVERROR(ENOMEM);
475 * Allocate a frame buffer
/* Allocate the pixel buffer for a Picture via the thread-aware buffer
 * API (or the default get_buffer2 for the WM image/screen codecs, which
 * use internal buffers with different dimensions). When encoding,
 * EDGE_WIDTH of padding is requested on each side and data[] pointers
 * are offset past it. Also validates strides and triggers the
 * linesize-dependent scratch allocation. Returns 0 or a negative
 * AVERROR. */
477 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
479 int edges_needed = av_codec_is_encoder(s->avctx->codec);
483 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
484 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
485 s->codec_id != AV_CODEC_ID_MSS2) {
/* Encoder path: over-allocate so edge emulation has real memory. */
487 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
488 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
491 r = ff_thread_get_buffer(s->avctx, &pic->tf,
492 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
494 pic->f->width = s->avctx->width;
495 pic->f->height = s->avctx->height;
496 pic->f->format = s->avctx->pix_fmt;
497 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
500 if (r < 0 || !pic->f->buf[0]) {
501 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Shift plane pointers past the edge padding (chroma shifts scale the
 * offset per plane), then restore the nominal dimensions. */
508 for (i = 0; pic->f->data[i]; i++) {
509 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
510 pic->f->linesize[i] +
511 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
512 pic->f->data[i] += offset;
514 pic->f->width = s->avctx->width;
515 pic->f->height = s->avctx->height;
518 if (s->avctx->hwaccel) {
519 assert(!pic->hwaccel_picture_private);
520 if (s->avctx->hwaccel->frame_priv_data_size) {
521 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
522 if (!pic->hwaccel_priv_buf) {
523 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
526 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* All pictures in a context must share strides with the first one. */
530 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
531 s->uvlinesize != pic->f->linesize[1])) {
532 av_log(s->avctx, AV_LOG_ERROR,
533 "get_buffer() failed (stride changed)\n");
534 ff_mpeg_unref_picture(s, pic);
538 if (pic->f->linesize[1] != pic->f->linesize[2]) {
539 av_log(s->avctx, AV_LOG_ERROR,
540 "get_buffer() failed (uv stride mismatch)\n");
541 ff_mpeg_unref_picture(s, pic);
545 if (!s->edge_emu_buffer &&
546 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
547 av_log(s->avctx, AV_LOG_ERROR,
548 "get_buffer() failed to allocate context scratch buffers.\n");
549 ff_mpeg_unref_picture(s, pic);
/* Release every per-picture side-data buffer (variance, skip, qscale,
 * mb_type, motion vectors, reference indices) and reset the recorded
 * allocation dimensions so the tables get re-created on next alloc. */
556 void ff_free_picture_tables(Picture *pic)
560 pic->alloc_mb_width =
561 pic->alloc_mb_height = 0;
563 av_buffer_unref(&pic->mb_var_buf);
564 av_buffer_unref(&pic->mc_mb_var_buf);
565 av_buffer_unref(&pic->mb_mean_buf);
566 av_buffer_unref(&pic->mbskip_table_buf);
567 av_buffer_unref(&pic->qscale_table_buf);
568 av_buffer_unref(&pic->mb_type_buf);
570 for (i = 0; i < 2; i++) {
571 av_buffer_unref(&pic->motion_val_buf[i]);
572 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data buffers sized from the current
 * macroblock geometry; records the dimensions used so ff_alloc_picture
 * can detect stale tables. Returns 0 or AVERROR(ENOMEM) (callers are
 * expected to clean up via ff_free_picture_tables). */
576 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
578 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
579 const int mb_array_size = s->mb_stride * s->mb_height;
580 const int b8_array_size = s->b8_stride * s->mb_height * 2;
584 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
585 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
586 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
588 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
589 return AVERROR(ENOMEM);
/* Encoder-side statistics tables (variance / MC variance / mean). */
592 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
593 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
594 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
595 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
596 return AVERROR(ENOMEM);
/* Motion vectors / reference indices: needed for H.263-family output,
 * when encoding, or when motion-vector visualisation is requested. */
599 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
600 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
601 int ref_index_size = 4 * mb_array_size;
603 for (i = 0; mv_size && i < 2; i++) {
604 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
605 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
606 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
607 return AVERROR(ENOMEM);
611 pic->alloc_mb_width = s->mb_width;
612 pic->alloc_mb_height = s->mb_height;
/* Ensure every per-picture table buffer is writable (i.e. not shared
 * with another Picture via refcounting), copying on write as needed.
 * The MAKE_WRITABLE macro bails out with the av_buffer_make_writable
 * error code. */
617 static int make_tables_writable(Picture *pic)
620 #define MAKE_WRITABLE(table) \
623 (ret = av_buffer_make_writable(&pic->table)) < 0)\
627 MAKE_WRITABLE(mb_var_buf);
628 MAKE_WRITABLE(mc_mb_var_buf);
629 MAKE_WRITABLE(mb_mean_buf);
630 MAKE_WRITABLE(mbskip_table_buf);
631 MAKE_WRITABLE(qscale_table_buf);
632 MAKE_WRITABLE(mb_type_buf);
634 for (i = 0; i < 2; i++) {
635 MAKE_WRITABLE(motion_val_buf[i]);
636 MAKE_WRITABLE(ref_index_buf[i]);
643 * Allocate a Picture.
644 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Allocate a complete Picture: pixel buffer (unless shared, in which
 * case the caller supplied it), side-data tables, and the convenience
 * pointers into those tables. Tables left over from a different
 * macroblock geometry are dropped first. On failure everything is
 * released and AVERROR(ENOMEM) is returned. */
646 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* Stale tables from a previous resolution must be re-allocated. */
650 if (pic->qscale_table_buf)
651 if ( pic->alloc_mb_width != s->mb_width
652 || pic->alloc_mb_height != s->mb_height)
653 ff_free_picture_tables(pic);
654 av_assert0(pic->f->data[0]);
659 av_assert0(!pic->f->buf[0]);
661 if (alloc_frame_buffer(s, pic) < 0)
/* First successful allocation fixes the context-wide strides. */
664 s->linesize = pic->f->linesize[0];
665 s->uvlinesize = pic->f->linesize[1];
668 if (!pic->qscale_table_buf)
669 ret = alloc_picture_tables(s, pic);
671 ret = make_tables_writable(pic);
676 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
677 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
678 pic->mb_mean = pic->mb_mean_buf->data;
681 pic->mbskip_table = pic->mbskip_table_buf->data;
/* qscale/mb_type pointers skip the 2*stride+1 guard band at the start
 * of their buffers. */
682 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
683 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
685 if (pic->motion_val_buf[0]) {
686 for (i = 0; i < 2; i++) {
687 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
688 pic->ref_index[i] = pic->ref_index_buf[i]->data;
694 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
695 ff_mpeg_unref_picture(s, pic);
696 ff_free_picture_tables(pic);
697 return AVERROR(ENOMEM);
701 * Deallocate a picture.
/* Release a Picture's frame/hwaccel references and zero all its fields
 * past mb_mean (the fields up to and including mb_mean are preserved —
 * see the offsetof computation). Table buffers themselves are only
 * freed when the picture is flagged for reallocation. */
703 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
705 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
708 /* WM Image / Screen codecs allocate internal buffers with different
709 * dimensions / colorspaces; ignore user-defined callbacks for these. */
710 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
711 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
712 s->codec_id != AV_CODEC_ID_MSS2)
713 ff_thread_release_buffer(s->avctx, &pic->tf);
715 av_frame_unref(pic->f);
717 av_buffer_unref(&pic->hwaccel_priv_buf);
719 if (pic->needs_realloc)
720 ff_free_picture_tables(pic);
722 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's per-picture table buffers reference the same underlying
 * AVBuffers as src's (re-referencing only when the buffer actually
 * differs), then copy the derived data pointers and dimensions. On a
 * failed av_buffer_ref, dst's tables are freed and ENOMEM returned. */
725 static int update_picture_tables(Picture *dst, Picture *src)
729 #define UPDATE_TABLE(table)\
732 (!dst->table || dst->table->buffer != src->table->buffer)) {\
733 av_buffer_unref(&dst->table);\
734 dst->table = av_buffer_ref(src->table);\
736 ff_free_picture_tables(dst);\
737 return AVERROR(ENOMEM);\
742 UPDATE_TABLE(mb_var_buf);
743 UPDATE_TABLE(mc_mb_var_buf);
744 UPDATE_TABLE(mb_mean_buf);
745 UPDATE_TABLE(mbskip_table_buf);
746 UPDATE_TABLE(qscale_table_buf);
747 UPDATE_TABLE(mb_type_buf);
748 for (i = 0; i < 2; i++) {
749 UPDATE_TABLE(motion_val_buf[i]);
750 UPDATE_TABLE(ref_index_buf[i]);
/* The raw pointers can be copied verbatim since both pictures now share
 * the same buffers. */
753 dst->mb_var = src->mb_var;
754 dst->mc_mb_var = src->mc_mb_var;
755 dst->mb_mean = src->mb_mean;
756 dst->mbskip_table = src->mbskip_table;
757 dst->qscale_table = src->qscale_table;
758 dst->mb_type = src->mb_type;
759 for (i = 0; i < 2; i++) {
760 dst->motion_val[i] = src->motion_val[i];
761 dst->ref_index[i] = src->ref_index[i];
764 dst->alloc_mb_width = src->alloc_mb_width;
765 dst->alloc_mb_height = src->alloc_mb_height;
/* Create a new reference in dst to the picture held by src: frame data
 * (via ff_thread_ref_frame), side-data tables, the hwaccel private
 * buffer, and the scalar metadata. dst must be empty on entry. On any
 * failure dst is unreferenced again before returning the error. */
770 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
774 av_assert0(!dst->f->buf[0]);
775 av_assert0(src->f->buf[0]);
779 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
783 ret = update_picture_tables(dst, src);
787 if (src->hwaccel_picture_private) {
788 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
789 if (!dst->hwaccel_priv_buf)
791 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
794 dst->field_picture = src->field_picture;
795 dst->mb_var_sum = src->mb_var_sum;
796 dst->mc_mb_var_sum = src->mc_mb_var_sum;
797 dst->b_frame_score = src->b_frame_score;
798 dst->needs_realloc = src->needs_realloc;
799 dst->reference = src->reference;
800 dst->shared = src->shared;
/* Error path: undo the partial reference. */
804 ff_mpeg_unref_picture(s, dst);
/* Exchange the U and V entries of pblocks (used when the codec tag is
 * "VCR2", which stores chroma planes swapped — see the AV_RL32("VCR2")
 * checks in init_duplicate_context / ff_update_duplicate_context). */
808 static void exchange_uv(MpegEncContext *s)
813 s->pblocks[4] = s->pblocks[5];
/* Initialise the per-thread ("duplicate") part of an MpegEncContext:
 * ME maps, optional noise-reduction accumulator, coefficient blocks and
 * pblocks pointers, plus H.263 AC prediction values. Returns 0 on
 * success, -1 on allocation failure (cleanup happens later in
 * ff_MPV_common_end()). */
817 static int init_duplicate_context(MpegEncContext *s)
819 int y_size = s->b8_stride * (2 * s->mb_height + 1);
820 int c_size = s->mb_stride * (s->mb_height + 1);
821 int yc_size = y_size + 2 * c_size;
/* Odd macroblock heights need one extra row in each plane. */
824 if (s->mb_height & 1)
825 yc_size += 2*s->b8_stride + 2*s->mb_stride;
832 s->obmc_scratchpad = NULL;
835 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
836 ME_MAP_SIZE * sizeof(uint32_t), fail)
837 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
838 ME_MAP_SIZE * sizeof(uint32_t), fail)
839 if (s->avctx->noise_reduction) {
840 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
841 2 * 64 * sizeof(int), fail)
844 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
845 s->block = s->blocks[0];
847 for (i = 0; i < 12; i++) {
848 s->pblocks[i] = &s->block[i];
/* VCR2 stores chroma swapped — see exchange_uv(). */
850 if (s->avctx->codec_tag == AV_RL32("VCR2"))
853 if (s->out_format == FMT_H263) {
855 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
856 yc_size * sizeof(int16_t) * 16, fail);
/* ac_val[0..2]: luma, then the two chroma planes, each offset past a
 * one-row/one-column guard band. */
857 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
858 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
859 s->ac_val[2] = s->ac_val[1] + c_size;
864 return -1; // free() through ff_MPV_common_end()
/* Free the per-thread buffers allocated by init_duplicate_context and
 * frame_size_alloc. obmc_scratchpad only aliases me.scratchpad, so it
 * is just NULLed rather than freed. */
867 static void free_duplicate_context(MpegEncContext *s)
872 av_freep(&s->edge_emu_buffer);
873 av_freep(&s->me.scratchpad);
877 s->obmc_scratchpad = NULL;
879 av_freep(&s->dct_error_sum);
880 av_freep(&s->me.map);
881 av_freep(&s->me.score_map);
882 av_freep(&s->blocks);
883 av_freep(&s->ac_val_base);
/* Copy the thread-local fields from src into bak; used by
 * ff_update_duplicate_context to preserve them across the whole-struct
 * memcpy. */
887 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
889 #define COPY(a) bak->a = src->a
890 COPY(edge_emu_buffer);
895 COPY(obmc_scratchpad);
902 COPY(me.map_generation);
/* Synchronise a slice-thread context with the main context: copy the
 * whole struct from src, restore dst's thread-local fields via
 * backup_duplicate_context, rebuild pblocks, and lazily allocate the
 * scratch buffers if needed. Returns 0 or a negative AVERROR. */
914 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
918 // FIXME copy only needed parts
/* Save dst's private fields, overwrite everything, restore them. */
920 backup_duplicate_context(&bak, dst);
921 memcpy(dst, src, sizeof(MpegEncContext));
922 backup_duplicate_context(dst, &bak);
/* pblocks must point into dst's own block array, not src's. */
923 for (i = 0; i < 12; i++) {
924 dst->pblocks[i] = &dst->block[i];
926 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
928 if (!dst->edge_emu_buffer &&
929 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
930 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
931 "scratch buffers.\n");
934 // STOP_TIMER("update_duplicate_context")
935 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: update the destination decoder context from the
 * source context after the source finished (or set up) a frame.
 * Handles first-time initialisation, resolution changes, picture
 * references, bitstream-buffer duplication and assorted per-frame
 * state. Returns 0 or a negative AVERROR. */
939 int ff_mpeg_update_thread_context(AVCodecContext *dst,
940 const AVCodecContext *src)
943 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
950 // FIXME can parameters change on I-frames?
951 // in that case dst may need a reinit
/* First call: clone the whole context, then re-init the parts that must
 * not be shared (bitstream buffer, allocated tables). */
952 if (!s->context_initialized) {
953 memcpy(s, s1, sizeof(MpegEncContext));
956 s->bitstream_buffer = NULL;
957 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
959 if (s1->context_initialized){
960 // s->picture_range_start += MAX_PICTURE_COUNT;
961 // s->picture_range_end += MAX_PICTURE_COUNT;
962 if((ret = ff_MPV_common_init(s)) < 0){
963 memset(s, 0, sizeof(MpegEncContext));
/* Source changed resolution (or requested a reinit): resize this
 * context's frame-dependent tables. */
970 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
971 s->context_reinit = 0;
972 s->height = s1->height;
973 s->width = s1->width;
974 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
978 s->avctx->coded_height = s1->avctx->coded_height;
979 s->avctx->coded_width = s1->avctx->coded_width;
980 s->avctx->width = s1->avctx->width;
981 s->avctx->height = s1->avctx->height;
983 s->coded_picture_number = s1->coded_picture_number;
984 s->picture_number = s1->picture_number;
986 av_assert0(!s->picture || s->picture != s1->picture);
/* Re-reference every picture slot from the source context. */
988 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
989 ff_mpeg_unref_picture(s, &s->picture[i]);
990 if (s1->picture[i].f->buf[0] &&
991 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
995 #define UPDATE_PICTURE(pic)\
997 ff_mpeg_unref_picture(s, &s->pic);\
998 if (s1->pic.f && s1->pic.f->buf[0])\
999 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
1001 ret = update_picture_tables(&s->pic, &s1->pic);\
1006 UPDATE_PICTURE(current_picture);
1007 UPDATE_PICTURE(last_picture);
1008 UPDATE_PICTURE(next_picture);
/* Rebase the picture pointers from s1's array into s's array. */
1010 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1011 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1012 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1014 // Error/bug resilience
1015 s->next_p_frame_damaged = s1->next_p_frame_damaged;
1016 s->workaround_bugs = s1->workaround_bugs;
1017 s->padding_bug_score = s1->padding_bug_score;
1019 // MPEG4 timing info
/* Bulk-copy the contiguous field range [last_time_base, pb_field_time]. */
1020 memcpy(&s->last_time_base, &s1->last_time_base,
1021 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1022 (char *) &s1->last_time_base);
1025 s->max_b_frames = s1->max_b_frames;
1026 s->low_delay = s1->low_delay;
1027 s->droppable = s1->droppable;
1029 // DivX handling (doesn't work)
1030 s->divx_packed = s1->divx_packed;
/* Duplicate any pending bitstream data, including zeroed padding. */
1032 if (s1->bitstream_buffer) {
1033 if (s1->bitstream_buffer_size +
1034 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
1035 av_fast_malloc(&s->bitstream_buffer,
1036 &s->allocated_bitstream_buffer_size,
1037 s1->allocated_bitstream_buffer_size);
1038 s->bitstream_buffer_size = s1->bitstream_buffer_size;
1039 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1040 s1->bitstream_buffer_size);
1041 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1042 FF_INPUT_BUFFER_PADDING_SIZE);
1045 // linesize dependent scratch buffer allocation
1046 if (!s->edge_emu_buffer)
1048 if (frame_size_alloc(s, s1->linesize) < 0) {
1049 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1050 "scratch buffers.\n");
1051 return AVERROR(ENOMEM);
1054 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1055 "be allocated due to unknown size.\n");
1058 // MPEG2/interlacing info
/* Bulk-copy the contiguous field range [progressive_sequence, rtp_mode). */
1059 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1060 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1062 if (!s1->first_field) {
1063 s->last_pict_type = s1->pict_type;
1064 if (s1->current_picture_ptr)
1065 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1072 * Set the given MpegEncContext to common defaults
1073 * (same for encoding and decoding).
1074 * The changed fields will not depend upon the
1075 * prior state of the MpegEncContext.
/* (Documented at lines 1072-1075 above.) Resets scale tables, frame
 * structure flags and counters to their codec-independent defaults. */
1077 void ff_MPV_common_defaults(MpegEncContext *s)
1079 s->y_dc_scale_table =
1080 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1081 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1082 s->progressive_frame = 1;
1083 s->progressive_sequence = 1;
1084 s->picture_structure = PICT_FRAME;
1086 s->coded_picture_number = 0;
1087 s->picture_number = 0;
1092 s->slice_context_count = 1;
1096 * Set the given MpegEncContext to defaults for decoding.
1097 * the changed fields will not depend upon
1098 * the prior state of the MpegEncContext.
/* Decoder-side defaults; currently just the shared common defaults. */
1100 void ff_MPV_decode_defaults(MpegEncContext *s)
1102 ff_MPV_common_defaults(s);
/* Initialise the error-resilience context from the MpegEncContext:
 * mirror the macroblock geometry, share the skip/intra/DC tables, and
 * allocate the ER-private temp and status buffers. The decode_mb hook
 * is mpeg_er_decode_mb (above). Returns 0 or AVERROR(ENOMEM). */
1105 static int init_er(MpegEncContext *s)
1107 ERContext *er = &s->er;
1108 int mb_array_size = s->mb_height * s->mb_stride;
1111 er->avctx = s->avctx;
1112 er->mecc = &s->mecc;
1114 er->mb_index2xy = s->mb_index2xy;
1115 er->mb_num = s->mb_num;
1116 er->mb_width = s->mb_width;
1117 er->mb_height = s->mb_height;
1118 er->mb_stride = s->mb_stride;
1119 er->b8_stride = s->b8_stride;
1121 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1122 er->error_status_table = av_mallocz(mb_array_size);
1123 if (!er->er_temp_buffer || !er->error_status_table)
1126 er->mbskip_table = s->mbskip_table;
1127 er->mbintra_table = s->mbintra_table;
1129 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1130 er->dc_val[i] = s->dc_val[i];
1132 er->decode_mb = mpeg_er_decode_mb;
/* Allocation-failure path. */
1137 av_freep(&er->er_temp_buffer);
1138 av_freep(&er->error_status_table);
1139 return AVERROR(ENOMEM);
1143 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* (See comment at line 1143 above.) Computes the macroblock geometry
 * from width/height, then allocates every resolution-dependent table:
 * index maps, MV tables, MB-type/lambda/complexity tables, optional
 * interlaced-ME tables, H.263 coded-block/cbp tables, DC/AC prediction
 * values and the intra/skip tables. Returns 0 or AVERROR(ENOMEM). */
1145 static int init_context_frame(MpegEncContext *s)
1147 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1149 s->mb_width = (s->width + 15) / 16;
1150 s->mb_stride = s->mb_width + 1;
1151 s->b8_stride = s->mb_width * 2 + 1;
1152 mb_array_size = s->mb_height * s->mb_stride;
1153 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1155 /* set default edge pos, will be overridden
1156 * in decode_header if needed */
1157 s->h_edge_pos = s->mb_width * 16;
1158 s->v_edge_pos = s->mb_height * 16;
1160 s->mb_num = s->mb_width * s->mb_height;
1165 s->block_wrap[3] = s->b8_stride;
1167 s->block_wrap[5] = s->mb_stride;
/* Sizes of the luma (8x8-block granular) and chroma prediction planes. */
1169 y_size = s->b8_stride * (2 * s->mb_height + 1);
1170 c_size = s->mb_stride * (s->mb_height + 1);
1171 yc_size = y_size + 2 * c_size;
1173 if (s->mb_height & 1)
1174 yc_size += 2*s->b8_stride + 2*s->mb_stride;
1176 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error resilience code looks cleaner with this
1177 for (y = 0; y < s->mb_height; y++)
1178 for (x = 0; x < s->mb_width; x++)
1179 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1181 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1184 /* Allocate MV tables */
1185 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1186 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1187 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1188 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1189 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1190 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
/* Working pointers skip the one-row/one-column guard band. */
1191 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1192 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1193 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1194 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1195 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1196 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1198 /* Allocate MB type table */
1199 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1201 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1203 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1204 mb_array_size * sizeof(float), fail);
1205 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1206 mb_array_size * sizeof(float), fail);
/* MPEG-4 or interlaced ME needs the field-based MV/select tables. */
1210 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1211 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1212 /* interlaced direct mode decoding tables */
1213 for (i = 0; i < 2; i++) {
1215 for (j = 0; j < 2; j++) {
1216 for (k = 0; k < 2; k++) {
1217 FF_ALLOCZ_OR_GOTO(s->avctx,
1218 s->b_field_mv_table_base[i][j][k],
1219 mv_table_size * 2 * sizeof(int16_t),
1221 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1224 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1225 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1226 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1228 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1231 if (s->out_format == FMT_H263) {
1233 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1234 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1236 /* cbp, ac_pred, pred_dir */
1237 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1238 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1241 if (s->h263_pred || s->h263_plus || !s->encoding) {
1243 // MN: we need these for error resilience of intra-frames
1244 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1245 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1246 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1247 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor value. */
1248 for (i = 0; i < yc_size; i++)
1249 s->dc_val_base[i] = 1024;
1252 /* which mb is an intra block */
1253 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1254 memset(s->mbintra_table, 1, mb_array_size);
1256 /* init macroblock skip table */
1257 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1258 // Note the + 1 is for a quicker mpeg4 slice_end detection
1262 return AVERROR(ENOMEM);
1266 * init common structure for both encoder and decoder.
1267 * this assumes that some variables like width/height are already set
/* NOTE(review): this is a numbered listing with gaps in the numbering;
 * source lines are missing between entries (else-branches, error returns,
 * closing braces). Do not treat this span as compilable as-is. */
1269 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* One slice context per slice thread; a single context otherwise. */
1272 int nb_slices = (HAVE_THREADS &&
1273 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1274 s->avctx->thread_count : 1;
1276 if (s->encoding && s->avctx->slices)
1277 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2: round mb_height up to an even number of MB rows
 * (32-pixel units); progressive content uses plain 16-pixel rows. */
1279 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1280 s->mb_height = (s->height + 31) / 32 * 2;
1282 s->mb_height = (s->height + 15) / 16;
1284 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1285 av_log(s->avctx, AV_LOG_ERROR,
1286 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp the slice count to MAX_THREADS and to the number of MB rows. */
1290 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1293 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1295 max_slices = MAX_THREADS;
1296 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1297 " reducing to %d\n", nb_slices, max_slices);
1298 nb_slices = max_slices;
1301 if ((s->width || s->height) &&
1302 av_image_check_size(s->width, s->height, 0, s->avctx))
1305 ff_dct_common_init(s);
1307 s->flags = s->avctx->flags;
1308 s->flags2 = s->avctx->flags2;
1310 /* set chroma shifts */
1311 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1313 &s->chroma_y_shift);
1315 /* convert fourcc to upper case */
1316 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1318 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
/* Allocate the Picture pool and an AVFrame for every pool entry plus the
 * four working pictures (next/last/current/new). */
1320 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1321 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1322 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1323 s->picture[i].f = av_frame_alloc();
1324 if (!s->picture[i].f)
1327 memset(&s->next_picture, 0, sizeof(s->next_picture));
1328 memset(&s->last_picture, 0, sizeof(s->last_picture));
1329 memset(&s->current_picture, 0, sizeof(s->current_picture));
1330 memset(&s->new_picture, 0, sizeof(s->new_picture));
1331 s->next_picture.f = av_frame_alloc();
1332 if (!s->next_picture.f)
1334 s->last_picture.f = av_frame_alloc();
1335 if (!s->last_picture.f)
1337 s->current_picture.f = av_frame_alloc();
1338 if (!s->current_picture.f)
1340 s->new_picture.f = av_frame_alloc();
1341 if (!s->new_picture.f)
1344 if (init_context_frame(s))
1347 s->parse_context.state = -1;
1349 s->context_initialized = 1;
1350 s->thread_context[0] = s;
1352 // if (s->width && s->height) {
/* Duplicate the context for each extra slice thread and assign each one
 * its [start_mb_y, end_mb_y) row range, rounded to balance the rows. */
1353 if (nb_slices > 1) {
1354 for (i = 1; i < nb_slices; i++) {
1355 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1356 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1359 for (i = 0; i < nb_slices; i++) {
1360 if (init_duplicate_context(s->thread_context[i]) < 0)
1362 s->thread_context[i]->start_mb_y =
1363 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1364 s->thread_context[i]->end_mb_y =
1365 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1368 if (init_duplicate_context(s) < 0)
1371 s->end_mb_y = s->mb_height;
1373 s->slice_context_count = nb_slices;
/* fail path: tear down everything allocated above */
1378 ff_MPV_common_end(s);
1383 * Frees and resets MpegEncContext fields depending on the resolution.
1384 * Is used during resolution changes to avoid a full reinitialization of the
1387 static int free_context_frame(MpegEncContext *s)
/* Free every per-resolution table allocated by init_context_frame() and
 * null the derived (base + offset) pointers so nothing dangles. */
1391 av_freep(&s->mb_type);
1392 av_freep(&s->p_mv_table_base);
1393 av_freep(&s->b_forw_mv_table_base);
1394 av_freep(&s->b_back_mv_table_base);
1395 av_freep(&s->b_bidir_forw_mv_table_base);
1396 av_freep(&s->b_bidir_back_mv_table_base);
1397 av_freep(&s->b_direct_mv_table_base);
1398 s->p_mv_table = NULL;
1399 s->b_forw_mv_table = NULL;
1400 s->b_back_mv_table = NULL;
1401 s->b_bidir_forw_mv_table = NULL;
1402 s->b_bidir_back_mv_table = NULL;
1403 s->b_direct_mv_table = NULL;
/* Interlaced/field MV tables, mirroring the allocation loops. */
1404 for (i = 0; i < 2; i++) {
1405 for (j = 0; j < 2; j++) {
1406 for (k = 0; k < 2; k++) {
1407 av_freep(&s->b_field_mv_table_base[i][j][k]);
1408 s->b_field_mv_table[i][j][k] = NULL;
1410 av_freep(&s->b_field_select_table[i][j]);
1411 av_freep(&s->p_field_mv_table_base[i][j]);
1412 s->p_field_mv_table[i][j] = NULL;
1414 av_freep(&s->p_field_select_table[i]);
1417 av_freep(&s->dc_val_base);
1418 av_freep(&s->coded_block_base);
1419 av_freep(&s->mbintra_table);
1420 av_freep(&s->cbp_table);
1421 av_freep(&s->pred_dir_table);
1423 av_freep(&s->mbskip_table);
1425 av_freep(&s->er.error_status_table);
1426 av_freep(&s->er.er_temp_buffer);
1427 av_freep(&s->mb_index2xy);
1428 av_freep(&s->lambda_table);
1430 av_freep(&s->cplx_tab);
1431 av_freep(&s->bits_tab);
/* Reset so the next init recomputes the line sizes. */
1433 s->linesize = s->uvlinesize = 0;
/* Reinitialize the per-resolution state after a mid-stream frame size
 * change: frees the old tables, marks all pool pictures for reallocation,
 * recomputes mb_height and rebuilds the slice thread contexts. */
1438 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Tear down per-thread duplicate contexts first. */
1442 if (s->slice_context_count > 1) {
1443 for (i = 0; i < s->slice_context_count; i++) {
1444 free_duplicate_context(s->thread_context[i]);
1446 for (i = 1; i < s->slice_context_count; i++) {
1447 av_freep(&s->thread_context[i]);
1450 free_duplicate_context(s);
1452 if ((err = free_context_frame(s)) < 0)
/* Force every pooled picture to be reallocated at the new size. */
1456 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1457 s->picture[i].needs_realloc = 1;
1460 s->last_picture_ptr =
1461 s->next_picture_ptr =
1462 s->current_picture_ptr = NULL;
/* Same mb_height computation as ff_MPV_common_init(). */
1465 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1466 s->mb_height = (s->height + 31) / 32 * 2;
1468 s->mb_height = (s->height + 15) / 16;
1470 if ((s->width || s->height) &&
1471 av_image_check_size(s->width, s->height, 0, s->avctx))
1472 return AVERROR_INVALIDDATA;
1474 if ((err = init_context_frame(s)))
1477 s->thread_context[0] = s;
/* Rebuild the slice thread contexts with the new row ranges. */
1479 if (s->width && s->height) {
1480 int nb_slices = s->slice_context_count;
1481 if (nb_slices > 1) {
1482 for (i = 1; i < nb_slices; i++) {
1483 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1484 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1487 for (i = 0; i < nb_slices; i++) {
1488 if (init_duplicate_context(s->thread_context[i]) < 0)
1490 s->thread_context[i]->start_mb_y =
1491 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1492 s->thread_context[i]->end_mb_y =
1493 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1496 err = init_duplicate_context(s);
1500 s->end_mb_y = s->mb_height;
1502 s->slice_context_count = nb_slices;
/* fail path */
1507 ff_MPV_common_end(s);
1511 /* init common structure for both encoder and decoder */
/* Full teardown of everything ff_MPV_common_init() allocated: thread
 * contexts, parse/bitstream buffers, the picture pool and the four
 * working pictures, then the per-resolution tables. Safe to call on a
 * partially initialized context (all frees are av_freep/unref style). */
1512 void ff_MPV_common_end(MpegEncContext *s)
1516 if (s->slice_context_count > 1) {
1517 for (i = 0; i < s->slice_context_count; i++) {
1518 free_duplicate_context(s->thread_context[i]);
1520 for (i = 1; i < s->slice_context_count; i++) {
1521 av_freep(&s->thread_context[i]);
1523 s->slice_context_count = 1;
1524 } else free_duplicate_context(s);
1526 av_freep(&s->parse_context.buffer);
1527 s->parse_context.buffer_size = 0;
1529 av_freep(&s->bitstream_buffer);
1530 s->allocated_bitstream_buffer_size = 0;
/* Release the picture pool: tables, buffer refs, then the AVFrames. */
1533 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1534 ff_free_picture_tables(&s->picture[i]);
1535 ff_mpeg_unref_picture(s, &s->picture[i]);
1536 av_frame_free(&s->picture[i].f);
1539 av_freep(&s->picture);
1540 ff_free_picture_tables(&s->last_picture);
1541 ff_mpeg_unref_picture(s, &s->last_picture);
1542 av_frame_free(&s->last_picture.f);
1543 ff_free_picture_tables(&s->current_picture);
1544 ff_mpeg_unref_picture(s, &s->current_picture);
1545 av_frame_free(&s->current_picture.f);
1546 ff_free_picture_tables(&s->next_picture);
1547 ff_mpeg_unref_picture(s, &s->next_picture);
1548 av_frame_free(&s->next_picture.f);
1549 ff_free_picture_tables(&s->new_picture);
1550 ff_mpeg_unref_picture(s, &s->new_picture);
1551 av_frame_free(&s->new_picture.f);
1553 free_context_frame(s);
1555 s->context_initialized = 0;
1556 s->last_picture_ptr =
1557 s->next_picture_ptr =
1558 s->current_picture_ptr = NULL;
1559 s->linesize = s->uvlinesize = 0;
/* Build the derived run/level lookup tables (max_level[], max_run[],
 * index_run[]) for an RLTable, once per table. If static_store is
 * non-NULL the results live in the caller-provided static buffer and a
 * second call becomes a no-op; otherwise they are av_malloc()ed. */
1562 av_cold void ff_init_rl(RLTable *rl,
1563 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1565 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1566 uint8_t index_run[MAX_RUN + 1];
1567 int last, run, level, start, end, i;
1569 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1570 if (static_store && rl->max_level[0])
1573 /* compute max_level[], max_run[] and index_run[] */
/* Two passes: last==0 for non-final coefficients, last==1 for final
 * ones; start/end select the matching slice of the VLC table. */
1574 for (last = 0; last < 2; last++) {
1583 memset(max_level, 0, MAX_RUN + 1);
1584 memset(max_run, 0, MAX_LEVEL + 1);
/* rl->n marks "no code for this run" in index_run[]. */
1585 memset(index_run, rl->n, MAX_RUN + 1);
1586 for (i = start; i < end; i++) {
1587 run = rl->table_run[i];
1588 level = rl->table_level[i];
1589 if (index_run[run] == rl->n)
1591 if (level > max_level[run])
1592 max_level[run] = level;
1593 if (run > max_run[level])
1594 max_run[level] = run;
/* Copy the scratch arrays into static storage or fresh allocations.
 * NOTE(review): listing gaps hide the malloc-failure handling here. */
1597 rl->max_level[last] = static_store[last];
1599 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1600 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1602 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1604 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1605 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1607 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1609 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1610 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute the per-qscale RL_VLC tables: for each of the 32 qscale
 * values, bake (run, level, len) triples with dequantization
 * (level * qmul + qadd) already applied so the decoder's inner loop
 * avoids a multiply per coefficient. */
1614 av_cold void ff_init_vlc_rl(RLTable *rl)
1618 for (q = 0; q < 32; q++) {
/* qadd = (q - 1) | 1 — forces qadd odd; NOTE(review): the qmul setup
 * line is missing from this listing. */
1620 int qadd = (q - 1) | 1;
1626 for (i = 0; i < rl->vlc.table_size; i++) {
1627 int code = rl->vlc.table[i][0];
1628 int len = rl->vlc.table[i][1];
1631 if (len == 0) { // illegal code
1634 } else if (len < 0) { // more bits needed
1638 if (code == rl->n) { // esc
/* Normal code: +1 because stored runs are biased, and codes past
 * rl->last mark "last coefficient" with the +192 run offset. */
1642 run = rl->table_run[code] + 1;
1643 level = rl->table_level[code] * qmul + qadd;
1644 if (code >= rl->last) run += 192;
1647 rl->rl_vlc[q][i].len = len;
1648 rl->rl_vlc[q][i].level = level;
1649 rl->rl_vlc[q][i].run = run;
/* Unreference every picture in the pool that is not currently used as a
 * reference frame, returning its buffers to the pool/allocator. */
1654 static void release_unused_pictures(MpegEncContext *s)
1658 /* release non reference frames */
1659 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1660 if (!s->picture[i].reference)
1661 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Return whether a pool Picture may be recycled: never the last-picture
 * pointer, free if its frame has no buffer, and a realloc-flagged picture
 * counts as free unless it is a delayed reference.
 * NOTE(review): the return statements are omitted by the listing gaps. */
1665 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1667 if (pic == s->last_picture_ptr)
1669 if (pic->f->buf[0] == NULL)
1671 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Pick an index into s->picture[] to (re)use. First prefer a slot whose
 * frame buffer is entirely unallocated, then fall back to any slot
 * pic_is_unused() accepts. Running out of slots is an internal error for
 * valid streams — see the comment below. */
1676 static int find_unused_picture(MpegEncContext *s, int shared)
1681 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1682 if (s->picture[i].f->buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1686 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1687 if (pic_is_unused(s, &s->picture[i]))
1692 av_log(s->avctx, AV_LOG_FATAL,
1693 "Internal error, picture buffer overflow\n");
1694 /* We could return -1, but the codec would crash trying to draw into a
1695 * non-existing frame anyway. This is safer than waiting for a random crash.
1696 * Also the return of this is never useful, an encoder must only allocate
1697 * as much as allowed in the specification. This has no relationship to how
1698 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1699 * enough for such valid streams).
1700 * Plus, a decoder has to check stream validity and remove frames if too
1701 * many reference frames are around. Waiting for "OOM" is not correct at
1702 * all. Similarly, missing reference frames have to be replaced by
1703 * interpolated/MC frames, anything else is a bug in the codec ...
/* Public wrapper around find_unused_picture(): if the chosen slot was
 * flagged for reallocation, drop its old tables and buffer refs before
 * handing the index back to the caller. */
1709 int ff_find_unused_picture(MpegEncContext *s, int shared)
1711 int ret = find_unused_picture(s, shared);
1713 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1714 if (s->picture[ret].needs_realloc) {
1715 s->picture[ret].needs_realloc = 0;
1716 ff_free_picture_tables(&s->picture[ret]);
1717 ff_mpeg_unref_picture(s, &s->picture[ret]);
/* Fill a frame with mid-gray (0x80 in all three planes) — used by the
 * FF_DEBUG_NOMC debug mode to blank the picture before overlaying.
 * Assumes an 8-bit YUV-style layout; chroma planes are sized via the
 * format's chroma subsampling shifts. */
1723 static void gray_frame(AVFrame *frame)
1725 int i, h_chroma_shift, v_chroma_shift;
1727 av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1729 for(i=0; i<frame->height; i++)
1730 memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1731 for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1732 memset(frame->data[1] + frame->linesize[1]*i,
1733 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1734 memset(frame->data[2] + frame->linesize[2]*i,
1735 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1740 * generic function called after decoding
1741 * the header and before a frame is decoded.
/* Sets up current/last/next picture state for the frame about to be
 * decoded: releases stale references, picks/allocates the current
 * picture, fabricates dummy last/next pictures when references are
 * missing, adjusts data/linesize for field pictures, and selects the
 * dequantizer functions.
 * NOTE(review): numbered listing with gaps — error returns and several
 * closing braces are missing between entries. */
1743 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1749 if (!ff_thread_can_start_frame(avctx)) {
1750 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1754 /* mark & release old frames */
1755 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1756 s->last_picture_ptr != s->next_picture_ptr &&
1757 s->last_picture_ptr->f->buf[0]) {
1758 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1761 /* release forgotten pictures */
1762 /* if (mpeg124/h263) */
1763 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1764 if (&s->picture[i] != s->last_picture_ptr &&
1765 &s->picture[i] != s->next_picture_ptr &&
1766 s->picture[i].reference && !s->picture[i].needs_realloc) {
1767 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1768 av_log(avctx, AV_LOG_ERROR,
1769 "releasing zombie picture\n");
1770 ff_mpeg_unref_picture(s, &s->picture[i]);
1774 ff_mpeg_unref_picture(s, &s->current_picture);
1776 release_unused_pictures(s);
/* Reuse a pre-selected empty current picture, or grab a pool slot. */
1778 if (s->current_picture_ptr &&
1779 s->current_picture_ptr->f->buf[0] == NULL) {
1780 // we already have a unused image
1781 // (maybe it was set before reading the header)
1782 pic = s->current_picture_ptr;
1784 i = ff_find_unused_picture(s, 0);
1786 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1789 pic = &s->picture[i];
1793 if (!s->droppable) {
1794 if (s->pict_type != AV_PICTURE_TYPE_B)
1798 pic->f->coded_picture_number = s->coded_picture_number++;
1800 if (ff_alloc_picture(s, pic, 0) < 0)
1803 s->current_picture_ptr = pic;
1804 // FIXME use only the vars from current_pic
/* Propagate interlacing metadata onto the output frame. */
1805 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1806 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1807 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1808 if (s->picture_structure != PICT_FRAME)
1809 s->current_picture_ptr->f->top_field_first =
1810 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1812 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1813 !s->progressive_sequence;
1814 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1816 s->current_picture_ptr->f->pict_type = s->pict_type;
1817 // if (s->flags && CODEC_FLAG_QSCALE)
1818 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1819 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1821 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1822 s->current_picture_ptr)) < 0)
/* Shift the reference window for non-B frames. */
1825 if (s->pict_type != AV_PICTURE_TYPE_B) {
1826 s->last_picture_ptr = s->next_picture_ptr;
1828 s->next_picture_ptr = s->current_picture_ptr;
1830 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1831 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1832 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1833 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1834 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1835 s->pict_type, s->droppable);
/* Missing last reference (first frame is not a keyframe, B frame with
 * no forward ref, or field-based first keyframe): allocate a gray
 * dummy picture so motion compensation has something to read. */
1837 if ((s->last_picture_ptr == NULL ||
1838 s->last_picture_ptr->f->buf[0] == NULL) &&
1839 (s->pict_type != AV_PICTURE_TYPE_I ||
1840 s->picture_structure != PICT_FRAME)) {
1841 int h_chroma_shift, v_chroma_shift;
1842 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1843 &h_chroma_shift, &v_chroma_shift);
1844 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1845 av_log(avctx, AV_LOG_DEBUG,
1846 "allocating dummy last picture for B frame\n");
1847 else if (s->pict_type != AV_PICTURE_TYPE_I)
1848 av_log(avctx, AV_LOG_ERROR,
1849 "warning: first frame is no keyframe\n");
1850 else if (s->picture_structure != PICT_FRAME)
1851 av_log(avctx, AV_LOG_DEBUG,
1852 "allocate dummy last picture for field based first keyframe\n");
1854 /* Allocate a dummy frame */
1855 i = ff_find_unused_picture(s, 0);
1857 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1860 s->last_picture_ptr = &s->picture[i];
1862 s->last_picture_ptr->reference = 3;
1863 s->last_picture_ptr->f->key_frame = 0;
1864 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1866 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1867 s->last_picture_ptr = NULL;
/* Fill the dummy with mid-gray (not possible for hwaccel surfaces). */
1871 if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1872 for(i=0; i<avctx->height; i++)
1873 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1874 0x80, avctx->width);
1875 for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1876 memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1877 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1878 memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1879 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
/* FLV/H.263 use luma 16 (black) instead of gray for the dummy. */
1882 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1883 for(i=0; i<avctx->height; i++)
1884 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
1888 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1889 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same treatment for a missing backward reference of a B frame. */
1891 if ((s->next_picture_ptr == NULL ||
1892 s->next_picture_ptr->f->buf[0] == NULL) &&
1893 s->pict_type == AV_PICTURE_TYPE_B) {
1894 /* Allocate a dummy frame */
1895 i = ff_find_unused_picture(s, 0);
1897 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1900 s->next_picture_ptr = &s->picture[i];
1902 s->next_picture_ptr->reference = 3;
1903 s->next_picture_ptr->f->key_frame = 0;
1904 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1906 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1907 s->next_picture_ptr = NULL;
1910 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1911 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1914 #if 0 // BUFREF-FIXME
1915 memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1916 memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
/* Refresh the working last/next picture copies from the pointers. */
1918 if (s->last_picture_ptr) {
1919 ff_mpeg_unref_picture(s, &s->last_picture);
1920 if (s->last_picture_ptr->f->buf[0] &&
1921 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1922 s->last_picture_ptr)) < 0)
1925 if (s->next_picture_ptr) {
1926 ff_mpeg_unref_picture(s, &s->next_picture);
1927 if (s->next_picture_ptr->f->buf[0] &&
1928 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1929 s->next_picture_ptr)) < 0)
1933 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1934 s->last_picture_ptr->f->buf[0]));
/* Field pictures: point data at the right field and double the strides
 * so the field is addressed like a half-height frame. */
1936 if (s->picture_structure!= PICT_FRAME) {
1938 for (i = 0; i < 4; i++) {
1939 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1940 s->current_picture.f->data[i] +=
1941 s->current_picture.f->linesize[i];
1943 s->current_picture.f->linesize[i] *= 2;
1944 s->last_picture.f->linesize[i] *= 2;
1945 s->next_picture.f->linesize[i] *= 2;
1949 s->err_recognition = avctx->err_recognition;
1951 /* set dequantizer, we can't do it during init as
1952 * it might change for mpeg4 and we can't do it in the header
1953 * decode as init is not called for mpeg4 there yet */
1954 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1955 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1956 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1957 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1958 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1959 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1961 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1962 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1965 if (s->avctx->debug & FF_DEBUG_NOMC) {
1966 gray_frame(s->current_picture_ptr->f);
1972 /* called after a frame has been decoded. */
/* Signals frame-threading consumers that the just-decoded reference
 * picture is fully available. */
1973 void ff_MPV_frame_end(MpegEncContext *s)
1977 if (s->current_picture.reference)
1978 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
/* Clip the line segment (sx,sy)-(ex,ey) against 0..maxx on the x axis,
 * interpolating the y endpoints in 64-bit to avoid overflow. Normalizes
 * so that *sx <= *ex by recursing with swapped endpoints.
 * NOTE(review): several lines (the guard conditions and return values)
 * are missing from this listing. */
1982 static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
1985 return clip_line(ex, ey, sx, sy, maxx);
1990 *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
1997 *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
2005 * Draw a line from (ex, ey) -> (sx, sy).
2006 * @param w width of the image
2007 * @param h height of the image
2008 * @param stride stride/linesize of the image
2009 * @param color color of the arrow
/* Anti-aliased line draw for the motion-vector debug overlay: the color
 * value is *added* into the luma buffer, split between two adjacent
 * pixels by the 16.16 fixed-point fractional position. */
2011 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
2012 int w, int h, int stride, int color)
/* Clip against both axes before clamping, to keep the slope correct. */
2016 if (clip_line(&sx, &sy, &ex, &ey, w - 1))
2018 if (clip_line(&sy, &sx, &ey, &ex, h - 1))
2021 sx = av_clip(sx, 0, w - 1);
2022 sy = av_clip(sy, 0, h - 1);
2023 ex = av_clip(ex, 0, w - 1);
2024 ey = av_clip(ey, 0, h - 1);
2026 buf[sy * stride + sx] += color;
/* X-major branch: step along x, distribute color across two rows. */
2028 if (FFABS(ex - sx) > FFABS(ey - sy)) {
2030 FFSWAP(int, sx, ex);
2031 FFSWAP(int, sy, ey);
2033 buf += sx + sy * stride;
2035 f = ((ey - sy) << 16) / ex;
2036 for (x = 0; x <= ex; x++) {
2038 fr = (x * f) & 0xFFFF;
2039 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2040 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* Y-major branch: step along y, distribute across two columns. */
2044 FFSWAP(int, sx, ex);
2045 FFSWAP(int, sy, ey);
2047 buf += sx + sy * stride;
2050 f = ((ex - sx) << 16) / ey;
2053 for(y= 0; y <= ey; y++){
2055 fr = (y*f) & 0xFFFF;
2056 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2057 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
2063 * Draw an arrow from (ex, ey) -> (sx, sy).
2064 * @param w width of the image
2065 * @param h height of the image
2066 * @param stride stride/linesize of the image
2067 * @param color color of the arrow
2069 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2070 int ey, int w, int h, int stride, int color, int tail, int direction)
/* direction selects which end gets the arrow head (endpoints swapped). */
2075 FFSWAP(int, sx, ex);
2076 FFSWAP(int, sy, ey);
/* Loose ±100 px clamp: keeps the fixed-point math in draw_line() from
 * overflowing while still letting slightly off-screen vectors clip
 * naturally. */
2079 sx = av_clip(sx, -100, w + 100);
2080 sy = av_clip(sy, -100, h + 100);
2081 ex = av_clip(ex, -100, w + 100);
2082 ey = av_clip(ey, -100, h + 100);
/* Only draw head barbs for vectors longer than 3 px. */
2087 if (dx * dx + dy * dy > 3 * 3) {
2090 int length = ff_sqrt((rx * rx + ry * ry) << 8);
2092 // FIXME subpixel accuracy
2093 rx = ROUNDED_DIV(rx * 3 << 4, length);
2094 ry = ROUNDED_DIV(ry * 3 << 4, length);
2101 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2102 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2104 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
2108 * Print debugging info for the given picture.
/* Two debug outputs controlled by avctx->debug / avctx->debug_mv:
 * (1) a textual per-macroblock map (skip count, QP, MB type) on the log,
 * (2) a visual overlay drawn into the picture itself (motion vectors as
 * arrows, QP as chroma shade, MB type as chroma color + luma marks).
 * NOTE(review): numbered listing with gaps — many closing braces and a
 * few statements are missing between entries. */
2110 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2111 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2113 int mb_width, int mb_height, int mb_stride, int quarter_sample)
/* Hardware-decoded surfaces have no accessible MB metadata. */
2115 if (avctx->hwaccel || !mbtype_table
2116 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
/* --- textual per-MB dump --- */
2120 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2123 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2124 av_get_picture_type_char(pict->pict_type));
2125 for (y = 0; y < mb_height; y++) {
2126 for (x = 0; x < mb_width; x++) {
2127 if (avctx->debug & FF_DEBUG_SKIP) {
2128 int count = mbskip_table[x + y * mb_stride];
2131 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2133 if (avctx->debug & FF_DEBUG_QP) {
2134 av_log(avctx, AV_LOG_DEBUG, "%2d",
2135 qscale_table[x + y * mb_stride]);
2137 if (avctx->debug & FF_DEBUG_MB_TYPE) {
2138 int mb_type = mbtype_table[x + y * mb_stride];
2139 // Type & MV direction
2140 if (IS_PCM(mb_type))
2141 av_log(avctx, AV_LOG_DEBUG, "P");
2142 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2143 av_log(avctx, AV_LOG_DEBUG, "A");
2144 else if (IS_INTRA4x4(mb_type))
2145 av_log(avctx, AV_LOG_DEBUG, "i");
2146 else if (IS_INTRA16x16(mb_type))
2147 av_log(avctx, AV_LOG_DEBUG, "I");
2148 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2149 av_log(avctx, AV_LOG_DEBUG, "d");
2150 else if (IS_DIRECT(mb_type))
2151 av_log(avctx, AV_LOG_DEBUG, "D");
2152 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2153 av_log(avctx, AV_LOG_DEBUG, "g");
2154 else if (IS_GMC(mb_type))
2155 av_log(avctx, AV_LOG_DEBUG, "G");
2156 else if (IS_SKIP(mb_type))
2157 av_log(avctx, AV_LOG_DEBUG, "S");
2158 else if (!USES_LIST(mb_type, 1))
2159 av_log(avctx, AV_LOG_DEBUG, ">");
2160 else if (!USES_LIST(mb_type, 0))
2161 av_log(avctx, AV_LOG_DEBUG, "<");
2163 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2164 av_log(avctx, AV_LOG_DEBUG, "X");
2166 // Second char: segmentation (8x8/16x8/8x16/16x16 partitioning)
2168 if (IS_8X8(mb_type))
2169 av_log(avctx, AV_LOG_DEBUG, "+");
2170 else if (IS_16X8(mb_type))
2171 av_log(avctx, AV_LOG_DEBUG, "-");
2172 else if (IS_8X16(mb_type))
2173 av_log(avctx, AV_LOG_DEBUG, "|");
2174 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2175 av_log(avctx, AV_LOG_DEBUG, " ");
2177 av_log(avctx, AV_LOG_DEBUG, "?");
2180 if (IS_INTERLACED(mb_type))
2181 av_log(avctx, AV_LOG_DEBUG, "=");
2183 av_log(avctx, AV_LOG_DEBUG, " ");
2186 av_log(avctx, AV_LOG_DEBUG, "\n");
/* --- visual overlay into the picture --- */
2190 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2191 (avctx->debug_mv)) {
2192 const int shift = 1 + quarter_sample;
2196 int h_chroma_shift, v_chroma_shift, block_height;
2197 const int width = avctx->width;
2198 const int height = avctx->height;
/* H.264/SVQ3 store MVs at 4x4 granularity (log2=2), others at 8x8. */
2199 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2200 const int mv_stride = (mb_width << mv_sample_log2) +
2201 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2203 *low_delay = 0; // needed to see the vectors without trashing the buffers
2205 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2207 av_frame_make_writable(pict);
2209 pict->opaque = NULL;
2210 ptr = pict->data[0];
2211 block_height = 16 >> v_chroma_shift;
2213 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2215 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2216 const int mb_index = mb_x + mb_y * mb_stride;
/* Motion-vector arrows: type 0 = P forward, 1 = B forward,
 * 2 = B backward; each checked against its debug_mv flag. */
2217 if ((avctx->debug_mv) && motion_val[0]) {
2219 for (type = 0; type < 3; type++) {
2223 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2224 (pict->pict_type!= AV_PICTURE_TYPE_P))
2229 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2230 (pict->pict_type!= AV_PICTURE_TYPE_B))
2235 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2236 (pict->pict_type!= AV_PICTURE_TYPE_B))
2241 if (!USES_LIST(mbtype_table[mb_index], direction))
/* One arrow per partition: 4 for 8x8, 2 for 16x8/8x16, else 1. */
2244 if (IS_8X8(mbtype_table[mb_index])) {
2246 for (i = 0; i < 4; i++) {
2247 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2248 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2249 int xy = (mb_x * 2 + (i & 1) +
2250 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2251 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2252 int my = (motion_val[direction][xy][1] >> shift) + sy;
2253 draw_arrow(ptr, sx, sy, mx, my, width,
2254 height, pict->linesize[0], 100, 0, direction);
2256 } else if (IS_16X8(mbtype_table[mb_index])) {
2258 for (i = 0; i < 2; i++) {
2259 int sx = mb_x * 16 + 8;
2260 int sy = mb_y * 16 + 4 + 8 * i;
2261 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2262 int mx = (motion_val[direction][xy][0] >> shift);
2263 int my = (motion_val[direction][xy][1] >> shift);
2265 if (IS_INTERLACED(mbtype_table[mb_index]))
2268 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2269 height, pict->linesize[0], 100, 0, direction);
2271 } else if (IS_8X16(mbtype_table[mb_index])) {
2273 for (i = 0; i < 2; i++) {
2274 int sx = mb_x * 16 + 4 + 8 * i;
2275 int sy = mb_y * 16 + 8;
2276 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2277 int mx = motion_val[direction][xy][0] >> shift;
2278 int my = motion_val[direction][xy][1] >> shift;
2280 if (IS_INTERLACED(mbtype_table[mb_index]))
2283 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2284 height, pict->linesize[0], 100, 0, direction);
2287 int sx= mb_x * 16 + 8;
2288 int sy= mb_y * 16 + 8;
2289 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2290 int mx= (motion_val[direction][xy][0]>>shift) + sx;
2291 int my= (motion_val[direction][xy][1]>>shift) + sy;
2292 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
/* QP visualization: scale QP (max 31) to a gray chroma level. */
2296 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2297 uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2298 0x0101010101010101ULL;
2300 for (y = 0; y < block_height; y++) {
2301 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2302 (block_height * mb_y + y) *
2303 pict->linesize[1]) = c;
2304 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2305 (block_height * mb_y + y) *
2306 pict->linesize[2]) = c;
/* MB-type visualization: chroma hue per type, luma marks for
 * partitioning and interlacing. */
2309 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2311 int mb_type = mbtype_table[mb_index];
2314 #define COLOR(theta, r) \
2315 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2316 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2320 if (IS_PCM(mb_type)) {
2322 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2323 IS_INTRA16x16(mb_type)) {
2325 } else if (IS_INTRA4x4(mb_type)) {
2327 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2329 } else if (IS_DIRECT(mb_type)) {
2331 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2333 } else if (IS_GMC(mb_type)) {
2335 } else if (IS_SKIP(mb_type)) {
2337 } else if (!USES_LIST(mb_type, 1)) {
2339 } else if (!USES_LIST(mb_type, 0)) {
2342 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2346 u *= 0x0101010101010101ULL;
2347 v *= 0x0101010101010101ULL;
2348 for (y = 0; y < block_height; y++) {
2349 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2350 (block_height * mb_y + y) * pict->linesize[1]) = u;
2351 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2352 (block_height * mb_y + y) * pict->linesize[2]) = v;
2354 // segmentation marks: XOR luma on partition boundaries
2356 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2357 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2358 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2359 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2360 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2362 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2363 for (y = 0; y < 16; y++)
2364 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2365 pict->linesize[0]] ^= 0x80;
2367 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2368 int dm = 1 << (mv_sample_log2 - 2);
2369 for (i = 0; i < 4; i++) {
2370 int sx = mb_x * 16 + 8 * (i & 1);
2371 int sy = mb_y * 16 + 8 * (i >> 1);
2372 int xy = (mb_x * 2 + (i & 1) +
2373 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2374 // sub-8x8 partitions: mark where neighbor MVs differ
2375 int32_t *mv = (int32_t *) &motion_val[0][xy];
2376 if (mv[0] != mv[dm] ||
2377 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2378 for (y = 0; y < 8; y++)
2379 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2380 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2381 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2382 pict->linesize[0]) ^= 0x8080808080808080ULL;
2386 if (IS_INTERLACED(mb_type) &&
2387 avctx->codec->id == AV_CODEC_ID_H264) {
2388 // hmm
/* Reset the skip count once the MB has been visualized. */
2391 mbskip_table[mb_index] = 0;
/* Convenience wrapper: forwards the MpegEncContext's tables and geometry
 * to ff_print_debug_info2(). */
2397 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2399 ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
2400 p->qscale_table, p->motion_val, &s->low_delay,
2401 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/* Export the per-macroblock qscale table of Picture p into AVFrame f.
 * Takes a fresh reference on the qscale buffer, skips the internal
 * edge padding (2*mb_stride + 1 entries) so the exported data starts
 * at the first real MB, then hands ownership of the adjusted ref to
 * av_frame_set_qp_table().
 * Returns 0 on success, AVERROR(ENOMEM) if the buffer ref failed. */
2404 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2406 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
// offset past the edge rows/columns used only internally by the decoder
2407 int offset = 2*s->mb_stride + 1;
/* NOTE(review): the "if (!ref)" guard (original line 2408) is missing
 * from this extraction; the return below is that guard's body. */
2409 return AVERROR(ENOMEM);
// sanity: the buffer must cover the visible MB rows after the offset
2410 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2411 ref->size -= offset;
2412 ref->data += offset;
2413 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/* Half-pel motion compensation of a single w x h block at reduced
 * ("lowres") resolution.  The H.264 chroma MC functions are used as a
 * generic bilinear interpolator; sx/sy carry the sub-pel phase.
 * Falls back to emulated_edge_mc() when the source block would read
 * outside the padded picture edges.
 * NOTE(review): this listing has gaps (original line numbers jump), so
 * some statements/braces are missing from this extraction. */
2416 static inline int hpel_motion_lowres(MpegEncContext *s,
2417 uint8_t *dest, uint8_t *src,
2418 int field_based, int field_select,
2419 int src_x, int src_y,
2420 int width, int height, ptrdiff_t stride,
2421 int h_edge_pos, int v_edge_pos,
2422 int w, int h, h264_chroma_mc_func *pix_op,
2423 int motion_x, int motion_y)
2425 const int lowres = s->avctx->lowres;
// clamp: the chroma MC table only has entries for lowres 0..3
2426 const int op_index = FFMIN(lowres, 3);
// sub-pel mask: lowres 0 -> 1 (half-pel), lowres 1 -> 3, ...
2427 const int s_mask = (2 << lowres) - 1;
2431 if (s->quarter_sample) {
2436 sx = motion_x & s_mask;
2437 sy = motion_y & s_mask;
/* precedence note: '+' binds tighter than '>>', so this shifts by
 * (lowres + 1) — half-pel MV units down to lowres integer pixels. */
2438 src_x += motion_x >> lowres + 1;
2439 src_y += motion_y >> lowres + 1;
2441 src += src_y * stride + src_x;
// edge emulation when the (possibly interpolated) read overruns the pad
2443 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2444 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2445 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2446 s->linesize, s->linesize,
2447 w + 1, (h + 1) << field_based,
2448 src_x, src_y << field_based,
2449 h_edge_pos, v_edge_pos);
2450 src = s->edge_emu_buffer;
// rescale sub-pel phase to the 1/8-pel range expected by pix_op
2454 sx = (sx << 2) >> lowres;
2455 sy = (sy << 2) >> lowres;
2458 pix_op[op_index](dest, src, stride, h, sx, sy);
2462 /* apply one mpeg motion vector to the three components */
/* Lowres motion compensation of one macroblock: computes luma and
 * chroma source positions/sub-pel phases from (motion_x, motion_y),
 * handles the per-format chroma MV derivation (H.263, H.261, 420/422),
 * emulates picture edges when needed and runs pix_op on Y, Cb, Cr.
 * NOTE(review): the listing is gappy (original line numbers jump), so
 * several parameters, statements and braces are missing here. */
2463 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2470 uint8_t **ref_picture,
2471 h264_chroma_mc_func *pix_op,
2472 int motion_x, int motion_y,
2475 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2476 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2477 ptrdiff_t uvlinesize, linesize;
2478 const int lowres = s->avctx->lowres;
// chroma MC table index; depends on horizontal chroma subsampling
2479 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
// block size in lowres pixels (8, 4, 2 or 1)
2480 const int block_s = 8>>lowres;
2481 const int s_mask = (2 << lowres) - 1;
2482 const int h_edge_pos = s->h_edge_pos >> lowres;
2483 const int v_edge_pos = s->v_edge_pos >> lowres;
// field pictures double the line stride
2484 linesize = s->current_picture.f->linesize[0] << field_based;
2485 uvlinesize = s->current_picture.f->linesize[1] << field_based;
2487 // FIXME obviously not perfect but qpel will not work in lowres anyway
2488 if (s->quarter_sample) {
// compensate the vertical field offset at reduced resolution
2494 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2497 sx = motion_x & s_mask;
2498 sy = motion_y & s_mask;
/* precedence: '+' binds tighter than '>>' -> shift by (lowres + 1) */
2499 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2500 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
// per-format chroma motion vector derivation
2502 if (s->out_format == FMT_H263) {
2503 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2504 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2505 uvsrc_x = src_x >> 1;
2506 uvsrc_y = src_y >> 1;
2507 } else if (s->out_format == FMT_H261) {
2508 // even chroma mv's are full pel in H261
2511 uvsx = (2 * mx) & s_mask;
2512 uvsy = (2 * my) & s_mask;
2513 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2514 uvsrc_y = mb_y * block_s + (my >> lowres);
// MPEG-style chroma: depends on 420 vs 422 vs 444 subsampling
2516 if(s->chroma_y_shift){
2521 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2522 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2524 if(s->chroma_x_shift){
2528 uvsy = motion_y & s_mask;
2530 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2533 uvsx = motion_x & s_mask;
2534 uvsy = motion_y & s_mask;
2541 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2542 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2543 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
// edge emulation for luma (and, below, chroma unless CODEC_FLAG_GRAY)
2545 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2546 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2547 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2548 linesize >> field_based, linesize >> field_based,
2549 17, 17 + field_based,
2550 src_x, src_y << field_based, h_edge_pos,
2552 ptr_y = s->edge_emu_buffer;
2553 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
// chroma scratch areas live right after the 17/18-line luma area
2554 uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2555 uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2556 s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2557 uvlinesize >> field_based, uvlinesize >> field_based,
2559 uvsrc_x, uvsrc_y << field_based,
2560 h_edge_pos >> 1, v_edge_pos >> 1);
2561 s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2562 uvlinesize >> field_based,uvlinesize >> field_based,
2564 uvsrc_x, uvsrc_y << field_based,
2565 h_edge_pos >> 1, v_edge_pos >> 1);
2571 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
// bottom field: start destination/source one line down
2573 dest_y += s->linesize;
2574 dest_cb += s->uvlinesize;
2575 dest_cr += s->uvlinesize;
2579 ptr_y += s->linesize;
2580 ptr_cb += s->uvlinesize;
2581 ptr_cr += s->uvlinesize;
// rescale sub-pel phases to 1/8-pel for the chroma MC kernels
2584 sx = (sx << 2) >> lowres;
2585 sy = (sy << 2) >> lowres;
2586 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2588 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
// chroma block height: halved for 4:2:0, adjusted for bottom field
2589 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2590 uvsx = (uvsx << 2) >> lowres;
2591 uvsy = (uvsy << 2) >> lowres;
2593 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2594 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2597 // FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV (four 8x8 luma vectors) mode at
 * lowres: a single chroma MV is derived with the special H.263
 * rounding, then one block_s-sized block is interpolated for each of
 * Cb and Cr, with edge emulation as needed.
 * NOTE(review): the listing is gappy (original line numbers jump);
 * some declarations/braces are missing from this extraction. */
2600 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2601 uint8_t *dest_cb, uint8_t *dest_cr,
2602 uint8_t **ref_picture,
2603 h264_chroma_mc_func * pix_op,
2606 const int lowres = s->avctx->lowres;
2607 const int op_index = FFMIN(lowres, 3);
2608 const int block_s = 8 >> lowres;
2609 const int s_mask = (2 << lowres) - 1;
/* precedence: '+' binds tighter than '>>' -> edges shifted by
 * (lowres + 1), i.e. chroma resolution at this lowres level */
2610 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2611 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2612 int emu = 0, src_x, src_y, sx, sy;
2616 if (s->quarter_sample) {
2621 /* In case of 8X8, we construct a single chroma motion vector
2622 with a special rounding */
2623 mx = ff_h263_round_chroma(mx);
2624 my = ff_h263_round_chroma(my);
2628 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2629 src_y = s->mb_y * block_s + (my >> lowres + 1);
2631 offset = src_y * s->uvlinesize + src_x;
2632 ptr = ref_picture[1] + offset;
2633 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2634 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2635 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2636 s->uvlinesize, s->uvlinesize,
2638 src_x, src_y, h_edge_pos, v_edge_pos);
2639 ptr = s->edge_emu_buffer;
// 1/8-pel phase for the chroma MC kernel
2642 sx = (sx << 2) >> lowres;
2643 sy = (sy << 2) >> lowres;
2644 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
// Cr uses the same offset/emulation decision as Cb
2646 ptr = ref_picture[2] + offset;
2648 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2649 s->uvlinesize, s->uvlinesize,
2651 src_x, src_y, h_edge_pos, v_edge_pos);
2652 ptr = s->edge_emu_buffer;
2654 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2658 * motion compensation of a single macroblock
2660 * @param dest_y luma destination pointer
2661 * @param dest_cb chroma cb/u destination pointer
2662 * @param dest_cr chroma cr/v destination pointer
2663 * @param dir direction (0->forward, 1->backward)
2664 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2665 * @param pix_op halfpel motion compensation function (average or put normally)
2666 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Dispatches on s->mv_type (16x16, 8x8/4MV, field, 16x8, dual-prime)
 * and calls the appropriate lowres MC helper(s) above.
 * NOTE(review): the listing is gappy (original line numbers jump); the
 * switch labels and several braces are missing from this extraction. */
2668 static inline void MPV_motion_lowres(MpegEncContext *s,
2669 uint8_t *dest_y, uint8_t *dest_cb,
2671 int dir, uint8_t **ref_picture,
2672 h264_chroma_mc_func *pix_op)
2676 const int lowres = s->avctx->lowres;
2677 const int block_s = 8 >>lowres;
2682 switch (s->mv_type) {
// MV_TYPE_16X16: one vector for the whole macroblock
2684 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2686 ref_picture, pix_op,
2687 s->mv[dir][0][0], s->mv[dir][0][1],
// MV_TYPE_8X8 (4MV): one vector per 8x8 luma block
2693 for (i = 0; i < 4; i++) {
2694 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2695 s->linesize) * block_s,
2696 ref_picture[0], 0, 0,
2697 (2 * mb_x + (i & 1)) * block_s,
2698 (2 * mb_y + (i >> 1)) * block_s,
2699 s->width, s->height, s->linesize,
2700 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2701 block_s, block_s, pix_op,
2702 s->mv[dir][i][0], s->mv[dir][i][1]);
// accumulate the luma MVs for the derived chroma vector
2704 mx += s->mv[dir][i][0];
2705 my += s->mv[dir][i][1];
2708 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2709 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
// MV_TYPE_FIELD: two vectors, one per field
2713 if (s->picture_structure == PICT_FRAME) {
2715 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2716 1, 0, s->field_select[dir][0],
2717 ref_picture, pix_op,
2718 s->mv[dir][0][0], s->mv[dir][0][1],
2721 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2722 1, 1, s->field_select[dir][1],
2723 ref_picture, pix_op,
2724 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture: a same-parity reference in a non-B, non-first
 * field lives in the current picture, not the reference */
2727 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2728 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2729 ref_picture = s->current_picture_ptr->f->data;
2732 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2733 0, 0, s->field_select[dir][0],
2734 ref_picture, pix_op,
2736 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
// MV_TYPE_16X8: two vectors, upper and lower 16x8 halves
2740 for (i = 0; i < 2; i++) {
2741 uint8_t **ref2picture;
2743 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2744 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2745 ref2picture = ref_picture;
2747 ref2picture = s->current_picture_ptr->f->data;
2750 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2751 0, 0, s->field_select[dir][i],
2752 ref2picture, pix_op,
2753 s->mv[dir][i][0], s->mv[dir][i][1] +
2754 2 * block_s * i, block_s, mb_y >> 1);
2756 dest_y += 2 * block_s * s->linesize;
2757 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2758 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
// MV_TYPE_DMV (dual prime): put first prediction, then average
2762 if (s->picture_structure == PICT_FRAME) {
2763 for (i = 0; i < 2; i++) {
2765 for (j = 0; j < 2; j++) {
2766 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2768 ref_picture, pix_op,
2769 s->mv[dir][2 * i + j][0],
2770 s->mv[dir][2 * i + j][1],
2773 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2776 for (i = 0; i < 2; i++) {
2777 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2778 0, 0, s->picture_structure != i + 1,
2779 ref_picture, pix_op,
2780 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2781 2 * block_s, mb_y >> 1);
2783 // after put we make avg of the same block
2784 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2786 // opposite parity is always in the same
2787 // frame if this is second field
2788 if (!s->first_field) {
2789 ref_picture = s->current_picture_ptr->f->data;
2800 * find the lowest MB row referenced in the MVs
/* Used by frame-threaded decoding to know how far the reference frame
 * must have been decoded before this MB can be motion-compensated.
 * Returns the clamped lowest referenced MB row, or mb_height-1 when it
 * cannot be determined (field pictures / GMC).
 * NOTE(review): the switch cases selecting "mvs" per mv_type (original
 * lines 2810..2823) are missing from this extraction. */
2802 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
// half-pel MVs are scaled up by 1 bit so all math is in qpel units
2804 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2805 int my, off, i, mvs;
// conservative fallback for field pictures and GMC
2807 if (s->picture_structure != PICT_FRAME || s->mcsel)
2810 switch (s->mv_type) {
2824 for (i = 0; i < mvs; i++) {
2825 my = s->mv[dir][i][1]<<qpel_shift;
2826 my_max = FFMAX(my_max, my);
2827 my_min = FFMIN(my_min, my);
/* worst-case vertical reach in MB rows: qpel units / 64, rounded up */
2830 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2832 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2834 return s->mb_height-1;
2837 /* put block[] to dest[] */
/* Intra path: dequantize the coefficients, then IDCT and *store*
 * (overwrite) the result at dest.  Braces are missing from this
 * extraction (original lines 2840/2843). */
2838 static inline void put_dct(MpegEncContext *s,
2839 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2841 s->dct_unquantize_intra(s, block, i, qscale);
2842 s->idsp.idct_put(dest, line_size, block);
2845 /* add block[] to dest[] */
/* Inter path (already dequantized): IDCT and *add* the residue to the
 * prediction at dest, but only if the block has any coefficients
 * (block_last_index >= 0).  Closing braces are missing from this
 * extraction. */
2846 static inline void add_dct(MpegEncContext *s,
2847 int16_t *block, int i, uint8_t *dest, int line_size)
2849 if (s->block_last_index[i] >= 0) {
2850 s->idsp.idct_add(dest, line_size, block);
/* Inter path with inline dequantization: skip empty blocks, otherwise
 * dequantize with the inter quantizer and add the IDCT residue to the
 * prediction at dest.  Closing braces are missing from this
 * extraction. */
2854 static inline void add_dequant_dct(MpegEncContext *s,
2855 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2857 if (s->block_last_index[i] >= 0) {
2858 s->dct_unquantize_inter(s, block, i, qscale);
2860 s->idsp.idct_add(dest, line_size, block);
2865 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra prediction state (luma + chroma DC predictors to
 * 1024, AC predictor rows to 0, MSMPEG4 coded_block flags) for the MB
 * at (s->mb_x, s->mb_y), and clears its mbintra_table entry.
 * NOTE(review): gappy extraction — some assignments (e.g. dc_val[0][xy]
 * and dc_val[1]) fall in the missing lines. */
2867 void ff_clean_intra_table_entries(MpegEncContext *s)
2869 int wrap = s->b8_stride;
2870 int xy = s->block_index[0];
// luma DC predictors for the four 8x8 blocks reset to the midpoint
2873 s->dc_val[0][xy + 1 ] =
2874 s->dc_val[0][xy + wrap] =
2875 s->dc_val[0][xy + 1 + wrap] = 1024;
/* ac pred: two rows of two 8x8 blocks each, 16 int16 per block */
2877 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2878 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2879 if (s->msmpeg4_version>=3) {
2880 s->coded_block[xy ] =
2881 s->coded_block[xy + 1 ] =
2882 s->coded_block[xy + wrap] =
2883 s->coded_block[xy + 1 + wrap] = 0;
// switch to MB-granularity indexing for the chroma predictors
2886 wrap = s->mb_stride;
2887 xy = s->mb_x + s->mb_y * wrap;
2889 s->dc_val[2][xy] = 1024;
2891 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2892 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2894 s->mbintra_table[xy]= 0;
2897 /* generic function called after a macroblock has been parsed by the
2898 decoder or after it has been encoded by the encoder.
2900 Important variables used:
2901 s->mb_intra : true if intra macroblock
2902 s->mv_dir : motion vector direction
2903 s->mv_type : motion vector type
2904 s->mv : motion vector
2905 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Templated via av_always_inline on (lowres_flag, is_mpeg12) so the
 * compiler specializes four variants; see ff_MPV_decode_mb() below.
 * NOTE(review): the listing is gappy (original line numbers jump), so
 * many statements/braces of this large function are missing here. */
2907 static av_always_inline
2908 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2909 int lowres_flag, int is_mpeg12)
2911 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
// hardware acceleration: hand the MB to the hwaccel and return early
2914 s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
2915 s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
2919 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2920 /* print DCT coefficients */
2922 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2924 for(j=0; j<64; j++){
2925 av_log(s->avctx, AV_LOG_DEBUG, "%5d",
2926 block[i][s->idsp.idct_permutation[j]]);
2928 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2932 s->current_picture.qscale_table[mb_xy] = s->qscale;
2934 /* update DC predictors for P macroblocks */
2936 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2937 if(s->mbintra_table[mb_xy])
2938 ff_clean_intra_table_entries(s);
/* NOTE(review): the resets of last_dc[0]/last_dc[1] are in the
 * missing lines preceding this one */
2942 s->last_dc[2] = 128 << s->intra_dc_precision;
2945 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2946 s->mbintra_table[mb_xy]=1;
/* reconstruct the MB unless we are encoding and can provably skip it */
2948 if ( (s->flags&CODEC_FLAG_PSNR)
2949 || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
2950 || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2951 uint8_t *dest_y, *dest_cb, *dest_cr;
2952 int dct_linesize, dct_offset;
2953 op_pixels_func (*op_pix)[4];
2954 qpel_mc_func (*op_qpix)[16];
2955 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2956 const int uvlinesize = s->current_picture.f->linesize[1];
2957 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2958 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2960 /* avoid copy if macroblock skipped in last frame too */
2961 /* skip only during decoding as we might trash the buffers during encoding a bit */
2963 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2965 if (s->mb_skipped) {
2967 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2969 } else if(!s->current_picture.reference) {
2972 *mbskip_ptr = 0; /* not skipped */
// interlaced DCT: double line stride, offset one line instead of 8
2976 dct_linesize = linesize << s->interlaced_dct;
2977 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2981 dest_cb= s->dest[1];
2982 dest_cr= s->dest[2];
// non-readable B-frame path: reconstruct into the scratchpad instead
2984 dest_y = s->b_scratchpad;
2985 dest_cb= s->b_scratchpad+16*linesize;
2986 dest_cr= s->b_scratchpad+32*linesize;
2990 /* motion handling */
2991 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the referenced rows of the reference
 * frames have been decoded */
2994 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2995 if (s->mv_dir & MV_DIR_FORWARD) {
2996 ff_thread_await_progress(&s->last_picture_ptr->tf,
2997 ff_MPV_lowest_referenced_row(s, 0),
3000 if (s->mv_dir & MV_DIR_BACKWARD) {
3001 ff_thread_await_progress(&s->next_picture_ptr->tf,
3002 ff_MPV_lowest_referenced_row(s, 1),
/* lowres MC path: uses the H.264 chroma MC kernels as interpolator;
 * bidirectional prediction puts forward then averages backward */
3008 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
3010 if (s->mv_dir & MV_DIR_FORWARD) {
3011 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
3012 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
3014 if (s->mv_dir & MV_DIR_BACKWARD) {
3015 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
// full-resolution MC path
3018 op_qpix = s->me.qpel_put;
3019 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
3020 op_pix = s->hdsp.put_pixels_tab;
3022 op_pix = s->hdsp.put_no_rnd_pixels_tab;
3024 if (s->mv_dir & MV_DIR_FORWARD) {
3025 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
3026 op_pix = s->hdsp.avg_pixels_tab;
3027 op_qpix= s->me.qpel_avg;
3029 if (s->mv_dir & MV_DIR_BACKWARD) {
3030 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
3035 /* skip dequant / idct if we are really late ;) */
3036 if(s->avctx->skip_idct){
3037 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
3038 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
3039 || s->avctx->skip_idct >= AVDISCARD_ALL)
3043 /* add dct residue */
/* codecs that do not dequantize during parsing: dequant + idct_add */
3044 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
3045 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
3046 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3047 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3048 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3049 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3051 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3052 if (s->chroma_y_shift){
3053 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3054 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
// 4:2:2: two chroma blocks per component, interlaced-aware stride
3058 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3059 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3060 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3061 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* already-dequantized codecs (mpeg1/2, msmpeg4, mpeg4 h263-quant) */
3064 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3065 add_dct(s, block[0], 0, dest_y , dct_linesize);
3066 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3067 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3068 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3070 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3071 if(s->chroma_y_shift){//Chroma420
3072 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3073 add_dct(s, block[5], 5, dest_cr, uvlinesize);
3076 dct_linesize = uvlinesize << s->interlaced_dct;
3077 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3079 add_dct(s, block[4], 4, dest_cb, dct_linesize);
3080 add_dct(s, block[5], 5, dest_cr, dct_linesize);
3081 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3082 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3083 if(!s->chroma_x_shift){//Chroma444
3084 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3085 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3086 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3087 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3092 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3093 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3096 /* dct only in intra block */
/* intra path: either dequant+idct_put (h263-family) or plain
 * idct_put (mpeg1/2 dequantize while parsing) */
3097 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
3098 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3099 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3100 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3101 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3103 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3104 if(s->chroma_y_shift){
3105 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3106 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3110 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3111 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3112 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3113 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
3117 s->idsp.idct_put(dest_y, dct_linesize, block[0]);
3118 s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3119 s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
3120 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3122 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3123 if(s->chroma_y_shift){
3124 s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
3125 s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
3128 dct_linesize = uvlinesize << s->interlaced_dct;
3129 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3131 s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
3132 s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
3133 s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3134 s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3135 if(!s->chroma_x_shift){//Chroma444
3136 s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3137 s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3138 s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3139 s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
// scratchpad path: copy the reconstructed MB into the real picture
3147 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3148 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3149 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatches to the statically specialized
 * MPV_decode_mb_internal() variants on (lowres, is_mpeg12) so each
 * combination is compiled with its branches resolved.
 * NOTE(review): closing braces are missing from this extraction. */
3154 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
3156 if(s->out_format == FMT_MPEG1) {
3157 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
3158 else MPV_decode_mb_internal(s, block, 0, 1);
3161 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
3162 else MPV_decode_mb_internal(s, block, 0, 0);
/* Wrapper around ff_draw_horiz_band(): reports h newly decoded lines
 * starting at y for the current picture (previous picture passed for
 * B-frame/low-delay handling inside the helper). */
3165 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3167 ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3168 s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3169 s->first_field, s->low_delay);
/* Computes s->block_index[0..5] (4 luma 8x8 blocks + 2 chroma blocks)
 * and the s->dest[] plane pointers for the macroblock at
 * (s->mb_x, s->mb_y), honouring lowres (mb_size) and field pictures.
 * NOTE(review): gappy extraction — some braces/lines are missing. */
3172 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3173 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3174 const int uvlinesize = s->current_picture.f->linesize[1];
// log2 of MB size in pixels: 16 at full res, halved per lowres step
3175 const int mb_size= 4 - s->avctx->lowres;
3177 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3178 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3179 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3180 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
// chroma blocks live in a separate region after the luma b8 grid
3181 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3182 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3183 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* dest pointers start one MB to the left; the caller advances them */
3185 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
3186 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3187 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3189 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3191 if(s->picture_structure==PICT_FRAME){
3192 s->dest[0] += s->mb_y * linesize << mb_size;
3193 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3194 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
// field picture: rows are interleaved, so advance by mb_y/2 MB rows
3196 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3197 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3198 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3199 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3205 * Permute an 8x8 block.
3206 * @param block the block which will be permuted according to the given permutation vector
3207 * @param permutation the permutation vector
3208 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3209 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3210 * (inverse) permutated to scantable order!
/* NOTE(review): gappy extraction — the temp[] declaration, the first
 * loop body and intermediate lines are missing; the second loop copies
 * the saved coefficients back through the permutation. */
3212 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3218 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
// first pass: save (and clear) the up-to-'last' coefficients
3220 for(i=0; i<=last; i++){
3221 const int j= scantable[i];
// second pass: write them back at their permuted positions
3226 for(i=0; i<=last; i++){
3227 const int j= scantable[i];
3228 const int perm_j= permutation[j];
3229 block[perm_j]= temp[j];
/* Flush callback: drops all picture references and resets the parser
 * state so decoding can restart cleanly (e.g. after a seek).
 * NOTE(review): gappy extraction — the early-return body and some
 * braces are missing from this listing. */
3233 void ff_mpeg_flush(AVCodecContext *avctx){
3235 MpegEncContext *s = avctx->priv_data;
// nothing to flush if the context was never fully initialized
3237 if(s==NULL || s->picture==NULL)
3240 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3241 ff_mpeg_unref_picture(s, &s->picture[i]);
3242 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3244 ff_mpeg_unref_picture(s, &s->current_picture);
3245 ff_mpeg_unref_picture(s, &s->last_picture);
3246 ff_mpeg_unref_picture(s, &s->next_picture);
3248 s->mb_x= s->mb_y= 0;
// reset bitstream parser state
3251 s->parse_context.state= -1;
3252 s->parse_context.frame_start_found= 0;
3253 s->parse_context.overread= 0;
3254 s->parse_context.overread_index= 0;
3255 s->parse_context.index= 0;
3256 s->parse_context.last_index= 0;
3257 s->bitstream_buffer_size=0;
3262 * set qscale and update qscale dependent variables.
/* Clamps qscale to [1, 31] and refreshes the derived chroma qscale and
 * luma/chroma DC scale values from the per-codec tables.
 * NOTE(review): gappy extraction — the lower-bound clamp and the
 * s->qscale assignment are in the missing lines. */
3264 void ff_set_qscale(MpegEncContext * s, int qscale)
3268 else if (qscale > 31)
3272 s->chroma_qscale= s->chroma_qscale_table[qscale];
3274 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3275 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Frame threading: report that decoding of the current picture has
 * progressed to MB row s->mb_y, but only when other threads can rely
 * on it (not for B-frames, partitioned frames, or after an error). */
3278 void ff_MPV_report_decode_progress(MpegEncContext *s)
3280 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3281 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);