2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/motion_vector.h"
35 #include "libavutil/timer.h"
38 #include "h264chroma.h"
42 #include "mpegutils.h"
43 #include "mpegvideo.h"
/* Identity luma->chroma qscale mapping (chroma qscale == luma qscale) used
 * when a codec defines no separate chroma quantiser table.
 * NOTE(review): this excerpt has non-contiguous source line numbers; the
 * closing "};" of this array is not visible here. */
50 static const uint8_t ff_default_chroma_qscale_table[32] = {
51 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
52 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
53 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale table: constant 8 for every qscale (DC quantiser does not
 * depend on qscale in MPEG-1). Indexed by qscale, 128 entries.
 * NOTE(review): closing "};" not visible in this lossy excerpt. */
56 const uint8_t ff_mpeg1_dc_scale_table[128] = {
57 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
65 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table for intra_dc_precision giving divisor 4
 * (all-4 table; selected via ff_mpeg2_dc_scale_table[] below).
 * NOTE(review): closing "};" not visible in this lossy excerpt. */
68 static const uint8_t mpeg2_dc_scale_table1[128] = {
69 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
77 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table, divisor 2 variant (companion of table1/table3).
 * NOTE(review): closing "};" not visible in this lossy excerpt. */
80 static const uint8_t mpeg2_dc_scale_table2[128] = {
81 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
89 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table, divisor 1 variant (highest DC precision).
 * NOTE(review): closing "};" not visible in this lossy excerpt. */
92 static const uint8_t mpeg2_dc_scale_table3[128] = {
93 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale table selector, indexed by intra_dc_precision (0..3):
 * 0 -> /8 (MPEG-1 table), 1 -> /4, 2 -> /2, 3 -> /1.
 * NOTE(review): closing "};" not visible in this lossy excerpt. */
104 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
105 ff_mpeg1_dc_scale_table,
106 mpeg2_dc_scale_table1,
107 mpeg2_dc_scale_table2,
108 mpeg2_dc_scale_table3,
/* Alternate horizontal scan order (64 coefficient positions); used by some
 * codecs instead of zigzag. NOTE(review): closing "};" not visible here. */
111 const uint8_t ff_alternate_horizontal_scan[64] = {
112 0, 1, 2, 3, 8, 9, 16, 17,
113 10, 11, 4, 5, 6, 7, 15, 14,
114 13, 12, 19, 18, 24, 25, 32, 33,
115 26, 27, 20, 21, 22, 23, 28, 29,
116 30, 31, 34, 35, 40, 41, 48, 49,
117 42, 43, 36, 37, 38, 39, 44, 45,
118 46, 47, 50, 51, 56, 57, 58, 59,
119 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical scan order (64 positions); selected for interlaced /
 * alternate-scan material (see ff_mpv_idct_init below).
 * NOTE(review): closing "};" not visible in this lossy excerpt. */
122 const uint8_t ff_alternate_vertical_scan[64] = {
123 0, 8, 16, 24, 1, 9, 2, 10,
124 17, 25, 32, 40, 48, 56, 57, 49,
125 41, 33, 26, 18, 3, 11, 4, 12,
126 19, 27, 34, 42, 50, 58, 35, 43,
127 51, 59, 20, 28, 5, 13, 6, 14,
128 21, 29, 36, 44, 52, 60, 37, 45,
129 53, 61, 22, 30, 7, 15, 23, 31,
130 38, 46, 54, 62, 39, 47, 55, 63,
/* Dequantize an intra block in place, MPEG-1 rules: DC (block[0]) is scaled
 * by the luma/chroma DC scale; AC levels by level*qscale*matrix >> 3 with
 * mismatch control ((level - 1) | 1).
 * NOTE(review): original line numbers here are non-contiguous — the
 * sign-dependent if/else around the two duplicated scaling statements (and
 * the closing braces) are missing from this excerpt; do not read the visible
 * statements as sequential. */
133 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
134 int16_t *block, int n, int qscale)
136 int i, level, nCoeffs;
137 const uint16_t *quant_matrix;
139 nCoeffs= s->block_last_index[n];
141 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
142 /* XXX: only mpeg1 */
143 quant_matrix = s->intra_matrix;
144 for(i=1;i<=nCoeffs;i++) {
145 int j= s->intra_scantable.permutated[i];
150 level = (int)(level * qscale * quant_matrix[j]) >> 3;
151 level = (level - 1) | 1;
154 level = (int)(level * qscale * quant_matrix[j]) >> 3;
155 level = (level - 1) | 1;
/* Dequantize an inter block in place, MPEG-1 rules:
 * ((2*level + 1) * qscale * matrix) >> 4 with mismatch control.
 * Uses intra_scantable.permutated for the coefficient order (the permutation
 * is the same for intra and inter tables in this codebase).
 * NOTE(review): sign-dependent branches and closing braces are missing from
 * this lossy excerpt (non-contiguous source numbering). */
162 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
163 int16_t *block, int n, int qscale)
165 int i, level, nCoeffs;
166 const uint16_t *quant_matrix;
168 nCoeffs= s->block_last_index[n];
170 quant_matrix = s->inter_matrix;
171 for(i=0; i<=nCoeffs; i++) {
172 int j= s->intra_scantable.permutated[i];
177 level = (((level << 1) + 1) * qscale *
178 ((int) (quant_matrix[j]))) >> 4;
179 level = (level - 1) | 1;
182 level = (((level << 1) + 1) * qscale *
183 ((int) (quant_matrix[j]))) >> 4;
184 level = (level - 1) | 1;
/* Dequantize an intra block in place, MPEG-2 rules: like MPEG-1 intra but
 * without the (level-1)|1 odd-ification, and all 63 AC coefficients are
 * processed when alternate_scan is set (block_last_index is unreliable then).
 * NOTE(review): sign branches/closing braces missing from this excerpt. */
191 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
192 int16_t *block, int n, int qscale)
194 int i, level, nCoeffs;
195 const uint16_t *quant_matrix;
197 if(s->alternate_scan) nCoeffs= 63;
198 else nCoeffs= s->block_last_index[n];
200 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
201 quant_matrix = s->intra_matrix;
202 for(i=1;i<=nCoeffs;i++) {
203 int j= s->intra_scantable.permutated[i];
208 level = (int)(level * qscale * quant_matrix[j]) >> 3;
211 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of dct_unquantize_mpeg2_intra_c, installed instead of it
 * when CODEC_FLAG_BITEXACT is set (see dct_init below). Presumably it also
 * performs the MPEG-2 mismatch (sum parity) control on the last coefficient —
 * that code is not visible in this lossy excerpt; confirm against the full
 * source. */
218 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
219 int16_t *block, int n, int qscale)
221 int i, level, nCoeffs;
222 const uint16_t *quant_matrix;
225 if(s->alternate_scan) nCoeffs= 63;
226 else nCoeffs= s->block_last_index[n];
228 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
230 quant_matrix = s->intra_matrix;
231 for(i=1;i<=nCoeffs;i++) {
232 int j= s->intra_scantable.permutated[i];
237 level = (int)(level * qscale * quant_matrix[j]) >> 3;
240 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Dequantize an inter block in place, MPEG-2 rules:
 * ((2*level + 1) * qscale * matrix) >> 4, no odd-ification; scans all 63
 * coefficients when alternate_scan is set.
 * NOTE(review): sign branches/closing braces missing from this excerpt. */
249 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
250 int16_t *block, int n, int qscale)
252 int i, level, nCoeffs;
253 const uint16_t *quant_matrix;
256 if(s->alternate_scan) nCoeffs= 63;
257 else nCoeffs= s->block_last_index[n];
259 quant_matrix = s->inter_matrix;
260 for(i=0; i<=nCoeffs; i++) {
261 int j= s->intra_scantable.permutated[i];
266 level = (((level << 1) + 1) * qscale *
267 ((int) (quant_matrix[j]))) >> 4;
270 level = (((level << 1) + 1) * qscale *
271 ((int) (quant_matrix[j]))) >> 4;
/* Dequantize an H.263-style intra block in place: DC scaled by the
 * luma/chroma DC scale, AC levels by level*qmul +/- qadd, with
 * qadd = (qscale-1)|1 (always odd). nCoeffs comes from the raster_end table
 * for block_last_index.
 * NOTE(review): qmul assignment, the sign-dependent if/else and closing
 * braces are missing from this lossy excerpt. */
280 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
281 int16_t *block, int n, int qscale)
283 int i, level, qmul, qadd;
286 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
291 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
292 qadd = (qscale - 1) | 1;
299 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
301 for(i=1; i<=nCoeffs; i++) {
305 level = level * qmul - qadd;
307 level = level * qmul + qadd;
/* Dequantize an H.263-style inter block in place: same qmul/qadd scheme as
 * the intra variant but the loop starts at coefficient 0 (no special DC
 * handling).
 * NOTE(review): qmul assignment, sign-dependent branches and closing braces
 * are missing from this lossy excerpt. */
314 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
315 int16_t *block, int n, int qscale)
317 int i, level, qmul, qadd;
320 av_assert2(s->block_last_index[n]>=0);
322 qadd = (qscale - 1) | 1;
325 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
327 for(i=0; i<=nCoeffs; i++) {
331 level = level * qmul - qadd;
333 level = level * qmul + qadd;
/* Error-resilience callback (installed as er->decode_mb in init_er below):
 * reconstructs one macroblock during concealment. Copies the MV/MB state
 * into the context, recomputes the destination pointers from mb_x/mb_y and
 * the chroma shifts, clears the DCT blocks and calls ff_mpv_decode_mb().
 * NOTE(review): several lines (e.g. the assignments of s->mv_dir, s->mb_x/y
 * and the condition guarding the "interlaced" warning) are missing from this
 * lossy excerpt. */
340 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
342 int mb_x, int mb_y, int mb_intra, int mb_skipped)
344 MpegEncContext *s = opaque;
347 s->mv_type = mv_type;
348 s->mb_intra = mb_intra;
349 s->mb_skipped = mb_skipped;
352 memcpy(s->mv, mv, sizeof(*mv));
354 ff_init_block_index(s);
355 ff_update_block_index(s);
357 s->bdsp.clear_blocks(s->block[0]);
359 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
360 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
361 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
364 av_log(s->avctx, AV_LOG_DEBUG,
365 "Interlaced error concealment is not fully implemented\n");
366 ff_mpv_decode_mb(s, s->block);
/* Debug pixel op (FF_DEBUG_NOMC): fills a 16-pixel-wide row at dst with mid
 * gray (128) instead of performing motion compensation; src is ignored.
 * NOTE(review): the loop over h and closing brace are missing from this
 * lossy excerpt. */
369 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
372 memset(dst + h*linesize, 128, 16);
/* 8-pixel-wide companion of gray16 (chroma planes) for FF_DEBUG_NOMC.
 * NOTE(review): loop/closing brace missing from this lossy excerpt. */
375 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
378 memset(dst + h*linesize, 128, 8);
381 /* init common dct for both encoder and decoder */
/* Initializes the DSP sub-contexts, installs the gray* stubs when
 * FF_DEBUG_NOMC is requested, selects the C dct_unquantize_* implementations
 * (bit-exact MPEG-2 intra variant under CODEC_FLAG_BITEXACT), then lets the
 * per-architecture init functions override them.
 * NOTE(review): the ARCH_*/#if guards around the per-arch init calls and the
 * return statement are missing from this lossy excerpt. */
382 static av_cold int dct_init(MpegEncContext *s)
384 ff_blockdsp_init(&s->bdsp, s->avctx);
385 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
386 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
387 ff_mpegvideodsp_init(&s->mdsp);
388 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
390 if (s->avctx->debug & FF_DEBUG_NOMC) {
392 for (i=0; i<4; i++) {
393 s->hdsp.avg_pixels_tab[0][i] = gray16;
394 s->hdsp.put_pixels_tab[0][i] = gray16;
395 s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
397 s->hdsp.avg_pixels_tab[1][i] = gray8;
398 s->hdsp.put_pixels_tab[1][i] = gray8;
399 s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
403 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
404 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
405 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
406 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
407 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
408 if (s->avctx->flags & CODEC_FLAG_BITEXACT)
409 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
410 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
412 if (HAVE_INTRINSICS_NEON)
413 ff_mpv_common_init_neon(s);
416 ff_mpv_common_init_axp(s);
418 ff_mpv_common_init_arm(s);
420 ff_mpv_common_init_ppc(s);
422 ff_mpv_common_init_x86(s);
/* Initializes the IDCT context and builds the (IDCT-permuted) scantables:
 * inter/intra use the vertical scan when alternate_scan is set, zigzag
 * otherwise; the h/v intra tables are always the alternate scans.
 * NOTE(review): the else keyword between the two branches and the closing
 * brace are missing from this lossy excerpt. */
427 av_cold void ff_mpv_idct_init(MpegEncContext *s)
429 ff_idctdsp_init(&s->idsp, s->avctx);
431 /* load & permutate scantables
432 * note: only wmv uses different ones
434 if (s->alternate_scan) {
435 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
436 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
438 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
439 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
441 ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
442 ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocates the linesize-dependent scratch buffers (edge emulation buffer and
 * the shared ME/RD/B/OBMC scratchpad). Skipped for hwaccel/VDPAU; rejects a
 * too-small linesize with AVERROR_PATCHWELCOME. On allocation failure the
 * edge_emu_buffer is freed and ENOMEM returned.
 * Note the scratchpads deliberately alias one region: me.temp, rd_scratchpad
 * and b_scratchpad all point at me.scratchpad; obmc_scratchpad is offset +16.
 * NOTE(review): the return-0 paths, the "fail:" label and the linesize check
 * condition are missing from this lossy excerpt. */
445 static int frame_size_alloc(MpegEncContext *s, int linesize)
447 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
449 if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
453 av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
454 return AVERROR_PATCHWELCOME;
457 // edge emu needs blocksize + filter length - 1
458 // (= 17x17 for halfpel / 21x21 for h264)
459 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
460 // at uvlinesize. It supports only YUV420 so 24x24 is enough
461 // linesize * interlaced * MBsize
462 // we also use this buffer for encoding in encode_mb_internal() needig an additional 32 lines
463 FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size, 4 * 68,
466 FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size, 4 * 16 * 2,
468 s->me.temp = s->me.scratchpad;
469 s->rd_scratchpad = s->me.scratchpad;
470 s->b_scratchpad = s->me.scratchpad;
471 s->obmc_scratchpad = s->me.scratchpad + 16;
475 av_freep(&s->edge_emu_buffer);
476 return AVERROR(ENOMEM);
480 * Allocate a frame buffer
/* Gets a frame buffer for pic: encoders (except the WMV3/VC1/MSS2 image
 * codecs) over-allocate by 2*EDGE_WIDTH per dimension and then offset
 * data[] so the visible area is edge-padded; decoders go through
 * ff_thread_get_buffer(). Validates that linesize did not change versus the
 * context and that the two chroma strides match, then ensures the
 * linesize-dependent scratch buffers exist (frame_size_alloc).
 * NOTE(review): several control-flow lines (if/else, returns, hwaccel error
 * path, closing braces) are missing from this lossy excerpt. */
482 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
484 int edges_needed = av_codec_is_encoder(s->avctx->codec);
488 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
489 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
490 s->codec_id != AV_CODEC_ID_MSS2) {
492 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
493 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
496 r = ff_thread_get_buffer(s->avctx, &pic->tf,
497 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
499 pic->f->width = s->avctx->width;
500 pic->f->height = s->avctx->height;
501 pic->f->format = s->avctx->pix_fmt;
502 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
505 if (r < 0 || !pic->f->buf[0]) {
506 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
513 for (i = 0; pic->f->data[i]; i++) {
514 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
515 pic->f->linesize[i] +
516 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
517 pic->f->data[i] += offset;
519 pic->f->width = s->avctx->width;
520 pic->f->height = s->avctx->height;
523 if (s->avctx->hwaccel) {
524 assert(!pic->hwaccel_picture_private);
525 if (s->avctx->hwaccel->frame_priv_data_size) {
526 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
527 if (!pic->hwaccel_priv_buf) {
528 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
531 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
535 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
536 s->uvlinesize != pic->f->linesize[1])) {
537 av_log(s->avctx, AV_LOG_ERROR,
538 "get_buffer() failed (stride changed)\n");
539 ff_mpeg_unref_picture(s->avctx, pic);
543 if (pic->f->linesize[1] != pic->f->linesize[2]) {
544 av_log(s->avctx, AV_LOG_ERROR,
545 "get_buffer() failed (uv stride mismatch)\n");
546 ff_mpeg_unref_picture(s->avctx, pic);
550 if (!s->edge_emu_buffer &&
551 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
552 av_log(s->avctx, AV_LOG_ERROR,
553 "get_buffer() failed to allocate context scratch buffers.\n");
554 ff_mpeg_unref_picture(s->avctx, pic);
/* Releases all per-picture side-data buffers (variance/skip/qscale/mb_type
 * tables and both motion_val/ref_index buffers) and resets the recorded
 * allocation dimensions so a later ff_alloc_picture() reallocates them.
 * av_buffer_unref() also NULLs the pointers, so double frees are safe.
 * NOTE(review): the declaration of i and closing braces are missing from
 * this lossy excerpt. */
561 void ff_free_picture_tables(Picture *pic)
565 pic->alloc_mb_width =
566 pic->alloc_mb_height = 0;
568 av_buffer_unref(&pic->mb_var_buf);
569 av_buffer_unref(&pic->mc_mb_var_buf);
570 av_buffer_unref(&pic->mb_mean_buf);
571 av_buffer_unref(&pic->mbskip_table_buf);
572 av_buffer_unref(&pic->qscale_table_buf);
573 av_buffer_unref(&pic->mb_type_buf);
575 for (i = 0; i < 2; i++) {
576 av_buffer_unref(&pic->motion_val_buf[i]);
577 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocates the per-picture side-data buffers sized from the current
 * mb/b8 strides: always mbskip/qscale/mb_type; encoder-only variance and
 * mean tables (the s->encoding guard is not visible in this excerpt);
 * motion vectors and reference indices for H.263-family output, encoding,
 * or when MV export/visualization is requested. Records the mb dimensions
 * used so ff_alloc_picture() can detect a size change.
 * NOTE(review): element-size multipliers, guards and the return statement
 * around several allocations are missing from this lossy excerpt. */
581 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
583 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
584 const int mb_array_size = s->mb_stride * s->mb_height;
585 const int b8_array_size = s->b8_stride * s->mb_height * 2;
589 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
590 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
591 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
593 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
594 return AVERROR(ENOMEM);
597 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
598 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
599 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
600 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
601 return AVERROR(ENOMEM);
604 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv ||
605 (s->avctx->flags2 & CODEC_FLAG2_EXPORT_MVS)) {
606 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
607 int ref_index_size = 4 * mb_array_size;
609 for (i = 0; mv_size && i < 2; i++) {
610 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
611 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
612 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
613 return AVERROR(ENOMEM);
617 pic->alloc_mb_width = s->mb_width;
618 pic->alloc_mb_height = s->mb_height;
/* Makes every per-picture side-data AVBuffer writable (copy-on-write) so the
 * owning context can modify tables that may still be shared with another
 * Picture via av_buffer_ref().
 * NOTE(review): part of the MAKE_WRITABLE macro body (the buffer-existence
 * test and error return) and the final return are missing from this lossy
 * excerpt. */
623 static int make_tables_writable(Picture *pic)
626 #define MAKE_WRITABLE(table) \
629 (ret = av_buffer_make_writable(&pic->table)) < 0)\
633 MAKE_WRITABLE(mb_var_buf);
634 MAKE_WRITABLE(mc_mb_var_buf);
635 MAKE_WRITABLE(mb_mean_buf);
636 MAKE_WRITABLE(mbskip_table_buf);
637 MAKE_WRITABLE(qscale_table_buf);
638 MAKE_WRITABLE(mb_type_buf);
640 for (i = 0; i < 2; i++) {
641 MAKE_WRITABLE(motion_val_buf[i]);
642 MAKE_WRITABLE(ref_index_buf[i]);
649 * Allocate a Picture.
650 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Frees stale tables if the mb dimensions changed, gets the frame buffer
 * (unless shared, in which case data[0] must already be set), records
 * linesize/uvlinesize in the context, (re)allocates the side-data tables and
 * makes them writable, then derives the user-facing pointers — note the
 * "+ 2 * s->mb_stride + 1" offset into qscale_table/mb_type and the "+ 4"
 * offset into motion_val. On any failure the picture is unreferenced, its
 * tables freed, and ENOMEM returned.
 * NOTE(review): the shared/non-shared branching, encoder guard around the
 * variance pointers, several gotos and the success return are missing from
 * this lossy excerpt. */
652 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
656 if (pic->qscale_table_buf)
657 if ( pic->alloc_mb_width != s->mb_width
658 || pic->alloc_mb_height != s->mb_height)
659 ff_free_picture_tables(pic);
662 av_assert0(pic->f->data[0]);
665 av_assert0(!pic->f->buf[0]);
667 if (alloc_frame_buffer(s, pic) < 0)
670 s->linesize = pic->f->linesize[0];
671 s->uvlinesize = pic->f->linesize[1];
674 if (!pic->qscale_table_buf)
675 ret = alloc_picture_tables(s, pic);
677 ret = make_tables_writable(pic);
682 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
683 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
684 pic->mb_mean = pic->mb_mean_buf->data;
687 pic->mbskip_table = pic->mbskip_table_buf->data;
688 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
689 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
691 if (pic->motion_val_buf[0]) {
692 for (i = 0; i < 2; i++) {
693 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
694 pic->ref_index[i] = pic->ref_index_buf[i]->data;
700 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
701 ff_mpeg_unref_picture(s->avctx, pic);
702 ff_free_picture_tables(pic);
703 return AVERROR(ENOMEM);
707 * Deallocate a picture.
/* Releases the frame buffer (via ff_thread_release_buffer, except for the WM
 * image/screen codecs which manage their own buffers), the hwaccel private
 * buffer, and — when the picture is flagged needs_realloc — its side-data
 * tables. Finally zeroes every field located after mb_mean in the Picture
 * struct, preserving the fields up to and including mb_mean (computed via
 * offsetof). Field order in Picture is therefore load-bearing here. */
709 void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
711 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
714 /* WM Image / Screen codecs allocate internal buffers with different
715 * dimensions / colorspaces; ignore user-defined callbacks for these. */
716 if (avctx->codec->id != AV_CODEC_ID_WMV3IMAGE &&
717 avctx->codec->id != AV_CODEC_ID_VC1IMAGE &&
718 avctx->codec->id != AV_CODEC_ID_MSS2)
719 ff_thread_release_buffer(avctx, &pic->tf);
721 av_frame_unref(pic->f);
723 av_buffer_unref(&pic->hwaccel_priv_buf);
725 if (pic->needs_realloc)
726 ff_free_picture_tables(pic);
728 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Re-points dst's side-data buffers at src's via av_buffer_ref(): for each
 * table, if dst doesn't already share src's underlying buffer it is unrefed
 * and re-referenced; on a failed ref all of dst's tables are freed and
 * ENOMEM returned. The raw data pointers and alloc dimensions are then
 * copied so dst's views match src's.
 * NOTE(review): part of the UPDATE_TABLE macro (the src->table existence
 * test and null check after av_buffer_ref) and the success return are
 * missing from this lossy excerpt. */
731 static int update_picture_tables(Picture *dst, Picture *src)
735 #define UPDATE_TABLE(table)\
738 (!dst->table || dst->table->buffer != src->table->buffer)) {\
739 av_buffer_unref(&dst->table);\
740 dst->table = av_buffer_ref(src->table);\
742 ff_free_picture_tables(dst);\
743 return AVERROR(ENOMEM);\
748 UPDATE_TABLE(mb_var_buf);
749 UPDATE_TABLE(mc_mb_var_buf);
750 UPDATE_TABLE(mb_mean_buf);
751 UPDATE_TABLE(mbskip_table_buf);
752 UPDATE_TABLE(qscale_table_buf);
753 UPDATE_TABLE(mb_type_buf);
754 for (i = 0; i < 2; i++) {
755 UPDATE_TABLE(motion_val_buf[i]);
756 UPDATE_TABLE(ref_index_buf[i]);
759 dst->mb_var = src->mb_var;
760 dst->mc_mb_var = src->mc_mb_var;
761 dst->mb_mean = src->mb_mean;
762 dst->mbskip_table = src->mbskip_table;
763 dst->qscale_table = src->qscale_table;
764 dst->mb_type = src->mb_type;
765 for (i = 0; i < 2; i++) {
766 dst->motion_val[i] = src->motion_val[i];
767 dst->ref_index[i] = src->ref_index[i];
770 dst->alloc_mb_width = src->alloc_mb_width;
771 dst->alloc_mb_height = src->alloc_mb_height;
/* Makes dst a new reference to src: refs the thread frame, shares the
 * side-data tables (update_picture_tables), refs the hwaccel private buffer
 * if present, and copies the scalar bookkeeping fields. dst must start
 * empty; on failure dst is fully unreferenced (the error path after the
 * "fail:" label is partially visible at the end).
 * NOTE(review): the goto-fail jumps, the AVFrame ref of dst->f and the
 * success return are missing from this lossy excerpt. */
776 int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
780 av_assert0(!dst->f->buf[0]);
781 av_assert0(src->f->buf[0]);
785 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
789 ret = update_picture_tables(dst, src);
793 if (src->hwaccel_picture_private) {
794 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
795 if (!dst->hwaccel_priv_buf)
797 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
800 dst->field_picture = src->field_picture;
801 dst->mb_var_sum = src->mb_var_sum;
802 dst->mc_mb_var_sum = src->mc_mb_var_sum;
803 dst->b_frame_score = src->b_frame_score;
804 dst->needs_realloc = src->needs_realloc;
805 dst->reference = src->reference;
806 dst->shared = src->shared;
810 ff_mpeg_unref_picture(avctx, dst);
/* Per-slice-thread context init: allocates the ME map/score_map, optional
 * noise-reduction DCT error accumulator, the 12 DCT block array (pblocks[i]
 * pointing into it, with blocks 4/5 swapped for the "VCR2" fourcc), and for
 * H.263-family output the AC prediction values with pointers offset past the
 * top/left borders. Returns -1 on allocation failure; the comment notes the
 * caller frees via ff_mpv_common_end().
 * NOTE(review): yc_size is padded by an extra b8/mb stride for odd
 * mb_height; various guards and closing braces are missing from this lossy
 * excerpt. */
814 static int init_duplicate_context(MpegEncContext *s)
816 int y_size = s->b8_stride * (2 * s->mb_height + 1);
817 int c_size = s->mb_stride * (s->mb_height + 1);
818 int yc_size = y_size + 2 * c_size;
821 if (s->mb_height & 1)
822 yc_size += 2*s->b8_stride + 2*s->mb_stride;
829 s->obmc_scratchpad = NULL;
832 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
833 ME_MAP_SIZE * sizeof(uint32_t), fail)
834 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
835 ME_MAP_SIZE * sizeof(uint32_t), fail)
836 if (s->avctx->noise_reduction) {
837 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
838 2 * 64 * sizeof(int), fail)
841 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
842 s->block = s->blocks[0];
844 for (i = 0; i < 12; i++) {
845 s->pblocks[i] = &s->block[i];
847 if (s->avctx->codec_tag == AV_RL32("VCR2")) {
849 FFSWAP(void *, s->pblocks[4], s->pblocks[5]);
852 if (s->out_format == FMT_H263) {
854 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
855 yc_size * sizeof(int16_t) * 16, fail);
856 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
857 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
858 s->ac_val[2] = s->ac_val[1] + c_size;
863 return -1; // free() through ff_mpv_common_end()
/* Frees everything init_duplicate_context / frame_size_alloc allocated for
 * one slice context. Only edge_emu_buffer and me.scratchpad are freed for
 * the scratch region — me.temp, rd_scratchpad, b_scratchpad and
 * obmc_scratchpad alias it (see frame_size_alloc), so they must only be
 * NULLed, never freed.
 * NOTE(review): the NULL-context guard and the NULLing of the aliased
 * pointers are partially missing from this lossy excerpt. */
866 static void free_duplicate_context(MpegEncContext *s)
871 av_freep(&s->edge_emu_buffer);
872 av_freep(&s->me.scratchpad);
876 s->obmc_scratchpad = NULL;
878 av_freep(&s->dct_error_sum);
879 av_freep(&s->me.map);
880 av_freep(&s->me.score_map);
881 av_freep(&s->blocks);
882 av_freep(&s->ac_val_base);
/* Copies the per-thread (duplicate-context) fields from src into bak; used
 * by ff_update_duplicate_context to save and restore those fields around a
 * whole-struct memcpy.
 * NOTE(review): most of the COPY(...) list is missing from this lossy
 * excerpt — only a few representative fields are visible. */
886 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
888 #define COPY(a) bak->a = src->a
889 COPY(edge_emu_buffer);
894 COPY(obmc_scratchpad);
901 COPY(me.map_generation);
/* Synchronizes a slice-thread context with the master: saves dst's
 * per-thread fields, memcpys the whole MpegEncContext from src, restores the
 * saved fields, rebuilds the pblocks pointers (again swapping 4/5 for the
 * "VCR2" fourcc), and lazily allocates dst's linesize-dependent scratch
 * buffers if missing.
 * NOTE(review): the declaration of bak/ret and the return statements are
 * missing from this lossy excerpt. */
913 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
917 // FIXME copy only needed parts
919 backup_duplicate_context(&bak, dst);
920 memcpy(dst, src, sizeof(MpegEncContext));
921 backup_duplicate_context(dst, &bak);
922 for (i = 0; i < 12; i++) {
923 dst->pblocks[i] = &dst->block[i];
925 if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
927 FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
929 if (!dst->edge_emu_buffer &&
930 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
931 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
932 "scratch buffers.\n");
935 // STOP_TIMER("update_duplicate_context")
936 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading state transfer from the previous thread's context (src/s1)
 * to this one (dst/s): first-time init by struct copy + ff_mpv_common_init,
 * size-change handling, re-referencing every Picture (array plus
 * current/last/next, with their pointers rebased into this context's picture
 * array via REBASE_PICTURE), then copying resilience, MPEG-4 timing (by
 * memcpy over a field range — struct field ORDER is load-bearing), B-frame,
 * DivX, bitstream-buffer and interlacing state.
 * NOTE(review): this excerpt is missing many lines (guards, early returns,
 * goto-style error handling, the UPDATE_PICTURE error check, closing
 * braces); treat the visible statements as a sampled outline, not a
 * complete body. */
940 int ff_mpeg_update_thread_context(AVCodecContext *dst,
941 const AVCodecContext *src)
944 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
951 // FIXME can parameters change on I-frames?
952 // in that case dst may need a reinit
953 if (!s->context_initialized) {
955 memcpy(s, s1, sizeof(MpegEncContext));
958 s->bitstream_buffer = NULL;
959 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
961 if (s1->context_initialized){
962 // s->picture_range_start += MAX_PICTURE_COUNT;
963 // s->picture_range_end += MAX_PICTURE_COUNT;
965 if((err = ff_mpv_common_init(s)) < 0){
966 memset(s, 0, sizeof(MpegEncContext));
973 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
974 s->context_reinit = 0;
975 s->height = s1->height;
976 s->width = s1->width;
977 if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
981 s->avctx->coded_height = s1->avctx->coded_height;
982 s->avctx->coded_width = s1->avctx->coded_width;
983 s->avctx->width = s1->avctx->width;
984 s->avctx->height = s1->avctx->height;
986 s->coded_picture_number = s1->coded_picture_number;
987 s->picture_number = s1->picture_number;
989 av_assert0(!s->picture || s->picture != s1->picture);
991 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
992 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
993 if (s1->picture[i].f->buf[0] &&
994 (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
998 #define UPDATE_PICTURE(pic)\
1000 ff_mpeg_unref_picture(s->avctx, &s->pic);\
1001 if (s1->pic.f && s1->pic.f->buf[0])\
1002 ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
1004 ret = update_picture_tables(&s->pic, &s1->pic);\
1009 UPDATE_PICTURE(current_picture);
1010 UPDATE_PICTURE(last_picture);
1011 UPDATE_PICTURE(next_picture);
1013 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
1014 ((pic && pic >= old_ctx->picture && \
1015 pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
1016 &new_ctx->picture[pic - old_ctx->picture] : NULL)
1018 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1019 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1020 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1022 // Error/bug resilience
1023 s->next_p_frame_damaged = s1->next_p_frame_damaged;
1024 s->workaround_bugs = s1->workaround_bugs;
1025 s->padding_bug_score = s1->padding_bug_score;
1027 // MPEG4 timing info
1028 memcpy(&s->last_time_base, &s1->last_time_base,
1029 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1030 (char *) &s1->last_time_base);
1033 s->max_b_frames = s1->max_b_frames;
1034 s->low_delay = s1->low_delay;
1035 s->droppable = s1->droppable;
1037 // DivX handling (doesn't work)
1038 s->divx_packed = s1->divx_packed;
1040 if (s1->bitstream_buffer) {
1041 if (s1->bitstream_buffer_size +
1042 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size) {
1043 av_fast_malloc(&s->bitstream_buffer,
1044 &s->allocated_bitstream_buffer_size,
1045 s1->allocated_bitstream_buffer_size);
1046 if (!s->bitstream_buffer) {
1047 s->bitstream_buffer_size = 0;
1048 return AVERROR(ENOMEM);
1051 s->bitstream_buffer_size = s1->bitstream_buffer_size;
1052 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1053 s1->bitstream_buffer_size);
1054 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1055 FF_INPUT_BUFFER_PADDING_SIZE);
1058 // linesize dependend scratch buffer allocation
1059 if (!s->edge_emu_buffer)
1061 if (frame_size_alloc(s, s1->linesize) < 0) {
1062 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1063 "scratch buffers.\n");
1064 return AVERROR(ENOMEM);
1067 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1068 "be allocated due to unknown size.\n");
1071 // MPEG2/interlacing info
1072 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1073 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1075 if (!s1->first_field) {
1076 s->last_pict_type = s1->pict_type;
1077 if (s1->current_picture_ptr)
1078 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1085 * Set the given MpegEncContext to common defaults
1086 * (same for encoding and decoding).
1087 * The changed fields will not depend upon the
1088 * prior state of the MpegEncContext.
/* Shared encoder/decoder defaults: MPEG-1 DC scale tables, identity chroma
 * qscale mapping, progressive frame-coded picture, picture counters reset,
 * one slice context. */
1090 void ff_mpv_common_defaults(MpegEncContext *s)
1092 s->y_dc_scale_table =
1093 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1094 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1095 s->progressive_frame = 1;
1096 s->progressive_sequence = 1;
1097 s->picture_structure = PICT_FRAME;
1099 s->coded_picture_number = 0;
1100 s->picture_number = 0;
1105 s->slice_context_count = 1;
1109 * Set the given MpegEncContext to defaults for decoding.
1110 * the changed fields will not depend upon
1111 * the prior state of the MpegEncContext.
/* Decoder defaults are currently just the common defaults; kept as a
 * separate entry point for decoder init paths. */
1113 void ff_mpv_decode_defaults(MpegEncContext *s)
1115 ff_mpv_common_defaults(s);
/* Binds the context to an AVCodecContext for decoding: coded dimensions,
 * codec id, bug workarounds, and the codec tag upper-cased for fourcc
 * comparisons (e.g. the "VCR2" check elsewhere in this file).
 * NOTE(review): the s->avctx assignment is among lines missing from this
 * lossy excerpt. */
1118 void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
1121 s->width = avctx->coded_width;
1122 s->height = avctx->coded_height;
1123 s->codec_id = avctx->codec->id;
1124 s->workaround_bugs = avctx->workaround_bugs;
1126 /* convert fourcc to upper case */
1127 s->codec_tag = avpriv_toupper4(avctx->codec_tag);
/* Populates the error-resilience context from this MpegEncContext: geometry,
 * shared tables (mbskip/mbintra/dc_val), a temp buffer and status table
 * sized mb_height*mb_stride, and installs mpeg_er_decode_mb as the
 * concealment callback. Frees both buffers and returns ENOMEM if either
 * allocation fails (the trailing av_freep lines are the error path).
 * NOTE(review): the opaque/decode_mb wiring and the success return are
 * partially missing from this lossy excerpt. */
1130 static int init_er(MpegEncContext *s)
1132 ERContext *er = &s->er;
1133 int mb_array_size = s->mb_height * s->mb_stride;
1136 er->avctx = s->avctx;
1138 er->mb_index2xy = s->mb_index2xy;
1139 er->mb_num = s->mb_num;
1140 er->mb_width = s->mb_width;
1141 er->mb_height = s->mb_height;
1142 er->mb_stride = s->mb_stride;
1143 er->b8_stride = s->b8_stride;
1145 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1146 er->error_status_table = av_mallocz(mb_array_size);
1147 if (!er->er_temp_buffer || !er->error_status_table)
1150 er->mbskip_table = s->mbskip_table;
1151 er->mbintra_table = s->mbintra_table;
1153 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1154 er->dc_val[i] = s->dc_val[i];
1156 er->decode_mb = mpeg_er_decode_mb;
1161 av_freep(&er->er_temp_buffer);
1162 av_freep(&er->error_status_table);
1163 return AVERROR(ENOMEM);
1167 * Initialize and allocates MpegEncContext fields dependent on the resolution.
1169 static int init_context_frame(MpegEncContext *s)
1171 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1173 s->mb_width = (s->width + 15) / 16;
1174 s->mb_stride = s->mb_width + 1;
1175 s->b8_stride = s->mb_width * 2 + 1;
1176 mb_array_size = s->mb_height * s->mb_stride;
1177 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1179 /* set default edge pos, will be overridden
1180 * in decode_header if needed */
1181 s->h_edge_pos = s->mb_width * 16;
1182 s->v_edge_pos = s->mb_height * 16;
1184 s->mb_num = s->mb_width * s->mb_height;
1189 s->block_wrap[3] = s->b8_stride;
1191 s->block_wrap[5] = s->mb_stride;
1193 y_size = s->b8_stride * (2 * s->mb_height + 1);
1194 c_size = s->mb_stride * (s->mb_height + 1);
1195 yc_size = y_size + 2 * c_size;
1197 if (s->mb_height & 1)
1198 yc_size += 2*s->b8_stride + 2*s->mb_stride;
1200 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
1201 for (y = 0; y < s->mb_height; y++)
1202 for (x = 0; x < s->mb_width; x++)
1203 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1205 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1208 /* Allocate MV tables */
1209 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1210 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1211 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1212 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1213 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1214 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1215 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1216 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1217 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1218 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1219 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1220 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1222 /* Allocate MB type table */
1223 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1225 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1227 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1228 mb_array_size * sizeof(float), fail);
1229 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1230 mb_array_size * sizeof(float), fail);
1234 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1235 (s->avctx->flags & CODEC_FLAG_INTERLACED_ME)) {
1236 /* interlaced direct mode decoding tables */
1237 for (i = 0; i < 2; i++) {
1239 for (j = 0; j < 2; j++) {
1240 for (k = 0; k < 2; k++) {
1241 FF_ALLOCZ_OR_GOTO(s->avctx,
1242 s->b_field_mv_table_base[i][j][k],
1243 mv_table_size * 2 * sizeof(int16_t),
1245 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1248 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1249 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1250 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1252 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1255 if (s->out_format == FMT_H263) {
1257 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1258 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1260 /* cbp, ac_pred, pred_dir */
1261 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1262 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1265 if (s->h263_pred || s->h263_plus || !s->encoding) {
1267 // MN: we need these for error resilience of intra-frames
1268 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1269 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1270 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1271 s->dc_val[2] = s->dc_val[1] + c_size;
1272 for (i = 0; i < yc_size; i++)
1273 s->dc_val_base[i] = 1024;
1276 /* which mb is a intra block */
1277 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1278 memset(s->mbintra_table, 1, mb_array_size);
1280 /* init macroblock skip table */
1281 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1282 // Note the + 1 is for a quicker mpeg4 slice_end detection
1286 return AVERROR(ENOMEM);
1290 * init common structure for both encoder and decoder.
1291 * this assumes that some variables like width/height are already set
/**
 * Init the common structure shared by encoder and decoder.
 * Assumes width/height, codec_id, pix_fmt etc. are already set in *s.
 * NOTE(review): several source lines are elided in this chunk (braces,
 * error paths, returns) — do not edit logic here without the full file.
 */
1293 av_cold int ff_mpv_common_init(MpegEncContext *s)
/* one slice context per slice thread, otherwise a single context */
1296 int nb_slices = (HAVE_THREADS &&
1297 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1298 s->avctx->thread_count : 1;
1300 if (s->encoding && s->avctx->slices)
1301 nb_slices = s->avctx->slices;
/* interlaced MPEG-2 needs an even mb_height (rounded up per field pair) */
1303 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1304 s->mb_height = (s->height + 31) / 32 * 2;
1306 s->mb_height = (s->height + 15) / 16;
1308 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1309 av_log(s->avctx, AV_LOG_ERROR,
1310 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* clamp slice count: cannot exceed MAX_THREADS nor one slice per MB row */
1314 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1317 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1319 max_slices = MAX_THREADS;
1320 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1321 " reducing to %d\n", nb_slices, max_slices);
1322 nb_slices = max_slices;
1325 if ((s->width || s->height) &&
1326 av_image_check_size(s->width, s->height, 0, s->avctx))
1331 /* set chroma shifts */
1332 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1334 &s->chroma_y_shift);
/* allocate the Picture pool and an AVFrame for each entry */
1337 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1338 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1339 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1340 s->picture[i].f = av_frame_alloc();
1341 if (!s->picture[i].f)
1344 memset(&s->next_picture, 0, sizeof(s->next_picture));
1345 memset(&s->last_picture, 0, sizeof(s->last_picture));
1346 memset(&s->current_picture, 0, sizeof(s->current_picture));
1347 memset(&s->new_picture, 0, sizeof(s->new_picture));
1348 s->next_picture.f = av_frame_alloc();
1349 if (!s->next_picture.f)
1351 s->last_picture.f = av_frame_alloc();
1352 if (!s->last_picture.f)
1354 s->current_picture.f = av_frame_alloc();
1355 if (!s->current_picture.f)
1357 s->new_picture.f = av_frame_alloc();
1358 if (!s->new_picture.f)
/* resolution-dependent tables (MV tables, mb_type, ...) */
1361 if (init_context_frame(s))
1364 s->parse_context.state = -1;
1366 s->context_initialized = 1;
1367 s->thread_context[0] = s;
1369 //     if (s->width && s->height) {
/* slice threading: duplicate the context for each extra slice thread and
 * split the MB rows evenly (rounded) between [start_mb_y, end_mb_y) */
1370 if (nb_slices > 1) {
1371 for (i = 1; i < nb_slices; i++) {
1372 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1373 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1376 for (i = 0; i < nb_slices; i++) {
1377 if (init_duplicate_context(s->thread_context[i]) < 0)
1379 s->thread_context[i]->start_mb_y =
1380 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1381 s->thread_context[i]->end_mb_y =
1382 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1385 if (init_duplicate_context(s) < 0)
1388 s->end_mb_y = s->mb_height;
1390 s->slice_context_count = nb_slices;
/* error path: tear everything down again */
1395 ff_mpv_common_end(s);
1400 * Frees and resets MpegEncContext fields depending on the resolution.
1401 * Is used during resolution changes to avoid a full reinitialization of the
/**
 * Free and reset the MpegEncContext fields that depend on the resolution.
 * Counterpart of init_context_frame(); used on resolution changes to avoid
 * a full reinitialization. Every pointer is freed with av_freep() (NULL-safe)
 * and the derived alias pointers are reset to NULL.
 * NOTE(review): some lines (braces, declarations) are elided in this chunk.
 */
1404 static void free_context_frame(MpegEncContext *s)
1408 av_freep(&s->mb_type);
/* MV table bases own the memory; the *_mv_table pointers below are
 * offset aliases into them and must be NULLed, not freed */
1409 av_freep(&s->p_mv_table_base);
1410 av_freep(&s->b_forw_mv_table_base);
1411 av_freep(&s->b_back_mv_table_base);
1412 av_freep(&s->b_bidir_forw_mv_table_base);
1413 av_freep(&s->b_bidir_back_mv_table_base);
1414 av_freep(&s->b_direct_mv_table_base);
1415 s->p_mv_table = NULL;
1416 s->b_forw_mv_table = NULL;
1417 s->b_back_mv_table = NULL;
1418 s->b_bidir_forw_mv_table = NULL;
1419 s->b_bidir_back_mv_table = NULL;
1420 s->b_direct_mv_table = NULL;
/* interlaced / field-based MV tables ([direction][field pair][list]) */
1421 for (i = 0; i < 2; i++) {
1422 for (j = 0; j < 2; j++) {
1423 for (k = 0; k < 2; k++) {
1424 av_freep(&s->b_field_mv_table_base[i][j][k]);
1425 s->b_field_mv_table[i][j][k] = NULL;
1427 av_freep(&s->b_field_select_table[i][j]);
1428 av_freep(&s->p_field_mv_table_base[i][j]);
1429 s->p_field_mv_table[i][j] = NULL;
1431 av_freep(&s->p_field_select_table[i]);
1434 av_freep(&s->dc_val_base);
1435 av_freep(&s->coded_block_base);
1436 av_freep(&s->mbintra_table);
1437 av_freep(&s->cbp_table);
1438 av_freep(&s->pred_dir_table);
1440 av_freep(&s->mbskip_table);
1442 av_freep(&s->er.error_status_table);
1443 av_freep(&s->er.er_temp_buffer);
1444 av_freep(&s->mb_index2xy);
1445 av_freep(&s->lambda_table);
1447 av_freep(&s->cplx_tab);
1448 av_freep(&s->bits_tab);
/* strides are recomputed on the next (re)init */
1450 s->linesize = s->uvlinesize = 0;
/**
 * Reinitialize the resolution-dependent parts of an already-initialized
 * context after a frame size change: frees per-resolution tables and the
 * per-slice duplicate contexts, recomputes mb_height, then rebuilds both.
 * Returns 0 on success, a negative AVERROR on failure.
 * NOTE(review): error-path lines are elided in this chunk.
 */
1453 int ff_mpv_common_frame_size_change(MpegEncContext *s)
1457 if (!s->context_initialized)
1458 return AVERROR(EINVAL);
/* tear down slice-thread duplicate contexts (index 0 is s itself) */
1460 if (s->slice_context_count > 1) {
1461 for (i = 0; i < s->slice_context_count; i++) {
1462 free_duplicate_context(s->thread_context[i]);
1464 for (i = 1; i < s->slice_context_count; i++) {
1465 av_freep(&s->thread_context[i]);
1468 free_duplicate_context(s);
1470 free_context_frame(s);
/* force reallocation of all pool pictures at the new size */
1473 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1474 s->picture[i].needs_realloc = 1;
1477 s->last_picture_ptr =
1478 s->next_picture_ptr =
1479 s->current_picture_ptr = NULL;
/* same mb_height rule as ff_mpv_common_init(): interlaced MPEG-2 needs
 * an even number of MB rows */
1482 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1483 s->mb_height = (s->height + 31) / 32 * 2;
1485 s->mb_height = (s->height + 15) / 16;
1487 if ((s->width || s->height) &&
1488 (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
1491 if ((err = init_context_frame(s)))
1494 s->thread_context[0] = s;
/* rebuild the slice-thread contexts with the new MB row split */
1496 if (s->width && s->height) {
1497 int nb_slices = s->slice_context_count;
1498 if (nb_slices > 1) {
1499 for (i = 1; i < nb_slices; i++) {
1500 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1501 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1504 for (i = 0; i < nb_slices; i++) {
1505 if ((err = init_duplicate_context(s->thread_context[i])) < 0)
1507 s->thread_context[i]->start_mb_y =
1508 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1509 s->thread_context[i]->end_mb_y =
1510 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1513 err = init_duplicate_context(s);
1517 s->end_mb_y = s->mb_height;
1519 s->slice_context_count = nb_slices;
/* error path: full teardown */
1524 ff_mpv_common_end(s);
1528 /* free the common structure shared by encoder and decoder */
/**
 * Free everything allocated by ff_mpv_common_init() and reset the context
 * to an uninitialized state. Safe to call on a partially initialized
 * context (used as the error path of the init functions).
 * NOTE(review): some lines (braces, declarations) are elided in this chunk.
 */
1529 void ff_mpv_common_end(MpegEncContext *s)
/* slice-thread duplicate contexts: free internals for all, memory for i>=1 */
1533 if (s->slice_context_count > 1) {
1534 for (i = 0; i < s->slice_context_count; i++) {
1535 free_duplicate_context(s->thread_context[i]);
1537 for (i = 1; i < s->slice_context_count; i++) {
1538 av_freep(&s->thread_context[i]);
1540 s->slice_context_count = 1;
1541 } else free_duplicate_context(s);
1543 av_freep(&s->parse_context.buffer);
1544 s->parse_context.buffer_size = 0;
1546 av_freep(&s->bitstream_buffer);
1547 s->allocated_bitstream_buffer_size = 0;
/* pool pictures: tables, buffer refs, then the AVFrame itself */
1550 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1551 ff_free_picture_tables(&s->picture[i]);
1552 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1553 av_frame_free(&s->picture[i].f);
1556 av_freep(&s->picture);
/* the four standalone Picture members get the same three-step teardown */
1557 ff_free_picture_tables(&s->last_picture);
1558 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1559 av_frame_free(&s->last_picture.f);
1560 ff_free_picture_tables(&s->current_picture);
1561 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1562 av_frame_free(&s->current_picture.f);
1563 ff_free_picture_tables(&s->next_picture);
1564 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1565 av_frame_free(&s->next_picture.f);
1566 ff_free_picture_tables(&s->new_picture);
1567 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1568 av_frame_free(&s->new_picture.f);
1570 free_context_frame(s);
1572 s->context_initialized = 0;
1573 s->last_picture_ptr =
1574 s->next_picture_ptr =
1575 s->current_picture_ptr = NULL;
1576 s->linesize = s->uvlinesize = 0;
/**
 * Unreference every picture in the pool that is not marked as a reference
 * frame, returning its buffers to the pool for reuse.
 */
1579 static void release_unused_pictures(AVCodecContext *avctx, Picture *picture)
1583 /* release non reference frames */
1584 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1585 if (!picture[i].reference)
1586 ff_mpeg_unref_picture(avctx, &picture[i]);
/**
 * Return nonzero if this pool slot can be (re)used: either it has no data
 * buffer at all, or it is flagged for reallocation and is not a delayed
 * reference. (Return statements are elided in this extracted chunk.)
 */
1590 static inline int pic_is_unused(Picture *pic)
1592 if (!pic->f->buf[0])
1594 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/**
 * Find the index of a free slot in the picture pool.
 * For shared pictures only a completely empty slot (no buffer) qualifies;
 * otherwise any slot pic_is_unused() accepts is taken.
 * Running out of slots is an internal error for valid streams (see the
 * original rationale comment below), hence the FATAL log.
 */
1599 static int find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
1604 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1605 if (!picture[i].f->buf[0])
1609 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1610 if (pic_is_unused(&picture[i]))
1615 av_log(avctx, AV_LOG_FATAL,
1616 "Internal error, picture buffer overflow\n");
1617 /* We could return -1, but the codec would crash trying to draw into a
1618 * non-existing frame anyway. This is safer than waiting for a random crash.
1619 * Also the return of this is never useful, an encoder must only allocate
1620 * as much as allowed in the specification. This has no relationship to how
1621 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1622 * enough for such valid streams).
1623 * Plus, a decoder has to check stream validity and remove frames if too
1624 * many reference frames are around. Waiting for "OOM" is not correct at
1625 * all. Similarly, missing reference frames have to be replaced by
1626 * interpolated/MC frames, anything else is a bug in the codec ...
/**
 * Public wrapper around find_unused_picture(): if the chosen slot is marked
 * needs_realloc, drop its old tables/buffers so the caller gets a clean slot.
 * Returns the slot index (see find_unused_picture for the failure policy).
 */
1632 int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
1634 int ret = find_unused_picture(avctx, picture, shared);
1636 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1637 if (picture[ret].needs_realloc) {
1638 picture[ret].needs_realloc = 0;
1639 ff_free_picture_tables(&picture[ret]);
1640 ff_mpeg_unref_picture(avctx, &picture[ret]);
/**
 * Fill all three planes of a frame with mid-gray (0x80), taking chroma
 * subsampling into account. Used by the FF_DEBUG_NOMC debug mode to blank
 * the picture so only non-MC data remains visible.
 * Assumes a planar YUV-style layout with data[0..2] valid.
 */
1646 static void gray_frame(AVFrame *frame)
1648 int i, h_chroma_shift, v_chroma_shift;
1650 av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1652 for(i=0; i<frame->height; i++)
1653 memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1654 for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1655 memset(frame->data[1] + frame->linesize[1]*i,
1656 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1657 memset(frame->data[2] + frame->linesize[2]*i,
1658 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1663 * generic function called after decoding
1664 * the header and before a frame is decoded.
/**
 * Generic per-frame setup, called after the header is decoded and before the
 * frame data is decoded: rotates last/next/current picture pointers,
 * allocates the current picture (and dummy reference pictures when a stream
 * starts on a non-keyframe), applies field-picture pointer/stride fixups and
 * selects the dequantizer functions.
 * NOTE(review): many source lines (braces, error paths, returns) are elided
 * in this extracted chunk — do not edit logic here without the full file.
 */
1666 int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1672 if (!ff_thread_can_start_frame(avctx)) {
1673 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1677 /* mark & release old frames */
1678 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1679 s->last_picture_ptr != s->next_picture_ptr &&
1680 s->last_picture_ptr->f->buf[0]) {
1681 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1684 /* release forgotten pictures */
1685 /* if (mpeg124/h263) */
1686 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1687 if (&s->picture[i] != s->last_picture_ptr &&
1688 &s->picture[i] != s->next_picture_ptr &&
1689 s->picture[i].reference && !s->picture[i].needs_realloc) {
1690 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1691 av_log(avctx, AV_LOG_ERROR,
1692 "releasing zombie picture\n");
1693 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1697 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1699 release_unused_pictures(s->avctx, s->picture);
1701 if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1702 // we already have a unused image
1703 // (maybe it was set before reading the header)
1704 pic = s->current_picture_ptr;
1706 i = ff_find_unused_picture(s->avctx, s->picture, 0);
1708 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1711 pic = &s->picture[i];
/* droppable frames are never used as references */
1715 if (!s->droppable) {
1716 if (s->pict_type != AV_PICTURE_TYPE_B)
1720 pic->f->coded_picture_number = s->coded_picture_number++;
1722 if (ff_alloc_picture(s, pic, 0) < 0)
1725 s->current_picture_ptr = pic;
1726 // FIXME use only the vars from current_pic
1727 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1728 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1729 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* for field pictures, derive top_field_first from which field comes first */
1730 if (s->picture_structure != PICT_FRAME)
1731 s->current_picture_ptr->f->top_field_first =
1732 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1734 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1735 !s->progressive_sequence;
1736 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1738 s->current_picture_ptr->f->pict_type = s->pict_type;
1739 // if (s->avctx->flags && CODEC_FLAG_QSCALE)
1740 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1741 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1743 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1744 s->current_picture_ptr)) < 0)
/* reference rotation: on non-B frames the old "next" becomes "last" and the
 * current frame becomes the new "next" */
1747 if (s->pict_type != AV_PICTURE_TYPE_B) {
1748 s->last_picture_ptr = s->next_picture_ptr;
1750 s->next_picture_ptr = s->current_picture_ptr;
1752 ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1753 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1754 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1755 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1756 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1757 s->pict_type, s->droppable);
/* no usable last picture (e.g. stream starts on a P/B frame or a field
 * keyframe): fabricate a gray dummy reference so decoding can proceed */
1759 if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1760 (s->pict_type != AV_PICTURE_TYPE_I ||
1761 s->picture_structure != PICT_FRAME)) {
1762 int h_chroma_shift, v_chroma_shift;
1763 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1764 &h_chroma_shift, &v_chroma_shift);
1765 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1766 av_log(avctx, AV_LOG_DEBUG,
1767 "allocating dummy last picture for B frame\n");
1768 else if (s->pict_type != AV_PICTURE_TYPE_I)
1769 av_log(avctx, AV_LOG_ERROR,
1770 "warning: first frame is no keyframe\n");
1771 else if (s->picture_structure != PICT_FRAME)
1772 av_log(avctx, AV_LOG_DEBUG,
1773 "allocate dummy last picture for field based first keyframe\n");
1775 /* Allocate a dummy frame */
1776 i = ff_find_unused_picture(s->avctx, s->picture, 0);
1778 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1781 s->last_picture_ptr = &s->picture[i];
1783 s->last_picture_ptr->reference = 3;
1784 s->last_picture_ptr->f->key_frame = 0;
1785 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1787 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1788 s->last_picture_ptr = NULL;
/* fill the dummy with mid-gray (skip for hwaccel frames we cannot write) */
1792 if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1793 for(i=0; i<avctx->height; i++)
1794 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1795 0x80, avctx->width);
1796 if (s->last_picture_ptr->f->data[2]) {
1797 for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1798 memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1799 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1800 memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1801 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
/* FLV1/H263 use luma 16 (black) instead of 0x80 for the dummy frame */
1805 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1806 for(i=0; i<avctx->height; i++)
1807 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
/* mark both fields fully decoded so frame threads don't wait on it */
1811 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1812 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* same trick for a missing "next" reference of a B frame */
1814 if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1815 s->pict_type == AV_PICTURE_TYPE_B) {
1816 /* Allocate a dummy frame */
1817 i = ff_find_unused_picture(s->avctx, s->picture, 0);
1819 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1822 s->next_picture_ptr = &s->picture[i];
1824 s->next_picture_ptr->reference = 3;
1825 s->next_picture_ptr->f->key_frame = 0;
1826 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1828 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1829 s->next_picture_ptr = NULL;
1832 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1833 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1836 #if 0 // BUFREF-FIXME
1837 memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1838 memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
/* refresh the local last/next Picture copies from the pool pointers */
1840 if (s->last_picture_ptr) {
1841 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1842 if (s->last_picture_ptr->f->buf[0] &&
1843 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1844 s->last_picture_ptr)) < 0)
1847 if (s->next_picture_ptr) {
1848 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1849 if (s->next_picture_ptr->f->buf[0] &&
1850 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1851 s->next_picture_ptr)) < 0)
1855 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1856 s->last_picture_ptr->f->buf[0]));
/* field pictures: point at the right field and double the strides so the
 * decoder sees a half-height single-field image */
1858 if (s->picture_structure!= PICT_FRAME) {
1860 for (i = 0; i < 4; i++) {
1861 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1862 s->current_picture.f->data[i] +=
1863 s->current_picture.f->linesize[i];
1865 s->current_picture.f->linesize[i] *= 2;
1866 s->last_picture.f->linesize[i] *= 2;
1867 s->next_picture.f->linesize[i] *= 2;
1871 /* set dequantizer, we can't do it during init as
1872 * it might change for mpeg4 and we can't do it in the header
1873 * decode as init is not called for mpeg4 there yet */
1874 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1875 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1876 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1877 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1878 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1879 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1881 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1882 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1885 if (s->avctx->debug & FF_DEBUG_NOMC) {
1886 gray_frame(s->current_picture_ptr->f);
1892 /* called after a frame has been decoded. */
/**
 * Per-frame teardown, called after a frame has been decoded: if the current
 * picture is a reference, signal frame-threading consumers that it is
 * completely decoded.
 */
1893 void ff_mpv_frame_end(MpegEncContext *s)
1897 if (s->current_picture.reference)
1898 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
/**
 * Clip a line segment (*sx,*sy)-(*ex,*ey) against the x range [0, maxx],
 * updating the endpoints in place. Uses 64-bit intermediates to avoid
 * overflow in the interpolation. Recurses with swapped endpoints to
 * normalize their order. (Several lines are elided in this chunk; the
 * return-value convention is not visible here.)
 */
1903 static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
1906 return clip_line(ex, ey, sx, sy, maxx);
/* interpolate *sy at x = 0 */
1911 *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
/* interpolate *ey at x = maxx */
1918 *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
1926 * Draw a line from (ex, ey) -> (sx, sy).
1927 * @param w width of the image
1928 * @param h height of the image
1929 * @param stride stride/linesize of the image
1930 * @param color color of the arrow
/**
 * Draw an anti-aliased line into a single 8-bit plane by adding `color`
 * weighted by sub-pixel coverage (fixed-point 16.16 slope stepping).
 * The line is clipped to [0,w-1]x[0,h-1] first. Endpoints are swapped as
 * needed so the iteration always runs in increasing x (or y) order.
 * NOTE(review): some lines (clip-failure early returns, branch braces) are
 * elided in this extracted chunk.
 */
1932 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1933 int w, int h, int stride, int color)
1937 if (clip_line(&sx, &sy, &ex, &ey, w - 1))
1939 if (clip_line(&sy, &sx, &ey, &ex, h - 1))
1942 sx = av_clip(sx, 0, w - 1);
1943 sy = av_clip(sy, 0, h - 1);
1944 ex = av_clip(ex, 0, w - 1);
1945 ey = av_clip(ey, 0, h - 1);
1947 buf[sy * stride + sx] += color;
/* mostly-horizontal case: step in x, distribute color between two rows */
1949 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1951 FFSWAP(int, sx, ex);
1952 FFSWAP(int, sy, ey);
1954 buf += sx + sy * stride;
1956 f = ((ey - sy) << 16) / ex;
1957 for (x = 0; x <= ex; x++) {
1959 fr = (x * f) & 0xFFFF;
1960 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1961 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* mostly-vertical case: step in y, distribute color between two columns */
1965 FFSWAP(int, sx, ex);
1966 FFSWAP(int, sy, ey);
1968 buf += sx + sy * stride;
1971 f = ((ex - sx) << 16) / ey;
1974 for(y= 0; y <= ey; y++){
1976 fr = (y*f) & 0xFFFF;
1977 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1978 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1984 * Draw an arrow from (ex, ey) -> (sx, sy).
1985 * @param w width of the image
1986 * @param h height of the image
1987 * @param stride stride/linesize of the image
1988 * @param color color of the arrow
/**
 * Draw a motion-vector arrow into an 8-bit plane: the shaft via draw_line()
 * plus two short head strokes when the vector is long enough (> 3 px).
 * `direction` selects which end gets the head (endpoints are swapped first).
 * Endpoints are pre-clipped to a margin of 100 px around the image so the
 * later exact clipping in draw_line() cannot overflow.
 * NOTE(review): some lines (declarations, tail handling) are elided in this
 * extracted chunk.
 */
1990 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1991 int ey, int w, int h, int stride, int color, int tail, int direction)
1996 FFSWAP(int, sx, ex);
1997 FFSWAP(int, sy, ey);
2000 sx = av_clip(sx, -100, w + 100);
2001 sy = av_clip(sy, -100, h + 100);
2002 ex = av_clip(ex, -100, w + 100);
2003 ey = av_clip(ey, -100, h + 100);
/* only draw a head when the vector length exceeds 3 pixels */
2008 if (dx * dx + dy * dy > 3 * 3) {
2011 int length = ff_sqrt((rx * rx + ry * ry) << 8);
2013 // FIXME subpixel accuracy
2014 rx = ROUNDED_DIV(rx * 3 << 4, length);
2015 ry = ROUNDED_DIV(ry * 3 << 4, length);
/* two head strokes at +/-90 degrees around the tip */
2022 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2023 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2025 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
/**
 * Fill one exported AVMotionVector from a macroblock partition:
 * block size is derived from the mb_type partition flags (8x8/16x8/8x16,
 * default 16x16), and `source` encodes the prediction direction
 * (+1 = backward/next frame, -1 = forward/previous frame).
 * (dst/src coordinate assignments are elided in this extracted chunk.)
 */
2029 static int add_mb(AVMotionVector *mb, uint32_t mb_type,
2030 int dst_x, int dst_y,
2031 int src_x, int src_y,
2034 mb->w = IS_8X8(mb_type) || IS_8X16(mb_type) ? 8 : 16;
2035 mb->h = IS_8X8(mb_type) || IS_16X8(mb_type) ? 8 : 16;
2040 mb->source = direction ? 1 : -1;
2041 mb->flags = 0; // XXX: does mb_type contain extra information that could be exported here?
2046 * Print debugging info for the given picture.
2048 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2049 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2051 int mb_width, int mb_height, int mb_stride, int quarter_sample)
2053 if ((avctx->flags2 & CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) {
2054 const int shift = 1 + quarter_sample;
2055 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2056 const int mv_stride = (mb_width << mv_sample_log2) +
2057 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2058 int mb_x, mb_y, mbcount = 0;
2060 /* size is width * height * 2 * 4 where 2 is for directions and 4 is
2061 * for the maximum number of MB (4 MB in case of IS_8x8) */
2062 AVMotionVector *mvs = av_malloc_array(mb_width * mb_height, 2 * 4 * sizeof(AVMotionVector));
2066 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2067 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2068 int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
2069 for (direction = 0; direction < 2; direction++) {
2070 if (!USES_LIST(mb_type, direction))
2072 if (IS_8X8(mb_type)) {
2073 for (i = 0; i < 4; i++) {
2074 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2075 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2076 int xy = (mb_x * 2 + (i & 1) +
2077 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2078 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2079 int my = (motion_val[direction][xy][1] >> shift) + sy;
2080 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2082 } else if (IS_16X8(mb_type)) {
2083 for (i = 0; i < 2; i++) {
2084 int sx = mb_x * 16 + 8;
2085 int sy = mb_y * 16 + 4 + 8 * i;
2086 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2087 int mx = (motion_val[direction][xy][0] >> shift);
2088 int my = (motion_val[direction][xy][1] >> shift);
2090 if (IS_INTERLACED(mb_type))
2093 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2095 } else if (IS_8X16(mb_type)) {
2096 for (i = 0; i < 2; i++) {
2097 int sx = mb_x * 16 + 4 + 8 * i;
2098 int sy = mb_y * 16 + 8;
2099 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2100 int mx = motion_val[direction][xy][0] >> shift;
2101 int my = motion_val[direction][xy][1] >> shift;
2103 if (IS_INTERLACED(mb_type))
2106 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2109 int sx = mb_x * 16 + 8;
2110 int sy = mb_y * 16 + 8;
2111 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
2112 int mx = (motion_val[direction][xy][0]>>shift) + sx;
2113 int my = (motion_val[direction][xy][1]>>shift) + sy;
2114 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2121 AVFrameSideData *sd;
2123 av_log(avctx, AV_LOG_DEBUG, "Adding %d MVs info to frame %d\n", mbcount, avctx->frame_number);
2124 sd = av_frame_new_side_data(pict, AV_FRAME_DATA_MOTION_VECTORS, mbcount * sizeof(AVMotionVector));
2129 memcpy(sd->data, mvs, mbcount * sizeof(AVMotionVector));
2135 /* TODO: export all the following to make them accessible for users (and filters) */
2136 if (avctx->hwaccel || !mbtype_table
2137 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
2141 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2144 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2145 av_get_picture_type_char(pict->pict_type));
2146 for (y = 0; y < mb_height; y++) {
2147 for (x = 0; x < mb_width; x++) {
2148 if (avctx->debug & FF_DEBUG_SKIP) {
2149 int count = mbskip_table ? mbskip_table[x + y * mb_stride] : 0;
2152 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2154 if (avctx->debug & FF_DEBUG_QP) {
2155 av_log(avctx, AV_LOG_DEBUG, "%2d",
2156 qscale_table[x + y * mb_stride]);
2158 if (avctx->debug & FF_DEBUG_MB_TYPE) {
2159 int mb_type = mbtype_table[x + y * mb_stride];
2160 // Type & MV direction
2161 if (IS_PCM(mb_type))
2162 av_log(avctx, AV_LOG_DEBUG, "P");
2163 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2164 av_log(avctx, AV_LOG_DEBUG, "A");
2165 else if (IS_INTRA4x4(mb_type))
2166 av_log(avctx, AV_LOG_DEBUG, "i");
2167 else if (IS_INTRA16x16(mb_type))
2168 av_log(avctx, AV_LOG_DEBUG, "I");
2169 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2170 av_log(avctx, AV_LOG_DEBUG, "d");
2171 else if (IS_DIRECT(mb_type))
2172 av_log(avctx, AV_LOG_DEBUG, "D");
2173 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2174 av_log(avctx, AV_LOG_DEBUG, "g");
2175 else if (IS_GMC(mb_type))
2176 av_log(avctx, AV_LOG_DEBUG, "G");
2177 else if (IS_SKIP(mb_type))
2178 av_log(avctx, AV_LOG_DEBUG, "S");
2179 else if (!USES_LIST(mb_type, 1))
2180 av_log(avctx, AV_LOG_DEBUG, ">");
2181 else if (!USES_LIST(mb_type, 0))
2182 av_log(avctx, AV_LOG_DEBUG, "<");
2184 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2185 av_log(avctx, AV_LOG_DEBUG, "X");
2189 if (IS_8X8(mb_type))
2190 av_log(avctx, AV_LOG_DEBUG, "+");
2191 else if (IS_16X8(mb_type))
2192 av_log(avctx, AV_LOG_DEBUG, "-");
2193 else if (IS_8X16(mb_type))
2194 av_log(avctx, AV_LOG_DEBUG, "|");
2195 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2196 av_log(avctx, AV_LOG_DEBUG, " ");
2198 av_log(avctx, AV_LOG_DEBUG, "?");
2201 if (IS_INTERLACED(mb_type))
2202 av_log(avctx, AV_LOG_DEBUG, "=");
2204 av_log(avctx, AV_LOG_DEBUG, " ");
2207 av_log(avctx, AV_LOG_DEBUG, "\n");
2211 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2212 (avctx->debug_mv)) {
2215 int h_chroma_shift, v_chroma_shift, block_height;
2217 const int shift = 1 + quarter_sample;
2219 const int width = avctx->width;
2220 const int height = avctx->height;
2222 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2223 const int mv_stride = (mb_width << mv_sample_log2) +
2224 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2226 *low_delay = 0; // needed to see the vectors without trashing the buffers
2228 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2230 av_frame_make_writable(pict);
2232 pict->opaque = NULL;
2234 ptr = pict->data[0];
2236 block_height = 16 >> v_chroma_shift;
2238 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2240 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2241 const int mb_index = mb_x + mb_y * mb_stride;
2243 if ((avctx->debug_mv) && motion_val[0]) {
2245 for (type = 0; type < 3; type++) {
2249 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2250 (pict->pict_type!= AV_PICTURE_TYPE_P))
2255 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2256 (pict->pict_type!= AV_PICTURE_TYPE_B))
2261 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2262 (pict->pict_type!= AV_PICTURE_TYPE_B))
2267 if (!USES_LIST(mbtype_table[mb_index], direction))
2270 if (IS_8X8(mbtype_table[mb_index])) {
2272 for (i = 0; i < 4; i++) {
2273 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2274 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2275 int xy = (mb_x * 2 + (i & 1) +
2276 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2277 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2278 int my = (motion_val[direction][xy][1] >> shift) + sy;
2279 draw_arrow(ptr, sx, sy, mx, my, width,
2280 height, pict->linesize[0], 100, 0, direction);
2282 } else if (IS_16X8(mbtype_table[mb_index])) {
2284 for (i = 0; i < 2; i++) {
2285 int sx = mb_x * 16 + 8;
2286 int sy = mb_y * 16 + 4 + 8 * i;
2287 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2288 int mx = (motion_val[direction][xy][0] >> shift);
2289 int my = (motion_val[direction][xy][1] >> shift);
2291 if (IS_INTERLACED(mbtype_table[mb_index]))
2294 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2295 height, pict->linesize[0], 100, 0, direction);
2297 } else if (IS_8X16(mbtype_table[mb_index])) {
2299 for (i = 0; i < 2; i++) {
2300 int sx = mb_x * 16 + 4 + 8 * i;
2301 int sy = mb_y * 16 + 8;
2302 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2303 int mx = motion_val[direction][xy][0] >> shift;
2304 int my = motion_val[direction][xy][1] >> shift;
2306 if (IS_INTERLACED(mbtype_table[mb_index]))
2309 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2310 height, pict->linesize[0], 100, 0, direction);
2313 int sx= mb_x * 16 + 8;
2314 int sy= mb_y * 16 + 8;
2315 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2316 int mx= (motion_val[direction][xy][0]>>shift) + sx;
2317 int my= (motion_val[direction][xy][1]>>shift) + sy;
2318 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
2323 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2324 uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2325 0x0101010101010101ULL;
2327 for (y = 0; y < block_height; y++) {
2328 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2329 (block_height * mb_y + y) *
2330 pict->linesize[1]) = c;
2331 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2332 (block_height * mb_y + y) *
2333 pict->linesize[2]) = c;
2336 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2338 int mb_type = mbtype_table[mb_index];
2341 #define COLOR(theta, r) \
2342 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2343 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2347 if (IS_PCM(mb_type)) {
2349 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2350 IS_INTRA16x16(mb_type)) {
2352 } else if (IS_INTRA4x4(mb_type)) {
2354 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2356 } else if (IS_DIRECT(mb_type)) {
2358 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2360 } else if (IS_GMC(mb_type)) {
2362 } else if (IS_SKIP(mb_type)) {
2364 } else if (!USES_LIST(mb_type, 1)) {
2366 } else if (!USES_LIST(mb_type, 0)) {
2369 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2373 u *= 0x0101010101010101ULL;
2374 v *= 0x0101010101010101ULL;
2375 for (y = 0; y < block_height; y++) {
2376 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2377 (block_height * mb_y + y) * pict->linesize[1]) = u;
2378 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2379 (block_height * mb_y + y) * pict->linesize[2]) = v;
2383 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2384 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2385 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2386 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2387 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2389 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2390 for (y = 0; y < 16; y++)
2391 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2392 pict->linesize[0]] ^= 0x80;
2394 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2395 int dm = 1 << (mv_sample_log2 - 2);
2396 for (i = 0; i < 4; i++) {
2397 int sx = mb_x * 16 + 8 * (i & 1);
2398 int sy = mb_y * 16 + 8 * (i >> 1);
2399 int xy = (mb_x * 2 + (i & 1) +
2400 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2402 int32_t *mv = (int32_t *) &motion_val[0][xy];
2403 if (mv[0] != mv[dm] ||
2404 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2405 for (y = 0; y < 8; y++)
2406 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2407 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2408 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2409 pict->linesize[0]) ^= 0x8080808080808080ULL;
2413 if (IS_INTERLACED(mb_type) &&
2414 avctx->codec->id == AV_CODEC_ID_H264) {
2419 mbskip_table[mb_index] = 0;
// Convenience wrapper: dump per-MB debug info (MB types, QP, MVs) for the
// given picture by forwarding the context's tables to ff_print_debug_info2().
// NOTE(review): the embedded original line numbers skip values here; the
// function's braces appear elided from this excerpt.
2425 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2427     ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
2428                          p->qscale_table, p->motion_val, &s->low_delay,
2429                          s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
// Export the qscale table of Picture p as side data on frame f.
// Takes a new reference on the qscale buffer, skips the leading padding rows
// (offset = 2*mb_stride + 1 points at the first real MB entry), then hands
// ownership of the adjusted ref to av_frame_set_qp_table().
// Returns 0 on success or AVERROR(ENOMEM) if the buffer ref failed.
// NOTE(review): the "if (!ref)" guard line appears elided in this excerpt
// (original line numbering skips 2436).
2432 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2434     AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2435     int offset = 2*s->mb_stride + 1;
2437         return AVERROR(ENOMEM);
// Sanity check: buffer must cover all MB rows of the frame after the offset.
2438     av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2439     ref->size -= offset;
2440     ref->data += offset;
2441     return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
// Half-pel motion compensation for one block in lowres decoding mode.
// Computes the subpel phase (sx/sy) and integer source position from the
// motion vector, falls back to emulated_edge_mc() when the source block
// would read outside the reference picture, and applies the h264 chroma
// MC function (reused here as a generic bilinear filter for lowres).
2444 static inline int hpel_motion_lowres(MpegEncContext *s,
2445                                      uint8_t *dest, uint8_t *src,
2446                                      int field_based, int field_select,
2447                                      int src_x, int src_y,
2448                                      int width, int height, ptrdiff_t stride,
2449                                      int h_edge_pos, int v_edge_pos,
2450                                      int w, int h, h264_chroma_mc_func *pix_op,
2451                                      int motion_x, int motion_y)
2453     const int lowres = s->avctx->lowres;
// pix_op table has 4 entries; clamp the index for lowres > 3.
2454     const int op_index = FFMIN(lowres, 3);
// s_mask keeps the subpel fraction bits for this lowres level.
2455     const int s_mask = (2 << lowres) - 1;
2459     if (s->quarter_sample) {
2464     sx = motion_x & s_mask;
2465     sy = motion_y & s_mask;
// NOTE(review): ">> lowres + 1" parses as ">> (lowres + 1)" — shift binds
// looser than '+'. This matches the sibling code below, but confirm intent.
2466     src_x += motion_x >> lowres + 1;
2467     src_y += motion_y >> lowres + 1;
2469     src += src_y * stride + src_x;
// Out-of-picture read: re-fetch through the edge emulation buffer.
2471     if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2472         (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2473         s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2474                                  s->linesize, s->linesize,
2475                                  w + 1, (h + 1) << field_based,
2476                                  src_x, src_y << field_based,
2477                                  h_edge_pos, v_edge_pos);
2478         src = s->edge_emu_buffer;
// Rescale the subpel phase to the 1/8-pel range expected by pix_op.
2482     sx = (sx << 2) >> lowres;
2483     sy = (sy << 2) >> lowres;
2486     pix_op[op_index](dest, src, stride, h, sx, sy);
2490 /* apply one mpeg motion vector to the three components */
// Lowres variant of mpeg_motion(): compensates luma plus both chroma planes
// for one MV. Chroma subpel/source positions depend on the output format
// (H.263 rounding, H.261 full-pel chroma, otherwise 420/422/444 paths keyed
// off chroma_x_shift/chroma_y_shift). Edge emulation is applied to all three
// planes when the read would leave the reference picture.
// NOTE(review): many lines (parameter list, several branches, braces) are
// elided in this excerpt — the original line numbers skip values.
2491 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2498                                                 uint8_t **ref_picture,
2499                                                 h264_chroma_mc_func *pix_op,
2500                                                 int motion_x, int motion_y,
2503     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2504     int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2505     ptrdiff_t uvlinesize, linesize;
2506     const int lowres = s->avctx->lowres;
2507     const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2508     const int block_s = 8>>lowres;
2509     const int s_mask = (2 << lowres) - 1;
2510     const int h_edge_pos = s->h_edge_pos >> lowres;
2511     const int v_edge_pos = s->v_edge_pos >> lowres;
// Field-based MC doubles the effective stride to step over the other field.
2512     linesize   = s->current_picture.f->linesize[0] << field_based;
2513     uvlinesize = s->current_picture.f->linesize[1] << field_based;
2515     // FIXME obviously not perfect but qpel will not work in lowres anyway
2516     if (s->quarter_sample) {
2522         motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2525     sx = motion_x & s_mask;
2526     sy = motion_y & s_mask;
// ">> lowres + 1" is ">> (lowres + 1)" — precedence, consistent with
// hpel_motion_lowres() above.
2527     src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2528     src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2530     if (s->out_format == FMT_H263) {
2531         uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2532         uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2533         uvsrc_x = src_x >> 1;
2534         uvsrc_y = src_y >> 1;
2535     } else if (s->out_format == FMT_H261) {
2536         // even chroma mv's are full pel in H261
2539         uvsx = (2 * mx) & s_mask;
2540         uvsy = (2 * my) & s_mask;
2541         uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2542         uvsrc_y = mb_y * block_s + (my >> lowres);
// 4:2:0 path (chroma subsampled in both directions).
2544         if(s->chroma_y_shift){
2549             uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2550             uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
// 4:2:2 path (chroma subsampled horizontally only).
2552             if(s->chroma_x_shift){
2556                 uvsy = motion_y & s_mask;
2558                 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
// 4:4:4 path: chroma uses the luma positions/phases unchanged.
2561                 uvsx = motion_x & s_mask;
2562                 uvsy = motion_y & s_mask;
2569     ptr_y  = ref_picture[0] + src_y * linesize + src_x;
2570     ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2571     ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
// Edge emulation for reads outside the reference picture; chroma is skipped
// in gray-only decoding.
2573     if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2574         (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2575         s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2576                                  linesize >> field_based, linesize >> field_based,
2577                                  17, 17 + field_based,
2578                                  src_x, src_y << field_based, h_edge_pos,
2580         ptr_y = s->edge_emu_buffer;
2581         if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
2582             uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2583             uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2584             s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2585                                      uvlinesize >> field_based, uvlinesize >> field_based,
2587                                      uvsrc_x, uvsrc_y << field_based,
2588                                      h_edge_pos >> 1, v_edge_pos >> 1);
2589             s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2590                                      uvlinesize >> field_based,uvlinesize >> field_based,
2592                                      uvsrc_x, uvsrc_y << field_based,
2593                                      h_edge_pos >> 1, v_edge_pos >> 1);
2599     // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
// Bottom field: offset dest and src pointers by one line.
2601         dest_y  += s->linesize;
2602         dest_cb += s->uvlinesize;
2603         dest_cr += s->uvlinesize;
2607         ptr_y  += s->linesize;
2608         ptr_cb += s->uvlinesize;
2609         ptr_cr += s->uvlinesize;
// Rescale subpel phases to the 1/8-pel grid and run MC on all planes.
2612     sx = (sx << 2) >> lowres;
2613     sy = (sy << 2) >> lowres;
2614     pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2616     if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
2617         int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2618         uvsx = (uvsx << 2) >> lowres;
2619         uvsy = (uvsy << 2) >> lowres;
2621         pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2622         pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2625     // FIXME h261 lowres loop filter
// Chroma motion compensation for 4MV macroblocks in lowres mode:
// the four luma MVs are combined into a single chroma MV (H.263-style
// rounding), then Cb and Cr are compensated with the same offset/phase.
2628 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2629                                             uint8_t *dest_cb, uint8_t *dest_cr,
2630                                             uint8_t **ref_picture,
2631                                             h264_chroma_mc_func * pix_op,
2634     const int lowres = s->avctx->lowres;
2635     const int op_index = FFMIN(lowres, 3);
2636     const int block_s = 8 >> lowres;
2637     const int s_mask = (2 << lowres) - 1;
// ">> lowres + 1" is ">> (lowres + 1)" (chroma is half resolution).
2638     const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2639     const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2640     int emu = 0, src_x, src_y, sx, sy;
2644     if (s->quarter_sample) {
2649     /* In case of 8X8, we construct a single chroma motion vector
2650        with a special rounding */
2651     mx = ff_h263_round_chroma(mx);
2652     my = ff_h263_round_chroma(my);
2656     src_x = s->mb_x * block_s + (mx >> lowres + 1);
2657     src_y = s->mb_y * block_s + (my >> lowres + 1);
2659     offset = src_y * s->uvlinesize + src_x;
2660     ptr = ref_picture[1] + offset;
2661     if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2662         (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2663         s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2664                                  s->uvlinesize, s->uvlinesize,
2666                                  src_x, src_y, h_edge_pos, v_edge_pos);
2667         ptr = s->edge_emu_buffer;
// Rescale the subpel phase to the 1/8-pel grid, then run Cb.
2670     sx = (sx << 2) >> lowres;
2671     sy = (sy << 2) >> lowres;
2672     pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
// Cr uses the identical offset; the emu flag computed above appears to be
// reused here (its test line is elided in this excerpt).
2674     ptr = ref_picture[2] + offset;
2676         s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2677                                  s->uvlinesize, s->uvlinesize,
2679                                  src_x, src_y, h_edge_pos, v_edge_pos);
2680         ptr = s->edge_emu_buffer;
2682     pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2686  * motion compensation of a single macroblock
2688  * @param dest_y luma destination pointer
2689  * @param dest_cb chroma cb/u destination pointer
2690  * @param dest_cr chroma cr/v destination pointer
2691  * @param dir direction (0->forward, 1->backward)
2692  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2693  * @param pix_op halfpel motion compensation function (average or put normally)
2694  * the motion vectors are taken from s->mv and the MV type from s->mv_type
// Lowres counterpart of ff_mpv_motion(): dispatches on s->mv_type
// (16x16 / 8x8 / field / 16x8 / dual-prime) to mpeg_motion_lowres(),
// hpel_motion_lowres() and chroma_4mv_motion_lowres().
2696 static inline void MPV_motion_lowres(MpegEncContext *s,
2697                                      uint8_t *dest_y, uint8_t *dest_cb,
2699                                      int dir, uint8_t **ref_picture,
2700                                      h264_chroma_mc_func *pix_op)
2704     const int lowres = s->avctx->lowres;
2705     const int block_s = 8 >>lowres;
2710     switch (s->mv_type) {
// MV_TYPE_16X16: one MV for the whole macroblock.
2712         mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2714                            ref_picture, pix_op,
2715                            s->mv[dir][0][0], s->mv[dir][0][1],
// MV_TYPE_8X8: four luma MVs; chroma handled separately below with an
// averaged MV.
2721         for (i = 0; i < 4; i++) {
2722             hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2723                                s->linesize) * block_s,
2724                                ref_picture[0], 0, 0,
2725                                (2 * mb_x + (i & 1)) * block_s,
2726                                (2 * mb_y + (i >> 1)) * block_s,
2727                                s->width, s->height, s->linesize,
2728                                s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2729                                block_s, block_s, pix_op,
2730                                s->mv[dir][i][0], s->mv[dir][i][1]);
2732             mx += s->mv[dir][i][0];
2733             my += s->mv[dir][i][1];
2736         if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY))
2737             chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
// MV_TYPE_FIELD: one MV per field.
2741         if (s->picture_structure == PICT_FRAME) {
2743             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2744                                1, 0, s->field_select[dir][0],
2745                                ref_picture, pix_op,
2746                                s->mv[dir][0][0], s->mv[dir][0][1],
2749             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2750                                1, 1, s->field_select[dir][1],
2751                                ref_picture, pix_op,
2752                                s->mv[dir][1][0], s->mv[dir][1][1],
// Field picture referencing the opposite parity of the current frame.
2755             if (s->picture_structure != s->field_select[dir][0] + 1 &&
2756                 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2757                 ref_picture = s->current_picture_ptr->f->data;
2760             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2761                                0, 0, s->field_select[dir][0],
2762                                ref_picture, pix_op,
2764                                s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
// MV_TYPE_16X8: two MVs, upper/lower half of the MB.
2768         for (i = 0; i < 2; i++) {
2769             uint8_t **ref2picture;
2771             if (s->picture_structure == s->field_select[dir][i] + 1 ||
2772                 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2773                 ref2picture = ref_picture;
2775                 ref2picture = s->current_picture_ptr->f->data;
2778             mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2779                                0, 0, s->field_select[dir][i],
2780                                ref2picture, pix_op,
2781                                s->mv[dir][i][0], s->mv[dir][i][1] +
2782                                2 * block_s * i, block_s, mb_y >> 1);
2784             dest_y  += 2 * block_s * s->linesize;
2785             dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2786             dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
// MV_TYPE_DMV (dual prime): put first, then avg the opposite parity.
2790         if (s->picture_structure == PICT_FRAME) {
2791             for (i = 0; i < 2; i++) {
2793                 for (j = 0; j < 2; j++) {
2794                     mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2796                                        ref_picture, pix_op,
2797                                        s->mv[dir][2 * i + j][0],
2798                                        s->mv[dir][2 * i + j][1],
2801                 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2804             for (i = 0; i < 2; i++) {
2805                 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2806                                    0, 0, s->picture_structure != i + 1,
2807                                    ref_picture, pix_op,
2808                                    s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2809                                    2 * block_s, mb_y >> 1);
2811                 // after put we make avg of the same block
2812                 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2814                 // opposite parity is always in the same
2815                 // frame if this is second field
2816                 if (!s->first_field) {
2817                     ref_picture = s->current_picture_ptr->f->data;
2828  * find the lowest MB row referenced in the MVs
// Used for frame-threading: a consumer must wait until the reference frame
// has decoded at least this MB row. Returns mb_height-1 (whole frame) for
// field pictures / GMC, otherwise the current MB row plus the worst-case
// vertical MV reach, clamped to the picture.
2830 int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
// qpel MVs already have the extra precision bit; half-pel MVs get one more shift.
2832     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2833     int my, off, i, mvs;
2835     if (s->picture_structure != PICT_FRAME || s->mcsel)
2838     switch (s->mv_type) {
2852     for (i = 0; i < mvs; i++) {
2853         my = s->mv[dir][i][1];
2854         my_max = FFMAX(my_max, my);
2855         my_min = FFMIN(my_min, my);
// Convert the largest |MV| from subpel units to MB rows, rounding up
// (+63 >> 6 == ceil-divide by 64 = 16 pixels in quarter-pel units).
2858     off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
2860     return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2862     return s->mb_height-1;
2865 /* put block[] to dest[] */
// Dequantize an intra block with the given qscale and write (overwrite)
// the inverse DCT result into dest.
2866 static inline void put_dct(MpegEncContext *s,
2867                            int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2869     s->dct_unquantize_intra(s, block, i, qscale);
2870     s->idsp.idct_put(dest, line_size, block);
2873 /* add block[] to dest[] */
// Add the inverse DCT of an already-dequantized block onto dest;
// skipped entirely when the block has no coded coefficients.
2874 static inline void add_dct(MpegEncContext *s,
2875                            int16_t *block, int i, uint8_t *dest, int line_size)
2877     if (s->block_last_index[i] >= 0) {
2878         s->idsp.idct_add(dest, line_size, block);
// Inter-block variant: dequantize with dct_unquantize_inter() first, then
// add the inverse DCT onto dest; no-op for blocks without coefficients.
2882 static inline void add_dequant_dct(MpegEncContext *s,
2883                            int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2885     if (s->block_last_index[i] >= 0) {
2886         s->dct_unquantize_inter(s, block, i, qscale);
2888         s->idsp.idct_add(dest, line_size, block);
2893  * Clean dc, ac, coded_block for the current non-intra MB.
// Resets the intra prediction state (DC predictors to 1024, AC predictor
// arrays to 0, MSMPEG4 coded_block flags) for the four luma 8x8 blocks and
// the chroma blocks of the current macroblock, then clears mbintra_table.
2895 void ff_clean_intra_table_entries(MpegEncContext *s)
2897     int wrap = s->b8_stride;
2898     int xy = s->block_index[0];
// Luma: the current block plus its right/below/diagonal neighbours in the
// 8x8 block grid.
2901     s->dc_val[0][xy + 1 ] =
2902     s->dc_val[0][xy + wrap] =
2903     s->dc_val[0][xy + 1 + wrap] = 1024;
2905     memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2906     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2907     if (s->msmpeg4_version>=3) {
2908         s->coded_block[xy ] =
2909         s->coded_block[xy + 1 ] =
2910         s->coded_block[xy + wrap] =
2911         s->coded_block[xy + 1 + wrap] = 0;
// Chroma: indexed on the MB grid rather than the 8x8 block grid.
2914     wrap = s->mb_stride;
2915     xy = s->mb_x + s->mb_y * wrap;
2917     s->dc_val[2][xy] = 1024;
2919     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2920     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2922     s->mbintra_table[xy]= 0;
2925 /* generic function called after a macroblock has been parsed by the
2926    decoder or after it has been encoded by the encoder.
2928    Important variables used:
2929    s->mb_intra : true if intra macroblock
2930    s->mv_dir : motion vector direction
2931    s->mv_type : motion vector type
2932    s->mv : motion vector
2933    s->interlaced_dct : true if interlaced dct used (mpeg2)
// Template function: lowres_flag and is_mpeg12 are compile-time constants at
// each call site (see ff_mpv_decode_mb), letting the compiler strip dead
// branches. NOTE(review): the embedded original line numbers skip values —
// several statements and braces are elided from this excerpt.
2935 static av_always_inline
2936 void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2937                             int lowres_flag, int is_mpeg12)
2939     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
// Hardware acceleration (e.g. XvMC) takes over the whole MB decode.
2942         s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
2943         s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
2947     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2948        /* print DCT coefficients */
2950        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2952            for(j=0; j<64; j++){
2953                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
2954                       block[i][s->idsp.idct_permutation[j]]);
2956            av_log(s->avctx, AV_LOG_DEBUG, "\n");
2960     s->current_picture.qscale_table[mb_xy] = s->qscale;
2962     /* update DC predictors for P macroblocks */
2964         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2965             if(s->mbintra_table[mb_xy])
2966                 ff_clean_intra_table_entries(s);
2970             s->last_dc[2] = 128 << s->intra_dc_precision;
2973     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2974         s->mbintra_table[mb_xy]=1;
// Reconstruction can be skipped while encoding non-reference pictures with
// non-RD MB decision, unless PSNR/frame-skip stats need the pixels.
2976     if ((s->avctx->flags & CODEC_FLAG_PSNR) || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor ||
2977         !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
2978           s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
2979         uint8_t *dest_y, *dest_cb, *dest_cr;
2980         int dct_linesize, dct_offset;
2981         op_pixels_func (*op_pix)[4];
2982         qpel_mc_func (*op_qpix)[16];
2983         const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2984         const int uvlinesize = s->current_picture.f->linesize[1];
2985         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2986         const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2988         /* avoid copy if macroblock skipped in last frame too */
2989         /* skip only during decoding as we might trash the buffers during encoding a bit */
2991             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2993             if (s->mb_skipped) {
2995                 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2997             } else if(!s->current_picture.reference) {
3000                 *mbskip_ptr = 0; /* not skipped */
// Interlaced DCT interleaves the two fields: double stride, offset one line.
3004         dct_linesize = linesize << s->interlaced_dct;
3005         dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;
3009             dest_cb= s->dest[1];
3010             dest_cr= s->dest[2];
// Non-readable destination: reconstruct into scratch, copy out at the end.
3012             dest_y = s->b_scratchpad;
3013             dest_cb= s->b_scratchpad+16*linesize;
3014             dest_cr= s->b_scratchpad+32*linesize;
3018             /* motion handling */
3019             /* decoding or more than one mb_type (MC was already done otherwise) */
// Frame threading: wait until the referenced rows of the ref frames exist.
3022                 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
3023                     if (s->mv_dir & MV_DIR_FORWARD) {
3024                         ff_thread_await_progress(&s->last_picture_ptr->tf,
3025                                                  ff_mpv_lowest_referenced_row(s, 0),
3028                     if (s->mv_dir & MV_DIR_BACKWARD) {
3029                         ff_thread_await_progress(&s->next_picture_ptr->tf,
3030                                                  ff_mpv_lowest_referenced_row(s, 1),
// Lowres MC path: forward pass uses "put", a following backward pass "avg"s.
3036                     h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
3038                     if (s->mv_dir & MV_DIR_FORWARD) {
3039                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
3040                         op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
3042                     if (s->mv_dir & MV_DIR_BACKWARD) {
3043                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
// Full-resolution MC path, same put-then-avg structure.
3046                     op_qpix = s->me.qpel_put;
3047                     if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
3048                         op_pix = s->hdsp.put_pixels_tab;
3050                         op_pix = s->hdsp.put_no_rnd_pixels_tab;
3052                     if (s->mv_dir & MV_DIR_FORWARD) {
3053                         ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
3054                         op_pix = s->hdsp.avg_pixels_tab;
3055                         op_qpix= s->me.qpel_avg;
3057                     if (s->mv_dir & MV_DIR_BACKWARD) {
3058                         ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
3063             /* skip dequant / idct if we are really late ;) */
3064             if(s->avctx->skip_idct){
3065                 if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
3066                    ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
3067                    || s->avctx->skip_idct >= AVDISCARD_ALL)
3071             /* add dct residue */
// Inter residue, codecs where dequantization happens here (not in the
// bitstream parser): dequantize + add.
3072             if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
3073                                 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
3074                 add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
3075                 add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
3076                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
3077                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3079                 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
3080                     if (s->chroma_y_shift){
3081                         add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3082                         add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3086                         add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
3087                         add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
3088                         add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3089                         add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
// Codecs whose blocks arrive pre-dequantized: plain idct_add.
3092             } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3093                 add_dct(s, block[0], 0, dest_y                          , dct_linesize);
3094                 add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
3095                 add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
3096                 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3098                 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
3099                     if(s->chroma_y_shift){//Chroma420
3100                         add_dct(s, block[4], 4, dest_cb, uvlinesize);
3101                         add_dct(s, block[5], 5, dest_cr, uvlinesize);
3104                         dct_linesize = uvlinesize << s->interlaced_dct;
3105                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3107                         add_dct(s, block[4], 4, dest_cb, dct_linesize);
3108                         add_dct(s, block[5], 5, dest_cr, dct_linesize);
3109                         add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3110                         add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3111                         if(!s->chroma_x_shift){//Chroma444
3112                             add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3113                             add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3114                             add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3115                             add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3120             else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3121                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3124             /* dct only in intra block */
// Intra MB: overwrite the destination instead of adding residue.
3125             if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
3126                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
3127                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
3128                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
3129                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3131                 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
3132                     if(s->chroma_y_shift){
3133                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3134                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3138                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
3139                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
3140                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3141                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
// MPEG-1/2 intra blocks are already dequantized: plain idct_put.
3145                 s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
3146                 s->idsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
3147                 s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, block[2]);
3148                 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3150                 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
3151                     if(s->chroma_y_shift){
3152                         s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
3153                         s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
3156                         dct_linesize = uvlinesize << s->interlaced_dct;
3157                         dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3159                         s->idsp.idct_put(dest_cb,              dct_linesize, block[4]);
3160                         s->idsp.idct_put(dest_cr,              dct_linesize, block[5]);
3161                         s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3162                         s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3163                         if(!s->chroma_x_shift){//Chroma444
3164                             s->idsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
3165                             s->idsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
3166                             s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3167                             s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
// Scratchpad path: copy the reconstructed MB into the real destination.
3175             s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
3176             if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
3177                 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3178                 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
// Public entry point: dispatch to the always-inline template with
// compile-time constant (lowres_flag, is_mpeg12) pairs so each combination
// is specialized and dead branches are removed.
3184 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
3187     if(s->out_format == FMT_MPEG1) {
3188         if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 1);
3189         else                 mpv_decode_mb_internal(s, block, 0, 1);
3192         if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 0);
3193         else                 mpv_decode_mb_internal(s, block, 0, 0);
// Notify the user's draw_horiz_band callback (via ff_draw_horiz_band) that
// rows [y, y+h) of the current picture are ready; passes the last picture
// so B-frame display order can be handled.
3196 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3198     ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3199                        s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3200                        s->first_field, s->low_delay);
// Recompute s->block_index[] (positions of the 4 luma + 2 chroma blocks of
// the current MB in the per-block tables) and s->dest[] (pixel pointers for
// the three planes), accounting for lowres scaling and field pictures.
3203 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3204     const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3205     const int uvlinesize = s->current_picture.f->linesize[1];
// log2 of the MB size in pixels: 16 at full res, halved per lowres level.
3206     const int mb_size= 4 - s->avctx->lowres;
3208     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
3209     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
3210     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3211     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3212     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3213     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3214     //block_index is not used by mpeg2, so it is not affected by chroma_format
// (mb_x - 1U) with the unsigned literal keeps the left-of-picture start
// column well-defined; cast back to int for the pointer offset.
3216     s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) <<  mb_size);
3217     s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (mb_size - s->chroma_x_shift));
3218     s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (mb_size - s->chroma_x_shift));
3220     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3222         if(s->picture_structure==PICT_FRAME){
3223             s->dest[0] += s->mb_y *   linesize << mb_size;
3224             s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3225             s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
// Field picture: rows of each field are half as dense in mb_y terms.
3227             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
3228             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3229             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3230             av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3236  * Permute an 8x8 block.
3237  * @param block the block which will be permuted according to the given permutation vector
3238  * @param permutation the permutation vector
3239  * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3240  * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3241  *                  (inverse) permutated to scantable order!
// Two passes over only the coefficients up to 'last': copy them out
// (to a temp — its declaration is elided in this excerpt), then write each
// back to its permuted position.
3243 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3249 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
3251     for(i=0; i<=last; i++){
3252         const int j= scantable[i];
3257     for(i=0; i<=last; i++){
3258         const int j= scantable[i];
3259         const int perm_j= permutation[j];
3260         block[perm_j]= temp[j];
// Codec flush callback: drop all picture references (pool and the three
// working pictures), reset MB position, parser state and the bitstream
// buffer so decoding can restart cleanly (e.g. after a seek).
3264 void ff_mpeg_flush(AVCodecContext *avctx){
3266     MpegEncContext *s = avctx->priv_data;
3268     if (!s || !s->picture)
3271     for (i = 0; i < MAX_PICTURE_COUNT; i++)
3272         ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
3273     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3275     ff_mpeg_unref_picture(s->avctx, &s->current_picture);
3276     ff_mpeg_unref_picture(s->avctx, &s->last_picture);
3277     ff_mpeg_unref_picture(s->avctx, &s->next_picture);
3279     s->mb_x= s->mb_y= 0;
3282     s->parse_context.state= -1;
3283     s->parse_context.frame_start_found= 0;
3284     s->parse_context.overread= 0;
3285     s->parse_context.overread_index= 0;
3286     s->parse_context.index= 0;
3287     s->parse_context.last_index= 0;
3288     s->bitstream_buffer_size=0;
3293  * set qscale and update qscale dependent variables.
// Clamps qscale to [1..31] (the lower-bound branch is elided in this
// excerpt) and refreshes chroma_qscale and the luma/chroma DC scale factors
// from their lookup tables.
3295 void ff_set_qscale(MpegEncContext * s, int qscale)
3299     else if (qscale > 31)
3303     s->chroma_qscale= s->chroma_qscale_table[qscale];
3305     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3306     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
3309 void ff_mpv_report_decode_progress(MpegEncContext *s)
3311 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3312 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);