2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/motion_vector.h"
35 #include "libavutil/timer.h"
38 #include "h264chroma.h"
42 #include "mpegutils.h"
43 #include "mpegvideo.h"
/* Default chroma qscale table: identity mapping, i.e. chroma uses the same
 * quantizer scale as luma. NOTE(review): the closing "};" of this table is
 * elided from this excerpt. */
50 static const uint8_t ff_default_chroma_qscale_table[32] = {
51 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
52 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
53 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale value.
 * Indexed by qscale (0..127). NOTE(review): closing "};" elided here. */
56 const uint8_t ff_mpeg1_dc_scale_table[128] = {
57 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
65 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale for intra_dc_precision == 1: constant 4 (i.e. 9-bit DC).
 * NOTE(review): closing "};" elided from this excerpt. */
68 static const uint8_t mpeg2_dc_scale_table1[128] = {
69 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
77 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale for intra_dc_precision == 2: constant 2 (i.e. 10-bit DC).
 * NOTE(review): closing "};" elided from this excerpt. */
80 static const uint8_t mpeg2_dc_scale_table2[128] = {
81 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
89 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale for intra_dc_precision == 3: constant 1 (i.e. 11-bit DC).
 * NOTE(review): closing "};" elided from this excerpt. */
92 static const uint8_t mpeg2_dc_scale_table3[128] = {
93 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Dispatch table: DC scale table per intra_dc_precision (0..3).
 * Index 0 reuses the MPEG-1 table (scale 8); 1..3 use the MPEG-2 tables
 * above. NOTE(review): closing "};" elided from this excerpt. */
104 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
105 ff_mpeg1_dc_scale_table,
106 mpeg2_dc_scale_table1,
107 mpeg2_dc_scale_table2,
108 mpeg2_dc_scale_table3,
/* Alternate horizontal coefficient scan order (64 entries, raster indices).
 * NOTE(review): closing "};" elided from this excerpt. */
111 const uint8_t ff_alternate_horizontal_scan[64] = {
112 0, 1, 2, 3, 8, 9, 16, 17,
113 10, 11, 4, 5, 6, 7, 15, 14,
114 13, 12, 19, 18, 24, 25, 32, 33,
115 26, 27, 20, 21, 22, 23, 28, 29,
116 30, 31, 34, 35, 40, 41, 48, 49,
117 42, 43, 36, 37, 38, 39, 44, 45,
118 46, 47, 50, 51, 56, 57, 58, 59,
119 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical coefficient scan order (MPEG-2 alternate scan; used
 * below when s->alternate_scan is set). NOTE(review): closing "};" elided. */
122 const uint8_t ff_alternate_vertical_scan[64] = {
123 0, 8, 16, 24, 1, 9, 2, 10,
124 17, 25, 32, 40, 48, 56, 57, 49,
125 41, 33, 26, 18, 3, 11, 4, 12,
126 19, 27, 34, 42, 50, 58, 35, 43,
127 51, 59, 20, 28, 5, 13, 6, 14,
128 21, 29, 36, 44, 52, 60, 37, 45,
129 53, 61, 22, 30, 7, 15, 23, 31,
130 38, 46, 54, 62, 39, 47, 55, 63,
/* Dequantize an intra block (MPEG-1 rules), in place.
 * block:  DCT coefficients; block[0] is the DC term.
 * n:      block index; n < 4 selects the luma DC scale, else chroma.
 * qscale: quantizer scale for this macroblock.
 * Only coefficients up to block_last_index[n] are processed; AC terms are
 * scaled by qscale * intra_matrix[] then made odd ("mismatch" control via
 * (level - 1) | 1). NOTE(review): the sign handling and loop braces between
 * the two scaling arms are elided from this excerpt — the two identical
 * statements below are the positive/negative branches of an if/else. */
133 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
134 int16_t *block, int n, int qscale)
136 int i, level, nCoeffs;
137 const uint16_t *quant_matrix;
139 nCoeffs= s->block_last_index[n];
/* DC is scaled separately by the y/c DC scale, not by the matrix. */
141 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
142 /* XXX: only mpeg1 */
143 quant_matrix = s->intra_matrix;
144 for(i=1;i<=nCoeffs;i++) {
/* Walk coefficients in permuted (IDCT-friendly) scan order. */
145 int j= s->intra_scantable.permutated[i];
150 level = (int)(level * qscale * quant_matrix[j]) >> 3;
151 level = (level - 1) | 1;
154 level = (int)(level * qscale * quant_matrix[j]) >> 3;
155 level = (level - 1) | 1;
/* Dequantize an inter block (MPEG-1 rules), in place.
 * Unlike the intra path, the loop starts at i = 0 (no special DC handling)
 * and uses the inter matrix with the ((2*level + 1) * q * m) >> 4 formula,
 * again forcing the result odd. Uses intra_scantable for the permutation —
 * intentional in upstream FFmpeg, as inter blocks share the same scan.
 * NOTE(review): sign branches and clamping elided from this excerpt. */
162 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
163 int16_t *block, int n, int qscale)
165 int i, level, nCoeffs;
166 const uint16_t *quant_matrix;
168 nCoeffs= s->block_last_index[n];
170 quant_matrix = s->inter_matrix;
171 for(i=0; i<=nCoeffs; i++) {
172 int j= s->intra_scantable.permutated[i];
177 level = (((level << 1) + 1) * qscale *
178 ((int) (quant_matrix[j]))) >> 4;
179 level = (level - 1) | 1;
182 level = (((level << 1) + 1) * qscale *
183 ((int) (quant_matrix[j]))) >> 4;
184 level = (level - 1) | 1;
/* Dequantize an intra block (MPEG-2 rules), in place.
 * With alternate_scan all 64 coefficients must be processed (the scan order
 * does not guarantee block_last_index covers them), hence nCoeffs = 63.
 * Same scaling as MPEG-1 intra but WITHOUT the (level - 1) | 1 oddification;
 * MPEG-2 uses a different mismatch control (elided from this excerpt). */
191 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
192 int16_t *block, int n, int qscale)
194 int i, level, nCoeffs;
195 const uint16_t *quant_matrix;
197 if(s->alternate_scan) nCoeffs= 63;
198 else nCoeffs= s->block_last_index[n];
/* DC uses the luma scale for blocks 0..3, chroma scale otherwise. */
200 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
201 quant_matrix = s->intra_matrix;
202 for(i=1;i<=nCoeffs;i++) {
203 int j= s->intra_scantable.permutated[i];
208 level = (int)(level * qscale * quant_matrix[j]) >> 3;
211 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of MPEG-2 intra dequantization, selected in dct_init()
 * when CODEC_FLAG_BITEXACT is set. Structure mirrors
 * dct_unquantize_mpeg2_intra_c; the extra mismatch-sum bookkeeping that
 * makes it bit-exact is elided from this excerpt. */
218 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
219 int16_t *block, int n, int qscale)
221 int i, level, nCoeffs;
222 const uint16_t *quant_matrix;
225 if(s->alternate_scan) nCoeffs= 63;
226 else nCoeffs= s->block_last_index[n];
228 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
230 quant_matrix = s->intra_matrix;
231 for(i=1;i<=nCoeffs;i++) {
232 int j= s->intra_scantable.permutated[i];
237 level = (int)(level * qscale * quant_matrix[j]) >> 3;
240 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Dequantize an inter block (MPEG-2 rules), in place.
 * Loop from i = 0 (DC treated like any coefficient), inter matrix,
 * ((2*level + 1) * q * m) >> 4 scaling; no oddification here — the MPEG-2
 * mismatch control (elided from this excerpt) replaces it. */
249 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
250 int16_t *block, int n, int qscale)
252 int i, level, nCoeffs;
253 const uint16_t *quant_matrix;
/* alternate_scan forces a full 64-coefficient pass, as in the intra path. */
256 if(s->alternate_scan) nCoeffs= 63;
257 else nCoeffs= s->block_last_index[n];
259 quant_matrix = s->inter_matrix;
260 for(i=0; i<=nCoeffs; i++) {
261 int j= s->intra_scantable.permutated[i];
266 level = (((level << 1) + 1) * qscale *
267 ((int) (quant_matrix[j]))) >> 4;
270 level = (((level << 1) + 1) * qscale *
271 ((int) (quant_matrix[j]))) >> 4;
/* Dequantize an intra block (H.263 rules), in place.
 * No quant matrix: level' = level * qmul +/- qadd by sign, where
 * qadd = (qscale - 1) | 1 (always odd). The qmul assignment and the AIC
 * (advanced intra coding) special cases are elided from this excerpt;
 * the assert shows block_last_index may be -1 only under h263_aic. */
280 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
281 int16_t *block, int n, int qscale)
283 int i, level, qmul, qadd;
286 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
291 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
292 qadd = (qscale - 1) | 1;
/* raster_end[] maps the last scan index back to a raster bound. */
299 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
301 for(i=1; i<=nCoeffs; i++) {
305 level = level * qmul - qadd;
307 level = level * qmul + qadd;
/* Dequantize an inter block (H.263 rules), in place.
 * Same qmul/qadd scheme as the intra variant but no DC special case and the
 * loop starts at i = 0. block_last_index must be valid here (assert).
 * NOTE(review): qmul assignment, sign tests and braces elided from this
 * excerpt; the two statements below are the negative/positive branches. */
314 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
315 int16_t *block, int n, int qscale)
317 int i, level, qmul, qadd;
320 av_assert2(s->block_last_index[n]>=0);
322 qadd = (qscale - 1) | 1;
325 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
327 for(i=0; i<=nCoeffs; i++) {
331 level = level * qmul - qadd;
333 level = level * qmul + qadd;
/* Error-resilience callback (installed in init_er()): reconstruct one
 * macroblock from concealment data. Copies the MV/mode parameters into the
 * context, recomputes the destination plane pointers for (mb_x, mb_y), and
 * runs the normal MB decode path. NOTE(review): several lines (mv_dir
 * assignment, interlacing check condition) are elided from this excerpt. */
340 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
342 int mb_x, int mb_y, int mb_intra, int mb_skipped)
344 MpegEncContext *s = opaque;
347 s->mv_type = mv_type;
348 s->mb_intra = mb_intra;
349 s->mb_skipped = mb_skipped;
352 memcpy(s->mv, mv, sizeof(*mv));
354 ff_init_block_index(s);
355 ff_update_block_index(s);
357 s->bdsp.clear_blocks(s->block[0]);
/* Destination pointers: luma at 16x16 granularity, chroma shifted by the
 * pixel format's chroma subsampling factors. */
359 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
360 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
361 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
364 av_log(s->avctx, AV_LOG_DEBUG,
365 "Interlaced error concealment is not fully implemented\n");
366 ff_mpv_decode_mb(s, s->block);
/* Debug stubs used when FF_DEBUG_NOMC is set (see dct_init): instead of
 * motion compensation, fill each 16- or 8-pixel-wide row with mid-gray
 * (128). NOTE(review): the per-row loop is elided from this excerpt; only
 * the memset body line is visible. */
369 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
372 memset(dst + h*linesize, 128, 16);
375 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
378 memset(dst + h*linesize, 128, 8);
381 /* init common dct for both encoder and decoder */
/* Initializes DSP contexts, optionally replaces pixel ops with gray fills
 * for FF_DEBUG_NOMC, installs the C dequantizers (bit-exact MPEG-2 intra
 * variant under CODEC_FLAG_BITEXACT), then lets per-arch init functions
 * override them. NOTE(review): the #if HAVE_* guards around the arch init
 * calls and the return are elided from this excerpt. */
382 static av_cold int dct_init(MpegEncContext *s)
384 ff_blockdsp_init(&s->bdsp, s->avctx);
385 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
386 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
387 ff_mpegvideodsp_init(&s->mdsp);
388 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
390 if (s->avctx->debug & FF_DEBUG_NOMC) {
/* Disable motion compensation output for debugging: gray fills instead. */
392 for (i=0; i<4; i++) {
393 s->hdsp.avg_pixels_tab[0][i] = gray16;
394 s->hdsp.put_pixels_tab[0][i] = gray16;
395 s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
397 s->hdsp.avg_pixels_tab[1][i] = gray8;
398 s->hdsp.put_pixels_tab[1][i] = gray8;
399 s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
403 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
404 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
405 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
406 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
407 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
408 if (s->avctx->flags & CODEC_FLAG_BITEXACT)
409 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
410 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture overrides of the C dequantizers, if available. */
412 if (HAVE_INTRINSICS_NEON)
413 ff_mpv_common_init_neon(s);
416 ff_mpv_common_init_axp(s);
418 ff_mpv_common_init_arm(s);
420 ff_mpv_common_init_ppc(s);
422 ff_mpv_common_init_x86(s);
/* Initialize the IDCT context and build the permuted scan tables.
 * inter/intra scantables use alternate-vertical or zigzag depending on
 * s->alternate_scan; the h/v intra scantables are always built. */
427 av_cold void ff_mpv_idct_init(MpegEncContext *s)
429 ff_idctdsp_init(&s->idsp, s->avctx);
431 /* load & permutate scantables
432 * note: only wmv uses different ones
434 if (s->alternate_scan) {
435 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
436 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
/* else branch (zigzag) — the "} else {" line is elided from this excerpt. */
438 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
439 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
441 ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
442 ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the per-context scratch buffers that depend on linesize:
 * edge_emu_buffer plus a shared ME/RD/OBMC scratchpad. Skipped entirely for
 * hwaccel/VDPAU (no software pixels to emulate). Returns 0 on success,
 * AVERROR_PATCHWELCOME if the image is too small, AVERROR(ENOMEM) on
 * allocation failure. NOTE(review): the early return after the hwaccel
 * check, the minimum-size condition, and the success return are elided. */
445 static int frame_size_alloc(MpegEncContext *s, int linesize)
/* FFABS: linesize may be negative for bottom-up layouts; +64 for edges. */
447 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
449 if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
453 av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
454 return AVERROR_PATCHWELCOME;
457 // edge emu needs blocksize + filter length - 1
458 // (= 17x17 for halfpel / 21x21 for h264)
459 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
460 // at uvlinesize. It supports only YUV420 so 24x24 is enough
461 // linesize * interlaced * MBsize
462 // we also use this buffer for encoding in encode_mb_internal() needig an additional 32 lines
463 FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size, 4 * 68,
466 FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size, 4 * 16 * 2,
/* All scratch pointers alias one allocation; they are used exclusively. */
468 s->me.temp = s->me.scratchpad;
469 s->rd_scratchpad = s->me.scratchpad;
470 s->b_scratchpad = s->me.scratchpad;
471 s->obmc_scratchpad = s->me.scratchpad + 16;
475 av_freep(&s->edge_emu_buffer);
476 return AVERROR(ENOMEM);
480 * Allocate a frame buffer
/* Get pixel buffers for a Picture. Encoders (except the WMV/VC1 image
 * codecs) allocate with EDGE_WIDTH padding on all sides via the default
 * get_buffer2 and then offset data[] back inside the edges; decoders go
 * through ff_thread_get_buffer. Also validates stride consistency and
 * lazily allocates the linesize-dependent scratch buffers. Returns 0 on
 * success, negative AVERROR on failure. NOTE(review): several branch/return
 * lines are elided from this excerpt. */
482 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
484 int edges_needed = av_codec_is_encoder(s->avctx->codec);
488 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
489 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
490 s->codec_id != AV_CODEC_ID_MSS2) {
/* Encoder path: over-allocate so motion estimation can read past edges. */
492 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
493 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
496 r = ff_thread_get_buffer(s->avctx, &pic->tf,
497 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
499 pic->f->width = s->avctx->width;
500 pic->f->height = s->avctx->height;
501 pic->f->format = s->avctx->pix_fmt;
502 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
505 if (r < 0 || !pic->f->buf[0]) {
506 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Shift plane pointers inward past the allocated edge padding. */
513 for (i = 0; pic->f->data[i]; i++) {
514 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
515 pic->f->linesize[i] +
516 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
517 pic->f->data[i] += offset;
519 pic->f->width = s->avctx->width;
520 pic->f->height = s->avctx->height;
523 if (s->avctx->hwaccel) {
524 assert(!pic->hwaccel_picture_private);
525 if (s->avctx->hwaccel->frame_priv_data_size) {
526 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
527 if (!pic->hwaccel_priv_buf) {
528 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
531 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* Strides must not change between frames once established. */
535 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
536 s->uvlinesize != pic->f->linesize[1])) {
537 av_log(s->avctx, AV_LOG_ERROR,
538 "get_buffer() failed (stride changed)\n");
539 ff_mpeg_unref_picture(s->avctx, pic);
543 if (pic->f->linesize[1] != pic->f->linesize[2]) {
544 av_log(s->avctx, AV_LOG_ERROR,
545 "get_buffer() failed (uv stride mismatch)\n");
546 ff_mpeg_unref_picture(s->avctx, pic);
550 if (!s->edge_emu_buffer &&
551 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
552 av_log(s->avctx, AV_LOG_ERROR,
553 "get_buffer() failed to allocate context scratch buffers.\n");
554 ff_mpeg_unref_picture(s->avctx, pic);
/* Release every per-picture side-data buffer (variance/skip/qscale/type
 * tables and both motion-val/ref-index pairs) and reset the recorded
 * allocation dimensions so a later ff_alloc_picture() reallocates. */
561 void ff_free_picture_tables(Picture *pic)
565 pic->alloc_mb_width =
566 pic->alloc_mb_height = 0;
568 av_buffer_unref(&pic->mb_var_buf);
569 av_buffer_unref(&pic->mc_mb_var_buf);
570 av_buffer_unref(&pic->mb_mean_buf);
571 av_buffer_unref(&pic->mbskip_table_buf);
572 av_buffer_unref(&pic->qscale_table_buf);
573 av_buffer_unref(&pic->mb_type_buf);
575 for (i = 0; i < 2; i++) {
576 av_buffer_unref(&pic->motion_val_buf[i]);
577 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data buffers sized from the current
 * mb/b8 geometry. Always allocates skip/qscale/type tables; the variance
 * and mean buffers are encoder-only (their guarding condition is elided
 * from this excerpt); motion tables are allocated for H.263-family output,
 * encoding, MV debug, or MV export. Records alloc_mb_{width,height} on
 * success so ff_alloc_picture() can detect geometry changes.
 * Returns 0 or AVERROR(ENOMEM) — partially allocated buffers are freed by
 * the caller via ff_free_picture_tables(). */
581 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
583 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
584 const int mb_array_size = s->mb_stride * s->mb_height;
585 const int b8_array_size = s->b8_stride * s->mb_height * 2;
589 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
590 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
591 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
593 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
594 return AVERROR(ENOMEM);
597 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
598 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
599 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
600 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
601 return AVERROR(ENOMEM);
604 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv ||
605 (s->avctx->flags2 & CODEC_FLAG2_EXPORT_MVS)) {
/* +4 b8 entries of slack; 4 ref indices per MB. */
606 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
607 int ref_index_size = 4 * mb_array_size;
609 for (i = 0; mv_size && i < 2; i++) {
610 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
611 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
612 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
613 return AVERROR(ENOMEM);
617 pic->alloc_mb_width = s->mb_width;
618 pic->alloc_mb_height = s->mb_height;
/* Ensure each per-picture AVBuffer is writable (un-shared) before the
 * encoder/decoder mutates it; copies-on-write via av_buffer_make_writable.
 * The macro's guard condition and error return are partially elided from
 * this excerpt. Returns 0 or a negative AVERROR. */
623 static int make_tables_writable(Picture *pic)
626 #define MAKE_WRITABLE(table) \
629 (ret = av_buffer_make_writable(&pic->table)) < 0)\
633 MAKE_WRITABLE(mb_var_buf);
634 MAKE_WRITABLE(mc_mb_var_buf);
635 MAKE_WRITABLE(mb_mean_buf);
636 MAKE_WRITABLE(mbskip_table_buf);
637 MAKE_WRITABLE(qscale_table_buf);
638 MAKE_WRITABLE(mb_type_buf);
640 for (i = 0; i < 2; i++) {
641 MAKE_WRITABLE(motion_val_buf[i]);
642 MAKE_WRITABLE(ref_index_buf[i]);
649 * Allocate a Picture.
650 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Frees stale tables when the MB geometry changed, obtains pixel buffers
 * (asserting shared pictures already have data, non-shared do not),
 * records linesize/uvlinesize in the context, (re)allocates the side-data
 * tables, makes them writable, and sets the convenience pointers into the
 * buffers (+ 2*mb_stride + 1 offsets skip the guard rows/column).
 * Returns 0 on success, AVERROR(ENOMEM) on failure after unref/cleanup.
 * NOTE(review): the shared/non-shared branch structure and several error
 * checks are elided from this excerpt. */
652 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
656 if (pic->qscale_table_buf)
657 if ( pic->alloc_mb_width != s->mb_width
658 || pic->alloc_mb_height != s->mb_height)
659 ff_free_picture_tables(pic);
662 av_assert0(pic->f->data[0]);
665 av_assert0(!pic->f->buf[0]);
667 if (alloc_frame_buffer(s, pic) < 0)
670 s->linesize = pic->f->linesize[0];
671 s->uvlinesize = pic->f->linesize[1];
674 if (!pic->qscale_table_buf)
675 ret = alloc_picture_tables(s, pic);
677 ret = make_tables_writable(pic);
682 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
683 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
684 pic->mb_mean = pic->mb_mean_buf->data;
687 pic->mbskip_table = pic->mbskip_table_buf->data;
/* Offset past the first two guard rows plus one guard column. */
688 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
689 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
691 if (pic->motion_val_buf[0]) {
692 for (i = 0; i < 2; i++) {
693 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
694 pic->ref_index[i] = pic->ref_index_buf[i]->data;
700 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
701 ff_mpeg_unref_picture(s->avctx, pic);
702 ff_free_picture_tables(pic);
703 return AVERROR(ENOMEM);
707 * Deallocate a picture.
/* Release a Picture's frame buffer and hwaccel data, then zero every field
 * past mb_mean (offsetof trick) so buffer references before that point are
 * preserved across unref. needs_realloc additionally drops the side-data
 * tables. */
709 void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
711 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
714 /* WM Image / Screen codecs allocate internal buffers with different
715 * dimensions / colorspaces; ignore user-defined callbacks for these. */
716 if (avctx->codec->id != AV_CODEC_ID_WMV3IMAGE &&
717 avctx->codec->id != AV_CODEC_ID_VC1IMAGE &&
718 avctx->codec->id != AV_CODEC_ID_MSS2)
719 ff_thread_release_buffer(avctx, &pic->tf);
721 av_frame_unref(pic->f);
723 av_buffer_unref(&pic->hwaccel_priv_buf);
725 if (pic->needs_realloc)
726 ff_free_picture_tables(pic);
/* Wipe everything after the buffer-reference fields. */
728 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's side-data buffers reference src's: for each table, re-ref only
 * when the underlying buffer differs (avoids needless unref/ref churn),
 * then copy the derived data pointers and allocation dimensions. On a
 * failed av_buffer_ref all of dst's tables are freed and ENOMEM returned.
 * NOTE(review): parts of the UPDATE_TABLE macro body are elided from this
 * excerpt. */
731 static int update_picture_tables(Picture *dst, Picture *src)
735 #define UPDATE_TABLE(table)\
738 (!dst->table || dst->table->buffer != src->table->buffer)) {\
739 av_buffer_unref(&dst->table);\
740 dst->table = av_buffer_ref(src->table);\
742 ff_free_picture_tables(dst);\
743 return AVERROR(ENOMEM);\
748 UPDATE_TABLE(mb_var_buf);
749 UPDATE_TABLE(mc_mb_var_buf);
750 UPDATE_TABLE(mb_mean_buf);
751 UPDATE_TABLE(mbskip_table_buf);
752 UPDATE_TABLE(qscale_table_buf);
753 UPDATE_TABLE(mb_type_buf);
754 for (i = 0; i < 2; i++) {
755 UPDATE_TABLE(motion_val_buf[i]);
756 UPDATE_TABLE(ref_index_buf[i]);
/* Derived pointers can be copied directly; they point into shared bufs. */
759 dst->mb_var = src->mb_var;
760 dst->mc_mb_var = src->mc_mb_var;
761 dst->mb_mean = src->mb_mean;
762 dst->mbskip_table = src->mbskip_table;
763 dst->qscale_table = src->qscale_table;
764 dst->mb_type = src->mb_type;
765 for (i = 0; i < 2; i++) {
766 dst->motion_val[i] = src->motion_val[i];
767 dst->ref_index[i] = src->ref_index[i];
770 dst->alloc_mb_width = src->alloc_mb_width;
771 dst->alloc_mb_height = src->alloc_mb_height;
/* Create a new reference to src in dst: thread-frame ref, side-data table
 * refs, hwaccel private buffer ref, then copy the scalar bookkeeping
 * fields. On any failure dst is fully unref'd (the fail label at the end).
 * dst must be empty on entry (asserted). Returns 0 or negative AVERROR.
 * NOTE(review): the error-check gotos between steps are elided from this
 * excerpt. */
776 int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
780 av_assert0(!dst->f->buf[0]);
781 av_assert0(src->f->buf[0]);
785 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
789 ret = update_picture_tables(dst, src);
793 if (src->hwaccel_picture_private) {
794 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
795 if (!dst->hwaccel_priv_buf)
797 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
800 dst->field_picture = src->field_picture;
801 dst->mb_var_sum = src->mb_var_sum;
802 dst->mc_mb_var_sum = src->mc_mb_var_sum;
803 dst->b_frame_score = src->b_frame_score;
804 dst->needs_realloc = src->needs_realloc;
805 dst->reference = src->reference;
806 dst->shared = src->shared;
810 ff_mpeg_unref_picture(avctx, dst);
/* Swap the U and V pblock pointers (used for the "VCR2" codec tag, see
 * init_duplicate_context / ff_update_duplicate_context). NOTE(review):
 * only one assignment of the three-way swap is visible in this excerpt. */
814 static void exchange_uv(MpegEncContext *s)
819 s->pblocks[4] = s->pblocks[5];
/* Initialize the per-thread (slice-context) parts of MpegEncContext:
 * ME map/score buffers, optional DCT noise-reduction error sums, the block
 * array and pblocks pointers (U/V swapped for VCR2), and for H.263-family
 * output the ac_val AC-prediction planes laid out as [luma | U | V] inside
 * one allocation. Scratch buffers (edge_emu etc.) are deliberately left
 * NULL here and allocated later once linesize is known. Returns 0 or -1;
 * on failure, cleanup happens in ff_mpv_common_end(). NOTE(review): the
 * encoding-only guard around the ME allocations is elided. */
823 static int init_duplicate_context(MpegEncContext *s)
825 int y_size = s->b8_stride * (2 * s->mb_height + 1);
826 int c_size = s->mb_stride * (s->mb_height + 1);
827 int yc_size = y_size + 2 * c_size;
/* Odd mb_height needs one extra guard row in each plane. */
830 if (s->mb_height & 1)
831 yc_size += 2*s->b8_stride + 2*s->mb_stride;
838 s->obmc_scratchpad = NULL;
841 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
842 ME_MAP_SIZE * sizeof(uint32_t), fail)
843 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
844 ME_MAP_SIZE * sizeof(uint32_t), fail)
845 if (s->avctx->noise_reduction) {
846 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
847 2 * 64 * sizeof(int), fail)
850 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
851 s->block = s->blocks[0];
853 for (i = 0; i < 12; i++) {
854 s->pblocks[i] = &s->block[i];
856 if (s->avctx->codec_tag == AV_RL32("VCR2"))
859 if (s->out_format == FMT_H263) {
861 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
862 yc_size * sizeof(int16_t) * 16, fail);
863 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
864 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
865 s->ac_val[2] = s->ac_val[1] + c_size;
870 return -1; // free() through ff_mpv_common_end()
/* Free everything init_duplicate_context / frame_size_alloc allocated for
 * one slice context. obmc_scratchpad aliases me.scratchpad, so it is only
 * NULLed, never freed separately. */
873 static void free_duplicate_context(MpegEncContext *s)
878 av_freep(&s->edge_emu_buffer);
879 av_freep(&s->me.scratchpad);
883 s->obmc_scratchpad = NULL;
885 av_freep(&s->dct_error_sum);
886 av_freep(&s->me.map);
887 av_freep(&s->me.score_map);
888 av_freep(&s->blocks);
889 av_freep(&s->ac_val_base);
/* Save the per-thread pointer fields of src into bak so that a wholesale
 * memcpy of the context (see ff_update_duplicate_context) can restore
 * them afterwards. NOTE(review): most COPY() lines are elided from this
 * excerpt. */
893 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
895 #define COPY(a) bak->a = src->a
896 COPY(edge_emu_buffer);
901 COPY(obmc_scratchpad);
908 COPY(me.map_generation);
/* Synchronize a slice context dst with the master src: copy the whole
 * struct, then restore dst's own per-thread buffers (backup/restore via
 * backup_duplicate_context), rebuild the pblocks pointers (VCR2 swaps
 * U/V), and allocate scratch buffers if dst has none yet.
 * Returns 0 on success, negative AVERROR on scratch allocation failure.
 * NOTE(review): the exchange_uv() call and return statements are elided
 * from this excerpt. */
920 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
924 // FIXME copy only needed parts
926 backup_duplicate_context(&bak, dst);
927 memcpy(dst, src, sizeof(MpegEncContext));
928 backup_duplicate_context(dst, &bak);
929 for (i = 0; i < 12; i++) {
930 dst->pblocks[i] = &dst->block[i];
932 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
934 if (!dst->edge_emu_buffer &&
935 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
936 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
937 "scratch buffers.\n");
940 // STOP_TIMER("update_duplicate_context")
941 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: copy decoding state from the previous thread's context
 * (src/s1) into this one (dst/s). First-time path memcpys the whole
 * MpegEncContext and re-runs ff_mpv_common_init; afterwards it propagates
 * dimensions (triggering a frame-size change when needed), the picture
 * array and the current/last/next picture refs (REBASE_PICTURE translates
 * pointers between the two contexts' arrays), error-resilience state,
 * MPEG-4 timing fields (bulk memcpy over a known field range), the
 * bitstream buffer, scratch buffers, and interlacing info.
 * Returns 0 or a negative AVERROR. NOTE(review): numerous guard/return
 * lines are elided from this excerpt. */
945 int ff_mpeg_update_thread_context(AVCodecContext *dst,
946 const AVCodecContext *src)
949 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
956 // FIXME can parameters change on I-frames?
957 // in that case dst may need a reinit
958 if (!s->context_initialized) {
960 memcpy(s, s1, sizeof(MpegEncContext));
/* The copied pointers belong to s1; detach the ones s must own itself. */
963 s->bitstream_buffer = NULL;
964 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
966 if (s1->context_initialized){
967 // s->picture_range_start += MAX_PICTURE_COUNT;
968 // s->picture_range_end += MAX_PICTURE_COUNT;
970 if((err = ff_mpv_common_init(s)) < 0){
971 memset(s, 0, sizeof(MpegEncContext));
978 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
979 s->context_reinit = 0;
980 s->height = s1->height;
981 s->width = s1->width;
982 if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
986 s->avctx->coded_height = s1->avctx->coded_height;
987 s->avctx->coded_width = s1->avctx->coded_width;
988 s->avctx->width = s1->avctx->width;
989 s->avctx->height = s1->avctx->height;
991 s->coded_picture_number = s1->coded_picture_number;
992 s->picture_number = s1->picture_number;
994 av_assert0(!s->picture || s->picture != s1->picture);
996 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
997 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
998 if (s1->picture[i].f->buf[0] &&
999 (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
1003 #define UPDATE_PICTURE(pic)\
1005 ff_mpeg_unref_picture(s->avctx, &s->pic);\
1006 if (s1->pic.f && s1->pic.f->buf[0])\
1007 ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
1009 ret = update_picture_tables(&s->pic, &s1->pic);\
1014 UPDATE_PICTURE(current_picture);
1015 UPDATE_PICTURE(last_picture);
1016 UPDATE_PICTURE(next_picture);
/* Translate a Picture* from the old context's array into ours. */
1018 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
1019 ((pic && pic >= old_ctx->picture && \
1020 pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
1021 &new_ctx->picture[pic - old_ctx->picture] : NULL)
1023 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1024 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1025 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1027 // Error/bug resilience
1028 s->next_p_frame_damaged = s1->next_p_frame_damaged;
1029 s->workaround_bugs = s1->workaround_bugs;
1030 s->padding_bug_score = s1->padding_bug_score;
1032 // MPEG4 timing info
/* Bulk-copy the contiguous field range [last_time_base, pb_field_time]. */
1033 memcpy(&s->last_time_base, &s1->last_time_base,
1034 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1035 (char *) &s1->last_time_base);
1038 s->max_b_frames = s1->max_b_frames;
1039 s->low_delay = s1->low_delay;
1040 s->droppable = s1->droppable;
1042 // DivX handling (doesn't work)
1043 s->divx_packed = s1->divx_packed;
1045 if (s1->bitstream_buffer) {
1046 if (s1->bitstream_buffer_size +
1047 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
1048 av_fast_malloc(&s->bitstream_buffer,
1049 &s->allocated_bitstream_buffer_size,
1050 s1->allocated_bitstream_buffer_size);
1051 s->bitstream_buffer_size = s1->bitstream_buffer_size;
1052 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1053 s1->bitstream_buffer_size);
1054 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1055 FF_INPUT_BUFFER_PADDING_SIZE);
1058 // linesize dependend scratch buffer allocation
1059 if (!s->edge_emu_buffer)
/* NOTE(review): the "if (s1->linesize)" guard between these lines is
 * elided from this excerpt; the else branch below reports unknown size. */
1061 if (frame_size_alloc(s, s1->linesize) < 0) {
1062 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1063 "scratch buffers.\n");
1064 return AVERROR(ENOMEM);
1067 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1068 "be allocated due to unknown size.\n");
1071 // MPEG2/interlacing info
1072 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1073 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1075 if (!s1->first_field) {
1076 s->last_pict_type = s1->pict_type;
1077 if (s1->current_picture_ptr)
1078 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1085 * Set the given MpegEncContext to common defaults
1086 * (same for encoding and decoding).
1087 * The changed fields will not depend upon the
1088 * prior state of the MpegEncContext.
1090 void ff_mpv_common_defaults(MpegEncContext *s)
/* Default DC/chroma qscale tables (MPEG-1 / identity). */
1092 s->y_dc_scale_table =
1093 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1094 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1095 s->progressive_frame = 1;
1096 s->progressive_sequence = 1;
1097 s->picture_structure = PICT_FRAME;
1099 s->coded_picture_number = 0;
1100 s->picture_number = 0;
/* Single slice context until threads are configured. */
1105 s->slice_context_count = 1;
1109 * Set the given MpegEncContext to defaults for decoding.
1110 * the changed fields will not depend upon
1111 * the prior state of the MpegEncContext.
1113 void ff_mpv_decode_defaults(MpegEncContext *s)
1115 ff_mpv_common_defaults(s);
/* Bind a decoder's MpegEncContext to its AVCodecContext: copy coded
 * dimensions, codec id, bug workarounds, and the upper-cased fourcc. */
1118 void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
1121 s->width = avctx->coded_width;
1122 s->height = avctx->coded_height;
1123 s->codec_id = avctx->codec->id;
1124 s->workaround_bugs = avctx->workaround_bugs;
1126 /* convert fourcc to upper case */
1127 s->codec_tag = avpriv_toupper4(avctx->codec_tag);
/* Initialize the error-resilience context from the MpegEncContext:
 * mirror the MB geometry and shared tables, allocate the ER temp and
 * status buffers, and install mpeg_er_decode_mb as the concealment
 * callback. Returns 0 or AVERROR(ENOMEM) (freeing what was allocated). */
1130 static int init_er(MpegEncContext *s)
1132 ERContext *er = &s->er;
1133 int mb_array_size = s->mb_height * s->mb_stride;
1136 er->avctx = s->avctx;
1138 er->mb_index2xy = s->mb_index2xy;
1139 er->mb_num = s->mb_num;
1140 er->mb_width = s->mb_width;
1141 er->mb_height = s->mb_height;
1142 er->mb_stride = s->mb_stride;
1143 er->b8_stride = s->b8_stride;
1145 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1146 er->error_status_table = av_mallocz(mb_array_size);
1147 if (!er->er_temp_buffer || !er->error_status_table)
1150 er->mbskip_table = s->mbskip_table;
1151 er->mbintra_table = s->mbintra_table;
1153 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1154 er->dc_val[i] = s->dc_val[i];
1156 er->decode_mb = mpeg_er_decode_mb;
1161 av_freep(&er->er_temp_buffer);
1162 av_freep(&er->error_status_table);
1163 return AVERROR(ENOMEM);
1167 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Computes mb/b8 geometry from width/height, builds the mb_index2xy map,
 * and allocates every resolution-dependent table: encoder MV tables (the
 * encoding-only guard is elided from this excerpt), MB type/lambda/cplx
 * tables, interlaced-ME field MV tables, H.263 coded-block/cbp/pred
 * tables, DC prediction values (initialized to 1024 = "no prediction"),
 * the mbintra table (all-intra initially) and the mbskip table.
 * Returns 0 on success, AVERROR(ENOMEM) via the fail label. */
1169 static int init_context_frame(MpegEncContext *s)
1171 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1173 s->mb_width = (s->width + 15) / 16;
1174 s->mb_stride = s->mb_width + 1;
1175 s->b8_stride = s->mb_width * 2 + 1;
1176 mb_array_size = s->mb_height * s->mb_stride;
1177 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1179 /* set default edge pos, will be overridden
1180 * in decode_header if needed */
1181 s->h_edge_pos = s->mb_width * 16;
1182 s->v_edge_pos = s->mb_height * 16;
1184 s->mb_num = s->mb_width * s->mb_height;
1189 s->block_wrap[3] = s->b8_stride;
1191 s->block_wrap[5] = s->mb_stride;
1193 y_size = s->b8_stride * (2 * s->mb_height + 1);
1194 c_size = s->mb_stride * (s->mb_height + 1);
1195 yc_size = y_size + 2 * c_size;
/* Odd mb_height needs an extra guard row per plane (as elsewhere). */
1197 if (s->mb_height & 1)
1198 yc_size += 2*s->b8_stride + 2*s->mb_stride;
1200 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
1201 for (y = 0; y < s->mb_height; y++)
1202 for (x = 0; x < s->mb_width; x++)
1203 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1205 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1208 /* Allocate MV tables */
1209 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1210 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1211 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1212 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1213 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1214 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
/* Working pointers skip the first guard row plus one guard column. */
1215 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1216 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1217 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1218 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1219 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1220 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1222 /* Allocate MB type table */
1223 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1225 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1227 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1228 mb_array_size * sizeof(float), fail);
1229 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1230 mb_array_size * sizeof(float), fail);
1234 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1235 (s->avctx->flags & CODEC_FLAG_INTERLACED_ME)) {
1236 /* interlaced direct mode decoding tables */
1237 for (i = 0; i < 2; i++) {
1239 for (j = 0; j < 2; j++) {
1240 for (k = 0; k < 2; k++) {
1241 FF_ALLOCZ_OR_GOTO(s->avctx,
1242 s->b_field_mv_table_base[i][j][k],
1243 mv_table_size * 2 * sizeof(int16_t),
1245 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1248 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1249 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1250 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1252 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1255 if (s->out_format == FMT_H263) {
1257 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1258 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1260 /* cbp, ac_pred, pred_dir */
1261 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1262 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1265 if (s->h263_pred || s->h263_plus || !s->encoding) {
1267 // MN: we need these for error resilience of intra-frames
1268 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1269 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1270 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1271 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 = 128 * 8: neutral DC predictor so first predictions are sane. */
1272 for (i = 0; i < yc_size; i++)
1273 s->dc_val_base[i] = 1024;
1276 /* which mb is a intra block */
1277 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1278 memset(s->mbintra_table, 1, mb_array_size);
1280 /* init macroblock skip table */
1281 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1282 // Note the + 1 is for a quicker mpeg4 slice_end detection
1286 return AVERROR(ENOMEM);
/**
 * Initialize the common structure for both encoder and decoder.
 * This assumes that some variables like width/height are already set.
 *
 * NOTE(review): this span carries extraction damage — an original line
 * number is fused to the start of every line, and several lines
 * (declarations, else-branches, goto labels, closing braces) are missing.
 * The code is annotated as-is, not repaired.
 */
1293 av_cold int ff_mpv_common_init(MpegEncContext *s)
/* with slice threading, one context per slice thread; otherwise a single one */
1296 int nb_slices = (HAVE_THREADS &&
1297 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1298 s->avctx->thread_count : 1;
/* an encoder may request an explicit slice count */
1300 if (s->encoding && s->avctx->slices)
1301 nb_slices = s->avctx->slices;
/* interlaced MPEG-2: mb_height must cover 32-line macroblock pairs (even count) */
1303 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1304 s->mb_height = (s->height + 31) / 32 * 2;
/* presumably the else-branch of the check above — extraction gap; confirm */
1306 s->mb_height = (s->height + 15) / 16;
1308 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1309 av_log(s->avctx, AV_LOG_ERROR,
1310 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* clamp the slice count to the thread pool size and the picture height */
1314 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1317 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1319 max_slices = MAX_THREADS;
1320 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1321 " reducing to %d\n", nb_slices, max_slices);
1322 nb_slices = max_slices;
/* reject invalid dimensions early */
1325 if ((s->width || s->height) &&
1326 av_image_check_size(s->width, s->height, 0, s->avctx))
1331 /* set chroma shifts */
1332 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1334 &s->chroma_y_shift);
/* allocate the Picture pool and the AVFrame backing each entry */
1337 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1338 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1339 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1340 s->picture[i].f = av_frame_alloc();
1341 if (!s->picture[i].f)
1344 memset(&s->next_picture, 0, sizeof(s->next_picture));
1345 memset(&s->last_picture, 0, sizeof(s->last_picture));
1346 memset(&s->current_picture, 0, sizeof(s->current_picture));
1347 memset(&s->new_picture, 0, sizeof(s->new_picture));
1348 s->next_picture.f = av_frame_alloc();
1349 if (!s->next_picture.f)
1351 s->last_picture.f = av_frame_alloc();
1352 if (!s->last_picture.f)
1354 s->current_picture.f = av_frame_alloc();
1355 if (!s->current_picture.f)
1357 s->new_picture.f = av_frame_alloc();
1358 if (!s->new_picture.f)
/* allocate the resolution-dependent tables (MV tables, mb_type, ...) */
1361 if (init_context_frame(s))
1364 s->parse_context.state = -1;
1366 s->context_initialized = 1;
1367 s->thread_context[0] = s;
1369 // if (s->width && s->height) {
/* clone the context for each extra slice thread and assign its mb-row range;
 * NOTE(review): av_malloc/memcpy results appear unchecked here — gap may hide checks */
1370 if (nb_slices > 1) {
1371 for (i = 1; i < nb_slices; i++) {
1372 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1373 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1376 for (i = 0; i < nb_slices; i++) {
1377 if (init_duplicate_context(s->thread_context[i]) < 0)
1379 s->thread_context[i]->start_mb_y =
1380 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1381 s->thread_context[i]->end_mb_y =
1382 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
/* single-slice path — presumably inside the else-branch (extraction gap) */
1385 if (init_duplicate_context(s) < 0)
1388 s->end_mb_y = s->mb_height;
1390 s->slice_context_count = nb_slices;
/* error path: tear down everything that was set up */
1395 ff_mpv_common_end(s);
1400 * Frees and resets MpegEncContext fields depending on the resolution.
1401 * Is used during resolution changes to avoid a full reinitialization of the
1404 static void free_context_frame(MpegEncContext *s)
1408 av_freep(&s->mb_type);
1409 av_freep(&s->p_mv_table_base);
1410 av_freep(&s->b_forw_mv_table_base);
1411 av_freep(&s->b_back_mv_table_base);
1412 av_freep(&s->b_bidir_forw_mv_table_base);
1413 av_freep(&s->b_bidir_back_mv_table_base);
1414 av_freep(&s->b_direct_mv_table_base);
1415 s->p_mv_table = NULL;
1416 s->b_forw_mv_table = NULL;
1417 s->b_back_mv_table = NULL;
1418 s->b_bidir_forw_mv_table = NULL;
1419 s->b_bidir_back_mv_table = NULL;
1420 s->b_direct_mv_table = NULL;
1421 for (i = 0; i < 2; i++) {
1422 for (j = 0; j < 2; j++) {
1423 for (k = 0; k < 2; k++) {
1424 av_freep(&s->b_field_mv_table_base[i][j][k]);
1425 s->b_field_mv_table[i][j][k] = NULL;
1427 av_freep(&s->b_field_select_table[i][j]);
1428 av_freep(&s->p_field_mv_table_base[i][j]);
1429 s->p_field_mv_table[i][j] = NULL;
1431 av_freep(&s->p_field_select_table[i]);
1434 av_freep(&s->dc_val_base);
1435 av_freep(&s->coded_block_base);
1436 av_freep(&s->mbintra_table);
1437 av_freep(&s->cbp_table);
1438 av_freep(&s->pred_dir_table);
1440 av_freep(&s->mbskip_table);
1442 av_freep(&s->er.error_status_table);
1443 av_freep(&s->er.er_temp_buffer);
1444 av_freep(&s->mb_index2xy);
1445 av_freep(&s->lambda_table);
1447 av_freep(&s->cplx_tab);
1448 av_freep(&s->bits_tab);
1450 s->linesize = s->uvlinesize = 0;
/* Reinitialize the resolution-dependent state after a frame size change,
 * without a full codec reinit: frees the per-slice duplicate contexts and
 * per-resolution tables, then rebuilds them for the new dimensions.
 * NOTE(review): extraction damage — fused line-number residue and missing
 * lines (declarations, else-branches, goto labels, braces); annotated as-is. */
1453 int ff_mpv_common_frame_size_change(MpegEncContext *s)
1457 if (!s->context_initialized)
1458 return AVERROR(EINVAL);
/* tear down the slice-thread duplicate contexts (index 0 is s itself) */
1460 if (s->slice_context_count > 1) {
1461 for (i = 0; i < s->slice_context_count; i++) {
1462 free_duplicate_context(s->thread_context[i]);
1464 for (i = 1; i < s->slice_context_count; i++) {
1465 av_freep(&s->thread_context[i]);
/* single-context path — presumably the else-branch (extraction gap) */
1468 free_duplicate_context(s);
1470 free_context_frame(s);
/* force reallocation of every pooled picture at the new size */
1473 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1474 s->picture[i].needs_realloc = 1;
1477 s->last_picture_ptr =
1478 s->next_picture_ptr =
1479 s->current_picture_ptr = NULL;
/* recompute mb_height; interlaced MPEG-2 rounds to 32-line MB pairs */
1482 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1483 s->mb_height = (s->height + 31) / 32 * 2;
/* presumably the else-branch of the check above — confirm (gap) */
1485 s->mb_height = (s->height + 15) / 16;
1487 if ((s->width || s->height) &&
1488 (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
/* rebuild resolution-dependent tables */
1491 if ((err = init_context_frame(s)))
1494 s->thread_context[0] = s;
/* re-create the slice-thread duplicate contexts for the new size */
1496 if (s->width && s->height) {
1497 int nb_slices = s->slice_context_count;
1498 if (nb_slices > 1) {
1499 for (i = 1; i < nb_slices; i++) {
1500 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1501 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1504 for (i = 0; i < nb_slices; i++) {
1505 if ((err = init_duplicate_context(s->thread_context[i])) < 0)
1507 s->thread_context[i]->start_mb_y =
1508 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1509 s->thread_context[i]->end_mb_y =
1510 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
/* single-slice path — presumably inside the else-branch (gap) */
1513 err = init_duplicate_context(s);
1517 s->end_mb_y = s->mb_height;
1519 s->slice_context_count = nb_slices;
/* error path: full teardown */
1524 ff_mpv_common_end(s);
1528 /* init common structure for both encoder and decoder */
1529 void ff_mpv_common_end(MpegEncContext *s)
1533 if (s->slice_context_count > 1) {
1534 for (i = 0; i < s->slice_context_count; i++) {
1535 free_duplicate_context(s->thread_context[i]);
1537 for (i = 1; i < s->slice_context_count; i++) {
1538 av_freep(&s->thread_context[i]);
1540 s->slice_context_count = 1;
1541 } else free_duplicate_context(s);
1543 av_freep(&s->parse_context.buffer);
1544 s->parse_context.buffer_size = 0;
1546 av_freep(&s->bitstream_buffer);
1547 s->allocated_bitstream_buffer_size = 0;
1550 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1551 ff_free_picture_tables(&s->picture[i]);
1552 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1553 av_frame_free(&s->picture[i].f);
1556 av_freep(&s->picture);
1557 ff_free_picture_tables(&s->last_picture);
1558 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1559 av_frame_free(&s->last_picture.f);
1560 ff_free_picture_tables(&s->current_picture);
1561 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1562 av_frame_free(&s->current_picture.f);
1563 ff_free_picture_tables(&s->next_picture);
1564 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1565 av_frame_free(&s->next_picture.f);
1566 ff_free_picture_tables(&s->new_picture);
1567 ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1568 av_frame_free(&s->new_picture.f);
1570 free_context_frame(s);
1572 s->context_initialized = 0;
1573 s->last_picture_ptr =
1574 s->next_picture_ptr =
1575 s->current_picture_ptr = NULL;
1576 s->linesize = s->uvlinesize = 0;
/* Build the derived run/level lookup tables (max_level[], max_run[],
 * index_run[]) for an RLTable, for both the "not last" and "last"
 * coefficient classes. When static_store is non-NULL the results are
 * placed in that caller-provided static buffer, otherwise heap-allocated.
 * NOTE(review): extraction damage — fused line-number residue and missing
 * lines (braces, if/else skeletons, loop-bound setup); annotated as-is. */
1579 av_cold void ff_init_rl(RLTable *rl,
1580 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1582 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1583 uint8_t index_run[MAX_RUN + 1];
1584 int last, run, level, start, end, i;
1586 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1587 if (static_store && rl->max_level[0])
1590 /* compute max_level[], max_run[] and index_run[] */
/* last == 0: codes before rl->last; last == 1: codes after — the
 * start/end assignments live in the extraction gap here */
1591 for (last = 0; last < 2; last++) {
1600 memset(max_level, 0, MAX_RUN + 1);
1601 memset(max_run, 0, MAX_LEVEL + 1);
/* rl->n is used as the "unset" sentinel for index_run */
1602 memset(index_run, rl->n, MAX_RUN + 1);
1603 for (i = start; i < end; i++) {
1604 run = rl->table_run[i];
1605 level = rl->table_level[i];
/* first code with this run value defines index_run[run] (body in gap) */
1606 if (index_run[run] == rl->n)
1608 if (level > max_level[run])
1609 max_level[run] = level;
1610 if (run > max_run[level])
1611 max_run[level] = run;
/* static path: carve the three tables out of the caller's buffer;
 * the if/else selecting static vs. heap storage is in the gap */
1614 rl->max_level[last] = static_store[last];
1616 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1617 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1619 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1621 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1622 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1624 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1626 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1627 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Build the per-qscale RL-VLC decode tables (rl->rl_vlc[q]) from the
 * RLTable's VLC codes, pre-multiplying each level by qmul/qadd so the
 * decoder can skip the dequantization step.
 * NOTE(review): extraction damage — fused line-number residue and missing
 * lines (qmul setup, escape/illegal-code branches, braces); annotated as-is. */
1631 av_cold void ff_init_vlc_rl(RLTable *rl, unsigned static_size)
/* temporary VLC built on the stack, then flattened into rl_vlc */
1634 VLC_TYPE table[1500][2] = {{0}};
1635 VLC vlc = { .table = table, .table_allocated = static_size };
1636 av_assert0(static_size <= FF_ARRAY_ELEMS(table));
1637 init_vlc(&vlc, 9, rl->n + 1, &rl->table_vlc[0][1], 4, 2, &rl->table_vlc[0][0], 4, 2, INIT_VLC_USE_NEW_STATIC);
/* one table per qscale value; the matching qmul assignment is in the gap */
1639 for (q = 0; q < 32; q++) {
/* standard H.263-style dequant offset: odd value (q-1)|1 */
1641 int qadd = (q - 1) | 1;
1647 for (i = 0; i < vlc.table_size; i++) {
1648 int code = vlc.table[i][0];
1649 int len = vlc.table[i][1];
/* the run/level values assigned in these branches are in the gap */
1652 if (len == 0) { // illegal code
1655 } else if (len < 0) { // more bits needed
1659 if (code == rl->n) { // esc
/* regular code: pre-dequantized level, run biased by +1 */
1663 run = rl->table_run[code] + 1;
1664 level = rl->table_level[code] * qmul + qadd;
/* codes at/after rl->last mark the final coefficient: +192 run flag */
1665 if (code >= rl->last) run += 192;
1668 rl->rl_vlc[q][i].len = len;
1669 rl->rl_vlc[q][i].level = level;
1670 rl->rl_vlc[q][i].run = run;
1675 static void release_unused_pictures(AVCodecContext *avctx, Picture *picture)
1679 /* release non reference frames */
1680 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1681 if (!picture[i].reference)
1682 ff_mpeg_unref_picture(avctx, &picture[i]);
1686 static inline int pic_is_unused(Picture *pic)
1688 if (!pic->f->buf[0])
1690 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1695 static int find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
1700 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1701 if (!picture[i].f->buf[0])
1705 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1706 if (pic_is_unused(&picture[i]))
1711 av_log(avctx, AV_LOG_FATAL,
1712 "Internal error, picture buffer overflow\n");
1713 /* We could return -1, but the codec would crash trying to draw into a
1714 * non-existing frame anyway. This is safer than waiting for a random crash.
1715 * Also the return of this is never useful, an encoder must only allocate
1716 * as much as allowed in the specification. This has no relationship to how
1717 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1718 * enough for such valid streams).
1719 * Plus, a decoder has to check stream validity and remove frames if too
1720 * many reference frames are around. Waiting for "OOM" is not correct at
1721 * all. Similarly, missing reference frames have to be replaced by
1722 * interpolated/MC frames, anything else is a bug in the codec ...
1728 int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
1730 int ret = find_unused_picture(avctx, picture, shared);
1732 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1733 if (picture[ret].needs_realloc) {
1734 picture[ret].needs_realloc = 0;
1735 ff_free_picture_tables(&picture[ret]);
1736 ff_mpeg_unref_picture(avctx, &picture[ret]);
1742 static void gray_frame(AVFrame *frame)
1744 int i, h_chroma_shift, v_chroma_shift;
1746 av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1748 for(i=0; i<frame->height; i++)
1749 memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1750 for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1751 memset(frame->data[1] + frame->linesize[1]*i,
1752 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1753 memset(frame->data[2] + frame->linesize[2]*i,
1754 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
/**
 * Generic function called after decoding the header and before a frame
 * is decoded: rotates the last/next/current picture references, grabs a
 * buffer for the new frame, allocates dummy reference frames where the
 * stream starts without a usable keyframe, and selects the dequantizer.
 *
 * NOTE(review): this span carries extraction damage — an original line
 * number is fused to the start of every line and many lines (declarations,
 * else-branches, return statements, braces) are missing. Annotated as-is.
 */
1762 int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* frame threading: headers may only be parsed in the SETUP state */
1768 if (!ff_thread_can_start_frame(avctx)) {
1769 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1773 /* mark & release old frames */
1774 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1775 s->last_picture_ptr != s->next_picture_ptr &&
1776 s->last_picture_ptr->f->buf[0]) {
1777 ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1780 /* release forgotten pictures */
1781 /* if (mpeg124/h263) */
/* referenced pictures that are no longer last/next are "zombies"; they are
 * expected only with frame threading, hence the error log otherwise */
1782 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1783 if (&s->picture[i] != s->last_picture_ptr &&
1784 &s->picture[i] != s->next_picture_ptr &&
1785 s->picture[i].reference && !s->picture[i].needs_realloc) {
1786 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1787 av_log(avctx, AV_LOG_ERROR,
1788 "releasing zombie picture\n");
1789 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1793 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1795 release_unused_pictures(s->avctx, s->picture);
/* pick the slot for the frame being decoded */
1797 if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1798 // we already have a unused image
1799 // (maybe it was set before reading the header)
1800 pic = s->current_picture_ptr;
/* presumably the else-branch: search the pool (gap hides the else/error check) */
1802 i = ff_find_unused_picture(s->avctx, s->picture, 0);
1804 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1807 pic = &s->picture[i];
/* reference flags for non-droppable frames; the assigned values are in the gap */
1811 if (!s->droppable) {
1812 if (s->pict_type != AV_PICTURE_TYPE_B)
1816 pic->f->coded_picture_number = s->coded_picture_number++;
1818 if (ff_alloc_picture(s, pic, 0) < 0)
1821 s->current_picture_ptr = pic;
1822 // FIXME use only the vars from current_pic
/* propagate field/interlace metadata onto the output frame */
1823 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1824 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1825 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1826 if (s->picture_structure != PICT_FRAME)
1827 s->current_picture_ptr->f->top_field_first =
1828 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1830 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1831 !s->progressive_sequence;
1832 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1834 s->current_picture_ptr->f->pict_type = s->pict_type;
1835 // if (s->avctx->flags && CODEC_FLAG_QSCALE)
1836 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1837 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1839 if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1840 s->current_picture_ptr)) < 0)
/* reference rotation: non-B frames shift next -> last, current -> next */
1843 if (s->pict_type != AV_PICTURE_TYPE_B) {
1844 s->last_picture_ptr = s->next_picture_ptr;
1846 s->next_picture_ptr = s->current_picture_ptr;
1848 ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1849 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1850 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1851 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1852 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1853 s->pict_type, s->droppable);
/* no usable last picture (stream starts mid-GOP or with a field pair):
 * fabricate a gray dummy reference so prediction has something to read */
1855 if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1856 (s->pict_type != AV_PICTURE_TYPE_I ||
1857 s->picture_structure != PICT_FRAME)) {
1858 int h_chroma_shift, v_chroma_shift;
1859 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1860 &h_chroma_shift, &v_chroma_shift);
1861 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1862 av_log(avctx, AV_LOG_DEBUG,
1863 "allocating dummy last picture for B frame\n");
1864 else if (s->pict_type != AV_PICTURE_TYPE_I)
1865 av_log(avctx, AV_LOG_ERROR,
1866 "warning: first frame is no keyframe\n");
1867 else if (s->picture_structure != PICT_FRAME)
1868 av_log(avctx, AV_LOG_DEBUG,
1869 "allocate dummy last picture for field based first keyframe\n");
1871 /* Allocate a dummy frame */
1872 i = ff_find_unused_picture(s->avctx, s->picture, 0);
1874 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1877 s->last_picture_ptr = &s->picture[i];
1879 s->last_picture_ptr->reference = 3;
1880 s->last_picture_ptr->f->key_frame = 0;
1881 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1883 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1884 s->last_picture_ptr = NULL;
/* fill the dummy with neutral gray unless a hwaccel owns the surface */
1888 if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1889 for(i=0; i<avctx->height; i++)
1890 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1891 0x80, avctx->width);
1892 if (s->last_picture_ptr->f->data[2]) {
1893 for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1894 memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1895 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1896 memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1897 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
/* FLV1/H.263 use luma 16 (black) for the dummy instead of gray */
1901 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1902 for(i=0; i<avctx->height; i++)
1903 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
/* mark both fields of the dummy as fully decoded for frame threading */
1907 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1908 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* B frame without a next reference: fabricate a dummy next picture too */
1910 if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1911 s->pict_type == AV_PICTURE_TYPE_B) {
1912 /* Allocate a dummy frame */
1913 i = ff_find_unused_picture(s->avctx, s->picture, 0);
1915 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1918 s->next_picture_ptr = &s->picture[i];
1920 s->next_picture_ptr->reference = 3;
1921 s->next_picture_ptr->f->key_frame = 0;
1922 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1924 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1925 s->next_picture_ptr = NULL;
1928 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1929 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1932 #if 0 // BUFREF-FIXME
1933 memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1934 memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
/* take working references to last/next for this frame's decode */
1936 if (s->last_picture_ptr) {
1937 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1938 if (s->last_picture_ptr->f->buf[0] &&
1939 (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1940 s->last_picture_ptr)) < 0)
1943 if (s->next_picture_ptr) {
1944 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1945 if (s->next_picture_ptr->f->buf[0] &&
1946 (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1947 s->next_picture_ptr)) < 0)
1951 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1952 s->last_picture_ptr->f->buf[0]));
/* field pictures: address one field via doubled linesizes (+1 line offset
 * for the bottom field) */
1954 if (s->picture_structure!= PICT_FRAME) {
1956 for (i = 0; i < 4; i++) {
1957 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1958 s->current_picture.f->data[i] +=
1959 s->current_picture.f->linesize[i];
1961 s->current_picture.f->linesize[i] *= 2;
1962 s->last_picture.f->linesize[i] *= 2;
1963 s->next_picture.f->linesize[i] *= 2;
1967 /* set dequantizer, we can't do it during init as
1968 * it might change for mpeg4 and we can't do it in the header
1969 * decode as init is not called for mpeg4 there yet */
1970 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1971 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1972 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1973 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1974 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1975 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
/* presumably the MPEG-1 fallback else-branch (extraction gap) */
1977 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1978 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
/* FF_DEBUG_NOMC: blank the frame so only MVs/residual remain visible */
1981 if (s->avctx->debug & FF_DEBUG_NOMC) {
1982 gray_frame(s->current_picture_ptr->f);
1988 /* called after a frame has been decoded. */
1989 void ff_mpv_frame_end(MpegEncContext *s)
1993 if (s->current_picture.reference)
1994 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
/* Clip the line segment (sx,sy)-(ex,ey) against the x range [0, maxx],
 * interpolating y at the clip boundary with 64-bit intermediates to avoid
 * overflow. Used by draw_line()/draw_arrow() below; the symmetric call on
 * line 2002 handles the segment with swapped endpoints.
 * NOTE(review): severe extraction damage — only 4 of roughly 20 original
 * lines survive (the conditions around each statement and the return
 * values are missing); annotated as-is. */
1999 static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
/* recurse with endpoints swapped — presumably when sx > ex (gap) */
2002 return clip_line(ex, ey, sx, sy, maxx);
/* clip against the left edge (x = 0) */
2007 *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
/* clip against the right edge (x = maxx) */
2014 *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
/**
 * Draw an anti-aliased line from (ex, ey) -> (sx, sy) into a grayscale
 * plane by adding "color" (fractionally weighted between adjacent pixels).
 * @param w      width of the image
 * @param h      height of the image
 * @param stride stride/linesize of the image
 * @param color  color of the arrow
 *
 * NOTE(review): extraction damage — fused line-number residue and missing
 * lines (early returns after clipping, x==0/y==0 special cases, braces);
 * annotated as-is.
 */
2028 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
2029 int w, int h, int stride, int color)
/* clip against both axes (clip_line swaps arguments for the y axis);
 * the early-return on fully-clipped segments is in the gap */
2033 if (clip_line(&sx, &sy, &ex, &ey, w - 1))
2035 if (clip_line(&sy, &sx, &ey, &ex, h - 1))
2038 sx = av_clip(sx, 0, w - 1);
2039 sy = av_clip(sy, 0, h - 1);
2040 ex = av_clip(ex, 0, w - 1);
2041 ey = av_clip(ey, 0, h - 1);
2043 buf[sy * stride + sx] += color;
/* mostly-horizontal case: step in x, distribute color over two y rows */
2045 if (FFABS(ex - sx) > FFABS(ey - sy)) {
/* presumably guarded by sx > ex (gap): ensure left-to-right iteration */
2047 FFSWAP(int, sx, ex);
2048 FFSWAP(int, sy, ey);
2050 buf += sx + sy * stride;
/* 16.16 fixed-point slope */
2052 f = ((ey - sy) << 16) / ex;
2053 for (x = 0; x <= ex; x++) {
2055 fr = (x * f) & 0xFFFF;
2056 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2057 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* mostly-vertical case (presumably the else-branch, gap): step in y */
2061 FFSWAP(int, sx, ex);
2062 FFSWAP(int, sy, ey);
2064 buf += sx + sy * stride;
2067 f = ((ex - sx) << 16) / ey;
2070 for(y= 0; y <= ey; y++){
2072 fr = (y*f) & 0xFFFF;
2073 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2074 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
/**
 * Draw an arrow from (ex, ey) -> (sx, sy): the shaft via draw_line() plus
 * two short head strokes when the vector is long enough.
 * @param w      width of the image
 * @param h      height of the image
 * @param stride stride/linesize of the image
 * @param color  color of the arrow
 *
 * NOTE(review): extraction damage — fused line-number residue and missing
 * lines (dx/dy setup, the condition guarding the endpoint swap, braces);
 * annotated as-is.
 */
2086 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2087 int ey, int w, int h, int stride, int color, int tail, int direction)
/* presumably guarded by the direction flag (gap): swap head and tail */
2092 FFSWAP(int, sx, ex);
2093 FFSWAP(int, sy, ey);
/* allow endpoints slightly outside the image; draw_line clips exactly */
2096 sx = av_clip(sx, -100, w + 100);
2097 sy = av_clip(sy, -100, h + 100);
2098 ex = av_clip(ex, -100, w + 100);
2099 ey = av_clip(ey, -100, h + 100);
/* draw the arrow head only for vectors longer than 3 pixels */
2104 if (dx * dx + dy * dy > 3 * 3) {
/* rx/ry setup lives in the gap; length in 12.4-ish fixed point */
2107 int length = ff_sqrt((rx * rx + ry * ry) << 8);
2109 // FIXME subpixel accuracy
2110 rx = ROUNDED_DIV(rx * 3 << 4, length);
2111 ry = ROUNDED_DIV(ry * 3 << 4, length);
/* two head strokes, perpendicular-ish to the shaft */
2118 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2119 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
/* the shaft itself */
2121 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
2125 static int add_mb(AVMotionVector *mb, uint32_t mb_type,
2126 int dst_x, int dst_y,
2127 int src_x, int src_y,
2130 mb->w = IS_8X8(mb_type) || IS_8X16(mb_type) ? 8 : 16;
2131 mb->h = IS_8X8(mb_type) || IS_16X8(mb_type) ? 8 : 16;
2136 mb->source = direction ? 1 : -1;
2137 mb->flags = 0; // XXX: does mb_type contain extra information that could be exported here?
2142 * Print debugging info for the given picture.
2144 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2145 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2147 int mb_width, int mb_height, int mb_stride, int quarter_sample)
2149 if ((avctx->flags2 & CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) {
2150 const int shift = 1 + quarter_sample;
2151 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2152 const int mv_stride = (mb_width << mv_sample_log2) +
2153 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2154 int mb_x, mb_y, mbcount = 0;
2156 /* size is width * height * 2 * 4 where 2 is for directions and 4 is
2157 * for the maximum number of MB (4 MB in case of IS_8x8) */
2158 AVMotionVector *mvs = av_malloc_array(mb_width * mb_height, 2 * 4 * sizeof(AVMotionVector));
2162 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2163 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2164 int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
2165 for (direction = 0; direction < 2; direction++) {
2166 if (!USES_LIST(mb_type, direction))
2168 if (IS_8X8(mb_type)) {
2169 for (i = 0; i < 4; i++) {
2170 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2171 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2172 int xy = (mb_x * 2 + (i & 1) +
2173 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2174 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2175 int my = (motion_val[direction][xy][1] >> shift) + sy;
2176 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2178 } else if (IS_16X8(mb_type)) {
2179 for (i = 0; i < 2; i++) {
2180 int sx = mb_x * 16 + 8;
2181 int sy = mb_y * 16 + 4 + 8 * i;
2182 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2183 int mx = (motion_val[direction][xy][0] >> shift);
2184 int my = (motion_val[direction][xy][1] >> shift);
2186 if (IS_INTERLACED(mb_type))
2189 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2191 } else if (IS_8X16(mb_type)) {
2192 for (i = 0; i < 2; i++) {
2193 int sx = mb_x * 16 + 4 + 8 * i;
2194 int sy = mb_y * 16 + 8;
2195 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2196 int mx = motion_val[direction][xy][0] >> shift;
2197 int my = motion_val[direction][xy][1] >> shift;
2199 if (IS_INTERLACED(mb_type))
2202 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2205 int sx = mb_x * 16 + 8;
2206 int sy = mb_y * 16 + 8;
2207 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
2208 int mx = (motion_val[direction][xy][0]>>shift) + sx;
2209 int my = (motion_val[direction][xy][1]>>shift) + sy;
2210 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2217 AVFrameSideData *sd;
2219 av_log(avctx, AV_LOG_DEBUG, "Adding %d MVs info to frame %d\n", mbcount, avctx->frame_number);
2220 sd = av_frame_new_side_data(pict, AV_FRAME_DATA_MOTION_VECTORS, mbcount * sizeof(AVMotionVector));
2225 memcpy(sd->data, mvs, mbcount * sizeof(AVMotionVector));
2231 /* TODO: export all the following to make them accessible for users (and filters) */
2232 if (avctx->hwaccel || !mbtype_table
2233 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
2237 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2240 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2241 av_get_picture_type_char(pict->pict_type));
2242 for (y = 0; y < mb_height; y++) {
2243 for (x = 0; x < mb_width; x++) {
2244 if (avctx->debug & FF_DEBUG_SKIP) {
2245 int count = mbskip_table ? mbskip_table[x + y * mb_stride] : 0;
2248 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2250 if (avctx->debug & FF_DEBUG_QP) {
2251 av_log(avctx, AV_LOG_DEBUG, "%2d",
2252 qscale_table[x + y * mb_stride]);
2254 if (avctx->debug & FF_DEBUG_MB_TYPE) {
2255 int mb_type = mbtype_table[x + y * mb_stride];
2256 // Type & MV direction
2257 if (IS_PCM(mb_type))
2258 av_log(avctx, AV_LOG_DEBUG, "P");
2259 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2260 av_log(avctx, AV_LOG_DEBUG, "A");
2261 else if (IS_INTRA4x4(mb_type))
2262 av_log(avctx, AV_LOG_DEBUG, "i");
2263 else if (IS_INTRA16x16(mb_type))
2264 av_log(avctx, AV_LOG_DEBUG, "I");
2265 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2266 av_log(avctx, AV_LOG_DEBUG, "d");
2267 else if (IS_DIRECT(mb_type))
2268 av_log(avctx, AV_LOG_DEBUG, "D");
2269 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2270 av_log(avctx, AV_LOG_DEBUG, "g");
2271 else if (IS_GMC(mb_type))
2272 av_log(avctx, AV_LOG_DEBUG, "G");
2273 else if (IS_SKIP(mb_type))
2274 av_log(avctx, AV_LOG_DEBUG, "S");
2275 else if (!USES_LIST(mb_type, 1))
2276 av_log(avctx, AV_LOG_DEBUG, ">");
2277 else if (!USES_LIST(mb_type, 0))
2278 av_log(avctx, AV_LOG_DEBUG, "<");
2280 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2281 av_log(avctx, AV_LOG_DEBUG, "X");
2285 if (IS_8X8(mb_type))
2286 av_log(avctx, AV_LOG_DEBUG, "+");
2287 else if (IS_16X8(mb_type))
2288 av_log(avctx, AV_LOG_DEBUG, "-");
2289 else if (IS_8X16(mb_type))
2290 av_log(avctx, AV_LOG_DEBUG, "|");
2291 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2292 av_log(avctx, AV_LOG_DEBUG, " ");
2294 av_log(avctx, AV_LOG_DEBUG, "?");
2297 if (IS_INTERLACED(mb_type))
2298 av_log(avctx, AV_LOG_DEBUG, "=");
2300 av_log(avctx, AV_LOG_DEBUG, " ");
2303 av_log(avctx, AV_LOG_DEBUG, "\n");
2307 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2308 (avctx->debug_mv)) {
2311 int h_chroma_shift, v_chroma_shift, block_height;
2313 const int shift = 1 + quarter_sample;
2315 const int width = avctx->width;
2316 const int height = avctx->height;
2318 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2319 const int mv_stride = (mb_width << mv_sample_log2) +
2320 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2322 *low_delay = 0; // needed to see the vectors without trashing the buffers
2324 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2326 av_frame_make_writable(pict);
2328 pict->opaque = NULL;
2330 ptr = pict->data[0];
2332 block_height = 16 >> v_chroma_shift;
2334 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2336 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2337 const int mb_index = mb_x + mb_y * mb_stride;
2339 if ((avctx->debug_mv) && motion_val[0]) {
2341 for (type = 0; type < 3; type++) {
2345 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2346 (pict->pict_type!= AV_PICTURE_TYPE_P))
2351 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2352 (pict->pict_type!= AV_PICTURE_TYPE_B))
2357 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2358 (pict->pict_type!= AV_PICTURE_TYPE_B))
2363 if (!USES_LIST(mbtype_table[mb_index], direction))
2366 if (IS_8X8(mbtype_table[mb_index])) {
2368 for (i = 0; i < 4; i++) {
2369 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2370 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2371 int xy = (mb_x * 2 + (i & 1) +
2372 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2373 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2374 int my = (motion_val[direction][xy][1] >> shift) + sy;
2375 draw_arrow(ptr, sx, sy, mx, my, width,
2376 height, pict->linesize[0], 100, 0, direction);
2378 } else if (IS_16X8(mbtype_table[mb_index])) {
2380 for (i = 0; i < 2; i++) {
2381 int sx = mb_x * 16 + 8;
2382 int sy = mb_y * 16 + 4 + 8 * i;
2383 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2384 int mx = (motion_val[direction][xy][0] >> shift);
2385 int my = (motion_val[direction][xy][1] >> shift);
2387 if (IS_INTERLACED(mbtype_table[mb_index]))
2390 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2391 height, pict->linesize[0], 100, 0, direction);
2393 } else if (IS_8X16(mbtype_table[mb_index])) {
2395 for (i = 0; i < 2; i++) {
2396 int sx = mb_x * 16 + 4 + 8 * i;
2397 int sy = mb_y * 16 + 8;
2398 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2399 int mx = motion_val[direction][xy][0] >> shift;
2400 int my = motion_val[direction][xy][1] >> shift;
2402 if (IS_INTERLACED(mbtype_table[mb_index]))
2405 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2406 height, pict->linesize[0], 100, 0, direction);
2409 int sx= mb_x * 16 + 8;
2410 int sy= mb_y * 16 + 8;
2411 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2412 int mx= (motion_val[direction][xy][0]>>shift) + sx;
2413 int my= (motion_val[direction][xy][1]>>shift) + sy;
2414 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
2419 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2420 uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2421 0x0101010101010101ULL;
2423 for (y = 0; y < block_height; y++) {
2424 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2425 (block_height * mb_y + y) *
2426 pict->linesize[1]) = c;
2427 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2428 (block_height * mb_y + y) *
2429 pict->linesize[2]) = c;
2432 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2434 int mb_type = mbtype_table[mb_index];
2437 #define COLOR(theta, r) \
2438 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2439 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2443 if (IS_PCM(mb_type)) {
2445 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2446 IS_INTRA16x16(mb_type)) {
2448 } else if (IS_INTRA4x4(mb_type)) {
2450 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2452 } else if (IS_DIRECT(mb_type)) {
2454 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2456 } else if (IS_GMC(mb_type)) {
2458 } else if (IS_SKIP(mb_type)) {
2460 } else if (!USES_LIST(mb_type, 1)) {
2462 } else if (!USES_LIST(mb_type, 0)) {
2465 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2469 u *= 0x0101010101010101ULL;
2470 v *= 0x0101010101010101ULL;
2471 for (y = 0; y < block_height; y++) {
2472 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2473 (block_height * mb_y + y) * pict->linesize[1]) = u;
2474 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2475 (block_height * mb_y + y) * pict->linesize[2]) = v;
2479 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2480 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2481 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2482 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2483 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2485 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2486 for (y = 0; y < 16; y++)
2487 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2488 pict->linesize[0]] ^= 0x80;
2490 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2491 int dm = 1 << (mv_sample_log2 - 2);
2492 for (i = 0; i < 4; i++) {
2493 int sx = mb_x * 16 + 8 * (i & 1);
2494 int sy = mb_y * 16 + 8 * (i >> 1);
2495 int xy = (mb_x * 2 + (i & 1) +
2496 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2498 int32_t *mv = (int32_t *) &motion_val[0][xy];
2499 if (mv[0] != mv[dm] ||
2500 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2501 for (y = 0; y < 8; y++)
2502 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2503 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2504 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2505 pict->linesize[0]) ^= 0x8080808080808080ULL;
2509 if (IS_INTERLACED(mb_type) &&
2510 avctx->codec->id == AV_CODEC_ID_H264) {
2515 mbskip_table[mb_index] = 0;
/* Convenience wrapper: dump per-MB debug info (skip flags, MB types, QP,
 * motion vectors) for Picture 'p' onto/about frame 'pict' by forwarding
 * the context's tables to ff_print_debug_info2(). */
2521 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2523 ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
2524 p->qscale_table, p->motion_val, &s->low_delay,
2525 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/* Export the per-macroblock qscale table of Picture 'p' as QP-table side
 * data on frame 'f'.  Returns 0 on success or a negative AVERROR code. */
2528 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2530 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
/* skip the table's top padding row plus one leading column */
2531 int offset = 2*s->mb_stride + 1;
/* NOTE(review): this return is presumably guarded by an 'if (!ref)' on a
 * line not visible in this excerpt -- confirm against the full file. */
2533 return AVERROR(ENOMEM);
/* the (adjusted) buffer must still cover one stride per 16-pixel MB row */
2534 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2535 ref->size -= offset;
2536 ref->data += offset;
2537 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/* Half-pel motion compensation of one w x h block in lowres decoding.
 * Reads from 'src' (one reference plane), writes to 'dest'; out-of-picture
 * reads are redirected through the edge emulation buffer. */
2540 static inline int hpel_motion_lowres(MpegEncContext *s,
2541 uint8_t *dest, uint8_t *src,
2542 int field_based, int field_select,
2543 int src_x, int src_y,
2544 int width, int height, ptrdiff_t stride,
2545 int h_edge_pos, int v_edge_pos,
2546 int w, int h, h264_chroma_mc_func *pix_op,
2547 int motion_x, int motion_y)
2549 const int lowres = s->avctx->lowres;
/* pix_op tables only go up to index 3 */
2550 const int op_index = FFMIN(lowres, 3);
/* mask selecting the sub-pel fraction bits at this lowres level */
2551 const int s_mask = (2 << lowres) - 1;
2555 if (s->quarter_sample) {
/* split the MV into sub-pel phase (sx/sy) and integer displacement;
 * note '>> lowres + 1' parses as '>> (lowres + 1)' -- intended here */
2560 sx = motion_x & s_mask;
2561 sy = motion_y & s_mask;
2562 src_x += motion_x >> lowres + 1;
2563 src_y += motion_y >> lowres + 1;
2565 src += src_y * stride + src_x;
/* emulate picture edges when the block plus interpolation margin
 * (one extra sample per non-zero phase) would read outside the plane */
2567 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2568 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2569 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2570 s->linesize, s->linesize,
2571 w + 1, (h + 1) << field_based,
2572 src_x, src_y << field_based,
2573 h_edge_pos, v_edge_pos);
2574 src = s->edge_emu_buffer;
/* rescale the phase into the fixed range expected by the chroma MC op */
2578 sx = (sx << 2) >> lowres;
2579 sy = (sy << 2) >> lowres;
2582 pix_op[op_index](dest, src, stride, h, sx, sy);
2586 /* apply one mpeg motion vector to the three components */
/* Lowres motion compensation for luma + both chroma planes of one MB.
 * Chroma MV derivation depends on s->out_format (H.263 / H.261 / other)
 * and on the chroma subsampling shifts. */
2587 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2594 uint8_t **ref_picture,
2595 h264_chroma_mc_func *pix_op,
2596 int motion_x, int motion_y,
2599 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2600 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2601 ptrdiff_t uvlinesize, linesize;
2602 const int lowres = s->avctx->lowres;
2603 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2604 const int block_s = 8>>lowres;
2605 const int s_mask = (2 << lowres) - 1;
2606 const int h_edge_pos = s->h_edge_pos >> lowres;
2607 const int v_edge_pos = s->v_edge_pos >> lowres;
/* doubling the linesize steps over the other field's rows in field mode */
2608 linesize = s->current_picture.f->linesize[0] << field_based;
2609 uvlinesize = s->current_picture.f->linesize[1] << field_based;
2611 // FIXME obviously not perfect but qpel will not work in lowres anyway
2612 if (s->quarter_sample) {
2618 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
/* luma: sub-pel phase and integer source position
 * ('>> lowres + 1' is '>> (lowres + 1)' -- intended) */
2621 sx = motion_x & s_mask;
2622 sy = motion_y & s_mask;
2623 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2624 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* chroma position derivation is codec-family specific */
2626 if (s->out_format == FMT_H263) {
2627 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2628 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2629 uvsrc_x = src_x >> 1;
2630 uvsrc_y = src_y >> 1;
2631 } else if (s->out_format == FMT_H261) {
2632 // even chroma mv's are full pel in H261
2635 uvsx = (2 * mx) & s_mask;
2636 uvsy = (2 * my) & s_mask;
2637 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2638 uvsrc_y = mb_y * block_s + (my >> lowres);
2640 if(s->chroma_y_shift){
2645 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2646 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2648 if(s->chroma_x_shift){
2652 uvsy = motion_y & s_mask;
2654 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2657 uvsx = motion_x & s_mask;
2658 uvsy = motion_y & s_mask;
2665 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2666 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2667 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* fall back to edge emulation when any plane read would leave the picture */
2669 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2670 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2671 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2672 linesize >> field_based, linesize >> field_based,
2673 17, 17 + field_based,
2674 src_x, src_y << field_based, h_edge_pos,
2676 ptr_y = s->edge_emu_buffer;
/* chroma only needs emulation when chroma is actually decoded */
2677 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
2678 uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2679 uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2680 s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2681 uvlinesize >> field_based, uvlinesize >> field_based,
2683 uvsrc_x, uvsrc_y << field_based,
2684 h_edge_pos >> 1, v_edge_pos >> 1);
2685 s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2686 uvlinesize >> field_based,uvlinesize >> field_based,
2688 uvsrc_x, uvsrc_y << field_based,
2689 h_edge_pos >> 1, v_edge_pos >> 1);
2695 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
/* bottom field: advance dest/src by one (frame) row */
2697 dest_y += s->linesize;
2698 dest_cb += s->uvlinesize;
2699 dest_cr += s->uvlinesize;
2703 ptr_y += s->linesize;
2704 ptr_cb += s->uvlinesize;
2705 ptr_cr += s->uvlinesize;
/* normalize phases and run the MC ops (luma, then chroma unless gray) */
2708 sx = (sx << 2) >> lowres;
2709 sy = (sy << 2) >> lowres;
2710 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2712 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
2713 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2714 uvsx = (uvsx << 2) >> lowres;
2715 uvsy = (uvsy << 2) >> lowres;
2717 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2718 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2721 // FIXME h261 lowres loop filter
/* Lowres chroma MC for a 4MV macroblock: the four luma MVs are merged
 * (via ff_h263_round_chroma) into a single chroma vector applied to both
 * the Cb and Cr planes. */
2724 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2725 uint8_t *dest_cb, uint8_t *dest_cr,
2726 uint8_t **ref_picture,
2727 h264_chroma_mc_func * pix_op,
2730 const int lowres = s->avctx->lowres;
2731 const int op_index = FFMIN(lowres, 3);
2732 const int block_s = 8 >> lowres;
2733 const int s_mask = (2 << lowres) - 1;
/* chroma planes are half-size, hence the extra '+ 1' shift
 * ('>> lowres + 1' parses as '>> (lowres + 1)' -- intended) */
2734 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2735 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2736 int emu = 0, src_x, src_y, sx, sy;
2740 if (s->quarter_sample) {
2745 /* In case of 8X8, we construct a single chroma motion vector
2746 with a special rounding */
2747 mx = ff_h263_round_chroma(mx);
2748 my = ff_h263_round_chroma(my);
2752 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2753 src_y = s->mb_y * block_s + (my >> lowres + 1);
2755 offset = src_y * s->uvlinesize + src_x;
2756 ptr = ref_picture[1] + offset;
/* Cb: edge-emulate if needed, then run the MC op */
2757 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2758 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2759 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2760 s->uvlinesize, s->uvlinesize,
2762 src_x, src_y, h_edge_pos, v_edge_pos);
2763 ptr = s->edge_emu_buffer;
2766 sx = (sx << 2) >> lowres;
2767 sy = (sy << 2) >> lowres;
2768 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr: same source offset and phase as Cb */
2770 ptr = ref_picture[2] + offset;
2772 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2773 s->uvlinesize, s->uvlinesize,
2775 src_x, src_y, h_edge_pos, v_edge_pos);
2776 ptr = s->edge_emu_buffer;
2778 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2782 * motion compensation of a single macroblock
2784 * @param dest_y luma destination pointer
2785 * @param dest_cb chroma cb/u destination pointer
2786 * @param dest_cr chroma cr/v destination pointer
2787 * @param dir direction (0->forward, 1->backward)
2788 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2789 * @param pix_op halfpel motion compensation function (average or put normally)
2790 * the motion vectors are taken from s->mv and the MV type from s->mv_type
2792 static inline void MPV_motion_lowres(MpegEncContext *s,
2793 uint8_t *dest_y, uint8_t *dest_cb,
2795 int dir, uint8_t **ref_picture,
2796 h264_chroma_mc_func *pix_op)
2800 const int lowres = s->avctx->lowres;
2801 const int block_s = 8 >>lowres;
/* dispatch on MV type: 16x16 / 8x8 (4MV) / field / 16x8 / dual-prime */
2806 switch (s->mv_type) {
/* one MV for the whole macroblock */
2808 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2810 ref_picture, pix_op,
2811 s->mv[dir][0][0], s->mv[dir][0][1],
/* 4MV: one half-pel MC per 8x8 luma block, chroma handled separately */
2817 for (i = 0; i < 4; i++) {
2818 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2819 s->linesize) * block_s,
2820 ref_picture[0], 0, 0,
2821 (2 * mb_x + (i & 1)) * block_s,
2822 (2 * mb_y + (i >> 1)) * block_s,
2823 s->width, s->height, s->linesize,
2824 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2825 block_s, block_s, pix_op,
2826 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate the four luma MVs for the merged chroma vector */
2828 mx += s->mv[dir][i][0];
2829 my += s->mv[dir][i][1];
2832 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY))
2833 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field MVs inside a frame picture: compensate each field separately */
2837 if (s->picture_structure == PICT_FRAME) {
2839 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2840 1, 0, s->field_select[dir][0],
2841 ref_picture, pix_op,
2842 s->mv[dir][0][0], s->mv[dir][0][1],
2845 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2846 1, 1, s->field_select[dir][1],
2847 ref_picture, pix_op,
2848 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture: opposite-parity reference may live in the current frame */
2851 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2852 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2853 ref_picture = s->current_picture_ptr->f->data;
2856 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2857 0, 0, s->field_select[dir][0],
2858 ref_picture, pix_op,
2860 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two MVs, upper and lower half of the macroblock */
2864 for (i = 0; i < 2; i++) {
2865 uint8_t **ref2picture;
2867 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2868 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2869 ref2picture = ref_picture;
2871 ref2picture = s->current_picture_ptr->f->data;
2874 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2875 0, 0, s->field_select[dir][i],
2876 ref2picture, pix_op,
2877 s->mv[dir][i][0], s->mv[dir][i][1] +
2878 2 * block_s * i, block_s, mb_y >> 1);
2880 dest_y += 2 * block_s * s->linesize;
2881 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2882 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* dual-prime: put first prediction, then average in the second */
2886 if (s->picture_structure == PICT_FRAME) {
2887 for (i = 0; i < 2; i++) {
2889 for (j = 0; j < 2; j++) {
2890 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2892 ref_picture, pix_op,
2893 s->mv[dir][2 * i + j][0],
2894 s->mv[dir][2 * i + j][1],
2897 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2900 for (i = 0; i < 2; i++) {
2901 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2902 0, 0, s->picture_structure != i + 1,
2903 ref_picture, pix_op,
2904 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2905 2 * block_s, mb_y >> 1);
2907 // after put we make avg of the same block
2908 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2910 // opposite parity is always in the same
2911 // frame if this is second field
2912 if (!s->first_field) {
2913 ref_picture = s->current_picture_ptr->f->data;
2924 * find the lowest MB row referenced in the MVs
/* Used for frame-threading: tells how far the reference frame must be
 * decoded before this MB row can be motion-compensated. */
2926 int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
2928 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2929 int my, off, i, mvs;
/* field pictures / GMC fall through to the conservative default below */
2931 if (s->picture_structure != PICT_FRAME || s->mcsel)
/* mvs (number of vectors to scan) is set per MV type in the switch */
2934 switch (s->mv_type) {
2948 for (i = 0; i < mvs; i++) {
2949 my = s->mv[dir][i][1];
2950 my_max = FFMAX(my_max, my);
2951 my_min = FFMIN(my_min, my);
/* convert the largest vertical displacement (in sub-pel) to MB rows,
 * rounding up; +63 >> 6 == ceil-divide by 64 (16 pixels * 4 qpel) */
2954 off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
2956 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* conservative fallback: the whole reference may be needed */
2958 return s->mb_height-1;
2961 /* put block[] to dest[] */
/* Dequantize an intra block in place, then overwrite dest with its IDCT. */
2962 static inline void put_dct(MpegEncContext *s,
2963 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2965 s->dct_unquantize_intra(s, block, i, qscale);
2966 s->idsp.idct_put(dest, line_size, block);
2969 /* add block[] to dest[] */
/* Add the (already dequantized) IDCT of block i to dest; a negative
 * block_last_index means the block has no coded coefficients. */
2970 static inline void add_dct(MpegEncContext *s,
2971 int16_t *block, int i, uint8_t *dest, int line_size)
2973 if (s->block_last_index[i] >= 0) {
2974 s->idsp.idct_add(dest, line_size, block);
/* Dequantize an inter block in place, then add its IDCT to dest.
 * Skipped entirely when the block has no coded coefficients. */
2978 static inline void add_dequant_dct(MpegEncContext *s,
2979 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2981 if (s->block_last_index[i] >= 0) {
2982 s->dct_unquantize_inter(s, block, i, qscale);
2984 s->idsp.idct_add(dest, line_size, block);
2989 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra prediction state (DC predictors back to the default
 * 1024, AC coefficients zeroed, coded_block flags cleared) for the MB at
 * (s->mb_x, s->mb_y) so that following intra MBs predict correctly. */
2991 void ff_clean_intra_table_entries(MpegEncContext *s)
2993 int wrap = s->b8_stride;
2994 int xy = s->block_index[0];
/* luma: reset DC predictors for all four 8x8 blocks of the MB */
2997 s->dc_val[0][xy + 1 ] =
2998 s->dc_val[0][xy + wrap] =
2999 s->dc_val[0][xy + 1 + wrap] = 1024;
/* luma: clear two rows of AC coefficient predictors */
3001 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
3002 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
/* MSMPEG4 v3+ additionally tracks per-block coded flags */
3003 if (s->msmpeg4_version>=3) {
3004 s->coded_block[xy ] =
3005 s->coded_block[xy + 1 ] =
3006 s->coded_block[xy + wrap] =
3007 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one entry per MB, addressed on the MB grid */
3010 wrap = s->mb_stride;
3011 xy = s->mb_x + s->mb_y * wrap;
3013 s->dc_val[2][xy] = 1024;
3015 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
3016 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
/* mark this MB as no longer intra */
3018 s->mbintra_table[xy]= 0;
3021 /* generic function called after a macroblock has been parsed by the
3022 decoder or after it has been encoded by the encoder.
3024 Important variables used:
3025 s->mb_intra : true if intra macroblock
3026 s->mv_dir : motion vector direction
3027 s->mv_type : motion vector type
3028 s->mv : motion vector
3029 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstruct one macroblock: motion compensation (normal or lowres path)
 * followed by dequant/IDCT of the residual.  'lowres_flag' and 'is_mpeg12'
 * are compile-time constants for the always-inline specializations created
 * by ff_mpv_decode_mb(). */
3031 static av_always_inline
3032 void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
3033 int lowres_flag, int is_mpeg12)
3035 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* hardware acceleration handles the whole MB itself */
3038 s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
3039 s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
3043 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
3044 /* print DCT coefficients */
3046 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
3048 for(j=0; j<64; j++){
3049 av_log(s->avctx, AV_LOG_DEBUG, "%5d",
3050 block[i][s->idsp.idct_permutation[j]]);
3052 av_log(s->avctx, AV_LOG_DEBUG, "\n");
3056 s->current_picture.qscale_table[mb_xy] = s->qscale;
3058 /* update DC predictors for P macroblocks */
3060 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
3061 if(s->mbintra_table[mb_xy])
3062 ff_clean_intra_table_entries(s);
3066 s->last_dc[2] = 128 << s->intra_dc_precision;
3069 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
3070 s->mbintra_table[mb_xy]=1;
/* skip reconstruction only when encoding can prove it is unneeded */
3072 if ((s->avctx->flags & CODEC_FLAG_PSNR) || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor ||
3073 !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
3074 s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
3075 uint8_t *dest_y, *dest_cb, *dest_cr;
3076 int dct_linesize, dct_offset;
3077 op_pixels_func (*op_pix)[4];
3078 qpel_mc_func (*op_qpix)[16];
3079 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3080 const int uvlinesize = s->current_picture.f->linesize[1];
3081 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
3082 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
3084 /* avoid copy if macroblock skipped in last frame too */
3085 /* skip only during decoding as we might trash the buffers during encoding a bit */
3087 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
3089 if (s->mb_skipped) {
3091 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
3093 } else if(!s->current_picture.reference) {
3096 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: rows of the two fields interleave, so double the stride */
3100 dct_linesize = linesize << s->interlaced_dct;
3101 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
3105 dest_cb= s->dest[1];
3106 dest_cr= s->dest[2];
/* unreadable destination: reconstruct into the scratchpad instead */
3108 dest_y = s->b_scratchpad;
3109 dest_cb= s->b_scratchpad+16*linesize;
3110 dest_cr= s->b_scratchpad+32*linesize;
3114 /* motion handling */
3115 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the referenced rows of the reference
 * frames have been decoded */
3118 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
3119 if (s->mv_dir & MV_DIR_FORWARD) {
3120 ff_thread_await_progress(&s->last_picture_ptr->tf,
3121 ff_mpv_lowest_referenced_row(s, 0),
3124 if (s->mv_dir & MV_DIR_BACKWARD) {
3125 ff_thread_await_progress(&s->next_picture_ptr->tf,
3126 ff_mpv_lowest_referenced_row(s, 1),
/* lowres MC path: forward put, then backward averaged on top */
3132 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
3134 if (s->mv_dir & MV_DIR_FORWARD) {
3135 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
3136 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
3138 if (s->mv_dir & MV_DIR_BACKWARD) {
3139 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
/* full-resolution MC path, same put-then-average scheme */
3142 op_qpix = s->me.qpel_put;
3143 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
3144 op_pix = s->hdsp.put_pixels_tab;
3146 op_pix = s->hdsp.put_no_rnd_pixels_tab;
3148 if (s->mv_dir & MV_DIR_FORWARD) {
3149 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
3150 op_pix = s->hdsp.avg_pixels_tab;
3151 op_qpix= s->me.qpel_avg;
3153 if (s->mv_dir & MV_DIR_BACKWARD) {
3154 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
3159 /* skip dequant / idct if we are really late ;) */
3160 if(s->avctx->skip_idct){
3161 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
3162 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
3163 || s->avctx->skip_idct >= AVDISCARD_ALL)
3167 /* add dct residue */
/* inter blocks, codecs that need dequant at this stage */
3168 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
3169 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
3170 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3171 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3172 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3173 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3175 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
3176 if (s->chroma_y_shift){
3177 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3178 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3182 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3183 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3184 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3185 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* codecs whose blocks are already dequantized by the bitstream reader */
3188 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3189 add_dct(s, block[0], 0, dest_y , dct_linesize);
3190 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3191 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3192 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3194 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
3195 if(s->chroma_y_shift){//Chroma420
3196 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3197 add_dct(s, block[5], 5, dest_cr, uvlinesize);
3200 dct_linesize = uvlinesize << s->interlaced_dct;
3201 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3203 add_dct(s, block[4], 4, dest_cb, dct_linesize);
3204 add_dct(s, block[5], 5, dest_cr, dct_linesize);
3205 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3206 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3207 if(!s->chroma_x_shift){//Chroma444
3208 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3209 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3210 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3211 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3216 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3217 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3220 /* dct only in intra block */
3221 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
3222 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3223 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3224 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3225 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3227 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
3228 if(s->chroma_y_shift){
3229 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3230 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3234 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3235 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3236 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3237 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra path: blocks are already dequantized, plain IDCT put */
3241 s->idsp.idct_put(dest_y, dct_linesize, block[0]);
3242 s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3243 s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
3244 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3246 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
3247 if(s->chroma_y_shift){
3248 s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
3249 s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
3252 dct_linesize = uvlinesize << s->interlaced_dct;
3253 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3255 s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
3256 s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
3257 s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3258 s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3259 if(!s->chroma_x_shift){//Chroma444
3260 s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3261 s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3262 s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3263 s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* copy the scratchpad reconstruction back into the real destination */
3271 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3272 if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
3273 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3274 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Entry point for MB reconstruction: dispatches to the four always-inline
 * specializations of mpv_decode_mb_internal() selected by lowres and
 * MPEG-1/2 output format, so the flags are compile-time constants. */
3280 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
3283 if(s->out_format == FMT_MPEG1) {
3284 if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 1);
3285 else mpv_decode_mb_internal(s, block, 0, 1);
3288 if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 0);
3289 else mpv_decode_mb_internal(s, block, 0, 0);
/* Notify the user's draw_horiz_band callback that rows [y, y+h) of the
 * current picture are ready, via the generic ff_draw_horiz_band(). */
3292 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3294 ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3295 s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3296 s->first_field, s->low_delay);
/* Set up s->block_index[] (DC/AC predictor indices for the current MB)
 * and the s->dest[] plane pointers for the MB at (s->mb_x, s->mb_y). */
3299 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3300 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3301 const int uvlinesize = s->current_picture.f->linesize[1];
/* MB is 16 pixels wide at full res; each lowres level halves it */
3302 const int mb_size= 4 - s->avctx->lowres;
/* indices 0-3: the four luma 8x8 blocks on the b8 grid */
3304 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3305 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3306 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3307 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* indices 4-5: chroma blocks, stored after the luma area */
3308 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3309 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3310 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* dest pointers start one MB to the left (mb_x - 1, wrapped unsigned) */
3312 s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) << mb_size);
3313 s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (mb_size - s->chroma_x_shift));
3314 s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (mb_size - s->chroma_x_shift));
3316 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3318 if(s->picture_structure==PICT_FRAME){
3319 s->dest[0] += s->mb_y * linesize << mb_size;
3320 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3321 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: two MB rows share one stored row, hence mb_y >> 1 */
3323 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3324 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3325 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3326 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3332 * Permute an 8x8 block.
3333 * @param block the block which will be permuted according to the given permutation vector
3334 * @param permutation the permutation vector
3335 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3336 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3337 * (inverse) permutated to scantable order!
3339 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3345 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* first pass: copy the nonzero coefficients (in scan order) aside */
3347 for(i=0; i<=last; i++){
3348 const int j= scantable[i];
/* second pass: write them back at their permuted positions */
3353 for(i=0; i<=last; i++){
3354 const int j= scantable[i];
3355 const int perm_j= permutation[j];
3356 block[perm_j]= temp[j];
/* Flush/reset the decoder state on seek: drop all picture references,
 * reset MB position, clear the parser state and the bitstream buffer. */
3360 void ff_mpeg_flush(AVCodecContext *avctx){
3362 MpegEncContext *s = avctx->priv_data;
/* nothing to do if the context was never (fully) initialized */
3364 if (!s || !s->picture)
3367 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3368 ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
3369 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3371 ff_mpeg_unref_picture(s->avctx, &s->current_picture);
3372 ff_mpeg_unref_picture(s->avctx, &s->last_picture);
3373 ff_mpeg_unref_picture(s->avctx, &s->next_picture);
3375 s->mb_x= s->mb_y= 0;
/* reset the raw-stream parser so it resyncs from scratch */
3378 s->parse_context.state= -1;
3379 s->parse_context.frame_start_found= 0;
3380 s->parse_context.overread= 0;
3381 s->parse_context.overread_index= 0;
3382 s->parse_context.index= 0;
3383 s->parse_context.last_index= 0;
3384 s->bitstream_buffer_size=0;
3389 * set qscale and update qscale dependent variables.
/* Sets s->qscale and derives chroma qscale and the luma/chroma DC scale
 * factors from the per-codec lookup tables.  The qscale clamp to [1,31]
 * is partially visible here (only the upper-bound branch is shown). */
3391 void ff_set_qscale(MpegEncContext * s, int qscale)
3395 else if (qscale > 31)
3399 s->chroma_qscale= s->chroma_qscale_table[qscale];
3401 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3402 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report decode progress (current MB row) to waiting frame threads.
 * Suppressed for B-frames, partitioned frames, and after decode errors,
 * where the row cannot safely be treated as final. */
3405 void ff_mpv_report_decode_progress(MpegEncContext *s)
3407 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3408 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);