2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/motion_vector.h"
35 #include "libavutil/timer.h"
38 #include "h264chroma.h"
42 #include "mpegutils.h"
43 #include "mpegvideo.h"
/* Default chroma qscale table: identity mapping, i.e. chroma uses the
 * same quantiser scale as luma for every qscale value 0..31. */
50 static const uint8_t ff_default_chroma_qscale_table[32] = {
51 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
52 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
53 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC quantiser scale: constant 8 for every qscale value (128 entries). */
56 const uint8_t ff_mpeg1_dc_scale_table[128] = {
57 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
65 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table, entry 1 of ff_mpeg2_dc_scale_table below:
 * constant 4 for every qscale value. */
68 static const uint8_t mpeg2_dc_scale_table1[128] = {
69 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
77 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table, entry 2: constant 2 for every qscale value. */
80 static const uint8_t mpeg2_dc_scale_table2[128] = {
81 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
89 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table, entry 3: constant 1 (no DC scaling). */
92 static const uint8_t mpeg2_dc_scale_table3[128] = {
93 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Selector of the four DC scale tables above (divisors 8, 4, 2, 1).
 * NOTE(review): presumably indexed by intra_dc_precision — the index
 * semantics are not visible in this file; confirm at the call sites. */
104 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
105 ff_mpeg1_dc_scale_table,
106 mpeg2_dc_scale_table1,
107 mpeg2_dc_scale_table2,
108 mpeg2_dc_scale_table3,
/* Alternate horizontal scan: a permutation of the 64 coefficient
 * positions of an 8x8 block (each value 0..63 appears exactly once). */
111 const uint8_t ff_alternate_horizontal_scan[64] = {
112 0, 1, 2, 3, 8, 9, 16, 17,
113 10, 11, 4, 5, 6, 7, 15, 14,
114 13, 12, 19, 18, 24, 25, 32, 33,
115 26, 27, 20, 21, 22, 23, 28, 29,
116 30, 31, 34, 35, 40, 41, 48, 49,
117 42, 43, 36, 37, 38, 39, 44, 45,
118 46, 47, 50, 51, 56, 57, 58, 59,
119 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical scan: used for interlaced content when
 * alternate_scan is set (see ff_mpv_idct_init below). */
122 const uint8_t ff_alternate_vertical_scan[64] = {
123 0, 8, 16, 24, 1, 9, 2, 10,
124 17, 25, 32, 40, 48, 56, 57, 49,
125 41, 33, 26, 18, 3, 11, 4, 12,
126 19, 27, 34, 42, 50, 58, 35, 43,
127 51, 59, 20, 28, 5, 13, 6, 14,
128 21, 29, 36, 44, 52, 60, 37, 45,
129 53, 61, 22, 30, 7, 15, 23, 31,
130 38, 46, 54, 62, 39, 47, 55, 63,
/* Reverse MPEG-1 intra quantisation of one 8x8 block.
 * n selects the plane: n < 4 uses the luma DC scale, otherwise chroma.
 * The DC term (block[0]) is scaled directly; AC terms are scaled by
 * qscale * intra_matrix[] >> 3 and then forced odd ("(level - 1) | 1"
 * — mismatch control / oddification).  Only coefficients up to
 * block_last_index[n] are touched. */
133 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
134 int16_t *block, int n, int qscale)
136 int i, level, nCoeffs;
137 const uint16_t *quant_matrix;
139 nCoeffs= s->block_last_index[n];
141 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
142 /* XXX: only mpeg1 */
143 quant_matrix = s->intra_matrix;
144 for(i=1;i<=nCoeffs;i++) {
145 int j= s->intra_scantable.permutated[i];
/* negative-coefficient path: scale, then oddify */
150 level = (int)(level * qscale * quant_matrix[j]) >> 3;
151 level = (level - 1) | 1;
/* positive-coefficient path */
154 level = (int)(level * qscale * quant_matrix[j]) >> 3;
155 level = (level - 1) | 1;
/* Reverse MPEG-1 inter quantisation of one 8x8 block.
 * Unlike the intra case the DC term gets no special scaling and the
 * loop starts at i = 0; each coefficient is reconstructed as
 * ((2*level + 1) * qscale * inter_matrix[]) >> 4, then forced odd.
 * NOTE(review): the scan permutation is read from intra_scantable even
 * though this is the inter path — presumably the permutation is shared;
 * confirm against the scantable setup in ff_mpv_idct_init. */
162 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
163 int16_t *block, int n, int qscale)
165 int i, level, nCoeffs;
166 const uint16_t *quant_matrix;
168 nCoeffs= s->block_last_index[n];
170 quant_matrix = s->inter_matrix;
171 for(i=0; i<=nCoeffs; i++) {
172 int j= s->intra_scantable.permutated[i];
/* negative-coefficient path */
177 level = (((level << 1) + 1) * qscale *
178 ((int) (quant_matrix[j]))) >> 4;
179 level = (level - 1) | 1;
/* positive-coefficient path */
182 level = (((level << 1) + 1) * qscale *
183 ((int) (quant_matrix[j]))) >> 4;
184 level = (level - 1) | 1;
/* Reverse MPEG-2 intra quantisation of one 8x8 block.
 * With alternate_scan all 64 coefficients are processed regardless of
 * block_last_index.  Same scaling as the MPEG-1 intra path but without
 * the "(level - 1) | 1" oddification. */
191 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
192 int16_t *block, int n, int qscale)
194 int i, level, nCoeffs;
195 const uint16_t *quant_matrix;
197 if(s->alternate_scan) nCoeffs= 63;
198 else nCoeffs= s->block_last_index[n];
200 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
201 quant_matrix = s->intra_matrix;
202 for(i=1;i<=nCoeffs;i++) {
203 int j= s->intra_scantable.permutated[i];
/* negative-coefficient path */
208 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* positive-coefficient path */
211 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of dct_unquantize_mpeg2_intra_c, selected in
 * dct_init when CODEC_FLAG_BITEXACT is set.
 * NOTE(review): the lines that distinguish it from the plain variant
 * (the parity/mismatch handling) are not visible in this excerpt —
 * confirm before relying on the exact difference. */
218 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
219 int16_t *block, int n, int qscale)
221 int i, level, nCoeffs;
222 const uint16_t *quant_matrix;
225 if(s->alternate_scan) nCoeffs= 63;
226 else nCoeffs= s->block_last_index[n];
228 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
230 quant_matrix = s->intra_matrix;
231 for(i=1;i<=nCoeffs;i++) {
232 int j= s->intra_scantable.permutated[i];
237 level = (int)(level * qscale * quant_matrix[j]) >> 3;
240 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Reverse MPEG-2 inter quantisation of one 8x8 block.
 * Same reconstruction formula as the MPEG-1 inter path,
 * ((2*level + 1) * qscale * inter_matrix[]) >> 4, but without the
 * oddification step; with alternate_scan all 64 coefficients are done. */
249 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
250 int16_t *block, int n, int qscale)
252 int i, level, nCoeffs;
253 const uint16_t *quant_matrix;
256 if(s->alternate_scan) nCoeffs= 63;
257 else nCoeffs= s->block_last_index[n];
259 quant_matrix = s->inter_matrix;
260 for(i=0; i<=nCoeffs; i++) {
261 int j= s->intra_scantable.permutated[i];
/* negative-coefficient path */
266 level = (((level << 1) + 1) * qscale *
267 ((int) (quant_matrix[j]))) >> 4;
/* positive-coefficient path */
270 level = (((level << 1) + 1) * qscale *
271 ((int) (quant_matrix[j]))) >> 4;
/* Reverse H.263 intra quantisation: level' = level * qmul +/- qadd with
 * qadd = (qscale - 1) | 1 (always odd).  block_last_index may be < 0
 * only in AIC mode (see the assert).  The raster_end[] lookup converts
 * the last index into a raster-order coefficient count. */
280 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
281 int16_t *block, int n, int qscale)
283 int i, level, qmul, qadd;
286 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
291 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
292 qadd = (qscale - 1) | 1;
299 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
301 for(i=1; i<=nCoeffs; i++) {
/* negative levels subtract qadd, positive levels add it */
305 level = level * qmul - qadd;
307 level = level * qmul + qadd;
/* Reverse H.263 inter quantisation: identical formula to the intra
 * variant but the DC term gets no special scaling and the loop starts
 * at coefficient 0. */
314 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
315 int16_t *block, int n, int qscale)
317 int i, level, qmul, qadd;
320 av_assert2(s->block_last_index[n]>=0);
322 qadd = (qscale - 1) | 1;
325 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
327 for(i=0; i<=nCoeffs; i++) {
/* negative levels subtract qadd, positive levels add it */
331 level = level * qmul - qadd;
333 level = level * qmul + qadd;
/* Error-resilience callback (installed as er->decode_mb in init_er):
 * re-decodes one macroblock during concealment.  Copies the MB state
 * passed by the ER core into the context, recomputes the destination
 * pointers from current_picture and the chroma shifts, then runs the
 * normal MB decode path. */
340 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
342 int mb_x, int mb_y, int mb_intra, int mb_skipped)
344 MpegEncContext *s = opaque;
347 s->mv_type = mv_type;
348 s->mb_intra = mb_intra;
349 s->mb_skipped = mb_skipped;
352 memcpy(s->mv, mv, sizeof(*mv));
354 ff_init_block_index(s);
355 ff_update_block_index(s);
357 s->bdsp.clear_blocks(s->block[0]);
/* dest[] = plane base + MB position; chroma planes use the subsampling shifts */
359 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
360 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
361 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
364 av_log(s->avctx, AV_LOG_DEBUG,
365 "Interlaced error concealment is not fully implemented\n");
366 ff_mpv_decode_mb(s, s->block);
/* Debug stub for 16-wide pixel ops (FF_DEBUG_NOMC): fills each of the
 * h rows with mid-grey (128) instead of doing motion compensation;
 * src is ignored. */
369 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
372 memset(dst + h*linesize, 128, 16);
/* 8-wide counterpart of gray16: fills each row with mid-grey (128). */
375 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
378 memset(dst + h*linesize, 128, 8);
381 /* init common dct for both encoder and decoder */
/* Initialises the shared DSP contexts, optionally replaces the pixel
 * ops with grey stubs for FF_DEBUG_NOMC, installs the C dequantisers
 * (bit-exact MPEG-2 intra variant under CODEC_FLAG_BITEXACT), and then
 * lets the per-architecture init functions override them. */
382 static av_cold int dct_init(MpegEncContext *s)
384 ff_blockdsp_init(&s->bdsp, s->avctx);
385 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
386 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
387 ff_me_cmp_init(&s->mecc, s->avctx);
388 ff_mpegvideodsp_init(&s->mdsp);
389 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
391 if (s->avctx->debug & FF_DEBUG_NOMC) {
393 for (i=0; i<4; i++) {
/* [0] = 16-wide luma tables, [1] = 8-wide chroma tables */
394 s->hdsp.avg_pixels_tab[0][i] = gray16;
395 s->hdsp.put_pixels_tab[0][i] = gray16;
396 s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
398 s->hdsp.avg_pixels_tab[1][i] = gray8;
399 s->hdsp.put_pixels_tab[1][i] = gray8;
400 s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
404 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
405 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
406 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
407 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
408 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
409 if (s->flags & CODEC_FLAG_BITEXACT)
410 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
411 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* per-architecture overrides of the function pointers set above */
413 if (HAVE_INTRINSICS_NEON)
414 ff_mpv_common_init_neon(s);
417 ff_mpv_common_init_axp(s);
419 ff_mpv_common_init_arm(s);
421 ff_mpv_common_init_ppc(s);
423 ff_mpv_common_init_x86(s);
/* Initialises the IDCT context and builds the four scantables, applying
 * the IDCT's coefficient permutation.  inter/intra tables use the
 * vertical scan when alternate_scan is set, zigzag otherwise; the
 * explicit h/v tables always get the respective alternate scans. */
428 av_cold void ff_mpv_idct_init(MpegEncContext *s)
430 ff_idctdsp_init(&s->idsp, s->avctx);
432 /* load & permutate scantables
433 * note: only wmv uses different ones
435 if (s->alternate_scan) {
436 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
437 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
439 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
440 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
442 ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
443 ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocates the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads).  The ME scratchpad is shared: temp,
 * rd_scratchpad, b_scratchpad and obmc_scratchpad all alias into it.
 * Skipped for hwaccel/VDPAU.  Returns 0, AVERROR_PATCHWELCOME for
 * too-small images, or AVERROR(ENOMEM) on allocation failure (freeing
 * the edge buffer first). */
446 static int frame_size_alloc(MpegEncContext *s, int linesize)
448 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
450 if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
454 av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
455 return AVERROR_PATCHWELCOME;
458 // edge emu needs blocksize + filter length - 1
459 // (= 17x17 for halfpel / 21x21 for h264)
460 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
461 // at uvlinesize. It supports only YUV420 so 24x24 is enough
462 // linesize * interlaced * MBsize
463 // we also use this buffer for encoding in encode_mb_internal() needig an additional 32 lines
464 FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size, 4 * 68,
467 FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size, 4 * 16 * 2,
469 s->me.temp = s->me.scratchpad;
470 s->rd_scratchpad = s->me.scratchpad;
471 s->b_scratchpad = s->me.scratchpad;
472 s->obmc_scratchpad = s->me.scratchpad + 16;
476 av_freep(&s->edge_emu_buffer);
477 return AVERROR(ENOMEM);
481 * Allocate a frame buffer
/* For encoders (except the WM image/screen codecs) the frame is padded
 * by EDGE_WIDTH on every side and the data pointers are then offset so
 * pic->f->data[] points at the visible area; decoders go through
 * ff_thread_get_buffer with the user callbacks.  Also allocates
 * per-frame hwaccel private data, validates that strides match the
 * context (and that U/V strides agree), and lazily allocates the
 * context scratch buffers via frame_size_alloc. */
483 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
485 int edges_needed = av_codec_is_encoder(s->avctx->codec);
/* WM image/screen codecs use internal buffers with other dimensions /
 * colourspaces, so the user get_buffer callback must not be used */
489 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
490 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
491 s->codec_id != AV_CODEC_ID_MSS2) {
493 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
494 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
497 r = ff_thread_get_buffer(s->avctx, &pic->tf,
498 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
500 pic->f->width = s->avctx->width;
501 pic->f->height = s->avctx->height;
502 pic->f->format = s->avctx->pix_fmt;
503 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
506 if (r < 0 || !pic->f->buf[0]) {
507 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* shift data[] past the allocated edges and restore the visible size */
514 for (i = 0; pic->f->data[i]; i++) {
515 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
516 pic->f->linesize[i] +
517 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
518 pic->f->data[i] += offset;
520 pic->f->width = s->avctx->width;
521 pic->f->height = s->avctx->height;
524 if (s->avctx->hwaccel) {
525 assert(!pic->hwaccel_picture_private);
526 if (s->avctx->hwaccel->frame_priv_data_size) {
527 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
528 if (!pic->hwaccel_priv_buf) {
529 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
532 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* strides must not change after the first allocated frame */
536 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
537 s->uvlinesize != pic->f->linesize[1])) {
538 av_log(s->avctx, AV_LOG_ERROR,
539 "get_buffer() failed (stride changed)\n");
540 ff_mpeg_unref_picture(s, pic);
544 if (pic->f->linesize[1] != pic->f->linesize[2]) {
545 av_log(s->avctx, AV_LOG_ERROR,
546 "get_buffer() failed (uv stride mismatch)\n");
547 ff_mpeg_unref_picture(s, pic);
551 if (!s->edge_emu_buffer &&
552 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
553 av_log(s->avctx, AV_LOG_ERROR,
554 "get_buffer() failed to allocate context scratch buffers.\n");
555 ff_mpeg_unref_picture(s, pic);
/* Frees all per-picture side-data buffers (the inverse of
 * alloc_picture_tables) and resets the recorded allocation dimensions
 * so a later ff_alloc_picture will reallocate them. */
562 void ff_free_picture_tables(Picture *pic)
566 pic->alloc_mb_width =
567 pic->alloc_mb_height = 0;
569 av_buffer_unref(&pic->mb_var_buf);
570 av_buffer_unref(&pic->mc_mb_var_buf);
571 av_buffer_unref(&pic->mb_mean_buf);
572 av_buffer_unref(&pic->mbskip_table_buf);
573 av_buffer_unref(&pic->qscale_table_buf);
574 av_buffer_unref(&pic->mb_type_buf);
576 for (i = 0; i < 2; i++) {
577 av_buffer_unref(&pic->motion_val_buf[i]);
578 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocates the per-picture side-data buffers sized from the current
 * macroblock geometry: skip/qscale/type tables always, the encoder
 * statistics tables (mb_var/mc_mb_var/mb_mean) for encoding, and
 * motion-vector + ref-index buffers when H.263-family output, encoding,
 * or MV export/visualisation requires them.  Records the mb dimensions
 * used so ff_alloc_picture can detect stale tables.  Returns 0 or
 * AVERROR(ENOMEM); partially allocated buffers are left for the caller
 * to free via ff_free_picture_tables. */
582 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
584 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
585 const int mb_array_size = s->mb_stride * s->mb_height;
586 const int b8_array_size = s->b8_stride * s->mb_height * 2;
590 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
591 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
592 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
594 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
595 return AVERROR(ENOMEM);
598 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
599 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
600 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
601 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
602 return AVERROR(ENOMEM);
605 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv ||
606 (s->avctx->flags2 & CODEC_FLAG2_EXPORT_MVS)) {
607 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
608 int ref_index_size = 4 * mb_array_size;
610 for (i = 0; mv_size && i < 2; i++) {
611 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
612 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
613 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
614 return AVERROR(ENOMEM);
618 pic->alloc_mb_width = s->mb_width;
619 pic->alloc_mb_height = s->mb_height;
/* Ensures every per-picture side-data buffer is writable (unshared),
 * copying shared AVBufferRefs via av_buffer_make_writable.  The macro
 * propagates the first failure through ret. */
624 static int make_tables_writable(Picture *pic)
627 #define MAKE_WRITABLE(table) \
630 (ret = av_buffer_make_writable(&pic->table)) < 0)\
634 MAKE_WRITABLE(mb_var_buf);
635 MAKE_WRITABLE(mc_mb_var_buf);
636 MAKE_WRITABLE(mb_mean_buf);
637 MAKE_WRITABLE(mbskip_table_buf);
638 MAKE_WRITABLE(qscale_table_buf);
639 MAKE_WRITABLE(mb_type_buf);
641 for (i = 0; i < 2; i++) {
642 MAKE_WRITABLE(motion_val_buf[i]);
643 MAKE_WRITABLE(ref_index_buf[i]);
650 * Allocate a Picture.
651 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Frees stale side-data tables when the mb geometry changed, allocates
 * the frame buffer (unless shared, where data[] must already be set),
 * records the strides in the context, (re)allocates / makes writable
 * the side-data tables, and finally publishes the convenience pointers
 * (qscale_table/mb_type are offset by 2*mb_stride + 1, motion_val by 4
 * entries).  On failure the picture is unreferenced and its tables
 * freed; returns AVERROR(ENOMEM). */
653 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
657 if (pic->qscale_table_buf)
658 if ( pic->alloc_mb_width != s->mb_width
659 || pic->alloc_mb_height != s->mb_height)
660 ff_free_picture_tables(pic);
663 av_assert0(pic->f->data[0]);
666 av_assert0(!pic->f->buf[0]);
668 if (alloc_frame_buffer(s, pic) < 0)
671 s->linesize = pic->f->linesize[0];
672 s->uvlinesize = pic->f->linesize[1];
675 if (!pic->qscale_table_buf)
676 ret = alloc_picture_tables(s, pic);
678 ret = make_tables_writable(pic);
683 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
684 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
685 pic->mb_mean = pic->mb_mean_buf->data;
688 pic->mbskip_table = pic->mbskip_table_buf->data;
689 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
690 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
692 if (pic->motion_val_buf[0]) {
693 for (i = 0; i < 2; i++) {
694 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
695 pic->ref_index[i] = pic->ref_index_buf[i]->data;
701 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
702 ff_mpeg_unref_picture(s, pic);
703 ff_free_picture_tables(pic);
704 return AVERROR(ENOMEM);
708 * Deallocate a picture.
/* Releases the frame buffer (through the thread API except for the WM
 * image/screen codecs), drops hwaccel private data, frees the tables
 * if a reallocation is pending, and zeroes every Picture field past
 * mb_mean — the fields up to and including mb_mean are preserved. */
710 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
712 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
715 /* WM Image / Screen codecs allocate internal buffers with different
716 * dimensions / colorspaces; ignore user-defined callbacks for these. */
717 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
718 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
719 s->codec_id != AV_CODEC_ID_MSS2)
720 ff_thread_release_buffer(s->avctx, &pic->tf);
722 av_frame_unref(pic->f);
724 av_buffer_unref(&pic->hwaccel_priv_buf);
726 if (pic->needs_realloc)
727 ff_free_picture_tables(pic);
729 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Re-references dst's side-data buffers from src (only when the
 * underlying buffers differ), then copies the derived data pointers
 * and the allocation dimensions.  On a failed av_buffer_ref the macro
 * frees all of dst's tables and returns AVERROR(ENOMEM). */
732 static int update_picture_tables(Picture *dst, Picture *src)
736 #define UPDATE_TABLE(table)\
739 (!dst->table || dst->table->buffer != src->table->buffer)) {\
740 av_buffer_unref(&dst->table);\
741 dst->table = av_buffer_ref(src->table);\
743 ff_free_picture_tables(dst);\
744 return AVERROR(ENOMEM);\
749 UPDATE_TABLE(mb_var_buf);
750 UPDATE_TABLE(mc_mb_var_buf);
751 UPDATE_TABLE(mb_mean_buf);
752 UPDATE_TABLE(mbskip_table_buf);
753 UPDATE_TABLE(qscale_table_buf);
754 UPDATE_TABLE(mb_type_buf);
755 for (i = 0; i < 2; i++) {
756 UPDATE_TABLE(motion_val_buf[i]);
757 UPDATE_TABLE(ref_index_buf[i]);
/* buffers now shared — mirror the raw pointers derived from them */
760 dst->mb_var = src->mb_var;
761 dst->mc_mb_var = src->mc_mb_var;
762 dst->mb_mean = src->mb_mean;
763 dst->mbskip_table = src->mbskip_table;
764 dst->qscale_table = src->qscale_table;
765 dst->mb_type = src->mb_type;
766 for (i = 0; i < 2; i++) {
767 dst->motion_val[i] = src->motion_val[i];
768 dst->ref_index[i] = src->ref_index[i];
771 dst->alloc_mb_width = src->alloc_mb_width;
772 dst->alloc_mb_height = src->alloc_mb_height;
/* Makes dst a reference to src: thread-frame ref, side-data tables,
 * hwaccel private buffer, and the scalar bookkeeping fields.  dst must
 * be empty (asserted).  On any failure dst is unreferenced again. */
777 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
781 av_assert0(!dst->f->buf[0]);
782 av_assert0(src->f->buf[0]);
786 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
790 ret = update_picture_tables(dst, src);
794 if (src->hwaccel_picture_private) {
795 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
796 if (!dst->hwaccel_priv_buf)
798 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
801 dst->field_picture = src->field_picture;
802 dst->mb_var_sum = src->mb_var_sum;
803 dst->mc_mb_var_sum = src->mc_mb_var_sum;
804 dst->b_frame_score = src->b_frame_score;
805 dst->needs_realloc = src->needs_realloc;
806 dst->reference = src->reference;
807 dst->shared = src->shared;
811 ff_mpeg_unref_picture(s, dst);
/* Swaps the U and V block pointers (pblocks[4] <-> pblocks[5]); used
 * for the "VCR2" codec tag in init_duplicate_context /
 * ff_update_duplicate_context. */
815 static void exchange_uv(MpegEncContext *s)
820 s->pblocks[4] = s->pblocks[5];
/* Allocates the per-slice-context (thread-local) state: ME maps, the
 * optional noise-reduction error sums, the 12 transform blocks with
 * their pblocks[] pointers (U/V swapped for the VCR2 codec tag), and
 * for H.263-family output the AC prediction values; ac_val[0..2] are
 * Y/U/V views into one base allocation.  Odd mb_height gets extra
 * rows.  Returns -1 on failure; cleanup is deferred to
 * ff_mpv_common_end(). */
824 static int init_duplicate_context(MpegEncContext *s)
826 int y_size = s->b8_stride * (2 * s->mb_height + 1);
827 int c_size = s->mb_stride * (s->mb_height + 1);
828 int yc_size = y_size + 2 * c_size;
831 if (s->mb_height & 1)
832 yc_size += 2*s->b8_stride + 2*s->mb_stride;
839 s->obmc_scratchpad = NULL;
842 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
843 ME_MAP_SIZE * sizeof(uint32_t), fail)
844 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
845 ME_MAP_SIZE * sizeof(uint32_t), fail)
846 if (s->avctx->noise_reduction) {
847 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
848 2 * 64 * sizeof(int), fail)
851 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
852 s->block = s->blocks[0];
854 for (i = 0; i < 12; i++) {
855 s->pblocks[i] = &s->block[i];
857 if (s->avctx->codec_tag == AV_RL32("VCR2"))
860 if (s->out_format == FMT_H263) {
862 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
863 yc_size * sizeof(int16_t) * 16, fail);
864 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
865 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
866 s->ac_val[2] = s->ac_val[1] + c_size;
871 return -1; // free() through ff_mpv_common_end()
/* Frees everything init_duplicate_context / frame_size_alloc allocated
 * for one slice context.  obmc_scratchpad is only NULLed, not freed —
 * it aliases into me.scratchpad (see frame_size_alloc). */
874 static void free_duplicate_context(MpegEncContext *s)
879 av_freep(&s->edge_emu_buffer);
880 av_freep(&s->me.scratchpad);
884 s->obmc_scratchpad = NULL;
886 av_freep(&s->dct_error_sum);
887 av_freep(&s->me.map);
888 av_freep(&s->me.score_map);
889 av_freep(&s->blocks);
890 av_freep(&s->ac_val_base);
/* Copies the per-slice-context fields (buffers, scratchpads, ME state)
 * from src into bak; used by ff_update_duplicate_context to preserve
 * them across the wholesale memcpy of the context. */
894 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
896 #define COPY(a) bak->a = src->a
897 COPY(edge_emu_buffer);
902 COPY(obmc_scratchpad);
909 COPY(me.map_generation);
/* Copies src into dst wholesale while keeping dst's thread-local
 * buffers: back them up, memcpy the context, restore them, then rebuild
 * the pblocks[] pointers (with the VCR2 U/V swap) and lazily allocate
 * the scratch buffers for dst's linesize. */
921 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
925 // FIXME copy only needed parts
927 backup_duplicate_context(&bak, dst);
928 memcpy(dst, src, sizeof(MpegEncContext));
929 backup_duplicate_context(dst, &bak);
930 for (i = 0; i < 12; i++) {
931 dst->pblocks[i] = &dst->block[i];
933 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
935 if (!dst->edge_emu_buffer &&
936 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
937 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
938 "scratch buffers.\n");
941 // STOP_TIMER("update_duplicate_context")
942 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading state transfer: synchronises the destination thread's
 * MpegEncContext with the source thread's after a frame.  Handles first-
 * time initialisation (structure copy + ff_mpv_common_init), frame-size
 * changes, re-referencing all pictures and the current/last/next picture
 * pointers (rebased into dst's picture array), bulk-copying the MPEG-4
 * timing and MPEG-2/interlacing field ranges via memcpy between marker
 * fields, duplicating any pending bitstream buffer, and making sure the
 * linesize-dependent scratch buffers exist. */
946 int ff_mpeg_update_thread_context(AVCodecContext *dst,
947 const AVCodecContext *src)
950 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
957 // FIXME can parameters change on I-frames?
958 // in that case dst may need a reinit
959 if (!s->context_initialized) {
961 memcpy(s, s1, sizeof(MpegEncContext));
/* the copied bitstream buffer belongs to s1 — drop the aliases */
964 s->bitstream_buffer = NULL;
965 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
967 if (s1->context_initialized){
968 // s->picture_range_start += MAX_PICTURE_COUNT;
969 // s->picture_range_end += MAX_PICTURE_COUNT;
971 if((err = ff_mpv_common_init(s)) < 0){
972 memset(s, 0, sizeof(MpegEncContext));
979 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
980 s->context_reinit = 0;
981 s->height = s1->height;
982 s->width = s1->width;
983 if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
987 s->avctx->coded_height = s1->avctx->coded_height;
988 s->avctx->coded_width = s1->avctx->coded_width;
989 s->avctx->width = s1->avctx->width;
990 s->avctx->height = s1->avctx->height;
992 s->coded_picture_number = s1->coded_picture_number;
993 s->picture_number = s1->picture_number;
995 av_assert0(!s->picture || s->picture != s1->picture);
997 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
998 ff_mpeg_unref_picture(s, &s->picture[i]);
999 if (s1->picture[i].f->buf[0] &&
1000 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
1004 #define UPDATE_PICTURE(pic)\
1006 ff_mpeg_unref_picture(s, &s->pic);\
1007 if (s1->pic.f && s1->pic.f->buf[0])\
1008 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
1010 ret = update_picture_tables(&s->pic, &s1->pic);\
1015 UPDATE_PICTURE(current_picture);
1016 UPDATE_PICTURE(last_picture);
1017 UPDATE_PICTURE(next_picture);
/* translate a pointer into s1->picture[] into the same slot of s->picture[] */
1019 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
1020 ((pic && pic >= old_ctx->picture && \
1021 pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
1022 &new_ctx->picture[pic - old_ctx->picture] : NULL)
1024 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1025 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1026 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1028 // Error/bug resilience
1029 s->next_p_frame_damaged = s1->next_p_frame_damaged;
1030 s->workaround_bugs = s1->workaround_bugs;
1031 s->padding_bug_score = s1->padding_bug_score;
1033 // MPEG4 timing info
1034 memcpy(&s->last_time_base, &s1->last_time_base,
1035 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1036 (char *) &s1->last_time_base);
1039 s->max_b_frames = s1->max_b_frames;
1040 s->low_delay = s1->low_delay;
1041 s->droppable = s1->droppable;
1043 // DivX handling (doesn't work)
1044 s->divx_packed = s1->divx_packed;
1046 if (s1->bitstream_buffer) {
1047 if (s1->bitstream_buffer_size +
1048 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
1049 av_fast_malloc(&s->bitstream_buffer,
1050 &s->allocated_bitstream_buffer_size,
1051 s1->allocated_bitstream_buffer_size);
1052 s->bitstream_buffer_size = s1->bitstream_buffer_size;
1053 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1054 s1->bitstream_buffer_size);
1055 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1056 FF_INPUT_BUFFER_PADDING_SIZE);
1059 // linesize dependend scratch buffer allocation
1060 if (!s->edge_emu_buffer)
1062 if (frame_size_alloc(s, s1->linesize) < 0) {
1063 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1064 "scratch buffers.\n");
1065 return AVERROR(ENOMEM);
1068 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1069 "be allocated due to unknown size.\n");
1072 // MPEG2/interlacing info
1073 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1074 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1076 if (!s1->first_field) {
1077 s->last_pict_type = s1->pict_type;
1078 if (s1->current_picture_ptr)
1079 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1086 * Set the given MpegEncContext to common defaults
1087 * (same for encoding and decoding).
1088 * The changed fields will not depend upon the
1089 * prior state of the MpegEncContext.
1091 void ff_mpv_common_defaults(MpegEncContext *s)
/* MPEG-1 DC scale and identity chroma-qscale tables are the defaults */
1093 s->y_dc_scale_table =
1094 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1095 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1096 s->progressive_frame = 1;
1097 s->progressive_sequence = 1;
1098 s->picture_structure = PICT_FRAME;
1100 s->coded_picture_number = 0;
1101 s->picture_number = 0;
1106 s->slice_context_count = 1;
1110 * Set the given MpegEncContext to defaults for decoding.
1111 * the changed fields will not depend upon
1112 * the prior state of the MpegEncContext.
1114 void ff_mpv_decode_defaults(MpegEncContext *s)
1116 ff_mpv_common_defaults(s);
/* Copies the decode-relevant AVCodecContext parameters (coded size,
 * codec id, bug workarounds, flags) into the MpegEncContext and
 * normalises the fourcc tags to upper case. */
1119 void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
1122 s->width = avctx->coded_width;
1123 s->height = avctx->coded_height;
1124 s->codec_id = avctx->codec->id;
1125 s->workaround_bugs = avctx->workaround_bugs;
1126 s->flags = avctx->flags;
1127 s->flags2 = avctx->flags2;
1129 /* convert fourcc to upper case */
1130 s->codec_tag = avpriv_toupper4(avctx->codec_tag);
1132 s->stream_codec_tag = avpriv_toupper4(avctx->stream_codec_tag);
/* Initialises the error-resilience context: mirrors the macroblock
 * geometry and shared tables from the MpegEncContext, allocates the
 * ER temp buffer and the per-MB error status table, and installs
 * mpeg_er_decode_mb as the concealment callback.  On allocation
 * failure both buffers are freed and AVERROR(ENOMEM) is returned. */
1135 static int init_er(MpegEncContext *s)
1137 ERContext *er = &s->er;
1138 int mb_array_size = s->mb_height * s->mb_stride;
1141 er->avctx = s->avctx;
1142 er->mecc = &s->mecc;
1144 er->mb_index2xy = s->mb_index2xy;
1145 er->mb_num = s->mb_num;
1146 er->mb_width = s->mb_width;
1147 er->mb_height = s->mb_height;
1148 er->mb_stride = s->mb_stride;
1149 er->b8_stride = s->b8_stride;
1151 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1152 er->error_status_table = av_mallocz(mb_array_size);
1153 if (!er->er_temp_buffer || !er->error_status_table)
1156 er->mbskip_table = s->mbskip_table;
1157 er->mbintra_table = s->mbintra_table;
1159 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1160 er->dc_val[i] = s->dc_val[i];
1162 er->decode_mb = mpeg_er_decode_mb;
1167 av_freep(&er->er_temp_buffer);
1168 av_freep(&er->error_status_table);
1169 return AVERROR(ENOMEM);
1173 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Derives the MB geometry (mb_width/stride, b8_stride, mb_num, edge
 * positions) from width/height, then allocates: the mb_index2xy map,
 * all encoder MV tables (with their +mb_stride+1 offset pointers), the
 * MB type / lambda / complexity tables, the interlaced direct-mode
 * field MV and select tables (MPEG-4 or CODEC_FLAG_INTERLACED_ME),
 * H.263 coded-block/cbp/pred_dir tables, DC prediction values
 * (initialised to 1024), the mbintra table (all 1) and the mbskip
 * table.  Any failed allocation jumps to the ENOMEM return. */
1175 static int init_context_frame(MpegEncContext *s)
1177 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1179 s->mb_width = (s->width + 15) / 16;
1180 s->mb_stride = s->mb_width + 1;
1181 s->b8_stride = s->mb_width * 2 + 1;
1182 mb_array_size = s->mb_height * s->mb_stride;
1183 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1185 /* set default edge pos, will be overridden
1186 * in decode_header if needed */
1187 s->h_edge_pos = s->mb_width * 16;
1188 s->v_edge_pos = s->mb_height * 16;
1190 s->mb_num = s->mb_width * s->mb_height;
1195 s->block_wrap[3] = s->b8_stride;
1197 s->block_wrap[5] = s->mb_stride;
1199 y_size = s->b8_stride * (2 * s->mb_height + 1);
1200 c_size = s->mb_stride * (s->mb_height + 1);
1201 yc_size = y_size + 2 * c_size;
1203 if (s->mb_height & 1)
1204 yc_size += 2*s->b8_stride + 2*s->mb_stride;
1206 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
1207 for (y = 0; y < s->mb_height; y++)
1208 for (x = 0; x < s->mb_width; x++)
1209 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1211 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1214 /* Allocate MV tables */
1215 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1216 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1217 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1218 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1219 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1220 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
/* public pointers skip the first row + one column of padding */
1221 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1222 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1223 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1224 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1225 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1226 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1228 /* Allocate MB type table */
1229 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1231 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1233 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1234 mb_array_size * sizeof(float), fail);
1235 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1236 mb_array_size * sizeof(float), fail);
1240 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1241 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1242 /* interlaced direct mode decoding tables */
1243 for (i = 0; i < 2; i++) {
1245 for (j = 0; j < 2; j++) {
1246 for (k = 0; k < 2; k++) {
1247 FF_ALLOCZ_OR_GOTO(s->avctx,
1248 s->b_field_mv_table_base[i][j][k],
1249 mv_table_size * 2 * sizeof(int16_t),
1251 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1254 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1255 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1256 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1258 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1261 if (s->out_format == FMT_H263) {
1263 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1264 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1266 /* cbp, ac_pred, pred_dir */
1267 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1268 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1271 if (s->h263_pred || s->h263_plus || !s->encoding) {
1273 // MN: we need these for error resilience of intra-frames
1274 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1275 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1276 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1277 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 = neutral DC predictor (no prediction yet) */
1278 for (i = 0; i < yc_size; i++)
1279 s->dc_val_base[i] = 1024;
1282 /* which mb is a intra block */
1283 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1284 memset(s->mbintra_table, 1, mb_array_size);
1286 /* init macroblock skip table */
1287 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1288 // Note the + 1 is for a quicker mpeg4 slice_end detection
1292 return AVERROR(ENOMEM);
1296 * init common structure for both encoder and decoder.
1297 * this assumes that some variables like width/height are already set
1299 av_cold int ff_mpv_common_init(MpegEncContext *s)
// Default slice/thread count: one context per slice thread, else a single context.
1302 int nb_slices = (HAVE_THREADS &&
1303 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1304 s->avctx->thread_count : 1;
// An encoder with an explicit slice count overrides the thread-derived default.
1306 if (s->encoding && s->avctx->slices)
1307 nb_slices = s->avctx->slices;
// Interlaced MPEG-2 rounds mb_height up to a multiple of two field macroblock rows
// (32-pixel units); everything else uses plain 16-pixel macroblock rows.
1309 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1310 s->mb_height = (s->height + 31) / 32 * 2;
1312 s->mb_height = (s->height + 15) / 16;
1314 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1315 av_log(s->avctx, AV_LOG_ERROR,
1316 "decoding to AV_PIX_FMT_NONE is not supported.\n");
// Clamp the slice count: never more slice contexts than MAX_THREADS or than
// macroblock rows (each slice needs at least one row).
1320 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1323 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1325 max_slices = MAX_THREADS;
1326 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1327 " reducing to %d\n", nb_slices, max_slices);
1328 nb_slices = max_slices;
// Reject invalid/overflowing dimensions early (0x0 is allowed through).
1331 if ((s->width || s->height) &&
1332 av_image_check_size(s->width, s->height, 0, s->avctx))
1337 s->flags = s->avctx->flags;
1338 s->flags2 = s->avctx->flags2;
1340 /* set chroma shifts */
1341 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1343 &s->chroma_y_shift);
// Allocate the picture pool and one AVFrame per slot; the four singleton
// pictures (last/next/current/new) get their own frames below.
1346 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1347 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1348 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1349 s->picture[i].f = av_frame_alloc();
1350 if (!s->picture[i].f)
1353 memset(&s->next_picture, 0, sizeof(s->next_picture));
1354 memset(&s->last_picture, 0, sizeof(s->last_picture));
1355 memset(&s->current_picture, 0, sizeof(s->current_picture));
1356 memset(&s->new_picture, 0, sizeof(s->new_picture));
1357 s->next_picture.f = av_frame_alloc();
1358 if (!s->next_picture.f)
1360 s->last_picture.f = av_frame_alloc();
1361 if (!s->last_picture.f)
1363 s->current_picture.f = av_frame_alloc();
1364 if (!s->current_picture.f)
1366 s->new_picture.f = av_frame_alloc();
1367 if (!s->new_picture.f)
// Resolution-dependent tables (MV tables, mb_type, ...) live in init_context_frame()
// so they can be reallocated on a resolution change without a full reinit.
1370 if (init_context_frame(s))
1373 s->parse_context.state = -1;
1375 s->context_initialized = 1;
1376 s->thread_context[0] = s;
1378 // if (s->width && s->height) {
// For slice threading, clone the main context for each extra slice and give
// every context its own [start_mb_y, end_mb_y) row range, rounded to balance rows.
1379 if (nb_slices > 1) {
1380 for (i = 1; i < nb_slices; i++) {
1381 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1382 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1385 for (i = 0; i < nb_slices; i++) {
1386 if (init_duplicate_context(s->thread_context[i]) < 0)
1388 s->thread_context[i]->start_mb_y =
1389 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1390 s->thread_context[i]->end_mb_y =
1391 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1394 if (init_duplicate_context(s) < 0)
1397 s->end_mb_y = s->mb_height;
1399 s->slice_context_count = nb_slices;
// Error path: tear down everything allocated so far.
1404 ff_mpv_common_end(s);
1409 * Frees and resets MpegEncContext fields depending on the resolution.
1410 * Is used during resolution changes to avoid a full reinitialization of the
1413 static void free_context_frame(MpegEncContext *s)
// Exact inverse of init_context_frame(): every table allocated there is freed here.
1417 av_freep(&s->mb_type);
// The *_mv_table pointers are offsets into the *_base allocations, so only the
// bases are freed and the aliases are NULLed to avoid dangling pointers.
1418 av_freep(&s->p_mv_table_base);
1419 av_freep(&s->b_forw_mv_table_base);
1420 av_freep(&s->b_back_mv_table_base);
1421 av_freep(&s->b_bidir_forw_mv_table_base);
1422 av_freep(&s->b_bidir_back_mv_table_base);
1423 av_freep(&s->b_direct_mv_table_base);
1424 s->p_mv_table = NULL;
1425 s->b_forw_mv_table = NULL;
1426 s->b_back_mv_table = NULL;
1427 s->b_bidir_forw_mv_table = NULL;
1428 s->b_bidir_back_mv_table = NULL;
1429 s->b_direct_mv_table = NULL;
// Interlaced/field MV tables: [i][j][k] indexes field parity combinations.
1430 for (i = 0; i < 2; i++) {
1431 for (j = 0; j < 2; j++) {
1432 for (k = 0; k < 2; k++) {
1433 av_freep(&s->b_field_mv_table_base[i][j][k]);
1434 s->b_field_mv_table[i][j][k] = NULL;
1436 av_freep(&s->b_field_select_table[i][j]);
1437 av_freep(&s->p_field_mv_table_base[i][j]);
1438 s->p_field_mv_table[i][j] = NULL;
1440 av_freep(&s->p_field_select_table[i]);
1443 av_freep(&s->dc_val_base);
1444 av_freep(&s->coded_block_base);
1445 av_freep(&s->mbintra_table);
1446 av_freep(&s->cbp_table);
1447 av_freep(&s->pred_dir_table);
1449 av_freep(&s->mbskip_table);
1451 av_freep(&s->er.error_status_table);
1452 av_freep(&s->er.er_temp_buffer);
1453 av_freep(&s->mb_index2xy);
1454 av_freep(&s->lambda_table);
1456 av_freep(&s->cplx_tab);
1457 av_freep(&s->bits_tab);
// Reset strides so the next init_context_frame() recomputes them.
1459 s->linesize = s->uvlinesize = 0;
// Reinitialize the resolution-dependent parts of the context after a mid-stream
// frame size change, without tearing down the whole MpegEncContext.
// Returns 0 on success or a negative AVERROR code.
1462 int ff_mpv_common_frame_size_change(MpegEncContext *s)
1466 if (!s->context_initialized)
1467 return AVERROR(EINVAL);
// Free per-slice duplicate contexts first; contexts 1..n-1 were heap-cloned
// from the main context, so only those are freed (index 0 is s itself).
1469 if (s->slice_context_count > 1) {
1470 for (i = 0; i < s->slice_context_count; i++) {
1471 free_duplicate_context(s->thread_context[i]);
1473 for (i = 1; i < s->slice_context_count; i++) {
1474 av_freep(&s->thread_context[i]);
1477 free_duplicate_context(s);
1479 free_context_frame(s);
// Existing pool pictures keep their buffers but are flagged for reallocation
// at the new size on next use.
1482 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1483 s->picture[i].needs_realloc = 1;
1486 s->last_picture_ptr =
1487 s->next_picture_ptr =
1488 s->current_picture_ptr = NULL;
// Recompute mb_height: interlaced MPEG-2 needs an even number of field MB rows.
1491 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1492 s->mb_height = (s->height + 31) / 32 * 2;
1494 s->mb_height = (s->height + 15) / 16;
1496 if ((s->width || s->height) &&
1497 (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
1500 if ((err = init_context_frame(s)))
1503 s->thread_context[0] = s;
// Rebuild the slice contexts with the same layout logic as ff_mpv_common_init().
1505 if (s->width && s->height) {
1506 int nb_slices = s->slice_context_count;
1507 if (nb_slices > 1) {
1508 for (i = 1; i < nb_slices; i++) {
1509 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1510 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1513 for (i = 0; i < nb_slices; i++) {
1514 if ((err = init_duplicate_context(s->thread_context[i])) < 0)
1516 s->thread_context[i]->start_mb_y =
1517 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1518 s->thread_context[i]->end_mb_y =
1519 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1522 err = init_duplicate_context(s);
1526 s->end_mb_y = s->mb_height;
1528 s->slice_context_count = nb_slices;
// Error path: full teardown — the context is no longer usable.
1533 ff_mpv_common_end(s);
1538 /* Free everything allocated by ff_mpv_common_init() (common to encoder and
1539  * decoder) and reset the context so it could be reinitialized. */
1539 void ff_mpv_common_end(MpegEncContext *s)
// Slice contexts: free duplicates for all, then the heap clones (1..n-1 only;
// index 0 is s itself and must not be freed).
1542 if (s->slice_context_count > 1) {
1543 for (i = 0; i < s->slice_context_count; i++) {
1544 free_duplicate_context(s->thread_context[i]);
1546 for (i = 1; i < s->slice_context_count; i++) {
1547 av_freep(&s->thread_context[i]);
1549 s->slice_context_count = 1;
1550 } else free_duplicate_context(s);
1552 av_freep(&s->parse_context.buffer);
1553 s->parse_context.buffer_size = 0;
1555 av_freep(&s->bitstream_buffer);
1556 s->allocated_bitstream_buffer_size = 0;
// Release the picture pool: per-picture tables, buffer refs, then the AVFrames.
1559 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1560 ff_free_picture_tables(&s->picture[i]);
1561 ff_mpeg_unref_picture(s, &s->picture[i]);
1562 av_frame_free(&s->picture[i].f);
1565 av_freep(&s->picture);
// The four singleton pictures get the same three-step release.
1566 ff_free_picture_tables(&s->last_picture);
1567 ff_mpeg_unref_picture(s, &s->last_picture);
1568 av_frame_free(&s->last_picture.f);
1569 ff_free_picture_tables(&s->current_picture);
1570 ff_mpeg_unref_picture(s, &s->current_picture);
1571 av_frame_free(&s->current_picture.f);
1572 ff_free_picture_tables(&s->next_picture);
1573 ff_mpeg_unref_picture(s, &s->next_picture);
1574 av_frame_free(&s->next_picture.f);
1575 ff_free_picture_tables(&s->new_picture);
1576 ff_mpeg_unref_picture(s, &s->new_picture);
1577 av_frame_free(&s->new_picture.f);
1579 free_context_frame(s);
1581 s->context_initialized = 0;
1582 s->last_picture_ptr =
1583 s->next_picture_ptr =
1584 s->current_picture_ptr = NULL;
1585 s->linesize = s->uvlinesize = 0;
// Build the derived lookup tables (max_level, max_run, index_run) of an RLTable
// from its raw run/level tables. If static_store is non-NULL the results are
// placed in that caller-provided static buffer, otherwise heap-allocated.
1588 av_cold void ff_init_rl(RLTable *rl,
1589 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1591 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1592 uint8_t index_run[MAX_RUN + 1];
1593 int last, run, level, start, end, i;
1595 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1596 if (static_store && rl->max_level[0])
1599 /* compute max_level[], max_run[] and index_run[] */
// last == 0: codes before the "last coefficient" flag; last == 1: codes after.
1600 for (last = 0; last < 2; last++) {
// rl->n is used as the "no entry" sentinel in index_run.
1609 memset(max_level, 0, MAX_RUN + 1);
1610 memset(max_run, 0, MAX_LEVEL + 1);
1611 memset(index_run, rl->n, MAX_RUN + 1);
1612 for (i = start; i < end; i++) {
1613 run = rl->table_run[i];
1614 level = rl->table_level[i];
// Record only the first table index for each run value.
1615 if (index_run[run] == rl->n)
1617 if (level > max_level[run])
1618 max_level[run] = level;
1619 if (run > max_run[level])
1620 max_run[level] = run;
// Publish the scratch tables: the static buffer is partitioned as
// [max_level | max_run | index_run]; otherwise each table is malloc'ed.
// NOTE(review): the av_malloc() results are used unchecked — presumably
// acceptable for these tiny init-time tables, but there is no OOM handling.
1623 rl->max_level[last] = static_store[last];
1625 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1626 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1628 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1630 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1631 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1633 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1635 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1636 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
// Build the per-qscale RL-VLC decode tables: decode the VLC once into a local
// table, then bake run/level (already scaled by qmul/qadd for each q) into
// rl->rl_vlc[q] so the decoder can skip the separate dequant step.
1640 av_cold void ff_init_vlc_rl(RLTable *rl, unsigned static_size)
1643 VLC_TYPE table[1500][2] = {{0}};
1644 VLC vlc = { .table = table, .table_allocated = static_size };
1645 av_assert0(static_size <= FF_ARRAY_ELEMS(table));
1646 init_vlc(&vlc, 9, rl->n + 1, &rl->table_vlc[0][1], 4, 2, &rl->table_vlc[0][0], 4, 2, INIT_VLC_USE_NEW_STATIC);
1648 for (q = 0; q < 32; q++) {
// H.263-style dequant offset: qadd is odd, derived from q (0 handled above
// in elided code — presumably qmul/qadd special-cased for q == 0).
1650 int qadd = (q - 1) | 1;
1656 for (i = 0; i < vlc.table_size; i++) {
1657 int code = vlc.table[i][0];
1658 int len = vlc.table[i][1];
1661 if (len == 0) { // illegal code
1664 } else if (len < 0) { // more bits needed
1668 if (code == rl->n) { // esc
// Normal code: pre-apply quantizer scaling; run is stored +1, and codes at or
// past rl->last get +192 to encode the "last coefficient" flag in-band.
1672 run = rl->table_run[code] + 1;
1673 level = rl->table_level[code] * qmul + qadd;
1674 if (code >= rl->last) run += 192;
1677 rl->rl_vlc[q][i].len = len;
1678 rl->rl_vlc[q][i].level = level;
1679 rl->rl_vlc[q][i].run = run;
// Unreference every picture in the pool that is not marked as a reference
// frame, returning its buffers for reuse.
1684 static void release_unused_pictures(MpegEncContext *s)
1688 /* release non reference frames */
1689 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1690 if (!s->picture[i].reference)
1691 ff_mpeg_unref_picture(s, &s->picture[i]);
// Decide whether a picture pool slot can be recycled. The current last picture
// is never reusable; a slot with no data buffer, or one flagged needs_realloc
// that is not held as a delayed reference, presumably is.
1695 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1697 if (pic == s->last_picture_ptr)
1699 if (!pic->f->buf[0])
1701 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
// Find a reusable slot in the picture pool. First pass prefers fully empty
// slots (no buffer at all); second pass accepts anything pic_is_unused()
// allows. Returns the slot index, or falls through to a fatal error below.
1706 static int find_unused_picture(MpegEncContext *s, int shared)
1711 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1712 if (!s->picture[i].f->buf[0] && &s->picture[i] != s->last_picture_ptr)
1716 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1717 if (pic_is_unused(s, &s->picture[i]))
1722 av_log(s->avctx, AV_LOG_FATAL,
1723 "Internal error, picture buffer overflow\n");
1724 /* We could return -1, but the codec would crash trying to draw into a
1725 * non-existing frame anyway. This is safer than waiting for a random crash.
1726 * Also the return of this is never useful, an encoder must only allocate
1727 * as much as allowed in the specification. This has no relationship to how
1728 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1729 * enough for such valid streams).
1730 * Plus, a decoder has to check stream validity and remove frames if too
1731 * many reference frames are around. Waiting for "OOM" is not correct at
1732 * all. Similarly, missing reference frames have to be replaced by
1733 * interpolated/MC frames, anything else is a bug in the codec ...
// Public wrapper around find_unused_picture(): additionally consumes the
// needs_realloc flag by freeing the slot's stale tables and buffer refs so the
// caller gets a clean slot to (re)allocate at the current resolution.
1739 int ff_find_unused_picture(MpegEncContext *s, int shared)
1741 int ret = find_unused_picture(s, shared);
1743 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1744 if (s->picture[ret].needs_realloc) {
1745 s->picture[ret].needs_realloc = 0;
1746 ff_free_picture_tables(&s->picture[ret]);
1747 ff_mpeg_unref_picture(s, &s->picture[ret]);
// Fill a frame with mid-gray (0x80 in both luma and chroma planes), honoring
// the pixel format's chroma subsampling. Used by the FF_DEBUG_NOMC debug mode.
1753 static void gray_frame(AVFrame *frame)
1755 int i, h_chroma_shift, v_chroma_shift;
1757 av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1759 for(i=0; i<frame->height; i++)
1760 memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
// Chroma planes are smaller by the subsampling shifts; round sizes up.
1761 for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1762 memset(frame->data[1] + frame->linesize[1]*i,
1763 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1764 memset(frame->data[2] + frame->linesize[2]*i,
1765 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1770 * generic function called after decoding
1771 * the header and before a frame is decoded.
1773 int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1779 if (!ff_thread_can_start_frame(avctx)) {
1780 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1784 /* mark & release old frames */
// A non-B frame pushes the reference chain forward, so the old last picture
// can be released (unless it is also the next picture).
1785 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1786 s->last_picture_ptr != s->next_picture_ptr &&
1787 s->last_picture_ptr->f->buf[0]) {
1788 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1791 /* release forgotten pictures */
1792 /* if (mpeg124/h263) */
// Any pool picture still marked as reference but no longer reachable via
// last/next is a leftover ("zombie") and is dropped.
1793 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1794 if (&s->picture[i] != s->last_picture_ptr &&
1795 &s->picture[i] != s->next_picture_ptr &&
1796 s->picture[i].reference && !s->picture[i].needs_realloc) {
1797 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1798 av_log(avctx, AV_LOG_ERROR,
1799 "releasing zombie picture\n");
1800 ff_mpeg_unref_picture(s, &s->picture[i]);
1804 ff_mpeg_unref_picture(s, &s->current_picture);
1806 release_unused_pictures(s);
// Pick the destination picture: reuse a pre-set empty current picture if
// present, otherwise grab a free pool slot.
1808 if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1809 // we already have a unused image
1810 // (maybe it was set before reading the header)
1811 pic = s->current_picture_ptr;
1813 i = ff_find_unused_picture(s, 0);
1815 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1818 pic = &s->picture[i];
1822 if (!s->droppable) {
1823 if (s->pict_type != AV_PICTURE_TYPE_B)
1827 pic->f->coded_picture_number = s->coded_picture_number++;
1829 if (ff_alloc_picture(s, pic, 0) < 0)
1832 s->current_picture_ptr = pic;
1833 // FIXME use only the vars from current_pic
// Field pictures derive top_field_first from which field comes first; frame
// pictures inherit the container/bitstream flag.
1834 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1835 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1836 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1837 if (s->picture_structure != PICT_FRAME)
1838 s->current_picture_ptr->f->top_field_first =
1839 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1841 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1842 !s->progressive_sequence;
1843 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1845 s->current_picture_ptr->f->pict_type = s->pict_type;
1846 // if (s->flags && CODEC_FLAG_QSCALE)
1847 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1848 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1850 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1851 s->current_picture_ptr)) < 0)
// Advance the reference chain for P/I frames: next becomes last, current
// becomes next.
1854 if (s->pict_type != AV_PICTURE_TYPE_B) {
1855 s->last_picture_ptr = s->next_picture_ptr;
1857 s->next_picture_ptr = s->current_picture_ptr;
1859 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1860 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1861 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1862 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1863 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1864 s->pict_type, s->droppable);
// Missing last reference (stream starts on a non-keyframe, a B frame, or a
// field keyframe): synthesize a dummy gray/green reference so motion
// compensation has something to read from instead of crashing.
1866 if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1867 (s->pict_type != AV_PICTURE_TYPE_I ||
1868 s->picture_structure != PICT_FRAME)) {
1869 int h_chroma_shift, v_chroma_shift;
1870 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1871 &h_chroma_shift, &v_chroma_shift);
1872 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1873 av_log(avctx, AV_LOG_DEBUG,
1874 "allocating dummy last picture for B frame\n");
1875 else if (s->pict_type != AV_PICTURE_TYPE_I)
1876 av_log(avctx, AV_LOG_ERROR,
1877 "warning: first frame is no keyframe\n");
1878 else if (s->picture_structure != PICT_FRAME)
1879 av_log(avctx, AV_LOG_DEBUG,
1880 "allocate dummy last picture for field based first keyframe\n");
1882 /* Allocate a dummy frame */
1883 i = ff_find_unused_picture(s, 0);
1885 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1888 s->last_picture_ptr = &s->picture[i];
1890 s->last_picture_ptr->reference = 3;
1891 s->last_picture_ptr->f->key_frame = 0;
1892 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1894 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1895 s->last_picture_ptr = NULL;
// Paint the dummy mid-gray; hwaccel surfaces cannot be written directly.
1899 if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1900 for(i=0; i<avctx->height; i++)
1901 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1902 0x80, avctx->width);
1903 for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1904 memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1905 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1906 memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1907 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
// FLV1/H.263 use luma 16 for the dummy (dark green with the 0x80 chroma).
1910 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1911 for(i=0; i<avctx->height; i++)
1912 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
// Mark both fields of the dummy as fully decoded for frame threading.
1916 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1917 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
// Same trick for a missing forward reference of a B frame.
1919 if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1920 s->pict_type == AV_PICTURE_TYPE_B) {
1921 /* Allocate a dummy frame */
1922 i = ff_find_unused_picture(s, 0);
1924 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1927 s->next_picture_ptr = &s->picture[i];
1929 s->next_picture_ptr->reference = 3;
1930 s->next_picture_ptr->f->key_frame = 0;
1931 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1933 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1934 s->next_picture_ptr = NULL;
1937 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1938 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1941 #if 0 // BUFREF-FIXME
1942 memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1943 memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
// Refresh the working copies (last/next) from the pointers chosen above.
1945 if (s->last_picture_ptr) {
1946 ff_mpeg_unref_picture(s, &s->last_picture);
1947 if (s->last_picture_ptr->f->buf[0] &&
1948 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1949 s->last_picture_ptr)) < 0)
1952 if (s->next_picture_ptr) {
1953 ff_mpeg_unref_picture(s, &s->next_picture);
1954 if (s->next_picture_ptr->f->buf[0] &&
1955 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1956 s->next_picture_ptr)) < 0)
1960 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1961 s->last_picture_ptr->f->buf[0]));
// Field pictures: address the selected field by offsetting (bottom field)
// and doubling the line strides so one "line" steps over the other field.
1963 if (s->picture_structure!= PICT_FRAME) {
1965 for (i = 0; i < 4; i++) {
1966 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1967 s->current_picture.f->data[i] +=
1968 s->current_picture.f->linesize[i];
1970 s->current_picture.f->linesize[i] *= 2;
1971 s->last_picture.f->linesize[i] *= 2;
1972 s->next_picture.f->linesize[i] *= 2;
1976 s->err_recognition = avctx->err_recognition;
1978 /* set dequantizer, we can't do it during init as
1979 * it might change for mpeg4 and we can't do it in the header
1980 * decode as init is not called for mpeg4 there yet */
1981 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1982 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1983 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1984 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1985 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1986 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1988 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1989 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
// Debug aid: blank the frame so only freshly decoded data is visible.
1992 if (s->avctx->debug & FF_DEBUG_NOMC) {
1993 gray_frame(s->current_picture_ptr->f);
1999 /* called after a frame has been decoded. */
2000 void ff_mpv_frame_end(MpegEncContext *s)
// Signal frame-threading consumers that this reference frame is complete.
2004 if (s->current_picture.reference)
2005 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
// Clip a line segment (sx,sy)-(ex,ey) against the x range [0, maxx],
// interpolating the y endpoints; the int64_t intermediates avoid 32-bit
// overflow in the cross-multiplication. Recurses with swapped endpoints,
// presumably to normalize the segment direction before clipping.
2010 static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
2013 return clip_line(ex, ey, sx, sy, maxx);
2018 *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
2025 *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
2033 * Draw a line from (ex, ey) -> (sx, sy).
2034 * @param w width of the image
2035 * @param h height of the image
2036 * @param stride stride/linesize of the image
2037 * @param color color of the arrow
2039 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
2040 int w, int h, int stride, int color)
// Clip against both image axes first (note the swapped-axis second call),
// then clamp as a safety net.
2044 if (clip_line(&sx, &sy, &ex, &ey, w - 1))
2046 if (clip_line(&sy, &sx, &ey, &ex, h - 1))
2049 sx = av_clip(sx, 0, w - 1);
2050 sy = av_clip(sy, 0, h - 1);
2051 ex = av_clip(ex, 0, w - 1);
2052 ey = av_clip(ey, 0, h - 1);
2054 buf[sy * stride + sx] += color;
// Step along the major axis; the 16.16 fixed-point fraction fr blends the
// color between the two adjacent pixels (simple anti-aliasing).
2056 if (FFABS(ex - sx) > FFABS(ey - sy)) {
2058 FFSWAP(int, sx, ex);
2059 FFSWAP(int, sy, ey);
2061 buf += sx + sy * stride;
2063 f = ((ey - sy) << 16) / ex;
2064 for (x = 0; x <= ex; x++) {
2066 fr = (x * f) & 0xFFFF;
2067 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2068 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
// Steep line: same scheme with x and y roles exchanged.
2072 FFSWAP(int, sx, ex);
2073 FFSWAP(int, sy, ey);
2075 buf += sx + sy * stride;
2078 f = ((ex - sx) << 16) / ey;
2081 for(y= 0; y <= ey; y++){
2083 fr = (y*f) & 0xFFFF;
2084 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2085 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
2091 * Draw an arrow from (ex, ey) -> (sx, sy).
2092 * @param w width of the image
2093 * @param h height of the image
2094 * @param stride stride/linesize of the image
2095 * @param color color of the arrow
2097 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2098 int ey, int w, int h, int stride, int color, int tail, int direction)
2103 FFSWAP(int, sx, ex);
2104 FFSWAP(int, sy, ey);
// Loose clamp (±100 px beyond the image): keeps the fixed-point math in
// draw_line() safe while letting off-screen vectors still produce a partial line.
2107 sx = av_clip(sx, -100, w + 100);
2108 sy = av_clip(sy, -100, h + 100);
2109 ex = av_clip(ex, -100, w + 100);
2110 ey = av_clip(ey, -100, h + 100);
// Only arrows longer than 3 px get a head; rx/ry is the shaft direction scaled
// to a fixed head length (length is in 16.8 fixed point via the << 8).
2115 if (dx * dx + dy * dy > 3 * 3) {
2118 int length = ff_sqrt((rx * rx + ry * ry) << 8);
2120 // FIXME subpixel accuracy
2121 rx = ROUNDED_DIV(rx * 3 << 4, length);
2122 ry = ROUNDED_DIV(ry * 3 << 4, length);
// Two head strokes: one along the shaft, one perpendicular to it.
2129 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2130 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2132 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
// Fill one exported AVMotionVector from a macroblock's motion data.
// Skips zero-length vectors (dst == src); block size is derived from the
// macroblock partition type (8x8 / 16x8 / 8x16 / 16x16).
// Presumably returns the number of vectors written (0 or 1) — callers
// accumulate the result into a count.
2136 static int add_mb(AVMotionVector *mb, uint32_t mb_type,
2137 int dst_x, int dst_y,
2138 int src_x, int src_y,
2141 if (dst_x == src_x && dst_y == src_y)
2143 mb->w = IS_8X8(mb_type) || IS_8X16(mb_type) ? 8 : 16;
2144 mb->h = IS_8X8(mb_type) || IS_16X8(mb_type) ? 8 : 16;
// source: -1 = past (forward prediction), 1 = future (backward prediction),
// matching the AVMotionVector.source convention.
2149 mb->source = direction ? 1 : -1;
2150 mb->flags = 0; // XXX: does mb_type contain extra information that could be exported here?
2155 * Print debugging info for the given picture.
2157 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2158 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2160 int mb_width, int mb_height, int mb_stride, int quarter_sample)
2162 if ((avctx->flags2 & CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) {
2163 const int shift = 1 + quarter_sample;
2164 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2165 const int mv_stride = (mb_width << mv_sample_log2) +
2166 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2167 int mb_x, mb_y, mbcount = 0;
2169 /* size is width * height * 2 * 4 where 2 is for directions and 4 is
2170 * for the maximum number of MB (4 MB in case of IS_8x8) */
2171 AVMotionVector *mvs = av_malloc_array(mb_width * mb_height, 2 * 4 * sizeof(AVMotionVector));
2175 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2176 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2177 int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
2178 for (direction = 0; direction < 2; direction++) {
2179 if (!USES_LIST(mb_type, direction))
2181 if (IS_8X8(mb_type)) {
2182 for (i = 0; i < 4; i++) {
2183 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2184 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2185 int xy = (mb_x * 2 + (i & 1) +
2186 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2187 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2188 int my = (motion_val[direction][xy][1] >> shift) + sy;
2189 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2191 } else if (IS_16X8(mb_type)) {
2192 for (i = 0; i < 2; i++) {
2193 int sx = mb_x * 16 + 8;
2194 int sy = mb_y * 16 + 4 + 8 * i;
2195 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2196 int mx = (motion_val[direction][xy][0] >> shift);
2197 int my = (motion_val[direction][xy][1] >> shift);
2199 if (IS_INTERLACED(mb_type))
2202 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2204 } else if (IS_8X16(mb_type)) {
2205 for (i = 0; i < 2; i++) {
2206 int sx = mb_x * 16 + 4 + 8 * i;
2207 int sy = mb_y * 16 + 8;
2208 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2209 int mx = motion_val[direction][xy][0] >> shift;
2210 int my = motion_val[direction][xy][1] >> shift;
2212 if (IS_INTERLACED(mb_type))
2215 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2218 int sx = mb_x * 16 + 8;
2219 int sy = mb_y * 16 + 8;
2220 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
2221 int mx = (motion_val[direction][xy][0]>>shift) + sx;
2222 int my = (motion_val[direction][xy][1]>>shift) + sy;
2223 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2230 AVFrameSideData *sd;
2232 av_log(avctx, AV_LOG_DEBUG, "Adding %d MVs info to frame %d\n", mbcount, avctx->frame_number);
2233 sd = av_frame_new_side_data(pict, AV_FRAME_DATA_MOTION_VECTORS, mbcount * sizeof(AVMotionVector));
2236 memcpy(sd->data, mvs, mbcount * sizeof(AVMotionVector));
2242 /* TODO: export all the following to make them accessible for users (and filters) */
2243 if (avctx->hwaccel || !mbtype_table
2244 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
2248 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2251 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2252 av_get_picture_type_char(pict->pict_type));
2253 for (y = 0; y < mb_height; y++) {
2254 for (x = 0; x < mb_width; x++) {
2255 if (avctx->debug & FF_DEBUG_SKIP) {
2256 int count = mbskip_table[x + y * mb_stride];
2259 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2261 if (avctx->debug & FF_DEBUG_QP) {
2262 av_log(avctx, AV_LOG_DEBUG, "%2d",
2263 qscale_table[x + y * mb_stride]);
2265 if (avctx->debug & FF_DEBUG_MB_TYPE) {
2266 int mb_type = mbtype_table[x + y * mb_stride];
2267 // Type & MV direction
2268 if (IS_PCM(mb_type))
2269 av_log(avctx, AV_LOG_DEBUG, "P");
2270 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2271 av_log(avctx, AV_LOG_DEBUG, "A");
2272 else if (IS_INTRA4x4(mb_type))
2273 av_log(avctx, AV_LOG_DEBUG, "i");
2274 else if (IS_INTRA16x16(mb_type))
2275 av_log(avctx, AV_LOG_DEBUG, "I");
2276 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2277 av_log(avctx, AV_LOG_DEBUG, "d");
2278 else if (IS_DIRECT(mb_type))
2279 av_log(avctx, AV_LOG_DEBUG, "D");
2280 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2281 av_log(avctx, AV_LOG_DEBUG, "g");
2282 else if (IS_GMC(mb_type))
2283 av_log(avctx, AV_LOG_DEBUG, "G");
2284 else if (IS_SKIP(mb_type))
2285 av_log(avctx, AV_LOG_DEBUG, "S");
2286 else if (!USES_LIST(mb_type, 1))
2287 av_log(avctx, AV_LOG_DEBUG, ">");
2288 else if (!USES_LIST(mb_type, 0))
2289 av_log(avctx, AV_LOG_DEBUG, "<");
2291 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2292 av_log(avctx, AV_LOG_DEBUG, "X");
2296 if (IS_8X8(mb_type))
2297 av_log(avctx, AV_LOG_DEBUG, "+");
2298 else if (IS_16X8(mb_type))
2299 av_log(avctx, AV_LOG_DEBUG, "-");
2300 else if (IS_8X16(mb_type))
2301 av_log(avctx, AV_LOG_DEBUG, "|");
2302 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2303 av_log(avctx, AV_LOG_DEBUG, " ");
2305 av_log(avctx, AV_LOG_DEBUG, "?");
2308 if (IS_INTERLACED(mb_type))
2309 av_log(avctx, AV_LOG_DEBUG, "=");
2311 av_log(avctx, AV_LOG_DEBUG, " ");
2314 av_log(avctx, AV_LOG_DEBUG, "\n");
2318 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2319 (avctx->debug_mv)) {
2322 int h_chroma_shift, v_chroma_shift, block_height;
2324 const int shift = 1 + quarter_sample;
2326 const int width = avctx->width;
2327 const int height = avctx->height;
2329 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2330 const int mv_stride = (mb_width << mv_sample_log2) +
2331 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2333 *low_delay = 0; // needed to see the vectors without trashing the buffers
2335 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2337 av_frame_make_writable(pict);
2339 pict->opaque = NULL;
2341 ptr = pict->data[0];
2343 block_height = 16 >> v_chroma_shift;
2345 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2347 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2348 const int mb_index = mb_x + mb_y * mb_stride;
2350 if ((avctx->debug_mv) && motion_val[0]) {
2352 for (type = 0; type < 3; type++) {
2356 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2357 (pict->pict_type!= AV_PICTURE_TYPE_P))
2362 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2363 (pict->pict_type!= AV_PICTURE_TYPE_B))
2368 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2369 (pict->pict_type!= AV_PICTURE_TYPE_B))
2374 if (!USES_LIST(mbtype_table[mb_index], direction))
2377 if (IS_8X8(mbtype_table[mb_index])) {
2379 for (i = 0; i < 4; i++) {
2380 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2381 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2382 int xy = (mb_x * 2 + (i & 1) +
2383 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2384 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2385 int my = (motion_val[direction][xy][1] >> shift) + sy;
2386 draw_arrow(ptr, sx, sy, mx, my, width,
2387 height, pict->linesize[0], 100, 0, direction);
2389 } else if (IS_16X8(mbtype_table[mb_index])) {
2391 for (i = 0; i < 2; i++) {
2392 int sx = mb_x * 16 + 8;
2393 int sy = mb_y * 16 + 4 + 8 * i;
2394 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2395 int mx = (motion_val[direction][xy][0] >> shift);
2396 int my = (motion_val[direction][xy][1] >> shift);
2398 if (IS_INTERLACED(mbtype_table[mb_index]))
2401 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2402 height, pict->linesize[0], 100, 0, direction);
2404 } else if (IS_8X16(mbtype_table[mb_index])) {
2406 for (i = 0; i < 2; i++) {
2407 int sx = mb_x * 16 + 4 + 8 * i;
2408 int sy = mb_y * 16 + 8;
2409 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2410 int mx = motion_val[direction][xy][0] >> shift;
2411 int my = motion_val[direction][xy][1] >> shift;
2413 if (IS_INTERLACED(mbtype_table[mb_index]))
2416 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2417 height, pict->linesize[0], 100, 0, direction);
2420 int sx= mb_x * 16 + 8;
2421 int sy= mb_y * 16 + 8;
2422 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2423 int mx= (motion_val[direction][xy][0]>>shift) + sx;
2424 int my= (motion_val[direction][xy][1]>>shift) + sy;
2425 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
2430 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2431 uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2432 0x0101010101010101ULL;
2434 for (y = 0; y < block_height; y++) {
2435 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2436 (block_height * mb_y + y) *
2437 pict->linesize[1]) = c;
2438 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2439 (block_height * mb_y + y) *
2440 pict->linesize[2]) = c;
2443 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2445 int mb_type = mbtype_table[mb_index];
2448 #define COLOR(theta, r) \
2449 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2450 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2454 if (IS_PCM(mb_type)) {
2456 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2457 IS_INTRA16x16(mb_type)) {
2459 } else if (IS_INTRA4x4(mb_type)) {
2461 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2463 } else if (IS_DIRECT(mb_type)) {
2465 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2467 } else if (IS_GMC(mb_type)) {
2469 } else if (IS_SKIP(mb_type)) {
2471 } else if (!USES_LIST(mb_type, 1)) {
2473 } else if (!USES_LIST(mb_type, 0)) {
2476 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2480 u *= 0x0101010101010101ULL;
2481 v *= 0x0101010101010101ULL;
2482 for (y = 0; y < block_height; y++) {
2483 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2484 (block_height * mb_y + y) * pict->linesize[1]) = u;
2485 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2486 (block_height * mb_y + y) * pict->linesize[2]) = v;
2490 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2491 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2492 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2493 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2494 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2496 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2497 for (y = 0; y < 16; y++)
2498 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2499 pict->linesize[0]] ^= 0x80;
2501 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2502 int dm = 1 << (mv_sample_log2 - 2);
2503 for (i = 0; i < 4; i++) {
2504 int sx = mb_x * 16 + 8 * (i & 1);
2505 int sy = mb_y * 16 + 8 * (i >> 1);
2506 int xy = (mb_x * 2 + (i & 1) +
2507 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2509 int32_t *mv = (int32_t *) &motion_val[0][xy];
2510 if (mv[0] != mv[dm] ||
2511 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2512 for (y = 0; y < 8; y++)
2513 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2514 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2515 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2516 pict->linesize[0]) ^= 0x8080808080808080ULL;
2520 if (IS_INTERLACED(mb_type) &&
2521 avctx->codec->id == AV_CODEC_ID_H264) {
2525 mbskip_table[mb_index] = 0;
/* Thin wrapper: forwards this MpegEncContext's macroblock tables
 * (skip table, mb_type, qscale, motion vectors) to the generic debug
 * visualiser ff_print_debug_info2().
 * NOTE(review): the function's surrounding braces are elided in this
 * extract. */
2531 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2533 ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
2534 p->qscale_table, p->motion_val, &s->low_delay,
2535 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/* Attach the per-macroblock qscale table to frame f.
 * Takes a new reference to p->qscale_table_buf so the frame owns its own
 * view of the data; 'offset' skips two stride rows plus one column of
 * edge padding before the first visible macroblock.
 * Returns 0 on success or a negative AVERROR code. */
2538 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2540 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2541 int offset = 2*s->mb_stride + 1;
/* NOTE(review): the 'if (!ref)' guard line is elided in this extract;
 * ENOMEM is returned when av_buffer_ref() failed. */
2543 return AVERROR(ENOMEM);
/* the remaining buffer must still cover one row of MBs per 16 pixels */
2544 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2545 ref->size -= offset;
2546 ref->data += offset;
2547 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/* Half-pel motion compensation of a single w x h block in lowres mode.
 * The motion vector is split into an integer part (added to src_x/src_y)
 * and a sub-pel fraction (sx/sy, masked by s_mask); if the displaced
 * source block would read outside the decodable area, the edge
 * emulation buffer is used instead.
 * NOTE(review): several lines of this function are elided in this
 * extract (the quarter_sample adjustment, the emu flag handling and the
 * return statement). */
2550 static inline int hpel_motion_lowres(MpegEncContext *s,
2551 uint8_t *dest, uint8_t *src,
2552 int field_based, int field_select,
2553 int src_x, int src_y,
2554 int width, int height, ptrdiff_t stride,
2555 int h_edge_pos, int v_edge_pos,
2556 int w, int h, h264_chroma_mc_func *pix_op,
2557 int motion_x, int motion_y)
2559 const int lowres = s->avctx->lowres;
2560 const int op_index = FFMIN(lowres, 3);
/* lowres halves the sub-pel precision per level; mask keeps the fraction */
2561 const int s_mask = (2 << lowres) - 1;
2565 if (s->quarter_sample) {
2570 sx = motion_x & s_mask;
2571 sy = motion_y & s_mask;
/* ">> lowres + 1" parses as ">> (lowres + 1)": drop the sub-pel bits */
2572 src_x += motion_x >> lowres + 1;
2573 src_y += motion_y >> lowres + 1;
2575 src += src_y * stride + src_x;
/* fall back to the edge emulation buffer when the block crosses an edge */
2577 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2578 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2579 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2580 s->linesize, s->linesize,
2581 w + 1, (h + 1) << field_based,
2582 src_x, src_y << field_based,
2583 h_edge_pos, v_edge_pos);
2584 src = s->edge_emu_buffer;
/* rescale the sub-pel fraction to the range the MC routine expects */
2588 sx = (sx << 2) >> lowres;
2589 sy = (sy << 2) >> lowres;
2592 pix_op[op_index](dest, src, stride, h, sx, sy);
2596 /* apply one mpeg motion vector to the three components */
/* Lowres variant of the main MPEG motion compensation: computes luma and
 * chroma source positions (chroma derivation depends on out_format and
 * the chroma shifts), emulates picture edges where needed, then runs the
 * put/avg MC functions on Y, Cb and Cr.
 * NOTE(review): many interior lines are elided in this extract
 * (parameter list head, quarter_sample body, several else branches,
 * the emu flag and field handling); comments below only describe what
 * is visible. */
2597 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2604 uint8_t **ref_picture,
2605 h264_chroma_mc_func *pix_op,
2606 int motion_x, int motion_y,
2609 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2610 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2611 ptrdiff_t uvlinesize, linesize;
2612 const int lowres = s->avctx->lowres;
2613 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2614 const int block_s = 8>>lowres;
2615 const int s_mask = (2 << lowres) - 1;
2616 const int h_edge_pos = s->h_edge_pos >> lowres;
2617 const int v_edge_pos = s->v_edge_pos >> lowres;
/* linesize doubles for field-based MC so rows of one field are adjacent */
2618 linesize = s->current_picture.f->linesize[0] << field_based;
2619 uvlinesize = s->current_picture.f->linesize[1] << field_based;
2621 // FIXME obviously not perfect but qpel will not work in lowres anyway
2622 if (s->quarter_sample) {
2628 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
/* split vector into sub-pel fraction (sx/sy) and integer position */
2631 sx = motion_x & s_mask;
2632 sy = motion_y & s_mask;
2633 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2634 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* chroma source position depends on the bitstream format */
2636 if (s->out_format == FMT_H263) {
2637 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2638 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2639 uvsrc_x = src_x >> 1;
2640 uvsrc_y = src_y >> 1;
2641 } else if (s->out_format == FMT_H261) {
2642 // even chroma mv's are full pel in H261
2645 uvsx = (2 * mx) & s_mask;
2646 uvsy = (2 * my) & s_mask;
2647 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2648 uvsrc_y = mb_y * block_s + (my >> lowres);
2650 if(s->chroma_y_shift){
2655 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2656 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2658 if(s->chroma_x_shift){
2662 uvsy = motion_y & s_mask;
2664 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2667 uvsx = motion_x & s_mask;
2668 uvsy = motion_y & s_mask;
2675 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2676 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2677 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* out-of-picture reads go through the edge emulation buffer */
2679 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2680 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2681 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2682 linesize >> field_based, linesize >> field_based,
2683 17, 17 + field_based,
2684 src_x, src_y << field_based, h_edge_pos,
2686 ptr_y = s->edge_emu_buffer;
/* chroma edge emulation is skipped entirely in grayscale-only mode */
2687 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2688 uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2689 uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2690 s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2691 uvlinesize >> field_based, uvlinesize >> field_based,
2693 uvsrc_x, uvsrc_y << field_based,
2694 h_edge_pos >> 1, v_edge_pos >> 1);
2695 s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2696 uvlinesize >> field_based,uvlinesize >> field_based,
2698 uvsrc_x, uvsrc_y << field_based,
2699 h_edge_pos >> 1, v_edge_pos >> 1);
2705 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
/* bottom field: start one line down in both dest and src */
2707 dest_y += s->linesize;
2708 dest_cb += s->uvlinesize;
2709 dest_cr += s->uvlinesize;
2713 ptr_y += s->linesize;
2714 ptr_cb += s->uvlinesize;
2715 ptr_cr += s->uvlinesize;
2718 sx = (sx << 2) >> lowres;
2719 sy = (sy << 2) >> lowres;
2720 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2722 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2723 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2724 uvsx = (uvsx << 2) >> lowres;
2725 uvsy = (uvsy << 2) >> lowres;
2727 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2728 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2731 // FIXME h261 lowres loop filter
/* Chroma motion compensation for H.263-style 4MV macroblocks in lowres:
 * the four luma vectors are collapsed into a single rounded chroma
 * vector, which is then applied to both the Cb and Cr planes.
 * NOTE(review): some lines are elided in this extract (mx/my inputs,
 * sx/sy assignment, the second emu check for the Cr plane). */
2734 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2735 uint8_t *dest_cb, uint8_t *dest_cr,
2736 uint8_t **ref_picture,
2737 h264_chroma_mc_func * pix_op,
2740 const int lowres = s->avctx->lowres;
2741 const int op_index = FFMIN(lowres, 3);
2742 const int block_s = 8 >> lowres;
2743 const int s_mask = (2 << lowres) - 1;
/* chroma plane is half-size: shift the edge positions one extra bit */
2744 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2745 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2746 int emu = 0, src_x, src_y, sx, sy;
2750 if (s->quarter_sample) {
2755 /* In case of 8X8, we construct a single chroma motion vector
2756 with a special rounding */
2757 mx = ff_h263_round_chroma(mx);
2758 my = ff_h263_round_chroma(my);
2762 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2763 src_y = s->mb_y * block_s + (my >> lowres + 1);
2765 offset = src_y * s->uvlinesize + src_x;
2766 ptr = ref_picture[1] + offset;
/* Cb: edge-emulate if the block reads outside the picture */
2767 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2768 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2769 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2770 s->uvlinesize, s->uvlinesize,
2772 src_x, src_y, h_edge_pos, v_edge_pos);
2773 ptr = s->edge_emu_buffer;
2776 sx = (sx << 2) >> lowres;
2777 sy = (sy << 2) >> lowres;
2778 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr: same offset and sub-pel fraction as Cb */
2780 ptr = ref_picture[2] + offset;
2782 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2783 s->uvlinesize, s->uvlinesize,
2785 src_x, src_y, h_edge_pos, v_edge_pos);
2786 ptr = s->edge_emu_buffer;
2788 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2792 * motion compensation of a single macroblock
2794 * @param dest_y luma destination pointer
2795 * @param dest_cb chroma cb/u destination pointer
2796 * @param dest_cr chroma cr/v destination pointer
2797 * @param dir direction (0->forward, 1->backward)
2798 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2799 * @param pix_op halfpel motion compensation function (average or put normally)
2800 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Lowres dispatcher over s->mv_type: one whole-MB vector, four 8x8
 * vectors, field vectors, or 16x8 pairs — each arm forwards to
 * mpeg_motion_lowres()/hpel_motion_lowres() with the right geometry.
 * NOTE(review): the case labels and several closing braces of the
 * switch are elided in this extract. */
2802 static inline void MPV_motion_lowres(MpegEncContext *s,
2803 uint8_t *dest_y, uint8_t *dest_cb,
2805 int dir, uint8_t **ref_picture,
2806 h264_chroma_mc_func *pix_op)
2810 const int lowres = s->avctx->lowres;
2811 const int block_s = 8 >>lowres;
2816 switch (s->mv_type) {
/* one vector for the whole macroblock */
2818 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2820 ref_picture, pix_op,
2821 s->mv[dir][0][0], s->mv[dir][0][1],
/* four independent 8x8 luma vectors; chroma is done once afterwards */
2827 for (i = 0; i < 4; i++) {
2828 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2829 s->linesize) * block_s,
2830 ref_picture[0], 0, 0,
2831 (2 * mb_x + (i & 1)) * block_s,
2832 (2 * mb_y + (i >> 1)) * block_s,
2833 s->width, s->height, s->linesize,
2834 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2835 block_s, block_s, pix_op,
2836 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate the four vectors for the averaged chroma vector */
2838 mx += s->mv[dir][i][0];
2839 my += s->mv[dir][i][1];
2842 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2843 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field MVs inside a frame picture: top field then bottom field */
2847 if (s->picture_structure == PICT_FRAME) {
2849 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2850 1, 0, s->field_select[dir][0],
2851 ref_picture, pix_op,
2852 s->mv[dir][0][0], s->mv[dir][0][1],
2855 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2856 1, 1, s->field_select[dir][1],
2857 ref_picture, pix_op,
2858 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture referencing the opposite-parity field of the current
 * frame: switch to the current picture's planes */
2861 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2862 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2863 ref_picture = s->current_picture_ptr->f->data;
2866 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2867 0, 0, s->field_select[dir][0],
2868 ref_picture, pix_op,
2870 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two vectors, each covering half the macroblock height */
2874 for (i = 0; i < 2; i++) {
2875 uint8_t **ref2picture;
2877 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2878 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2879 ref2picture = ref_picture;
2881 ref2picture = s->current_picture_ptr->f->data;
2884 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2885 0, 0, s->field_select[dir][i],
2886 ref2picture, pix_op,
2887 s->mv[dir][i][0], s->mv[dir][i][1] +
2888 2 * block_s * i, block_s, mb_y >> 1);
2890 dest_y += 2 * block_s * s->linesize;
2891 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2892 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* dual-prime style: put first block then average the second onto it */
2896 if (s->picture_structure == PICT_FRAME) {
2897 for (i = 0; i < 2; i++) {
2899 for (j = 0; j < 2; j++) {
2900 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2902 ref_picture, pix_op,
2903 s->mv[dir][2 * i + j][0],
2904 s->mv[dir][2 * i + j][1],
2907 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2910 for (i = 0; i < 2; i++) {
2911 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2912 0, 0, s->picture_structure != i + 1,
2913 ref_picture, pix_op,
2914 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2915 2 * block_s, mb_y >> 1);
2917 // after put we make avg of the same block
2918 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2920 // opposite parity is always in the same
2921 // frame if this is second field
2922 if (!s->first_field) {
2923 ref_picture = s->current_picture_ptr->f->data;
2934 * find the lowest MB row referenced in the MVs
/* Used by frame-threaded decoding to know how far the reference frame
 * must have progressed before this MB can be motion compensated.
 * NOTE(review): the switch arms that set 'mvs' per mv_type are elided
 * in this extract. */
2936 int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
2938 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2939 int my, off, i, mvs;
/* field pictures / GMC: be conservative, fall through to the bottom row */
2941 if (s->picture_structure != PICT_FRAME || s->mcsel)
2944 switch (s->mv_type) {
/* normalise all vertical MVs to quarter-pel and track the extremes */
2958 for (i = 0; i < mvs; i++) {
2959 my = s->mv[dir][i][1]<<qpel_shift;
2960 my_max = FFMAX(my_max, my);
2961 my_min = FFMIN(my_min, my);
/* convert the largest quarter-pel excursion to whole MB rows (ceil) */
2964 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2966 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2968 return s->mb_height-1;
2971 /* put block[] to dest[] */
/* Intra path: dequantize in place, then write (overwrite) the IDCT
 * result into dest. */
2972 static inline void put_dct(MpegEncContext *s,
2973 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2975 s->dct_unquantize_intra(s, block, i, qscale);
2976 s->idsp.idct_put(dest, line_size, block);
2979 /* add block[] to dest[] */
/* Inter path, already-dequantized residual: add the IDCT to dest, but
 * only when the block has at least one coded coefficient. */
2980 static inline void add_dct(MpegEncContext *s,
2981 int16_t *block, int i, uint8_t *dest, int line_size)
2983 if (s->block_last_index[i] >= 0) {
2984 s->idsp.idct_add(dest, line_size, block);
/* Inter path: dequantize the residual, then add its IDCT to dest.
 * Empty blocks (block_last_index < 0) are skipped entirely. */
2988 static inline void add_dequant_dct(MpegEncContext *s,
2989 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2991 if (s->block_last_index[i] >= 0) {
2992 s->dct_unquantize_inter(s, block, i, qscale);
2994 s->idsp.idct_add(dest, line_size, block);
2999 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra prediction state (DC predictors back to 1024, AC
 * predictors to 0, coded_block flags cleared) for the current MB so a
 * following intra MB does not predict from stale values.
 * Luma uses the 8x8-block stride (b8_stride); chroma uses mb_stride. */
3001 void ff_clean_intra_table_entries(MpegEncContext *s)
3003 int wrap = s->b8_stride;
3004 int xy = s->block_index[0];
/* luma: reset the DC predictors of all four 8x8 blocks of this MB */
3007 s->dc_val[0][xy + 1 ] =
3008 s->dc_val[0][xy + wrap] =
3009 s->dc_val[0][xy + 1 + wrap] = 1024;
3011 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
3012 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
3013 if (s->msmpeg4_version>=3) {
3014 s->coded_block[xy ] =
3015 s->coded_block[xy + 1 ] =
3016 s->coded_block[xy + wrap] =
3017 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one entry per MB, indexed with mb_stride */
3020 wrap = s->mb_stride;
3021 xy = s->mb_x + s->mb_y * wrap;
3023 s->dc_val[2][xy] = 1024;
3025 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
3026 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
3028 s->mbintra_table[xy]= 0;
3031 /* generic function called after a macroblock has been parsed by the
3032 decoder or after it has been encoded by the encoder.
3034 Important variables used:
3035 s->mb_intra : true if intra macroblock
3036 s->mv_dir : motion vector direction
3037 s->mv_type : motion vector type
3038 s->mv : motion vector
3039 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* NOTE(review): a large number of interior lines of this function are
 * elided in this extract (hwaccel guard head, intra/inter branch heads,
 * several closing braces); the comments below annotate only what is
 * visible. The overall flow is: optional hwaccel/debug output, DC
 * predictor bookkeeping, then (motion compensation + residual
 * dequant/IDCT) into the destination or scratchpad planes. */
3041 static av_always_inline
3042 void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
3043 int lowres_flag, int is_mpeg12)
3045 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* hardware acceleration takes over the whole MB reconstruction */
3048 s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
3049 s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
/* optional dump of all DCT coefficients in IDCT-permuted order */
3053 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
3054 /* print DCT coefficients */
3056 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
3058 for(j=0; j<64; j++){
3059 av_log(s->avctx, AV_LOG_DEBUG, "%5d",
3060 block[i][s->idsp.idct_permutation[j]]);
3062 av_log(s->avctx, AV_LOG_DEBUG, "\n");
3066 s->current_picture.qscale_table[mb_xy] = s->qscale;
3068 /* update DC predictors for P macroblocks */
3070 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
3071 if(s->mbintra_table[mb_xy])
3072 ff_clean_intra_table_entries(s);
3076 s->last_dc[2] = 128 << s->intra_dc_precision;
3079 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
3080 s->mbintra_table[mb_xy]=1;
/* reconstruct only when the pixels are actually needed (decoding, PSNR,
 * frame skipping heuristics, or non-RD encoding) */
3082 if ( (s->flags&CODEC_FLAG_PSNR)
3083 || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
3084 || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
3085 uint8_t *dest_y, *dest_cb, *dest_cr;
3086 int dct_linesize, dct_offset;
3087 op_pixels_func (*op_pix)[4];
3088 qpel_mc_func (*op_qpix)[16];
3089 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3090 const int uvlinesize = s->current_picture.f->linesize[1];
3091 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
3092 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
3094 /* avoid copy if macroblock skipped in last frame too */
3095 /* skip only during decoding as we might trash the buffers during encoding a bit */
3097 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
3099 if (s->mb_skipped) {
3101 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
3103 } else if(!s->current_picture.reference) {
3106 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: rows of one field are interleaved -> double stride */
3110 dct_linesize = linesize << s->interlaced_dct;
3111 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
3115 dest_cb= s->dest[1];
3116 dest_cr= s->dest[2];
/* not readable: render into the scratchpad instead of the picture */
3118 dest_y = s->b_scratchpad;
3119 dest_cb= s->b_scratchpad+16*linesize;
3120 dest_cr= s->b_scratchpad+32*linesize;
3124 /* motion handling */
3125 /* decoding or more than one mb_type (MC was already done otherwise) */
3128 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
/* frame threading: wait until the reference rows we need are decoded */
3129 if (s->mv_dir & MV_DIR_FORWARD) {
3130 ff_thread_await_progress(&s->last_picture_ptr->tf,
3131 ff_mpv_lowest_referenced_row(s, 0),
3134 if (s->mv_dir & MV_DIR_BACKWARD) {
3135 ff_thread_await_progress(&s->next_picture_ptr->tf,
3136 ff_mpv_lowest_referenced_row(s, 1),
/* lowres MC path: forward put, then backward averaged on top */
3142 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
3144 if (s->mv_dir & MV_DIR_FORWARD) {
3145 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
3146 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
3148 if (s->mv_dir & MV_DIR_BACKWARD) {
3149 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
/* full-resolution MC path, with optional no-rounding variants */
3152 op_qpix = s->me.qpel_put;
3153 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
3154 op_pix = s->hdsp.put_pixels_tab;
3156 op_pix = s->hdsp.put_no_rnd_pixels_tab;
3158 if (s->mv_dir & MV_DIR_FORWARD) {
3159 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
3160 op_pix = s->hdsp.avg_pixels_tab;
3161 op_qpix= s->me.qpel_avg;
3163 if (s->mv_dir & MV_DIR_BACKWARD) {
3164 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
3169 /* skip dequant / idct if we are really late ;) */
3170 if(s->avctx->skip_idct){
3171 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
3172 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
3173 || s->avctx->skip_idct >= AVDISCARD_ALL)
3177 /* add dct residue */
/* inter residual with in-loop dequant (codecs without separate quant) */
3178 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
3179 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
3180 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3181 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3182 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3183 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3185 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3186 if (s->chroma_y_shift){
3187 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3188 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3192 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3193 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3194 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3195 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* inter residual already dequantized by the entropy decoder */
3198 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3199 add_dct(s, block[0], 0, dest_y , dct_linesize);
3200 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3201 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3202 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3204 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3205 if(s->chroma_y_shift){//Chroma420
3206 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3207 add_dct(s, block[5], 5, dest_cr, uvlinesize);
3210 dct_linesize = uvlinesize << s->interlaced_dct;
3211 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3213 add_dct(s, block[4], 4, dest_cb, dct_linesize);
3214 add_dct(s, block[5], 5, dest_cr, dct_linesize);
3215 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3216 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3217 if(!s->chroma_x_shift){//Chroma444
3218 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3219 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3220 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3221 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3226 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3227 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3230 /* dct only in intra block */
3231 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
3232 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3233 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3234 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3235 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3237 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3238 if(s->chroma_y_shift){
3239 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3240 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3244 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3245 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3246 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3247 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* intra, already dequantized: straight IDCT put */
3251 s->idsp.idct_put(dest_y, dct_linesize, block[0]);
3252 s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3253 s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
3254 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3256 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3257 if(s->chroma_y_shift){
3258 s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
3259 s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
3262 dct_linesize = uvlinesize << s->interlaced_dct;
3263 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3265 s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
3266 s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
3267 s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3268 s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3269 if(!s->chroma_x_shift){//Chroma444
3270 s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3271 s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3272 s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3273 s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* not readable: copy the scratchpad result into the real destination */
3281 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3282 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3283 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public MB decode entry point: dispatches to mpv_decode_mb_internal()
 * with compile-time-friendly flags (lowres yes/no, MPEG-1/2 yes/no) so
 * each specialisation can be fully inlined and dead-code eliminated. */
3288 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
3291 if(s->out_format == FMT_MPEG1) {
3292 if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 1);
3293 else mpv_decode_mb_internal(s, block, 0, 1);
3296 if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 0);
3297 else mpv_decode_mb_internal(s, block, 0, 0);
/* Forward a finished horizontal band [y, y+h) of the current picture to
 * the user's draw_horiz_band callback via the generic helper. */
3300 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3302 ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3303 s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3304 s->first_field, s->low_delay);
/* Initialise the per-MB block indices and destination plane pointers
 * for the current (mb_x, mb_y). Luma uses the 8x8-block grid
 * (b8_stride), chroma entries live after the luma area. The dest
 * pointers start one MB to the left (mb_x - 1); field pictures use
 * mb_y>>1 with an adjusted stride. */
3307 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3308 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3309 const int uvlinesize = s->current_picture.f->linesize[1];
/* MB edge in pixels is 16 >> lowres, expressed as a shift amount */
3310 const int mb_size= 4 - s->avctx->lowres;
3312 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3313 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3314 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3315 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3316 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3317 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3318 //block_index is not used by mpeg2, so it is not affected by chroma_format
3320 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
3321 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3322 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3324 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3326 if(s->picture_structure==PICT_FRAME){
3327 s->dest[0] += s->mb_y * linesize << mb_size;
3328 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3329 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: only every other MB row belongs to this field */
3331 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3332 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3333 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3334 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3340 * Permute an 8x8 block.
3341 * @param block the block which will be permuted according to the given permutation vector
3342 * @param permutation the permutation vector
3343 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3344 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3345 * (inverse) permutated to scantable order!
/* Two passes over the first last+1 scantable positions: gather the
 * affected coefficients into a temporary, then scatter them back at
 * their permuted indices.
 * NOTE(review): the temp array declaration, the early-exit check and
 * the first loop's copy/clear statements are elided in this extract. */
3347 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3353 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* pass 1: save coefficients at their scantable positions */
3355 for(i=0; i<=last; i++){
3356 const int j= scantable[i];
/* pass 2: write them back at the permuted positions */
3361 for(i=0; i<=last; i++){
3362 const int j= scantable[i];
3363 const int perm_j= permutation[j];
3364 block[perm_j]= temp[j];
/* Flush all decoder state: unreference every picture (pool and the
 * current/last/next working set), reset MB position and the bitstream
 * parse context. Called e.g. on seek. */
3368 void ff_mpeg_flush(AVCodecContext *avctx){
3370 MpegEncContext *s = avctx->priv_data;
/* nothing to do if the context was never fully initialised */
3372 if (!s || !s->picture)
3375 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3376 ff_mpeg_unref_picture(s, &s->picture[i]);
3377 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3379 ff_mpeg_unref_picture(s, &s->current_picture);
3380 ff_mpeg_unref_picture(s, &s->last_picture);
3381 ff_mpeg_unref_picture(s, &s->next_picture);
3383 s->mb_x= s->mb_y= 0;
/* reset the startcode parser so stale partial packets are discarded */
3386 s->parse_context.state= -1;
3387 s->parse_context.frame_start_found= 0;
3388 s->parse_context.overread= 0;
3389 s->parse_context.overread_index= 0;
3390 s->parse_context.index= 0;
3391 s->parse_context.last_index= 0;
3392 s->bitstream_buffer_size=0;
3397 * set qscale and update qscale dependent variables.
/* Clamps qscale to [1,31] and refreshes the derived chroma qscale and
 * the luma/chroma DC scale factors from their lookup tables.
 * NOTE(review): the lower-bound clamp and the 's->qscale = qscale;'
 * assignment lines are elided in this extract. */
3399 void ff_set_qscale(MpegEncContext * s, int qscale)
3403 else if (qscale > 31)
3407 s->chroma_qscale= s->chroma_qscale_table[qscale];
3409 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3410 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
3413 void ff_mpv_report_decode_progress(MpegEncContext *s)
3415 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3416 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);