2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/motion_vector.h"
35 #include "libavutil/timer.h"
38 #include "h264chroma.h"
42 #include "mpegutils.h"
43 #include "mpegvideo.h"
/* Identity chroma-qscale mapping (entry i == i): used when a codec has no
 * separate chroma quantiser scale.
 * NOTE(review): the closing "};" is missing from this excerpt. */
50 static const uint8_t ff_default_chroma_qscale_table[32] = {
51 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
52 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
53 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale index.
 * NOTE(review): the closing "};" is missing from this excerpt. */
56 const uint8_t ff_mpeg1_dc_scale_table[128] = {
57 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
65 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table, constant 4 — entry 1 of ff_mpeg2_dc_scale_table;
 * presumably selected by intra_dc_precision (TODO confirm against caller). */
68 static const uint8_t mpeg2_dc_scale_table1[128] = {
69 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
77 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table, constant 2 — entry 2 of ff_mpeg2_dc_scale_table. */
80 static const uint8_t mpeg2_dc_scale_table2[128] = {
81 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
89 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table, constant 1 — entry 3 of ff_mpeg2_dc_scale_table. */
92 static const uint8_t mpeg2_dc_scale_table3[128] = {
93 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Lookup of the four DC-scale tables above: divisors 8, 4, 2, 1 in order.
 * Index is presumably intra_dc_precision (0..3) — TODO confirm at call sites. */
104 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
105 ff_mpeg1_dc_scale_table,
106 mpeg2_dc_scale_table1,
107 mpeg2_dc_scale_table2,
108 mpeg2_dc_scale_table3,
/* Alternate horizontal coefficient scan order (64 entries, a permutation of
 * 0..63); loaded into intra_h_scantable by ff_mpv_idct_init(). */
111 const uint8_t ff_alternate_horizontal_scan[64] = {
112 0, 1, 2, 3, 8, 9, 16, 17,
113 10, 11, 4, 5, 6, 7, 15, 14,
114 13, 12, 19, 18, 24, 25, 32, 33,
115 26, 27, 20, 21, 22, 23, 28, 29,
116 30, 31, 34, 35, 40, 41, 48, 49,
117 42, 43, 36, 37, 38, 39, 44, 45,
118 46, 47, 50, 51, 56, 57, 58, 59,
119 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical coefficient scan order (64 entries); used as the
 * inter/intra scan when s->alternate_scan is set (see ff_mpv_idct_init). */
122 const uint8_t ff_alternate_vertical_scan[64] = {
123 0, 8, 16, 24, 1, 9, 2, 10,
124 17, 25, 32, 40, 48, 56, 57, 49,
125 41, 33, 26, 18, 3, 11, 4, 12,
126 19, 27, 34, 42, 50, 58, 35, 43,
127 51, 59, 20, 28, 5, 13, 6, 14,
128 21, 29, 36, 44, 52, 60, 37, 45,
129 53, 61, 22, 30, 7, 15, 23, 31,
130 38, 46, 54, 62, 39, 47, 55, 63,
/*
 * Dequantize an intra block (MPEG-1 rules): DC (block[0]) is scaled by the
 * luma/chroma DC scale, AC coefficients by qscale * intra matrix >> 3 with
 * odd-ification ((level - 1) | 1).
 * NOTE(review): this excerpt is missing interior lines (the sign branches
 * around lines 146-153 and the closing braces) — compare against upstream
 * before editing.
 */
133 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
134 int16_t *block, int n, int qscale)
136 int i, level, nCoeffs;
137 const uint16_t *quant_matrix;
139 nCoeffs= s->block_last_index[n];
/* n < 4 means a luma block; otherwise chroma. */
141 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
142 /* XXX: only mpeg1 */
143 quant_matrix = s->intra_matrix;
144 for(i=1;i<=nCoeffs;i++) {
145 int j= s->intra_scantable.permutated[i];
/* The two copies below are the negative/positive branches of the original. */
150 level = (int)(level * qscale * quant_matrix[j]) >> 3;
151 level = (level - 1) | 1;
154 level = (int)(level * qscale * quant_matrix[j]) >> 3;
155 level = (level - 1) | 1;
/*
 * Dequantize an inter block (MPEG-1 rules): every coefficient (including 0)
 * is scaled by ((2*level + 1) * qscale * inter matrix) >> 4, then made odd.
 * NOTE(review): interior lines (sign branches, braces) are missing from this
 * excerpt — verify against upstream before editing.
 */
162 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
163 int16_t *block, int n, int qscale)
165 int i, level, nCoeffs;
166 const uint16_t *quant_matrix;
168 nCoeffs= s->block_last_index[n];
170 quant_matrix = s->inter_matrix;
171 for(i=0; i<=nCoeffs; i++) {
/* intra_scantable is used here on purpose: inter shares the same scan. */
172 int j= s->intra_scantable.permutated[i];
/* Negative/positive branches of the original follow. */
177 level = (((level << 1) + 1) * qscale *
178 ((int) (quant_matrix[j]))) >> 4;
179 level = (level - 1) | 1;
182 level = (((level << 1) + 1) * qscale *
183 ((int) (quant_matrix[j]))) >> 4;
184 level = (level - 1) | 1;
/*
 * Dequantize an intra block (MPEG-2 rules). With alternate_scan all 64
 * coefficients are processed; no odd-ification, unlike MPEG-1.
 * NOTE(review): sign branches and braces are missing from this excerpt.
 */
191 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
192 int16_t *block, int n, int qscale)
194 int i, level, nCoeffs;
195 const uint16_t *quant_matrix;
197 if(s->alternate_scan) nCoeffs= 63;
198 else nCoeffs= s->block_last_index[n];
200 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
201 quant_matrix = s->intra_matrix;
202 for(i=1;i<=nCoeffs;i++) {
203 int j= s->intra_scantable.permutated[i];
/* Negative/positive branches of the original follow. */
208 level = (int)(level * qscale * quant_matrix[j]) >> 3;
211 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * Bit-exact variant of dct_unquantize_mpeg2_intra_c, selected in dct_init()
 * when CODEC_FLAG_BITEXACT is set; the missing lines presumably track the
 * coefficient-sum parity for the final mismatch control — TODO confirm.
 */
218 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
219 int16_t *block, int n, int qscale)
221 int i, level, nCoeffs;
222 const uint16_t *quant_matrix;
225 if(s->alternate_scan) nCoeffs= 63;
226 else nCoeffs= s->block_last_index[n];
228 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
230 quant_matrix = s->intra_matrix;
231 for(i=1;i<=nCoeffs;i++) {
232 int j= s->intra_scantable.permutated[i];
/* Negative/positive branches of the original follow. */
237 level = (int)(level * qscale * quant_matrix[j]) >> 3;
240 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * Dequantize an inter block (MPEG-2 rules): ((2*level + 1) * qscale *
 * inter matrix) >> 4, no odd-ification.
 * NOTE(review): sign branches and braces are missing from this excerpt.
 */
249 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
250 int16_t *block, int n, int qscale)
252 int i, level, nCoeffs;
253 const uint16_t *quant_matrix;
256 if(s->alternate_scan) nCoeffs= 63;
257 else nCoeffs= s->block_last_index[n];
259 quant_matrix = s->inter_matrix;
260 for(i=0; i<=nCoeffs; i++) {
261 int j= s->intra_scantable.permutated[i];
/* Negative/positive branches of the original follow. */
266 level = (((level << 1) + 1) * qscale *
267 ((int) (quant_matrix[j]))) >> 4;
270 level = (((level << 1) + 1) * qscale *
271 ((int) (quant_matrix[j]))) >> 4;
/*
 * Dequantize an intra block (H.263 rules): level * (2*qscale) +/- qadd,
 * with qadd = (qscale - 1) | 1 (always odd).
 * NOTE(review): nCoeffs declaration, qmul setup, h263_aic handling and
 * braces are missing from this excerpt — verify upstream before editing.
 */
280 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
281 int16_t *block, int n, int qscale)
283 int i, level, qmul, qadd;
286 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
291 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
292 qadd = (qscale - 1) | 1;
/* raster_end maps block_last_index to the last raster-order coefficient. */
299 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
301 for(i=1; i<=nCoeffs; i++) {
/* Negative branch subtracts qadd, positive branch adds it. */
305 level = level * qmul - qadd;
307 level = level * qmul + qadd;
/*
 * Dequantize an inter block (H.263 rules); same formula as the intra
 * variant but starting at coefficient 0 and without DC scaling.
 * NOTE(review): nCoeffs declaration, qmul setup and braces are missing
 * from this excerpt.
 */
314 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
315 int16_t *block, int n, int qscale)
317 int i, level, qmul, qadd;
320 av_assert2(s->block_last_index[n]>=0);
322 qadd = (qscale - 1) | 1;
325 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
327 for(i=0; i<=nCoeffs; i++) {
/* Negative branch subtracts qadd, positive branch adds it. */
331 level = level * qmul - qadd;
333 level = level * qmul + qadd;
/*
 * Error-resilience callback (installed as er->decode_mb in init_er()):
 * loads the concealment motion/MB state into the context, computes the
 * destination pointers for the current MB and decodes it.
 * NOTE(review): several lines (mv_dir/qscale assignments, interlacing
 * condition guarding the av_log) are missing from this excerpt.
 */
340 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
342 int mb_x, int mb_y, int mb_intra, int mb_skipped)
344 MpegEncContext *s = opaque;
347 s->mv_type = mv_type;
348 s->mb_intra = mb_intra;
349 s->mb_skipped = mb_skipped;
352 memcpy(s->mv, mv, sizeof(*mv));
354 ff_init_block_index(s);
355 ff_update_block_index(s);
357 s->bdsp.clear_blocks(s->block[0]);
/* Luma dest: 16x16 MB; chroma dest: scaled by chroma shift factors. */
359 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
360 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
361 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
364 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
365 ff_mpv_decode_mb(s, s->block);
/* Debug stub replacing 16-wide hpel routines when FF_DEBUG_NOMC is set:
 * fills the destination with mid-gray (128) instead of motion-compensating.
 * NOTE(review): the loop/braces are missing from this excerpt. */
368 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
371 memset(dst + h*linesize, 128, 16);
/* 8-wide counterpart of gray16() for the chroma-sized hpel tables.
 * NOTE(review): the loop/braces are missing from this excerpt. */
374 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
377 memset(dst + h*linesize, 128, 8);
380 /* init common dct for both encoder and decoder */
/* Initializes the DSP sub-contexts, installs the C dct_unquantize
 * implementations (bit-exact MPEG-2 intra variant under
 * CODEC_FLAG_BITEXACT), and lets per-arch init functions override them.
 * NOTE(review): #if/#endif guards around the per-arch calls and the
 * return statement are missing from this excerpt. */
381 static av_cold int dct_init(MpegEncContext *s)
383 ff_blockdsp_init(&s->bdsp, s->avctx);
384 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
385 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
386 ff_me_cmp_init(&s->mecc, s->avctx);
387 ff_mpegvideodsp_init(&s->mdsp);
388 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* Debug mode: replace motion compensation with gray fills. */
390 if (s->avctx->debug & FF_DEBUG_NOMC) {
392 for (i=0; i<4; i++) {
393 s->hdsp.avg_pixels_tab[0][i] = gray16;
394 s->hdsp.put_pixels_tab[0][i] = gray16;
395 s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
397 s->hdsp.avg_pixels_tab[1][i] = gray8;
398 s->hdsp.put_pixels_tab[1][i] = gray8;
399 s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
403 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
404 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
405 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
406 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
407 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
408 if (s->flags & CODEC_FLAG_BITEXACT)
409 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
410 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Architecture-specific overrides of the C implementations above. */
412 if (HAVE_INTRINSICS_NEON)
413 ff_mpv_common_init_neon(s);
416 ff_mpv_common_init_axp(s);
418 ff_mpv_common_init_arm(s);
420 ff_mpv_common_init_ppc(s);
422 ff_mpv_common_init_x86(s);
/* Initializes the IDCT DSP context and builds the four permuted scantables
 * (inter/intra use the alternate vertical scan when alternate_scan is set,
 * zigzag otherwise; the h/v intra tables are always the alternate scans). */
427 av_cold void ff_mpv_idct_init(MpegEncContext *s)
429 ff_idctdsp_init(&s->idsp, s->avctx);
431 /* load & permutate scantables
432 * note: only wmv uses different ones
434 if (s->alternate_scan) {
435 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
436 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
438 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
439 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
441 ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
442 ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/*
 * Allocates the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads); several scratchpad pointers alias the
 * same allocation. Returns 0 on success, AVERROR on failure; on failure the
 * edge_emu_buffer is freed so the caller sees a consistent state.
 * NOTE(review): the hwaccel early-return body and the linesize sanity
 * check condition are missing from this excerpt.
 */
445 static int frame_size_alloc(MpegEncContext *s, int linesize)
447 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
/* hwaccel/VDPAU paths do not use the software scratch buffers. */
449 if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
453 av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
454 return AVERROR_PATCHWELCOME;
457 // edge emu needs blocksize + filter length - 1
458 // (= 17x17 for halfpel / 21x21 for h264)
459 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
460 // at uvlinesize. It supports only YUV420 so 24x24 is enough
461 // linesize * interlaced * MBsize
462 // we also use this buffer for encoding in encode_mb_internal() needig an additional 32 lines
463 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 68,
466 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
/* The ME temp, RD, B-frame and OBMC scratchpads share one allocation. */
468 s->me.temp = s->me.scratchpad;
469 s->rd_scratchpad = s->me.scratchpad;
470 s->b_scratchpad = s->me.scratchpad;
471 s->obmc_scratchpad = s->me.scratchpad + 16;
475 av_freep(&s->edge_emu_buffer);
476 return AVERROR(ENOMEM);
480 * Allocate a frame buffer
/* Gets picture data via the (possibly threaded) get_buffer path; encoders
 * over-allocate by EDGE_WIDTH on each side and then offset data[] so the
 * visible area is edge-padded. Validates stride consistency and allocates
 * hwaccel private data and scratch buffers as needed.
 * NOTE(review): several condition lines, "goto fail"/return statements and
 * braces are missing from this excerpt. */
482 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
484 int edges_needed = av_codec_is_encoder(s->avctx->codec);
/* WM image/screen codecs use internal buffers with different dimensions;
 * user get_buffer callbacks are bypassed for them. */
488 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
489 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
490 s->codec_id != AV_CODEC_ID_MSS2) {
/* Encoder path: over-allocate for edge emulation. */
492 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
493 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
496 r = ff_thread_get_buffer(s->avctx, &pic->tf,
497 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
499 pic->f->width = s->avctx->width;
500 pic->f->height = s->avctx->height;
501 pic->f->format = s->avctx->pix_fmt;
502 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
505 if (r < 0 || !pic->f->buf[0]) {
506 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Shift data[] pointers past the edge padding and restore visible size. */
513 for (i = 0; pic->f->data[i]; i++) {
514 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
515 pic->f->linesize[i] +
516 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
517 pic->f->data[i] += offset;
519 pic->f->width = s->avctx->width;
520 pic->f->height = s->avctx->height;
523 if (s->avctx->hwaccel) {
524 assert(!pic->hwaccel_picture_private);
525 if (s->avctx->hwaccel->frame_priv_data_size) {
526 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
527 if (!pic->hwaccel_priv_buf) {
528 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
531 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* Strides must not change between frames within one context. */
535 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
536 s->uvlinesize != pic->f->linesize[1])) {
537 av_log(s->avctx, AV_LOG_ERROR,
538 "get_buffer() failed (stride changed)\n");
539 ff_mpeg_unref_picture(s, pic);
543 if (pic->f->linesize[1] != pic->f->linesize[2]) {
544 av_log(s->avctx, AV_LOG_ERROR,
545 "get_buffer() failed (uv stride mismatch)\n");
546 ff_mpeg_unref_picture(s, pic);
/* Lazily allocate linesize-dependent scratch buffers on first frame. */
550 if (!s->edge_emu_buffer &&
551 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
552 av_log(s->avctx, AV_LOG_ERROR,
553 "get_buffer() failed to allocate context scratch buffers.\n");
554 ff_mpeg_unref_picture(s, pic);
/* Releases all per-picture side-data buffers (variance/mean stats, skip,
 * qscale, mb_type, motion vectors, ref indices) and resets the recorded
 * allocation dimensions so a later ff_alloc_picture() re-allocates. */
561 void ff_free_picture_tables(Picture *pic)
565 pic->alloc_mb_width =
566 pic->alloc_mb_height = 0;
568 av_buffer_unref(&pic->mb_var_buf);
569 av_buffer_unref(&pic->mc_mb_var_buf);
570 av_buffer_unref(&pic->mb_mean_buf);
571 av_buffer_unref(&pic->mbskip_table_buf);
572 av_buffer_unref(&pic->qscale_table_buf);
573 av_buffer_unref(&pic->mb_type_buf);
575 for (i = 0; i < 2; i++) {
576 av_buffer_unref(&pic->motion_val_buf[i]);
577 av_buffer_unref(&pic->ref_index_buf[i]);
/*
 * Allocates the per-picture side-data buffers sized from the current
 * mb/b8 strides; encoder-only stat buffers and motion tables are gated on
 * conditions partially missing from this excerpt. Records the dimensions
 * used so ff_alloc_picture() can detect size changes. Returns 0 or
 * AVERROR(ENOMEM); partially-allocated buffers are left for the caller to
 * free via ff_free_picture_tables().
 */
581 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
583 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
584 const int mb_array_size = s->mb_stride * s->mb_height;
585 const int b8_array_size = s->b8_stride * s->mb_height * 2;
589 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
590 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
591 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
593 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
594 return AVERROR(ENOMEM);
/* Encoder-only statistics buffers (guard condition missing in excerpt). */
597 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
598 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
599 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
600 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
601 return AVERROR(ENOMEM);
/* Motion tables: needed for H.263-family, encoding, or MV export/debug. */
604 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv ||
605 (s->avctx->flags2 & CODEC_FLAG2_EXPORT_MVS)) {
606 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
607 int ref_index_size = 4 * mb_array_size;
609 for (i = 0; mv_size && i < 2; i++) {
610 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
611 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
612 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
613 return AVERROR(ENOMEM);
617 pic->alloc_mb_width = s->mb_width;
618 pic->alloc_mb_height = s->mb_height;
/* Makes every (present) per-picture side-data buffer writable via
 * av_buffer_make_writable, so a shared picture can be modified without
 * affecting other references. Returns 0 or a negative AVERROR.
 * NOTE(review): parts of the MAKE_WRITABLE macro body are missing from
 * this excerpt. */
623 static int make_tables_writable(Picture *pic)
626 #define MAKE_WRITABLE(table) \
629 (ret = av_buffer_make_writable(&pic->table)) < 0)\
633 MAKE_WRITABLE(mb_var_buf);
634 MAKE_WRITABLE(mc_mb_var_buf);
635 MAKE_WRITABLE(mb_mean_buf);
636 MAKE_WRITABLE(mbskip_table_buf);
637 MAKE_WRITABLE(qscale_table_buf);
638 MAKE_WRITABLE(mb_type_buf);
640 for (i = 0; i < 2; i++) {
641 MAKE_WRITABLE(motion_val_buf[i]);
642 MAKE_WRITABLE(ref_index_buf[i]);
649 * Allocate a Picture.
650 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Also (re)allocates the side-data tables when the mb dimensions changed
 * since the previous allocation, makes them writable, and publishes the
 * convenience pointers (mb_var, qscale_table, motion_val, ...) into the
 * underlying AVBuffer data. On any failure the picture is fully unreffed
 * and AVERROR(ENOMEM) is returned. */
652 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* Size change since last allocation -> drop the old tables first. */
656 if (pic->qscale_table_buf)
657 if ( pic->alloc_mb_width != s->mb_width
658 || pic->alloc_mb_height != s->mb_height)
659 ff_free_picture_tables(pic);
662 av_assert0(pic->f->data[0]);
665 av_assert0(!pic->f->buf[0]);
667 if (alloc_frame_buffer(s, pic) < 0)
/* First successful allocation fixes the context strides. */
670 s->linesize = pic->f->linesize[0];
671 s->uvlinesize = pic->f->linesize[1];
674 if (!pic->qscale_table_buf)
675 ret = alloc_picture_tables(s, pic);
677 ret = make_tables_writable(pic);
682 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
683 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
684 pic->mb_mean = pic->mb_mean_buf->data;
687 pic->mbskip_table = pic->mbskip_table_buf->data;
/* +2*mb_stride+1 skips the top/left guard band of the big_mb_num layout. */
688 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
689 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
691 if (pic->motion_val_buf[0]) {
692 for (i = 0; i < 2; i++) {
693 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
694 pic->ref_index[i] = pic->ref_index_buf[i]->data;
700 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
701 ff_mpeg_unref_picture(s, pic);
702 ff_free_picture_tables(pic);
703 return AVERROR(ENOMEM);
707 * Deallocate a picture.
/* Releases the frame buffer (through the threading layer unless the codec
 * is a WM image/screen codec), the hwaccel private buffer, and — if the
 * picture is flagged for reallocation — its side-data tables. Everything
 * past the mb_mean field is then zeroed, preserving the table pointers. */
709 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
/* Fields up to and including mb_mean survive the memset below. */
711 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
714 /* WM Image / Screen codecs allocate internal buffers with different
715 * dimensions / colorspaces; ignore user-defined callbacks for these. */
716 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
717 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
718 s->codec_id != AV_CODEC_ID_MSS2)
719 ff_thread_release_buffer(s->avctx, &pic->tf);
721 av_frame_unref(pic->f);
723 av_buffer_unref(&pic->hwaccel_priv_buf);
725 if (pic->needs_realloc)
726 ff_free_picture_tables(pic);
728 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/*
 * Points dst's side-data buffers at src's via new AVBuffer references
 * (only re-referencing when the underlying buffer actually differs), then
 * copies the derived data pointers and allocation dimensions. On a failed
 * av_buffer_ref all of dst's tables are freed and ENOMEM is returned.
 * NOTE(review): parts of the UPDATE_TABLE macro body are missing from
 * this excerpt.
 */
731 static int update_picture_tables(Picture *dst, Picture *src)
735 #define UPDATE_TABLE(table)\
738 (!dst->table || dst->table->buffer != src->table->buffer)) {\
739 av_buffer_unref(&dst->table);\
740 dst->table = av_buffer_ref(src->table);\
742 ff_free_picture_tables(dst);\
743 return AVERROR(ENOMEM);\
748 UPDATE_TABLE(mb_var_buf);
749 UPDATE_TABLE(mc_mb_var_buf);
750 UPDATE_TABLE(mb_mean_buf);
751 UPDATE_TABLE(mbskip_table_buf);
752 UPDATE_TABLE(qscale_table_buf);
753 UPDATE_TABLE(mb_type_buf);
754 for (i = 0; i < 2; i++) {
755 UPDATE_TABLE(motion_val_buf[i]);
756 UPDATE_TABLE(ref_index_buf[i]);
/* Plain pointers into the (now shared) buffers are copied directly. */
759 dst->mb_var = src->mb_var;
760 dst->mc_mb_var = src->mc_mb_var;
761 dst->mb_mean = src->mb_mean;
762 dst->mbskip_table = src->mbskip_table;
763 dst->qscale_table = src->qscale_table;
764 dst->mb_type = src->mb_type;
765 for (i = 0; i < 2; i++) {
766 dst->motion_val[i] = src->motion_val[i];
767 dst->ref_index[i] = src->ref_index[i];
770 dst->alloc_mb_width = src->alloc_mb_width;
771 dst->alloc_mb_height = src->alloc_mb_height;
/* Creates a new reference of src in dst: frame data (via the thread-aware
 * ref), side-data tables, hwaccel private buffer, and scalar metadata.
 * dst must be empty on entry. On any failure dst is unreffed and a
 * negative AVERROR is returned (the fail label is outside this excerpt's
 * visible lines). */
776 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
780 av_assert0(!dst->f->buf[0]);
781 av_assert0(src->f->buf[0]);
785 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
789 ret = update_picture_tables(dst, src);
793 if (src->hwaccel_picture_private) {
794 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
795 if (!dst->hwaccel_priv_buf)
797 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
/* Scalar metadata copied by value. */
800 dst->field_picture = src->field_picture;
801 dst->mb_var_sum = src->mb_var_sum;
802 dst->mc_mb_var_sum = src->mc_mb_var_sum;
803 dst->b_frame_score = src->b_frame_score;
804 dst->needs_realloc = src->needs_realloc;
805 dst->reference = src->reference;
806 dst->shared = src->shared;
810 ff_mpeg_unref_picture(s, dst);
/* Swaps the U and V block pointers (pblocks[4] <-> pblocks[5]); used for
 * the VCR2 codec tag (see init_duplicate_context / ff_update_duplicate_context).
 * NOTE(review): only the second half of the swap is visible here — the
 * temporary-save line is missing from this excerpt. */
814 static void exchange_uv(MpegEncContext *s)
819 s->pblocks[4] = s->pblocks[5];
/*
 * Initializes the per-slice-thread parts of an MpegEncContext: ME maps,
 * optional noise-reduction error sum, the DCT block array with its
 * pblocks[] pointers (U/V swapped for VCR2), and the H.263 AC prediction
 * values. Scratch buffers are allocated lazily elsewhere
 * (frame_size_alloc). Returns 0, or -1 on allocation failure — cleanup is
 * the caller's job via ff_mpv_common_end().
 * NOTE(review): encoder-only guards and some lines are missing from this
 * excerpt.
 */
823 static int init_duplicate_context(MpegEncContext *s)
825 int y_size = s->b8_stride * (2 * s->mb_height + 1);
826 int c_size = s->mb_stride * (s->mb_height + 1);
827 int yc_size = y_size + 2 * c_size;
/* Odd mb_height needs one extra row of luma + chroma entries. */
830 if (s->mb_height & 1)
831 yc_size += 2*s->b8_stride + 2*s->mb_stride;
838 s->obmc_scratchpad = NULL;
841 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
842 ME_MAP_SIZE * sizeof(uint32_t), fail)
843 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
844 ME_MAP_SIZE * sizeof(uint32_t), fail)
845 if (s->avctx->noise_reduction) {
846 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
847 2 * 64 * sizeof(int), fail)
850 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
851 s->block = s->blocks[0];
853 for (i = 0; i < 12; i++) {
854 s->pblocks[i] = &s->block[i];
/* VCR2 stores chroma planes in the opposite order. */
856 if (s->avctx->codec_tag == AV_RL32("VCR2"))
859 if (s->out_format == FMT_H263) {
861 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
862 yc_size * sizeof(int16_t) * 16, fail);
863 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
864 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
865 s->ac_val[2] = s->ac_val[1] + c_size;
870 return -1; // free() through ff_mpv_common_end()
/* Frees everything init_duplicate_context / frame_size_alloc allocated for
 * one slice-thread context. obmc_scratchpad aliases me.scratchpad, so it
 * is only NULLed, never freed separately. Safe to call on a partially
 * initialized context (av_freep tolerates NULL). */
873 static void free_duplicate_context(MpegEncContext *s)
878 av_freep(&s->edge_emu_buffer);
879 av_freep(&s->me.scratchpad);
883 s->obmc_scratchpad = NULL;
885 av_freep(&s->dct_error_sum);
886 av_freep(&s->me.map);
887 av_freep(&s->me.score_map);
888 av_freep(&s->blocks);
889 av_freep(&s->ac_val_base);
/* Copies the per-thread pointer fields from src into bak so they survive
 * the wholesale memcpy in ff_update_duplicate_context().
 * NOTE(review): most COPY() lines are missing from this excerpt — the
 * visible ones are only a subset of the fields preserved upstream. */
893 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
895 #define COPY(a) bak->a = src->a
896 COPY(edge_emu_buffer);
901 COPY(obmc_scratchpad);
908 COPY(me.map_generation);
/*
 * Synchronizes a slice-thread context with the master: copies the whole
 * struct from src, then restores dst's own per-thread buffers saved in a
 * stack backup, rebuilds pblocks[] (with the VCR2 U/V swap), and lazily
 * allocates scratch buffers if dst has none yet. Returns 0 or a negative
 * AVERROR.
 */
920 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
924 // FIXME copy only needed parts
926 backup_duplicate_context(&bak, dst);
927 memcpy(dst, src, sizeof(MpegEncContext));
928 backup_duplicate_context(dst, &bak);
929 for (i = 0; i < 12; i++) {
930 dst->pblocks[i] = &dst->block[i];
932 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
934 if (!dst->edge_emu_buffer &&
935 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
936 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
937 "scratch buffers.\n");
940 // STOP_TIMER("update_duplicate_context")
941 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/*
 * Frame-threading sync: copies decoding state from the source thread's
 * context (s1) into this thread's context (s) — picture references,
 * dimensions, timing, bug workarounds, bitstream buffer, and scratch
 * allocations. Called by the frame-threading framework between frames.
 * NOTE(review): many guard conditions, early returns and braces are
 * missing from this excerpt — verify against upstream before editing.
 */
945 int ff_mpeg_update_thread_context(AVCodecContext *dst,
946 const AVCodecContext *src)
949 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
956 // FIXME can parameters change on I-frames?
957 // in that case dst may need a reinit
/* First call: clone the whole context, then re-init what must be owned. */
958 if (!s->context_initialized) {
959 memcpy(s, s1, sizeof(MpegEncContext));
962 s->bitstream_buffer = NULL;
963 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
965 if (s1->context_initialized){
966 // s->picture_range_start += MAX_PICTURE_COUNT;
967 // s->picture_range_end += MAX_PICTURE_COUNT;
969 if((ret = ff_mpv_common_init(s)) < 0){
970 memset(s, 0, sizeof(MpegEncContext));
/* Dimension change requires a frame-size re-init. */
977 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
978 s->context_reinit = 0;
979 s->height = s1->height;
980 s->width = s1->width;
981 if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
985 s->avctx->coded_height = s1->avctx->coded_height;
986 s->avctx->coded_width = s1->avctx->coded_width;
987 s->avctx->width = s1->avctx->width;
988 s->avctx->height = s1->avctx->height;
990 s->coded_picture_number = s1->coded_picture_number;
991 s->picture_number = s1->picture_number;
993 av_assert0(!s->picture || s->picture != s1->picture);
/* Re-reference every picture the source thread holds. */
995 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
996 ff_mpeg_unref_picture(s, &s->picture[i]);
997 if (s1->picture[i].f->buf[0] &&
998 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
1002 #define UPDATE_PICTURE(pic)\
1004 ff_mpeg_unref_picture(s, &s->pic);\
1005 if (s1->pic.f && s1->pic.f->buf[0])\
1006 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
1008 ret = update_picture_tables(&s->pic, &s1->pic);\
1013 UPDATE_PICTURE(current_picture);
1014 UPDATE_PICTURE(last_picture);
1015 UPDATE_PICTURE(next_picture);
/* The *_ptr fields must point into s->picture[], not s1->picture[]. */
1017 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1018 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1019 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1021 // Error/bug resilience
1022 s->next_p_frame_damaged = s1->next_p_frame_damaged;
1023 s->workaround_bugs = s1->workaround_bugs;
1024 s->padding_bug_score = s1->padding_bug_score;
1026 // MPEG4 timing info
/* Bulk-copies the contiguous field range [last_time_base, pb_field_time]. */
1027 memcpy(&s->last_time_base, &s1->last_time_base,
1028 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1029 (char *) &s1->last_time_base);
1032 s->max_b_frames = s1->max_b_frames;
1033 s->low_delay = s1->low_delay;
1034 s->droppable = s1->droppable;
1036 // DivX handling (doesn't work)
1037 s->divx_packed = s1->divx_packed;
1039 if (s1->bitstream_buffer) {
1040 if (s1->bitstream_buffer_size +
1041 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
1042 av_fast_malloc(&s->bitstream_buffer,
1043 &s->allocated_bitstream_buffer_size,
1044 s1->allocated_bitstream_buffer_size);
1045 s->bitstream_buffer_size = s1->bitstream_buffer_size;
1046 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1047 s1->bitstream_buffer_size);
1048 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1049 FF_INPUT_BUFFER_PADDING_SIZE);
1052 // linesize dependend scratch buffer allocation
1053 if (!s->edge_emu_buffer)
1055 if (frame_size_alloc(s, s1->linesize) < 0) {
1056 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1057 "scratch buffers.\n");
1058 return AVERROR(ENOMEM);
1061 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1062 "be allocated due to unknown size.\n");
1065 // MPEG2/interlacing info
/* Bulk-copies [progressive_sequence, rtp_mode). */
1066 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1067 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1069 if (!s1->first_field) {
1070 s->last_pict_type = s1->pict_type;
1071 if (s1->current_picture_ptr)
1072 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1079 * Set the given MpegEncContext to common defaults
1080 * (same for encoding and decoding).
1081 * The changed fields will not depend upon the
1082 * prior state of the MpegEncContext.
1084 void ff_mpv_common_defaults(MpegEncContext *s)
/* MPEG-1 DC scales and identity chroma qscale are the shared defaults. */
1086 s->y_dc_scale_table =
1087 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1088 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1089 s->progressive_frame = 1;
1090 s->progressive_sequence = 1;
1091 s->picture_structure = PICT_FRAME;
1093 s->coded_picture_number = 0;
1094 s->picture_number = 0;
1099 s->slice_context_count = 1;
1103 * Set the given MpegEncContext to defaults for decoding.
1104 * the changed fields will not depend upon
1105 * the prior state of the MpegEncContext.
1107 void ff_mpv_decode_defaults(MpegEncContext *s)
/* Currently just the common defaults; decoder-specific lines (if any)
 * are missing from this excerpt. */
1109 ff_mpv_common_defaults(s);
/*
 * Initializes the error-resilience context: mirrors the MB geometry from
 * the MpegEncContext, allocates the temp/status tables, and installs
 * mpeg_er_decode_mb as the concealment callback. Returns 0 or
 * AVERROR(ENOMEM) (freeing both buffers on failure).
 */
1112 static int init_er(MpegEncContext *s)
1114 ERContext *er = &s->er;
1115 int mb_array_size = s->mb_height * s->mb_stride;
1118 er->avctx = s->avctx;
1119 er->mecc = &s->mecc;
1121 er->mb_index2xy = s->mb_index2xy;
1122 er->mb_num = s->mb_num;
1123 er->mb_width = s->mb_width;
1124 er->mb_height = s->mb_height;
1125 er->mb_stride = s->mb_stride;
1126 er->b8_stride = s->b8_stride;
1128 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1129 er->error_status_table = av_mallocz(mb_array_size);
1130 if (!er->er_temp_buffer || !er->error_status_table)
1133 er->mbskip_table = s->mbskip_table;
1134 er->mbintra_table = s->mbintra_table;
1136 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1137 er->dc_val[i] = s->dc_val[i];
1139 er->decode_mb = mpeg_er_decode_mb;
/* fail path: */
1144 av_freep(&er->er_temp_buffer);
1145 av_freep(&er->error_status_table);
1146 return AVERROR(ENOMEM);
1150 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Computes all MB/block geometry from s->width (s->mb_height is set by the
 * caller, ff_mpv_common_init) and allocates the resolution-dependent
 * tables: mb_index2xy, MV tables, encoder stats, interlaced-direct-mode
 * tables, H.263 coded-block/cbp tables, DC prediction values, and the
 * intra/skip tables. On allocation failure jumps to the fail label and
 * returns AVERROR(ENOMEM); freeing is left to the caller.
 * NOTE(review): several guard conditions (encoder-only sections) and
 * braces are missing from this excerpt. */
1152 static int init_context_frame(MpegEncContext *s)
1154 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1156 s->mb_width = (s->width + 15) / 16;
/* +1 column guard band in all strides. */
1157 s->mb_stride = s->mb_width + 1;
1158 s->b8_stride = s->mb_width * 2 + 1;
1159 mb_array_size = s->mb_height * s->mb_stride;
1160 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1162 /* set default edge pos, will be overridden
1163 * in decode_header if needed */
1164 s->h_edge_pos = s->mb_width * 16;
1165 s->v_edge_pos = s->mb_height * 16;
1167 s->mb_num = s->mb_width * s->mb_height;
1172 s->block_wrap[3] = s->b8_stride;
1174 s->block_wrap[5] = s->mb_stride;
1176 y_size = s->b8_stride * (2 * s->mb_height + 1);
1177 c_size = s->mb_stride * (s->mb_height + 1);
1178 yc_size = y_size + 2 * c_size;
1180 if (s->mb_height & 1)
1181 yc_size += 2*s->b8_stride + 2*s->mb_stride;
1183 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
1184 for (y = 0; y < s->mb_height; y++)
1185 for (x = 0; x < s->mb_width; x++)
1186 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1188 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1191 /* Allocate MV tables */
1192 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1193 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1194 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1195 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1196 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1197 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
/* Working pointers skip the top/left guard row/column. */
1198 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1199 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1200 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1201 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1202 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1203 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1205 /* Allocate MB type table */
1206 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1208 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1210 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1211 mb_array_size * sizeof(float), fail);
1212 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1213 mb_array_size * sizeof(float), fail);
1217 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1218 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1219 /* interlaced direct mode decoding tables */
1220 for (i = 0; i < 2; i++) {
1222 for (j = 0; j < 2; j++) {
1223 for (k = 0; k < 2; k++) {
1224 FF_ALLOCZ_OR_GOTO(s->avctx,
1225 s->b_field_mv_table_base[i][j][k],
1226 mv_table_size * 2 * sizeof(int16_t),
1228 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1231 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1232 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1233 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1235 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1238 if (s->out_format == FMT_H263) {
1240 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1241 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1243 /* cbp, ac_pred, pred_dir */
1244 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1245 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1248 if (s->h263_pred || s->h263_plus || !s->encoding) {
1250 // MN: we need these for error resilience of intra-frames
1251 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1252 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1253 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1254 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor (128 << 3). */
1255 for (i = 0; i < yc_size; i++)
1256 s->dc_val_base[i] = 1024;
1259 /* which mb is a intra block */
1260 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1261 memset(s->mbintra_table, 1, mb_array_size);
1263 /* init macroblock skip table */
1264 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1265 // Note the + 1 is for a quicker mpeg4 slice_end detection
/* fail path: */
1269 return AVERROR(ENOMEM);
1273 * init common structure for both encoder and decoder.
1274 * this assumes that some variables like width/height are already set
/**
 * Initialize the common (encoder + decoder) parts of MpegEncContext:
 * slice/thread contexts, picture array, frame buffers and per-resolution
 * tables (via init_context_frame()).
 * NOTE(review): this listing has gaps (jumps in the embedded line numbers);
 * the error paths (early returns, "fail:" cleanup label) are not visible
 * here — confirm against the complete file before touching control flow.
 */
1276 av_cold int ff_mpv_common_init(MpegEncContext *s)
/* one slice context per thread when slice threading is active, else 1 */
1279 int nb_slices = (HAVE_THREADS &&
1280 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1281 s->avctx->thread_count : 1;
1283 if (s->encoding && s->avctx->slices)
1284 nb_slices = s->avctx->slices;
/* interlaced MPEG-2: mb_height counts field macroblock rows (2 per 32 luma lines) */
1286 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1287 s->mb_height = (s->height + 31) / 32 * 2;
1289 s->mb_height = (s->height + 15) / 16;
1291 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1292 av_log(s->avctx, AV_LOG_ERROR,
1293 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* clamp slice count: never more slices than MB rows or than MAX_THREADS */
1297 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1300 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1302 max_slices = MAX_THREADS;
1303 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1304 " reducing to %d\n", nb_slices, max_slices);
1305 nb_slices = max_slices;
1308 if ((s->width || s->height) &&
1309 av_image_check_size(s->width, s->height, 0, s->avctx))
1314 s->flags = s->avctx->flags;
1315 s->flags2 = s->avctx->flags2;
1317 /* set chroma shifts */
1318 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1320 &s->chroma_y_shift);
1322 /* convert fourcc to upper case */
1323 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1325 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
/* picture pool plus the four special pictures get their AVFrames here;
 * allocation failures presumably jump to the (elided) fail label */
1327 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1328 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1329 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1330 s->picture[i].f = av_frame_alloc();
1331 if (!s->picture[i].f)
1334 memset(&s->next_picture, 0, sizeof(s->next_picture));
1335 memset(&s->last_picture, 0, sizeof(s->last_picture));
1336 memset(&s->current_picture, 0, sizeof(s->current_picture));
1337 memset(&s->new_picture, 0, sizeof(s->new_picture));
1338 s->next_picture.f = av_frame_alloc();
1339 if (!s->next_picture.f)
1341 s->last_picture.f = av_frame_alloc();
1342 if (!s->last_picture.f)
1344 s->current_picture.f = av_frame_alloc();
1345 if (!s->current_picture.f)
1347 s->new_picture.f = av_frame_alloc();
1348 if (!s->new_picture.f)
1351 if (init_context_frame(s))
1354 s->parse_context.state = -1;
1356 s->context_initialized = 1;
1357 s->thread_context[0] = s;
1359 // if (s->width && s->height) {
/* duplicate the context once per extra slice, then assign each slice a
 * roughly equal band of macroblock rows [start_mb_y, end_mb_y) */
1360 if (nb_slices > 1) {
1361 for (i = 1; i < nb_slices; i++) {
1362 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1363 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1366 for (i = 0; i < nb_slices; i++) {
1367 if (init_duplicate_context(s->thread_context[i]) < 0)
1369 s->thread_context[i]->start_mb_y =
1370 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1371 s->thread_context[i]->end_mb_y =
1372 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1375 if (init_duplicate_context(s) < 0)
1378 s->end_mb_y = s->mb_height;
1380 s->slice_context_count = nb_slices;
/* cleanup path: tear everything down again on failure */
1385 ff_mpv_common_end(s);
1390 * Frees and resets MpegEncContext fields depending on the resolution.
1391 * Is used during resolution changes to avoid a full reinitialization of the
/* Free the resolution-dependent tables allocated by init_context_frame();
 * companion of that function, used by ff_mpv_common_frame_size_change() and
 * ff_mpv_common_end(). Pointers derived from the *_base allocations are
 * reset to NULL so a later re-init starts clean.
 * NOTE(review): closing braces / return are elided in this listing. */
1394 static int free_context_frame(MpegEncContext *s)
1398 av_freep(&s->mb_type);
1399 av_freep(&s->p_mv_table_base);
1400 av_freep(&s->b_forw_mv_table_base);
1401 av_freep(&s->b_back_mv_table_base);
1402 av_freep(&s->b_bidir_forw_mv_table_base);
1403 av_freep(&s->b_bidir_back_mv_table_base);
1404 av_freep(&s->b_direct_mv_table_base);
/* these were offset views into the *_base buffers (base + mb_stride + 1) */
1405 s->p_mv_table = NULL;
1406 s->b_forw_mv_table = NULL;
1407 s->b_back_mv_table = NULL;
1408 s->b_bidir_forw_mv_table = NULL;
1409 s->b_bidir_back_mv_table = NULL;
1410 s->b_direct_mv_table = NULL;
/* interlaced / field-mode motion-vector tables, indexed [i][j][k] */
1411 for (i = 0; i < 2; i++) {
1412 for (j = 0; j < 2; j++) {
1413 for (k = 0; k < 2; k++) {
1414 av_freep(&s->b_field_mv_table_base[i][j][k]);
1415 s->b_field_mv_table[i][j][k] = NULL;
1417 av_freep(&s->b_field_select_table[i][j]);
1418 av_freep(&s->p_field_mv_table_base[i][j]);
1419 s->p_field_mv_table[i][j] = NULL;
1421 av_freep(&s->p_field_select_table[i]);
1424 av_freep(&s->dc_val_base);
1425 av_freep(&s->coded_block_base);
1426 av_freep(&s->mbintra_table);
1427 av_freep(&s->cbp_table);
1428 av_freep(&s->pred_dir_table);
1430 av_freep(&s->mbskip_table);
1432 av_freep(&s->er.error_status_table);
1433 av_freep(&s->er.er_temp_buffer);
1434 av_freep(&s->mb_index2xy);
1435 av_freep(&s->lambda_table);
1437 av_freep(&s->cplx_tab);
1438 av_freep(&s->bits_tab);
/* force re-derivation of line sizes on the next frame allocation */
1440 s->linesize = s->uvlinesize = 0;
/**
 * Re-initialize the per-resolution state after a mid-stream frame size
 * change: tears down slice contexts and per-resolution tables, recomputes
 * mb_height, then rebuilds everything via init_context_frame().
 * NOTE(review): gaps in this listing hide the `else` branches, error
 * returns and closing braces — verify against the complete file.
 */
1445 int ff_mpv_common_frame_size_change(MpegEncContext *s)
/* drop all duplicated slice/thread contexts (index 0 is `s` itself) */
1449 if (s->slice_context_count > 1) {
1450 for (i = 0; i < s->slice_context_count; i++) {
1451 free_duplicate_context(s->thread_context[i]);
1453 for (i = 1; i < s->slice_context_count; i++) {
1454 av_freep(&s->thread_context[i]);
1457 free_duplicate_context(s);
1459 if ((err = free_context_frame(s)) < 0)
/* existing pictures keep their buffers but must be reallocated lazily */
1463 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1464 s->picture[i].needs_realloc = 1;
1467 s->last_picture_ptr =
1468 s->next_picture_ptr =
1469 s->current_picture_ptr = NULL;
/* recompute mb_height: field MB rows for interlaced MPEG-2, frame rows otherwise */
1472 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1473 s->mb_height = (s->height + 31) / 32 * 2;
1475 s->mb_height = (s->height + 15) / 16;
1477 if ((s->width || s->height) &&
1478 av_image_check_size(s->width, s->height, 0, s->avctx))
1479 return AVERROR_INVALIDDATA;
1481 if ((err = init_context_frame(s)))
1484 s->thread_context[0] = s;
1486 if (s->width && s->height) {
1487 int nb_slices = s->slice_context_count;
/* rebuild the duplicated slice contexts and their MB-row bands,
 * mirroring the same loop in ff_mpv_common_init() */
1488 if (nb_slices > 1) {
1489 for (i = 1; i < nb_slices; i++) {
1490 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1491 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1494 for (i = 0; i < nb_slices; i++) {
1495 if (init_duplicate_context(s->thread_context[i]) < 0)
1497 s->thread_context[i]->start_mb_y =
1498 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1499 s->thread_context[i]->end_mb_y =
1500 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1503 err = init_duplicate_context(s);
1507 s->end_mb_y = s->mb_height;
1509 s->slice_context_count = nb_slices;
/* failure path: full teardown */
1514 ff_mpv_common_end(s);
1518 /* free the common structure for both encoder and decoder */
/**
 * Free everything allocated by ff_mpv_common_init() (and by lazy
 * allocations since): slice contexts, parse/bitstream buffers, the
 * picture pool, the four special pictures, and the per-resolution tables.
 * Safe to call on a partially initialized context (used as the error
 * path of ff_mpv_common_init()).
 */
1519 void ff_mpv_common_end(MpegEncContext *s)
1523 if (s->slice_context_count > 1) {
1524 for (i = 0; i < s->slice_context_count; i++) {
1525 free_duplicate_context(s->thread_context[i]);
/* index 0 is `s` itself and must not be freed */
1527 for (i = 1; i < s->slice_context_count; i++) {
1528 av_freep(&s->thread_context[i]);
1530 s->slice_context_count = 1;
1531 } else free_duplicate_context(s);
1533 av_freep(&s->parse_context.buffer);
1534 s->parse_context.buffer_size = 0;
1536 av_freep(&s->bitstream_buffer);
1537 s->allocated_bitstream_buffer_size = 0;
/* for each pool entry: drop side tables, unref buffers, free the AVFrame */
1540 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1541 ff_free_picture_tables(&s->picture[i]);
1542 ff_mpeg_unref_picture(s, &s->picture[i]);
1543 av_frame_free(&s->picture[i].f);
1546 av_freep(&s->picture);
1547 ff_free_picture_tables(&s->last_picture);
1548 ff_mpeg_unref_picture(s, &s->last_picture);
1549 av_frame_free(&s->last_picture.f);
1550 ff_free_picture_tables(&s->current_picture);
1551 ff_mpeg_unref_picture(s, &s->current_picture);
1552 av_frame_free(&s->current_picture.f);
1553 ff_free_picture_tables(&s->next_picture);
1554 ff_mpeg_unref_picture(s, &s->next_picture);
1555 av_frame_free(&s->next_picture.f);
1556 ff_free_picture_tables(&s->new_picture);
1557 ff_mpeg_unref_picture(s, &s->new_picture);
1558 av_frame_free(&s->new_picture.f);
1560 free_context_frame(s);
/* reset state so a future ff_mpv_common_init() starts from scratch */
1562 s->context_initialized = 0;
1563 s->last_picture_ptr =
1564 s->next_picture_ptr =
1565 s->current_picture_ptr = NULL;
1566 s->linesize = s->uvlinesize = 0;
/**
 * Build the run/level helper tables (max_level[], max_run[], index_run[])
 * for an RLTable, once per `last` value (0 = not last coefficient,
 * 1 = last coefficient). When static_store is non-NULL the results live
 * in the caller-provided static buffer; otherwise they are av_malloc'ed.
 * NOTE(review): lines 1578-1589 etc. are elided in this listing (the
 * start/end setup and early return body are not visible).
 */
1569 av_cold void ff_init_rl(RLTable *rl,
1570 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1572 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1573 uint8_t index_run[MAX_RUN + 1];
1574 int last, run, level, start, end, i;
1576 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1577 if (static_store && rl->max_level[0])
1580 /* compute max_level[], max_run[] and index_run[] */
1581 for (last = 0; last < 2; last++) {
/* index_run entries default to rl->n, meaning "run value not present" */
1590 memset(max_level, 0, MAX_RUN + 1);
1591 memset(max_run, 0, MAX_LEVEL + 1);
1592 memset(index_run, rl->n, MAX_RUN + 1);
1593 for (i = start; i < end; i++) {
1594 run = rl->table_run[i];
1595 level = rl->table_level[i];
1596 if (index_run[run] == rl->n)
1598 if (level > max_level[run])
1599 max_level[run] = level;
1600 if (run > max_run[level])
1601 max_run[level] = run;
/* static layout: [0..MAX_RUN] max_level, then [..+MAX_LEVEL] max_run,
 * then [..+MAX_RUN] index_run — matching the static_store row size */
1604 rl->max_level[last] = static_store[last];
1606 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1607 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1609 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1611 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1612 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1614 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1616 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1617 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Precompute rl->rl_vlc[q] for every qscale q in [0, 32): a table mapping
 * each VLC state directly to (len, run, level) with dequantization
 * (level * qmul + qadd) folded in, so the decoder's inner loop avoids a
 * separate dequant step.
 * NOTE(review): elided lines hide qmul's derivation and the handling
 * bodies for the illegal/escape/continuation cases — confirm upstream.
 */
1621 av_cold void ff_init_vlc_rl(RLTable *rl, unsigned static_size)
/* temporary VLC built on the stack, then baked into rl_vlc[] below */
1624 VLC_TYPE table[1500][2] = {{0}};
1625 VLC vlc = { .table = table, .table_allocated = static_size };
1626 av_assert0(static_size <= FF_ARRAY_ELEMS(table));
1627 init_vlc(&vlc, 9, rl->n + 1, &rl->table_vlc[0][1], 4, 2, &rl->table_vlc[0][0], 4, 2, INIT_VLC_USE_NEW_STATIC);
1629 for (q = 0; q < 32; q++) {
/* (q - 1) | 1 forces qadd odd; presumably qadd is zeroed for q==0 in an
 * elided line — TODO confirm against the full source */
1631 int qadd = (q - 1) | 1;
1637 for (i = 0; i < vlc.table_size; i++) {
1638 int code = vlc.table[i][0];
1639 int len = vlc.table[i][1];
1642 if (len == 0) { // illegal code
1645 } else if (len < 0) { // more bits needed
1649 if (code == rl->n) { // esc
1653 run = rl->table_run[code] + 1;
1654 level = rl->table_level[code] * qmul + qadd;
/* +192 marks "last coefficient" codes in the run field */
1655 if (code >= rl->last) run += 192;
1658 rl->rl_vlc[q][i].len = len;
1659 rl->rl_vlc[q][i].level = level;
1660 rl->rl_vlc[q][i].run = run;
/* Unref every picture in the pool that is not currently a reference frame,
 * returning its buffers without freeing the Picture slot itself. */
1665 static void release_unused_pictures(MpegEncContext *s)
1669 /* release non reference frames */
1670 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1671 if (!s->picture[i].reference)
1672 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Return nonzero if `pic` may be recycled: it must not be the current
 * last-picture, and is free if it has no buffer, or needs reallocation
 * while not held as a delayed reference.
 * NOTE(review): the per-condition return statements are elided in this
 * listing; only the conditions themselves are visible. */
1676 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1678 if (pic == s->last_picture_ptr)
1680 if (!pic->f->buf[0])
1682 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Find a recyclable slot in s->picture[]. First pass prefers slots with no
 * buffer at all; second pass accepts anything pic_is_unused() allows.
 * Running out of slots is an internal error for a spec-conformant stream,
 * hence the deliberate hard failure (see the long comment below). */
1687 static int find_unused_picture(MpegEncContext *s, int shared)
1692 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1693 if (!s->picture[i].f->buf[0] && &s->picture[i] != s->last_picture_ptr)
1697 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1698 if (pic_is_unused(s, &s->picture[i]))
1703 av_log(s->avctx, AV_LOG_FATAL,
1704 "Internal error, picture buffer overflow\n");
1705 /* We could return -1, but the codec would crash trying to draw into a
1706 * non-existing frame anyway. This is safer than waiting for a random crash.
1707 * Also the return of this is never useful, an encoder must only allocate
1708 * as much as allowed in the specification. This has no relationship to how
1709 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1710 * enough for such valid streams).
1711 * Plus, a decoder has to check stream validity and remove frames if too
1712 * many reference frames are around. Waiting for "OOM" is not correct at
1713 * all. Similarly, missing reference frames have to be replaced by
1714 * interpolated/MC frames, anything else is a bug in the codec ...
/* Public wrapper around find_unused_picture(): additionally services a
 * pending needs_realloc request on the chosen slot by dropping its side
 * tables and buffer references before handing the index back. */
1720 int ff_find_unused_picture(MpegEncContext *s, int shared)
1722 int ret = find_unused_picture(s, shared);
1724 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1725 if (s->picture[ret].needs_realloc) {
1726 s->picture[ret].needs_realloc = 0;
1727 ff_free_picture_tables(&s->picture[ret]);
1728 ff_mpeg_unref_picture(s, &s->picture[ret]);
/* Fill a (planar YUV — assumed from the data[0..2] usage) frame with mid
 * gray: luma and both chroma planes set to 0x80, honoring the format's
 * chroma subsampling. Used by the FF_DEBUG_NOMC debug path. */
1734 static void gray_frame(AVFrame *frame)
1736 int i, h_chroma_shift, v_chroma_shift;
1738 av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1740 for(i=0; i<frame->height; i++)
1741 memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
/* chroma planes are shifted down by the subsampling factors */
1742 for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1743 memset(frame->data[1] + frame->linesize[1]*i,
1744 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1745 memset(frame->data[2] + frame->linesize[2]*i,
1746 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1751 * generic function called after decoding
1752 * the header and before a frame is decoded.
/**
 * Prepare decoder state for one frame: recycle stale pictures, obtain and
 * reference the current picture, synthesize dummy last/next reference
 * pictures when the stream starts on a non-keyframe or B-frame, adjust
 * data/linesize for field pictures, and select the dequantizers.
 * NOTE(review): the embedded line numbers jump throughout — `else`
 * branches, `return ret;` statements and closing braces are elided here;
 * do not reason about exact control flow from this listing alone.
 */
1754 int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1760 if (!ff_thread_can_start_frame(avctx)) {
1761 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1765 /* mark & release old frames */
1766 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1767 s->last_picture_ptr != s->next_picture_ptr &&
1768 s->last_picture_ptr->f->buf[0]) {
1769 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1772 /* release forgotten pictures */
1773 /* if (mpeg124/h263) */
1774 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1775 if (&s->picture[i] != s->last_picture_ptr &&
1776 &s->picture[i] != s->next_picture_ptr &&
1777 s->picture[i].reference && !s->picture[i].needs_realloc) {
/* under frame threading a lingering reference is expected, not an error */
1778 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1779 av_log(avctx, AV_LOG_ERROR,
1780 "releasing zombie picture\n");
1781 ff_mpeg_unref_picture(s, &s->picture[i]);
1785 ff_mpeg_unref_picture(s, &s->current_picture);
1787 release_unused_pictures(s);
1789 if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1790 // we already have a unused image
1791 // (maybe it was set before reading the header)
1792 pic = s->current_picture_ptr;
1794 i = ff_find_unused_picture(s, 0);
1796 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1799 pic = &s->picture[i];
1803 if (!s->droppable) {
1804 if (s->pict_type != AV_PICTURE_TYPE_B)
1808 pic->f->coded_picture_number = s->coded_picture_number++;
1810 if (ff_alloc_picture(s, pic, 0) < 0)
1813 s->current_picture_ptr = pic;
1814 // FIXME use only the vars from current_pic
1815 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1816 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1817 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* for field pictures, derive top_field_first from which field comes first */
1818 if (s->picture_structure != PICT_FRAME)
1819 s->current_picture_ptr->f->top_field_first =
1820 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1822 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1823 !s->progressive_sequence;
1824 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1826 s->current_picture_ptr->f->pict_type = s->pict_type;
1827 // if (s->flags && CODEC_FLAG_QSCALE)
1828 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1829 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1831 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1832 s->current_picture_ptr)) < 0)
/* rotate the reference pictures: non-B frames become the new `next` */
1835 if (s->pict_type != AV_PICTURE_TYPE_B) {
1836 s->last_picture_ptr = s->next_picture_ptr;
1838 s->next_picture_ptr = s->current_picture_ptr;
1840 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1841 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1842 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1843 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1844 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1845 s->pict_type, s->droppable);
/* no usable last picture (stream starts on P/B or a field keyframe):
 * fabricate a gray dummy so motion compensation has something to read */
1847 if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1848 (s->pict_type != AV_PICTURE_TYPE_I ||
1849 s->picture_structure != PICT_FRAME)) {
1850 int h_chroma_shift, v_chroma_shift;
1851 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1852 &h_chroma_shift, &v_chroma_shift);
1853 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1854 av_log(avctx, AV_LOG_DEBUG,
1855 "allocating dummy last picture for B frame\n");
1856 else if (s->pict_type != AV_PICTURE_TYPE_I)
1857 av_log(avctx, AV_LOG_ERROR,
1858 "warning: first frame is no keyframe\n");
1859 else if (s->picture_structure != PICT_FRAME)
1860 av_log(avctx, AV_LOG_DEBUG,
1861 "allocate dummy last picture for field based first keyframe\n");
1863 /* Allocate a dummy frame */
1864 i = ff_find_unused_picture(s, 0);
1866 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1869 s->last_picture_ptr = &s->picture[i];
1871 s->last_picture_ptr->reference = 3;
1872 s->last_picture_ptr->f->key_frame = 0;
1873 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1875 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1876 s->last_picture_ptr = NULL;
/* fill the dummy with neutral gray (skip when a hwaccel owns the surface) */
1880 if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1881 for(i=0; i<avctx->height; i++)
1882 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1883 0x80, avctx->width);
1884 for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1885 memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1886 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1887 memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1888 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
/* FLV1/H.263 use luma 16 (black) instead of mid gray for the dummy */
1891 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1892 for(i=0; i<avctx->height; i++)
1893 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
/* mark both fields complete so frame-threaded consumers never wait on it */
1897 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1898 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* same trick for a missing `next` reference before a B-frame */
1900 if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1901 s->pict_type == AV_PICTURE_TYPE_B) {
1902 /* Allocate a dummy frame */
1903 i = ff_find_unused_picture(s, 0);
1905 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1908 s->next_picture_ptr = &s->picture[i];
1910 s->next_picture_ptr->reference = 3;
1911 s->next_picture_ptr->f->key_frame = 0;
1912 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1914 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1915 s->next_picture_ptr = NULL;
1918 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1919 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1922 #if 0 // BUFREF-FIXME
1923 memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1924 memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
/* refresh the embedded last/next Picture copies from their pointers */
1926 if (s->last_picture_ptr) {
1927 ff_mpeg_unref_picture(s, &s->last_picture);
1928 if (s->last_picture_ptr->f->buf[0] &&
1929 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1930 s->last_picture_ptr)) < 0)
1933 if (s->next_picture_ptr) {
1934 ff_mpeg_unref_picture(s, &s->next_picture);
1935 if (s->next_picture_ptr->f->buf[0] &&
1936 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1937 s->next_picture_ptr)) < 0)
1941 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1942 s->last_picture_ptr->f->buf[0]));
/* field picture: point data at the selected field and double the strides */
1944 if (s->picture_structure!= PICT_FRAME) {
1946 for (i = 0; i < 4; i++) {
1947 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1948 s->current_picture.f->data[i] +=
1949 s->current_picture.f->linesize[i];
1951 s->current_picture.f->linesize[i] *= 2;
1952 s->last_picture.f->linesize[i] *= 2;
1953 s->next_picture.f->linesize[i] *= 2;
1957 s->err_recognition = avctx->err_recognition;
1959 /* set dequantizer, we can't do it during init as
1960 * it might change for mpeg4 and we can't do it in the header
1961 * decode as init is not called for mpeg4 there yet */
1962 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1963 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1964 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1965 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1966 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1967 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1969 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1970 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
/* debug aid: blank the frame so only reconstructed residue is visible */
1973 if (s->avctx->debug & FF_DEBUG_NOMC) {
1974 gray_frame(s->current_picture_ptr->f);
1980 /* called after a frame has been decoded. */
/* Finish a decoded frame: if it is a reference picture, report it fully
 * decoded so frame-threaded consumers waiting on it can proceed. */
1981 void ff_mpv_frame_end(MpegEncContext *s)
1985 if (s->current_picture.reference)
1986 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
/* Clip the segment (sx,sy)-(ex,ey) against [0, maxx] on the x axis,
 * recursing with swapped endpoints to normalize their order; intersection
 * points are computed in 64-bit to avoid intermediate overflow.
 * NOTE(review): most of this function's body (the guards around each
 * statement and the return values) is elided in this listing. */
1991 static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
1994 return clip_line(ex, ey, sx, sy, maxx);
1999 *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
2006 *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
2014 * Draw a line from (ex, ey) -> (sx, sy).
2015 * @param w width of the image
2016 * @param h height of the image
2017 * @param stride stride/linesize of the image
2018 * @param color color of the arrow
/* Rasterize an anti-aliased line into a single-plane 8-bit buffer by
 * additive blending: a 16.16 fixed-point slope distributes `color`
 * between the two pixels adjacent to the ideal line. Walks x when the
 * line is closer to horizontal, y otherwise.
 * NOTE(review): several lines (early returns after clipping, the x/y
 * delta normalization, vertical special case) are elided here. */
2020 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
2021 int w, int h, int stride, int color)
/* clip against the right edge, then (axes swapped) the bottom edge */
2025 if (clip_line(&sx, &sy, &ex, &ey, w - 1))
2027 if (clip_line(&sy, &sx, &ey, &ex, h - 1))
2030 sx = av_clip(sx, 0, w - 1);
2031 sy = av_clip(sy, 0, h - 1);
2032 ex = av_clip(ex, 0, w - 1);
2033 ey = av_clip(ey, 0, h - 1);
2035 buf[sy * stride + sx] += color;
/* mostly-horizontal: step in x, distribute color across two y pixels */
2037 if (FFABS(ex - sx) > FFABS(ey - sy)) {
2039 FFSWAP(int, sx, ex);
2040 FFSWAP(int, sy, ey);
2042 buf += sx + sy * stride;
2044 f = ((ey - sy) << 16) / ex;
2045 for (x = 0; x <= ex; x++) {
2047 fr = (x * f) & 0xFFFF;
2048 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2049 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* mostly-vertical: step in y, distribute color across two x pixels */
2053 FFSWAP(int, sx, ex);
2054 FFSWAP(int, sy, ey);
2056 buf += sx + sy * stride;
2059 f = ((ex - sx) << 16) / ey;
2062 for(y= 0; y <= ey; y++){
2064 fr = (y*f) & 0xFFFF;
2065 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2066 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
2072 * Draw an arrow from (ex, ey) -> (sx, sy).
2073 * @param w width of the image
2074 * @param h height of the image
2075 * @param stride stride/linesize of the image
2076 * @param color color of the arrow
/* Draw a motion-vector arrow: the shaft via draw_line() plus, for vectors
 * longer than 3 pixels, two short head strokes built from a normalized
 * direction (rx, ry) in 12.4 fixed point.
 * NOTE(review): dx/dy/rx/ry setup and the direction/tail handling lines
 * are elided in this listing. */
2078 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2079 int ey, int w, int h, int stride, int color, int tail, int direction)
2084 FFSWAP(int, sx, ex);
2085 FFSWAP(int, sy, ey);
/* allow slightly out-of-frame endpoints; draw_line() clips precisely */
2088 sx = av_clip(sx, -100, w + 100);
2089 sy = av_clip(sy, -100, h + 100);
2090 ex = av_clip(ex, -100, w + 100);
2091 ey = av_clip(ey, -100, h + 100);
/* only draw a head when the vector is longer than 3 pixels */
2096 if (dx * dx + dy * dy > 3 * 3) {
2099 int length = ff_sqrt((rx * rx + ry * ry) << 8);
2101 // FIXME subpixel accuracy
2102 rx = ROUNDED_DIV(rx * 3 << 4, length);
2103 ry = ROUNDED_DIV(ry * 3 << 4, length);
/* two strokes at +/-90 degrees form the arrow head */
2110 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2111 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2113 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
/* Append one exported AVMotionVector describing a block's motion from
 * (src_x, src_y) to (dst_x, dst_y); zero-length vectors are skipped.
 * Block dimensions follow the macroblock partition (8x8/8x16/16x8/16x16).
 * NOTE(review): the dst/src coordinate assignments and the return value
 * (presumably 1 on success, 0 when skipped) are elided in this listing. */
2117 static int add_mb(AVMotionVector *mb, uint32_t mb_type,
2118 int dst_x, int dst_y,
2119 int src_x, int src_y,
2122 if (dst_x == src_x && dst_y == src_y)
2124 mb->w = IS_8X8(mb_type) || IS_8X16(mb_type) ? 8 : 16;
2125 mb->h = IS_8X8(mb_type) || IS_16X8(mb_type) ? 8 : 16;
/* source = 1: vector references a future frame; -1: a past frame */
2130 mb->source = direction ? 1 : -1;
2131 mb->flags = 0; // XXX: does mb_type contain extra information that could be exported here?
2136 * Print debugging info for the given picture.
2138 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2139 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2141 int mb_width, int mb_height, int mb_stride, int quarter_sample)
2143 if ((avctx->flags2 & CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) {
2144 const int shift = 1 + quarter_sample;
2145 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2146 const int mv_stride = (mb_width << mv_sample_log2) +
2147 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2148 int mb_x, mb_y, mbcount = 0;
2150 /* size is width * height * 2 * 4 where 2 is for directions and 4 is
2151 * for the maximum number of MB (4 MB in case of IS_8x8) */
2152 AVMotionVector *mvs = av_malloc_array(mb_width * mb_height, 2 * 4 * sizeof(AVMotionVector));
2156 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2157 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2158 int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
2159 for (direction = 0; direction < 2; direction++) {
2160 if (!USES_LIST(mb_type, direction))
2162 if (IS_8X8(mb_type)) {
2163 for (i = 0; i < 4; i++) {
2164 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2165 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2166 int xy = (mb_x * 2 + (i & 1) +
2167 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2168 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2169 int my = (motion_val[direction][xy][1] >> shift) + sy;
2170 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2172 } else if (IS_16X8(mb_type)) {
2173 for (i = 0; i < 2; i++) {
2174 int sx = mb_x * 16 + 8;
2175 int sy = mb_y * 16 + 4 + 8 * i;
2176 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2177 int mx = (motion_val[direction][xy][0] >> shift);
2178 int my = (motion_val[direction][xy][1] >> shift);
2180 if (IS_INTERLACED(mb_type))
2183 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2185 } else if (IS_8X16(mb_type)) {
2186 for (i = 0; i < 2; i++) {
2187 int sx = mb_x * 16 + 4 + 8 * i;
2188 int sy = mb_y * 16 + 8;
2189 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2190 int mx = motion_val[direction][xy][0] >> shift;
2191 int my = motion_val[direction][xy][1] >> shift;
2193 if (IS_INTERLACED(mb_type))
2196 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2199 int sx = mb_x * 16 + 8;
2200 int sy = mb_y * 16 + 8;
2201 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
2202 int mx = (motion_val[direction][xy][0]>>shift) + sx;
2203 int my = (motion_val[direction][xy][1]>>shift) + sy;
2204 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2211 AVFrameSideData *sd;
2213 av_log(avctx, AV_LOG_DEBUG, "Adding %d MVs info to frame %d\n", mbcount, avctx->frame_number);
2214 sd = av_frame_new_side_data(pict, AV_FRAME_DATA_MOTION_VECTORS, mbcount * sizeof(AVMotionVector));
2217 memcpy(sd->data, mvs, mbcount * sizeof(AVMotionVector));
2223 /* TODO: export all the following to make them accessible for users (and filters) */
2224 if (avctx->hwaccel || !mbtype_table
2225 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
2229 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2232 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2233 av_get_picture_type_char(pict->pict_type));
2234 for (y = 0; y < mb_height; y++) {
2235 for (x = 0; x < mb_width; x++) {
2236 if (avctx->debug & FF_DEBUG_SKIP) {
2237 int count = mbskip_table[x + y * mb_stride];
2240 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2242 if (avctx->debug & FF_DEBUG_QP) {
2243 av_log(avctx, AV_LOG_DEBUG, "%2d",
2244 qscale_table[x + y * mb_stride]);
2246 if (avctx->debug & FF_DEBUG_MB_TYPE) {
2247 int mb_type = mbtype_table[x + y * mb_stride];
2248 // Type & MV direction
2249 if (IS_PCM(mb_type))
2250 av_log(avctx, AV_LOG_DEBUG, "P");
2251 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2252 av_log(avctx, AV_LOG_DEBUG, "A");
2253 else if (IS_INTRA4x4(mb_type))
2254 av_log(avctx, AV_LOG_DEBUG, "i");
2255 else if (IS_INTRA16x16(mb_type))
2256 av_log(avctx, AV_LOG_DEBUG, "I");
2257 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2258 av_log(avctx, AV_LOG_DEBUG, "d");
2259 else if (IS_DIRECT(mb_type))
2260 av_log(avctx, AV_LOG_DEBUG, "D");
2261 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2262 av_log(avctx, AV_LOG_DEBUG, "g");
2263 else if (IS_GMC(mb_type))
2264 av_log(avctx, AV_LOG_DEBUG, "G");
2265 else if (IS_SKIP(mb_type))
2266 av_log(avctx, AV_LOG_DEBUG, "S");
2267 else if (!USES_LIST(mb_type, 1))
2268 av_log(avctx, AV_LOG_DEBUG, ">");
2269 else if (!USES_LIST(mb_type, 0))
2270 av_log(avctx, AV_LOG_DEBUG, "<");
2272 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2273 av_log(avctx, AV_LOG_DEBUG, "X");
2277 if (IS_8X8(mb_type))
2278 av_log(avctx, AV_LOG_DEBUG, "+");
2279 else if (IS_16X8(mb_type))
2280 av_log(avctx, AV_LOG_DEBUG, "-");
2281 else if (IS_8X16(mb_type))
2282 av_log(avctx, AV_LOG_DEBUG, "|");
2283 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2284 av_log(avctx, AV_LOG_DEBUG, " ");
2286 av_log(avctx, AV_LOG_DEBUG, "?");
2289 if (IS_INTERLACED(mb_type))
2290 av_log(avctx, AV_LOG_DEBUG, "=");
2292 av_log(avctx, AV_LOG_DEBUG, " ");
2295 av_log(avctx, AV_LOG_DEBUG, "\n");
2299 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2300 (avctx->debug_mv)) {
2303 int h_chroma_shift, v_chroma_shift, block_height;
2305 const int shift = 1 + quarter_sample;
2307 const int width = avctx->width;
2308 const int height = avctx->height;
2310 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2311 const int mv_stride = (mb_width << mv_sample_log2) +
2312 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2314 *low_delay = 0; // needed to see the vectors without trashing the buffers
2316 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2318 av_frame_make_writable(pict);
2320 pict->opaque = NULL;
2322 ptr = pict->data[0];
2324 block_height = 16 >> v_chroma_shift;
2326 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2328 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2329 const int mb_index = mb_x + mb_y * mb_stride;
2331 if ((avctx->debug_mv) && motion_val[0]) {
2333 for (type = 0; type < 3; type++) {
2337 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2338 (pict->pict_type!= AV_PICTURE_TYPE_P))
2343 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2344 (pict->pict_type!= AV_PICTURE_TYPE_B))
2349 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2350 (pict->pict_type!= AV_PICTURE_TYPE_B))
2355 if (!USES_LIST(mbtype_table[mb_index], direction))
2358 if (IS_8X8(mbtype_table[mb_index])) {
2360 for (i = 0; i < 4; i++) {
2361 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2362 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2363 int xy = (mb_x * 2 + (i & 1) +
2364 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2365 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2366 int my = (motion_val[direction][xy][1] >> shift) + sy;
2367 draw_arrow(ptr, sx, sy, mx, my, width,
2368 height, pict->linesize[0], 100, 0, direction);
2370 } else if (IS_16X8(mbtype_table[mb_index])) {
2372 for (i = 0; i < 2; i++) {
2373 int sx = mb_x * 16 + 8;
2374 int sy = mb_y * 16 + 4 + 8 * i;
2375 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2376 int mx = (motion_val[direction][xy][0] >> shift);
2377 int my = (motion_val[direction][xy][1] >> shift);
2379 if (IS_INTERLACED(mbtype_table[mb_index]))
2382 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2383 height, pict->linesize[0], 100, 0, direction);
2385 } else if (IS_8X16(mbtype_table[mb_index])) {
2387 for (i = 0; i < 2; i++) {
2388 int sx = mb_x * 16 + 4 + 8 * i;
2389 int sy = mb_y * 16 + 8;
2390 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2391 int mx = motion_val[direction][xy][0] >> shift;
2392 int my = motion_val[direction][xy][1] >> shift;
2394 if (IS_INTERLACED(mbtype_table[mb_index]))
2397 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2398 height, pict->linesize[0], 100, 0, direction);
2401 int sx= mb_x * 16 + 8;
2402 int sy= mb_y * 16 + 8;
2403 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2404 int mx= (motion_val[direction][xy][0]>>shift) + sx;
2405 int my= (motion_val[direction][xy][1]>>shift) + sy;
2406 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
2411 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2412 uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2413 0x0101010101010101ULL;
2415 for (y = 0; y < block_height; y++) {
2416 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2417 (block_height * mb_y + y) *
2418 pict->linesize[1]) = c;
2419 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2420 (block_height * mb_y + y) *
2421 pict->linesize[2]) = c;
2424 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2426 int mb_type = mbtype_table[mb_index];
2429 #define COLOR(theta, r) \
2430 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2431 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2435 if (IS_PCM(mb_type)) {
2437 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2438 IS_INTRA16x16(mb_type)) {
2440 } else if (IS_INTRA4x4(mb_type)) {
2442 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2444 } else if (IS_DIRECT(mb_type)) {
2446 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2448 } else if (IS_GMC(mb_type)) {
2450 } else if (IS_SKIP(mb_type)) {
2452 } else if (!USES_LIST(mb_type, 1)) {
2454 } else if (!USES_LIST(mb_type, 0)) {
2457 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2461 u *= 0x0101010101010101ULL;
2462 v *= 0x0101010101010101ULL;
2463 for (y = 0; y < block_height; y++) {
2464 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2465 (block_height * mb_y + y) * pict->linesize[1]) = u;
2466 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2467 (block_height * mb_y + y) * pict->linesize[2]) = v;
2471 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2472 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2473 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2474 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2475 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2477 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2478 for (y = 0; y < 16; y++)
2479 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2480 pict->linesize[0]] ^= 0x80;
2482 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2483 int dm = 1 << (mv_sample_log2 - 2);
2484 for (i = 0; i < 4; i++) {
2485 int sx = mb_x * 16 + 8 * (i & 1);
2486 int sy = mb_y * 16 + 8 * (i >> 1);
2487 int xy = (mb_x * 2 + (i & 1) +
2488 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2490 int32_t *mv = (int32_t *) &motion_val[0][xy];
2491 if (mv[0] != mv[dm] ||
2492 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2493 for (y = 0; y < 8; y++)
2494 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2495 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2496 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2497 pict->linesize[0]) ^= 0x8080808080808080ULL;
2501 if (IS_INTERLACED(mb_type) &&
2502 avctx->codec->id == AV_CODEC_ID_H264) {
2506 mbskip_table[mb_index] = 0;
/* Convenience wrapper: extract the relevant MpegEncContext/Picture fields
 * and forward them to the generic ff_print_debug_info2() visualizer. */
2512 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2514 ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
2515 p->qscale_table, p->motion_val, &s->low_delay,
2516 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2519 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2521 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2522 int offset = 2*s->mb_stride + 1;
2524 return AVERROR(ENOMEM);
2525 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2526 ref->size -= offset;
2527 ref->data += offset;
2528 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/* Half-pel motion compensation of one block at reduced (lowres) resolution.
 * The h264 chroma MC functions are reused as generic bilinear interpolators. */
2531 static inline int hpel_motion_lowres(MpegEncContext *s,
2532 uint8_t *dest, uint8_t *src,
2533 int field_based, int field_select,
2534 int src_x, int src_y,
2535 int width, int height, ptrdiff_t stride,
2536 int h_edge_pos, int v_edge_pos,
2537 int w, int h, h264_chroma_mc_func *pix_op,
2538 int motion_x, int motion_y)
2540 const int lowres = s->avctx->lowres;
2541 const int op_index = FFMIN(lowres, 3);
/* mask selecting the sub-pel fraction bits of the MV at this lowres level */
2542 const int s_mask = (2 << lowres) - 1;
2546 if (s->quarter_sample) {
2551 sx = motion_x & s_mask;
2552 sy = motion_y & s_mask;
/* NOTE: '+' binds tighter than '>>', so these shift by (lowres + 1) */
2553 src_x += motion_x >> lowres + 1;
2554 src_y += motion_y >> lowres + 1;
2556 src += src_y * stride + src_x;
/* if the block (plus the extra interpolation sample) would read past the
 * picture edge, build a padded copy in edge_emu_buffer and read from that */
2558 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2559 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2560 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2561 s->linesize, s->linesize,
2562 w + 1, (h + 1) << field_based,
2563 src_x, src_y << field_based,
2564 h_edge_pos, v_edge_pos);
2565 src = s->edge_emu_buffer;
/* rescale the sub-pel fraction to the units expected by pix_op */
2569 sx = (sx << 2) >> lowres;
2570 sy = (sy << 2) >> lowres;
2573 pix_op[op_index](dest, src, stride, h, sx, sy);
2577 /* apply one mpeg motion vector to the three components */
2578 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2585 uint8_t **ref_picture,
2586 h264_chroma_mc_func *pix_op,
2587 int motion_x, int motion_y,
2590 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2591 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2592 ptrdiff_t uvlinesize, linesize;
2593 const int lowres = s->avctx->lowres;
2594 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2595 const int block_s = 8>>lowres;
/* mask selecting the sub-pel fraction bits at this lowres level */
2596 const int s_mask = (2 << lowres) - 1;
2597 const int h_edge_pos = s->h_edge_pos >> lowres;
2598 const int v_edge_pos = s->v_edge_pos >> lowres;
2599 linesize = s->current_picture.f->linesize[0] << field_based;
2600 uvlinesize = s->current_picture.f->linesize[1] << field_based;
2602 // FIXME obviously not perfect but qpel will not work in lowres anyway
2603 if (s->quarter_sample) {
2609 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2612 sx = motion_x & s_mask;
2613 sy = motion_y & s_mask;
/* NOTE: '+' binds tighter than '>>', i.e. shift by (lowres + 1) */
2614 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2615 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* derive the chroma source position per container format */
2617 if (s->out_format == FMT_H263) {
2618 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2619 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2620 uvsrc_x = src_x >> 1;
2621 uvsrc_y = src_y >> 1;
2622 } else if (s->out_format == FMT_H261) {
2623 // even chroma mv's are full pel in H261
2626 uvsx = (2 * mx) & s_mask;
2627 uvsy = (2 * my) & s_mask;
2628 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2629 uvsrc_y = mb_y * block_s + (my >> lowres);
2631 if(s->chroma_y_shift){
2636 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2637 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2639 if(s->chroma_x_shift){
2643 uvsy = motion_y & s_mask;
2645 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2648 uvsx = motion_x & s_mask;
2649 uvsy = motion_y & s_mask;
2656 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2657 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2658 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* reads past the picture edge go through padded copies in edge_emu_buffer */
2660 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2661 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2662 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2663 linesize >> field_based, linesize >> field_based,
2664 17, 17 + field_based,
2665 src_x, src_y << field_based, h_edge_pos,
2667 ptr_y = s->edge_emu_buffer;
2668 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
/* chroma emulation buffers live behind the luma one (18 luma rows) */
2669 uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2670 uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2671 s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2672 uvlinesize >> field_based, uvlinesize >> field_based,
2674 uvsrc_x, uvsrc_y << field_based,
2675 h_edge_pos >> 1, v_edge_pos >> 1);
2676 s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2677 uvlinesize >> field_based,uvlinesize >> field_based,
2679 uvsrc_x, uvsrc_y << field_based,
2680 h_edge_pos >> 1, v_edge_pos >> 1);
2686 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
/* bottom field: start one (uv)line down in both dest and source */
2688 dest_y += s->linesize;
2689 dest_cb += s->uvlinesize;
2690 dest_cr += s->uvlinesize;
2694 ptr_y += s->linesize;
2695 ptr_cb += s->uvlinesize;
2696 ptr_cr += s->uvlinesize;
/* rescale sub-pel fractions to the units expected by pix_op */
2699 sx = (sx << 2) >> lowres;
2700 sy = (sy << 2) >> lowres;
2701 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2703 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2704 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2705 uvsx = (uvsx << 2) >> lowres;
2706 uvsy = (uvsy << 2) >> lowres;
2708 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2709 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2712 // FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV (8x8) macroblocks in lowres decoding:
 * a single averaged chroma vector covers the whole macroblock. */
2715 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2716 uint8_t *dest_cb, uint8_t *dest_cr,
2717 uint8_t **ref_picture,
2718 h264_chroma_mc_func * pix_op,
2721 const int lowres = s->avctx->lowres;
2722 const int op_index = FFMIN(lowres, 3);
2723 const int block_s = 8 >> lowres;
2724 const int s_mask = (2 << lowres) - 1;
/* NOTE: '+' binds tighter than '>>': edges shifted by (lowres + 1),
 * since the chroma planes are half size */
2725 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2726 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2727 int emu = 0, src_x, src_y, sx, sy;
2731 if (s->quarter_sample) {
2736 /* In case of 8X8, we construct a single chroma motion vector
2737 with a special rounding */
2738 mx = ff_h263_round_chroma(mx);
2739 my = ff_h263_round_chroma(my);
2743 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2744 src_y = s->mb_y * block_s + (my >> lowres + 1);
2746 offset = src_y * s->uvlinesize + src_x;
2747 ptr = ref_picture[1] + offset;
/* out-of-picture reads go through a padded copy in edge_emu_buffer */
2748 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2749 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2750 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2751 s->uvlinesize, s->uvlinesize,
2753 src_x, src_y, h_edge_pos, v_edge_pos);
2754 ptr = s->edge_emu_buffer;
2757 sx = (sx << 2) >> lowres;
2758 sy = (sy << 2) >> lowres;
2759 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr plane reuses the same offset and edge-emulation decision */
2761 ptr = ref_picture[2] + offset;
2763 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2764 s->uvlinesize, s->uvlinesize,
2766 src_x, src_y, h_edge_pos, v_edge_pos);
2767 ptr = s->edge_emu_buffer;
2769 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2773 * motion compensation of a single macroblock
2775 * @param dest_y luma destination pointer
2776 * @param dest_cb chroma cb/u destination pointer
2777 * @param dest_cr chroma cr/v destination pointer
2778 * @param dir direction (0->forward, 1->backward)
2779 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2780 * @param pix_op halfpel motion compensation function (average or put normally)
2781 * the motion vectors are taken from s->mv and the MV type from s->mv_type
2783 static inline void MPV_motion_lowres(MpegEncContext *s,
2784 uint8_t *dest_y, uint8_t *dest_cb,
2786 int dir, uint8_t **ref_picture,
2787 h264_chroma_mc_func *pix_op)
2791 const int lowres = s->avctx->lowres;
2792 const int block_s = 8 >>lowres;
/* dispatch on the macroblock's MV partitioning */
2797 switch (s->mv_type) {
2799 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2801 ref_picture, pix_op,
2802 s->mv[dir][0][0], s->mv[dir][0][1],
/* 4MV: one luma vector per 8x8 sub-block; chroma gets an averaged vector */
2808 for (i = 0; i < 4; i++) {
2809 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2810 s->linesize) * block_s,
2811 ref_picture[0], 0, 0,
2812 (2 * mb_x + (i & 1)) * block_s,
2813 (2 * mb_y + (i >> 1)) * block_s,
2814 s->width, s->height, s->linesize,
2815 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2816 block_s, block_s, pix_op,
2817 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate for the averaged chroma vector */
2819 mx += s->mv[dir][i][0];
2820 my += s->mv[dir][i][1];
2823 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2824 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
2828 if (s->picture_structure == PICT_FRAME) {
/* frame picture: compensate top and bottom fields separately */
2830 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2831 1, 0, s->field_select[dir][0],
2832 ref_picture, pix_op,
2833 s->mv[dir][0][0], s->mv[dir][0][1],
2836 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2837 1, 1, s->field_select[dir][1],
2838 ref_picture, pix_op,
2839 s->mv[dir][1][0], s->mv[dir][1][1],
2842 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2843 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
/* opposite-parity field of the current frame is the reference */
2844 ref_picture = s->current_picture_ptr->f->data;
2847 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2848 0, 0, s->field_select[dir][0],
2849 ref_picture, pix_op,
2851 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
2855 for (i = 0; i < 2; i++) {
2856 uint8_t **ref2picture;
2858 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2859 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2860 ref2picture = ref_picture;
2862 ref2picture = s->current_picture_ptr->f->data;
2865 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2866 0, 0, s->field_select[dir][i],
2867 ref2picture, pix_op,
2868 s->mv[dir][i][0], s->mv[dir][i][1] +
2869 2 * block_s * i, block_s, mb_y >> 1);
2871 dest_y += 2 * block_s * s->linesize;
2872 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2873 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2877 if (s->picture_structure == PICT_FRAME) {
2878 for (i = 0; i < 2; i++) {
2880 for (j = 0; j < 2; j++) {
2881 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2883 ref_picture, pix_op,
2884 s->mv[dir][2 * i + j][0],
2885 s->mv[dir][2 * i + j][1],
2888 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2891 for (i = 0; i < 2; i++) {
2892 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2893 0, 0, s->picture_structure != i + 1,
2894 ref_picture, pix_op,
2895 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2896 2 * block_s, mb_y >> 1);
2898 // after put we make avg of the same block
2899 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2901 // opposite parity is always in the same
2902 // frame if this is second field
2903 if (!s->first_field) {
2904 ref_picture = s->current_picture_ptr->f->data;
2915 * find the lowest MB row referenced in the MVs
2917 int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
/* Used by frame threading (see ff_thread_await_progress() callers below):
 * tells how far down the reference must be decoded before this MB's MC. */
2919 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2920 int my, off, i, mvs;
2922 if (s->picture_structure != PICT_FRAME || s->mcsel)
2925 switch (s->mv_type) {
2939 for (i = 0; i < mvs; i++) {
/* normalize vertical MVs to quarter-pel before taking the extremes */
2940 my = s->mv[dir][i][1]<<qpel_shift;
2941 my_max = FFMAX(my_max, my);
2942 my_min = FFMIN(my_min, my);
/* +63 rounds up; >>6 converts quarter-pel (16 px * 4) into MB rows */
2945 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2947 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2949 return s->mb_height-1;
2952 /* put block[] to dest[] */
2953 static inline void put_dct(MpegEncContext *s,
2954 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
/* dequantize an intra block, then overwrite dest with its IDCT */
2956 s->dct_unquantize_intra(s, block, i, qscale);
2957 s->idsp.idct_put(dest, line_size, block);
2960 /* add block[] to dest[] */
2961 static inline void add_dct(MpegEncContext *s,
2962 int16_t *block, int i, uint8_t *dest, int line_size)
/* skip entirely when the block has no coded coefficients */
2964 if (s->block_last_index[i] >= 0) {
2965 s->idsp.idct_add(dest, line_size, block);
/* dequantize an inter block and add its IDCT to dest[];
 * no-op when the block has no coded coefficients */
2969 static inline void add_dequant_dct(MpegEncContext *s,
2970 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2972 if (s->block_last_index[i] >= 0) {
2973 s->dct_unquantize_inter(s, block, i, qscale);
2975 s->idsp.idct_add(dest, line_size, block);
2980 * Clean dc, ac, coded_block for the current non-intra MB.
2982 void ff_clean_intra_table_entries(MpegEncContext *s)
2984 int wrap = s->b8_stride;
2985 int xy = s->block_index[0];
/* reset the luma DC predictors of the four 8x8 blocks to the default 1024 */
2988 s->dc_val[0][xy + 1 ] =
2989 s->dc_val[0][xy + wrap] =
2990 s->dc_val[0][xy + 1 + wrap] = 1024;
/* clear stored luma AC prediction values (32 coeffs per 8x8 block row pair) */
2992 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2993 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2994 if (s->msmpeg4_version>=3) {
2995 s->coded_block[xy ] =
2996 s->coded_block[xy + 1 ] =
2997 s->coded_block[xy + wrap] =
2998 s->coded_block[xy + 1 + wrap] = 0;
/* chroma predictors are indexed per macroblock, not per 8x8 block */
3001 wrap = s->mb_stride;
3002 xy = s->mb_x + s->mb_y * wrap;
3004 s->dc_val[2][xy] = 1024;
/* clear stored chroma AC prediction values */
3006 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
3007 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
/* mark the MB as no longer intra */
3009 s->mbintra_table[xy]= 0;
3012 /* generic function called after a macroblock has been parsed by the
3013 decoder or after it has been encoded by the encoder.
3015 Important variables used:
3016 s->mb_intra : true if intra macroblock
3017 s->mv_dir : motion vector direction
3018 s->mv_type : motion vector type
3019 s->mv : motion vector
3020 s->interlaced_dct : true if interlaced dct used (mpeg2)
3022 static av_always_inline
3023 void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
3024 int lowres_flag, int is_mpeg12)
3026 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* hardware acceleration takes over the whole MB reconstruction */
3029 s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
3030 s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
3034 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
3035 /* print DCT coefficients */
3037 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
3039 for(j=0; j<64; j++){
3040 av_log(s->avctx, AV_LOG_DEBUG, "%5d",
3041 block[i][s->idsp.idct_permutation[j]]);
3043 av_log(s->avctx, AV_LOG_DEBUG, "\n");
3047 s->current_picture.qscale_table[mb_xy] = s->qscale;
3049 /* update DC predictors for P macroblocks */
3051 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
3052 if(s->mbintra_table[mb_xy])
3053 ff_clean_intra_table_entries(s);
3057 s->last_dc[2] = 128 << s->intra_dc_precision;
3060 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
3061 s->mbintra_table[mb_xy]=1;
3063 if ( (s->flags&CODEC_FLAG_PSNR)
3064 || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
3065 || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
3066 uint8_t *dest_y, *dest_cb, *dest_cr;
3067 int dct_linesize, dct_offset;
3068 op_pixels_func (*op_pix)[4];
3069 qpel_mc_func (*op_qpix)[16];
3070 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3071 const int uvlinesize = s->current_picture.f->linesize[1];
3072 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
3073 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
3075 /* avoid copy if macroblock skipped in last frame too */
3076 /* skip only during decoding as we might trash the buffers during encoding a bit */
3078 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
3080 if (s->mb_skipped) {
3082 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
3084 } else if(!s->current_picture.reference) {
3087 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT interleaves the two fields: double the stride,
 * and the "lower" blocks start one line (not one block row) down */
3091 dct_linesize = linesize << s->interlaced_dct;
3092 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
3096 dest_cb= s->dest[1];
3097 dest_cr= s->dest[2];
/* non-readable destinations are reconstructed in a scratch buffer and
 * copied out at the end (see the put_pixels_tab copies below) */
3099 dest_y = s->b_scratchpad;
3100 dest_cb= s->b_scratchpad+16*linesize;
3101 dest_cr= s->b_scratchpad+32*linesize;
3105 /* motion handling */
3106 /* decoding or more than one mb_type (MC was already done otherwise) */
3109 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
/* frame threading: wait until the referenced rows of the reference
 * pictures have been decoded */
3110 if (s->mv_dir & MV_DIR_FORWARD) {
3111 ff_thread_await_progress(&s->last_picture_ptr->tf,
3112 ff_mpv_lowest_referenced_row(s, 0),
3115 if (s->mv_dir & MV_DIR_BACKWARD) {
3116 ff_thread_await_progress(&s->next_picture_ptr->tf,
3117 ff_mpv_lowest_referenced_row(s, 1),
3123 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
/* bi-directional MC: 'put' the forward prediction, then 'avg' in the
 * backward one */
3125 if (s->mv_dir & MV_DIR_FORWARD) {
3126 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
3127 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
3129 if (s->mv_dir & MV_DIR_BACKWARD) {
3130 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
3133 op_qpix = s->me.qpel_put;
3134 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
3135 op_pix = s->hdsp.put_pixels_tab;
3137 op_pix = s->hdsp.put_no_rnd_pixels_tab;
3139 if (s->mv_dir & MV_DIR_FORWARD) {
3140 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
3141 op_pix = s->hdsp.avg_pixels_tab;
3142 op_qpix= s->me.qpel_avg;
3144 if (s->mv_dir & MV_DIR_BACKWARD) {
3145 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
3150 /* skip dequant / idct if we are really late ;) */
3151 if(s->avctx->skip_idct){
3152 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
3153 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
3154 || s->avctx->skip_idct >= AVDISCARD_ALL)
3158 /* add dct residue */
3159 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
3160 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
3161 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3162 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3163 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3164 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3166 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3167 if (s->chroma_y_shift){
3168 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3169 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3173 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3174 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3175 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3176 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
3179 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
/* blocks are already dequantized for these codecs: plain IDCT-add */
3180 add_dct(s, block[0], 0, dest_y , dct_linesize);
3181 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3182 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3183 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3185 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3186 if(s->chroma_y_shift){//Chroma420
3187 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3188 add_dct(s, block[5], 5, dest_cr, uvlinesize);
3191 dct_linesize = uvlinesize << s->interlaced_dct;
3192 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3194 add_dct(s, block[4], 4, dest_cb, dct_linesize);
3195 add_dct(s, block[5], 5, dest_cr, dct_linesize);
3196 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3197 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3198 if(!s->chroma_x_shift){//Chroma444
3199 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3200 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3201 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3202 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3207 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3208 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3211 /* dct only in intra block */
3212 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
3213 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3214 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3215 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3216 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3218 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3219 if(s->chroma_y_shift){
3220 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3221 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3225 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3226 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3227 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3228 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* intra blocks already dequantized: write the IDCT directly */
3232 s->idsp.idct_put(dest_y, dct_linesize, block[0]);
3233 s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3234 s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
3235 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3237 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3238 if(s->chroma_y_shift){
3239 s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
3240 s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
3243 dct_linesize = uvlinesize << s->interlaced_dct;
3244 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3246 s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
3247 s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
3248 s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3249 s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3250 if(!s->chroma_x_shift){//Chroma444
3251 s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3252 s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3253 s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3254 s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* reconstruction went to the scratch buffer: copy it to the real dest */
3262 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3263 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3264 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Dispatch to mpv_decode_mb_internal() with constant lowres_flag/is_mpeg12
 * arguments so the always-inline body is specialized per combination. */
3269 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
3272 if(s->out_format == FMT_MPEG1) {
3273 if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 1);
3274 else mpv_decode_mb_internal(s, block, 0, 1);
3277 if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 0);
3278 else mpv_decode_mb_internal(s, block, 0, 0);
/* Thin wrapper: forward the current/last picture and field state to the
 * generic ff_draw_horiz_band() helper. */
3281 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3283 ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3284 s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3285 s->first_field, s->low_delay);
/* Compute the per-MB block indices and destination plane pointers for the
 * current (mb_x, mb_y) position; must be called before decoding each MB. */
3288 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3289 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3290 const int uvlinesize = s->current_picture.f->linesize[1];
3291 const int mb_size= 4 - s->avctx->lowres;
/* indices 0-3: the four luma 8x8 blocks; 4-5: the chroma blocks */
3293 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3294 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3295 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3296 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3297 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3298 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3299 //block_index is not used by mpeg2, so it is not affected by chroma_format
3301 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
3302 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3303 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3305 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3307 if(s->picture_structure==PICT_FRAME){
3308 s->dest[0] += s->mb_y * linesize << mb_size;
3309 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3310 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: rows advance at half the MB-row rate */
3312 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3313 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3314 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3315 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3321 * Permute an 8x8 block.
3322 * @param block the block which will be permuted according to the given permutation vector
3323 * @param permutation the permutation vector
3324 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3325 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3326 * (inverse) permuted to scantable order!
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, only used to speed the permutation up; the block is not
 *                  (inverse) permuted to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int i;
    int16_t temp[64];

    if (last <= 0)
        return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* pass 1: save every coefficient that will move and clear it, so the
     * scatter in pass 2 cannot overwrite a value before it has been read */
    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
        temp[j] = block[j];
        block[j] = 0;
    }

    /* pass 2: write each saved coefficient to its permuted position */
    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
        const int perm_j = permutation[j];
        block[perm_j] = temp[j];
    }
}
/* Flush decoder state: unreference all pictures and reset the bitstream
 * parser so decoding can restart cleanly (e.g. after a seek). */
3349 void ff_mpeg_flush(AVCodecContext *avctx){
3351 MpegEncContext *s = avctx->priv_data;
3353 if (!s || !s->picture)
3356 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3357 ff_mpeg_unref_picture(s, &s->picture[i]);
3358 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3360 ff_mpeg_unref_picture(s, &s->current_picture);
3361 ff_mpeg_unref_picture(s, &s->last_picture);
3362 ff_mpeg_unref_picture(s, &s->next_picture);
3364 s->mb_x= s->mb_y= 0;
/* reset the parser's internal state */
3367 s->parse_context.state= -1;
3368 s->parse_context.frame_start_found= 0;
3369 s->parse_context.overread= 0;
3370 s->parse_context.overread_index= 0;
3371 s->parse_context.index= 0;
3372 s->parse_context.last_index= 0;
3373 s->bitstream_buffer_size=0;
3378 * set qscale and update qscale dependent variables.
3380 void ff_set_qscale(MpegEncContext * s, int qscale)
3384 else if (qscale > 31)
3388 s->chroma_qscale= s->chroma_qscale_table[qscale];
3390 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3391 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report row-level decode progress for frame threading; skipped for
 * B-frames, partitioned frames and frames where an error occurred. */
3394 void ff_mpv_report_decode_progress(MpegEncContext *s)
3396 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3397 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);