2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/motion_vector.h"
35 #include "libavutil/timer.h"
38 #include "h264chroma.h"
42 #include "mpegutils.h"
43 #include "mpegvideo.h"
/* Default luma->chroma qscale mapping: the identity (chroma qscale equals
 * luma qscale for all 32 indices). Codecs with a nonlinear mapping install
 * their own table instead. NOTE(review): the closing "};" is elided in this dump. */
50 static const uint8_t ff_default_chroma_qscale_table[32] = {
51 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
52 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
53 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale: a constant 8 for every qscale value (128 entries so it can
 * be indexed directly by qscale without range checks). */
56 const uint8_t ff_mpeg1_dc_scale_table[128] = {
57 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
65 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale, constant 4 — one of the per-precision tables gathered in
 * ff_mpeg2_dc_scale_table[] below (presumably indexed by intra_dc_precision;
 * the selector is not visible in this dump). */
68 static const uint8_t mpeg2_dc_scale_table1[128] = {
69 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
77 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale, constant 2 — see ff_mpeg2_dc_scale_table[] below. */
80 static const uint8_t mpeg2_dc_scale_table2[128] = {
81 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
89 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale, constant 1 (no scaling) — see ff_mpeg2_dc_scale_table[] below. */
92 static const uint8_t mpeg2_dc_scale_table3[128] = {
93 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Dispatch table bundling the four DC-scale tables above (divisors 8, 4, 2, 1).
 * NOTE(review): the index is presumably the MPEG-2 intra_dc_precision field —
 * the indexing site is not visible in this dump; confirm against callers. */
104 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
105 ff_mpeg1_dc_scale_table,
106 mpeg2_dc_scale_table1,
107 mpeg2_dc_scale_table2,
108 mpeg2_dc_scale_table3,
/* Alternate horizontal scan order for an 8x8 coefficient block: maps scan
 * position (0..63) to raster index within the block. */
111 const uint8_t ff_alternate_horizontal_scan[64] = {
112 0, 1, 2, 3, 8, 9, 16, 17,
113 10, 11, 4, 5, 6, 7, 15, 14,
114 13, 12, 19, 18, 24, 25, 32, 33,
115 26, 27, 20, 21, 22, 23, 28, 29,
116 30, 31, 34, 35, 40, 41, 48, 49,
117 42, 43, 36, 37, 38, 39, 44, 45,
118 46, 47, 50, 51, 56, 57, 58, 59,
119 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical scan order for an 8x8 coefficient block (used when
 * alternate_scan is set — see ff_mpv_idct_init below). */
122 const uint8_t ff_alternate_vertical_scan[64] = {
123 0, 8, 16, 24, 1, 9, 2, 10,
124 17, 25, 32, 40, 48, 56, 57, 49,
125 41, 33, 26, 18, 3, 11, 4, 12,
126 19, 27, 34, 42, 50, 58, 35, 43,
127 51, 59, 20, 28, 5, 13, 6, 14,
128 21, 29, 36, 44, 52, 60, 37, 45,
129 53, 61, 22, 30, 7, 15, 23, 31,
130 38, 46, 54, 62, 39, 47, 55, 63,
/* Dequantize an intra block in place, MPEG-1 style: DC is scaled by the
 * luma/chroma DC scale; AC coefficients are multiplied by qscale and the
 * intra matrix, then forced odd ("mismatch control").
 * @param block coefficient block, modified in place
 * @param n     block index; n < 4 selects luma DC scale, otherwise chroma
 * @param qscale quantizer scale for this block
 * NOTE(review): this dump elides several lines (e.g. the sign test that selects
 * between the two level-scaling branches below) — do not edit from this copy. */
133 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
134 int16_t *block, int n, int qscale)
136 int i, level, nCoeffs;
137 const uint16_t *quant_matrix;
139 nCoeffs= s->block_last_index[n];
141 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
142 /* XXX: only mpeg1 */
143 quant_matrix = s->intra_matrix;
144 for(i=1;i<=nCoeffs;i++) {
145 int j= s->intra_scantable.permutated[i];
/* branch for one sign of level (condition elided in this dump) */
150 level = (int)(level * qscale * quant_matrix[j]) >> 3;
151 level = (level - 1) | 1;
/* branch for the other sign of level (condition elided in this dump) */
154 level = (int)(level * qscale * quant_matrix[j]) >> 3;
155 level = (level - 1) | 1;
/* Dequantize an inter block in place, MPEG-1 style: level -> ((2*level+1) *
 * qscale * matrix) >> 4, result forced odd. Unlike the intra variant the loop
 * starts at i = 0 (no DC special case) and the inter matrix is used.
 * NOTE(review): sign-selection conditions between the duplicated branches are
 * elided in this dump. */
162 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
163 int16_t *block, int n, int qscale)
165 int i, level, nCoeffs;
166 const uint16_t *quant_matrix;
168 nCoeffs= s->block_last_index[n];
170 quant_matrix = s->inter_matrix;
171 for(i=0; i<=nCoeffs; i++) {
172 int j= s->intra_scantable.permutated[i];
177 level = (((level << 1) + 1) * qscale *
178 ((int) (quant_matrix[j]))) >> 4;
179 level = (level - 1) | 1;
182 level = (((level << 1) + 1) * qscale *
183 ((int) (quant_matrix[j]))) >> 4;
184 level = (level - 1) | 1;
/* Dequantize an intra block in place, MPEG-2 style. Differs from the MPEG-1
 * variant: with alternate_scan all 63 AC coefficients are processed (the scan
 * may place nonzero values anywhere), and levels are NOT forced odd here.
 * NOTE(review): sign-selection conditions are elided in this dump. */
191 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
192 int16_t *block, int n, int qscale)
194 int i, level, nCoeffs;
195 const uint16_t *quant_matrix;
197 if(s->alternate_scan) nCoeffs= 63;
198 else nCoeffs= s->block_last_index[n];
200 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
201 quant_matrix = s->intra_matrix;
202 for(i=1;i<=nCoeffs;i++) {
203 int j= s->intra_scantable.permutated[i];
208 level = (int)(level * qscale * quant_matrix[j]) >> 3;
211 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of dct_unquantize_mpeg2_intra_c, installed by dct_init
 * when CODEC_FLAG_BITEXACT is set. The visible body matches the _c version;
 * the differing lines (presumably the spec-mandated mismatch/sum handling)
 * are elided in this dump — confirm against upstream before editing. */
218 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
219 int16_t *block, int n, int qscale)
221 int i, level, nCoeffs;
222 const uint16_t *quant_matrix;
225 if(s->alternate_scan) nCoeffs= 63;
226 else nCoeffs= s->block_last_index[n];
228 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
230 quant_matrix = s->intra_matrix;
231 for(i=1;i<=nCoeffs;i++) {
232 int j= s->intra_scantable.permutated[i];
237 level = (int)(level * qscale * quant_matrix[j]) >> 3;
240 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Dequantize an inter block in place, MPEG-2 style: ((2*level+1) * qscale *
 * inter matrix) >> 4, no odd-forcing; processes all 63 coefficients when
 * alternate_scan is set. NOTE(review): sign-selection conditions are elided
 * in this dump. */
249 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
250 int16_t *block, int n, int qscale)
252 int i, level, nCoeffs;
253 const uint16_t *quant_matrix;
256 if(s->alternate_scan) nCoeffs= 63;
257 else nCoeffs= s->block_last_index[n];
259 quant_matrix = s->inter_matrix;
260 for(i=0; i<=nCoeffs; i++) {
261 int j= s->intra_scantable.permutated[i];
266 level = (((level << 1) + 1) * qscale *
267 ((int) (quant_matrix[j]))) >> 4;
270 level = (((level << 1) + 1) * qscale *
271 ((int) (quant_matrix[j]))) >> 4;
/* Dequantize an intra block in place, H.263 style: level*qmul +/- qadd
 * depending on sign. With h263_aic the block may have no coded coefficients
 * (the assert allows block_last_index < 0 in that case).
 * NOTE(review): the assignment of qmul and the branch structure around the
 * two level updates are elided in this dump. */
280 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
281 int16_t *block, int n, int qscale)
283 int i, level, qmul, qadd;
286 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
291 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
292 qadd = (qscale - 1) | 1;
299 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
301 for(i=1; i<=nCoeffs; i++) {
305 level = level * qmul - qadd;
307 level = level * qmul + qadd;
/* Dequantize an inter block in place, H.263 style. Same arithmetic as the
 * intra variant but starting at coefficient 0 and without DC scaling.
 * NOTE(review): qmul assignment and sign branches are elided in this dump. */
314 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
315 int16_t *block, int n, int qscale)
317 int i, level, qmul, qadd;
320 av_assert2(s->block_last_index[n]>=0);
322 qadd = (qscale - 1) | 1;
325 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
327 for(i=0; i<=nCoeffs; i++) {
331 level = level * qmul - qadd;
333 level = level * qmul + qadd;
/* Error-resilience callback (installed in init_er): reconstructs one
 * macroblock during concealment. Copies the MV/type state into the context,
 * recomputes the destination pointers for the current MB position, and runs
 * the normal MB decode path. Always uses frame-based dest computation —
 * hence the debug log that interlaced concealment is incomplete. */
340 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
342 int mb_x, int mb_y, int mb_intra, int mb_skipped)
344 MpegEncContext *s = opaque;
347 s->mv_type = mv_type;
348 s->mb_intra = mb_intra;
349 s->mb_skipped = mb_skipped;
352 memcpy(s->mv, mv, sizeof(*mv));
354 ff_init_block_index(s);
355 ff_update_block_index(s);
357 s->bdsp.clear_blocks(s->block[0]);
/* dest pointers: 16x16 luma MB, chroma shifted by the subsampling factors */
359 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
360 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
361 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
364 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
365 ff_mpv_decode_mb(s, s->block);
/* Debug stub substituted for 16-pixel-wide hpel copy/avg functions when
 * FF_DEBUG_NOMC is set: fills rows with mid-gray (128) instead of doing
 * motion compensation, so prediction contributions become visible.
 * NOTE(review): the per-row loop around the memset is elided in this dump. */
368 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
371 memset(dst + h*linesize, 128, 16);
/* 8-pixel-wide counterpart of gray16; see the comment there. */
374 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
377 memset(dst + h*linesize, 128, 8);
380 /* init common dct for both encoder and decoder */
/* Initializes the DSP helper contexts, installs the C dequantizers (with the
 * bit-exact MPEG-2 intra variant under CODEC_FLAG_BITEXACT), optionally
 * replaces hpel functions with gray fills for FF_DEBUG_NOMC, and finally
 * lets each architecture override with optimized versions. The #if/#endif
 * guards around the per-arch init calls are elided in this dump. */
381 static av_cold int dct_init(MpegEncContext *s)
383 ff_blockdsp_init(&s->bdsp, s->avctx);
384 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
385 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
386 ff_me_cmp_init(&s->mecc, s->avctx);
387 ff_mpegvideodsp_init(&s->mdsp);
388 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
390 if (s->avctx->debug & FF_DEBUG_NOMC) {
392 for (i=0; i<4; i++) {
393 s->hdsp.avg_pixels_tab[0][i] = gray16;
394 s->hdsp.put_pixels_tab[0][i] = gray16;
395 s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
397 s->hdsp.avg_pixels_tab[1][i] = gray8;
398 s->hdsp.put_pixels_tab[1][i] = gray8;
399 s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
403 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
404 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
405 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
406 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
407 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
408 if (s->flags & CODEC_FLAG_BITEXACT)
409 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
410 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
412 if (HAVE_INTRINSICS_NEON)
413 ff_mpv_common_init_neon(s);
416 ff_mpv_common_init_axp(s);
418 ff_mpv_common_init_arm(s);
420 ff_mpv_common_init_ppc(s);
422 ff_mpv_common_init_x86(s);
/* Initializes the IDCT context and builds the permuted scan tables:
 * inter/intra use vertical scan when alternate_scan is set, zigzag otherwise;
 * the h/v intra tables are always the alternate horizontal/vertical scans.
 * All are permuted through the IDCT's coefficient permutation. */
427 av_cold void ff_mpv_idct_init(MpegEncContext *s)
429 ff_idctdsp_init(&s->idsp, s->avctx);
431 /* load & permutate scantables
432 * note: only wmv uses different ones
434 if (s->alternate_scan) {
435 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
436 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
438 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
439 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
441 ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
442 ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocates the linesize-dependent scratch buffers (edge emulation + motion
 * estimation scratchpads) shared by several later buffers via aliasing.
 * Returns 0 on success, AVERROR_PATCHWELCOME for unsupported tiny images,
 * AVERROR(ENOMEM) on allocation failure. Skipped entirely for hwaccel/VDPAU
 * since no software MC is performed then. */
445 static int frame_size_alloc(MpegEncContext *s, int linesize)
447 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
449 if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
453 av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
454 return AVERROR_PATCHWELCOME;
457 // edge emu needs blocksize + filter length - 1
458 // (= 17x17 for halfpel / 21x21 for h264)
459 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
460 // at uvlinesize. It supports only YUV420 so 24x24 is enough
461 // linesize * interlaced * MBsize
462 // we also use this buffer for encoding in encode_mb_internal() needig an additional 32 lines
463 FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size, 4 * 68,
466 FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size, 4 * 16 * 2,
/* the remaining scratch pointers alias the ME scratchpad — they are never
 * freed individually (see free_duplicate_context) */
468 s->me.temp = s->me.scratchpad;
469 s->rd_scratchpad = s->me.scratchpad;
470 s->b_scratchpad = s->me.scratchpad;
471 s->obmc_scratchpad = s->me.scratchpad + 16;
475 av_freep(&s->edge_emu_buffer);
476 return AVERROR(ENOMEM);
480 * Allocate a frame buffer
482 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
484 int edges_needed = av_codec_is_encoder(s->avctx->codec);
488 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
489 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
490 s->codec_id != AV_CODEC_ID_MSS2) {
492 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
493 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
496 r = ff_thread_get_buffer(s->avctx, &pic->tf,
497 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
499 pic->f->width = s->avctx->width;
500 pic->f->height = s->avctx->height;
501 pic->f->format = s->avctx->pix_fmt;
502 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
505 if (r < 0 || !pic->f->buf[0]) {
506 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
513 for (i = 0; pic->f->data[i]; i++) {
514 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
515 pic->f->linesize[i] +
516 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
517 pic->f->data[i] += offset;
519 pic->f->width = s->avctx->width;
520 pic->f->height = s->avctx->height;
523 if (s->avctx->hwaccel) {
524 assert(!pic->hwaccel_picture_private);
525 if (s->avctx->hwaccel->frame_priv_data_size) {
526 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
527 if (!pic->hwaccel_priv_buf) {
528 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
531 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
535 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
536 s->uvlinesize != pic->f->linesize[1])) {
537 av_log(s->avctx, AV_LOG_ERROR,
538 "get_buffer() failed (stride changed)\n");
539 ff_mpeg_unref_picture(s, pic);
543 if (pic->f->linesize[1] != pic->f->linesize[2]) {
544 av_log(s->avctx, AV_LOG_ERROR,
545 "get_buffer() failed (uv stride mismatch)\n");
546 ff_mpeg_unref_picture(s, pic);
550 if (!s->edge_emu_buffer &&
551 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
552 av_log(s->avctx, AV_LOG_ERROR,
553 "get_buffer() failed to allocate context scratch buffers.\n");
554 ff_mpeg_unref_picture(s, pic);
/* Releases all per-picture side-data buffers (variance/skip/qscale/type
 * tables plus the per-direction motion-val and ref-index buffers) and resets
 * the recorded allocation dimensions so a future ff_alloc_picture will
 * reallocate. Safe on already-freed buffers (av_buffer_unref on NULL is a
 * no-op). */
561 void ff_free_picture_tables(Picture *pic)
565 pic->alloc_mb_width =
566 pic->alloc_mb_height = 0;
568 av_buffer_unref(&pic->mb_var_buf);
569 av_buffer_unref(&pic->mc_mb_var_buf);
570 av_buffer_unref(&pic->mb_mean_buf);
571 av_buffer_unref(&pic->mbskip_table_buf);
572 av_buffer_unref(&pic->qscale_table_buf);
573 av_buffer_unref(&pic->mb_type_buf);
575 for (i = 0; i < 2; i++) {
576 av_buffer_unref(&pic->motion_val_buf[i]);
577 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocates the per-picture side-data buffers sized from the current
 * mb/b8 geometry and records that geometry in alloc_mb_width/height.
 * The encoder-only variance buffers and the motion-val/ref-index buffers
 * are allocated conditionally (the encoder check around the first group is
 * elided in this dump). Returns 0 or AVERROR(ENOMEM); on failure the caller
 * (ff_alloc_picture) frees whatever was allocated. */
581 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
583 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
584 const int mb_array_size = s->mb_stride * s->mb_height;
585 const int b8_array_size = s->b8_stride * s->mb_height * 2;
589 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
590 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
591 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
593 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
594 return AVERROR(ENOMEM);
597 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
598 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
599 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
600 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
601 return AVERROR(ENOMEM);
604 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv ||
605 (s->avctx->flags2 & CODEC_FLAG2_EXPORT_MVS)) {
606 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
607 int ref_index_size = 4 * mb_array_size;
609 for (i = 0; mv_size && i < 2; i++) {
610 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
611 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
612 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
613 return AVERROR(ENOMEM);
617 pic->alloc_mb_width = s->mb_width;
618 pic->alloc_mb_height = s->mb_height;
/* Ensures every (possibly shared) per-picture table buffer is writable by
 * this Picture, copy-on-write via av_buffer_make_writable. Returns 0 or a
 * negative AVERROR from the first failing buffer. */
623 static int make_tables_writable(Picture *pic)
626 #define MAKE_WRITABLE(table) \
629 (ret = av_buffer_make_writable(&pic->table)) < 0)\
633 MAKE_WRITABLE(mb_var_buf);
634 MAKE_WRITABLE(mc_mb_var_buf);
635 MAKE_WRITABLE(mb_mean_buf);
636 MAKE_WRITABLE(mbskip_table_buf);
637 MAKE_WRITABLE(qscale_table_buf);
638 MAKE_WRITABLE(mb_type_buf);
640 for (i = 0; i < 2; i++) {
641 MAKE_WRITABLE(motion_val_buf[i]);
642 MAKE_WRITABLE(ref_index_buf[i]);
649 * Allocate a Picture.
650 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Frees stale tables if the mb geometry changed, allocates the frame buffer
 * (unless shared, in which case pic->f->data[0] must already be set), records
 * the strides in the context, (re)allocates or COWs the side-data tables, and
 * wires the convenience pointers (qscale_table/mb_type are offset by
 * 2*mb_stride+1 so negative-index neighbor accesses stay in bounds).
 * Returns 0 on success, negative AVERROR on failure (everything is cleaned
 * up via the elided fail path). */
652 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
656 if (pic->qscale_table_buf)
657 if ( pic->alloc_mb_width != s->mb_width
658 || pic->alloc_mb_height != s->mb_height)
659 ff_free_picture_tables(pic);
662 av_assert0(pic->f->data[0]);
665 av_assert0(!pic->f->buf[0]);
667 if (alloc_frame_buffer(s, pic) < 0)
670 s->linesize = pic->f->linesize[0];
671 s->uvlinesize = pic->f->linesize[1];
674 if (!pic->qscale_table_buf)
675 ret = alloc_picture_tables(s, pic);
677 ret = make_tables_writable(pic);
682 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
683 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
684 pic->mb_mean = pic->mb_mean_buf->data;
687 pic->mbskip_table = pic->mbskip_table_buf->data;
688 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
689 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
691 if (pic->motion_val_buf[0]) {
692 for (i = 0; i < 2; i++) {
693 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
694 pic->ref_index[i] = pic->ref_index_buf[i]->data;
700 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
701 ff_mpeg_unref_picture(s, pic);
702 ff_free_picture_tables(pic);
703 return AVERROR(ENOMEM);
707 * Deallocate a picture.
/* Releases the frame buffer (through the thread-aware path except for the
 * WM image/screen codecs), drops the hwaccel private buffer, optionally
 * frees the side tables, and zeroes every Picture field past mb_mean — the
 * offsetof trick preserves the table pointers at the start of the struct. */
709 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
711 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
714 /* WM Image / Screen codecs allocate internal buffers with different
715 * dimensions / colorspaces; ignore user-defined callbacks for these. */
716 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
717 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
718 s->codec_id != AV_CODEC_ID_MSS2)
719 ff_thread_release_buffer(s->avctx, &pic->tf);
721 av_frame_unref(pic->f);
723 av_buffer_unref(&pic->hwaccel_priv_buf);
725 if (pic->needs_realloc)
726 ff_free_picture_tables(pic);
728 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Makes dst's side-data table buffers reference src's: for each buffer that
 * differs, unref dst's and take a new reference on src's (failure frees all
 * of dst's tables and returns ENOMEM). Then the raw convenience pointers and
 * the allocation dimensions are copied verbatim. */
731 static int update_picture_tables(Picture *dst, Picture *src)
735 #define UPDATE_TABLE(table)\
738 (!dst->table || dst->table->buffer != src->table->buffer)) {\
739 av_buffer_unref(&dst->table);\
740 dst->table = av_buffer_ref(src->table);\
742 ff_free_picture_tables(dst);\
743 return AVERROR(ENOMEM);\
748 UPDATE_TABLE(mb_var_buf);
749 UPDATE_TABLE(mc_mb_var_buf);
750 UPDATE_TABLE(mb_mean_buf);
751 UPDATE_TABLE(mbskip_table_buf);
752 UPDATE_TABLE(qscale_table_buf);
753 UPDATE_TABLE(mb_type_buf);
754 for (i = 0; i < 2; i++) {
755 UPDATE_TABLE(motion_val_buf[i]);
756 UPDATE_TABLE(ref_index_buf[i]);
759 dst->mb_var = src->mb_var;
760 dst->mc_mb_var = src->mc_mb_var;
761 dst->mb_mean = src->mb_mean;
762 dst->mbskip_table = src->mbskip_table;
763 dst->qscale_table = src->qscale_table;
764 dst->mb_type = src->mb_type;
765 for (i = 0; i < 2; i++) {
766 dst->motion_val[i] = src->motion_val[i];
767 dst->ref_index[i] = src->ref_index[i];
770 dst->alloc_mb_width = src->alloc_mb_width;
771 dst->alloc_mb_height = src->alloc_mb_height;
/* Makes dst a reference to src: frame buffer via ff_thread_ref_frame, side
 * tables via update_picture_tables, hwaccel private data via a new buffer
 * ref, then copies the scalar bookkeeping fields. dst must be empty on entry
 * (asserted). On any failure dst is fully unref'd (elided fail label) and a
 * negative AVERROR is returned. */
776 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
780 av_assert0(!dst->f->buf[0]);
781 av_assert0(src->f->buf[0]);
785 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
789 ret = update_picture_tables(dst, src);
793 if (src->hwaccel_picture_private) {
794 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
795 if (!dst->hwaccel_priv_buf)
797 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
800 dst->field_picture = src->field_picture;
801 dst->mb_var_sum = src->mb_var_sum;
802 dst->mc_mb_var_sum = src->mc_mb_var_sum;
803 dst->b_frame_score = src->b_frame_score;
804 dst->needs_realloc = src->needs_realloc;
805 dst->reference = src->reference;
806 dst->shared = src->shared;
810 ff_mpeg_unref_picture(s, dst);
/* Swaps the U and V block pointers in pblocks (used for the "VCR2" codec
 * tag — see init_duplicate_context / ff_update_duplicate_context).
 * NOTE(review): the temporary-save line of the swap is elided in this dump;
 * only the reassignment of pblocks[4] is visible. */
814 static void exchange_uv(MpegEncContext *s)
819 s->pblocks[4] = s->pblocks[5];
/* Allocates the per-slice-context state: motion-estimation maps, optional
 * DCT noise-reduction error sums, the 12 coefficient blocks (with pblocks
 * pointers, UV-swapped for the VCR2 codec tag), and for H.263-family codecs
 * the AC prediction values laid out as [luma | U | V] planes with a one-row/
 * one-column guard border. Returns 0 or -1; partially allocated buffers are
 * released later by ff_mpv_common_end(). */
823 static int init_duplicate_context(MpegEncContext *s)
825 int y_size = s->b8_stride * (2 * s->mb_height + 1);
826 int c_size = s->mb_stride * (s->mb_height + 1);
827 int yc_size = y_size + 2 * c_size;
830 if (s->mb_height & 1)
831 yc_size += 2*s->b8_stride + 2*s->mb_stride;
838 s->obmc_scratchpad = NULL;
841 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
842 ME_MAP_SIZE * sizeof(uint32_t), fail)
843 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
844 ME_MAP_SIZE * sizeof(uint32_t), fail)
845 if (s->avctx->noise_reduction) {
846 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
847 2 * 64 * sizeof(int), fail)
850 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
851 s->block = s->blocks[0];
853 for (i = 0; i < 12; i++) {
854 s->pblocks[i] = &s->block[i];
856 if (s->avctx->codec_tag == AV_RL32("VCR2"))
859 if (s->out_format == FMT_H263) {
861 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
862 yc_size * sizeof(int16_t) * 16, fail);
863 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
864 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
865 s->ac_val[2] = s->ac_val[1] + c_size;
870 return -1; // free() through ff_mpv_common_end()
/* Frees everything init_duplicate_context and frame_size_alloc allocated for
 * one slice context. Only me.scratchpad is freed; the other scratch pointers
 * alias it (see frame_size_alloc) and are merely nulled. */
873 static void free_duplicate_context(MpegEncContext *s)
878 av_freep(&s->edge_emu_buffer);
879 av_freep(&s->me.scratchpad);
883 s->obmc_scratchpad = NULL;
885 av_freep(&s->dct_error_sum);
886 av_freep(&s->me.map);
887 av_freep(&s->me.score_map);
888 av_freep(&s->blocks);
889 av_freep(&s->ac_val_base);
/* Copies the per-slice-private fields from src into bak so that
 * ff_update_duplicate_context can restore them after a wholesale memcpy of
 * the context. NOTE(review): most of the COPY() lines are elided in this
 * dump; only edge_emu_buffer, obmc_scratchpad and me.map_generation are
 * visible. */
893 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
895 #define COPY(a) bak->a = src->a
896 COPY(edge_emu_buffer);
901 COPY(obmc_scratchpad);
908 COPY(me.map_generation);
/* Syncs a slice context (dst) with the master (src): copies the whole
 * MpegEncContext while preserving dst's private buffers via
 * backup_duplicate_context, then re-derives the pblocks pointers (UV swap
 * for VCR2) and lazily allocates dst's scratch buffers for the current
 * linesize. Returns 0 or a negative AVERROR. */
920 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
924 // FIXME copy only needed parts
926 backup_duplicate_context(&bak, dst);
927 memcpy(dst, src, sizeof(MpegEncContext));
928 backup_duplicate_context(dst, &bak);
929 for (i = 0; i < 12; i++) {
930 dst->pblocks[i] = &dst->block[i];
932 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
934 if (!dst->edge_emu_buffer &&
935 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
936 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
937 "scratch buffers.\n");
940 // STOP_TIMER("update_duplicate_context")
941 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading sync: copies decoding state from the source thread's
 * context (s1) into the destination thread's (s). Handles first-time init,
 * size changes, re-referencing every Picture, and copying the resilience /
 * MPEG-4 timing / interlacing field ranges by memcpy between struct member
 * addresses (so those members must stay contiguous in MpegEncContext).
 * Returns 0 or a negative AVERROR. NOTE(review): many braces and early
 * returns are elided in this dump. */
945 int ff_mpeg_update_thread_context(AVCodecContext *dst,
946 const AVCodecContext *src)
949 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
956 // FIXME can parameters change on I-frames?
957 // in that case dst may need a reinit
958 if (!s->context_initialized) {
959 memcpy(s, s1, sizeof(MpegEncContext));
/* the bitstream buffer is per-thread — never share it */
962 s->bitstream_buffer = NULL;
963 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
965 if (s1->context_initialized){
966 // s->picture_range_start += MAX_PICTURE_COUNT;
967 // s->picture_range_end += MAX_PICTURE_COUNT;
969 if((ret = ff_mpv_common_init(s)) < 0){
970 memset(s, 0, sizeof(MpegEncContext));
977 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
978 s->context_reinit = 0;
979 s->height = s1->height;
980 s->width = s1->width;
981 if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
985 s->avctx->coded_height = s1->avctx->coded_height;
986 s->avctx->coded_width = s1->avctx->coded_width;
987 s->avctx->width = s1->avctx->width;
988 s->avctx->height = s1->avctx->height;
990 s->coded_picture_number = s1->coded_picture_number;
991 s->picture_number = s1->picture_number;
993 av_assert0(!s->picture || s->picture != s1->picture);
995 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
996 ff_mpeg_unref_picture(s, &s->picture[i]);
997 if (s1->picture[i].f->buf[0] &&
998 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
1002 #define UPDATE_PICTURE(pic)\
1004 ff_mpeg_unref_picture(s, &s->pic);\
1005 if (s1->pic.f && s1->pic.f->buf[0])\
1006 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
1008 ret = update_picture_tables(&s->pic, &s1->pic);\
1013 UPDATE_PICTURE(current_picture);
1014 UPDATE_PICTURE(last_picture);
1015 UPDATE_PICTURE(next_picture);
1017 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1018 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1019 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1021 // Error/bug resilience
1022 s->next_p_frame_damaged = s1->next_p_frame_damaged;
1023 s->workaround_bugs = s1->workaround_bugs;
1024 s->padding_bug_score = s1->padding_bug_score;
1026 // MPEG4 timing info
1027 memcpy(&s->last_time_base, &s1->last_time_base,
1028 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1029 (char *) &s1->last_time_base);
1032 s->max_b_frames = s1->max_b_frames;
1033 s->low_delay = s1->low_delay;
1034 s->droppable = s1->droppable;
1036 // DivX handling (doesn't work)
1037 s->divx_packed = s1->divx_packed;
1039 if (s1->bitstream_buffer) {
1040 if (s1->bitstream_buffer_size +
1041 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
1042 av_fast_malloc(&s->bitstream_buffer,
1043 &s->allocated_bitstream_buffer_size,
1044 s1->allocated_bitstream_buffer_size);
1045 s->bitstream_buffer_size = s1->bitstream_buffer_size;
1046 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1047 s1->bitstream_buffer_size);
1048 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1049 FF_INPUT_BUFFER_PADDING_SIZE);
1052 // linesize dependend scratch buffer allocation
1053 if (!s->edge_emu_buffer)
1055 if (frame_size_alloc(s, s1->linesize) < 0) {
1056 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1057 "scratch buffers.\n");
1058 return AVERROR(ENOMEM);
1061 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1062 "be allocated due to unknown size.\n");
1065 // MPEG2/interlacing info
1066 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1067 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1069 if (!s1->first_field) {
1070 s->last_pict_type = s1->pict_type;
1071 if (s1->current_picture_ptr)
1072 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1079 * Set the given MpegEncContext to common defaults
1080 * (same for encoding and decoding).
1081 * The changed fields will not depend upon the
1082 * prior state of the MpegEncContext.
1084 void ff_mpv_common_defaults(MpegEncContext *s)
/* MPEG-1 tables and progressive frame layout are the baseline */
1086 s->y_dc_scale_table =
1087 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1088 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1089 s->progressive_frame = 1;
1090 s->progressive_sequence = 1;
1091 s->picture_structure = PICT_FRAME;
1093 s->coded_picture_number = 0;
1094 s->picture_number = 0;
1099 s->slice_context_count = 1;
1103 * Set the given MpegEncContext to defaults for decoding.
1104 * the changed fields will not depend upon
1105 * the prior state of the MpegEncContext.
1107 void ff_mpv_decode_defaults(MpegEncContext *s)
/* decoder defaults are currently just the common defaults */
1109 ff_mpv_common_defaults(s);
/* Binds a decoder's MpegEncContext to its AVCodecContext: mirrors the coded
 * dimensions, codec id, workaround flags, and the upper-cased fourcc tags. */
1112 void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
1115 s->width = avctx->coded_width;
1116 s->height = avctx->coded_height;
1117 s->codec_id = avctx->codec->id;
1118 s->workaround_bugs = avctx->workaround_bugs;
1119 s->flags = avctx->flags;
1120 s->flags2 = avctx->flags2;
1122 /* convert fourcc to upper case */
1123 s->codec_tag = avpriv_toupper4(avctx->codec_tag);
1125 s->stream_codec_tag = avpriv_toupper4(avctx->stream_codec_tag);
/* Initializes the error-resilience context from the current macroblock
 * geometry, shares the skip/intra/DC tables with the main context, allocates
 * its scratch and status tables, and installs mpeg_er_decode_mb as the
 * concealment callback. Returns 0 or AVERROR(ENOMEM) after freeing what was
 * allocated. */
1128 static int init_er(MpegEncContext *s)
1130 ERContext *er = &s->er;
1131 int mb_array_size = s->mb_height * s->mb_stride;
1134 er->avctx = s->avctx;
1135 er->mecc = &s->mecc;
1137 er->mb_index2xy = s->mb_index2xy;
1138 er->mb_num = s->mb_num;
1139 er->mb_width = s->mb_width;
1140 er->mb_height = s->mb_height;
1141 er->mb_stride = s->mb_stride;
1142 er->b8_stride = s->b8_stride;
1144 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1145 er->error_status_table = av_mallocz(mb_array_size);
1146 if (!er->er_temp_buffer || !er->error_status_table)
1149 er->mbskip_table = s->mbskip_table;
1150 er->mbintra_table = s->mbintra_table;
1152 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1153 er->dc_val[i] = s->dc_val[i];
1155 er->decode_mb = mpeg_er_decode_mb;
1160 av_freep(&er->er_temp_buffer);
1161 av_freep(&er->error_status_table);
1162 return AVERROR(ENOMEM);
1166 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Derives the mb/b8 geometry from width/height, builds the mb_index2xy map,
 * and allocates every resolution-dependent table: encoder MV tables (each
 * offset by mb_stride+1 for neighbor access), MB type / lambda / complexity
 * tables, interlaced-ME field MV tables, H.263 coded-block/cbp/pred tables,
 * DC prediction values (reset to 1024 = "no prediction"), the intra table,
 * and the skip table. Returns 0 or AVERROR(ENOMEM) via the fail label; the
 * caller is expected to free partial allocations. NOTE(review): several
 * guard conditions (e.g. the s->encoding checks) are elided in this dump. */
1168 static int init_context_frame(MpegEncContext *s)
1170 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1172 s->mb_width = (s->width + 15) / 16;
1173 s->mb_stride = s->mb_width + 1;
1174 s->b8_stride = s->mb_width * 2 + 1;
1175 mb_array_size = s->mb_height * s->mb_stride;
1176 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1178 /* set default edge pos, will be overridden
1179 * in decode_header if needed */
1180 s->h_edge_pos = s->mb_width * 16;
1181 s->v_edge_pos = s->mb_height * 16;
1183 s->mb_num = s->mb_width * s->mb_height;
1188 s->block_wrap[3] = s->b8_stride;
1190 s->block_wrap[5] = s->mb_stride;
1192 y_size = s->b8_stride * (2 * s->mb_height + 1);
1193 c_size = s->mb_stride * (s->mb_height + 1);
1194 yc_size = y_size + 2 * c_size;
1196 if (s->mb_height & 1)
1197 yc_size += 2*s->b8_stride + 2*s->mb_stride;
1199 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
1200 for (y = 0; y < s->mb_height; y++)
1201 for (x = 0; x < s->mb_width; x++)
1202 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1204 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1207 /* Allocate MV tables */
1208 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1209 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1210 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1211 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1212 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1213 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1214 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1215 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1216 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1217 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1218 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1219 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1221 /* Allocate MB type table */
1222 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1224 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1226 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1227 mb_array_size * sizeof(float), fail);
1228 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1229 mb_array_size * sizeof(float), fail);
1233 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1234 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1235 /* interlaced direct mode decoding tables */
1236 for (i = 0; i < 2; i++) {
1238 for (j = 0; j < 2; j++) {
1239 for (k = 0; k < 2; k++) {
1240 FF_ALLOCZ_OR_GOTO(s->avctx,
1241 s->b_field_mv_table_base[i][j][k],
1242 mv_table_size * 2 * sizeof(int16_t),
1244 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1247 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1248 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1249 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1251 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1254 if (s->out_format == FMT_H263) {
1256 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1257 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1259 /* cbp, ac_pred, pred_dir */
1260 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1261 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1264 if (s->h263_pred || s->h263_plus || !s->encoding) {
1266 // MN: we need these for error resilience of intra-frames
1267 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1268 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1269 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1270 s->dc_val[2] = s->dc_val[1] + c_size;
1271 for (i = 0; i < yc_size; i++)
1272 s->dc_val_base[i] = 1024;
1275 /* which mb is a intra block */
1276 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1277 memset(s->mbintra_table, 1, mb_array_size);
1279 /* init macroblock skip table */
1280 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1281 // Note the + 1 is for a quicker mpeg4 slice_end detection
1285 return AVERROR(ENOMEM);
1289 * init common structure for both encoder and decoder.
1290 * this assumes that some variables like width/height are already set
/*
 * Initialize the parts of MpegEncContext shared by encoder and decoder.
 * Assumes width/height and codec_id were set by the caller.
 * NOTE(review): embedded numbering in this chunk is non-contiguous, so some
 * original lines (braces, gotos, returns) are elided below.
 */
1292 av_cold int ff_mpv_common_init(MpegEncContext *s)
/* one context per slice thread when slice threading is active, else 1 */
1295 int nb_slices = (HAVE_THREADS &&
1296 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1297 s->avctx->thread_count : 1;
1299 if (s->encoding && s->avctx->slices)
1300 nb_slices = s->avctx->slices;
/* interlaced MPEG-2 rounds mb_height up to an even number of MB rows
 * (32-pixel granularity); everything else uses plain 16-pixel rows */
1302 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1303 s->mb_height = (s->height + 31) / 32 * 2;
1305 s->mb_height = (s->height + 15) / 16;
1307 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1308 av_log(s->avctx, AV_LOG_ERROR,
1309 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* clamp the requested slice count to MAX_THREADS and to the MB height */
1313 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1316 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1318 max_slices = MAX_THREADS;
1319 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1320 " reducing to %d\n", nb_slices, max_slices);
1321 nb_slices = max_slices;
1324 if ((s->width || s->height) &&
1325 av_image_check_size(s->width, s->height, 0, s->avctx))
1330 s->flags = s->avctx->flags;
1331 s->flags2 = s->avctx->flags2;
1333 /* set chroma shifts */
1334 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1336 &s->chroma_y_shift);
/* picture pool: only the AVFrame shells are created here; the buffers are
 * allocated lazily per frame */
1339 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1340 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1341 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1342 s->picture[i].f = av_frame_alloc();
1343 if (!s->picture[i].f)
1346 memset(&s->next_picture, 0, sizeof(s->next_picture));
1347 memset(&s->last_picture, 0, sizeof(s->last_picture));
1348 memset(&s->current_picture, 0, sizeof(s->current_picture));
1349 memset(&s->new_picture, 0, sizeof(s->new_picture));
1350 s->next_picture.f = av_frame_alloc();
1351 if (!s->next_picture.f)
1353 s->last_picture.f = av_frame_alloc();
1354 if (!s->last_picture.f)
1356 s->current_picture.f = av_frame_alloc();
1357 if (!s->current_picture.f)
1359 s->new_picture.f = av_frame_alloc();
1360 if (!s->new_picture.f)
/* resolution-dependent tables (MV tables, MB tables, ...) */
1363 if (init_context_frame(s))
1366 s->parse_context.state = -1;
1368 s->context_initialized = 1;
1369 s->thread_context[0] = s;
1371 // if (s->width && s->height) {
/* duplicate the context for each additional slice thread; each duplicate
 * gets its own [start_mb_y, end_mb_y) row range */
1372 if (nb_slices > 1) {
1373 for (i = 1; i < nb_slices; i++) {
1374 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1375 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1378 for (i = 0; i < nb_slices; i++) {
1379 if (init_duplicate_context(s->thread_context[i]) < 0)
1381 s->thread_context[i]->start_mb_y =
1382 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1383 s->thread_context[i]->end_mb_y =
1384 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1387 if (init_duplicate_context(s) < 0)
1390 s->end_mb_y = s->mb_height;
1392 s->slice_context_count = nb_slices;
/* error path: undo everything allocated so far */
1397 ff_mpv_common_end(s);
1402 * Frees and resets MpegEncContext fields depending on the resolution.
1403 * Is used during resolution changes to avoid a full reinitialization of the
1406 static void free_context_frame(MpegEncContext *s)
/* MV tables: free the *_base allocations; the public *_mv_table pointers are
 * offset aliases into those buffers, so they are only reset to NULL */
1410 av_freep(&s->mb_type);
1411 av_freep(&s->p_mv_table_base);
1412 av_freep(&s->b_forw_mv_table_base);
1413 av_freep(&s->b_back_mv_table_base);
1414 av_freep(&s->b_bidir_forw_mv_table_base);
1415 av_freep(&s->b_bidir_back_mv_table_base);
1416 av_freep(&s->b_direct_mv_table_base);
1417 s->p_mv_table = NULL;
1418 s->b_forw_mv_table = NULL;
1419 s->b_back_mv_table = NULL;
1420 s->b_bidir_forw_mv_table = NULL;
1421 s->b_bidir_back_mv_table = NULL;
1422 s->b_direct_mv_table = NULL;
/* field-based (interlaced) MV and field-select tables */
1423 for (i = 0; i < 2; i++) {
1424 for (j = 0; j < 2; j++) {
1425 for (k = 0; k < 2; k++) {
1426 av_freep(&s->b_field_mv_table_base[i][j][k]);
1427 s->b_field_mv_table[i][j][k] = NULL;
1429 av_freep(&s->b_field_select_table[i][j]);
1430 av_freep(&s->p_field_mv_table_base[i][j]);
1431 s->p_field_mv_table[i][j] = NULL;
1433 av_freep(&s->p_field_select_table[i]);
/* per-MB side tables (DC prediction, coded-block flags, error resilience) */
1436 av_freep(&s->dc_val_base);
1437 av_freep(&s->coded_block_base);
1438 av_freep(&s->mbintra_table);
1439 av_freep(&s->cbp_table);
1440 av_freep(&s->pred_dir_table);
1442 av_freep(&s->mbskip_table);
1444 av_freep(&s->er.error_status_table);
1445 av_freep(&s->er.er_temp_buffer);
1446 av_freep(&s->mb_index2xy);
1447 av_freep(&s->lambda_table);
1449 av_freep(&s->cplx_tab);
1450 av_freep(&s->bits_tab);
/* force line sizes to be re-derived at the next frame allocation */
1452 s->linesize = s->uvlinesize = 0;
/*
 * Rebuild all resolution-dependent state after a mid-stream size change,
 * without tearing down the whole context. Returns AVERROR(EINVAL) when the
 * context was never initialized.
 * NOTE(review): embedded numbering is non-contiguous here; some original
 * lines (braces, gotos, returns) are elided.
 */
1455 int ff_mpv_common_frame_size_change(MpegEncContext *s)
1459 if (!s->context_initialized)
1460 return AVERROR(EINVAL);
/* tear down per-slice duplicate contexts (index 0 is the main context and
 * is freed/rebuilt separately) */
1462 if (s->slice_context_count > 1) {
1463 for (i = 0; i < s->slice_context_count; i++) {
1464 free_duplicate_context(s->thread_context[i]);
1466 for (i = 1; i < s->slice_context_count; i++) {
1467 av_freep(&s->thread_context[i]);
1470 free_duplicate_context(s);
1472 free_context_frame(s);
/* keep pool frames but flag them so their buffers get reallocated at the
 * new size on next use */
1475 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1476 s->picture[i].needs_realloc = 1;
1479 s->last_picture_ptr =
1480 s->next_picture_ptr =
1481 s->current_picture_ptr = NULL;
/* recompute MB height; same interlaced-MPEG-2 rounding as common_init */
1484 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1485 s->mb_height = (s->height + 31) / 32 * 2;
1487 s->mb_height = (s->height + 15) / 16;
1489 if ((s->width || s->height) &&
1490 (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
1493 if ((err = init_context_frame(s)))
1496 s->thread_context[0] = s;
/* re-create the slice thread contexts with the new row partitioning */
1498 if (s->width && s->height) {
1499 int nb_slices = s->slice_context_count;
1500 if (nb_slices > 1) {
1501 for (i = 1; i < nb_slices; i++) {
1502 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1503 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1506 for (i = 0; i < nb_slices; i++) {
1507 if ((err = init_duplicate_context(s->thread_context[i])) < 0)
1509 s->thread_context[i]->start_mb_y =
1510 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1511 s->thread_context[i]->end_mb_y =
1512 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1515 err = init_duplicate_context(s);
1519 s->end_mb_y = s->mb_height;
1521 s->slice_context_count = nb_slices;
/* error path: full teardown */
1526 ff_mpv_common_end(s);
1530 /* init common structure for both encoder and decoder */
1531 void ff_mpv_common_end(MpegEncContext *s)
/* free per-slice duplicate contexts; slot 0 is the main context itself */
1535 if (s->slice_context_count > 1) {
1536 for (i = 0; i < s->slice_context_count; i++) {
1537 free_duplicate_context(s->thread_context[i]);
1539 for (i = 1; i < s->slice_context_count; i++) {
1540 av_freep(&s->thread_context[i]);
1542 s->slice_context_count = 1;
1543 } else free_duplicate_context(s);
1545 av_freep(&s->parse_context.buffer);
1546 s->parse_context.buffer_size = 0;
1548 av_freep(&s->bitstream_buffer);
1549 s->allocated_bitstream_buffer_size = 0;
/* release the whole picture pool plus the four standalone Pictures
 * (last/current/next/new): tables, buffer refs, then the AVFrame shells */
1552 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1553 ff_free_picture_tables(&s->picture[i]);
1554 ff_mpeg_unref_picture(s, &s->picture[i]);
1555 av_frame_free(&s->picture[i].f);
1558 av_freep(&s->picture);
1559 ff_free_picture_tables(&s->last_picture);
1560 ff_mpeg_unref_picture(s, &s->last_picture);
1561 av_frame_free(&s->last_picture.f);
1562 ff_free_picture_tables(&s->current_picture);
1563 ff_mpeg_unref_picture(s, &s->current_picture);
1564 av_frame_free(&s->current_picture.f);
1565 ff_free_picture_tables(&s->next_picture);
1566 ff_mpeg_unref_picture(s, &s->next_picture);
1567 av_frame_free(&s->next_picture.f);
1568 ff_free_picture_tables(&s->new_picture);
1569 ff_mpeg_unref_picture(s, &s->new_picture);
1570 av_frame_free(&s->new_picture.f);
/* finally drop the resolution-dependent tables and mark uninitialized */
1572 free_context_frame(s);
1574 s->context_initialized = 0;
1575 s->last_picture_ptr =
1576 s->next_picture_ptr =
1577 s->current_picture_ptr = NULL;
1578 s->linesize = s->uvlinesize = 0;
/*
 * Build the derived run/level lookup tables (max_level[], max_run[],
 * index_run[]) for an RLTable, for both the "not last" and "last"
 * coefficient classes. When static_store is non-NULL the results live in
 * that caller-provided storage (and a prior init is detected and skipped);
 * otherwise they are av_malloc'ed.
 * NOTE(review): some original lines (start/end setup, braces) are elided
 * in this chunk.
 */
1581 av_cold void ff_init_rl(RLTable *rl,
1582 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1584 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1585 uint8_t index_run[MAX_RUN + 1];
1586 int last, run, level, start, end, i;
1588 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1589 if (static_store && rl->max_level[0])
1592 /* compute max_level[], max_run[] and index_run[] */
1593 for (last = 0; last < 2; last++) {
/* index_run entries default to rl->n, meaning "run not present" */
1602 memset(max_level, 0, MAX_RUN + 1);
1603 memset(max_run, 0, MAX_LEVEL + 1);
1604 memset(index_run, rl->n, MAX_RUN + 1);
1605 for (i = start; i < end; i++) {
1606 run = rl->table_run[i];
1607 level = rl->table_level[i];
1608 if (index_run[run] == rl->n)
1610 if (level > max_level[run])
1611 max_level[run] = level;
1612 if (run > max_run[level])
1613 max_run[level] = run;
/* publish: either carve the three tables out of static_store[last]
 * back-to-back, or allocate each on the heap */
1616 rl->max_level[last] = static_store[last];
1618 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1619 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1621 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1623 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1624 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1626 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1628 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1629 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/*
 * Precompute rl->rl_vlc[q]: for every qscale q in [0,32) fold the VLC decode
 * step and the run/level dequantization into one table lookup, so decoding
 * becomes a single indexed read per code.
 * NOTE(review): qmul setup and several branch bodies are elided in this
 * chunk (embedded numbering is non-contiguous).
 */
1633 av_cold void ff_init_vlc_rl(RLTable *rl, unsigned static_size)
1636 VLC_TYPE table[1500][2] = {{0}};
1637 VLC vlc = { .table = table, .table_allocated = static_size };
1638 av_assert0(static_size <= FF_ARRAY_ELEMS(table));
1639 init_vlc(&vlc, 9, rl->n + 1, &rl->table_vlc[0][1], 4, 2, &rl->table_vlc[0][0], 4, 2, INIT_VLC_USE_NEW_STATIC);
1641 for (q = 0; q < 32; q++) {
1643 int qadd = (q - 1) | 1;
1649 for (i = 0; i < vlc.table_size; i++) {
1650 int code = vlc.table[i][0];
1651 int len = vlc.table[i][1];
1654 if (len == 0) { // illegal code
1657 } else if (len < 0) { // more bits needed
1661 if (code == rl->n) { // esc
/* normal code: dequantize level, bias run by +1; codes at or past
 * rl->last are "last coefficient" entries, flagged via run += 192 */
1665 run = rl->table_run[code] + 1;
1666 level = rl->table_level[code] * qmul + qadd;
1667 if (code >= rl->last) run += 192;
1670 rl->rl_vlc[q][i].len = len;
1671 rl->rl_vlc[q][i].level = level;
1672 rl->rl_vlc[q][i].run = run;
/* Unref every picture in the pool that is not flagged as a reference. */
1677 static void release_unused_pictures(MpegEncContext *s)
1681 /* release non reference frames */
1682 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1683 if (!s->picture[i].reference)
1684 ff_mpeg_unref_picture(s, &s->picture[i]);
/*
 * Whether a pool slot may be reused: never the last-picture reference;
 * otherwise a slot with no buffer, or one flagged for reallocation that is
 * not a delayed reference.
 * NOTE(review): the return statements between these checks are elided in
 * this chunk.
 */
1688 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1690 if (pic == s->last_picture_ptr)
1692 if (!pic->f->buf[0])
1694 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/*
 * Find a reusable slot in s->picture[]. First pass prefers completely empty
 * slots (no buffer, not the last-picture ref); second pass accepts anything
 * pic_is_unused() allows. Hitting neither is an internal error.
 * NOTE(review): the returns inside the loops and the shared/non-shared
 * branch are elided in this chunk.
 */
1699 static int find_unused_picture(MpegEncContext *s, int shared)
1704 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1705 if (!s->picture[i].f->buf[0] && &s->picture[i] != s->last_picture_ptr)
1709 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1710 if (pic_is_unused(s, &s->picture[i]))
1715 av_log(s->avctx, AV_LOG_FATAL,
1716 "Internal error, picture buffer overflow\n");
1717 /* We could return -1, but the codec would crash trying to draw into a
1718 * non-existing frame anyway. This is safer than waiting for a random crash.
1719 * Also the return of this is never useful, an encoder must only allocate
1720 * as much as allowed in the specification. This has no relationship to how
1721 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1722 * enough for such valid streams).
1723 * Plus, a decoder has to check stream validity and remove frames if too
1724 * many reference frames are around. Waiting for "OOM" is not correct at
1725 * all. Similarly, missing reference frames have to be replaced by
1726 * interpolated/MC frames, anything else is a bug in the codec ...
/*
 * Public wrapper around find_unused_picture(): if the chosen slot was
 * flagged needs_realloc, drop its tables and buffer refs so it gets
 * reallocated at the current size before reuse.
 */
1732 int ff_find_unused_picture(MpegEncContext *s, int shared)
1734 int ret = find_unused_picture(s, shared);
1736 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1737 if (s->picture[ret].needs_realloc) {
1738 s->picture[ret].needs_realloc = 0;
1739 ff_free_picture_tables(&s->picture[ret]);
1740 ff_mpeg_unref_picture(s, &s->picture[ret]);
/*
 * Overwrite a frame with mid-gray (0x80 in all three planes), used by the
 * FF_DEBUG_NOMC debug mode. Chroma planes are filled at their subsampled
 * dimensions.
 */
1746 static void gray_frame(AVFrame *frame)
1748 int i, h_chroma_shift, v_chroma_shift;
1750 av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1752 for(i=0; i<frame->height; i++)
1753 memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1754 for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1755 memset(frame->data[1] + frame->linesize[1]*i,
1756 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1757 memset(frame->data[2] + frame->linesize[2]*i,
1758 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1763 * generic function called after decoding
1764 * the header and before a frame is decoded.
/*
 * Per-frame setup run after the header is parsed and before decoding:
 * recycles old pictures, picks/allocates the current picture, maintains the
 * last/next reference pointers (allocating gray dummy frames when references
 * are missing), adjusts pointers for field pictures and selects the
 * dequantizer. NOTE(review): embedded numbering is non-contiguous in this
 * chunk — braces, returns and some error paths are elided below.
 */
1766 int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1772 if (!ff_thread_can_start_frame(avctx)) {
1773 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1777 /* mark & release old frames */
1778 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1779 s->last_picture_ptr != s->next_picture_ptr &&
1780 s->last_picture_ptr->f->buf[0]) {
1781 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1784 /* release forgotten pictures */
1785 /* if (mpeg124/h263) */
1786 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1787 if (&s->picture[i] != s->last_picture_ptr &&
1788 &s->picture[i] != s->next_picture_ptr &&
1789 s->picture[i].reference && !s->picture[i].needs_realloc) {
/* under frame threading leftover references are expected; otherwise
 * a still-referenced stray picture indicates a leak ("zombie") */
1790 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1791 av_log(avctx, AV_LOG_ERROR,
1792 "releasing zombie picture\n");
1793 ff_mpeg_unref_picture(s, &s->picture[i]);
1797 ff_mpeg_unref_picture(s, &s->current_picture);
1799 release_unused_pictures(s);
1801 if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1802 // we already have a unused image
1803 // (maybe it was set before reading the header)
1804 pic = s->current_picture_ptr;
1806 i = ff_find_unused_picture(s, 0);
1808 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1811 pic = &s->picture[i];
/* droppable frames are never kept as references */
1815 if (!s->droppable) {
1816 if (s->pict_type != AV_PICTURE_TYPE_B)
1820 pic->f->coded_picture_number = s->coded_picture_number++;
1822 if (ff_alloc_picture(s, pic, 0) < 0)
1825 s->current_picture_ptr = pic;
1826 // FIXME use only the vars from current_pic
1827 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1828 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1829 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* for field pictures derive top_field_first from which field comes
 * first in the bitstream */
1830 if (s->picture_structure != PICT_FRAME)
1831 s->current_picture_ptr->f->top_field_first =
1832 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1834 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1835 !s->progressive_sequence;
1836 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1838 s->current_picture_ptr->f->pict_type = s->pict_type;
1839 // if (s->flags && CODEC_FLAG_QSCALE)
1840 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1841 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1843 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1844 s->current_picture_ptr)) < 0)
/* non-B frames advance the reference chain: next becomes last, and the
 * current picture becomes the new next reference */
1847 if (s->pict_type != AV_PICTURE_TYPE_B) {
1848 s->last_picture_ptr = s->next_picture_ptr;
1850 s->next_picture_ptr = s->current_picture_ptr;
1852 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1853 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1854 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1855 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1856 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1857 s->pict_type, s->droppable);
/* missing last-picture reference: fabricate a dummy so prediction has
 * something to read from (gray, or green=16 luma for FLV1/H263) */
1859 if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1860 (s->pict_type != AV_PICTURE_TYPE_I ||
1861 s->picture_structure != PICT_FRAME)) {
1862 int h_chroma_shift, v_chroma_shift;
1863 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1864 &h_chroma_shift, &v_chroma_shift);
1865 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1866 av_log(avctx, AV_LOG_DEBUG,
1867 "allocating dummy last picture for B frame\n");
1868 else if (s->pict_type != AV_PICTURE_TYPE_I)
1869 av_log(avctx, AV_LOG_ERROR,
1870 "warning: first frame is no keyframe\n");
1871 else if (s->picture_structure != PICT_FRAME)
1872 av_log(avctx, AV_LOG_DEBUG,
1873 "allocate dummy last picture for field based first keyframe\n");
1875 /* Allocate a dummy frame */
1876 i = ff_find_unused_picture(s, 0);
1878 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1881 s->last_picture_ptr = &s->picture[i];
1883 s->last_picture_ptr->reference = 3;
1884 s->last_picture_ptr->f->key_frame = 0;
1885 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1887 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1888 s->last_picture_ptr = NULL;
/* only software-owned buffers can be memset; hwaccel surfaces cannot */
1892 if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1893 for(i=0; i<avctx->height; i++)
1894 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1895 0x80, avctx->width);
1896 for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1897 memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1898 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1899 memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1900 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1903 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1904 for(i=0; i<avctx->height; i++)
1905 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
/* mark both fields of the dummy as fully decoded for frame threading */
1909 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1910 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* B frames also need a next (future) reference; fabricate it likewise */
1912 if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1913 s->pict_type == AV_PICTURE_TYPE_B) {
1914 /* Allocate a dummy frame */
1915 i = ff_find_unused_picture(s, 0);
1917 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1920 s->next_picture_ptr = &s->picture[i];
1922 s->next_picture_ptr->reference = 3;
1923 s->next_picture_ptr->f->key_frame = 0;
1924 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1926 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1927 s->next_picture_ptr = NULL;
1930 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1931 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1934 #if 0 // BUFREF-FIXME
1935 memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1936 memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
/* refresh the working copies of the reference pictures */
1938 if (s->last_picture_ptr) {
1939 ff_mpeg_unref_picture(s, &s->last_picture);
1940 if (s->last_picture_ptr->f->buf[0] &&
1941 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1942 s->last_picture_ptr)) < 0)
1945 if (s->next_picture_ptr) {
1946 ff_mpeg_unref_picture(s, &s->next_picture);
1947 if (s->next_picture_ptr->f->buf[0] &&
1948 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1949 s->next_picture_ptr)) < 0)
1953 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1954 s->last_picture_ptr->f->buf[0]));
/* field picture: point data at the selected field and double the strides
 * so the code below sees a half-height single-field image */
1956 if (s->picture_structure!= PICT_FRAME) {
1958 for (i = 0; i < 4; i++) {
1959 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1960 s->current_picture.f->data[i] +=
1961 s->current_picture.f->linesize[i];
1963 s->current_picture.f->linesize[i] *= 2;
1964 s->last_picture.f->linesize[i] *= 2;
1965 s->next_picture.f->linesize[i] *= 2;
1969 s->err_recognition = avctx->err_recognition;
1971 /* set dequantizer, we can't do it during init as
1972 * it might change for mpeg4 and we can't do it in the header
1973 * decode as init is not called for mpeg4 there yet */
1974 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1975 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1976 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1977 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1978 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1979 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1981 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1982 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
/* debug aid: blank the frame so only freshly decoded data is visible */
1985 if (s->avctx->debug & FF_DEBUG_NOMC) {
1986 gray_frame(s->current_picture_ptr->f);
1992 /* called after a frame has been decoded. */
1993 void ff_mpv_frame_end(MpegEncContext *s)
/* tell frame-threading consumers that this reference frame is complete */
1997 if (s->current_picture.reference)
1998 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
/*
 * Clip the segment (sx,sy)-(ex,ey) against the x range [0, maxx], updating
 * the coordinates in place; the recursive call normalizes the endpoint
 * order. int64_t intermediates avoid overflow in the interpolation.
 * NOTE(review): the guard conditions and returns around these lines are
 * elided in this chunk.
 */
2003 static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
2006 return clip_line(ex, ey, sx, sy, maxx);
2011 *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
2018 *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
2026 * Draw a line from (ex, ey) -> (sx, sy).
2027 * @param w width of the image
2028 * @param h height of the image
2029 * @param stride stride/linesize of the image
2030 * @param color color of the arrow
/*
 * Additive anti-aliased line renderer over an 8-bit plane: after clipping,
 * it walks the major axis with a 16.16 fixed-point slope and splits the
 * color between the two adjacent pixels by the fractional part.
 * NOTE(review): several original lines (early returns, else branch braces)
 * are elided in this chunk.
 */
2032 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
2033 int w, int h, int stride, int color)
2037 if (clip_line(&sx, &sy, &ex, &ey, w - 1))
2039 if (clip_line(&sy, &sx, &ey, &ex, h - 1))
2042 sx = av_clip(sx, 0, w - 1);
2043 sy = av_clip(sy, 0, h - 1);
2044 ex = av_clip(ex, 0, w - 1);
2045 ey = av_clip(ey, 0, h - 1);
2047 buf[sy * stride + sx] += color;
/* mostly-horizontal: step x, interpolate y */
2049 if (FFABS(ex - sx) > FFABS(ey - sy)) {
2051 FFSWAP(int, sx, ex);
2052 FFSWAP(int, sy, ey);
2054 buf += sx + sy * stride;
2056 f = ((ey - sy) << 16) / ex;
2057 for (x = 0; x <= ex; x++) {
2059 fr = (x * f) & 0xFFFF;
2060 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2061 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* mostly-vertical: step y, interpolate x */
2065 FFSWAP(int, sx, ex);
2066 FFSWAP(int, sy, ey);
2068 buf += sx + sy * stride;
2071 f = ((ex - sx) << 16) / ey;
2074 for(y= 0; y <= ey; y++){
2076 fr = (y*f) & 0xFFFF;
2077 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2078 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
2084 * Draw an arrow from (ex, ey) -> (sx, sy).
2085 * @param w width of the image
2086 * @param h height of the image
2087 * @param stride stride/linesize of the image
2088 * @param color color of the arrow
/*
 * Draw an arrow for motion-vector visualization: a shaft plus, when the
 * vector is longer than 3 pixels, two head strokes perpendicular/diagonal
 * to the direction. Endpoints are pre-clipped into a generous +/-100 pixel
 * band so clip_line's arithmetic cannot overflow.
 * NOTE(review): dx/dy/rx/ry setup lines are elided in this chunk.
 */
2090 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2091 int ey, int w, int h, int stride, int color, int tail, int direction)
2096 FFSWAP(int, sx, ex);
2097 FFSWAP(int, sy, ey);
2100 sx = av_clip(sx, -100, w + 100);
2101 sy = av_clip(sy, -100, h + 100);
2102 ex = av_clip(ex, -100, w + 100);
2103 ey = av_clip(ey, -100, h + 100);
/* only draw a head when the vector is longer than 3 pixels */
2108 if (dx * dx + dy * dy > 3 * 3) {
2111 int length = ff_sqrt((rx * rx + ry * ry) << 8);
2113 // FIXME subpixel accuracy
2114 rx = ROUNDED_DIV(rx * 3 << 4, length);
2115 ry = ROUNDED_DIV(ry * 3 << 4, length);
2122 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2123 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2125 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
/*
 * Fill one AVMotionVector record for export; zero-length vectors are
 * skipped. Block dimensions are 8 or 16 per axis depending on the MB
 * partitioning flags. source is -1 for forward (past) prediction and +1
 * for backward. NOTE(review): the direction parameter line, assignments of
 * the coordinate fields and the returns are elided in this chunk.
 */
2129 static int add_mb(AVMotionVector *mb, uint32_t mb_type,
2130 int dst_x, int dst_y,
2131 int src_x, int src_y,
2134 if (dst_x == src_x && dst_y == src_y)
2136 mb->w = IS_8X8(mb_type) || IS_8X16(mb_type) ? 8 : 16;
2137 mb->h = IS_8X8(mb_type) || IS_16X8(mb_type) ? 8 : 16;
2142 mb->source = direction ? 1 : -1;
2143 mb->flags = 0; // XXX: does mb_type contain extra information that could be exported here?
2148 * Print debugging info for the given picture.
2150 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2151 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2153 int mb_width, int mb_height, int mb_stride, int quarter_sample)
2155 if ((avctx->flags2 & CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) {
2156 const int shift = 1 + quarter_sample;
2157 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2158 const int mv_stride = (mb_width << mv_sample_log2) +
2159 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2160 int mb_x, mb_y, mbcount = 0;
2162 /* size is width * height * 2 * 4 where 2 is for directions and 4 is
2163 * for the maximum number of MB (4 MB in case of IS_8x8) */
2164 AVMotionVector *mvs = av_malloc_array(mb_width * mb_height, 2 * 4 * sizeof(AVMotionVector));
2168 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2169 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2170 int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
2171 for (direction = 0; direction < 2; direction++) {
2172 if (!USES_LIST(mb_type, direction))
2174 if (IS_8X8(mb_type)) {
2175 for (i = 0; i < 4; i++) {
2176 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2177 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2178 int xy = (mb_x * 2 + (i & 1) +
2179 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2180 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2181 int my = (motion_val[direction][xy][1] >> shift) + sy;
2182 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2184 } else if (IS_16X8(mb_type)) {
2185 for (i = 0; i < 2; i++) {
2186 int sx = mb_x * 16 + 8;
2187 int sy = mb_y * 16 + 4 + 8 * i;
2188 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2189 int mx = (motion_val[direction][xy][0] >> shift);
2190 int my = (motion_val[direction][xy][1] >> shift);
2192 if (IS_INTERLACED(mb_type))
2195 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2197 } else if (IS_8X16(mb_type)) {
2198 for (i = 0; i < 2; i++) {
2199 int sx = mb_x * 16 + 4 + 8 * i;
2200 int sy = mb_y * 16 + 8;
2201 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2202 int mx = motion_val[direction][xy][0] >> shift;
2203 int my = motion_val[direction][xy][1] >> shift;
2205 if (IS_INTERLACED(mb_type))
2208 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2211 int sx = mb_x * 16 + 8;
2212 int sy = mb_y * 16 + 8;
2213 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
2214 int mx = (motion_val[direction][xy][0]>>shift) + sx;
2215 int my = (motion_val[direction][xy][1]>>shift) + sy;
2216 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2223 AVFrameSideData *sd;
2225 av_log(avctx, AV_LOG_DEBUG, "Adding %d MVs info to frame %d\n", mbcount, avctx->frame_number);
2226 sd = av_frame_new_side_data(pict, AV_FRAME_DATA_MOTION_VECTORS, mbcount * sizeof(AVMotionVector));
2229 memcpy(sd->data, mvs, mbcount * sizeof(AVMotionVector));
2235 /* TODO: export all the following to make them accessible for users (and filters) */
2236 if (avctx->hwaccel || !mbtype_table
2237 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
2241 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2244 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2245 av_get_picture_type_char(pict->pict_type));
2246 for (y = 0; y < mb_height; y++) {
2247 for (x = 0; x < mb_width; x++) {
2248 if (avctx->debug & FF_DEBUG_SKIP) {
2249 int count = mbskip_table[x + y * mb_stride];
2252 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2254 if (avctx->debug & FF_DEBUG_QP) {
2255 av_log(avctx, AV_LOG_DEBUG, "%2d",
2256 qscale_table[x + y * mb_stride]);
2258 if (avctx->debug & FF_DEBUG_MB_TYPE) {
2259 int mb_type = mbtype_table[x + y * mb_stride];
2260 // Type & MV direction
2261 if (IS_PCM(mb_type))
2262 av_log(avctx, AV_LOG_DEBUG, "P");
2263 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2264 av_log(avctx, AV_LOG_DEBUG, "A");
2265 else if (IS_INTRA4x4(mb_type))
2266 av_log(avctx, AV_LOG_DEBUG, "i");
2267 else if (IS_INTRA16x16(mb_type))
2268 av_log(avctx, AV_LOG_DEBUG, "I");
2269 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2270 av_log(avctx, AV_LOG_DEBUG, "d");
2271 else if (IS_DIRECT(mb_type))
2272 av_log(avctx, AV_LOG_DEBUG, "D");
2273 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2274 av_log(avctx, AV_LOG_DEBUG, "g");
2275 else if (IS_GMC(mb_type))
2276 av_log(avctx, AV_LOG_DEBUG, "G");
2277 else if (IS_SKIP(mb_type))
2278 av_log(avctx, AV_LOG_DEBUG, "S");
2279 else if (!USES_LIST(mb_type, 1))
2280 av_log(avctx, AV_LOG_DEBUG, ">");
2281 else if (!USES_LIST(mb_type, 0))
2282 av_log(avctx, AV_LOG_DEBUG, "<");
2284 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2285 av_log(avctx, AV_LOG_DEBUG, "X");
2289 if (IS_8X8(mb_type))
2290 av_log(avctx, AV_LOG_DEBUG, "+");
2291 else if (IS_16X8(mb_type))
2292 av_log(avctx, AV_LOG_DEBUG, "-");
2293 else if (IS_8X16(mb_type))
2294 av_log(avctx, AV_LOG_DEBUG, "|");
2295 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2296 av_log(avctx, AV_LOG_DEBUG, " ");
2298 av_log(avctx, AV_LOG_DEBUG, "?");
2301 if (IS_INTERLACED(mb_type))
2302 av_log(avctx, AV_LOG_DEBUG, "=");
2304 av_log(avctx, AV_LOG_DEBUG, " ");
2307 av_log(avctx, AV_LOG_DEBUG, "\n");
2311 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2312 (avctx->debug_mv)) {
2315 int h_chroma_shift, v_chroma_shift, block_height;
2317 const int shift = 1 + quarter_sample;
2319 const int width = avctx->width;
2320 const int height = avctx->height;
2322 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2323 const int mv_stride = (mb_width << mv_sample_log2) +
2324 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2326 *low_delay = 0; // needed to see the vectors without trashing the buffers
2328 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2330 av_frame_make_writable(pict);
2332 pict->opaque = NULL;
2334 ptr = pict->data[0];
2336 block_height = 16 >> v_chroma_shift;
2338 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2340 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2341 const int mb_index = mb_x + mb_y * mb_stride;
2343 if ((avctx->debug_mv) && motion_val[0]) {
2345 for (type = 0; type < 3; type++) {
2349 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2350 (pict->pict_type!= AV_PICTURE_TYPE_P))
2355 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2356 (pict->pict_type!= AV_PICTURE_TYPE_B))
2361 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2362 (pict->pict_type!= AV_PICTURE_TYPE_B))
2367 if (!USES_LIST(mbtype_table[mb_index], direction))
2370 if (IS_8X8(mbtype_table[mb_index])) {
2372 for (i = 0; i < 4; i++) {
2373 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2374 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2375 int xy = (mb_x * 2 + (i & 1) +
2376 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2377 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2378 int my = (motion_val[direction][xy][1] >> shift) + sy;
2379 draw_arrow(ptr, sx, sy, mx, my, width,
2380 height, pict->linesize[0], 100, 0, direction);
2382 } else if (IS_16X8(mbtype_table[mb_index])) {
2384 for (i = 0; i < 2; i++) {
2385 int sx = mb_x * 16 + 8;
2386 int sy = mb_y * 16 + 4 + 8 * i;
2387 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2388 int mx = (motion_val[direction][xy][0] >> shift);
2389 int my = (motion_val[direction][xy][1] >> shift);
2391 if (IS_INTERLACED(mbtype_table[mb_index]))
2394 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2395 height, pict->linesize[0], 100, 0, direction);
2397 } else if (IS_8X16(mbtype_table[mb_index])) {
2399 for (i = 0; i < 2; i++) {
2400 int sx = mb_x * 16 + 4 + 8 * i;
2401 int sy = mb_y * 16 + 8;
2402 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2403 int mx = motion_val[direction][xy][0] >> shift;
2404 int my = motion_val[direction][xy][1] >> shift;
2406 if (IS_INTERLACED(mbtype_table[mb_index]))
2409 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2410 height, pict->linesize[0], 100, 0, direction);
2413 int sx= mb_x * 16 + 8;
2414 int sy= mb_y * 16 + 8;
2415 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2416 int mx= (motion_val[direction][xy][0]>>shift) + sx;
2417 int my= (motion_val[direction][xy][1]>>shift) + sy;
2418 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
2423 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2424 uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2425 0x0101010101010101ULL;
2427 for (y = 0; y < block_height; y++) {
2428 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2429 (block_height * mb_y + y) *
2430 pict->linesize[1]) = c;
2431 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2432 (block_height * mb_y + y) *
2433 pict->linesize[2]) = c;
2436 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2438 int mb_type = mbtype_table[mb_index];
2441 #define COLOR(theta, r) \
2442 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2443 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2447 if (IS_PCM(mb_type)) {
2449 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2450 IS_INTRA16x16(mb_type)) {
2452 } else if (IS_INTRA4x4(mb_type)) {
2454 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2456 } else if (IS_DIRECT(mb_type)) {
2458 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2460 } else if (IS_GMC(mb_type)) {
2462 } else if (IS_SKIP(mb_type)) {
2464 } else if (!USES_LIST(mb_type, 1)) {
2466 } else if (!USES_LIST(mb_type, 0)) {
2469 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2473 u *= 0x0101010101010101ULL;
2474 v *= 0x0101010101010101ULL;
2475 for (y = 0; y < block_height; y++) {
2476 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2477 (block_height * mb_y + y) * pict->linesize[1]) = u;
2478 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2479 (block_height * mb_y + y) * pict->linesize[2]) = v;
2483 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2484 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2485 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2486 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2487 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2489 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2490 for (y = 0; y < 16; y++)
2491 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2492 pict->linesize[0]] ^= 0x80;
2494 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2495 int dm = 1 << (mv_sample_log2 - 2);
2496 for (i = 0; i < 4; i++) {
2497 int sx = mb_x * 16 + 8 * (i & 1);
2498 int sy = mb_y * 16 + 8 * (i >> 1);
2499 int xy = (mb_x * 2 + (i & 1) +
2500 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2502 int32_t *mv = (int32_t *) &motion_val[0][xy];
2503 if (mv[0] != mv[dm] ||
2504 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2505 for (y = 0; y < 8; y++)
2506 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2507 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2508 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2509 pict->linesize[0]) ^= 0x8080808080808080ULL;
2513 if (IS_INTERLACED(mb_type) &&
2514 avctx->codec->id == AV_CODEC_ID_H264) {
2518 mbskip_table[mb_index] = 0;
2524 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2526 ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
2527 p->qscale_table, p->motion_val, &s->low_delay,
2528 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2531 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2533 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2534 int offset = 2*s->mb_stride + 1;
2536 return AVERROR(ENOMEM);
2537 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2538 ref->size -= offset;
2539 ref->data += offset;
2540 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
2543 static inline int hpel_motion_lowres(MpegEncContext *s,
2544 uint8_t *dest, uint8_t *src,
2545 int field_based, int field_select,
2546 int src_x, int src_y,
2547 int width, int height, ptrdiff_t stride,
2548 int h_edge_pos, int v_edge_pos,
2549 int w, int h, h264_chroma_mc_func *pix_op,
2550 int motion_x, int motion_y)
2552 const int lowres = s->avctx->lowres;
2553 const int op_index = FFMIN(lowres, 3);
2554 const int s_mask = (2 << lowres) - 1;
2558 if (s->quarter_sample) {
2563 sx = motion_x & s_mask;
2564 sy = motion_y & s_mask;
2565 src_x += motion_x >> lowres + 1;
2566 src_y += motion_y >> lowres + 1;
2568 src += src_y * stride + src_x;
2570 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2571 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2572 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2573 s->linesize, s->linesize,
2574 w + 1, (h + 1) << field_based,
2575 src_x, src_y << field_based,
2576 h_edge_pos, v_edge_pos);
2577 src = s->edge_emu_buffer;
2581 sx = (sx << 2) >> lowres;
2582 sy = (sy << 2) >> lowres;
2585 pix_op[op_index](dest, src, stride, h, sx, sy);
/* NOTE(review): this extract has gaps — several original lines are missing
 * between the numbered lines below; do not assume the statements are
 * contiguous. Comments added only where the visible code supports them. */
2589 /* apply one mpeg motion vector to the three components */
2590 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2597 uint8_t **ref_picture,
2598 h264_chroma_mc_func *pix_op,
2599 int motion_x, int motion_y,
2602 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2603 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2604 ptrdiff_t uvlinesize, linesize;
2605 const int lowres = s->avctx->lowres;
2606 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2607 const int block_s = 8>>lowres;
2608 const int s_mask = (2 << lowres) - 1;
2609 const int h_edge_pos = s->h_edge_pos >> lowres;
2610 const int v_edge_pos = s->v_edge_pos >> lowres;
// linesizes are doubled for field-based access (every other line)
2611 linesize = s->current_picture.f->linesize[0] << field_based;
2612 uvlinesize = s->current_picture.f->linesize[1] << field_based;
2614 // FIXME obviously not perfect but qpel will not work in lowres anyway
2615 if (s->quarter_sample) {
2621 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
// sub-pel fraction from the low MV bits; integer part becomes src_x/src_y
2624 sx = motion_x & s_mask;
2625 sy = motion_y & s_mask;
2626 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2627 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
// chroma MV derivation differs per container format (H.263 / H.261 / other)
2629 if (s->out_format == FMT_H263) {
2630 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2631 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2632 uvsrc_x = src_x >> 1;
2633 uvsrc_y = src_y >> 1;
2634 } else if (s->out_format == FMT_H261) {
2635 // even chroma mv's are full pel in H261
2638 uvsx = (2 * mx) & s_mask;
2639 uvsy = (2 * my) & s_mask;
2640 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2641 uvsrc_y = mb_y * block_s + (my >> lowres);
2643 if(s->chroma_y_shift){
2648 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2649 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2651 if(s->chroma_x_shift){
2655 uvsy = motion_y & s_mask;
2657 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2660 uvsx = motion_x & s_mask;
2661 uvsy = motion_y & s_mask;
2668 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2669 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2670 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
// out-of-picture source: copy through the edge emulation buffer
2672 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2673 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2674 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2675 linesize >> field_based, linesize >> field_based,
2676 17, 17 + field_based,
2677 src_x, src_y << field_based, h_edge_pos,
2679 ptr_y = s->edge_emu_buffer;
2680 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2681 uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2682 uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2683 s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2684 uvlinesize >> field_based, uvlinesize >> field_based,
2686 uvsrc_x, uvsrc_y << field_based,
2687 h_edge_pos >> 1, v_edge_pos >> 1);
2688 s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2689 uvlinesize >> field_based,uvlinesize >> field_based,
2691 uvsrc_x, uvsrc_y << field_based,
2692 h_edge_pos >> 1, v_edge_pos >> 1);
2698 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
// bottom field: advance destination (and, below, source) by one line
2700 dest_y += s->linesize;
2701 dest_cb += s->uvlinesize;
2702 dest_cr += s->uvlinesize;
2706 ptr_y += s->linesize;
2707 ptr_cb += s->uvlinesize;
2708 ptr_cr += s->uvlinesize;
// rescale sub-pel fraction to the 1/8-pel units of the chroma MC functions
2711 sx = (sx << 2) >> lowres;
2712 sy = (sy << 2) >> lowres;
2713 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2715 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2716 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2717 uvsx = (uvsx << 2) >> lowres;
2718 uvsy = (uvsy << 2) >> lowres;
2720 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2721 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2724 // FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV macroblocks in lowres mode: one
 * averaged chroma vector is applied to both the Cb and the Cr plane.
 * NOTE(review): extraction gaps — some original lines are missing between
 * the numbered lines below. */
2727 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2728 uint8_t *dest_cb, uint8_t *dest_cr,
2729 uint8_t **ref_picture,
2730 h264_chroma_mc_func * pix_op,
2733 const int lowres = s->avctx->lowres;
2734 const int op_index = FFMIN(lowres, 3);
2735 const int block_s = 8 >> lowres;
2736 const int s_mask = (2 << lowres) - 1;
2737 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2738 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2739 int emu = 0, src_x, src_y, sx, sy;
2743 if (s->quarter_sample) {
2748 /* In case of 8X8, we construct a single chroma motion vector
2749 with a special rounding */
2750 mx = ff_h263_round_chroma(mx);
2751 my = ff_h263_round_chroma(my);
2755 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2756 src_y = s->mb_y * block_s + (my >> lowres + 1);
2758 offset = src_y * s->uvlinesize + src_x;
2759 ptr = ref_picture[1] + offset;
// Cb plane; edge-emulate when the source block crosses the picture border
2760 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2761 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2762 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2763 s->uvlinesize, s->uvlinesize,
2765 src_x, src_y, h_edge_pos, v_edge_pos);
2766 ptr = s->edge_emu_buffer;
2769 sx = (sx << 2) >> lowres;
2770 sy = (sy << 2) >> lowres;
2771 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
// Cr plane reuses the same offset and sub-pel fractions
2773 ptr = ref_picture[2] + offset;
2775 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2776 s->uvlinesize, s->uvlinesize,
2778 src_x, src_y, h_edge_pos, v_edge_pos);
2779 ptr = s->edge_emu_buffer;
2781 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
/* NOTE(review): extraction gaps — the switch cases (MV_TYPE_*) and several
 * braces are missing between the numbered lines below. */
2785 * motion compensation of a single macroblock
2787 * @param dest_y luma destination pointer
2788 * @param dest_cb chroma cb/u destination pointer
2789 * @param dest_cr chroma cr/v destination pointer
2790 * @param dir direction (0->forward, 1->backward)
2791 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2792 * @param pix_op halfpel motion compensation function (average or put normally)
2793 * the motion vectors are taken from s->mv and the MV type from s->mv_type
2795 static inline void MPV_motion_lowres(MpegEncContext *s,
2796 uint8_t *dest_y, uint8_t *dest_cb,
2798 int dir, uint8_t **ref_picture,
2799 h264_chroma_mc_func *pix_op)
2803 const int lowres = s->avctx->lowres;
2804 const int block_s = 8 >>lowres;
2809 switch (s->mv_type) {
// 16x16: a single vector covers the whole macroblock
2811 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2813 ref_picture, pix_op,
2814 s->mv[dir][0][0], s->mv[dir][0][1],
// 8x8 (4MV): four independent luma vectors, chroma handled separately below
2820 for (i = 0; i < 4; i++) {
2821 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2822 s->linesize) * block_s,
2823 ref_picture[0], 0, 0,
2824 (2 * mb_x + (i & 1)) * block_s,
2825 (2 * mb_y + (i >> 1)) * block_s,
2826 s->width, s->height, s->linesize,
2827 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2828 block_s, block_s, pix_op,
2829 s->mv[dir][i][0], s->mv[dir][i][1]);
// accumulate the four vectors for the averaged chroma vector
2831 mx += s->mv[dir][i][0];
2832 my += s->mv[dir][i][1];
2835 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2836 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
// field MC: two field vectors within a frame picture, or one per field pic
2840 if (s->picture_structure == PICT_FRAME) {
2842 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2843 1, 0, s->field_select[dir][0],
2844 ref_picture, pix_op,
2845 s->mv[dir][0][0], s->mv[dir][0][1],
2848 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2849 1, 1, s->field_select[dir][1],
2850 ref_picture, pix_op,
2851 s->mv[dir][1][0], s->mv[dir][1][1],
2854 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2855 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
// opposite-parity reference of the same frame lives in the current picture
2856 ref_picture = s->current_picture_ptr->f->data;
2859 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2860 0, 0, s->field_select[dir][0],
2861 ref_picture, pix_op,
2863 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
2867 for (i = 0; i < 2; i++) {
2868 uint8_t **ref2picture;
2870 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2871 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2872 ref2picture = ref_picture;
2874 ref2picture = s->current_picture_ptr->f->data;
2877 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2878 0, 0, s->field_select[dir][i],
2879 ref2picture, pix_op,
2880 s->mv[dir][i][0], s->mv[dir][i][1] +
2881 2 * block_s * i, block_s, mb_y >> 1);
2883 dest_y += 2 * block_s * s->linesize;
2884 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2885 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2889 if (s->picture_structure == PICT_FRAME) {
2890 for (i = 0; i < 2; i++) {
2892 for (j = 0; j < 2; j++) {
2893 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2895 ref_picture, pix_op,
2896 s->mv[dir][2 * i + j][0],
2897 s->mv[dir][2 * i + j][1],
2900 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2903 for (i = 0; i < 2; i++) {
2904 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2905 0, 0, s->picture_structure != i + 1,
2906 ref_picture, pix_op,
2907 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2908 2 * block_s, mb_y >> 1);
2910 // after put we make avg of the same block
2911 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2913 // opposite parity is always in the same
2914 // frame if this is second field
2915 if (!s->first_field) {
2916 ref_picture = s->current_picture_ptr->f->data;
/* NOTE(review): extraction gaps — the switch cases that set 'mvs' per
 * s->mv_type are missing between the numbered lines below. */
2927 * find the lowest MB row referenced in the MVs
2929 int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
2931 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2932 int my, off, i, mvs;
// field pictures / GMC: fall back to the conservative answer (last row)
2934 if (s->picture_structure != PICT_FRAME || s->mcsel)
2937 switch (s->mv_type) {
// scan the vertical components of all MVs for this direction
2951 for (i = 0; i < mvs; i++) {
2952 my = s->mv[dir][i][1]<<qpel_shift;
2953 my_max = FFMAX(my_max, my);
2954 my_min = FFMIN(my_min, my);
// convert the extreme displacement (qpel units) to macroblock rows
2957 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2959 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2961 return s->mb_height-1;
2964 /* put block[] to dest[] */
2965 static inline void put_dct(MpegEncContext *s,
2966 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2968 s->dct_unquantize_intra(s, block, i, qscale);
2969 s->idsp.idct_put(dest, line_size, block);
2972 /* add block[] to dest[] */
2973 static inline void add_dct(MpegEncContext *s,
2974 int16_t *block, int i, uint8_t *dest, int line_size)
2976 if (s->block_last_index[i] >= 0) {
2977 s->idsp.idct_add(dest, line_size, block);
2981 static inline void add_dequant_dct(MpegEncContext *s,
2982 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2984 if (s->block_last_index[i] >= 0) {
2985 s->dct_unquantize_inter(s, block, i, qscale);
2987 s->idsp.idct_add(dest, line_size, block);
/* NOTE(review): extraction gaps — the first lines of the dc_val/coded_block
 * multi-assignment chains are missing between the numbered lines below. */
2992 * Clean dc, ac, coded_block for the current non-intra MB.
2994 void ff_clean_intra_table_entries(MpegEncContext *s)
2996 int wrap = s->b8_stride;
2997 int xy = s->block_index[0];
// reset luma DC predictors of the 4 8x8 blocks to the default (1024)
3000 s->dc_val[0][xy + 1 ] =
3001 s->dc_val[0][xy + wrap] =
3002 s->dc_val[0][xy + 1 + wrap] = 1024;
// clear luma AC prediction values
3004 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
3005 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
3006 if (s->msmpeg4_version>=3) {
3007 s->coded_block[xy ] =
3008 s->coded_block[xy + 1 ] =
3009 s->coded_block[xy + wrap] =
3010 s->coded_block[xy + 1 + wrap] = 0;
// chroma predictors are indexed per macroblock, not per 8x8 block
3013 wrap = s->mb_stride;
3014 xy = s->mb_x + s->mb_y * wrap;
3016 s->dc_val[2][xy] = 1024;
3018 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
3019 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
3021 s->mbintra_table[xy]= 0;
3024 /* generic function called after a macroblock has been parsed by the
3025 decoder or after it has been encoded by the encoder.
3027 Important variables used:
3028 s->mb_intra : true if intra macroblock
3029 s->mv_dir : motion vector direction
3030 s->mv_type : motion vector type
3031 s->mv : motion vector
3032 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* NOTE(review): extraction gaps — many original lines (braces, else
 * branches, some conditions) are missing between the numbered lines below;
 * do not assume the statements are contiguous. */
3034 static av_always_inline
3035 void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
3036 int lowres_flag, int is_mpeg12)
3038 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
// hardware acceleration path: hand the MB to the hwaccel and return early
3041 s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
3042 s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
3046 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
3047 /* print DCT coefficients */
3049 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
3051 for(j=0; j<64; j++){
3052 av_log(s->avctx, AV_LOG_DEBUG, "%5d",
3053 block[i][s->idsp.idct_permutation[j]]);
3055 av_log(s->avctx, AV_LOG_DEBUG, "\n");
3059 s->current_picture.qscale_table[mb_xy] = s->qscale;
3061 /* update DC predictors for P macroblocks */
3063 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
3064 if(s->mbintra_table[mb_xy])
3065 ff_clean_intra_table_entries(s);
3069 s->last_dc[2] = 128 << s->intra_dc_precision;
3072 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
3073 s->mbintra_table[mb_xy]=1;
3075 if ( (s->flags&CODEC_FLAG_PSNR)
3076 || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
3077 || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
3078 uint8_t *dest_y, *dest_cb, *dest_cr;
3079 int dct_linesize, dct_offset;
3080 op_pixels_func (*op_pix)[4];
3081 qpel_mc_func (*op_qpix)[16];
3082 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3083 const int uvlinesize = s->current_picture.f->linesize[1];
3084 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
3085 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
3087 /* avoid copy if macroblock skipped in last frame too */
3088 /* skip only during decoding as we might trash the buffers during encoding a bit */
3090 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
3092 if (s->mb_skipped) {
3094 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
3096 } else if(!s->current_picture.reference) {
3099 *mbskip_ptr = 0; /* not skipped */
// interlaced DCT interleaves the two fields: double stride, offset one line
3103 dct_linesize = linesize << s->interlaced_dct;
3104 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
3108 dest_cb= s->dest[1];
3109 dest_cr= s->dest[2];
// non-readable destinations are reconstructed into the scratchpad first
3111 dest_y = s->b_scratchpad;
3112 dest_cb= s->b_scratchpad+16*linesize;
3113 dest_cr= s->b_scratchpad+32*linesize;
3117 /* motion handling */
3118 /* decoding or more than one mb_type (MC was already done otherwise) */
// frame-threaded decoding: wait until the referenced rows are decoded
3121 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
3122 if (s->mv_dir & MV_DIR_FORWARD) {
3123 ff_thread_await_progress(&s->last_picture_ptr->tf,
3124 ff_mpv_lowest_referenced_row(s, 0),
3127 if (s->mv_dir & MV_DIR_BACKWARD) {
3128 ff_thread_await_progress(&s->next_picture_ptr->tf,
3129 ff_mpv_lowest_referenced_row(s, 1),
3135 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
3137 if (s->mv_dir & MV_DIR_FORWARD) {
3138 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
// after the forward 'put', the backward pass averages into the same block
3139 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
3141 if (s->mv_dir & MV_DIR_BACKWARD) {
3142 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
3145 op_qpix = s->me.qpel_put;
3146 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
3147 op_pix = s->hdsp.put_pixels_tab;
3149 op_pix = s->hdsp.put_no_rnd_pixels_tab;
3151 if (s->mv_dir & MV_DIR_FORWARD) {
3152 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
3153 op_pix = s->hdsp.avg_pixels_tab;
3154 op_qpix= s->me.qpel_avg;
3156 if (s->mv_dir & MV_DIR_BACKWARD) {
3157 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
3162 /* skip dequant / idct if we are really late ;) */
3163 if(s->avctx->skip_idct){
3164 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
3165 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
3166 || s->avctx->skip_idct >= AVDISCARD_ALL)
3170 /* add dct residue */
3171 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
3172 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
3173 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3174 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3175 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3176 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3178 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3179 if (s->chroma_y_shift){
3180 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3181 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3185 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3186 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3187 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3188 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
3191 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3192 add_dct(s, block[0], 0, dest_y , dct_linesize);
3193 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3194 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3195 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3197 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3198 if(s->chroma_y_shift){//Chroma420
3199 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3200 add_dct(s, block[5], 5, dest_cr, uvlinesize);
3203 dct_linesize = uvlinesize << s->interlaced_dct;
3204 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3206 add_dct(s, block[4], 4, dest_cb, dct_linesize);
3207 add_dct(s, block[5], 5, dest_cr, dct_linesize);
3208 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3209 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3210 if(!s->chroma_x_shift){//Chroma444
3211 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3212 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3213 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3214 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3219 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3220 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3223 /* dct only in intra block */
3224 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
3225 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3226 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3227 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3228 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3230 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3231 if(s->chroma_y_shift){
3232 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3233 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3237 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3238 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3239 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3240 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
// MPEG-1/2 intra blocks arrive already dequantized: plain idct_put
3244 s->idsp.idct_put(dest_y, dct_linesize, block[0]);
3245 s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3246 s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
3247 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3249 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3250 if(s->chroma_y_shift){
3251 s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
3252 s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
3255 dct_linesize = uvlinesize << s->interlaced_dct;
3256 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3258 s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
3259 s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
3260 s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3261 s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3262 if(!s->chroma_x_shift){//Chroma444
3263 s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3264 s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3265 s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3266 s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
// not readable: copy the scratchpad reconstruction to the real destination
3274 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3275 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3276 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
3281 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
3284 if(s->out_format == FMT_MPEG1) {
3285 if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 1);
3286 else mpv_decode_mb_internal(s, block, 0, 1);
3289 if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 0);
3290 else mpv_decode_mb_internal(s, block, 0, 0);
3293 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3295 ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3296 s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3297 s->first_field, s->low_delay);
/* Set up per-row block indices and destination plane pointers for the
 * current macroblock row.
 * NOTE(review): extraction gaps — the else branch and some braces are
 * missing between the numbered lines below. */
3300 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3301 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3302 const int uvlinesize = s->current_picture.f->linesize[1];
3303 const int mb_size= 4 - s->avctx->lowres;
// indices of the 4 luma 8x8 blocks (b8_stride grid) ...
3305 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3306 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3307 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3308 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
// ... and of the 2 chroma blocks (mb_stride grid, after the luma area)
3309 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3310 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3311 //block_index is not used by mpeg2, so it is not affected by chroma_format
3313 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
3314 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3315 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3317 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3319 if(s->picture_structure==PICT_FRAME){
3320 s->dest[0] += s->mb_y * linesize << mb_size;
3321 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3322 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
// field picture: rows of one parity only, hence mb_y >> 1
3324 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3325 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3326 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3327 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 *                  (inverse) permutated to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int i;
    int16_t temp[64];

    if (last <= 0)
        return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* Gather the coefficients touched by the scan and clear them in place,
     * so coefficients whose permuted slot overlaps a scanned slot are not
     * lost when scattering back. */
    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
        temp[j]  = block[j];
        block[j] = 0;
    }
    /* Scatter them to their permuted positions. */
    for (i = 0; i <= last; i++) {
        const int j      = scantable[i];
        const int perm_j = permutation[j];
        block[perm_j] = temp[j];
    }
}
/* Flush/reset the decoder state: unreference all pictures and reset the
 * parser and bitstream buffer bookkeeping.
 * NOTE(review): extraction gaps — the early 'return;' body and some braces
 * are missing between the numbered lines below. */
3361 void ff_mpeg_flush(AVCodecContext *avctx){
3363 MpegEncContext *s = avctx->priv_data;
// nothing to flush before the picture array is allocated
3365 if (!s || !s->picture)
3368 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3369 ff_mpeg_unref_picture(s, &s->picture[i]);
3370 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3372 ff_mpeg_unref_picture(s, &s->current_picture);
3373 ff_mpeg_unref_picture(s, &s->last_picture);
3374 ff_mpeg_unref_picture(s, &s->next_picture);
3376 s->mb_x= s->mb_y= 0;
// reset parser state so the next packet starts a fresh search
3379 s->parse_context.state= -1;
3380 s->parse_context.frame_start_found= 0;
3381 s->parse_context.overread= 0;
3382 s->parse_context.overread_index= 0;
3383 s->parse_context.index= 0;
3384 s->parse_context.last_index= 0;
3385 s->bitstream_buffer_size=0;
3390 * set qscale and update qscale dependent variables.
3392 void ff_set_qscale(MpegEncContext * s, int qscale)
3396 else if (qscale > 31)
3400 s->chroma_qscale= s->chroma_qscale_table[qscale];
3402 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3403 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report decode progress (current MB row) to waiting frame threads.
 * Progress is only reported when no later data can change already-decoded
 * rows: not for B-frames being reordered, not with partitioned frames, and
 * not after an error (error concealment may still rewrite rows). */
3406 void ff_mpv_report_decode_progress(MpegEncContext *s)
3408 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3409 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);