2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/motion_vector.h"
35 #include "libavutil/timer.h"
38 #include "h264chroma.h"
42 #include "mpegutils.h"
43 #include "mpegvideo.h"
/* Default chroma qscale mapping: identity (chroma qscale == luma qscale)
 * for all 32 qscale values.
 * NOTE(review): the closing "};" is not visible in this extract — the file
 * appears to have lines elided; verify against the full source. */
50 static const uint8_t ff_default_chroma_qscale_table[32] = {
51 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
52 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
53 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale: constant 8 regardless of qscale (index is qscale).
 * Also used as entry 0 of ff_mpeg2_dc_scale_table below.
 * NOTE(review): closing "};" elided in this extract. */
56 const uint8_t ff_mpeg1_dc_scale_table[128] = {
57 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
65 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* DC scale 4 for every qscale; selected via ff_mpeg2_dc_scale_table[1]
 * (presumably MPEG-2 intra_dc_precision == 1 — confirm against spec).
 * NOTE(review): closing "};" elided in this extract. */
68 static const uint8_t mpeg2_dc_scale_table1[128] = {
69 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
77 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* DC scale 2 for every qscale; entry 2 of ff_mpeg2_dc_scale_table.
 * NOTE(review): closing "};" elided in this extract. */
80 static const uint8_t mpeg2_dc_scale_table2[128] = {
81 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
89 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* DC scale 1 (no scaling) for every qscale; entry 3 of
 * ff_mpeg2_dc_scale_table.
 * NOTE(review): closing "};" elided in this extract. */
92 static const uint8_t mpeg2_dc_scale_table3[128] = {
93 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Indexed DC-scale lookup: [0] = 8 (MPEG-1 table), [1] = 4, [2] = 2,
 * [3] = 1 — presumably indexed by MPEG-2 intra_dc_precision; confirm
 * against callers.
 * NOTE(review): closing "};" elided in this extract. */
104 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
105 ff_mpeg1_dc_scale_table,
106 mpeg2_dc_scale_table1,
107 mpeg2_dc_scale_table2,
108 mpeg2_dc_scale_table3,
/* Alternate horizontal zigzag scan order (64 coefficient positions);
 * loaded into intra_h_scantable in ff_mpv_idct_init().
 * NOTE(review): closing "};" elided in this extract. */
111 const uint8_t ff_alternate_horizontal_scan[64] = {
112 0, 1, 2, 3, 8, 9, 16, 17,
113 10, 11, 4, 5, 6, 7, 15, 14,
114 13, 12, 19, 18, 24, 25, 32, 33,
115 26, 27, 20, 21, 22, 23, 28, 29,
116 30, 31, 34, 35, 40, 41, 48, 49,
117 42, 43, 36, 37, 38, 39, 44, 45,
118 46, 47, 50, 51, 56, 57, 58, 59,
119 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical scan order (MPEG-2 alternate_scan / interlaced);
 * used for inter/intra scantables when s->alternate_scan is set.
 * NOTE(review): closing "};" elided in this extract. */
122 const uint8_t ff_alternate_vertical_scan[64] = {
123 0, 8, 16, 24, 1, 9, 2, 10,
124 17, 25, 32, 40, 48, 56, 57, 49,
125 41, 33, 26, 18, 3, 11, 4, 12,
126 19, 27, 34, 42, 50, 58, 35, 43,
127 51, 59, 20, 28, 5, 13, 6, 14,
128 21, 29, 36, 44, 52, 60, 37, 45,
129 53, 61, 22, 30, 7, 15, 23, 31,
130 38, 46, 54, 62, 39, 47, 55, 63,
/* Dequantize an MPEG-1 intra block in place.
 * block:  coefficients in permuted (IDCT) order; block[0] is the DC term.
 * n:      block index (n < 4 => luma, so luma vs chroma DC scale).
 * qscale: quantizer scale for the AC terms.
 * DC is scaled by y/c_dc_scale; each nonzero AC level is scaled by
 * qscale * intra_matrix[j] >> 3 and then forced odd ("(level-1)|1",
 * MPEG-1 mismatch control).
 * NOTE(review): several lines (braces, the sign-branch conditionals
 * around the duplicated scale/odd pairs, and saturation/store code) are
 * elided in this extract — the two identical-looking pairs below are
 * presumably the negative and positive level branches; confirm against
 * the full source. */
133 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
134 int16_t *block, int n, int qscale)
136 int i, level, nCoeffs;
137 const uint16_t *quant_matrix;
139 nCoeffs= s->block_last_index[n];
141 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
142 /* XXX: only mpeg1 */
143 quant_matrix = s->intra_matrix;
144 for(i=1;i<=nCoeffs;i++) {
145 int j= s->intra_scantable.permutated[i];
150 level = (int)(level * qscale * quant_matrix[j]) >> 3;
151 level = (level - 1) | 1;
154 level = (int)(level * qscale * quant_matrix[j]) >> 3;
155 level = (level - 1) | 1;
/* Dequantize an MPEG-1 inter (non-intra) block in place.
 * Same structure as the intra variant but starts at coefficient 0 (no DC
 * special case), uses inter_matrix, and reconstructs with
 * ((2*level + 1) * qscale * matrix) >> 4, again forced odd.
 * NOTE(review): sign-branch conditionals and store code are elided in
 * this extract; the duplicated scale blocks below are presumably the
 * negative/positive branches. Note it indexes intra_scantable here even
 * for inter blocks — matches upstream (one permuted scantable). */
162 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
163 int16_t *block, int n, int qscale)
165 int i, level, nCoeffs;
166 const uint16_t *quant_matrix;
168 nCoeffs= s->block_last_index[n];
170 quant_matrix = s->inter_matrix;
171 for(i=0; i<=nCoeffs; i++) {
172 int j= s->intra_scantable.permutated[i];
177 level = (((level << 1) + 1) * qscale *
178 ((int) (quant_matrix[j]))) >> 4;
179 level = (level - 1) | 1;
182 level = (((level << 1) + 1) * qscale *
183 ((int) (quant_matrix[j]))) >> 4;
184 level = (level - 1) | 1;
/* Dequantize an MPEG-2 intra block in place.
 * Differs from MPEG-1 intra: no "(level-1)|1" odd-forcing, and with
 * alternate_scan all 64 coefficients are processed regardless of
 * block_last_index (interlaced scan covers the full block).
 * NOTE(review): sign-branch conditionals and store lines are elided in
 * this extract. */
191 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
192 int16_t *block, int n, int qscale)
194 int i, level, nCoeffs;
195 const uint16_t *quant_matrix;
197 if(s->alternate_scan) nCoeffs= 63;
198 else nCoeffs= s->block_last_index[n];
200 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
201 quant_matrix = s->intra_matrix;
202 for(i=1;i<=nCoeffs;i++) {
203 int j= s->intra_scantable.permutated[i];
208 level = (int)(level * qscale * quant_matrix[j]) >> 3;
211 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of dct_unquantize_mpeg2_intra_c; installed instead
 * of the plain version when CODEC_FLAG_BITEXACT is set (see dct_init).
 * The visible body matches the non-bitexact version; the difference is
 * presumably in the elided lines (upstream tracks a mismatch-control
 * sum) — confirm against the full source. */
218 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
219 int16_t *block, int n, int qscale)
221 int i, level, nCoeffs;
222 const uint16_t *quant_matrix;
225 if(s->alternate_scan) nCoeffs= 63;
226 else nCoeffs= s->block_last_index[n];
228 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
230 quant_matrix = s->intra_matrix;
231 for(i=1;i<=nCoeffs;i++) {
232 int j= s->intra_scantable.permutated[i];
237 level = (int)(level * qscale * quant_matrix[j]) >> 3;
240 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Dequantize an MPEG-2 inter block in place: ((2*level + 1) * qscale *
 * inter_matrix) >> 4, starting from coefficient 0; all 64 coefficients
 * when alternate_scan is set. No odd-forcing (unlike MPEG-1 inter).
 * NOTE(review): sign-branch conditionals and store/mismatch lines are
 * elided in this extract. */
249 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
250 int16_t *block, int n, int qscale)
252 int i, level, nCoeffs;
253 const uint16_t *quant_matrix;
256 if(s->alternate_scan) nCoeffs= 63;
257 else nCoeffs= s->block_last_index[n];
259 quant_matrix = s->inter_matrix;
260 for(i=0; i<=nCoeffs; i++) {
261 int j= s->intra_scantable.permutated[i];
266 level = (((level << 1) + 1) * qscale *
267 ((int) (quant_matrix[j]))) >> 4;
270 level = (((level << 1) + 1) * qscale *
271 ((int) (quant_matrix[j]))) >> 4;
280 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
281 int16_t *block, int n, int qscale)
283 int i, level, qmul, qadd;
286 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
291 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
292 qadd = (qscale - 1) | 1;
299 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
301 for(i=1; i<=nCoeffs; i++) {
305 level = level * qmul - qadd;
307 level = level * qmul + qadd;
/* Dequantize an H.263-style inter block in place: level*qmul +/- qadd
 * for every nonzero coefficient starting at 0 (no DC special case).
 * NOTE(review): the qmul assignment and the sign test selecting between
 * the "- qadd" and "+ qadd" branches are elided in this extract. */
314 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
315 int16_t *block, int n, int qscale)
317 int i, level, qmul, qadd;
320 av_assert2(s->block_last_index[n]>=0);
322 qadd = (qscale - 1) | 1;
325 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
327 for(i=0; i<=nCoeffs; i++) {
331 level = level * qmul - qadd;
333 level = level * qmul + qadd;
/* Error-resilience callback (installed as er->decode_mb in init_er):
 * re-decodes/conceals one macroblock. Copies the MB state handed in by
 * the ER code into the context, recomputes the destination pointers for
 * the three planes from current_picture, and calls ff_mpv_decode_mb().
 * NOTE(review): assignments of mv_dir/mb_x/mb_y and the interlaced-
 * handling conditional around the av_log are elided in this extract. */
340 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
342 int mb_x, int mb_y, int mb_intra, int mb_skipped)
344 MpegEncContext *s = opaque;
347 s->mv_type = mv_type;
348 s->mb_intra = mb_intra;
349 s->mb_skipped = mb_skipped;
352 memcpy(s->mv, mv, sizeof(*mv));
354 ff_init_block_index(s);
355 ff_update_block_index(s);
357 s->bdsp.clear_blocks(s->block[0]);
/* Luma plane at 16x16 granularity; chroma shifted by chroma_x/y_shift. */
359 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
360 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
361 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
364 av_log(s->avctx, AV_LOG_DEBUG,
365 "Interlaced error concealment is not fully implemented\n");
366 ff_mpv_decode_mb(s, s->block);
/* Debug stub (FF_DEBUG_NOMC): fills a 16-pixel-wide row with mid-gray
 * (128) instead of doing motion compensation; src is ignored.
 * NOTE(review): the surrounding loop/brace lines are elided in this
 * extract. */
369 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
372 memset(dst + h*linesize, 128, 16);
/* 8-pixel-wide variant of gray16 for the chroma/halfsize tables.
 * NOTE(review): the surrounding loop/brace lines are elided in this
 * extract. */
375 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
378 memset(dst + h*linesize, 128, 8);
381 /* init common dct for both encoder and decoder */
/* Initializes the DSP sub-contexts, installs the C dct_unquantize_*
 * implementations defined above (bitexact MPEG-2 intra variant when
 * CODEC_FLAG_BITEXACT is set), optionally replaces the hpel tables with
 * the gray8/gray16 stubs for FF_DEBUG_NOMC, then lets the per-arch init
 * functions override with optimized versions. Returns 0 on success
 * (return statement elided in this extract). */
382 static av_cold int dct_init(MpegEncContext *s)
384 ff_blockdsp_init(&s->bdsp, s->avctx);
385 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
386 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
387 ff_mpegvideodsp_init(&s->mdsp);
388 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* No-motion-compensation debug mode: paint blocks gray instead. */
390 if (s->avctx->debug & FF_DEBUG_NOMC) {
392 for (i=0; i<4; i++) {
393 s->hdsp.avg_pixels_tab[0][i] = gray16;
394 s->hdsp.put_pixels_tab[0][i] = gray16;
395 s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
397 s->hdsp.avg_pixels_tab[1][i] = gray8;
398 s->hdsp.put_pixels_tab[1][i] = gray8;
399 s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
403 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
404 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
405 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
406 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
407 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
408 if (s->flags & CODEC_FLAG_BITEXACT)
409 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
410 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture overrides (guarded by ARCH_*/HAVE_* conditionals,
 * partially elided in this extract). */
412 if (HAVE_INTRINSICS_NEON)
413 ff_mpv_common_init_neon(s);
416 ff_mpv_common_init_axp(s);
418 ff_mpv_common_init_arm(s);
420 ff_mpv_common_init_ppc(s);
422 ff_mpv_common_init_x86(s);
/* Initializes the IDCT context and builds the four scantables
 * (inter/intra plus the horizontal/vertical alternates), permuted for
 * the chosen IDCT's coefficient ordering. alternate_scan selects the
 * vertical scan for the main inter/intra tables, otherwise zigzag. */
427 av_cold void ff_mpv_idct_init(MpegEncContext *s)
429 ff_idctdsp_init(&s->idsp, s->avctx);
431 /* load & permutate scantables
432 * note: only wmv uses different ones
434 if (s->alternate_scan) {
435 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
436 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
438 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
439 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
441 ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
442 ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocates the linesize-dependent scratch buffers (edge_emu_buffer and
 * the shared me/rd/b/obmc scratchpad) sized from |linesize|. Skipped
 * (presumably early return, elided here) for hwaccel/VDPAU since those
 * decode in hardware. Returns 0 on success, AVERROR_PATCHWELCOME for
 * too-small images, AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): the linesize sanity check preceding the "Image too
 * small" log and the success return are elided in this extract. */
445 static int frame_size_alloc(MpegEncContext *s, int linesize)
447 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
449 if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
453 av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
454 return AVERROR_PATCHWELCOME;
457 // edge emu needs blocksize + filter length - 1
458 // (= 17x17 for halfpel / 21x21 for h264)
459 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
460 // at uvlinesize. It supports only YUV420 so 24x24 is enough
461 // linesize * interlaced * MBsize
462 // we also use this buffer for encoding in encode_mb_internal() needig an additional 32 lines
463 FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size, 4 * 68,
/* One allocation shared by several scratch pointers; obmc_scratchpad is
 * offset 16 bytes into it. */
466 FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size, 4 * 16 * 2,
468 s->me.temp = s->me.scratchpad;
469 s->rd_scratchpad = s->me.scratchpad;
470 s->b_scratchpad = s->me.scratchpad;
471 s->obmc_scratchpad = s->me.scratchpad + 16;
475 av_freep(&s->edge_emu_buffer);
476 return AVERROR(ENOMEM);
480 * Allocate a frame buffer
/* Gets pixel buffers for pic: via ff_thread_get_buffer() for decoders
 * (with EDGE_WIDTH padding added to the coded size, then the data
 * pointers offset back so data[] addresses the unpadded frame), or via
 * avcodec_default_get_buffer2() when encoding with edges (the WMV3/VC1
 * image codecs and MSS2 are excluded from the padded path). Also
 * allocates hwaccel private data, validates that linesizes have not
 * changed and that the two chroma planes share a stride, and triggers
 * the linesize-dependent scratch allocation. Returns 0 or a negative
 * AVERROR (several return statements elided in this extract). */
482 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
484 int edges_needed = av_codec_is_encoder(s->avctx->codec);
488 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
489 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
490 s->codec_id != AV_CODEC_ID_MSS2) {
/* Over-allocate so motion vectors may reference the padded edges. */
492 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
493 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
496 r = ff_thread_get_buffer(s->avctx, &pic->tf,
497 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
499 pic->f->width = s->avctx->width;
500 pic->f->height = s->avctx->height;
501 pic->f->format = s->avctx->pix_fmt;
502 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
505 if (r < 0 || !pic->f->buf[0]) {
506 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Shift data[] past the allocated edge so the visible frame starts at
 * data[i]; chroma edge scaled by the chroma shifts. */
513 for (i = 0; pic->f->data[i]; i++) {
514 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
515 pic->f->linesize[i] +
516 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
517 pic->f->data[i] += offset;
519 pic->f->width = s->avctx->width;
520 pic->f->height = s->avctx->height;
523 if (s->avctx->hwaccel) {
524 assert(!pic->hwaccel_picture_private);
525 if (s->avctx->hwaccel->frame_priv_data_size) {
526 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
527 if (!pic->hwaccel_priv_buf) {
528 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
531 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* Strides are cached in the context; a mid-stream change is fatal. */
535 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
536 s->uvlinesize != pic->f->linesize[1])) {
537 av_log(s->avctx, AV_LOG_ERROR,
538 "get_buffer() failed (stride changed)\n");
539 ff_mpeg_unref_picture(s, pic);
543 if (pic->f->linesize[1] != pic->f->linesize[2]) {
544 av_log(s->avctx, AV_LOG_ERROR,
545 "get_buffer() failed (uv stride mismatch)\n");
546 ff_mpeg_unref_picture(s, pic);
550 if (!s->edge_emu_buffer &&
551 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
552 av_log(s->avctx, AV_LOG_ERROR,
553 "get_buffer() failed to allocate context scratch buffers.\n");
554 ff_mpeg_unref_picture(s, pic);
/* Releases all per-picture side-data buffers (variance/mean stats,
 * skip/qscale/mb_type tables, per-direction motion and ref-index
 * buffers) and resets the recorded allocation dimensions so a later
 * ff_alloc_picture() reallocates at the current size. */
561 void ff_free_picture_tables(Picture *pic)
565 pic->alloc_mb_width =
566 pic->alloc_mb_height = 0;
568 av_buffer_unref(&pic->mb_var_buf);
569 av_buffer_unref(&pic->mc_mb_var_buf);
570 av_buffer_unref(&pic->mb_mean_buf);
571 av_buffer_unref(&pic->mbskip_table_buf);
572 av_buffer_unref(&pic->qscale_table_buf);
573 av_buffer_unref(&pic->mb_type_buf);
575 for (i = 0; i < 2; i++) {
576 av_buffer_unref(&pic->motion_val_buf[i]);
577 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocates the per-picture side-data buffers sized from the current
 * mb/b8 geometry: skip/qscale/mb_type always; the encoder statistics
 * buffers (mb_var/mc_mb_var/mb_mean) presumably only when encoding
 * (guard elided in this extract); motion vectors and ref indices when
 * the codec or flags need them exported. Records the dimensions used so
 * ff_alloc_picture() can detect size changes. Returns 0 (elided) or
 * AVERROR(ENOMEM). */
583 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
585 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
586 const int mb_array_size = s->mb_stride * s->mb_height;
587 const int b8_array_size = s->b8_stride * s->mb_height * 2;
589 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
590 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
591 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
593 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
594 return AVERROR(ENOMEM);
597 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
598 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
599 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
600 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
601 return AVERROR(ENOMEM);
/* MVs needed for H.263-family decoding, encoding, and MV export. */
604 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv ||
605 (s->avctx->flags2 & CODEC_FLAG2_EXPORT_MVS)) {
606 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
607 int ref_index_size = 4 * mb_array_size;
609 for (i = 0; mv_size && i < 2; i++) {
610 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
611 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
612 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
613 return AVERROR(ENOMEM);
617 pic->alloc_mb_width = s->mb_width;
618 pic->alloc_mb_height = s->mb_height;
/* Makes every ref-counted picture table writable (copy-on-write via
 * av_buffer_make_writable) so the owner can modify shared tables.
 * The MAKE_WRITABLE macro returns the error on failure (part of its
 * expansion is elided in this extract). Returns 0 on success (return
 * elided). */
623 static int make_tables_writable(Picture *pic)
626 #define MAKE_WRITABLE(table) \
629 (ret = av_buffer_make_writable(&pic->table)) < 0)\
633 MAKE_WRITABLE(mb_var_buf);
634 MAKE_WRITABLE(mc_mb_var_buf);
635 MAKE_WRITABLE(mb_mean_buf);
636 MAKE_WRITABLE(mbskip_table_buf);
637 MAKE_WRITABLE(qscale_table_buf);
638 MAKE_WRITABLE(mb_type_buf);
640 for (i = 0; i < 2; i++) {
641 MAKE_WRITABLE(motion_val_buf[i]);
642 MAKE_WRITABLE(ref_index_buf[i]);
649 * Allocate a Picture.
650 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Frees stale tables if the mb geometry changed, obtains pixel buffers
 * (shared pictures must already have data; otherwise alloc_frame_buffer
 * is called and the context linesizes are cached), then (re)allocates
 * or COW-unshares the side tables and derives the convenience pointers
 * (qscale_table/mb_type offset past the top border rows, motion_val
 * offset by 4). On any failure the picture is fully unreferenced and
 * AVERROR(ENOMEM) is returned. */
652 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
656 if (pic->qscale_table_buf)
657 if ( pic->alloc_mb_width != s->mb_width
658 || pic->alloc_mb_height != s->mb_height)
659 ff_free_picture_tables(pic);
662 av_assert0(pic->f->data[0]);
665 av_assert0(!pic->f->buf[0]);
667 if (alloc_frame_buffer(s, pic) < 0)
670 s->linesize = pic->f->linesize[0];
671 s->uvlinesize = pic->f->linesize[1];
674 if (!pic->qscale_table_buf)
675 ret = alloc_picture_tables(s, pic);
677 ret = make_tables_writable(pic);
682 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
683 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
684 pic->mb_mean = pic->mb_mean_buf->data;
/* +2*mb_stride+1 skips the guard rows/column in front of the tables. */
687 pic->mbskip_table = pic->mbskip_table_buf->data;
688 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
689 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
691 if (pic->motion_val_buf[0]) {
692 for (i = 0; i < 2; i++) {
693 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
694 pic->ref_index[i] = pic->ref_index_buf[i]->data;
700 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
701 ff_mpeg_unref_picture(s, pic);
702 ff_free_picture_tables(pic);
703 return AVERROR(ENOMEM);
707 * Deallocate a picture.
/* Releases the picture's frame buffer (via the thread-aware release for
 * normal codecs; WM image/screen codecs skip it because they allocated
 * internally), drops the hwaccel buffer, frees the tables if a realloc
 * was flagged, then zeroes every field from mb_mean onward — the fields
 * before `off` (the buffers/pointers just released) are intentionally
 * preserved/handled above rather than memset. */
709 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
711 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
714 /* WM Image / Screen codecs allocate internal buffers with different
715 * dimensions / colorspaces; ignore user-defined callbacks for these. */
716 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
717 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
718 s->codec_id != AV_CODEC_ID_MSS2)
719 ff_thread_release_buffer(s->avctx, &pic->tf);
721 av_frame_unref(pic->f);
723 av_buffer_unref(&pic->hwaccel_priv_buf);
725 if (pic->needs_realloc)
726 ff_free_picture_tables(pic);
728 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Points dst's side tables at src's: for each ref-counted buffer that
 * differs, unrefs dst's and takes a new reference on src's (UPDATE_TABLE
 * frees everything and returns ENOMEM if av_buffer_ref fails — part of
 * the macro expansion is elided in this extract), then copies the
 * derived data pointers and allocation dimensions. Returns 0 on success
 * (return elided). Used for frame-threading context updates. */
731 static int update_picture_tables(Picture *dst, Picture *src)
735 #define UPDATE_TABLE(table)\
738 (!dst->table || dst->table->buffer != src->table->buffer)) {\
739 av_buffer_unref(&dst->table);\
740 dst->table = av_buffer_ref(src->table);\
742 ff_free_picture_tables(dst);\
743 return AVERROR(ENOMEM);\
748 UPDATE_TABLE(mb_var_buf);
749 UPDATE_TABLE(mc_mb_var_buf);
750 UPDATE_TABLE(mb_mean_buf);
751 UPDATE_TABLE(mbskip_table_buf);
752 UPDATE_TABLE(qscale_table_buf);
753 UPDATE_TABLE(mb_type_buf);
754 for (i = 0; i < 2; i++) {
755 UPDATE_TABLE(motion_val_buf[i]);
756 UPDATE_TABLE(ref_index_buf[i]);
/* Derived pointers are copied verbatim — they point into the buffers
 * just referenced above. */
759 dst->mb_var = src->mb_var;
760 dst->mc_mb_var = src->mc_mb_var;
761 dst->mb_mean = src->mb_mean;
762 dst->mbskip_table = src->mbskip_table;
763 dst->qscale_table = src->qscale_table;
764 dst->mb_type = src->mb_type;
765 for (i = 0; i < 2; i++) {
766 dst->motion_val[i] = src->motion_val[i];
767 dst->ref_index[i] = src->ref_index[i];
770 dst->alloc_mb_width = src->alloc_mb_width;
771 dst->alloc_mb_height = src->alloc_mb_height;
/* Makes dst a reference to src: refs the ThreadFrame, shares the side
 * tables via update_picture_tables(), refs the hwaccel private buffer
 * if present, and copies the scalar bookkeeping fields. dst must be
 * empty on entry (asserted). On any failure jumps to the cleanup that
 * unrefs dst (error returns partly elided in this extract). */
776 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
780 av_assert0(!dst->f->buf[0]);
781 av_assert0(src->f->buf[0]);
785 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
789 ret = update_picture_tables(dst, src);
793 if (src->hwaccel_picture_private) {
794 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
795 if (!dst->hwaccel_priv_buf)
797 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
800 dst->field_picture = src->field_picture;
801 dst->mb_var_sum = src->mb_var_sum;
802 dst->mc_mb_var_sum = src->mc_mb_var_sum;
803 dst->b_frame_score = src->b_frame_score;
804 dst->needs_realloc = src->needs_realloc;
805 dst->reference = src->reference;
806 dst->shared = src->shared;
810 ff_mpeg_unref_picture(s, dst);
/* Swaps the U and V pblock pointers (used for the "VCR2" codec tag,
 * see init_duplicate_context / ff_update_duplicate_context).
 * NOTE(review): only one assignment of the swap is visible — the
 * temporary and the second assignment are elided in this extract. */
814 static void exchange_uv(MpegEncContext *s)
819 s->pblocks[4] = s->pblocks[5];
/* Allocates the per-slice-thread state: motion-estimation maps, optional
 * noise-reduction error sums, the 12 transform blocks (pblocks[] points
 * into s->block; swapped U/V for the VCR2 tag via exchange_uv), and for
 * H.263-family codecs the AC prediction values laid out as one Y plane
 * followed by two chroma planes (+1 guard row/col each). Scratch
 * buffers are NOT allocated here — they are linesize-dependent and come
 * from frame_size_alloc() later. Returns 0 (elided) or -1 on allocation
 * failure; partial allocations are released by ff_mpv_common_end(). */
823 static int init_duplicate_context(MpegEncContext *s)
825 int y_size = s->b8_stride * (2 * s->mb_height + 1);
826 int c_size = s->mb_stride * (s->mb_height + 1);
827 int yc_size = y_size + 2 * c_size;
/* Odd mb_height needs one extra row of b8/mb entries. */
830 if (s->mb_height & 1)
831 yc_size += 2*s->b8_stride + 2*s->mb_stride;
838 s->obmc_scratchpad = NULL;
841 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
842 ME_MAP_SIZE * sizeof(uint32_t), fail)
843 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
844 ME_MAP_SIZE * sizeof(uint32_t), fail)
845 if (s->avctx->noise_reduction) {
846 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
847 2 * 64 * sizeof(int), fail)
850 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
851 s->block = s->blocks[0];
853 for (i = 0; i < 12; i++) {
854 s->pblocks[i] = &s->block[i];
856 if (s->avctx->codec_tag == AV_RL32("VCR2"))
859 if (s->out_format == FMT_H263) {
861 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
862 yc_size * sizeof(int16_t) * 16, fail);
863 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
864 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
865 s->ac_val[2] = s->ac_val[1] + c_size;
870 return -1; // free() through ff_mpv_common_end()
/* Frees everything init_duplicate_context() and frame_size_alloc()
 * allocated for one slice context. obmc_scratchpad is only NULLed, not
 * freed — it aliases into me.scratchpad (offset 16), which owns the
 * allocation. */
873 static void free_duplicate_context(MpegEncContext *s)
878 av_freep(&s->edge_emu_buffer);
879 av_freep(&s->me.scratchpad);
883 s->obmc_scratchpad = NULL;
885 av_freep(&s->dct_error_sum);
886 av_freep(&s->me.map);
887 av_freep(&s->me.score_map);
888 av_freep(&s->blocks);
889 av_freep(&s->ac_val_base);
/* Copies the per-thread pointer fields from src into bak so that
 * ff_update_duplicate_context() can memcpy the whole context and then
 * restore these thread-local allocations. NOTE(review): most of the
 * COPY(...) list is elided in this extract — only a subset of the
 * backed-up fields is visible. */
893 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
895 #define COPY(a) bak->a = src->a
896 COPY(edge_emu_buffer);
901 COPY(obmc_scratchpad);
908 COPY(me.map_generation);
/* Synchronizes a slice-thread context with the master: saves dst's
 * thread-local buffers, memcpys the whole MpegEncContext from src,
 * restores the saved buffers, rebuilds pblocks[] (with the VCR2 U/V
 * swap — the exchange_uv call is elided in this extract), and allocates
 * scratch buffers if dst has none yet. Returns 0 on success (elided) or
 * a negative AVERROR. */
920 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
924 // FIXME copy only needed parts
926 backup_duplicate_context(&bak, dst);
927 memcpy(dst, src, sizeof(MpegEncContext));
928 backup_duplicate_context(dst, &bak);
929 for (i = 0; i < 12; i++) {
930 dst->pblocks[i] = &dst->block[i];
932 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
934 if (!dst->edge_emu_buffer &&
935 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
936 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
937 "scratch buffers.\n");
940 // STOP_TIMER("update_duplicate_context")
941 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading context update: copies decoding state from the source
 * thread's context (s1) into this thread's (s). Lazily initializes s on
 * first use, handles mid-stream resolution changes, re-references all
 * pictures (array, current/last/next) and rebases the picture pointers
 * into this context's array, copies error-resilience, MPEG-4 timing,
 * B-frame, DivX and interlacing state (some via raw memcpy over field
 * ranges — fragile, depends on MpegEncContext field order), duplicates
 * the bitstream buffer, and ensures scratch buffers exist. Returns 0
 * (elided) or a negative AVERROR. */
945 int ff_mpeg_update_thread_context(AVCodecContext *dst,
946 const AVCodecContext *src)
949 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
956 // FIXME can parameters change on I-frames?
957 // in that case dst may need a reinit
958 if (!s->context_initialized) {
960 memcpy(s, s1, sizeof(MpegEncContext));
/* The copied pointers belong to s1; reset the ones s must own. */
963 s->bitstream_buffer = NULL;
964 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
966 if (s1->context_initialized){
967 // s->picture_range_start += MAX_PICTURE_COUNT;
968 // s->picture_range_end += MAX_PICTURE_COUNT;
970 if((err = ff_mpv_common_init(s)) < 0){
971 memset(s, 0, sizeof(MpegEncContext));
978 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
979 s->context_reinit = 0;
980 s->height = s1->height;
981 s->width = s1->width;
982 if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
986 s->avctx->coded_height = s1->avctx->coded_height;
987 s->avctx->coded_width = s1->avctx->coded_width;
988 s->avctx->width = s1->avctx->width;
989 s->avctx->height = s1->avctx->height;
991 s->coded_picture_number = s1->coded_picture_number;
992 s->picture_number = s1->picture_number;
994 av_assert0(!s->picture || s->picture != s1->picture);
996 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
997 ff_mpeg_unref_picture(s, &s->picture[i]);
998 if (s1->picture[i].f->buf[0] &&
999 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
1003 #define UPDATE_PICTURE(pic)\
1005 ff_mpeg_unref_picture(s, &s->pic);\
1006 if (s1->pic.f && s1->pic.f->buf[0])\
1007 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
1009 ret = update_picture_tables(&s->pic, &s1->pic);\
1014 UPDATE_PICTURE(current_picture);
1015 UPDATE_PICTURE(last_picture);
1016 UPDATE_PICTURE(next_picture);
/* Translate a Picture* from s1's array into the same slot of s's. */
1018 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
1019 ((pic && pic >= old_ctx->picture && \
1020 pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
1021 &new_ctx->picture[pic - old_ctx->picture] : NULL)
1023 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1024 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1025 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1027 // Error/bug resilience
1028 s->next_p_frame_damaged = s1->next_p_frame_damaged;
1029 s->workaround_bugs = s1->workaround_bugs;
1030 s->padding_bug_score = s1->padding_bug_score;
1032 // MPEG4 timing info
/* Raw byte copy of the field span [last_time_base, pb_field_time] —
 * relies on MpegEncContext's declaration order. */
1033 memcpy(&s->last_time_base, &s1->last_time_base,
1034 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1035 (char *) &s1->last_time_base);
1038 s->max_b_frames = s1->max_b_frames;
1039 s->low_delay = s1->low_delay;
1040 s->droppable = s1->droppable;
1042 // DivX handling (doesn't work)
1043 s->divx_packed = s1->divx_packed;
1045 if (s1->bitstream_buffer) {
1046 if (s1->bitstream_buffer_size +
1047 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
1048 av_fast_malloc(&s->bitstream_buffer,
1049 &s->allocated_bitstream_buffer_size,
1050 s1->allocated_bitstream_buffer_size);
1051 s->bitstream_buffer_size = s1->bitstream_buffer_size;
1052 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1053 s1->bitstream_buffer_size);
1054 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1055 FF_INPUT_BUFFER_PADDING_SIZE);
1058 // linesize dependend scratch buffer allocation
1059 if (!s->edge_emu_buffer)
1061 if (frame_size_alloc(s, s1->linesize) < 0) {
1062 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1063 "scratch buffers.\n");
1064 return AVERROR(ENOMEM);
1067 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1068 "be allocated due to unknown size.\n");
1071 // MPEG2/interlacing info
/* Another ordered-field-span copy: [progressive_sequence, rtp_mode). */
1072 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1073 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1075 if (!s1->first_field) {
1076 s->last_pict_type = s1->pict_type;
1077 if (s1->current_picture_ptr)
1078 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1085 * Set the given MpegEncContext to common defaults
1086 * (same for encoding and decoding).
1087 * The changed fields will not depend upon the
1088 * prior state of the MpegEncContext.
1090 void ff_mpv_common_defaults(MpegEncContext *s)
/* MPEG-1 DC scale and identity chroma qscale as the baseline tables;
 * progressive full-frame coding; single slice context. */
1092 s->y_dc_scale_table =
1093 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1094 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1095 s->progressive_frame = 1;
1096 s->progressive_sequence = 1;
1097 s->picture_structure = PICT_FRAME;
1099 s->coded_picture_number = 0;
1100 s->picture_number = 0;
1105 s->slice_context_count = 1;
1109 * Set the given MpegEncContext to defaults for decoding.
1110 * the changed fields will not depend upon
1111 * the prior state of the MpegEncContext.
/* Currently just the common defaults; decoder-specific defaults, if
 * any, are elided in this extract. */
1113 void ff_mpv_decode_defaults(MpegEncContext *s)
1115 ff_mpv_common_defaults(s);
/* Seeds the context from the AVCodecContext for decoding: coded
 * dimensions, codec id, bug workarounds, flags, and the upper-cased
 * codec tag (so tag comparisons like "VCR2" are case-insensitive). */
1118 void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
1121 s->width = avctx->coded_width;
1122 s->height = avctx->coded_height;
1123 s->codec_id = avctx->codec->id;
1124 s->workaround_bugs = avctx->workaround_bugs;
1125 s->flags = avctx->flags;
1126 s->flags2 = avctx->flags2;
1128 /* convert fourcc to upper case */
1129 s->codec_tag = avpriv_toupper4(avctx->codec_tag);
/* Wires up the error-resilience context: mirrors the macroblock
 * geometry and shared tables from the MpegEncContext, allocates the ER
 * temp buffer and per-MB error status table, and installs
 * mpeg_er_decode_mb as the concealment callback. Returns 0 on success
 * (elided in this extract) or AVERROR(ENOMEM) after freeing both
 * allocations. */
1132 static int init_er(MpegEncContext *s)
1134 ERContext *er = &s->er;
1135 int mb_array_size = s->mb_height * s->mb_stride;
1138 er->avctx = s->avctx;
1140 er->mb_index2xy = s->mb_index2xy;
1141 er->mb_num = s->mb_num;
1142 er->mb_width = s->mb_width;
1143 er->mb_height = s->mb_height;
1144 er->mb_stride = s->mb_stride;
1145 er->b8_stride = s->b8_stride;
1147 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1148 er->error_status_table = av_mallocz(mb_array_size);
1149 if (!er->er_temp_buffer || !er->error_status_table)
1152 er->mbskip_table = s->mbskip_table;
1153 er->mbintra_table = s->mbintra_table;
1155 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1156 er->dc_val[i] = s->dc_val[i];
1158 er->decode_mb = mpeg_er_decode_mb;
1163 av_freep(&er->er_temp_buffer);
1164 av_freep(&er->error_status_table);
1165 return AVERROR(ENOMEM);
1169 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Computes the macroblock geometry (mb_width/stride, b8_stride, edge
 * positions, mb_index2xy mapping) and allocates every resolution-sized
 * table: encoder MV tables (each pointer offset +mb_stride+1 past the
 * guard border), MB type / lambda / complexity tables, interlaced
 * field-MV tables for MPEG-4 or CODEC_FLAG_INTERLACED_ME, H.263 coded-
 * block/cbp/pred_dir tables, DC prediction values (initialized to 1024,
 * the "no prediction" reset value used by H.263/MPEG-4 DC prediction —
 * confirm against spec), the mbintra table (all-1: every MB starts
 * intra) and the mbskip table. Returns 0 on success (elided in this
 * extract) or AVERROR(ENOMEM) via the shared fail label. Several
 * encoder-only guards around the allocations are also elided. */
1171 static int init_context_frame(MpegEncContext *s)
1173 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1175 s->mb_width = (s->width + 15) / 16;
1176 s->mb_stride = s->mb_width + 1;
1177 s->b8_stride = s->mb_width * 2 + 1;
1178 mb_array_size = s->mb_height * s->mb_stride;
1179 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1181 /* set default edge pos, will be overridden
1182 * in decode_header if needed */
1183 s->h_edge_pos = s->mb_width * 16;
1184 s->v_edge_pos = s->mb_height * 16;
1186 s->mb_num = s->mb_width * s->mb_height;
1191 s->block_wrap[3] = s->b8_stride;
1193 s->block_wrap[5] = s->mb_stride;
1195 y_size = s->b8_stride * (2 * s->mb_height + 1);
1196 c_size = s->mb_stride * (s->mb_height + 1);
1197 yc_size = y_size + 2 * c_size;
1199 if (s->mb_height & 1)
1200 yc_size += 2*s->b8_stride + 2*s->mb_stride;
1202 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
1203 for (y = 0; y < s->mb_height; y++)
1204 for (x = 0; x < s->mb_width; x++)
1205 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1207 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1210 /* Allocate MV tables */
1211 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1212 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1213 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1214 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1215 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1216 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1217 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1218 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1219 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1220 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1221 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1222 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1224 /* Allocate MB type table */
1225 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1227 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1229 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1230 mb_array_size * sizeof(float), fail);
1231 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1232 mb_array_size * sizeof(float), fail);
/* Interlaced direct-mode tables: [i][j][k] over {forward/backward} x
 * {field parity} x {field parity}. */
1236 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1237 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1238 /* interlaced direct mode decoding tables */
1239 for (i = 0; i < 2; i++) {
1241 for (j = 0; j < 2; j++) {
1242 for (k = 0; k < 2; k++) {
1243 FF_ALLOCZ_OR_GOTO(s->avctx,
1244 s->b_field_mv_table_base[i][j][k],
1245 mv_table_size * 2 * sizeof(int16_t),
1247 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1250 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1251 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1252 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1254 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1257 if (s->out_format == FMT_H263) {
1259 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1260 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1262 /* cbp, ac_pred, pred_dir */
1263 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1264 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1267 if (s->h263_pred || s->h263_plus || !s->encoding) {
1269 // MN: we need these for error resilience of intra-frames
1270 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1271 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1272 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1273 s->dc_val[2] = s->dc_val[1] + c_size;
1274 for (i = 0; i < yc_size; i++)
1275 s->dc_val_base[i] = 1024;
1278 /* which mb is a intra block */
1279 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1280 memset(s->mbintra_table, 1, mb_array_size);
1282 /* init macroblock skip table */
1283 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1284 // Note the + 1 is for a quicker mpeg4 slice_end detection
1288 return AVERROR(ENOMEM);
1292 * init common structure for both encoder and decoder.
1293 * this assumes that some variables like width/height are already set
/**
 * Init the common structure shared by encoder and decoder.
 * Assumes that some variables like width/height are already set by the caller.
 * On any allocation failure the error path tears everything down again via
 * ff_mpv_common_end().
 */
1295 av_cold int ff_mpv_common_init(MpegEncContext *s)
1298 int nb_slices = (HAVE_THREADS &&
1299 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1300 s->avctx->thread_count : 1;
/* an explicit slice count requested on the encoder overrides the thread count */
1302 if (s->encoding && s->avctx->slices)
1303 nb_slices = s->avctx->slices;
/* interlaced MPEG-2: mb_height must cover pairs of field macroblock rows */
1305 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1306 s->mb_height = (s->height + 31) / 32 * 2;
1308 s->mb_height = (s->height + 15) / 16;
1310 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1311 av_log(s->avctx, AV_LOG_ERROR,
1312 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* clamp the slice count: more slices than MB rows (or than MAX_THREADS) is useless */
1316 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1319 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1321 max_slices = MAX_THREADS;
1322 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1323 " reducing to %d\n", nb_slices, max_slices);
1324 nb_slices = max_slices;
1327 if ((s->width || s->height) &&
1328 av_image_check_size(s->width, s->height, 0, s->avctx))
1333 s->flags = s->avctx->flags;
1334 s->flags2 = s->avctx->flags2;
1336 /* set chroma shifts */
1337 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1339 &s->chroma_y_shift);
/* allocate the picture pool and one AVFrame per slot */
1342 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1343 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1344 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1345 s->picture[i].f = av_frame_alloc();
1346 if (!s->picture[i].f)
/* the four special pictures (next/last/current/new) each carry their own AVFrame */
1349 memset(&s->next_picture, 0, sizeof(s->next_picture));
1350 memset(&s->last_picture, 0, sizeof(s->last_picture));
1351 memset(&s->current_picture, 0, sizeof(s->current_picture));
1352 memset(&s->new_picture, 0, sizeof(s->new_picture));
1353 s->next_picture.f = av_frame_alloc();
1354 if (!s->next_picture.f)
1356 s->last_picture.f = av_frame_alloc();
1357 if (!s->last_picture.f)
1359 s->current_picture.f = av_frame_alloc();
1360 if (!s->current_picture.f)
1362 s->new_picture.f = av_frame_alloc();
1363 if (!s->new_picture.f)
/* resolution-dependent tables (MV tables, MB type tables, ...) */
1366 if (init_context_frame(s))
1369 s->parse_context.state = -1;
1371 s->context_initialized = 1;
1372 s->thread_context[0] = s;
1374 // if (s->width && s->height) {
/* slice threading: clone the context for every extra slice and assign each
 * context a rounded, approximately equal range of MB rows */
1375 if (nb_slices > 1) {
1376 for (i = 1; i < nb_slices; i++) {
1377 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1378 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1381 for (i = 0; i < nb_slices; i++) {
1382 if (init_duplicate_context(s->thread_context[i]) < 0)
1384 s->thread_context[i]->start_mb_y =
1385 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1386 s->thread_context[i]->end_mb_y =
1387 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1390 if (init_duplicate_context(s) < 0)
1393 s->end_mb_y = s->mb_height;
1395 s->slice_context_count = nb_slices;
/* failure path: free everything allocated so far */
1400 ff_mpv_common_end(s);
1405 * Frees and resets MpegEncContext fields depending on the resolution.
1406 * Is used during resolution changes to avoid a full reinitialization of the
1409 static void free_context_frame(MpegEncContext *s)
/* free the MB-type table plus every forward/backward/bidir/direct MV table,
 * and reset the derived (offset) pointers so they cannot dangle */
1413 av_freep(&s->mb_type);
1414 av_freep(&s->p_mv_table_base);
1415 av_freep(&s->b_forw_mv_table_base);
1416 av_freep(&s->b_back_mv_table_base);
1417 av_freep(&s->b_bidir_forw_mv_table_base);
1418 av_freep(&s->b_bidir_back_mv_table_base);
1419 av_freep(&s->b_direct_mv_table_base);
1420 s->p_mv_table = NULL;
1421 s->b_forw_mv_table = NULL;
1422 s->b_back_mv_table = NULL;
1423 s->b_bidir_forw_mv_table = NULL;
1424 s->b_bidir_back_mv_table = NULL;
1425 s->b_direct_mv_table = NULL;
/* field (interlaced) MV/select tables, indexed [i][j][k] */
1426 for (i = 0; i < 2; i++) {
1427 for (j = 0; j < 2; j++) {
1428 for (k = 0; k < 2; k++) {
1429 av_freep(&s->b_field_mv_table_base[i][j][k]);
1430 s->b_field_mv_table[i][j][k] = NULL;
1432 av_freep(&s->b_field_select_table[i][j]);
1433 av_freep(&s->p_field_mv_table_base[i][j]);
1434 s->p_field_mv_table[i][j] = NULL;
1436 av_freep(&s->p_field_select_table[i]);
/* per-MB prediction/coding helper tables */
1439 av_freep(&s->dc_val_base);
1440 av_freep(&s->coded_block_base);
1441 av_freep(&s->mbintra_table);
1442 av_freep(&s->cbp_table);
1443 av_freep(&s->pred_dir_table);
1445 av_freep(&s->mbskip_table);
/* error-resilience and rate-control side tables */
1447 av_freep(&s->er.error_status_table);
1448 av_freep(&s->er.er_temp_buffer);
1449 av_freep(&s->mb_index2xy);
1450 av_freep(&s->lambda_table);
1452 av_freep(&s->cplx_tab);
1453 av_freep(&s->bits_tab);
/* mark the line sizes as unset so the next init recomputes them */
1455 s->linesize = s->uvlinesize = 0;
/**
 * Reinitialize the resolution-dependent parts of an already-initialized
 * context after a frame size change: tear down the per-resolution tables and
 * slice contexts, recompute mb_height, then rebuild both.
 * Returns 0 on success, a negative AVERROR on failure (after cleanup).
 */
1458 int ff_mpv_common_frame_size_change(MpegEncContext *s)
1462 if (!s->context_initialized)
1463 return AVERROR(EINVAL);
/* drop all duplicated slice contexts (index 0 is the main context itself) */
1465 if (s->slice_context_count > 1) {
1466 for (i = 0; i < s->slice_context_count; i++) {
1467 free_duplicate_context(s->thread_context[i]);
1469 for (i = 1; i < s->slice_context_count; i++) {
1470 av_freep(&s->thread_context[i]);
1473 free_duplicate_context(s);
1475 free_context_frame(s);
/* force reallocation of every pooled picture at the new size */
1478 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1479 s->picture[i].needs_realloc = 1;
1482 s->last_picture_ptr =
1483 s->next_picture_ptr =
1484 s->current_picture_ptr = NULL;
/* same mb_height rule as ff_mpv_common_init() */
1487 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1488 s->mb_height = (s->height + 31) / 32 * 2;
1490 s->mb_height = (s->height + 15) / 16;
1492 if ((s->width || s->height) &&
1493 (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
1496 if ((err = init_context_frame(s)))
1499 s->thread_context[0] = s;
/* rebuild the slice contexts with the previous slice count */
1501 if (s->width && s->height) {
1502 int nb_slices = s->slice_context_count;
1503 if (nb_slices > 1) {
1504 for (i = 1; i < nb_slices; i++) {
1505 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1506 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1509 for (i = 0; i < nb_slices; i++) {
1510 if ((err = init_duplicate_context(s->thread_context[i])) < 0)
1512 s->thread_context[i]->start_mb_y =
1513 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1514 s->thread_context[i]->end_mb_y =
1515 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1518 err = init_duplicate_context(s);
1522 s->end_mb_y = s->mb_height;
1524 s->slice_context_count = nb_slices;
/* failure path: release everything */
1529 ff_mpv_common_end(s);
1533 /* init common structure for both encoder and decoder */
/* Free the common structure for both encoder and decoder; safe to call on a
 * partially initialized context (used as the error path of the init code). */
1534 void ff_mpv_common_end(MpegEncContext *s)
/* release duplicated slice contexts first; slot 0 is the main context */
1538 if (s->slice_context_count > 1) {
1539 for (i = 0; i < s->slice_context_count; i++) {
1540 free_duplicate_context(s->thread_context[i]);
1542 for (i = 1; i < s->slice_context_count; i++) {
1543 av_freep(&s->thread_context[i]);
1545 s->slice_context_count = 1;
1546 } else free_duplicate_context(s);
1548 av_freep(&s->parse_context.buffer);
1549 s->parse_context.buffer_size = 0;
1551 av_freep(&s->bitstream_buffer);
1552 s->allocated_bitstream_buffer_size = 0;
/* unref and free every pooled picture, then the pool itself */
1555 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1556 ff_free_picture_tables(&s->picture[i]);
1557 ff_mpeg_unref_picture(s, &s->picture[i]);
1558 av_frame_free(&s->picture[i].f);
1561 av_freep(&s->picture);
/* the four special pictures own their AVFrames separately */
1562 ff_free_picture_tables(&s->last_picture);
1563 ff_mpeg_unref_picture(s, &s->last_picture);
1564 av_frame_free(&s->last_picture.f);
1565 ff_free_picture_tables(&s->current_picture);
1566 ff_mpeg_unref_picture(s, &s->current_picture);
1567 av_frame_free(&s->current_picture.f);
1568 ff_free_picture_tables(&s->next_picture);
1569 ff_mpeg_unref_picture(s, &s->next_picture);
1570 av_frame_free(&s->next_picture.f);
1571 ff_free_picture_tables(&s->new_picture);
1572 ff_mpeg_unref_picture(s, &s->new_picture);
1573 av_frame_free(&s->new_picture.f);
1575 free_context_frame(s);
/* reset state so a later re-init starts from a clean slate */
1577 s->context_initialized = 0;
1578 s->last_picture_ptr =
1579 s->next_picture_ptr =
1580 s->current_picture_ptr = NULL;
1581 s->linesize = s->uvlinesize = 0;
/**
 * Initialize a run-length table's derived arrays (max_level[], max_run[],
 * index_run[]) from its table_run[]/table_level[] data.
 * If static_store is non-NULL the three arrays live in that caller-provided
 * buffer (laid out back-to-back); otherwise they are av_malloc()ed.
 */
1584 av_cold void ff_init_rl(RLTable *rl,
1585 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1587 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1588 uint8_t index_run[MAX_RUN + 1];
1589 int last, run, level, start, end, i;
1591 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1592 if (static_store && rl->max_level[0])
1595 /* compute max_level[], max_run[] and index_run[] */
1596 for (last = 0; last < 2; last++) {
/* rl->n marks "no entry yet" in index_run[] (it is one past the last code) */
1605 memset(max_level, 0, MAX_RUN + 1);
1606 memset(max_run, 0, MAX_LEVEL + 1);
1607 memset(index_run, rl->n, MAX_RUN + 1);
1608 for (i = start; i < end; i++) {
1609 run = rl->table_run[i];
1610 level = rl->table_level[i];
1611 if (index_run[run] == rl->n)
1613 if (level > max_level[run])
1614 max_level[run] = level;
1615 if (run > max_run[level])
1616 max_run[level] = run;
/* static path: carve the three arrays out of static_store[last];
 * dynamic path: allocate each array individually */
1619 rl->max_level[last] = static_store[last];
1621 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1622 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1624 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1626 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1627 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1629 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1631 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1632 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Build the per-qscale rl_vlc[] decode tables for a run-length table: the VLC
 * is decoded once into a temporary table, then the run/level for every table
 * entry is precomputed for each of the 32 quantizer values.
 */
1636 av_cold void ff_init_vlc_rl(RLTable *rl, unsigned static_size)
1639 VLC_TYPE table[1500][2] = {{0}};
1640 VLC vlc = { .table = table, .table_allocated = static_size };
1641 av_assert0(static_size <= FF_ARRAY_ELEMS(table));
1642 init_vlc(&vlc, 9, rl->n + 1, &rl->table_vlc[0][1], 4, 2, &rl->table_vlc[0][0], 4, 2, INIT_VLC_USE_NEW_STATIC);
1644 for (q = 0; q < 32; q++) {
/* qadd = (q - 1) | 1 gives the odd dequant offset used by H.263-style codecs */
1646 int qadd = (q - 1) | 1;
1652 for (i = 0; i < vlc.table_size; i++) {
1653 int code = vlc.table[i][0];
1654 int len = vlc.table[i][1];
1657 if (len == 0) { // illegal code
1660 } else if (len < 0) { // more bits needed
1664 if (code == rl->n) { // esc
/* regular code: precompute dequantized level and 1-based run;
 * +192 flags "last coefficient" codes (code >= rl->last) */
1668 run = rl->table_run[code] + 1;
1669 level = rl->table_level[code] * qmul + qadd;
1670 if (code >= rl->last) run += 192;
1673 rl->rl_vlc[q][i].len = len;
1674 rl->rl_vlc[q][i].level = level;
1675 rl->rl_vlc[q][i].run = run;
/* Unref every picture in the pool that is not currently a reference frame. */
1680 static void release_unused_pictures(MpegEncContext *s)
1684 /* release non reference frames */
1685 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1686 if (!s->picture[i].reference)
1687 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Return whether a pool slot may be reused: never the last picture, always a
 * slot with no buffer, and slots flagged for realloc unless they are still
 * held as a delayed reference. */
1691 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1693 if (pic == s->last_picture_ptr)
1695 if (!pic->f->buf[0])
1697 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Pick a free slot from the picture pool. First pass prefers completely empty
 * slots (no buffer, not the last picture); second pass accepts any slot that
 * pic_is_unused() allows. Aborts on overflow — see the comment below. */
1702 static int find_unused_picture(MpegEncContext *s, int shared)
1707 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1708 if (!s->picture[i].f->buf[0] && &s->picture[i] != s->last_picture_ptr)
1712 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1713 if (pic_is_unused(s, &s->picture[i]))
1718 av_log(s->avctx, AV_LOG_FATAL,
1719 "Internal error, picture buffer overflow\n");
1720 /* We could return -1, but the codec would crash trying to draw into a
1721 * non-existing frame anyway. This is safer than waiting for a random crash.
1722 * Also the return of this is never useful, an encoder must only allocate
1723 * as much as allowed in the specification. This has no relationship to how
1724 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1725 * enough for such valid streams).
1726 * Plus, a decoder has to check stream validity and remove frames if too
1727 * many reference frames are around. Waiting for "OOM" is not correct at
1728 * all. Similarly, missing reference frames have to be replaced by
1729 * interpolated/MC frames, anything else is a bug in the codec ...
/* Public wrapper around find_unused_picture(): if the chosen slot was flagged
 * needs_realloc, drop its old tables/buffers before handing it out. */
1735 int ff_find_unused_picture(MpegEncContext *s, int shared)
1737 int ret = find_unused_picture(s, shared);
1739 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1740 if (s->picture[ret].needs_realloc) {
1741 s->picture[ret].needs_realloc = 0;
1742 ff_free_picture_tables(&s->picture[ret]);
1743 ff_mpeg_unref_picture(s, &s->picture[ret]);
/* Fill a YUV frame with mid-gray (0x80 in every plane), respecting chroma
 * subsampling. Used by the FF_DEBUG_NOMC debug mode to blank out the frame. */
1749 static void gray_frame(AVFrame *frame)
1751 int i, h_chroma_shift, v_chroma_shift;
1753 av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1755 for(i=0; i<frame->height; i++)
1756 memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
/* chroma planes are scaled down by the format's subsampling shifts */
1757 for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1758 memset(frame->data[1] + frame->linesize[1]*i,
1759 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1760 memset(frame->data[2] + frame->linesize[2]*i,
1761 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1766 * generic function called after decoding
1767 * the header and before a frame is decoded.
/**
 * Generic function called after decoding the header and before a frame is
 * decoded: releases stale reference pictures, selects/allocates the current
 * picture, rotates last/next reference pointers, allocates dummy references
 * for broken streams, and installs the codec-appropriate dequantizers.
 * Returns 0 on success, negative on error.
 */
1769 int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1775 if (!ff_thread_can_start_frame(avctx)) {
1776 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1780 /* mark & release old frames */
1781 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1782 s->last_picture_ptr != s->next_picture_ptr &&
1783 s->last_picture_ptr->f->buf[0]) {
1784 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1787 /* release forgotten pictures */
1788 /* if (mpeg124/h263) */
1789 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1790 if (&s->picture[i] != s->last_picture_ptr &&
1791 &s->picture[i] != s->next_picture_ptr &&
1792 s->picture[i].reference && !s->picture[i].needs_realloc) {
/* with frame threading another thread may legitimately still hold it */
1793 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1794 av_log(avctx, AV_LOG_ERROR,
1795 "releasing zombie picture\n");
1796 ff_mpeg_unref_picture(s, &s->picture[i]);
1800 ff_mpeg_unref_picture(s, &s->current_picture);
1802 release_unused_pictures(s);
1804 if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1805 // we already have a unused image
1806 // (maybe it was set before reading the header)
1807 pic = s->current_picture_ptr;
1809 i = ff_find_unused_picture(s, 0);
1811 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1814 pic = &s->picture[i];
1818 if (!s->droppable) {
1819 if (s->pict_type != AV_PICTURE_TYPE_B)
1823 pic->f->coded_picture_number = s->coded_picture_number++;
1825 if (ff_alloc_picture(s, pic, 0) < 0)
1828 s->current_picture_ptr = pic;
1829 // FIXME use only the vars from current_pic
1830 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1831 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1832 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* field pictures: derive top_field_first from picture structure + parity */
1833 if (s->picture_structure != PICT_FRAME)
1834 s->current_picture_ptr->f->top_field_first =
1835 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1837 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1838 !s->progressive_sequence;
1839 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1841 s->current_picture_ptr->f->pict_type = s->pict_type;
1842 // if (s->flags && CODEC_FLAG_QSCALE)
1843 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1844 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1846 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1847 s->current_picture_ptr)) < 0)
/* reference rotation: non-B frames become the new "next" reference, the old
 * "next" becomes "last" */
1850 if (s->pict_type != AV_PICTURE_TYPE_B) {
1851 s->last_picture_ptr = s->next_picture_ptr;
1853 s->next_picture_ptr = s->current_picture_ptr;
1855 ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1856 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1857 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1858 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1859 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1860 s->pict_type, s->droppable);
/* missing last reference (stream starts on a non-keyframe, or field-based
 * first keyframe): fabricate a gray dummy reference so decoding can proceed */
1862 if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1863 (s->pict_type != AV_PICTURE_TYPE_I ||
1864 s->picture_structure != PICT_FRAME)) {
1865 int h_chroma_shift, v_chroma_shift;
1866 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1867 &h_chroma_shift, &v_chroma_shift);
1868 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1869 av_log(avctx, AV_LOG_DEBUG,
1870 "allocating dummy last picture for B frame\n");
1871 else if (s->pict_type != AV_PICTURE_TYPE_I)
1872 av_log(avctx, AV_LOG_ERROR,
1873 "warning: first frame is no keyframe\n");
1874 else if (s->picture_structure != PICT_FRAME)
1875 av_log(avctx, AV_LOG_DEBUG,
1876 "allocate dummy last picture for field based first keyframe\n");
1878 /* Allocate a dummy frame */
1879 i = ff_find_unused_picture(s, 0);
1881 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1884 s->last_picture_ptr = &s->picture[i];
1886 s->last_picture_ptr->reference = 3;
1887 s->last_picture_ptr->f->key_frame = 0;
1888 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1890 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1891 s->last_picture_ptr = NULL;
/* fill the dummy with mid-gray (hwaccel surfaces cannot be written here) */
1895 if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1896 for(i=0; i<avctx->height; i++)
1897 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1898 0x80, avctx->width);
1899 if (s->last_picture_ptr->f->data[2]) {
1900 for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1901 memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1902 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1903 memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1904 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
/* FLV1/H.263 use luma 16 (black) instead of mid-gray for the dummy frame */
1908 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1909 for(i=0; i<avctx->height; i++)
1910 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
1914 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1915 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* B frame with no "next" reference: fabricate a dummy one the same way */
1917 if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1918 s->pict_type == AV_PICTURE_TYPE_B) {
1919 /* Allocate a dummy frame */
1920 i = ff_find_unused_picture(s, 0);
1922 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1925 s->next_picture_ptr = &s->picture[i];
1927 s->next_picture_ptr->reference = 3;
1928 s->next_picture_ptr->f->key_frame = 0;
1929 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1931 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1932 s->next_picture_ptr = NULL;
1935 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1936 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1939 #if 0 // BUFREF-FIXME
1940 memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1941 memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
/* refresh the local last/next Picture copies from the pointers */
1943 if (s->last_picture_ptr) {
1944 ff_mpeg_unref_picture(s, &s->last_picture);
1945 if (s->last_picture_ptr->f->buf[0] &&
1946 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1947 s->last_picture_ptr)) < 0)
1950 if (s->next_picture_ptr) {
1951 ff_mpeg_unref_picture(s, &s->next_picture);
1952 if (s->next_picture_ptr->f->buf[0] &&
1953 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1954 s->next_picture_ptr)) < 0)
1958 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1959 s->last_picture_ptr->f->buf[0]));
/* field decoding: address only every second line by doubling the linesizes
 * (and offsetting by one line for the bottom field) */
1961 if (s->picture_structure!= PICT_FRAME) {
1963 for (i = 0; i < 4; i++) {
1964 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1965 s->current_picture.f->data[i] +=
1966 s->current_picture.f->linesize[i];
1968 s->current_picture.f->linesize[i] *= 2;
1969 s->last_picture.f->linesize[i] *= 2;
1970 s->next_picture.f->linesize[i] *= 2;
1974 s->err_recognition = avctx->err_recognition;
1976 /* set dequantizer, we can't do it during init as
1977 * it might change for mpeg4 and we can't do it in the header
1978 * decode as init is not called for mpeg4 there yet */
1979 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1980 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1981 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1982 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1983 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1984 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1986 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1987 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1990 if (s->avctx->debug & FF_DEBUG_NOMC) {
1991 gray_frame(s->current_picture_ptr->f);
1997 /* called after a frame has been decoded. */
1998 void ff_mpv_frame_end(MpegEncContext *s)
/* tell waiting frame threads the current reference picture is complete */
2002 if (s->current_picture.reference)
2003 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
/* Clip a line segment (sx,sy)-(ex,ey) to 0..maxx on the x axis, adjusting the
 * y coordinates proportionally (64-bit intermediate to avoid overflow); the
 * recursive call normalizes the endpoint order.
 * NOTE(review): this extract omits several original lines of the function
 * (branch conditions / returns); code below kept byte-identical. */
2008 static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
2011 return clip_line(ex, ey, sx, sy, maxx);
2016 *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
2023 *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
2031 * Draw a line from (ex, ey) -> (sx, sy).
2032 * @param w width of the image
2033 * @param h height of the image
2034 * @param stride stride/linesize of the image
2035 * @param color color of the arrow
/* Draw an anti-aliased line from (ex,ey) to (sx,sy) into an 8-bit plane by
 * adding `color`, using 16.16 fixed-point interpolation along the major axis. */
2037 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
2038 int w, int h, int stride, int color)
/* clip against the image borders; clip_line is called once per axis */
2042 if (clip_line(&sx, &sy, &ex, &ey, w - 1))
2044 if (clip_line(&sy, &sx, &ey, &ex, h - 1))
2047 sx = av_clip(sx, 0, w - 1);
2048 sy = av_clip(sy, 0, h - 1);
2049 ex = av_clip(ex, 0, w - 1);
2050 ey = av_clip(ey, 0, h - 1);
2052 buf[sy * stride + sx] += color;
/* mostly-horizontal case: step x, interpolate y in 16.16 fixed point */
2054 if (FFABS(ex - sx) > FFABS(ey - sy)) {
2056 FFSWAP(int, sx, ex);
2057 FFSWAP(int, sy, ey);
2059 buf += sx + sy * stride;
2061 f = ((ey - sy) << 16) / ex;
2062 for (x = 0; x <= ex; x++) {
/* fr is the fractional part; split color between the two adjacent rows */
2064 fr = (x * f) & 0xFFFF;
2065 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2066 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* mostly-vertical case: step y, interpolate x */
2070 FFSWAP(int, sx, ex);
2071 FFSWAP(int, sy, ey);
2073 buf += sx + sy * stride;
2076 f = ((ex - sx) << 16) / ey;
2079 for(y= 0; y <= ey; y++){
2081 fr = (y*f) & 0xFFFF;
2082 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2083 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
2089 * Draw an arrow from (ex, ey) -> (sx, sy).
2090 * @param w width of the image
2091 * @param h height of the image
2092 * @param stride stride/linesize of the image
2093 * @param color color of the arrow
/* Draw an arrow from (ex,ey) to (sx,sy): the shaft via draw_line() plus, for
 * sufficiently long vectors, two short head strokes rotated around the tip. */
2095 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2096 int ey, int w, int h, int stride, int color, int tail, int direction)
2101 FFSWAP(int, sx, ex);
2102 FFSWAP(int, sy, ey);
/* allow endpoints slightly outside the image; draw_line clips precisely */
2105 sx = av_clip(sx, -100, w + 100);
2106 sy = av_clip(sy, -100, h + 100);
2107 ex = av_clip(ex, -100, w + 100);
2108 ey = av_clip(ey, -100, h + 100);
/* only draw a head if the vector is longer than 3 pixels */
2113 if (dx * dx + dy * dy > 3 * 3) {
2116 int length = ff_sqrt((rx * rx + ry * ry) << 8);
2118 // FIXME subpixel accuracy
/* normalize (rx,ry) to a fixed head length in 1/16-pel units */
2119 rx = ROUNDED_DIV(rx * 3 << 4, length);
2120 ry = ROUNDED_DIV(ry * 3 << 4, length);
2127 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2128 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2130 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
/* Fill one AVMotionVector entry for the motion-vector export side data; the
 * block size is derived from the macroblock partitioning flags. */
2134 static int add_mb(AVMotionVector *mb, uint32_t mb_type,
2135 int dst_x, int dst_y,
2136 int src_x, int src_y,
2139 mb->w = IS_8X8(mb_type) || IS_8X16(mb_type) ? 8 : 16;
2140 mb->h = IS_8X8(mb_type) || IS_16X8(mb_type) ? 8 : 16;
/* source = -1 for the past (forward prediction), +1 for the future */
2145 mb->source = direction ? 1 : -1;
2146 mb->flags = 0; // XXX: does mb_type contain extra information that could be exported here?
2151 * Print debugging info for the given picture.
2153 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2154 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2156 int mb_width, int mb_height, int mb_stride, int quarter_sample)
2158 if ((avctx->flags2 & CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) {
2159 const int shift = 1 + quarter_sample;
2160 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2161 const int mv_stride = (mb_width << mv_sample_log2) +
2162 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2163 int mb_x, mb_y, mbcount = 0;
2165 /* size is width * height * 2 * 4 where 2 is for directions and 4 is
2166 * for the maximum number of MB (4 MB in case of IS_8x8) */
2167 AVMotionVector *mvs = av_malloc_array(mb_width * mb_height, 2 * 4 * sizeof(AVMotionVector));
2171 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2172 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2173 int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
2174 for (direction = 0; direction < 2; direction++) {
2175 if (!USES_LIST(mb_type, direction))
2177 if (IS_8X8(mb_type)) {
2178 for (i = 0; i < 4; i++) {
2179 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2180 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2181 int xy = (mb_x * 2 + (i & 1) +
2182 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2183 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2184 int my = (motion_val[direction][xy][1] >> shift) + sy;
2185 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2187 } else if (IS_16X8(mb_type)) {
2188 for (i = 0; i < 2; i++) {
2189 int sx = mb_x * 16 + 8;
2190 int sy = mb_y * 16 + 4 + 8 * i;
2191 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2192 int mx = (motion_val[direction][xy][0] >> shift);
2193 int my = (motion_val[direction][xy][1] >> shift);
2195 if (IS_INTERLACED(mb_type))
2198 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2200 } else if (IS_8X16(mb_type)) {
2201 for (i = 0; i < 2; i++) {
2202 int sx = mb_x * 16 + 4 + 8 * i;
2203 int sy = mb_y * 16 + 8;
2204 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2205 int mx = motion_val[direction][xy][0] >> shift;
2206 int my = motion_val[direction][xy][1] >> shift;
2208 if (IS_INTERLACED(mb_type))
2211 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2214 int sx = mb_x * 16 + 8;
2215 int sy = mb_y * 16 + 8;
2216 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
2217 int mx = (motion_val[direction][xy][0]>>shift) + sx;
2218 int my = (motion_val[direction][xy][1]>>shift) + sy;
2219 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2226 AVFrameSideData *sd;
2228 av_log(avctx, AV_LOG_DEBUG, "Adding %d MVs info to frame %d\n", mbcount, avctx->frame_number);
2229 sd = av_frame_new_side_data(pict, AV_FRAME_DATA_MOTION_VECTORS, mbcount * sizeof(AVMotionVector));
2234 memcpy(sd->data, mvs, mbcount * sizeof(AVMotionVector));
2240 /* TODO: export all the following to make them accessible for users (and filters) */
2241 if (avctx->hwaccel || !mbtype_table
2242 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
2246 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2249 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2250 av_get_picture_type_char(pict->pict_type));
2251 for (y = 0; y < mb_height; y++) {
2252 for (x = 0; x < mb_width; x++) {
2253 if (avctx->debug & FF_DEBUG_SKIP) {
2254 int count = mbskip_table ? mbskip_table[x + y * mb_stride] : 0;
2257 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2259 if (avctx->debug & FF_DEBUG_QP) {
2260 av_log(avctx, AV_LOG_DEBUG, "%2d",
2261 qscale_table[x + y * mb_stride]);
2263 if (avctx->debug & FF_DEBUG_MB_TYPE) {
2264 int mb_type = mbtype_table[x + y * mb_stride];
2265 // Type & MV direction
2266 if (IS_PCM(mb_type))
2267 av_log(avctx, AV_LOG_DEBUG, "P");
2268 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2269 av_log(avctx, AV_LOG_DEBUG, "A");
2270 else if (IS_INTRA4x4(mb_type))
2271 av_log(avctx, AV_LOG_DEBUG, "i");
2272 else if (IS_INTRA16x16(mb_type))
2273 av_log(avctx, AV_LOG_DEBUG, "I");
2274 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2275 av_log(avctx, AV_LOG_DEBUG, "d");
2276 else if (IS_DIRECT(mb_type))
2277 av_log(avctx, AV_LOG_DEBUG, "D");
2278 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2279 av_log(avctx, AV_LOG_DEBUG, "g");
2280 else if (IS_GMC(mb_type))
2281 av_log(avctx, AV_LOG_DEBUG, "G");
2282 else if (IS_SKIP(mb_type))
2283 av_log(avctx, AV_LOG_DEBUG, "S");
2284 else if (!USES_LIST(mb_type, 1))
2285 av_log(avctx, AV_LOG_DEBUG, ">");
2286 else if (!USES_LIST(mb_type, 0))
2287 av_log(avctx, AV_LOG_DEBUG, "<");
2289 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2290 av_log(avctx, AV_LOG_DEBUG, "X");
2294 if (IS_8X8(mb_type))
2295 av_log(avctx, AV_LOG_DEBUG, "+");
2296 else if (IS_16X8(mb_type))
2297 av_log(avctx, AV_LOG_DEBUG, "-");
2298 else if (IS_8X16(mb_type))
2299 av_log(avctx, AV_LOG_DEBUG, "|");
2300 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2301 av_log(avctx, AV_LOG_DEBUG, " ");
2303 av_log(avctx, AV_LOG_DEBUG, "?");
2306 if (IS_INTERLACED(mb_type))
2307 av_log(avctx, AV_LOG_DEBUG, "=");
2309 av_log(avctx, AV_LOG_DEBUG, " ");
2312 av_log(avctx, AV_LOG_DEBUG, "\n");
2316 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2317 (avctx->debug_mv)) {
2320 int h_chroma_shift, v_chroma_shift, block_height;
2322 const int shift = 1 + quarter_sample;
2324 const int width = avctx->width;
2325 const int height = avctx->height;
2327 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2328 const int mv_stride = (mb_width << mv_sample_log2) +
2329 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2331 *low_delay = 0; // needed to see the vectors without trashing the buffers
2333 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2335 av_frame_make_writable(pict);
2337 pict->opaque = NULL;
2339 ptr = pict->data[0];
2341 block_height = 16 >> v_chroma_shift;
2343 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2345 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2346 const int mb_index = mb_x + mb_y * mb_stride;
2348 if ((avctx->debug_mv) && motion_val[0]) {
2350 for (type = 0; type < 3; type++) {
2354 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2355 (pict->pict_type!= AV_PICTURE_TYPE_P))
2360 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2361 (pict->pict_type!= AV_PICTURE_TYPE_B))
2366 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2367 (pict->pict_type!= AV_PICTURE_TYPE_B))
2372 if (!USES_LIST(mbtype_table[mb_index], direction))
2375 if (IS_8X8(mbtype_table[mb_index])) {
2377 for (i = 0; i < 4; i++) {
2378 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2379 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2380 int xy = (mb_x * 2 + (i & 1) +
2381 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2382 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2383 int my = (motion_val[direction][xy][1] >> shift) + sy;
2384 draw_arrow(ptr, sx, sy, mx, my, width,
2385 height, pict->linesize[0], 100, 0, direction);
2387 } else if (IS_16X8(mbtype_table[mb_index])) {
2389 for (i = 0; i < 2; i++) {
2390 int sx = mb_x * 16 + 8;
2391 int sy = mb_y * 16 + 4 + 8 * i;
2392 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2393 int mx = (motion_val[direction][xy][0] >> shift);
2394 int my = (motion_val[direction][xy][1] >> shift);
2396 if (IS_INTERLACED(mbtype_table[mb_index]))
2399 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2400 height, pict->linesize[0], 100, 0, direction);
2402 } else if (IS_8X16(mbtype_table[mb_index])) {
2404 for (i = 0; i < 2; i++) {
2405 int sx = mb_x * 16 + 4 + 8 * i;
2406 int sy = mb_y * 16 + 8;
2407 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2408 int mx = motion_val[direction][xy][0] >> shift;
2409 int my = motion_val[direction][xy][1] >> shift;
2411 if (IS_INTERLACED(mbtype_table[mb_index]))
2414 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2415 height, pict->linesize[0], 100, 0, direction);
2418 int sx= mb_x * 16 + 8;
2419 int sy= mb_y * 16 + 8;
2420 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2421 int mx= (motion_val[direction][xy][0]>>shift) + sx;
2422 int my= (motion_val[direction][xy][1]>>shift) + sy;
2423 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
2428 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2429 uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2430 0x0101010101010101ULL;
2432 for (y = 0; y < block_height; y++) {
2433 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2434 (block_height * mb_y + y) *
2435 pict->linesize[1]) = c;
2436 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2437 (block_height * mb_y + y) *
2438 pict->linesize[2]) = c;
2441 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2443 int mb_type = mbtype_table[mb_index];
2446 #define COLOR(theta, r) \
2447 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2448 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2452 if (IS_PCM(mb_type)) {
2454 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2455 IS_INTRA16x16(mb_type)) {
2457 } else if (IS_INTRA4x4(mb_type)) {
2459 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2461 } else if (IS_DIRECT(mb_type)) {
2463 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2465 } else if (IS_GMC(mb_type)) {
2467 } else if (IS_SKIP(mb_type)) {
2469 } else if (!USES_LIST(mb_type, 1)) {
2471 } else if (!USES_LIST(mb_type, 0)) {
2474 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2478 u *= 0x0101010101010101ULL;
2479 v *= 0x0101010101010101ULL;
2480 for (y = 0; y < block_height; y++) {
2481 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2482 (block_height * mb_y + y) * pict->linesize[1]) = u;
2483 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2484 (block_height * mb_y + y) * pict->linesize[2]) = v;
2488 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2489 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2490 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2491 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2492 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2494 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2495 for (y = 0; y < 16; y++)
2496 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2497 pict->linesize[0]] ^= 0x80;
2499 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2500 int dm = 1 << (mv_sample_log2 - 2);
2501 for (i = 0; i < 4; i++) {
2502 int sx = mb_x * 16 + 8 * (i & 1);
2503 int sy = mb_y * 16 + 8 * (i >> 1);
2504 int xy = (mb_x * 2 + (i & 1) +
2505 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2507 int32_t *mv = (int32_t *) &motion_val[0][xy];
2508 if (mv[0] != mv[dm] ||
2509 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2510 for (y = 0; y < 8; y++)
2511 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2512 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2513 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2514 pict->linesize[0]) ^= 0x8080808080808080ULL;
2518 if (IS_INTERLACED(mb_type) &&
2519 avctx->codec->id == AV_CODEC_ID_H264) {
2524 mbskip_table[mb_index] = 0;
2530 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2532 ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
2533 p->qscale_table, p->motion_val, &s->low_delay,
2534 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/* Export the picture's qscale table to the output frame as QP side data.
 * The table buffer is re-referenced and offset past the top/left padding
 * row before being handed to av_frame_set_qp_table().
 * NOTE(review): source lines appear to be missing here (no '{' and no
 * visible NULL check guarding the ENOMEM return) -- verify against upstream. */
2537 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2539 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
/* skip the first padded row plus one column of padding */
2540 int offset = 2*s->mb_stride + 1;
2542 return AVERROR(ENOMEM);
/* the buffer must still cover one qscale entry per MB row of the frame */
2543 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2544 ref->size -= offset;
2545 ref->data += offset;
2546 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/* Half-pel motion compensation for one block in lowres decoding mode.
 * Computes the sub-pel fraction from the motion vector, falls back to the
 * edge-emulation buffer when the source block crosses the picture edge,
 * and applies the H.264 chroma MC function (reused here as a generic
 * bilinear interpolator for lowres).
 * NOTE(review): several declaration/body lines are missing from this view
 * (sx/sy/emu declarations, the quarter_sample branch body) -- confirm
 * against upstream before modifying. */
2549 static inline int hpel_motion_lowres(MpegEncContext *s,
2550 uint8_t *dest, uint8_t *src,
2551 int field_based, int field_select,
2552 int src_x, int src_y,
2553 int width, int height, ptrdiff_t stride,
2554 int h_edge_pos, int v_edge_pos,
2555 int w, int h, h264_chroma_mc_func *pix_op,
2556 int motion_x, int motion_y)
2558 const int lowres = s->avctx->lowres;
/* pix_op tables only go up to index 3 */
2559 const int op_index = FFMIN(lowres, 3);
/* mask selecting the sub-pel fraction bits at this lowres level */
2560 const int s_mask = (2 << lowres) - 1;
2564 if (s->quarter_sample) {
2569 sx = motion_x & s_mask;
2570 sy = motion_y & s_mask;
/* note: '>>' binds looser than '+', so this is >> (lowres + 1) */
2571 src_x += motion_x >> lowres + 1;
2572 src_y += motion_y >> lowres + 1;
2574 src += src_y * stride + src_x;
/* source block (plus interpolation margin) outside the edge area?
 * then build a padded copy in the edge emulation buffer */
2576 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2577 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2578 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2579 s->linesize, s->linesize,
2580 w + 1, (h + 1) << field_based,
2581 src_x, src_y << field_based,
2582 h_edge_pos, v_edge_pos);
2583 src = s->edge_emu_buffer;
/* rescale the sub-pel fraction to the 1/8-pel range the MC function expects */
2587 sx = (sx << 2) >> lowres;
2588 sy = (sy << 2) >> lowres;
2591 pix_op[op_index](dest, src, stride, h, sx, sy);
2595 /* apply one mpeg motion vector to the three components */
/* Apply one MPEG motion vector to the three components (Y, Cb, Cr) in
 * lowres decoding mode.  Derives luma and chroma source positions and
 * sub-pel fractions from the MV according to the output format's chroma
 * MV rules, handles edge emulation and field-based addressing, then runs
 * the chroma MC functions as generic bilinear interpolators.
 * NOTE(review): many lines are missing from this view (parts of the
 * signature, quarter_sample body, several else branches and closing
 * braces) -- confirm against upstream before modifying. */
2596 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2603 uint8_t **ref_picture,
2604 h264_chroma_mc_func *pix_op,
2605 int motion_x, int motion_y,
2608 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2609 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2610 ptrdiff_t uvlinesize, linesize;
2611 const int lowres = s->avctx->lowres;
2612 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2613 const int block_s = 8>>lowres;
2614 const int s_mask = (2 << lowres) - 1;
2615 const int h_edge_pos = s->h_edge_pos >> lowres;
2616 const int v_edge_pos = s->v_edge_pos >> lowres;
/* field pictures address every second line */
2617 linesize = s->current_picture.f->linesize[0] << field_based;
2618 uvlinesize = s->current_picture.f->linesize[1] << field_based;
2620 // FIXME obviously not perfect but qpel will not work in lowres anyway
2621 if (s->quarter_sample) {
2627 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2630 sx = motion_x & s_mask;
2631 sy = motion_y & s_mask;
/* note: '>>' binds looser than '+': shift by (lowres + 1) */
2632 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2633 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* chroma MV derivation depends on the container format */
2635 if (s->out_format == FMT_H263) {
2636 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2637 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2638 uvsrc_x = src_x >> 1;
2639 uvsrc_y = src_y >> 1;
2640 } else if (s->out_format == FMT_H261) {
2641 // even chroma mv's are full pel in H261
2644 uvsx = (2 * mx) & s_mask;
2645 uvsy = (2 * my) & s_mask;
2646 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2647 uvsrc_y = mb_y * block_s + (my >> lowres);
2649 if(s->chroma_y_shift){
2654 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2655 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2657 if(s->chroma_x_shift){
2661 uvsy = motion_y & s_mask;
2663 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2666 uvsx = motion_x & s_mask;
2667 uvsy = motion_y & s_mask;
2674 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2675 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2676 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* out-of-picture source? pad via the edge emulation buffer */
2678 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2679 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2680 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2681 linesize >> field_based, linesize >> field_based,
2682 17, 17 + field_based,
2683 src_x, src_y << field_based, h_edge_pos,
2685 ptr_y = s->edge_emu_buffer;
/* chroma is skipped entirely in gray-only decoding */
2686 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2687 uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2688 uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2689 s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2690 uvlinesize >> field_based, uvlinesize >> field_based,
2692 uvsrc_x, uvsrc_y << field_based,
2693 h_edge_pos >> 1, v_edge_pos >> 1);
2694 s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2695 uvlinesize >> field_based,uvlinesize >> field_based,
2697 uvsrc_x, uvsrc_y << field_based,
2698 h_edge_pos >> 1, v_edge_pos >> 1);
2704 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
/* bottom field: step destinations and sources down one line */
2706 dest_y += s->linesize;
2707 dest_cb += s->uvlinesize;
2708 dest_cr += s->uvlinesize;
2712 ptr_y += s->linesize;
2713 ptr_cb += s->uvlinesize;
2714 ptr_cr += s->uvlinesize;
/* rescale sub-pel fractions to the 1/8-pel range used by the MC functions */
2717 sx = (sx << 2) >> lowres;
2718 sy = (sy << 2) >> lowres;
2719 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2721 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2722 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2723 uvsx = (uvsx << 2) >> lowres;
2724 uvsy = (uvsy << 2) >> lowres;
2726 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2727 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2730 // FIXME h261 lowres loop filter
/* Chroma motion compensation for a 4MV (four 8x8 luma vectors) macroblock
 * in lowres mode: the four luma MVs are combined into one chroma MV with
 * the H.263 special rounding, then both chroma planes are interpolated.
 * NOTE(review): lines are missing from this view (mx/my parameters,
 * quarter_sample body, the second emu check, closing braces) -- confirm
 * against upstream before modifying. */
2733 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2734 uint8_t *dest_cb, uint8_t *dest_cr,
2735 uint8_t **ref_picture,
2736 h264_chroma_mc_func * pix_op,
2739 const int lowres = s->avctx->lowres;
2740 const int op_index = FFMIN(lowres, 3);
2741 const int block_s = 8 >> lowres;
2742 const int s_mask = (2 << lowres) - 1;
/* chroma planes are half-size: '>>' binds looser than '+', so >> (lowres + 1) */
2743 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2744 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2745 int emu = 0, src_x, src_y, sx, sy;
2749 if (s->quarter_sample) {
2754 /* In case of 8X8, we construct a single chroma motion vector
2755 with a special rounding */
2756 mx = ff_h263_round_chroma(mx);
2757 my = ff_h263_round_chroma(my);
2761 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2762 src_y = s->mb_y * block_s + (my >> lowres + 1);
/* same offset for both chroma planes */
2764 offset = src_y * s->uvlinesize + src_x;
2765 ptr = ref_picture[1] + offset;
2766 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2767 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2768 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2769 s->uvlinesize, s->uvlinesize,
2771 src_x, src_y, h_edge_pos, v_edge_pos);
2772 ptr = s->edge_emu_buffer;
/* rescale the sub-pel fraction to the 1/8-pel range */
2775 sx = (sx << 2) >> lowres;
2776 sy = (sy << 2) >> lowres;
2777 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr plane: same position, emulation repeated when needed */
2779 ptr = ref_picture[2] + offset;
2781 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2782 s->uvlinesize, s->uvlinesize,
2784 src_x, src_y, h_edge_pos, v_edge_pos);
2785 ptr = s->edge_emu_buffer;
2787 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2791 * motion compensation of a single macroblock
2793 * @param dest_y luma destination pointer
2794 * @param dest_cb chroma cb/u destination pointer
2795 * @param dest_cr chroma cr/v destination pointer
2796 * @param dir direction (0->forward, 1->backward)
2797 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2798 * @param pix_op halfpel motion compensation function (average or put normally)
2799 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Motion compensation of a single macroblock in lowres mode, dispatching
 * on s->mv_type (16x16, 8x8, field, 16x8, dual-prime).  MVs come from
 * s->mv; each case forwards to mpeg_motion_lowres()/hpel_motion_lowres().
 * NOTE(review): case labels, some arguments and closing braces are missing
 * from this view -- confirm against upstream before modifying. */
2801 static inline void MPV_motion_lowres(MpegEncContext *s,
2802 uint8_t *dest_y, uint8_t *dest_cb,
2804 int dir, uint8_t **ref_picture,
2805 h264_chroma_mc_func *pix_op)
2809 const int lowres = s->avctx->lowres;
2810 const int block_s = 8 >>lowres;
2815 switch (s->mv_type) {
/* single 16x16 vector for the whole MB */
2817 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2819 ref_picture, pix_op,
2820 s->mv[dir][0][0], s->mv[dir][0][1],
/* 4MV: one vector per 8x8 luma block */
2826 for (i = 0; i < 4; i++) {
2827 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2828 s->linesize) * block_s,
2829 ref_picture[0], 0, 0,
2830 (2 * mb_x + (i & 1)) * block_s,
2831 (2 * mb_y + (i >> 1)) * block_s,
2832 s->width, s->height, s->linesize,
2833 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2834 block_s, block_s, pix_op,
2835 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate the luma MVs for the combined chroma vector */
2837 mx += s->mv[dir][i][0];
2838 my += s->mv[dir][i][1];
2841 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2842 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field MVs inside a frame picture: two half-height predictions */
2846 if (s->picture_structure == PICT_FRAME) {
2848 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2849 1, 0, s->field_select[dir][0],
2850 ref_picture, pix_op,
2851 s->mv[dir][0][0], s->mv[dir][0][1],
2854 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2855 1, 1, s->field_select[dir][1],
2856 ref_picture, pix_op,
2857 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture referencing the opposite field of the current frame */
2860 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2861 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2862 ref_picture = s->current_picture_ptr->f->data;
2865 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2866 0, 0, s->field_select[dir][0],
2867 ref_picture, pix_op,
2869 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two vectors, each covering half the MB height */
2873 for (i = 0; i < 2; i++) {
2874 uint8_t **ref2picture;
2876 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2877 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2878 ref2picture = ref_picture;
2880 ref2picture = s->current_picture_ptr->f->data;
2883 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2884 0, 0, s->field_select[dir][i],
2885 ref2picture, pix_op,
2886 s->mv[dir][i][0], s->mv[dir][i][1] +
2887 2 * block_s * i, block_s, mb_y >> 1);
2889 dest_y += 2 * block_s * s->linesize;
2890 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2891 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* dual prime: predictions from both parities are averaged */
2895 if (s->picture_structure == PICT_FRAME) {
2896 for (i = 0; i < 2; i++) {
2898 for (j = 0; j < 2; j++) {
2899 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2901 ref_picture, pix_op,
2902 s->mv[dir][2 * i + j][0],
2903 s->mv[dir][2 * i + j][1],
2906 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2909 for (i = 0; i < 2; i++) {
2910 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2911 0, 0, s->picture_structure != i + 1,
2912 ref_picture, pix_op,
2913 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2914 2 * block_s, mb_y >> 1);
2916 // after put we make avg of the same block
2917 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2919 // opposite parity is always in the same
2920 // frame if this is second field
2921 if (!s->first_field) {
2922 ref_picture = s->current_picture_ptr->f->data;
2933 * find the lowest MB row referenced in the MVs
/* Find the lowest macroblock row referenced by the MVs of the current MB
 * in the given direction, clamped to the picture.  Used for frame-thread
 * synchronization (ff_thread_await_progress).
 * NOTE(review): the switch cases setting 'mvs'/'off' and the 'unhandled'
 * fallthrough are missing from this view -- confirm against upstream. */
2935 int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
/* half-pel MVs get one extra shift to reach quarter-pel units */
2937 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2938 int my, off, i, mvs;
/* field pictures / GMC: be conservative, wait for the whole frame */
2940 if (s->picture_structure != PICT_FRAME || s->mcsel)
2943 switch (s->mv_type) {
2957 for (i = 0; i < mvs; i++) {
2958 my = s->mv[dir][i][1];
2959 my_max = FFMAX(my_max, my);
2960 my_min = FFMIN(my_min, my);
/* largest vertical excursion, in MB rows (64 = 16 pixels in qpel units) */
2963 off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
2965 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2967 return s->mb_height-1;
2970 /* put block[] to dest[] */
2971 static inline void put_dct(MpegEncContext *s,
2972 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2974 s->dct_unquantize_intra(s, block, i, qscale);
2975 s->idsp.idct_put(dest, line_size, block);
2978 /* add block[] to dest[] */
2979 static inline void add_dct(MpegEncContext *s,
2980 int16_t *block, int i, uint8_t *dest, int line_size)
2982 if (s->block_last_index[i] >= 0) {
2983 s->idsp.idct_add(dest, line_size, block);
2987 static inline void add_dequant_dct(MpegEncContext *s,
2988 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2990 if (s->block_last_index[i] >= 0) {
2991 s->dct_unquantize_inter(s, block, i, qscale);
2993 s->idsp.idct_add(dest, line_size, block);
2998 * Clean dc, ac, coded_block for the current non-intra MB.
/* Reset the DC/AC prediction state (dc_val, ac_val, coded_block,
 * mbintra_table) for the current non-intra macroblock, so subsequent
 * intra MBs do not predict from stale values.
 * NOTE(review): lines are missing from this view (e.g. the first
 * s->dc_val[0][xy] assignment and the s->dc_val[1][xy] line) -- confirm
 * against upstream before modifying. */
3000 void ff_clean_intra_table_entries(MpegEncContext *s)
3002 int wrap = s->b8_stride;
3003 int xy = s->block_index[0];
/* 1024 is the neutral DC predictor (8 * 128) */
3006 s->dc_val[0][xy + 1 ] =
3007 s->dc_val[0][xy + wrap] =
3008 s->dc_val[0][xy + 1 + wrap] = 1024;
/* clear luma AC prediction for the two 8x8 rows of this MB */
3010 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
3011 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
3012 if (s->msmpeg4_version>=3) {
3013 s->coded_block[xy ] =
3014 s->coded_block[xy + 1 ] =
3015 s->coded_block[xy + wrap] =
3016 s->coded_block[xy + 1 + wrap] = 0;
/* chroma tables use the MB-resolution stride */
3019 wrap = s->mb_stride;
3020 xy = s->mb_x + s->mb_y * wrap;
3022 s->dc_val[2][xy] = 1024;
3024 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
3025 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
3027 s->mbintra_table[xy]= 0;
3030 /* generic function called after a macroblock has been parsed by the
3031 decoder or after it has been encoded by the encoder.
3033 Important variables used:
3034 s->mb_intra : true if intra macroblock
3035 s->mv_dir : motion vector direction
3036 s->mv_type : motion vector type
3037 s->mv : motion vector
3038 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstruct one macroblock: motion compensation (normal or lowres path)
 * followed by dequantization + IDCT of the residual / intra blocks, and
 * maintenance of skip/intra bookkeeping tables.  Called after a MB has
 * been parsed (decoder) or encoded (encoder).
 * Inputs used: s->mb_intra, s->mv_dir, s->mv_type, s->mv,
 * s->interlaced_dct; is_mpeg12/lowres_flag select compile-time variants.
 * NOTE(review): this view is missing many lines (hwaccel guard, loop
 * headers in the debug dump, skip handling, else branches and closing
 * braces) -- confirm against upstream before modifying. */
3040 static av_always_inline
3041 void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
3042 int lowres_flag, int is_mpeg12)
3044 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* hardware acceleration takes over the whole MB reconstruction */
3047 s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
3048 s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
3052 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
3053 /* print DCT coefficients */
3055 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
3057 for(j=0; j<64; j++){
3058 av_log(s->avctx, AV_LOG_DEBUG, "%5d",
3059 block[i][s->idsp.idct_permutation[j]]);
3061 av_log(s->avctx, AV_LOG_DEBUG, "\n");
3065 s->current_picture.qscale_table[mb_xy] = s->qscale;
3067 /* update DC predictors for P macroblocks */
3069 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
3070 if(s->mbintra_table[mb_xy])
3071 ff_clean_intra_table_entries(s);
3075 s->last_dc[2] = 128 << s->intra_dc_precision;
3078 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
3079 s->mbintra_table[mb_xy]=1;
/* reconstruction can be skipped while encoding unless the pixels are needed */
3081 if ( (s->flags&CODEC_FLAG_PSNR)
3082 || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
3083 || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
3084 uint8_t *dest_y, *dest_cb, *dest_cr;
3085 int dct_linesize, dct_offset;
3086 op_pixels_func (*op_pix)[4];
3087 qpel_mc_func (*op_qpix)[16];
3088 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3089 const int uvlinesize = s->current_picture.f->linesize[1];
3090 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
3091 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
3093 /* avoid copy if macroblock skipped in last frame too */
3094 /* skip only during decoding as we might trash the buffers during encoding a bit */
3096 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
3098 if (s->mb_skipped) {
3100 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
3102 } else if(!s->current_picture.reference) {
3105 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: fields interleaved, so stride doubles and the second
 * field starts one line (not one block) below */
3109 dct_linesize = linesize << s->interlaced_dct;
3110 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
3114 dest_cb= s->dest[1];
3115 dest_cr= s->dest[2];
/* non-readable pictures are reconstructed into a scratchpad first */
3117 dest_y = s->b_scratchpad;
3118 dest_cb= s->b_scratchpad+16*linesize;
3119 dest_cr= s->b_scratchpad+32*linesize;
3123 /* motion handling */
3124 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the reference rows are decoded */
3127 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
3128 if (s->mv_dir & MV_DIR_FORWARD) {
3129 ff_thread_await_progress(&s->last_picture_ptr->tf,
3130 ff_mpv_lowest_referenced_row(s, 0),
3133 if (s->mv_dir & MV_DIR_BACKWARD) {
3134 ff_thread_await_progress(&s->next_picture_ptr->tf,
3135 ff_mpv_lowest_referenced_row(s, 1),
/* lowres path: bidirectional MBs put forward then average backward */
3141 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
3143 if (s->mv_dir & MV_DIR_FORWARD) {
3144 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
3145 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
3147 if (s->mv_dir & MV_DIR_BACKWARD) {
3148 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
/* full-resolution path */
3151 op_qpix = s->me.qpel_put;
3152 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
3153 op_pix = s->hdsp.put_pixels_tab;
3155 op_pix = s->hdsp.put_no_rnd_pixels_tab;
3157 if (s->mv_dir & MV_DIR_FORWARD) {
3158 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
3159 op_pix = s->hdsp.avg_pixels_tab;
3160 op_qpix= s->me.qpel_avg;
3162 if (s->mv_dir & MV_DIR_BACKWARD) {
3163 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
3168 /* skip dequant / idct if we are really late ;) */
3169 if(s->avctx->skip_idct){
3170 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
3171 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
3172 || s->avctx->skip_idct >= AVDISCARD_ALL)
3176 /* add dct residue */
/* codecs whose unquantization was not already done during parsing */
3177 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
3178 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
3179 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3180 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3181 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3182 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3184 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3185 if (s->chroma_y_shift){
3186 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3187 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3191 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3192 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3193 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3194 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
3197 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3198 add_dct(s, block[0], 0, dest_y , dct_linesize);
3199 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3200 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3201 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3203 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3204 if(s->chroma_y_shift){//Chroma420
3205 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3206 add_dct(s, block[5], 5, dest_cr, uvlinesize);
3209 dct_linesize = uvlinesize << s->interlaced_dct;
3210 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3212 add_dct(s, block[4], 4, dest_cb, dct_linesize);
3213 add_dct(s, block[5], 5, dest_cr, dct_linesize);
3214 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3215 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3216 if(!s->chroma_x_shift){//Chroma444
3217 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3218 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3219 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3220 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3225 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3226 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3229 /* dct only in intra block */
3230 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
3231 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3232 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3233 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3234 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3236 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3237 if(s->chroma_y_shift){
3238 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3239 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3243 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3244 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3245 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3246 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra blocks are already dequantized: plain IDCT put */
3250 s->idsp.idct_put(dest_y, dct_linesize, block[0]);
3251 s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3252 s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
3253 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3255 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3256 if(s->chroma_y_shift){
3257 s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
3258 s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
3261 dct_linesize = uvlinesize << s->interlaced_dct;
3262 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3264 s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
3265 s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
3266 s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3267 s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3268 if(!s->chroma_x_shift){//Chroma444
3269 s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3270 s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3271 s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3272 s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* copy scratchpad reconstruction back into the picture */
3280 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3281 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
3282 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3283 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point for MB reconstruction: selects the proper
 * specialization of mpv_decode_mb_internal() (lowres/is_mpeg12 are
 * compile-time constants thanks to av_always_inline).
 * NOTE(review): the '#if !CONFIG_SMALL' / else lines appear to be missing
 * from this view -- confirm against upstream. */
3289 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
3292 if(s->out_format == FMT_MPEG1) {
3293 if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 1);
3294 else mpv_decode_mb_internal(s, block, 0, 1);
3297 if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 0);
3298 else mpv_decode_mb_internal(s, block, 0, 0);
3301 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3303 ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3304 s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3305 s->first_field, s->low_delay);
/* Compute the block_index[] entries and dest[] plane pointers for the
 * current MB position, accounting for lowres scaling and field pictures.
 * NOTE(review): lines appear to be missing (the early 'return' under the
 * B-frame/draw_horiz_band condition and an 'else' line) -- confirm
 * against upstream. */
3308 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3309 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3310 const int uvlinesize = s->current_picture.f->linesize[1];
/* log2 of the MB size at this lowres level (16 >> lowres pixels) */
3311 const int mb_size= 4 - s->avctx->lowres;
/* indices 0-3: the four luma 8x8 blocks; 4,5: chroma blocks */
3313 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3314 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3315 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3316 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3317 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3318 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3319 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* 1U arithmetic avoids signed overflow for mb_x == 0 */
3321 s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) << mb_size);
3322 s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (mb_size - s->chroma_x_shift));
3323 s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (mb_size - s->chroma_x_shift));
3325 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3327 if(s->picture_structure==PICT_FRAME){
3328 s->dest[0] += s->mb_y * linesize << mb_size;
3329 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3330 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: rows alternate between the two fields */
3332 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3333 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3334 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3335 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3341 * Permute an 8x8 block.
3342 * @param block the block which will be permuted according to the given permutation vector
3343 * @param permutation the permutation vector
3344 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3345 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3346 * (inverse) permutated to scantable order!
/* Permute an 8x8 coefficient block in place according to 'permutation',
 * touching only the first 'last'+1 coefficients in scantable order.
 * NOTE(review): the temp buffer declaration and the copy/clear body of
 * the first loop are missing from this view -- confirm against upstream. */
3348 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3354 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* first pass: save the nonzero region (in scantable order) and clear it */
3356 for(i=0; i<=last; i++){
3357 const int j= scantable[i];
/* second pass: write the saved values back at their permuted positions */
3362 for(i=0; i<=last; i++){
3363 const int j= scantable[i];
3364 const int perm_j= permutation[j];
3365 block[perm_j]= temp[j];
/* Flush the decoder state: unreference all pictures, reset the MB position
 * and the parser/bitstream buffer state.  Safe to call before init (the
 * NULL checks guard that case).
 * NOTE(review): closing braces and possibly other lines are missing from
 * this view -- confirm against upstream. */
3369 void ff_mpeg_flush(AVCodecContext *avctx){
3371 MpegEncContext *s = avctx->priv_data;
3373 if (!s || !s->picture)
3376 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3377 ff_mpeg_unref_picture(s, &s->picture[i]);
3378 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3380 ff_mpeg_unref_picture(s, &s->current_picture);
3381 ff_mpeg_unref_picture(s, &s->last_picture);
3382 ff_mpeg_unref_picture(s, &s->next_picture);
3384 s->mb_x= s->mb_y= 0;
/* reset the start-code parser so leftover data is discarded */
3387 s->parse_context.state= -1;
3388 s->parse_context.frame_start_found= 0;
3389 s->parse_context.overread= 0;
3390 s->parse_context.overread_index= 0;
3391 s->parse_context.index= 0;
3392 s->parse_context.last_index= 0;
3393 s->bitstream_buffer_size=0;
3398 * set qscale and update qscale dependent variables.
/* Set the quantizer scale and update the derived chroma qscale and
 * luma/chroma DC scale values.
 * NOTE(review): the clamp-to-1 branch and the 's->qscale = qscale;'
 * assignment are missing from this view -- confirm against upstream;
 * only the upper clamp (> 31) is visible. */
3400 void ff_set_qscale(MpegEncContext * s, int qscale)
3404 else if (qscale > 31)
/* chroma qscale follows the codec-specific mapping table */
3408 s->chroma_qscale= s->chroma_qscale_table[qscale];
3410 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3411 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
3414 void ff_mpv_report_decode_progress(MpegEncContext *s)
3416 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3417 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);