2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/motion_vector.h"
35 #include "libavutil/timer.h"
38 #include "h264chroma.h"
42 #include "mpegutils.h"
43 #include "mpegvideo.h"
/* Default luma-qscale -> chroma-qscale mapping: the identity (chroma uses
 * the same quantizer scale as luma). */
50 static const uint8_t ff_default_chroma_qscale_table[32] = {
51 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
52 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
53 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale: a constant 8 for every qscale value (128 entries). */
56 const uint8_t ff_mpeg1_dc_scale_table[128] = {
57 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
58 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
59 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
60 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
61 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
62 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
63 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
64 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
65 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table, constant 4 — entry 1 of ff_mpeg2_dc_scale_table
 * below (presumably selected by intra_dc_precision; confirm at call site). */
68 static const uint8_t mpeg2_dc_scale_table1[128] = {
69 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
70 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
71 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
72 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
73 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
74 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
75 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
76 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
77 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table, constant 2 — entry 2 of ff_mpeg2_dc_scale_table. */
80 static const uint8_t mpeg2_dc_scale_table2[128] = {
81 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
82 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
83 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
84 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
85 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
86 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
87 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
88 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
89 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table, constant 1 — entry 3 of ff_mpeg2_dc_scale_table. */
92 static const uint8_t mpeg2_dc_scale_table3[128] = {
93 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
94 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
95 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
96 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
97 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
98 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* The four MPEG-2 DC scale tables (8/4/2/1) gathered for indexed lookup. */
104 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
105 ff_mpeg1_dc_scale_table,
106 mpeg2_dc_scale_table1,
107 mpeg2_dc_scale_table2,
108 mpeg2_dc_scale_table3,
/* Alternate horizontal coefficient scan order (64 raster positions). */
111 const uint8_t ff_alternate_horizontal_scan[64] = {
112 0, 1, 2, 3, 8, 9, 16, 17,
113 10, 11, 4, 5, 6, 7, 15, 14,
114 13, 12, 19, 18, 24, 25, 32, 33,
115 26, 27, 20, 21, 22, 23, 28, 29,
116 30, 31, 34, 35, 40, 41, 48, 49,
117 42, 43, 36, 37, 38, 39, 44, 45,
118 46, 47, 50, 51, 56, 57, 58, 59,
119 52, 53, 54, 55, 60, 61, 62, 63,
/* Alternate vertical coefficient scan order (used for interlaced content;
 * selected in ff_mpv_idct_init when alternate_scan is set). */
122 const uint8_t ff_alternate_vertical_scan[64] = {
123 0, 8, 16, 24, 1, 9, 2, 10,
124 17, 25, 32, 40, 48, 56, 57, 49,
125 41, 33, 26, 18, 3, 11, 4, 12,
126 19, 27, 34, 42, 50, 58, 35, 43,
127 51, 59, 20, 28, 5, 13, 6, 14,
128 21, 29, 36, 44, 52, 60, 37, 45,
129 53, 61, 22, 30, 7, 15, 23, 31,
130 38, 46, 54, 62, 39, 47, 55, 63,
/* Dequantize an MPEG-1 intra block in place.
 * block:  DCT coefficients (modified in place)
 * n:      block index — n < 4 selects the luma DC scale, else chroma
 * qscale: quantizer scale for this macroblock
 * Only coefficients up to block_last_index are touched; DC is handled
 * separately from the AC loop. */
133 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
134 int16_t *block, int n, int qscale)
136 int i, level, nCoeffs;
137 const uint16_t *quant_matrix;
139 nCoeffs= s->block_last_index[n];
141 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
142 /* XXX: only mpeg1 */
143 quant_matrix = s->intra_matrix;
144 for(i=1;i<=nCoeffs;i++) {
145 int j= s->intra_scantable.permutated[i];
150 level = (int)(level * qscale * quant_matrix[j]) >> 3;
// (level - 1) | 1 forces the result odd (MPEG-1 mismatch control)
151 level = (level - 1) | 1;
154 level = (int)(level * qscale * quant_matrix[j]) >> 3;
155 level = (level - 1) | 1;
/* Dequantize an MPEG-1 inter (non-intra) block in place.
 * Unlike the intra path, the DC coefficient is treated like any AC
 * coefficient (loop starts at i = 0) and the inter matrix is used.
 * NOTE: intra_scantable.permutated is used here on purpose — both
 * scantables share the same IDCT permutation. */
162 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
163 int16_t *block, int n, int qscale)
165 int i, level, nCoeffs;
166 const uint16_t *quant_matrix;
168 nCoeffs= s->block_last_index[n];
170 quant_matrix = s->inter_matrix;
171 for(i=0; i<=nCoeffs; i++) {
172 int j= s->intra_scantable.permutated[i];
177 level = (((level << 1) + 1) * qscale *
178 ((int) (quant_matrix[j]))) >> 4;
// force result odd (mismatch control)
179 level = (level - 1) | 1;
182 level = (((level << 1) + 1) * qscale *
183 ((int) (quant_matrix[j]))) >> 4;
184 level = (level - 1) | 1;
/* Dequantize an MPEG-2 intra block in place.
 * With alternate_scan the full 64 coefficients are processed regardless
 * of block_last_index. No odd-forcing here — MPEG-2 mismatch control
 * differs from MPEG-1 (see the bitexact variant below). */
191 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
192 int16_t *block, int n, int qscale)
194 int i, level, nCoeffs;
195 const uint16_t *quant_matrix;
197 if(s->alternate_scan) nCoeffs= 63;
198 else nCoeffs= s->block_last_index[n];
200 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
201 quant_matrix = s->intra_matrix;
202 for(i=1;i<=nCoeffs;i++) {
203 int j= s->intra_scantable.permutated[i];
208 level = (int)(level * qscale * quant_matrix[j]) >> 3;
211 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of MPEG-2 intra dequantization; selected in dct_init
 * when CODEC_FLAG_BITEXACT is set (elided lines presumably carry the
 * spec-exact mismatch handling — confirm against full source). */
218 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
219 int16_t *block, int n, int qscale)
221 int i, level, nCoeffs;
222 const uint16_t *quant_matrix;
225 if(s->alternate_scan) nCoeffs= 63;
226 else nCoeffs= s->block_last_index[n];
228 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
230 quant_matrix = s->intra_matrix;
231 for(i=1;i<=nCoeffs;i++) {
232 int j= s->intra_scantable.permutated[i];
237 level = (int)(level * qscale * quant_matrix[j]) >> 3;
240 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Dequantize an MPEG-2 inter block in place: loop includes the DC term
 * (i = 0), uses the inter matrix, and covers all 64 coefficients when
 * alternate_scan is set. */
249 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
250 int16_t *block, int n, int qscale)
252 int i, level, nCoeffs;
253 const uint16_t *quant_matrix;
256 if(s->alternate_scan) nCoeffs= 63;
257 else nCoeffs= s->block_last_index[n];
259 quant_matrix = s->inter_matrix;
260 for(i=0; i<=nCoeffs; i++) {
// intra_scantable is intentional: the permutation is shared
261 int j= s->intra_scantable.permutated[i];
266 level = (((level << 1) + 1) * qscale *
267 ((int) (quant_matrix[j]))) >> 4;
270 level = (((level << 1) + 1) * qscale *
271 ((int) (quant_matrix[j]))) >> 4;
/* Dequantize an H.263 intra block in place: level' = level*qmul ± qadd
 * depending on sign. With h263_aic a block may have no coded coefficients
 * (see the assert). nCoeffs comes from raster_end, i.e. the raster-order
 * position of the last coded coefficient. */
280 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
281 int16_t *block, int n, int qscale)
283 int i, level, qmul, qadd;
286 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
291 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
// (qscale - 1) | 1: odd rounding offset per H.263 inverse quantization
292 qadd = (qscale - 1) | 1;
299 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
301 for(i=1; i<=nCoeffs; i++) {
305 level = level * qmul - qadd;
307 level = level * qmul + qadd;
/* Dequantize an H.263 inter block in place. Same qmul/qadd scheme as the
 * intra path but the loop starts at i = 0 (DC has no special scale). */
314 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
315 int16_t *block, int n, int qscale)
317 int i, level, qmul, qadd;
320 av_assert2(s->block_last_index[n]>=0);
322 qadd = (qscale - 1) | 1;
325 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
327 for(i=0; i<=nCoeffs; i++) {
331 level = level * qmul - qadd;
333 level = level * qmul + qadd;
/* Error-resilience callback (installed in init_er as er->decode_mb):
 * loads the concealment parameters for one macroblock into the context,
 * points s->dest[] at the MB's position in the current frame, and
 * reconstructs it via ff_mpv_decode_mb(). */
340 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
342 int mb_x, int mb_y, int mb_intra, int mb_skipped)
344 MpegEncContext *s = opaque;
347 s->mv_type = mv_type;
348 s->mb_intra = mb_intra;
349 s->mb_skipped = mb_skipped;
352 memcpy(s->mv, mv, sizeof(*mv));
354 ff_init_block_index(s);
355 ff_update_block_index(s);
357 s->bdsp.clear_blocks(s->block[0]);
// luma at 16x16 granularity; chroma offsets shifted by the subsampling factors
359 s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
360 s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
361 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
364 av_log(s->avctx, AV_LOG_DEBUG,
365 "Interlaced error concealment is not fully implemented\n");
366 ff_mpv_decode_mb(s, s->block);
/* Debug stub (FF_DEBUG_NOMC): replaces the 16-wide pixel copy with a fill
 * of mid-gray (128), making motion-compensated areas visibly flat. */
369 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
372 memset(dst + h*linesize, 128, 16);
/* Debug stub (FF_DEBUG_NOMC): 8-wide counterpart of gray16. */
375 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
378 memset(dst + h*linesize, 128, 8);
/* Init common DCT state for both encoder and decoder: DSP sub-contexts,
 * the NOMC debug pixel stubs, the per-codec dequantization function
 * pointers, and any architecture-specific overrides. */
381 /* init common dct for both encoder and decoder */
382 static av_cold int dct_init(MpegEncContext *s)
384 ff_blockdsp_init(&s->bdsp, s->avctx);
385 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
386 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
387 ff_me_cmp_init(&s->mecc, s->avctx);
388 ff_mpegvideodsp_init(&s->mdsp);
389 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
// FF_DEBUG_NOMC: route all pixel copy/average ops to flat-gray fills
391 if (s->avctx->debug & FF_DEBUG_NOMC) {
393 for (i=0; i<4; i++) {
394 s->hdsp.avg_pixels_tab[0][i] = gray16;
395 s->hdsp.put_pixels_tab[0][i] = gray16;
396 s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
398 s->hdsp.avg_pixels_tab[1][i] = gray8;
399 s->hdsp.put_pixels_tab[1][i] = gray8;
400 s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
404 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
405 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
406 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
407 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
408 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
409 if (s->flags & CODEC_FLAG_BITEXACT)
410 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
411 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
// per-architecture SIMD/asm overrides of the defaults above
413 if (HAVE_INTRINSICS_NEON)
414 ff_mpv_common_init_neon(s);
417 ff_mpv_common_init_axp(s);
419 ff_mpv_common_init_arm(s);
421 ff_mpv_common_init_ppc(s);
423 ff_mpv_common_init_x86(s);
/* Init the IDCT context and build the permuted scantables. The inter/intra
 * tables follow alternate_scan; the horizontal/vertical tables are always
 * built from the alternate scan patterns. */
428 av_cold void ff_mpv_idct_init(MpegEncContext *s)
430 ff_idctdsp_init(&s->idsp, s->avctx);
432 /* load & permute scantables
433 * note: only wmv uses different ones
435 if (s->alternate_scan) {
436 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
437 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
439 ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
440 ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
442 ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
443 ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge emulation buffer
 * plus the shared ME/RD/B/OBMC scratchpads). Returns 0 on success, a
 * negative AVERROR on failure; frees the edge buffer on the error path. */
446 static int frame_size_alloc(MpegEncContext *s, int linesize)
448 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
// hwaccel/VDPAU decode does not use these CPU-side scratch buffers
450 if (s->avctx->hwaccel || s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
454 av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
455 return AVERROR_PATCHWELCOME;
458 // edge emu needs blocksize + filter length - 1
459 // (= 17x17 for halfpel / 21x21 for h264)
460 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
461 // at uvlinesize. It supports only YUV420 so 24x24 is enough
462 // linesize * interlaced * MBsize
463 // we also use this buffer for encoding in encode_mb_internal() needing an additional 32 lines
464 FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size, 4 * 68,
467 FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size, 4 * 16 * 2,
// the scratchpads are aliases into one allocation, not separate buffers
469 s->me.temp = s->me.scratchpad;
470 s->rd_scratchpad = s->me.scratchpad;
471 s->b_scratchpad = s->me.scratchpad;
472 s->obmc_scratchpad = s->me.scratchpad + 16;
476 av_freep(&s->edge_emu_buffer);
477 return AVERROR(ENOMEM);
481 * Allocate a frame buffer
483 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
// encoders render into an edge-extended frame; decoders use the plain size
485 int edges_needed = av_codec_is_encoder(s->avctx->codec);
// WM image/screen codecs allocate internally with different dimensions,
// so the user get_buffer path is bypassed for them
489 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
490 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
491 s->codec_id != AV_CODEC_ID_MSS2) {
493 pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
494 pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
497 r = ff_thread_get_buffer(s->avctx, &pic->tf,
498 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
500 pic->f->width = s->avctx->width;
501 pic->f->height = s->avctx->height;
502 pic->f->format = s->avctx->pix_fmt;
503 r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
506 if (r < 0 || !pic->f->buf[0]) {
507 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
// shift data pointers inward so (0,0) addresses the visible picture,
// leaving an EDGE_WIDTH border around it
514 for (i = 0; pic->f->data[i]; i++) {
515 int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
516 pic->f->linesize[i] +
517 (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
518 pic->f->data[i] += offset;
520 pic->f->width = s->avctx->width;
521 pic->f->height = s->avctx->height;
524 if (s->avctx->hwaccel) {
525 assert(!pic->hwaccel_picture_private);
526 if (s->avctx->hwaccel->frame_priv_data_size) {
527 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->frame_priv_data_size);
528 if (!pic->hwaccel_priv_buf) {
529 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
532 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
// strides must stay constant across frames for this decoder design
536 if (s->linesize && (s->linesize != pic->f->linesize[0] ||
537 s->uvlinesize != pic->f->linesize[1])) {
538 av_log(s->avctx, AV_LOG_ERROR,
539 "get_buffer() failed (stride changed)\n");
540 ff_mpeg_unref_picture(s, pic);
544 if (pic->f->linesize[1] != pic->f->linesize[2]) {
545 av_log(s->avctx, AV_LOG_ERROR,
546 "get_buffer() failed (uv stride mismatch)\n");
547 ff_mpeg_unref_picture(s, pic);
551 if (!s->edge_emu_buffer &&
552 (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
553 av_log(s->avctx, AV_LOG_ERROR,
554 "get_buffer() failed to allocate context scratch buffers.\n");
555 ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data buffers (macroblock statistics, skip/
 * qscale/type tables, motion vectors, reference indices) and reset the
 * recorded allocation dimensions. */
562 void ff_free_picture_tables(Picture *pic)
566 pic->alloc_mb_width =
567 pic->alloc_mb_height = 0;
569 av_buffer_unref(&pic->mb_var_buf);
570 av_buffer_unref(&pic->mc_mb_var_buf);
571 av_buffer_unref(&pic->mb_mean_buf);
572 av_buffer_unref(&pic->mbskip_table_buf);
573 av_buffer_unref(&pic->qscale_table_buf);
574 av_buffer_unref(&pic->mb_type_buf);
576 for (i = 0; i < 2; i++) {
577 av_buffer_unref(&pic->motion_val_buf[i]);
578 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data buffers sized from the current
 * mb/b8 strides, then record the dimensions they were allocated for.
 * Returns 0 or AVERROR(ENOMEM); partially allocated buffers are left for
 * the caller to free via ff_free_picture_tables(). */
582 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
584 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
585 const int mb_array_size = s->mb_stride * s->mb_height;
586 const int b8_array_size = s->b8_stride * s->mb_height * 2;
590 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
591 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
592 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
594 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
595 return AVERROR(ENOMEM);
// encoder-only statistics tables (variance / MC variance / mean)
598 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
599 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
600 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
601 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
602 return AVERROR(ENOMEM);
// motion vectors / ref indices are needed for H.263-family output,
// encoding, or when MV visualization/export is requested
605 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv ||
606 (s->avctx->flags2 & CODEC_FLAG2_EXPORT_MVS)) {
607 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
608 int ref_index_size = 4 * mb_array_size;
610 for (i = 0; mv_size && i < 2; i++) {
611 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
612 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
613 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
614 return AVERROR(ENOMEM);
618 pic->alloc_mb_width = s->mb_width;
619 pic->alloc_mb_height = s->mb_height;
/* Ensure every per-picture table buffer is writable (copy-on-write if it
 * is currently shared). The MAKE_WRITABLE macro's elided body presumably
 * skips NULL buffers and propagates av_buffer_make_writable() errors. */
624 static int make_tables_writable(Picture *pic)
627 #define MAKE_WRITABLE(table) \
630 (ret = av_buffer_make_writable(&pic->table)) < 0)\
634 MAKE_WRITABLE(mb_var_buf);
635 MAKE_WRITABLE(mc_mb_var_buf);
636 MAKE_WRITABLE(mb_mean_buf);
637 MAKE_WRITABLE(mbskip_table_buf);
638 MAKE_WRITABLE(qscale_table_buf);
639 MAKE_WRITABLE(mb_type_buf);
641 for (i = 0; i < 2; i++) {
642 MAKE_WRITABLE(motion_val_buf[i]);
643 MAKE_WRITABLE(ref_index_buf[i]);
650 * Allocate a Picture.
651 * The pixels are allocated/set by calling get_buffer() if shared = 0.
652 * Side-data tables are (re)allocated if missing or sized for different
653 * dimensions, made writable, and the convenience pointers into them are
654 * set (qscale_table/mb_type are offset past the table's border rows).
653 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
// dimensions changed since the tables were allocated -> drop and redo
657 if (pic->qscale_table_buf)
658 if ( pic->alloc_mb_width != s->mb_width
659 || pic->alloc_mb_height != s->mb_height)
660 ff_free_picture_tables(pic);
663 av_assert0(pic->f->data[0]);
666 av_assert0(!pic->f->buf[0]);
668 if (alloc_frame_buffer(s, pic) < 0)
// remember the strides the whole context will assume from now on
671 s->linesize = pic->f->linesize[0];
672 s->uvlinesize = pic->f->linesize[1];
675 if (!pic->qscale_table_buf)
676 ret = alloc_picture_tables(s, pic);
678 ret = make_tables_writable(pic);
683 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
684 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
685 pic->mb_mean = pic->mb_mean_buf->data;
688 pic->mbskip_table = pic->mbskip_table_buf->data;
689 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
690 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
692 if (pic->motion_val_buf[0]) {
693 for (i = 0; i < 2; i++) {
694 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
695 pic->ref_index[i] = pic->ref_index_buf[i]->data;
701 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
702 ff_mpeg_unref_picture(s, pic);
703 ff_free_picture_tables(pic);
704 return AVERROR(ENOMEM);
708 * Deallocate a picture: release the frame buffer and hwaccel data, then
709 * zero every Picture field past the table pointers (the offsetof trick
710 * preserves the buffer refs at the start of the struct).
710 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
712 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
715 /* WM Image / Screen codecs allocate internal buffers with different
716 * dimensions / colorspaces; ignore user-defined callbacks for these. */
717 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
718 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
719 s->codec_id != AV_CODEC_ID_MSS2)
720 ff_thread_release_buffer(s->avctx, &pic->tf);
722 av_frame_unref(pic->f);
724 av_buffer_unref(&pic->hwaccel_priv_buf);
726 if (pic->needs_realloc)
727 ff_free_picture_tables(pic);
729 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's side-data table buffers reference src's (re-referencing only
 * when the underlying buffer differs), then copy the derived data pointers
 * and allocation dimensions. On a failed ref, all of dst's tables are
 * freed and AVERROR(ENOMEM) is returned. */
732 static int update_picture_tables(Picture *dst, Picture *src)
736 #define UPDATE_TABLE(table)\
739 (!dst->table || dst->table->buffer != src->table->buffer)) {\
740 av_buffer_unref(&dst->table);\
741 dst->table = av_buffer_ref(src->table);\
743 ff_free_picture_tables(dst);\
744 return AVERROR(ENOMEM);\
749 UPDATE_TABLE(mb_var_buf);
750 UPDATE_TABLE(mc_mb_var_buf);
751 UPDATE_TABLE(mb_mean_buf);
752 UPDATE_TABLE(mbskip_table_buf);
753 UPDATE_TABLE(qscale_table_buf);
754 UPDATE_TABLE(mb_type_buf);
755 for (i = 0; i < 2; i++) {
756 UPDATE_TABLE(motion_val_buf[i]);
757 UPDATE_TABLE(ref_index_buf[i]);
// the raw pointers are shared as-is; they point into the shared buffers
760 dst->mb_var = src->mb_var;
761 dst->mc_mb_var = src->mc_mb_var;
762 dst->mb_mean = src->mb_mean;
763 dst->mbskip_table = src->mbskip_table;
764 dst->qscale_table = src->qscale_table;
765 dst->mb_type = src->mb_type;
766 for (i = 0; i < 2; i++) {
767 dst->motion_val[i] = src->motion_val[i];
768 dst->ref_index[i] = src->ref_index[i];
771 dst->alloc_mb_width = src->alloc_mb_width;
772 dst->alloc_mb_height = src->alloc_mb_height;
/* Make dst a new reference to src: frame buffer (via thread-safe frame
 * ref), side-data tables, optional hwaccel private data, and the scalar
 * bookkeeping fields. On failure dst is fully unreferenced. */
777 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
781 av_assert0(!dst->f->buf[0]);
782 av_assert0(src->f->buf[0]);
786 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
790 ret = update_picture_tables(dst, src);
794 if (src->hwaccel_picture_private) {
795 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
796 if (!dst->hwaccel_priv_buf)
798 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
801 dst->field_picture = src->field_picture;
802 dst->mb_var_sum = src->mb_var_sum;
803 dst->mc_mb_var_sum = src->mc_mb_var_sum;
804 dst->b_frame_score = src->b_frame_score;
805 dst->needs_realloc = src->needs_realloc;
806 dst->reference = src->reference;
807 dst->shared = src->shared;
811 ff_mpeg_unref_picture(s, dst);
/* Swap the U/V block pointers (pblocks[4]/pblocks[5]) — used for the
 * "VCR2" codec tag, which stores chroma in the opposite order. The other
 * half of the swap is on elided lines. */
815 static void exchange_uv(MpegEncContext *s)
820 s->pblocks[4] = s->pblocks[5];
/* Initialize the per-thread (duplicate) part of the context: ME maps,
 * optional noise-reduction error accumulator, the 12 coefficient blocks
 * with their pblocks[] pointers, and for H.263-family output the AC
 * prediction values. Returns 0 or -1 (caller frees via
 * ff_mpv_common_end()). */
824 static int init_duplicate_context(MpegEncContext *s)
826 int y_size = s->b8_stride * (2 * s->mb_height + 1);
827 int c_size = s->mb_stride * (s->mb_height + 1);
828 int yc_size = y_size + 2 * c_size;
// odd mb_height needs one extra row in each plane's prediction array
831 if (s->mb_height & 1)
832 yc_size += 2*s->b8_stride + 2*s->mb_stride;
839 s->obmc_scratchpad = NULL;
842 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
843 ME_MAP_SIZE * sizeof(uint32_t), fail)
844 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
845 ME_MAP_SIZE * sizeof(uint32_t), fail)
846 if (s->avctx->noise_reduction) {
847 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
848 2 * 64 * sizeof(int), fail)
851 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
852 s->block = s->blocks[0];
854 for (i = 0; i < 12; i++) {
855 s->pblocks[i] = &s->block[i];
// VCR2 stores U/V swapped relative to the default layout
857 if (s->avctx->codec_tag == AV_RL32("VCR2"))
860 if (s->out_format == FMT_H263) {
862 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
863 yc_size * sizeof(int16_t) * 16, fail);
// skip the border row/column so [-1] accesses stay in bounds
864 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
865 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
866 s->ac_val[2] = s->ac_val[1] + c_size;
871 return -1; // free() through ff_mpv_common_end()
/* Free everything init_duplicate_context / frame_size_alloc allocated for
 * one thread context. The scratchpad aliases (rd/b/obmc) share the
 * me.scratchpad allocation, so only that one is freed. */
874 static void free_duplicate_context(MpegEncContext *s)
879 av_freep(&s->edge_emu_buffer);
880 av_freep(&s->me.scratchpad);
884 s->obmc_scratchpad = NULL;
886 av_freep(&s->dct_error_sum);
887 av_freep(&s->me.map);
888 av_freep(&s->me.score_map);
889 av_freep(&s->blocks);
890 av_freep(&s->ac_val_base);
/* Copy the per-thread pointer fields from src into bak, so that
 * ff_update_duplicate_context() can memcpy the whole struct and then
 * restore these thread-local allocations (more COPY() lines are elided). */
894 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
896 #define COPY(a) bak->a = src->a
897 COPY(edge_emu_buffer);
902 COPY(obmc_scratchpad);
909 COPY(me.map_generation);
/* Sync a slice-thread context with the main one: save dst's thread-local
 * fields, bulk-copy src over dst, restore the saved fields, and rebuild
 * the pblocks[] pointers (with the VCR2 U/V swap). Allocates dst's
 * scratch buffers if missing. */
921 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
925 // FIXME copy only needed parts
927 backup_duplicate_context(&bak, dst);
928 memcpy(dst, src, sizeof(MpegEncContext));
929 backup_duplicate_context(dst, &bak);
930 for (i = 0; i < 12; i++) {
931 dst->pblocks[i] = &dst->block[i];
933 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
935 if (!dst->edge_emu_buffer &&
936 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
937 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
938 "scratch buffers.\n");
941 // STOP_TIMER("update_duplicate_context")
942 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading state transfer: copy the decoding state of src's
 * MpegEncContext (s1) into dst's (s) — pictures, timing, bug workarounds,
 * the pending bitstream buffer, and the MPEG-2/interlacing field range —
 * initializing or resizing dst's context first when needed. */
946 int ff_mpeg_update_thread_context(AVCodecContext *dst,
947 const AVCodecContext *src)
950 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
957 // FIXME can parameters change on I-frames?
958 // in that case dst may need a reinit
959 if (!s->context_initialized) {
// start from a wholesale copy, then re-init what must be per-context
961 memcpy(s, s1, sizeof(MpegEncContext));
964 s->bitstream_buffer = NULL;
965 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
967 if (s1->context_initialized){
968 // s->picture_range_start += MAX_PICTURE_COUNT;
969 // s->picture_range_end += MAX_PICTURE_COUNT;
971 if((err = ff_mpv_common_init(s)) < 0){
972 memset(s, 0, sizeof(MpegEncContext));
979 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
980 s->context_reinit = 0;
981 s->height = s1->height;
982 s->width = s1->width;
983 if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
987 s->avctx->coded_height = s1->avctx->coded_height;
988 s->avctx->coded_width = s1->avctx->coded_width;
989 s->avctx->width = s1->avctx->width;
990 s->avctx->height = s1->avctx->height;
992 s->coded_picture_number = s1->coded_picture_number;
993 s->picture_number = s1->picture_number;
995 av_assert0(!s->picture || s->picture != s1->picture);
// re-reference every picture slot from the source thread
997 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
998 ff_mpeg_unref_picture(s, &s->picture[i]);
999 if (s1->picture[i].f->buf[0] &&
1000 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
1004 #define UPDATE_PICTURE(pic)\
1006 ff_mpeg_unref_picture(s, &s->pic);\
1007 if (s1->pic.f && s1->pic.f->buf[0])\
1008 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
1010 ret = update_picture_tables(&s->pic, &s1->pic);\
1015 UPDATE_PICTURE(current_picture);
1016 UPDATE_PICTURE(last_picture);
1017 UPDATE_PICTURE(next_picture);
// pointers into s1->picture[] are rebased to point into s->picture[]
1019 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1020 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1021 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1023 // Error/bug resilience
1024 s->next_p_frame_damaged = s1->next_p_frame_damaged;
1025 s->workaround_bugs = s1->workaround_bugs;
1026 s->padding_bug_score = s1->padding_bug_score;
1028 // MPEG4 timing info: bulk-copy the field range last_time_base..pb_field_time
1029 memcpy(&s->last_time_base, &s1->last_time_base,
1030 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1031 (char *) &s1->last_time_base);
1034 s->max_b_frames = s1->max_b_frames;
1035 s->low_delay = s1->low_delay;
1036 s->droppable = s1->droppable;
1038 // DivX handling (doesn't work)
1039 s->divx_packed = s1->divx_packed;
1041 if (s1->bitstream_buffer) {
1042 if (s1->bitstream_buffer_size +
1043 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
1044 av_fast_malloc(&s->bitstream_buffer,
1045 &s->allocated_bitstream_buffer_size,
1046 s1->allocated_bitstream_buffer_size);
1047 s->bitstream_buffer_size = s1->bitstream_buffer_size;
1048 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1049 s1->bitstream_buffer_size);
1050 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1051 FF_INPUT_BUFFER_PADDING_SIZE);
1054 // linesize dependent scratch buffer allocation
1055 if (!s->edge_emu_buffer)
1057 if (frame_size_alloc(s, s1->linesize) < 0) {
1058 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1059 "scratch buffers.\n");
1060 return AVERROR(ENOMEM);
1063 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1064 "be allocated due to unknown size.\n");
1067 // MPEG2/interlacing info: bulk-copy progressive_sequence..rtp_mode
1068 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1069 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1071 if (!s1->first_field) {
1072 s->last_pict_type = s1->pict_type;
1073 if (s1->current_picture_ptr)
1074 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1081 * Set the given MpegEncContext to common defaults
1082 * (same for encoding and decoding).
1083 * The changed fields will not depend upon the
1084 * prior state of the MpegEncContext.
1086 void ff_mpv_common_defaults(MpegEncContext *s)
1088 s->y_dc_scale_table =
1089 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
1090 s->chroma_qscale_table = ff_default_chroma_qscale_table;
1091 s->progressive_frame = 1;
1092 s->progressive_sequence = 1;
1093 s->picture_structure = PICT_FRAME;
1095 s->coded_picture_number = 0;
1096 s->picture_number = 0;
1101 s->slice_context_count = 1;
1105 * Set the given MpegEncContext to defaults for decoding.
1106 * the changed fields will not depend upon
1107 * the prior state of the MpegEncContext.
1109 void ff_mpv_decode_defaults(MpegEncContext *s)
1111 ff_mpv_common_defaults(s);
/* Bind a decoder's MpegEncContext to its AVCodecContext: copy dimensions,
 * codec id, bug workarounds, flags, and the (upper-cased) fourcc tags. */
1114 void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
1117 s->width = avctx->coded_width;
1118 s->height = avctx->coded_height;
1119 s->codec_id = avctx->codec->id;
1120 s->workaround_bugs = avctx->workaround_bugs;
1121 s->flags = avctx->flags;
1122 s->flags2 = avctx->flags2;
1124 /* convert fourcc to upper case */
1125 s->codec_tag = avpriv_toupper4(avctx->codec_tag);
1127 s->stream_codec_tag = avpriv_toupper4(avctx->stream_codec_tag);
/* Initialize the error-resilience sub-context: mirror the geometry and
 * shared tables from the main context, allocate the two ER work buffers,
 * and install mpeg_er_decode_mb as the concealment callback.
 * Returns 0 or AVERROR(ENOMEM). */
1130 static int init_er(MpegEncContext *s)
1132 ERContext *er = &s->er;
1133 int mb_array_size = s->mb_height * s->mb_stride;
1136 er->avctx = s->avctx;
1137 er->mecc = &s->mecc;
1139 er->mb_index2xy = s->mb_index2xy;
1140 er->mb_num = s->mb_num;
1141 er->mb_width = s->mb_width;
1142 er->mb_height = s->mb_height;
1143 er->mb_stride = s->mb_stride;
1144 er->b8_stride = s->b8_stride;
1146 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
1147 er->error_status_table = av_mallocz(mb_array_size);
1148 if (!er->er_temp_buffer || !er->error_status_table)
1151 er->mbskip_table = s->mbskip_table;
1152 er->mbintra_table = s->mbintra_table;
1154 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1155 er->dc_val[i] = s->dc_val[i];
1157 er->decode_mb = mpeg_er_decode_mb;
1162 av_freep(&er->er_temp_buffer);
1163 av_freep(&er->error_status_table);
1164 return AVERROR(ENOMEM);
1168 * Initialize and allocate MpegEncContext fields dependent on the resolution:
1169 * macroblock geometry, mb_index2xy, MV tables, encoder statistics tables,
1170 * interlaced direct-mode tables, H.263 prediction tables, and the
1171 * intra/skip tables. Returns 0 or AVERROR(ENOMEM).
1170 static int init_context_frame(MpegEncContext *s)
1172 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
// strides carry a +1 border column for out-of-frame prediction accesses
1174 s->mb_width = (s->width + 15) / 16;
1175 s->mb_stride = s->mb_width + 1;
1176 s->b8_stride = s->mb_width * 2 + 1;
1177 mb_array_size = s->mb_height * s->mb_stride;
1178 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1180 /* set default edge pos, will be overridden
1181 * in decode_header if needed */
1182 s->h_edge_pos = s->mb_width * 16;
1183 s->v_edge_pos = s->mb_height * 16;
1185 s->mb_num = s->mb_width * s->mb_height;
1190 s->block_wrap[3] = s->b8_stride;
1192 s->block_wrap[5] = s->mb_stride;
1194 y_size = s->b8_stride * (2 * s->mb_height + 1);
1195 c_size = s->mb_stride * (s->mb_height + 1);
1196 yc_size = y_size + 2 * c_size;
1198 if (s->mb_height & 1)
1199 yc_size += 2*s->b8_stride + 2*s->mb_stride;
1201 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error resilience code looks cleaner with this
1202 for (y = 0; y < s->mb_height; y++)
1203 for (x = 0; x < s->mb_width; x++)
1204 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1206 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1209 /* Allocate MV tables */
1210 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1211 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1212 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1213 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1214 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1215 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
// user-facing pointers skip the border row/column of each base table
1216 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1217 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
1218 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
1219 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
1220 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
1221 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
1223 /* Allocate MB type table */
1224 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1226 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1228 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
1229 mb_array_size * sizeof(float), fail);
1230 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
1231 mb_array_size * sizeof(float), fail);
1235 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1236 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
1237 /* interlaced direct mode decoding tables */
1238 for (i = 0; i < 2; i++) {
1240 for (j = 0; j < 2; j++) {
1241 for (k = 0; k < 2; k++) {
1242 FF_ALLOCZ_OR_GOTO(s->avctx,
1243 s->b_field_mv_table_base[i][j][k],
1244 mv_table_size * 2 * sizeof(int16_t),
1246 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1249 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1250 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1251 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1253 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1256 if (s->out_format == FMT_H263) {
1258 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1259 s->coded_block = s->coded_block_base + s->b8_stride + 1;
1261 /* cbp, ac_pred, pred_dir */
1262 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1263 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1266 if (s->h263_pred || s->h263_plus || !s->encoding) {
1268 // MN: we need these for error resilience of intra-frames
1269 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1270 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1271 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1272 s->dc_val[2] = s->dc_val[1] + c_size;
// 1024 = "no prediction available" DC default (8 * 128)
1273 for (i = 0; i < yc_size; i++)
1274 s->dc_val_base[i] = 1024;
1277 /* which mb is a intra block */
1278 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1279 memset(s->mbintra_table, 1, mb_array_size);
1281 /* init macroblock skip table */
1282 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1283 // Note the + 1 is for a quicker mpeg4 slice_end detection
1287 return AVERROR(ENOMEM);
1291 * init common structure for both encoder and decoder.
1292 * this assumes that some variables like width/height are already set
1294 av_cold int ff_mpv_common_init(MpegEncContext *s)
/* NOTE(review): this listing elides lines (gaps in the embedded numbering);
 * the opening brace, declarations of i/err, error returns and the fail:
 * label are not visible here. Comments below describe only what is shown. */
1297 int nb_slices = (HAVE_THREADS &&
1298 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1299 s->avctx->thread_count : 1;
/* An explicit encoder slice count overrides the thread-derived default. */
1301 if (s->encoding && s->avctx->slices)
1302 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2: mb_height is rounded so each field has whole MB rows
 * (32-pixel granularity, i.e. two 16-pixel field rows). */
1304 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1305 s->mb_height = (s->height + 31) / 32 * 2;
1307 s->mb_height = (s->height + 15) / 16;
1309 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1310 av_log(s->avctx, AV_LOG_ERROR,
1311 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp the slice count: never more than MAX_THREADS, and never more
 * slices than macroblock rows (each slice needs at least one row). */
1315 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1318 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1320 max_slices = MAX_THREADS;
1321 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1322 " reducing to %d\n", nb_slices, max_slices);
1323 nb_slices = max_slices;
1326 if ((s->width || s->height) &&
1327 av_image_check_size(s->width, s->height, 0, s->avctx))
1332 s->flags = s->avctx->flags;
1333 s->flags2 = s->avctx->flags2;
1335 /* set chroma shifts */
1336 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1338 &s->chroma_y_shift);
/* Allocate the picture pool and one AVFrame per slot. */
1341 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1342 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1343 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1344 s->picture[i].f = av_frame_alloc();
1345 if (!s->picture[i].f)
/* Reset and (re)allocate the four well-known picture slots. */
1348 memset(&s->next_picture, 0, sizeof(s->next_picture));
1349 memset(&s->last_picture, 0, sizeof(s->last_picture));
1350 memset(&s->current_picture, 0, sizeof(s->current_picture));
1351 memset(&s->new_picture, 0, sizeof(s->new_picture));
1352 s->next_picture.f = av_frame_alloc();
1353 if (!s->next_picture.f)
1355 s->last_picture.f = av_frame_alloc();
1356 if (!s->last_picture.f)
1358 s->current_picture.f = av_frame_alloc();
1359 if (!s->current_picture.f)
1361 s->new_picture.f = av_frame_alloc();
1362 if (!s->new_picture.f)
1365 if (init_context_frame(s))
1368 s->parse_context.state = -1;
1370 s->context_initialized = 1;
1371 s->thread_context[0] = s;
1373 // if (s->width && s->height) {
/* Slice threading: clone the context once per extra slice, then split the
 * MB rows evenly (rounded) across all slice contexts. */
1374 if (nb_slices > 1) {
1375 for (i = 1; i < nb_slices; i++) {
1376 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1377 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1380 for (i = 0; i < nb_slices; i++) {
1381 if (init_duplicate_context(s->thread_context[i]) < 0)
1383 s->thread_context[i]->start_mb_y =
1384 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1385 s->thread_context[i]->end_mb_y =
1386 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1389 if (init_duplicate_context(s) < 0)
1392 s->end_mb_y = s->mb_height;
1394 s->slice_context_count = nb_slices;
/* Error path (fail: label elided above): tear everything down again. */
1399 ff_mpv_common_end(s);
1404 * Frees and resets MpegEncContext fields depending on the resolution.
1405 * Is used during resolution changes to avoid a full reinitialization of the
1408 static void free_context_frame(MpegEncContext *s)
/* Frees every resolution-dependent table allocated by init_context_frame()
 * and NULLs the derived (offset) pointers, so the context can be re-inited
 * at a new size without a full teardown. Counterpart of the allocations
 * visible earlier in this file. (Listing gaps: `{`, loop closers elided.) */
1412 av_freep(&s->mb_type);
/* MV table *_base pointers own the memory; the non-base pointers are
 * offsets into them and must only be cleared, never freed. */
1413 av_freep(&s->p_mv_table_base);
1414 av_freep(&s->b_forw_mv_table_base);
1415 av_freep(&s->b_back_mv_table_base);
1416 av_freep(&s->b_bidir_forw_mv_table_base);
1417 av_freep(&s->b_bidir_back_mv_table_base);
1418 av_freep(&s->b_direct_mv_table_base);
1419 s->p_mv_table = NULL;
1420 s->b_forw_mv_table = NULL;
1421 s->b_back_mv_table = NULL;
1422 s->b_bidir_forw_mv_table = NULL;
1423 s->b_bidir_back_mv_table = NULL;
1424 s->b_direct_mv_table = NULL;
/* Interlaced/field MV tables: [2][2][2] and [2][2] mirrors of the above. */
1425 for (i = 0; i < 2; i++) {
1426 for (j = 0; j < 2; j++) {
1427 for (k = 0; k < 2; k++) {
1428 av_freep(&s->b_field_mv_table_base[i][j][k]);
1429 s->b_field_mv_table[i][j][k] = NULL;
1431 av_freep(&s->b_field_select_table[i][j]);
1432 av_freep(&s->p_field_mv_table_base[i][j]);
1433 s->p_field_mv_table[i][j] = NULL;
1435 av_freep(&s->p_field_select_table[i]);
1438 av_freep(&s->dc_val_base);
1439 av_freep(&s->coded_block_base);
1440 av_freep(&s->mbintra_table);
1441 av_freep(&s->cbp_table);
1442 av_freep(&s->pred_dir_table);
1444 av_freep(&s->mbskip_table);
1446 av_freep(&s->er.error_status_table);
1447 av_freep(&s->er.er_temp_buffer);
1448 av_freep(&s->mb_index2xy);
1449 av_freep(&s->lambda_table);
1451 av_freep(&s->cplx_tab);
1452 av_freep(&s->bits_tab);
/* Force re-derivation of line sizes on the next frame allocation. */
1454 s->linesize = s->uvlinesize = 0;
1457 int ff_mpv_common_frame_size_change(MpegEncContext *s)
/* Re-initializes the resolution-dependent parts of an already-initialized
 * context after a mid-stream size change: frees per-frame tables and slice
 * contexts, recomputes mb_height, then re-runs init_context_frame() and the
 * slice-context setup. (Listing gaps: `{`, error returns, fail: label.) */
1461 if (!s->context_initialized)
1462 return AVERROR(EINVAL);
/* Tear down slice thread contexts; index 0 is `s` itself and is freed
 * only via free_duplicate_context, not av_freep. */
1464 if (s->slice_context_count > 1) {
1465 for (i = 0; i < s->slice_context_count; i++) {
1466 free_duplicate_context(s->thread_context[i]);
1468 for (i = 1; i < s->slice_context_count; i++) {
1469 av_freep(&s->thread_context[i]);
1472 free_duplicate_context(s);
1474 free_context_frame(s);
/* Picture buffers keep their AVFrames but are flagged for reallocation
 * at the new dimensions. */
1477 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1478 s->picture[i].needs_realloc = 1;
1481 s->last_picture_ptr =
1482 s->next_picture_ptr =
1483 s->current_picture_ptr = NULL;
/* Same mb_height computation as ff_mpv_common_init(). */
1486 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1487 s->mb_height = (s->height + 31) / 32 * 2;
1489 s->mb_height = (s->height + 15) / 16;
1491 if ((s->width || s->height) &&
1492 (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
1495 if ((err = init_context_frame(s)))
1498 s->thread_context[0] = s;
1500 if (s->width && s->height) {
1501 int nb_slices = s->slice_context_count;
/* Rebuild the slice contexts with the same even MB-row split as in
 * ff_mpv_common_init(). */
1502 if (nb_slices > 1) {
1503 for (i = 1; i < nb_slices; i++) {
1504 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1505 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1508 for (i = 0; i < nb_slices; i++) {
1509 if ((err = init_duplicate_context(s->thread_context[i])) < 0)
1511 s->thread_context[i]->start_mb_y =
1512 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1513 s->thread_context[i]->end_mb_y =
1514 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1517 err = init_duplicate_context(s);
1521 s->end_mb_y = s->mb_height;
1523 s->slice_context_count = nb_slices;
/* Error path (fail: label elided): full teardown. */
1528 ff_mpv_common_end(s);
1532 /* init common structure for both encoder and decoder */
1533 void ff_mpv_common_end(MpegEncContext *s)
/* Full teardown, shared by encoder and decoder: slice contexts, parse and
 * bitstream buffers, the picture pool, the four well-known picture slots,
 * and all per-resolution tables. Leaves the context marked uninitialized.
 * (Listing gaps: `{`, loop closers elided.) */
1537 if (s->slice_context_count > 1) {
1538 for (i = 0; i < s->slice_context_count; i++) {
1539 free_duplicate_context(s->thread_context[i]);
/* thread_context[0] aliases `s`, so only indices >= 1 are av_freep'd. */
1541 for (i = 1; i < s->slice_context_count; i++) {
1542 av_freep(&s->thread_context[i]);
1544 s->slice_context_count = 1;
1545 } else free_duplicate_context(s);
1547 av_freep(&s->parse_context.buffer);
1548 s->parse_context.buffer_size = 0;
1550 av_freep(&s->bitstream_buffer);
1551 s->allocated_bitstream_buffer_size = 0;
/* Each pool picture: per-picture tables, buffer refs, then the AVFrame. */
1554 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1555 ff_free_picture_tables(&s->picture[i]);
1556 ff_mpeg_unref_picture(s, &s->picture[i]);
1557 av_frame_free(&s->picture[i].f);
1560 av_freep(&s->picture);
1561 ff_free_picture_tables(&s->last_picture);
1562 ff_mpeg_unref_picture(s, &s->last_picture);
1563 av_frame_free(&s->last_picture.f);
1564 ff_free_picture_tables(&s->current_picture);
1565 ff_mpeg_unref_picture(s, &s->current_picture);
1566 av_frame_free(&s->current_picture.f);
1567 ff_free_picture_tables(&s->next_picture);
1568 ff_mpeg_unref_picture(s, &s->next_picture);
1569 av_frame_free(&s->next_picture.f);
1570 ff_free_picture_tables(&s->new_picture);
1571 ff_mpeg_unref_picture(s, &s->new_picture);
1572 av_frame_free(&s->new_picture.f);
1574 free_context_frame(s);
1576 s->context_initialized = 0;
1577 s->last_picture_ptr =
1578 s->next_picture_ptr =
1579 s->current_picture_ptr = NULL;
1580 s->linesize = s->uvlinesize = 0;
1583 av_cold void ff_init_rl(RLTable *rl,
1584 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
/* Builds the run/level lookup tables (max_level[], max_run[], index_run[])
 * for one RLTable, once for non-last (last=0) and once for last (last=1)
 * coefficients. If static_store is non-NULL the three tables are carved
 * out of it; otherwise they are av_malloc'd (ownership: caller/RLTable).
 * (Listing gaps: start/end setup inside the loop and closers elided.) */
1586 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1587 uint8_t index_run[MAX_RUN + 1];
1588 int last, run, level, start, end, i;
1590 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1591 if (static_store && rl->max_level[0])
1594 /* compute max_level[], max_run[] and index_run[] */
1595 for (last = 0; last < 2; last++) {
/* index_run entries default to rl->n, meaning "no code with this run". */
1604 memset(max_level, 0, MAX_RUN + 1);
1605 memset(max_run, 0, MAX_LEVEL + 1);
1606 memset(index_run, rl->n, MAX_RUN + 1);
1607 for (i = start; i < end; i++) {
1608 run = rl->table_run[i];
1609 level = rl->table_level[i];
1610 if (index_run[run] == rl->n)
1612 if (level > max_level[run])
1613 max_level[run] = level;
1614 if (run > max_run[level])
1615 max_run[level] = run;
/* static_store layout per `last`: max_level | max_run | index_run,
 * matching the offsets MAX_RUN+1 and MAX_RUN+MAX_LEVEL+2 below. */
1618 rl->max_level[last] = static_store[last];
1620 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1621 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1623 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1625 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1626 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1628 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1630 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1631 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1635 av_cold void ff_init_vlc_rl(RLTable *rl, unsigned static_size)
/* Pre-expands the RLTable's VLC into per-qscale RL_VLC tables (rl_vlc[q]),
 * folding the dequantization (level * qmul + qadd) into the stored level so
 * the decoder can skip it per-coefficient. The VLC itself is built into a
 * stack table and only used here. (Listing gaps: qmul setup, escape/illegal
 * branches and loop closers elided.) */
1638 VLC_TYPE table[1500][2] = {{0}};
1639 VLC vlc = { .table = table, .table_allocated = static_size };
1640 av_assert0(static_size <= FF_ARRAY_ELEMS(table));
1641 init_vlc(&vlc, 9, rl->n + 1, &rl->table_vlc[0][1], 4, 2, &rl->table_vlc[0][0], 4, 2, INIT_VLC_USE_NEW_STATIC);
1643 for (q = 0; q < 32; q++) {
/* (q - 1) | 1 makes qadd odd, the standard H.263-style dequant offset;
 * presumably q==0 gets special treatment in an elided line — confirm. */
1645 int qadd = (q - 1) | 1;
1651 for (i = 0; i < vlc.table_size; i++) {
1652 int code = vlc.table[i][0];
1653 int len = vlc.table[i][1];
1656 if (len == 0) { // illegal code
1659 } else if (len < 0) { // more bits needed
1663 if (code == rl->n) { // esc
/* run is stored +1; codes at/after rl->last additionally get +192 to
 * flag "last coefficient in block". */
1667 run = rl->table_run[code] + 1;
1668 level = rl->table_level[code] * qmul + qadd;
1669 if (code >= rl->last) run += 192;
1672 rl->rl_vlc[q][i].len = len;
1673 rl->rl_vlc[q][i].level = level;
1674 rl->rl_vlc[q][i].run = run;
1679 static void release_unused_pictures(MpegEncContext *s)
/* Drops the buffer references of every pool picture that is not currently
 * marked as a reference frame. */
1683 /* release non reference frames */
1684 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1685 if (!s->picture[i].reference)
1686 ff_mpeg_unref_picture(s, &s->picture[i]);
1690 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
/* Returns whether `pic` may be recycled: never the last reference frame;
 * otherwise reusable if it has no data buffer, or is flagged for realloc
 * and not held as a delayed reference. (Return statements elided in this
 * listing.) */
1692 if (pic == s->last_picture_ptr)
1694 if (!pic->f->buf[0])
1696 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1701 static int find_unused_picture(MpegEncContext *s, int shared)
/* Picks a pool slot index for the next frame. First pass: a slot with no
 * buffer at all (cheapest). Second pass: any slot pic_is_unused() accepts.
 * On exhaustion this is an internal error — see the rationale below for
 * why it aborts rather than returning -1. (shared-mode branch and returns
 * elided in this listing.) */
1706 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1707 if (!s->picture[i].f->buf[0] && &s->picture[i] != s->last_picture_ptr)
1711 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1712 if (pic_is_unused(s, &s->picture[i]))
1717 av_log(s->avctx, AV_LOG_FATAL,
1718 "Internal error, picture buffer overflow\n");
1719 /* We could return -1, but the codec would crash trying to draw into a
1720 * non-existing frame anyway. This is safer than waiting for a random crash.
1721 * Also the return of this is never useful, an encoder must only allocate
1722 * as much as allowed in the specification. This has no relationship to how
1723 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1724 * enough for such valid streams).
1725 * Plus, a decoder has to check stream validity and remove frames if too
1726 * many reference frames are around. Waiting for "OOM" is not correct at
1727 * all. Similarly, missing reference frames have to be replaced by
1728 * interpolated/MC frames, anything else is a bug in the codec ...
1734 int ff_find_unused_picture(MpegEncContext *s, int shared)
/* Public wrapper around find_unused_picture(): on success, if the chosen
 * slot was flagged needs_realloc, clears the flag and releases the slot's
 * old tables/buffers so it can be reallocated at the current size. */
1736 int ret = find_unused_picture(s, shared);
1738 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1739 if (s->picture[ret].needs_realloc) {
1740 s->picture[ret].needs_realloc = 0;
1741 ff_free_picture_tables(&s->picture[ret]);
1742 ff_mpeg_unref_picture(s, &s->picture[ret]);
1748 static void gray_frame(AVFrame *frame)
/* Fills the frame with mid-gray: luma plane to 0x80 across full width, and
 * both chroma planes to 0x80 (neutral chroma) over the subsampled extent.
 * Used by the FF_DEBUG_NOMC path to blank out motion-compensated content.
 * Assumes an 8-bit YUV planar layout — the 0x80 fill would be wrong for
 * higher bit depths (TODO confirm callers guarantee this). */
1750 int i, h_chroma_shift, v_chroma_shift;
1752 av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1754 for(i=0; i<frame->height; i++)
1755 memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1756 for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1757 memset(frame->data[1] + frame->linesize[1]*i,
1758 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1759 memset(frame->data[2] + frame->linesize[2]*i,
1760 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1765 * generic function called after decoding
1766 * the header and before a frame is decoded.
1768 int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* Per-frame setup run after the header is parsed and before decoding:
 * releases stale pictures, picks/allocates current_picture, rotates the
 * last/next reference pointers, synthesizes dummy reference frames when
 * the stream starts on a non-keyframe, applies field-picture plane
 * adjustments, and selects the dequantizers.
 * NOTE(review): this listing elides lines (declarations of i/ret/pic,
 * returns, several braces); comments describe only the visible code. */
1774 if (!ff_thread_can_start_frame(avctx)) {
1775 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1779 /* mark & release old frames */
/* On a non-B frame, the previous "last" reference is about to be replaced
 * by the current "next", so drop its buffers (unless they alias). */
1780 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1781 s->last_picture_ptr != s->next_picture_ptr &&
1782 s->last_picture_ptr->f->buf[0]) {
1783 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1786 /* release forgotten pictures */
1787 /* if (mpeg124/h263) */
/* Any still-referenced pool picture that is neither last nor next is a
 * leak ("zombie") except under frame threading, where it is expected. */
1788 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1789 if (&s->picture[i] != s->last_picture_ptr &&
1790 &s->picture[i] != s->next_picture_ptr &&
1791 s->picture[i].reference && !s->picture[i].needs_realloc) {
1792 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1793 av_log(avctx, AV_LOG_ERROR,
1794 "releasing zombie picture\n");
1795 ff_mpeg_unref_picture(s, &s->picture[i]);
1799 ff_mpeg_unref_picture(s, &s->current_picture);
1801 release_unused_pictures(s);
/* Choose the slot for the frame being decoded. */
1803 if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1804 // we already have a unused image
1805 // (maybe it was set before reading the header)
1806 pic = s->current_picture_ptr;
1808 i = ff_find_unused_picture(s, 0);
1810 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1813 pic = &s->picture[i];
1817 if (!s->droppable) {
1818 if (s->pict_type != AV_PICTURE_TYPE_B)
1822 pic->f->coded_picture_number = s->coded_picture_number++;
1824 if (ff_alloc_picture(s, pic, 0) < 0)
1827 s->current_picture_ptr = pic;
1828 // FIXME use only the vars from current_pic
/* Propagate interlacing metadata onto the output frame. */
1829 s->current_picture_ptr->f->top_field_first = s->top_field_first;
1830 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1831 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1832 if (s->picture_structure != PICT_FRAME)
1833 s->current_picture_ptr->f->top_field_first =
1834 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1836 s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1837 !s->progressive_sequence;
1838 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1840 s->current_picture_ptr->f->pict_type = s->pict_type;
1841 // if (s->flags && CODEC_FLAG_QSCALE)
1842 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1843 s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1845 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1846 s->current_picture_ptr)) < 0)
/* Reference rotation: non-B frames become the new "next" reference and
 * the previous "next" becomes "last". */
1849 if (s->pict_type != AV_PICTURE_TYPE_B) {
1850 s->last_picture_ptr = s->next_picture_ptr;
1852 s->next_picture_ptr = s->current_picture_ptr;
1854 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1855 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1856 s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1857 s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1858 s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1859 s->pict_type, s->droppable);
/* Missing "last" reference (e.g. stream starts on a P/B frame, or a field
 * keyframe): allocate and fill a dummy so motion compensation has data. */
1861 if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1862 (s->pict_type != AV_PICTURE_TYPE_I ||
1863 s->picture_structure != PICT_FRAME)) {
1864 int h_chroma_shift, v_chroma_shift;
1865 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1866 &h_chroma_shift, &v_chroma_shift);
1867 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1868 av_log(avctx, AV_LOG_DEBUG,
1869 "allocating dummy last picture for B frame\n");
1870 else if (s->pict_type != AV_PICTURE_TYPE_I)
1871 av_log(avctx, AV_LOG_ERROR,
1872 "warning: first frame is no keyframe\n");
1873 else if (s->picture_structure != PICT_FRAME)
1874 av_log(avctx, AV_LOG_DEBUG,
1875 "allocate dummy last picture for field based first keyframe\n");
1877 /* Allocate a dummy frame */
1878 i = ff_find_unused_picture(s, 0);
1880 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1883 s->last_picture_ptr = &s->picture[i];
1885 s->last_picture_ptr->reference = 3;
1886 s->last_picture_ptr->f->key_frame = 0;
1887 s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1889 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1890 s->last_picture_ptr = NULL;
/* Pre-fill the dummy with mid-gray (software decode only; hwaccel/VDPAU
 * surfaces cannot be written directly). */
1894 if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1895 for(i=0; i<avctx->height; i++)
1896 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1897 0x80, avctx->width);
1898 for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1899 memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1900 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1901 memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1902 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
/* FLV1/H.263 use luma 16 (black) instead of mid-gray for the dummy. */
1905 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1906 for(i=0; i<avctx->height; i++)
1907 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
/* Mark both fields of the dummy as fully decoded for frame threading. */
1911 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1912 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same dummy-frame treatment for a missing "next" reference of a B frame. */
1914 if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1915 s->pict_type == AV_PICTURE_TYPE_B) {
1916 /* Allocate a dummy frame */
1917 i = ff_find_unused_picture(s, 0);
1919 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1922 s->next_picture_ptr = &s->picture[i];
1924 s->next_picture_ptr->reference = 3;
1925 s->next_picture_ptr->f->key_frame = 0;
1926 s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1928 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1929 s->next_picture_ptr = NULL;
1932 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1933 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1936 #if 0 // BUFREF-FIXME
1937 memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1938 memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
/* Refresh the by-value last/next Picture copies from their pointers. */
1940 if (s->last_picture_ptr) {
1941 ff_mpeg_unref_picture(s, &s->last_picture);
1942 if (s->last_picture_ptr->f->buf[0] &&
1943 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1944 s->last_picture_ptr)) < 0)
1947 if (s->next_picture_ptr) {
1948 ff_mpeg_unref_picture(s, &s->next_picture);
1949 if (s->next_picture_ptr->f->buf[0] &&
1950 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1951 s->next_picture_ptr)) < 0)
1955 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1956 s->last_picture_ptr->f->buf[0]));
/* Field pictures: point data at the right field and double the strides so
 * the decoder walks one field only. */
1958 if (s->picture_structure!= PICT_FRAME) {
1960 for (i = 0; i < 4; i++) {
1961 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1962 s->current_picture.f->data[i] +=
1963 s->current_picture.f->linesize[i];
1965 s->current_picture.f->linesize[i] *= 2;
1966 s->last_picture.f->linesize[i] *= 2;
1967 s->next_picture.f->linesize[i] *= 2;
1971 s->err_recognition = avctx->err_recognition;
1973 /* set dequantizer, we can't do it during init as
1974 * it might change for mpeg4 and we can't do it in the header
1975 * decode as init is not called for mpeg4 there yet */
1976 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1977 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1978 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1979 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1980 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1981 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1983 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1984 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
/* Debug aid: blank the frame so only non-MC data shows. */
1987 if (s->avctx->debug & FF_DEBUG_NOMC) {
1988 gray_frame(s->current_picture_ptr->f);
1994 /* called after a frame has been decoded. */
1995 void ff_mpv_frame_end(MpegEncContext *s)
/* If the just-decoded frame is a reference, signal frame-threading waiters
 * that its first field is fully available. */
1999 if (s->current_picture.reference)
2000 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
2005 static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
/* Clips the segment (sx,sy)-(ex,ey) against the x range [0, maxx] in place,
 * recursing with swapped endpoints to normalize orientation; the int64_t
 * casts keep the interpolation products from overflowing int. Most of this
 * body (guards, returns, braces) is elided in this listing — only the two
 * interpolation lines are visible. */
2008 return clip_line(ex, ey, sx, sy, maxx);
2013 *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
2020 *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
2028 * Draw a line from (ex, ey) -> (sx, sy).
2029 * @param w width of the image
2030 * @param h height of the image
2031 * @param stride stride/linesize of the image
2032 * @param color color of the arrow
2034 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
2035 int w, int h, int stride, int color)
/* Additively draws an anti-aliased line onto a single 8-bit plane: clips
 * against both axes, then steps along the major axis distributing `color`
 * between the two adjacent pixels with 16.16 fixed-point fractions.
 * (Listing gaps: some declarations, the vertical-line special case and
 * closers are elided.) */
2039 if (clip_line(&sx, &sy, &ex, &ey, w - 1))
2041 if (clip_line(&sy, &sx, &ey, &ex, h - 1))
2044 sx = av_clip(sx, 0, w - 1);
2045 sy = av_clip(sy, 0, h - 1);
2046 ex = av_clip(ex, 0, w - 1);
2047 ey = av_clip(ey, 0, h - 1);
2049 buf[sy * stride + sx] += color;
/* Mostly-horizontal case: ensure left-to-right, walk x, split color by
 * the fractional y (fr) between row y and y+1. */
2051 if (FFABS(ex - sx) > FFABS(ey - sy)) {
2053 FFSWAP(int, sx, ex);
2054 FFSWAP(int, sy, ey);
2056 buf += sx + sy * stride;
2058 f = ((ey - sy) << 16) / ex;
2059 for (x = 0; x <= ex; x++) {
2061 fr = (x * f) & 0xFFFF;
2062 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2063 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* Mostly-vertical case: same scheme with x and y roles exchanged. */
2067 FFSWAP(int, sx, ex);
2068 FFSWAP(int, sy, ey);
2070 buf += sx + sy * stride;
2073 f = ((ex - sx) << 16) / ey;
2076 for(y= 0; y <= ey; y++){
2078 fr = (y*f) & 0xFFFF;
2079 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2080 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
2086 * Draw an arrow from (ex, ey) -> (sx, sy).
2087 * @param w width of the image
2088 * @param h height of the image
2089 * @param stride stride/linesize of the image
2090 * @param color color of the arrow
2092 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2093 int ey, int w, int h, int stride, int color, int tail, int direction)
/* Draws a motion-vector arrow: the shaft via draw_line() plus, when the
 * vector is long enough (> 3px), two short head strokes at the start
 * point. `direction` selects which endpoint the head sits on (endpoints
 * are swapped first). (Listing gaps: dx/dy/rx/ry setup and closers.) */
2098 FFSWAP(int, sx, ex);
2099 FFSWAP(int, sy, ey);
/* Loose clamp keeps fixed-point math in range; precise clipping happens
 * inside draw_line(). */
2102 sx = av_clip(sx, -100, w + 100);
2103 sy = av_clip(sy, -100, h + 100);
2104 ex = av_clip(ex, -100, w + 100);
2105 ey = av_clip(ey, -100, h + 100);
2110 if (dx * dx + dy * dy > 3 * 3) {
2113 int length = ff_sqrt((rx * rx + ry * ry) << 8);
2115 // FIXME subpixel accuracy
/* Normalize (rx,ry) to a fixed head length (3px in 12.4 fixed point). */
2116 rx = ROUNDED_DIV(rx * 3 << 4, length);
2117 ry = ROUNDED_DIV(ry * 3 << 4, length);
/* Two head strokes: along the vector and perpendicular to it. */
2124 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2125 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2127 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
2131 static int add_mb(AVMotionVector *mb, uint32_t mb_type,
2132 int dst_x, int dst_y,
2133 int src_x, int src_y,
/* Fills one AVMotionVector entry for the exported side data. Zero-length
 * vectors are skipped (the elided line presumably returns 0 — caller adds
 * the return value to its count). Block size is derived from the MB
 * partition: 8x8 for 8X8, 8-wide for 8X16, 8-tall for 16X8, else 16x16.
 * (Listing gaps: dst/src assignments and the return statements.) */
2136 if (dst_x == src_x && dst_y == src_y)
2138 mb->w = IS_8X8(mb_type) || IS_8X16(mb_type) ? 8 : 16;
2139 mb->h = IS_8X8(mb_type) || IS_16X8(mb_type) ? 8 : 16;
/* source < 0 means the vector points to the past, > 0 to the future. */
2144 mb->source = direction ? 1 : -1;
2145 mb->flags = 0; // XXX: does mb_type contain extra information that could be exported here?
2150 * Print debugging info for the given picture.
2152 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2153 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2155 int mb_width, int mb_height, int mb_stride, int quarter_sample)
2157 if ((avctx->flags2 & CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) {
2158 const int shift = 1 + quarter_sample;
2159 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2160 const int mv_stride = (mb_width << mv_sample_log2) +
2161 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2162 int mb_x, mb_y, mbcount = 0;
2164 /* size is width * height * 2 * 4 where 2 is for directions and 4 is
2165 * for the maximum number of MB (4 MB in case of IS_8x8) */
2166 AVMotionVector *mvs = av_malloc_array(mb_width * mb_height, 2 * 4 * sizeof(AVMotionVector));
2170 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2171 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2172 int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
2173 for (direction = 0; direction < 2; direction++) {
2174 if (!USES_LIST(mb_type, direction))
2176 if (IS_8X8(mb_type)) {
2177 for (i = 0; i < 4; i++) {
2178 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2179 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2180 int xy = (mb_x * 2 + (i & 1) +
2181 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2182 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2183 int my = (motion_val[direction][xy][1] >> shift) + sy;
2184 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2186 } else if (IS_16X8(mb_type)) {
2187 for (i = 0; i < 2; i++) {
2188 int sx = mb_x * 16 + 8;
2189 int sy = mb_y * 16 + 4 + 8 * i;
2190 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2191 int mx = (motion_val[direction][xy][0] >> shift);
2192 int my = (motion_val[direction][xy][1] >> shift);
2194 if (IS_INTERLACED(mb_type))
2197 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2199 } else if (IS_8X16(mb_type)) {
2200 for (i = 0; i < 2; i++) {
2201 int sx = mb_x * 16 + 4 + 8 * i;
2202 int sy = mb_y * 16 + 8;
2203 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2204 int mx = motion_val[direction][xy][0] >> shift;
2205 int my = motion_val[direction][xy][1] >> shift;
2207 if (IS_INTERLACED(mb_type))
2210 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2213 int sx = mb_x * 16 + 8;
2214 int sy = mb_y * 16 + 8;
2215 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
2216 int mx = (motion_val[direction][xy][0]>>shift) + sx;
2217 int my = (motion_val[direction][xy][1]>>shift) + sy;
2218 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2225 AVFrameSideData *sd;
2227 av_log(avctx, AV_LOG_DEBUG, "Adding %d MVs info to frame %d\n", mbcount, avctx->frame_number);
2228 sd = av_frame_new_side_data(pict, AV_FRAME_DATA_MOTION_VECTORS, mbcount * sizeof(AVMotionVector));
2231 memcpy(sd->data, mvs, mbcount * sizeof(AVMotionVector));
2237 /* TODO: export all the following to make them accessible for users (and filters) */
2238 if (avctx->hwaccel || !mbtype_table
2239 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
2243 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2246 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2247 av_get_picture_type_char(pict->pict_type));
2248 for (y = 0; y < mb_height; y++) {
2249 for (x = 0; x < mb_width; x++) {
2250 if (avctx->debug & FF_DEBUG_SKIP) {
2251 int count = mbskip_table[x + y * mb_stride];
2254 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2256 if (avctx->debug & FF_DEBUG_QP) {
2257 av_log(avctx, AV_LOG_DEBUG, "%2d",
2258 qscale_table[x + y * mb_stride]);
2260 if (avctx->debug & FF_DEBUG_MB_TYPE) {
2261 int mb_type = mbtype_table[x + y * mb_stride];
2262 // Type & MV direction
2263 if (IS_PCM(mb_type))
2264 av_log(avctx, AV_LOG_DEBUG, "P");
2265 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2266 av_log(avctx, AV_LOG_DEBUG, "A");
2267 else if (IS_INTRA4x4(mb_type))
2268 av_log(avctx, AV_LOG_DEBUG, "i");
2269 else if (IS_INTRA16x16(mb_type))
2270 av_log(avctx, AV_LOG_DEBUG, "I");
2271 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2272 av_log(avctx, AV_LOG_DEBUG, "d");
2273 else if (IS_DIRECT(mb_type))
2274 av_log(avctx, AV_LOG_DEBUG, "D");
2275 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2276 av_log(avctx, AV_LOG_DEBUG, "g");
2277 else if (IS_GMC(mb_type))
2278 av_log(avctx, AV_LOG_DEBUG, "G");
2279 else if (IS_SKIP(mb_type))
2280 av_log(avctx, AV_LOG_DEBUG, "S");
2281 else if (!USES_LIST(mb_type, 1))
2282 av_log(avctx, AV_LOG_DEBUG, ">");
2283 else if (!USES_LIST(mb_type, 0))
2284 av_log(avctx, AV_LOG_DEBUG, "<");
2286 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2287 av_log(avctx, AV_LOG_DEBUG, "X");
2291 if (IS_8X8(mb_type))
2292 av_log(avctx, AV_LOG_DEBUG, "+");
2293 else if (IS_16X8(mb_type))
2294 av_log(avctx, AV_LOG_DEBUG, "-");
2295 else if (IS_8X16(mb_type))
2296 av_log(avctx, AV_LOG_DEBUG, "|");
2297 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2298 av_log(avctx, AV_LOG_DEBUG, " ");
2300 av_log(avctx, AV_LOG_DEBUG, "?");
2303 if (IS_INTERLACED(mb_type))
2304 av_log(avctx, AV_LOG_DEBUG, "=");
2306 av_log(avctx, AV_LOG_DEBUG, " ");
2309 av_log(avctx, AV_LOG_DEBUG, "\n");
2313 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2314 (avctx->debug_mv)) {
2317 int h_chroma_shift, v_chroma_shift, block_height;
2319 const int shift = 1 + quarter_sample;
2321 const int width = avctx->width;
2322 const int height = avctx->height;
2324 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2325 const int mv_stride = (mb_width << mv_sample_log2) +
2326 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2328 *low_delay = 0; // needed to see the vectors without trashing the buffers
2330 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2332 av_frame_make_writable(pict);
2334 pict->opaque = NULL;
2336 ptr = pict->data[0];
2338 block_height = 16 >> v_chroma_shift;
2340 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2342 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2343 const int mb_index = mb_x + mb_y * mb_stride;
2345 if ((avctx->debug_mv) && motion_val[0]) {
2347 for (type = 0; type < 3; type++) {
2351 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2352 (pict->pict_type!= AV_PICTURE_TYPE_P))
2357 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2358 (pict->pict_type!= AV_PICTURE_TYPE_B))
2363 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2364 (pict->pict_type!= AV_PICTURE_TYPE_B))
2369 if (!USES_LIST(mbtype_table[mb_index], direction))
2372 if (IS_8X8(mbtype_table[mb_index])) {
2374 for (i = 0; i < 4; i++) {
2375 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2376 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2377 int xy = (mb_x * 2 + (i & 1) +
2378 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2379 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2380 int my = (motion_val[direction][xy][1] >> shift) + sy;
2381 draw_arrow(ptr, sx, sy, mx, my, width,
2382 height, pict->linesize[0], 100, 0, direction);
2384 } else if (IS_16X8(mbtype_table[mb_index])) {
2386 for (i = 0; i < 2; i++) {
2387 int sx = mb_x * 16 + 8;
2388 int sy = mb_y * 16 + 4 + 8 * i;
2389 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2390 int mx = (motion_val[direction][xy][0] >> shift);
2391 int my = (motion_val[direction][xy][1] >> shift);
2393 if (IS_INTERLACED(mbtype_table[mb_index]))
2396 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2397 height, pict->linesize[0], 100, 0, direction);
2399 } else if (IS_8X16(mbtype_table[mb_index])) {
2401 for (i = 0; i < 2; i++) {
2402 int sx = mb_x * 16 + 4 + 8 * i;
2403 int sy = mb_y * 16 + 8;
2404 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2405 int mx = motion_val[direction][xy][0] >> shift;
2406 int my = motion_val[direction][xy][1] >> shift;
2408 if (IS_INTERLACED(mbtype_table[mb_index]))
2411 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2412 height, pict->linesize[0], 100, 0, direction);
2415 int sx= mb_x * 16 + 8;
2416 int sy= mb_y * 16 + 8;
2417 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2418 int mx= (motion_val[direction][xy][0]>>shift) + sx;
2419 int my= (motion_val[direction][xy][1]>>shift) + sy;
2420 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
2425 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2426 uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2427 0x0101010101010101ULL;
2429 for (y = 0; y < block_height; y++) {
2430 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2431 (block_height * mb_y + y) *
2432 pict->linesize[1]) = c;
2433 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2434 (block_height * mb_y + y) *
2435 pict->linesize[2]) = c;
2438 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2440 int mb_type = mbtype_table[mb_index];
2443 #define COLOR(theta, r) \
2444 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2445 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2449 if (IS_PCM(mb_type)) {
2451 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2452 IS_INTRA16x16(mb_type)) {
2454 } else if (IS_INTRA4x4(mb_type)) {
2456 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2458 } else if (IS_DIRECT(mb_type)) {
2460 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2462 } else if (IS_GMC(mb_type)) {
2464 } else if (IS_SKIP(mb_type)) {
2466 } else if (!USES_LIST(mb_type, 1)) {
2468 } else if (!USES_LIST(mb_type, 0)) {
2471 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2475 u *= 0x0101010101010101ULL;
2476 v *= 0x0101010101010101ULL;
2477 for (y = 0; y < block_height; y++) {
2478 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2479 (block_height * mb_y + y) * pict->linesize[1]) = u;
2480 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2481 (block_height * mb_y + y) * pict->linesize[2]) = v;
2485 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2486 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2487 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2488 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2489 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2491 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2492 for (y = 0; y < 16; y++)
2493 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2494 pict->linesize[0]] ^= 0x80;
2496 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2497 int dm = 1 << (mv_sample_log2 - 2);
2498 for (i = 0; i < 4; i++) {
2499 int sx = mb_x * 16 + 8 * (i & 1);
2500 int sy = mb_y * 16 + 8 * (i >> 1);
2501 int xy = (mb_x * 2 + (i & 1) +
2502 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2504 int32_t *mv = (int32_t *) &motion_val[0][xy];
2505 if (mv[0] != mv[dm] ||
2506 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2507 for (y = 0; y < 8; y++)
2508 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2509 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2510 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2511 pict->linesize[0]) ^= 0x8080808080808080ULL;
2515 if (IS_INTERLACED(mb_type) &&
2516 avctx->codec->id == AV_CODEC_ID_H264) {
2520 mbskip_table[mb_index] = 0;
/* Thin wrapper: forwards the relevant MpegEncContext/Picture fields to the
 * generic per-MB debug dumper ff_print_debug_info2(). */
2526 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2528 ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
2529 p->qscale_table, p->motion_val, &s->low_delay,
2530 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/* Export the per-MB qscale table as frame side data.
 * Takes a new reference on the qscale buffer, advances it past the first
 * row plus one entry of edge padding, and hands ownership of the ref to
 * the frame via av_frame_set_qp_table(). */
2533 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2535 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2536 int offset = 2*s->mb_stride + 1;
/* NOTE(review): the "if (!ref)" guard line is not visible in this excerpt,
 * but this return is reached only when av_buffer_ref() failed. */
2538 return AVERROR(ENOMEM);
2539 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
/* shift the ref so data[0] points at the first real macroblock row */
2540 ref->size -= offset;
2541 ref->data += offset;
2542 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/* Half-pel motion compensation of one luma sub-block at reduced ("lowres")
 * resolution.  Splits the motion vector into integer and sub-pel parts,
 * falls back to emulated_edge_mc() when the read area crosses the picture
 * edges, and applies the interpolation via the h264 chroma MC kernels,
 * which are reused as the lowres interpolators. */
2545 static inline int hpel_motion_lowres(MpegEncContext *s,
2546 uint8_t *dest, uint8_t *src,
2547 int field_based, int field_select,
2548 int src_x, int src_y,
2549 int width, int height, ptrdiff_t stride,
2550 int h_edge_pos, int v_edge_pos,
2551 int w, int h, h264_chroma_mc_func *pix_op,
2552 int motion_x, int motion_y)
2554 const int lowres = s->avctx->lowres;
2555 const int op_index = FFMIN(lowres, 3);
/* mask for the lowres+1 fractional (sub-pel) bits of the MV */
2556 const int s_mask = (2 << lowres) - 1;
2560 if (s->quarter_sample) {
/* split MV into sub-pel fraction (sx/sy) and integer offset; note that
 * ">> lowres + 1" parses as ">> (lowres + 1)", which is the intent here */
2565 sx = motion_x & s_mask;
2566 sy = motion_y & s_mask;
2567 src_x += motion_x >> lowres + 1;
2568 src_y += motion_y >> lowres + 1;
2570 src += src_y * stride + src_x;
/* the interpolator reads (w+1)x(h+1) samples; emulate edges if needed */
2572 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2573 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2574 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2575 s->linesize, s->linesize,
2576 w + 1, (h + 1) << field_based,
2577 src_x, src_y << field_based,
2578 h_edge_pos, v_edge_pos);
2579 src = s->edge_emu_buffer;
/* rescale the sub-pel fraction into the 1/8-pel space of the MC kernels */
2583 sx = (sx << 2) >> lowres;
2584 sy = (sy << 2) >> lowres;
2587 pix_op[op_index](dest, src, stride, h, sx, sy);
2591 /* apply one mpeg motion vector to the three components */
/* Apply one MPEG motion vector to all three planes (Y, Cb, Cr) at lowres.
 * Chroma MV derivation depends on the bitstream format (H.263 vs H.261 vs
 * MPEG with 4:2:0 / 4:2:2 / 4:4:4 subsampling); edge emulation is used
 * when a reference read would fall outside the picture. */
2592 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2599 uint8_t **ref_picture,
2600 h264_chroma_mc_func *pix_op,
2601 int motion_x, int motion_y,
2604 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2605 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2606 ptrdiff_t uvlinesize, linesize;
2607 const int lowres = s->avctx->lowres;
2608 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2609 const int block_s = 8>>lowres;
2610 const int s_mask = (2 << lowres) - 1;
2611 const int h_edge_pos = s->h_edge_pos >> lowres;
2612 const int v_edge_pos = s->v_edge_pos >> lowres;
/* doubled linesizes address a single field of an interlaced picture */
2613 linesize = s->current_picture.f->linesize[0] << field_based;
2614 uvlinesize = s->current_picture.f->linesize[1] << field_based;
2616 // FIXME obviously not perfect but qpel will not work in lowres anyway
2617 if (s->quarter_sample) {
2623 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
/* split luma MV into sub-pel fraction and integer source position */
2626 sx = motion_x & s_mask;
2627 sy = motion_y & s_mask;
2628 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2629 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* derive the chroma MV/position according to the output format */
2631 if (s->out_format == FMT_H263) {
2632 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2633 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2634 uvsrc_x = src_x >> 1;
2635 uvsrc_y = src_y >> 1;
2636 } else if (s->out_format == FMT_H261) {
2637 // even chroma mv's are full pel in H261
2640 uvsx = (2 * mx) & s_mask;
2641 uvsy = (2 * my) & s_mask;
2642 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2643 uvsrc_y = mb_y * block_s + (my >> lowres);
2645 if(s->chroma_y_shift){
2650 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2651 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2653 if(s->chroma_x_shift){
2657 uvsy = motion_y & s_mask;
2659 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2662 uvsx = motion_x & s_mask;
2663 uvsy = motion_y & s_mask;
2670 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2671 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2672 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* out-of-picture reads go through the edge emulation scratch buffer */
2674 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2675 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2676 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2677 linesize >> field_based, linesize >> field_based,
2678 17, 17 + field_based,
2679 src_x, src_y << field_based, h_edge_pos,
2681 ptr_y = s->edge_emu_buffer;
2682 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2683 uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2684 uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2685 s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2686 uvlinesize >> field_based, uvlinesize >> field_based,
2688 uvsrc_x, uvsrc_y << field_based,
2689 h_edge_pos >> 1, v_edge_pos >> 1);
2690 s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2691 uvlinesize >> field_based,uvlinesize >> field_based,
2693 uvsrc_x, uvsrc_y << field_based,
2694 h_edge_pos >> 1, v_edge_pos >> 1);
2700 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
/* bottom field: step destination and source down by one line */
2702 dest_y += s->linesize;
2703 dest_cb += s->uvlinesize;
2704 dest_cr += s->uvlinesize;
2708 ptr_y += s->linesize;
2709 ptr_cb += s->uvlinesize;
2710 ptr_cr += s->uvlinesize;
/* rescale sub-pel fractions to the 1/8-pel space of the MC kernels */
2713 sx = (sx << 2) >> lowres;
2714 sy = (sy << 2) >> lowres;
2715 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2717 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2718 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2719 uvsx = (uvsx << 2) >> lowres;
2720 uvsy = (uvsy << 2) >> lowres;
2722 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2723 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2726 // FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV (four luma vectors per MB) at lowres.
 * A single chroma vector is derived from the four luma vectors with the
 * H.263 rounding rule, then both chroma planes are compensated with the
 * same offset, using edge emulation where necessary. */
2729 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2730 uint8_t *dest_cb, uint8_t *dest_cr,
2731 uint8_t **ref_picture,
2732 h264_chroma_mc_func * pix_op,
2735 const int lowres = s->avctx->lowres;
2736 const int op_index = FFMIN(lowres, 3);
2737 const int block_s = 8 >> lowres;
2738 const int s_mask = (2 << lowres) - 1;
/* chroma edges: ">> lowres + 1" parses as ">> (lowres + 1)" (half size) */
2739 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2740 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2741 int emu = 0, src_x, src_y, sx, sy;
2745 if (s->quarter_sample) {
2750 /* In case of 8X8, we construct a single chroma motion vector
2751 with a special rounding */
2752 mx = ff_h263_round_chroma(mx);
2753 my = ff_h263_round_chroma(my);
2757 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2758 src_y = s->mb_y * block_s + (my >> lowres + 1);
2760 offset = src_y * s->uvlinesize + src_x;
2761 ptr = ref_picture[1] + offset;
2762 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2763 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2764 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2765 s->uvlinesize, s->uvlinesize,
2767 src_x, src_y, h_edge_pos, v_edge_pos);
2768 ptr = s->edge_emu_buffer;
/* rescale sub-pel fraction to the 1/8-pel space of the MC kernels */
2771 sx = (sx << 2) >> lowres;
2772 sy = (sy << 2) >> lowres;
2773 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr plane: same offset and sub-pel fraction as Cb */
2775 ptr = ref_picture[2] + offset;
2777 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2778 s->uvlinesize, s->uvlinesize,
2780 src_x, src_y, h_edge_pos, v_edge_pos);
2781 ptr = s->edge_emu_buffer;
2783 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2787 * motion compensation of a single macroblock
2789 * @param dest_y luma destination pointer
2790 * @param dest_cb chroma cb/u destination pointer
2791 * @param dest_cr chroma cr/v destination pointer
2792 * @param dir direction (0->forward, 1->backward)
2793 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2794 * @param pix_op halfpel motion compensation function (average or put normally)
2795 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Dispatch lowres motion compensation by MV type: 16x16, 8x8 (4MV),
 * field-based, 16x8 and dual-prime; see the doxygen comment above. */
2797 static inline void MPV_motion_lowres(MpegEncContext *s,
2798 uint8_t *dest_y, uint8_t *dest_cb,
2800 int dir, uint8_t **ref_picture,
2801 h264_chroma_mc_func *pix_op)
2805 const int lowres = s->avctx->lowres;
2806 const int block_s = 8 >>lowres;
2811 switch (s->mv_type) {
/* MV_TYPE_16X16: one vector for the whole macroblock */
2813 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2815 ref_picture, pix_op,
2816 s->mv[dir][0][0], s->mv[dir][0][1],
/* MV_TYPE_8X8: four luma vectors, chroma handled as one averaged MV */
2822 for (i = 0; i < 4; i++) {
2823 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2824 s->linesize) * block_s,
2825 ref_picture[0], 0, 0,
2826 (2 * mb_x + (i & 1)) * block_s,
2827 (2 * mb_y + (i >> 1)) * block_s,
2828 s->width, s->height, s->linesize,
2829 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2830 block_s, block_s, pix_op,
2831 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate the luma MVs for the derived chroma vector */
2833 mx += s->mv[dir][i][0];
2834 my += s->mv[dir][i][1];
2837 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2838 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* MV_TYPE_FIELD: two field vectors (frame pictures: top then bottom) */
2842 if (s->picture_structure == PICT_FRAME) {
2844 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2845 1, 0, s->field_select[dir][0],
2846 ref_picture, pix_op,
2847 s->mv[dir][0][0], s->mv[dir][0][1],
2850 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2851 1, 1, s->field_select[dir][1],
2852 ref_picture, pix_op,
2853 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture referencing the opposite parity of the current frame */
2856 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2857 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2858 ref_picture = s->current_picture_ptr->f->data;
2861 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2862 0, 0, s->field_select[dir][0],
2863 ref_picture, pix_op,
2865 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* MV_TYPE_16X8: two vectors, upper and lower 16x8 halves */
2869 for (i = 0; i < 2; i++) {
2870 uint8_t **ref2picture;
2872 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2873 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2874 ref2picture = ref_picture;
2876 ref2picture = s->current_picture_ptr->f->data;
2879 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2880 0, 0, s->field_select[dir][i],
2881 ref2picture, pix_op,
2882 s->mv[dir][i][0], s->mv[dir][i][1] +
2883 2 * block_s * i, block_s, mb_y >> 1);
2885 dest_y += 2 * block_s * s->linesize;
2886 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2887 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* MV_TYPE_DMV: dual prime — put first, then avg the opposite parity */
2891 if (s->picture_structure == PICT_FRAME) {
2892 for (i = 0; i < 2; i++) {
2894 for (j = 0; j < 2; j++) {
2895 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2897 ref_picture, pix_op,
2898 s->mv[dir][2 * i + j][0],
2899 s->mv[dir][2 * i + j][1],
2902 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2905 for (i = 0; i < 2; i++) {
2906 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2907 0, 0, s->picture_structure != i + 1,
2908 ref_picture, pix_op,
2909 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2910 2 * block_s, mb_y >> 1);
2912 // after put we make avg of the same block
2913 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2915 // opposite parity is always in the same
2916 // frame if this is second field
2917 if (!s->first_field) {
2918 ref_picture = s->current_picture_ptr->f->data;
2929 * find the lowest MB row referenced in the MVs
/* Return the lowest macroblock row of the reference picture that the
 * current MB's MVs (in direction dir) can touch; used by frame-threaded
 * decoding to wait only for the rows that are actually needed.
 * Falls back to the last row for field pictures / GMC. */
2931 int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
2933 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2934 int my, off, i, mvs;
2936 if (s->picture_structure != PICT_FRAME || s->mcsel)
2939 switch (s->mv_type) {
/* normalize all vertical MVs to quarter-pel and track their extremes */
2953 for (i = 0; i < mvs; i++) {
2954 my = s->mv[dir][i][1]<<qpel_shift;
2955 my_max = FFMAX(my_max, my);
2956 my_min = FFMIN(my_min, my);
/* +63 >> 6: round the quarter-pel reach up to whole 16-pixel MB rows */
2959 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2961 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2963 return s->mb_height-1;
2966 /* put block[] to dest[] */
2967 static inline void put_dct(MpegEncContext *s,
2968 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2970 s->dct_unquantize_intra(s, block, i, qscale);
2971 s->idsp.idct_put(dest, line_size, block);
2974 /* add block[] to dest[] */
2975 static inline void add_dct(MpegEncContext *s,
2976 int16_t *block, int i, uint8_t *dest, int line_size)
2978 if (s->block_last_index[i] >= 0) {
2979 s->idsp.idct_add(dest, line_size, block);
2983 static inline void add_dequant_dct(MpegEncContext *s,
2984 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2986 if (s->block_last_index[i] >= 0) {
2987 s->dct_unquantize_inter(s, block, i, qscale);
2989 s->idsp.idct_add(dest, line_size, block);
2994 * Clean dc, ac, coded_block for the current non-intra MB.
/* Reset the intra prediction state (luma and chroma DC predictors, AC
 * prediction values and, for MSMPEG4 v3+, the coded-block flags) for the
 * current non-intra macroblock.  1024 is the neutral DC predictor value. */
2996 void ff_clean_intra_table_entries(MpegEncContext *s)
2998 int wrap = s->b8_stride;
2999 int xy = s->block_index[0];
/* luma DC predictors of the four 8x8 blocks */
3002 s->dc_val[0][xy + 1 ] =
3003 s->dc_val[0][xy + wrap] =
3004 s->dc_val[0][xy + 1 + wrap] = 1024;
/* ac pred: clear two rows of 8x8-block AC values */
3006 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
3007 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
3008 if (s->msmpeg4_version>=3) {
3009 s->coded_block[xy ] =
3010 s->coded_block[xy + 1 ] =
3011 s->coded_block[xy + wrap] =
3012 s->coded_block[xy + 1 + wrap] = 0;
/* chroma uses MB-granularity indexing, not 8x8-block indexing */
3015 wrap = s->mb_stride;
3016 xy = s->mb_x + s->mb_y * wrap;
3018 s->dc_val[2][xy] = 1024;
/* ac pred */
3020 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
3021 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
3023 s->mbintra_table[xy]= 0;
3026 /* generic function called after a macroblock has been parsed by the
3027 decoder or after it has been encoded by the encoder.
3029 Important variables used:
3030 s->mb_intra : true if intra macroblock
3031 s->mv_dir : motion vector direction
3032 s->mv_type : motion vector type
3033 s->mv : motion vector
3034 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstruct one macroblock: handle hwaccel/debug paths, maintain intra
 * prediction state, perform motion compensation (lowres or full), then add
 * or put the IDCT residue per codec family; see the comment block above
 * for the MpegEncContext fields consumed. */
3036 static av_always_inline
3037 void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
3038 int lowres_flag, int is_mpeg12)
3040 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* hardware acceleration handles the whole MB itself */
3043 s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
3044 s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
3048 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
3049 /* print DCT coefficients */
3051 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
3053 for(j=0; j<64; j++){
3054 av_log(s->avctx, AV_LOG_DEBUG, "%5d",
3055 block[i][s->idsp.idct_permutation[j]]);
3057 av_log(s->avctx, AV_LOG_DEBUG, "\n");
3061 s->current_picture.qscale_table[mb_xy] = s->qscale;
3063 /* update DC predictors for P macroblocks */
3065 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
3066 if(s->mbintra_table[mb_xy])
3067 ff_clean_intra_table_entries(s);
/* reset the DC predictors to the neutral value for this precision */
3071 s->last_dc[2] = 128 << s->intra_dc_precision;
3074 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
3075 s->mbintra_table[mb_xy]=1;
3077 if ( (s->flags&CODEC_FLAG_PSNR)
3078 || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
3079 || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
3080 uint8_t *dest_y, *dest_cb, *dest_cr;
3081 int dct_linesize, dct_offset;
3082 op_pixels_func (*op_pix)[4];
3083 qpel_mc_func (*op_qpix)[16];
3084 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3085 const int uvlinesize = s->current_picture.f->linesize[1];
3086 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
3087 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
3089 /* avoid copy if macroblock skipped in last frame too */
3090 /* skip only during decoding as we might trash the buffers during encoding a bit */
3092 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
3094 if (s->mb_skipped) {
3096 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
3098 } else if(!s->current_picture.reference) {
3101 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT alternates lines between the two fields */
3105 dct_linesize = linesize << s->interlaced_dct;
3106 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
3110 dest_cb= s->dest[1];
3111 dest_cr= s->dest[2];
/* non-readable destination: reconstruct into a scratchpad first */
3113 dest_y = s->b_scratchpad;
3114 dest_cb= s->b_scratchpad+16*linesize;
3115 dest_cr= s->b_scratchpad+32*linesize;
3119 /* motion handling */
3120 /* decoding or more than one mb_type (MC was already done otherwise) */
3123 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
/* frame threading: wait until the referenced rows are decoded */
3124 if (s->mv_dir & MV_DIR_FORWARD) {
3125 ff_thread_await_progress(&s->last_picture_ptr->tf,
3126 ff_mpv_lowest_referenced_row(s, 0),
3129 if (s->mv_dir & MV_DIR_BACKWARD) {
3130 ff_thread_await_progress(&s->next_picture_ptr->tf,
3131 ff_mpv_lowest_referenced_row(s, 1),
/* lowres path: reuse the h264 chroma MC kernels as interpolators */
3137 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
3139 if (s->mv_dir & MV_DIR_FORWARD) {
3140 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
3141 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
3143 if (s->mv_dir & MV_DIR_BACKWARD) {
3144 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
3147 op_qpix = s->me.qpel_put;
3148 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
3149 op_pix = s->hdsp.put_pixels_tab;
3151 op_pix = s->hdsp.put_no_rnd_pixels_tab;
3153 if (s->mv_dir & MV_DIR_FORWARD) {
3154 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
/* bidirectional: second direction averages onto the first */
3155 op_pix = s->hdsp.avg_pixels_tab;
3156 op_qpix= s->me.qpel_avg;
3158 if (s->mv_dir & MV_DIR_BACKWARD) {
3159 ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
3164 /* skip dequant / idct if we are really late ;) */
3165 if(s->avctx->skip_idct){
3166 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
3167 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
3168 || s->avctx->skip_idct >= AVDISCARD_ALL)
3172 /* add dct residue */
3173 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
3174 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
3175 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3176 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3177 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3178 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3180 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3181 if (s->chroma_y_shift){
3182 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3183 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3187 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3188 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3189 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3190 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* codecs whose blocks are already dequantized by the parser */
3193 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3194 add_dct(s, block[0], 0, dest_y , dct_linesize);
3195 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3196 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3197 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3199 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3200 if(s->chroma_y_shift){//Chroma420
3201 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3202 add_dct(s, block[5], 5, dest_cr, uvlinesize);
3205 dct_linesize = uvlinesize << s->interlaced_dct;
3206 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3208 add_dct(s, block[4], 4, dest_cb, dct_linesize);
3209 add_dct(s, block[5], 5, dest_cr, dct_linesize);
3210 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3211 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3212 if(!s->chroma_x_shift){//Chroma444
3213 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3214 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3215 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3216 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3221 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
3222 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3225 /* dct only in intra block */
3226 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
3227 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3228 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3229 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3230 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3232 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3233 if(s->chroma_y_shift){
3234 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3235 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3239 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3240 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3241 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3242 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* intra blocks already dequantized: plain IDCT put */
3246 s->idsp.idct_put(dest_y, dct_linesize, block[0]);
3247 s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3248 s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
3249 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3251 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3252 if(s->chroma_y_shift){
3253 s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
3254 s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
3257 dct_linesize = uvlinesize << s->interlaced_dct;
3258 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3260 s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
3261 s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
3262 s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3263 s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3264 if(!s->chroma_x_shift){//Chroma444
3265 s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3266 s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3267 s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3268 s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* scratchpad was used: copy the finished MB to the real destination */
3276 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3277 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3278 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatch to the always-inlined worker with the
 * lowres_flag/is_mpeg12 template parameters resolved at compile time, so
 * each combination gets a specialized code path. */
3283 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
3286 if(s->out_format == FMT_MPEG1) {
3287 if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 1);
3288 else mpv_decode_mb_internal(s, block, 0, 1);
3291 if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 0);
3292 else mpv_decode_mb_internal(s, block, 0, 0);
/* Thin wrapper: forward a finished band of the current picture to the
 * generic ff_draw_horiz_band() notifier. */
3295 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3297 ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
3298 s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
3299 s->first_field, s->low_delay);
/* Set up the per-MB block indices and destination plane pointers for the
 * current (mb_x, mb_y); field pictures address every second line. */
3302 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3303 const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3304 const int uvlinesize = s->current_picture.f->linesize[1];
/* mb_size is the log2 MB width in pixels, reduced by the lowres shift */
3305 const int mb_size= 4 - s->avctx->lowres;
/* four luma 8x8 block indices, then the two chroma planes */
3307 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3308 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3309 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3310 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3311 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3312 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3313 //block_index is not used by mpeg2, so it is not affected by chroma_format
3315 s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
3316 s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3317 s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3319 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3321 if(s->picture_structure==PICT_FRAME){
3322 s->dest[0] += s->mb_y * linesize << mb_size;
3323 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3324 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: mb_y counts field rows, so halve it for the offset */
3326 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3327 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3328 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3329 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3335 * Permute an 8x8 block.
3336 * @param block the block which will be permuted according to the given permutation vector
3337 * @param permutation the permutation vector
3338 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3339 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3340 * (inverse) permuted to scantable order!
/* Apply the IDCT coefficient permutation in place; only the first `last`
 * scantable positions can be non-zero, so only those are touched. */
3342 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3348 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* first pass: clear the source positions (via a temp copy, elided here) */
3350 for(i=0; i<=last; i++){
3351 const int j= scantable[i];
/* second pass: scatter the saved values to their permuted positions */
3356 for(i=0; i<=last; i++){
3357 const int j= scantable[i];
3358 const int perm_j= permutation[j];
3359 block[perm_j]= temp[j];
/* Flush callback: drop all picture references and reset the parser and
 * bitstream-buffer state so decoding can restart cleanly (e.g. on seek). */
3363 void ff_mpeg_flush(AVCodecContext *avctx){
3365 MpegEncContext *s = avctx->priv_data;
/* nothing to do before the context is fully initialized */
3367 if (!s || !s->picture)
3370 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3371 ff_mpeg_unref_picture(s, &s->picture[i]);
3372 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3374 ff_mpeg_unref_picture(s, &s->current_picture);
3375 ff_mpeg_unref_picture(s, &s->last_picture);
3376 ff_mpeg_unref_picture(s, &s->next_picture);
3378 s->mb_x= s->mb_y= 0;
/* reset the startcode parser */
3381 s->parse_context.state= -1;
3382 s->parse_context.frame_start_found= 0;
3383 s->parse_context.overread= 0;
3384 s->parse_context.overread_index= 0;
3385 s->parse_context.index= 0;
3386 s->parse_context.last_index= 0;
3387 s->bitstream_buffer_size=0;
3392 * set qscale and update qscale dependent variables.
3394 void ff_set_qscale(MpegEncContext * s, int qscale)
/* clamp qscale to the valid 1..31 range (lower clamp elided in excerpt) */
3398 else if (qscale > 31)
/* derive chroma qscale and the DC scale factors from the tables */
3402 s->chroma_qscale= s->chroma_qscale_table[qscale];
3404 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3405 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report decode progress (current MB row) to waiting frame threads; not
 * reported for B-frames, partitioned frames, or after a decode error,
 * since consumers could then read unreliable rows. */
3408 void ff_mpv_report_decode_progress(MpegEncContext *s)
3410 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3411 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);