2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
36 #include "h264chroma.h"
39 #include "mpegvideo.h"
42 #include "xvmc_internal.h"
/* Forward declarations of the C reference dequantisation routines.
 * ff_dct_common_init() below installs these into the MpegEncContext
 * function pointers (arch-specific init functions may override them). */
46 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
47 int16_t *block, int n, int qscale);
48 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
49 int16_t *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
51 int16_t *block, int n, int qscale);
/* Bit-exact variant, selected when CODEC_FLAG_BITEXACT is set (see init). */
52 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
53 int16_t *block, int n, int qscale);
54 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
55 int16_t *block, int n, int qscale);
56 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
57 int16_t *block, int n, int qscale);
58 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
59 int16_t *block, int n, int qscale);
/* Default chroma qscale mapping: the identity — chroma uses the same
 * quantiser scale as luma. Installed by ff_MPV_common_defaults().
 * NOTE(review): the closing "};" is not visible in this extract. */
61 static const uint8_t ff_default_chroma_qscale_table[32] = {
62 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
63 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
64 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale value.
 * Also reused as entry 0 of ff_mpeg2_dc_scale_table[] below.
 * NOTE(review): the closing "};" is not visible in this extract. */
67 const uint8_t ff_mpeg1_dc_scale_table[128] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale, constant 4 — selected via ff_mpeg2_dc_scale_table[1]
 * (presumably indexed by intra_dc_precision; the indexing site is not
 * visible in this extract — confirm against the decoder).
 * NOTE(review): the closing "};" is not visible in this extract. */
79 static const uint8_t mpeg2_dc_scale_table1[128] = {
80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale, constant 2 — entry 2 of ff_mpeg2_dc_scale_table[].
 * NOTE(review): the closing "};" is not visible in this extract. */
91 static const uint8_t mpeg2_dc_scale_table2[128] = {
92 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale, constant 1 — entry 3 of ff_mpeg2_dc_scale_table[].
 * NOTE(review): the closing "};" is not visible in this extract. */
103 static const uint8_t mpeg2_dc_scale_table3[128] = {
104 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Per-precision selector: maps an index 0..3 to the matching DC scale
 * table (divisors 8, 4, 2, 1). Index 0 reuses the MPEG-1 table.
 * NOTE(review): the closing "};" is not visible in this extract. */
115 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
116 ff_mpeg1_dc_scale_table,
117 mpeg2_dc_scale_table1,
118 mpeg2_dc_scale_table2,
119 mpeg2_dc_scale_table3,
/* Pixel-format list exported for YUV420-only codecs.
 * NOTE(review): the initializer body and terminator are missing from this
 * extract — the element list cannot be documented from here. */
122 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback (installed as er->decode_mb in init_er()):
 * re-decodes/conceals one macroblock using the motion parameters chosen
 * by the error concealment code, then emits it via ff_MPV_decode_mb().
 * NOTE(review): several lines (mv parameter, block-index setup guards)
 * are missing from this extract. */
127 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
129 int mb_x, int mb_y, int mb_intra, int mb_skipped)
131 MpegEncContext *s = opaque;
134 s->mv_type = mv_type;
135 s->mb_intra = mb_intra;
136 s->mb_skipped = mb_skipped;
139 memcpy(s->mv, mv, sizeof(*mv));
141 ff_init_block_index(s);
142 ff_update_block_index(s);
/* Concealment only fills prediction; residual blocks must be zero. */
144 s->dsp.clear_blocks(s->block[0]);
/* Recompute destination pointers: luma at 16x16 granularity, chroma
 * scaled by the chroma subsampling shifts. */
146 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
147 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
148 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
151 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
152 ff_MPV_decode_mb(s, s->block);
155 /* init common dct for both encoder and decoder */
156 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Initialize the generic DSP helper contexts first. */
158 ff_dsputil_init(&s->dsp, s->avctx);
159 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
160 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
161 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* Install the C reference dequantizers; the bit-exact MPEG-2 intra
 * variant replaces the default one under CODEC_FLAG_BITEXACT. */
163 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
164 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
165 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
166 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
167 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
168 if (s->flags & CODEC_FLAG_BITEXACT)
169 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
170 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Architecture-specific overrides (presumably guarded by ARCH_* #ifs
 * that are not visible in this extract — confirm against full file). */
173 ff_MPV_common_init_axp(s);
175 ff_MPV_common_init_arm(s);
177 ff_MPV_common_init_bfin(s);
179 ff_MPV_common_init_ppc(s);
181 ff_MPV_common_init_x86(s);
183 /* load & permutate scantables
184 * note: only wmv uses different ones
/* Vertical-alternate scan replaces zigzag for both inter and intra when
 * alternate_scan is set; otherwise plain zigzag is used. */
186 if (s->alternate_scan) {
187 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
190 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
194 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads). Returns 0 on success, AVERROR(ENOMEM)
 * on failure (freeing edge_emu_buffer on the error path). */
199 static int frame_size_alloc(MpegEncContext *s, int linesize)
201 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
203 // edge emu needs blocksize + filter length - 1
204 // (= 17x17 for halfpel / 21x21 for h264)
205 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
206 // at uvlinesize. It supports only YUV420 so 24x24 is enough
207 // linesize * interlaced * MBsize
208 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
211 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
/* All scratchpads alias the same allocation; obmc_scratchpad starts at
 * a +16 offset into it. */
213 s->me.temp = s->me.scratchpad;
214 s->rd_scratchpad = s->me.scratchpad;
215 s->b_scratchpad = s->me.scratchpad;
216 s->obmc_scratchpad = s->me.scratchpad + 16;
220 av_freep(&s->edge_emu_buffer);
221 return AVERROR(ENOMEM);
225 * Allocate a frame buffer
/* Obtains pixel buffers for pic, via the (possibly threaded) get_buffer
 * path for normal codecs, or avcodec_default_get_buffer2() for the WM
 * image/screen codecs whose internal buffers differ from user ones.
 * Also validates strides and allocates scratch buffers on first use. */
227 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
232 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
233 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
234 s->codec_id != AV_CODEC_ID_MSS2)
235 r = ff_thread_get_buffer(s->avctx, &pic->tf,
236 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* Default-buffer path (presumably the else branch of the codec-id check
 * above; the "else" line is missing from this extract). */
238 pic->f.width = s->avctx->width;
239 pic->f.height = s->avctx->height;
240 pic->f.format = s->avctx->pix_fmt;
241 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
244 if (r < 0 || !pic->f.buf[0]) {
245 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Hardware acceleration: allocate per-picture private data if the
 * hwaccel declares any. */
250 if (s->avctx->hwaccel) {
251 assert(!pic->hwaccel_picture_private);
252 if (s->avctx->hwaccel->priv_data_size) {
253 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
254 if (!pic->hwaccel_priv_buf) {
255 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
258 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* The context caches linesizes; a buffer with different strides than
 * previously seen cannot be used. */
262 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
263 s->uvlinesize != pic->f.linesize[1])) {
264 av_log(s->avctx, AV_LOG_ERROR,
265 "get_buffer() failed (stride changed)\n");
266 ff_mpeg_unref_picture(s, pic);
/* Both chroma planes must share one stride. */
270 if (pic->f.linesize[1] != pic->f.linesize[2]) {
271 av_log(s->avctx, AV_LOG_ERROR,
272 "get_buffer() failed (uv stride mismatch)\n");
273 ff_mpeg_unref_picture(s, pic);
/* Lazily allocate the linesize-dependent scratch buffers. */
277 if (!s->edge_emu_buffer &&
278 (ret = frame_size_alloc(s, pic->f.linesize[0])) < 0) {
279 av_log(s->avctx, AV_LOG_ERROR,
280 "get_buffer() failed to allocate context scratch buffers.\n");
281 ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data buffers (variance, skip, qscale,
 * mb_type, motion vectors, reference indices) and reset the recorded
 * allocation dimensions so the tables get reallocated on next use. */
288 void ff_free_picture_tables(Picture *pic)
292 pic->alloc_mb_width =
293 pic->alloc_mb_height = 0;
295 av_buffer_unref(&pic->mb_var_buf);
296 av_buffer_unref(&pic->mc_mb_var_buf);
297 av_buffer_unref(&pic->mb_mean_buf);
298 av_buffer_unref(&pic->mbskip_table_buf);
299 av_buffer_unref(&pic->qscale_table_buf);
300 av_buffer_unref(&pic->mb_type_buf);
302 for (i = 0; i < 2; i++) {
303 av_buffer_unref(&pic->motion_val_buf[i]);
304 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data tables sized from the current
 * macroblock geometry. Encoder-only tables (variance/mean) and motion
 * tables are allocated conditionally. Returns 0 or AVERROR(ENOMEM);
 * partially allocated buffers are left for the caller to free. */
308 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
310 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
311 const int mb_array_size = s->mb_stride * s->mb_height;
312 const int b8_array_size = s->b8_stride * s->mb_height * 2;
/* +2 / +mb_stride paddings give the error-resilience and prediction
 * code safe margins around the tables. */
316 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
317 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
318 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
320 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
321 return AVERROR(ENOMEM);
/* Encoder statistics tables (presumably guarded by s->encoding; the
 * guard line is missing from this extract). */
324 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
325 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
326 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
327 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
328 return AVERROR(ENOMEM);
/* Motion vector / reference index tables, needed for H.263-family
 * decoding, any encoding, or motion-vector visualisation. */
331 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
332 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
333 int ref_index_size = 4 * mb_array_size;
335 for (i = 0; mv_size && i < 2; i++) {
336 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
337 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
338 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
339 return AVERROR(ENOMEM);
/* Record the geometry these tables were sized for, so a later size
 * change triggers reallocation (see ff_alloc_picture). */
343 pic->alloc_mb_width = s->mb_width;
344 pic->alloc_mb_height = s->mb_height;
/* Ensure every per-picture table buffer is writable (un-shared),
 * copying shared AVBufferRefs as needed via av_buffer_make_writable().
 * NOTE(review): the MAKE_WRITABLE macro body is partially missing from
 * this extract (its condition line is not visible). */
349 static int make_tables_writable(Picture *pic)
352 #define MAKE_WRITABLE(table) \
355 (ret = av_buffer_make_writable(&pic->table)) < 0)\
359 MAKE_WRITABLE(mb_var_buf);
360 MAKE_WRITABLE(mc_mb_var_buf);
361 MAKE_WRITABLE(mb_mean_buf);
362 MAKE_WRITABLE(mbskip_table_buf);
363 MAKE_WRITABLE(qscale_table_buf);
364 MAKE_WRITABLE(mb_type_buf);
366 for (i = 0; i < 2; i++) {
367 MAKE_WRITABLE(motion_val_buf[i]);
368 MAKE_WRITABLE(ref_index_buf[i]);
375 * Allocate a Picture.
376 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Top-level picture allocator: (re)allocates side tables when geometry
 * changed, obtains pixel buffers for non-shared pictures, then derives
 * the convenience pointers into the table buffers. On any failure it
 * unrefs the picture, frees its tables and returns AVERROR(ENOMEM). */
378 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* Stale tables from a different mb geometry must be dropped first. */
382 if (pic->qscale_table_buf)
383 if ( pic->alloc_mb_width != s->mb_width
384 || pic->alloc_mb_height != s->mb_height)
385 ff_free_picture_tables(pic);
/* shared: caller supplied the pixels; non-shared: fetch via get_buffer
 * (the if/else lines are missing from this extract). */
388 av_assert0(pic->f.data[0]);
391 av_assert0(!pic->f.buf[0]);
393 if (alloc_frame_buffer(s, pic) < 0)
/* Cache the strides for later consistency checks. */
396 s->linesize = pic->f.linesize[0];
397 s->uvlinesize = pic->f.linesize[1];
400 if (!pic->qscale_table_buf)
401 ret = alloc_picture_tables(s, pic);
403 ret = make_tables_writable(pic);
/* Derive typed pointers; qscale/mb_type skip the padding rows
 * (2 * mb_stride + 1 elements) at the start of their buffers. */
408 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
409 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
410 pic->mb_mean = pic->mb_mean_buf->data;
413 pic->mbskip_table = pic->mbskip_table_buf->data;
414 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
415 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
417 if (pic->motion_val_buf[0]) {
418 for (i = 0; i < 2; i++) {
419 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
420 pic->ref_index[i] = pic->ref_index_buf[i]->data;
426 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
427 ff_mpeg_unref_picture(s, pic);
428 ff_free_picture_tables(pic);
429 return AVERROR(ENOMEM);
433 * Deallocate a picture.
/* Releases the pixel buffers and hwaccel private data of pic, then
 * zeroes every Picture field located after mb_mean (computed via
 * offsetof) so table pointers survive only if still referenced. */
435 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
437 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
440 /* WM Image / Screen codecs allocate internal buffers with different
441 * dimensions / colorspaces; ignore user-defined callbacks for these. */
442 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
443 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
444 s->codec_id != AV_CODEC_ID_MSS2)
445 ff_thread_release_buffer(s->avctx, &pic->tf);
447 av_frame_unref(&pic->f);
449 av_buffer_unref(&pic->hwaccel_priv_buf);
451 if (pic->needs_realloc)
452 ff_free_picture_tables(pic);
/* Clear everything after the buffer-ref fields in one memset. */
454 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's side-data table refs point at the same underlying buffers
 * as src's (frame-threading sync). Each table is re-referenced only if
 * it currently refers to a different buffer; on a failed av_buffer_ref
 * all of dst's tables are freed and ENOMEM returned. */
457 static int update_picture_tables(Picture *dst, Picture *src)
461 #define UPDATE_TABLE(table)\
464 (!dst->table || dst->table->buffer != src->table->buffer)) {\
465 av_buffer_unref(&dst->table);\
466 dst->table = av_buffer_ref(src->table);\
468 ff_free_picture_tables(dst);\
469 return AVERROR(ENOMEM);\
474 UPDATE_TABLE(mb_var_buf);
475 UPDATE_TABLE(mc_mb_var_buf);
476 UPDATE_TABLE(mb_mean_buf);
477 UPDATE_TABLE(mbskip_table_buf);
478 UPDATE_TABLE(qscale_table_buf);
479 UPDATE_TABLE(mb_type_buf);
480 for (i = 0; i < 2; i++) {
481 UPDATE_TABLE(motion_val_buf[i]);
482 UPDATE_TABLE(ref_index_buf[i]);
/* Copy the derived data pointers verbatim — they point into the shared
 * buffers just referenced above. */
485 dst->mb_var = src->mb_var;
486 dst->mc_mb_var = src->mc_mb_var;
487 dst->mb_mean = src->mb_mean;
488 dst->mbskip_table = src->mbskip_table;
489 dst->qscale_table = src->qscale_table;
490 dst->mb_type = src->mb_type;
491 for (i = 0; i < 2; i++) {
492 dst->motion_val[i] = src->motion_val[i];
493 dst->ref_index[i] = src->ref_index[i];
496 dst->alloc_mb_width = src->alloc_mb_width;
497 dst->alloc_mb_height = src->alloc_mb_height;
/* Create dst as a new reference to src: frame buffers via
 * ff_thread_ref_frame(), side tables via update_picture_tables(),
 * hwaccel private data via av_buffer_ref(), then copy scalar state.
 * On failure (error path at line 536) dst is unreffed again. */
502 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
506 av_assert0(!dst->f.buf[0]);
507 av_assert0(src->f.buf[0]);
511 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
515 ret = update_picture_tables(dst, src);
519 if (src->hwaccel_picture_private) {
520 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
521 if (!dst->hwaccel_priv_buf)
523 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
/* Non-refcounted scalar metadata is copied by value. */
526 dst->field_picture = src->field_picture;
527 dst->mb_var_sum = src->mb_var_sum;
528 dst->mc_mb_var_sum = src->mc_mb_var_sum;
529 dst->b_frame_score = src->b_frame_score;
530 dst->needs_realloc = src->needs_realloc;
531 dst->reference = src->reference;
532 dst->shared = src->shared;
536 ff_mpeg_unref_picture(s, dst);
/* Swap the U and V block pointers (pblocks[4] and pblocks[5]) — used for
 * the "VCR2" codec tag, see the callers in init_duplicate_context() and
 * ff_update_duplicate_context().
 * NOTE(review): only one half of the pointer swap is visible in this
 * extract; the temporary save/restore lines appear to be missing. */
540 static void exchange_uv(MpegEncContext *s)
545 s->pblocks[4] = s->pblocks[5];
/* Initialize the per-slice-thread part of an MpegEncContext: motion
 * estimation maps, optional noise-reduction accumulator, DCT block
 * storage and (for H.263-family) AC prediction values. Returns 0 on
 * success; on allocation failure returns -1 and relies on
 * ff_MPV_common_end() to free whatever was allocated. */
549 static int init_duplicate_context(MpegEncContext *s)
551 int y_size = s->b8_stride * (2 * s->mb_height + 1);
552 int c_size = s->mb_stride * (s->mb_height + 1);
553 int yc_size = y_size + 2 * c_size;
561 s->obmc_scratchpad = NULL;
564 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
565 ME_MAP_SIZE * sizeof(uint32_t), fail)
566 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
567 ME_MAP_SIZE * sizeof(uint32_t), fail)
568 if (s->avctx->noise_reduction) {
569 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
570 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coeffs, double-buffered (interlaced use). */
573 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
574 s->block = s->blocks[0];
576 for (i = 0; i < 12; i++) {
577 s->pblocks[i] = &s->block[i];
/* VCR2 stores chroma blocks in swapped order. */
579 if (s->avctx->codec_tag == AV_RL32("VCR2"))
582 if (s->out_format == FMT_H263) {
584 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
585 yc_size * sizeof(int16_t) * 16, fail);
/* ac_val[0]=luma, ac_val[1]/[2]=chroma, each offset past the padding
 * row/column at the top-left of its plane. */
586 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
587 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
588 s->ac_val[2] = s->ac_val[1] + c_size;
593 return -1; // free() through ff_MPV_common_end()
/* Free everything allocated by init_duplicate_context() plus the
 * linesize-dependent scratch buffers from frame_size_alloc().
 * obmc_scratchpad aliases me.scratchpad, so it is only NULLed. */
596 static void free_duplicate_context(MpegEncContext *s)
601 av_freep(&s->edge_emu_buffer);
602 av_freep(&s->me.scratchpad);
606 s->obmc_scratchpad = NULL;
608 av_freep(&s->dct_error_sum);
609 av_freep(&s->me.map);
610 av_freep(&s->me.score_map);
611 av_freep(&s->blocks);
612 av_freep(&s->ac_val_base);
/* Copy the per-thread fields from src into bak via the COPY() macro —
 * used by ff_update_duplicate_context() to preserve thread-local state
 * across a whole-struct memcpy.
 * NOTE(review): most COPY() lines are missing from this extract; only
 * a few representative fields are visible. */
616 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
618 #define COPY(a) bak->a = src->a
619 COPY(edge_emu_buffer);
624 COPY(obmc_scratchpad);
631 COPY(me.map_generation);
/* Synchronize a slice-thread context with the master: memcpy the whole
 * struct, then restore the thread-local fields saved beforehand, and
 * re-derive the pblocks pointers (with the VCR2 U/V swap). Also
 * lazily (re)allocates the scratch buffers for this thread. */
643 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
647 // FIXME copy only needed parts
649 backup_duplicate_context(&bak, dst);
650 memcpy(dst, src, sizeof(MpegEncContext));
651 backup_duplicate_context(dst, &bak);
/* pblocks must point into THIS context's block array, not src's. */
652 for (i = 0; i < 12; i++) {
653 dst->pblocks[i] = &dst->block[i];
655 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
657 if (!dst->edge_emu_buffer &&
658 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
659 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
660 "scratch buffers.\n");
663 // STOP_TIMER("update_duplicate_context")
664 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading state transfer: bring decoder context s (in dst) up
 * to date with s1 (in src). Handles first-time init, resolution
 * changes, picture references, bitstream buffer copy, scratch buffers
 * and assorted scalar state. Returns 0 or a negative AVERROR. */
668 int ff_mpeg_update_thread_context(AVCodecContext *dst,
669 const AVCodecContext *src)
672 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
679 // FIXME can parameters change on I-frames?
680 // in that case dst may need a reinit
681 if (!s->context_initialized) {
682 memcpy(s, s1, sizeof(MpegEncContext));
/* The bitstream buffer is per-context; do not share s1's. */
685 s->bitstream_buffer = NULL;
686 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
688 if (s1->context_initialized){
689 // s->picture_range_start += MAX_PICTURE_COUNT;
690 // s->picture_range_end += MAX_PICTURE_COUNT;
691 if((ret = ff_MPV_common_init(s)) < 0){
692 memset(s, 0, sizeof(MpegEncContext));
/* Resolution change (or explicit reinit request) between threads. */
699 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
700 s->context_reinit = 0;
701 s->height = s1->height;
702 s->width = s1->width;
703 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
707 s->avctx->coded_height = s1->avctx->coded_height;
708 s->avctx->coded_width = s1->avctx->coded_width;
709 s->avctx->width = s1->avctx->width;
710 s->avctx->height = s1->avctx->height;
712 s->coded_picture_number = s1->coded_picture_number;
713 s->picture_number = s1->picture_number;
715 av_assert0(!s->picture || s->picture != s1->picture);
/* Re-reference every picture slot from the source context. */
717 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
718 ff_mpeg_unref_picture(s, &s->picture[i]);
719 if (s1->picture[i].f.buf[0] &&
720 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
724 #define UPDATE_PICTURE(pic)\
726 ff_mpeg_unref_picture(s, &s->pic);\
727 if (s1->pic.f.buf[0])\
728 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
730 ret = update_picture_tables(&s->pic, &s1->pic);\
735 UPDATE_PICTURE(current_picture);
736 UPDATE_PICTURE(last_picture);
737 UPDATE_PICTURE(next_picture);
/* Pointers must be rebased from s1's picture array to s's. */
739 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
740 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
741 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
743 // Error/bug resilience
744 s->next_p_frame_damaged = s1->next_p_frame_damaged;
745 s->workaround_bugs = s1->workaround_bugs;
746 s->padding_bug_score = s1->padding_bug_score;
/* Bulk-copy the struct range [last_time_base, pb_field_time] —
 * relies on the field layout of MpegEncContext. */
749 memcpy(&s->last_time_base, &s1->last_time_base,
750 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
751 (char *) &s1->last_time_base);
754 s->max_b_frames = s1->max_b_frames;
755 s->low_delay = s1->low_delay;
756 s->droppable = s1->droppable;
758 // DivX handling (doesn't work)
759 s->divx_packed = s1->divx_packed;
/* Duplicate any pending bitstream data, padded for the bit reader. */
761 if (s1->bitstream_buffer) {
762 if (s1->bitstream_buffer_size +
763 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
764 av_fast_malloc(&s->bitstream_buffer,
765 &s->allocated_bitstream_buffer_size,
766 s1->allocated_bitstream_buffer_size);
767 s->bitstream_buffer_size = s1->bitstream_buffer_size;
768 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
769 s1->bitstream_buffer_size);
770 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
771 FF_INPUT_BUFFER_PADDING_SIZE);
774 // linesize dependend scratch buffer allocation
775 if (!s->edge_emu_buffer)
777 if (frame_size_alloc(s, s1->linesize) < 0) {
778 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
779 "scratch buffers.\n");
780 return AVERROR(ENOMEM);
783 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
784 "be allocated due to unknown size.\n");
787 // MPEG2/interlacing info
/* Another layout-dependent bulk copy: [progressive_sequence, rtp_mode). */
788 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
789 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
791 if (!s1->first_field) {
792 s->last_pict_type = s1->pict_type;
793 if (s1->current_picture_ptr)
794 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
801 * Set the given MpegEncContext to common defaults
802 * (same for encoding and decoding).
803 * The changed fields will not depend upon the
804 * prior state of the MpegEncContext.
806 void ff_MPV_common_defaults(MpegEncContext *s)
/* Default quantiser tables: MPEG-1 DC scale for both planes, identity
 * chroma-qscale mapping. */
808 s->y_dc_scale_table =
809 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
810 s->chroma_qscale_table = ff_default_chroma_qscale_table;
811 s->progressive_frame = 1;
812 s->progressive_sequence = 1;
813 s->picture_structure = PICT_FRAME;
815 s->coded_picture_number = 0;
816 s->picture_number = 0;
821 s->slice_context_count = 1;
825 * Set the given MpegEncContext to defaults for decoding.
826 * the changed fields will not depend upon
827 * the prior state of the MpegEncContext.
829 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Currently just the common defaults; decoder-specific defaults would
 * go after this call. */
831 ff_MPV_common_defaults(s);
/* Wire up the ERContext embedded in s: mirror the macroblock geometry,
 * share the skip/intra/DC tables, allocate the error-status and temp
 * buffers, and install mpeg_er_decode_mb as the concealment callback.
 * Returns 0 or AVERROR(ENOMEM) (freeing both buffers on failure). */
834 static int init_er(MpegEncContext *s)
836 ERContext *er = &s->er;
837 int mb_array_size = s->mb_height * s->mb_stride;
840 er->avctx = s->avctx;
843 er->mb_index2xy = s->mb_index2xy;
844 er->mb_num = s->mb_num;
845 er->mb_width = s->mb_width;
846 er->mb_height = s->mb_height;
847 er->mb_stride = s->mb_stride;
848 er->b8_stride = s->b8_stride;
850 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
851 er->error_status_table = av_mallocz(mb_array_size);
852 if (!er->er_temp_buffer || !er->error_status_table)
/* Shared (not owned) tables from the main context. */
855 er->mbskip_table = s->mbskip_table;
856 er->mbintra_table = s->mbintra_table;
858 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
859 er->dc_val[i] = s->dc_val[i];
861 er->decode_mb = mpeg_er_decode_mb;
866 av_freep(&er->er_temp_buffer);
867 av_freep(&er->error_status_table);
868 return AVERROR(ENOMEM);
872 * Initialize and allocates MpegEncContext fields dependent on the resolution.
874 static int init_context_frame(MpegEncContext *s)
876 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Derive macroblock geometry from the pixel dimensions. Strides get
 * +1 padding columns for prediction/error-resilience margins. */
878 s->mb_width = (s->width + 15) / 16;
879 s->mb_stride = s->mb_width + 1;
880 s->b8_stride = s->mb_width * 2 + 1;
881 s->b4_stride = s->mb_width * 4 + 1;
882 mb_array_size = s->mb_height * s->mb_stride;
883 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
885 /* set default edge pos, will be overriden
886 * in decode_header if needed */
887 s->h_edge_pos = s->mb_width * 16;
888 s->v_edge_pos = s->mb_height * 16;
890 s->mb_num = s->mb_width * s->mb_height;
/* Block wrap values (some entries missing from this extract). */
895 s->block_wrap[3] = s->b8_stride;
897 s->block_wrap[5] = s->mb_stride;
899 y_size = s->b8_stride * (2 * s->mb_height + 1);
900 c_size = s->mb_stride * (s->mb_height + 1);
901 yc_size = y_size + 2 * c_size;
903 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
904 for (y = 0; y < s->mb_height; y++)
905 for (x = 0; x < s->mb_width; x++)
906 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
908 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
911 /* Allocate MV tables */
912 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
913 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
914 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
915 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
916 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
917 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
/* Public MV table pointers skip the one-row + one-column padding. */
918 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
919 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
920 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
921 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
922 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
923 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
925 /* Allocate MB type table */
926 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
928 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
930 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
931 mb_array_size * sizeof(float), fail);
932 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
933 mb_array_size * sizeof(float), fail);
/* Field-MV tables for MPEG-4 or interlaced ME. */
937 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
938 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
939 /* interlaced direct mode decoding tables */
940 for (i = 0; i < 2; i++) {
942 for (j = 0; j < 2; j++) {
943 for (k = 0; k < 2; k++) {
944 FF_ALLOCZ_OR_GOTO(s->avctx,
945 s->b_field_mv_table_base[i][j][k],
946 mv_table_size * 2 * sizeof(int16_t),
948 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
951 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
952 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
953 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
955 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
/* H.263-family coded-block / cbp / prediction-direction tables. */
958 if (s->out_format == FMT_H263) {
960 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
961 s->coded_block = s->coded_block_base + s->b8_stride + 1;
963 /* cbp, ac_pred, pred_dir */
964 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
965 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
968 if (s->h263_pred || s->h263_plus || !s->encoding) {
970 // MN: we need these for error resilience of intra-frames
971 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
972 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
973 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
974 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 = neutral DC predictor (128 << 3). */
975 for (i = 0; i < yc_size; i++)
976 s->dc_val_base[i] = 1024;
979 /* which mb is a intra block */
980 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
981 memset(s->mbintra_table, 1, mb_array_size);
983 /* init macroblock skip table */
984 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
985 // Note the + 1 is for a quicker mpeg4 slice_end detection
989 return AVERROR(ENOMEM);
993 * init common structure for both encoder and decoder.
994 * this assumes that some variables like width/height are already set
996 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* One slice context per slice thread; encoders may request an explicit
 * slice count. */
999 int nb_slices = (HAVE_THREADS &&
1000 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1001 s->avctx->thread_count : 1;
1003 if (s->encoding && s->avctx->slices)
1004 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 rounds mb_height to an even number of MB rows. */
1006 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1007 s->mb_height = (s->height + 31) / 32 * 2;
1009 s->mb_height = (s->height + 15) / 16;
1011 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1012 av_log(s->avctx, AV_LOG_ERROR,
1013 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp the slice count to both MAX_THREADS and the MB-row count. */
1017 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1020 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1022 max_slices = MAX_THREADS;
1023 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1024 " reducing to %d\n", nb_slices, max_slices);
1025 nb_slices = max_slices;
1028 if ((s->width || s->height) &&
1029 av_image_check_size(s->width, s->height, 0, s->avctx))
1032 ff_dct_common_init(s);
1034 s->flags = s->avctx->flags;
1035 s->flags2 = s->avctx->flags2;
1037 /* set chroma shifts */
1038 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1040 &s->chroma_y_shift);
1042 /* convert fourcc to upper case */
1043 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1045 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
/* Picture pool + the three standalone reference Picture structs. */
1047 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1048 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1049 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1050 av_frame_unref(&s->picture[i].f);
1052 memset(&s->next_picture, 0, sizeof(s->next_picture));
1053 memset(&s->last_picture, 0, sizeof(s->last_picture));
1054 memset(&s->current_picture, 0, sizeof(s->current_picture));
1055 av_frame_unref(&s->next_picture.f);
1056 av_frame_unref(&s->last_picture.f);
1057 av_frame_unref(&s->current_picture.f);
1059 if (init_context_frame(s))
1062 s->parse_context.state = -1;
1064 s->context_initialized = 1;
1065 s->thread_context[0] = s;
1067 // if (s->width && s->height) {
/* Spawn and initialize one duplicated context per extra slice; MB rows
 * are divided evenly (with rounding) across slices. */
1068 if (nb_slices > 1) {
1069 for (i = 1; i < nb_slices; i++) {
1070 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1071 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1074 for (i = 0; i < nb_slices; i++) {
1075 if (init_duplicate_context(s->thread_context[i]) < 0)
1077 s->thread_context[i]->start_mb_y =
1078 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1079 s->thread_context[i]->end_mb_y =
1080 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1083 if (init_duplicate_context(s) < 0)
1086 s->end_mb_y = s->mb_height;
1088 s->slice_context_count = nb_slices;
/* Error path: tear everything down again. */
1093 ff_MPV_common_end(s);
1098 * Frees and resets MpegEncContext fields depending on the resolution.
1099 * Is used during resolution changes to avoid a full reinitialization of the
/* Counterpart of init_context_frame(): frees every resolution-dependent
 * table and NULLs the derived pointers into the freed bases. */
1102 static int free_context_frame(MpegEncContext *s)
1106 av_freep(&s->mb_type);
1107 av_freep(&s->p_mv_table_base);
1108 av_freep(&s->b_forw_mv_table_base);
1109 av_freep(&s->b_back_mv_table_base);
1110 av_freep(&s->b_bidir_forw_mv_table_base);
1111 av_freep(&s->b_bidir_back_mv_table_base);
1112 av_freep(&s->b_direct_mv_table_base);
/* The non-base pointers offset into the freed arrays — clear them. */
1113 s->p_mv_table = NULL;
1114 s->b_forw_mv_table = NULL;
1115 s->b_back_mv_table = NULL;
1116 s->b_bidir_forw_mv_table = NULL;
1117 s->b_bidir_back_mv_table = NULL;
1118 s->b_direct_mv_table = NULL;
1119 for (i = 0; i < 2; i++) {
1120 for (j = 0; j < 2; j++) {
1121 for (k = 0; k < 2; k++) {
1122 av_freep(&s->b_field_mv_table_base[i][j][k]);
1123 s->b_field_mv_table[i][j][k] = NULL;
1125 av_freep(&s->b_field_select_table[i][j]);
1126 av_freep(&s->p_field_mv_table_base[i][j]);
1127 s->p_field_mv_table[i][j] = NULL;
1129 av_freep(&s->p_field_select_table[i]);
1132 av_freep(&s->dc_val_base);
1133 av_freep(&s->coded_block_base);
1134 av_freep(&s->mbintra_table);
1135 av_freep(&s->cbp_table);
1136 av_freep(&s->pred_dir_table);
1138 av_freep(&s->mbskip_table);
1140 av_freep(&s->er.error_status_table);
1141 av_freep(&s->er.er_temp_buffer);
1142 av_freep(&s->mb_index2xy);
1143 av_freep(&s->lambda_table);
1145 av_freep(&s->cplx_tab);
1146 av_freep(&s->bits_tab);
/* Force stride re-validation on the next buffer allocation. */
1148 s->linesize = s->uvlinesize = 0;
/* Reinitialize the context after a mid-stream resolution change: tear down the
 * per-slice thread contexts and frame-size-dependent tables, recompute the
 * macroblock dimensions, then rebuild everything via init_context_frame().
 * NOTE(review): excerpt is line-sampled; error-path lines and some braces are
 * not visible here. */
1153 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Free duplicate contexts for all slices; index 0 aliases s itself, so only
 * indices >= 1 are actually av_freep()'d. */
1157 if (s->slice_context_count > 1) {
1158 for (i = 0; i < s->slice_context_count; i++) {
1159 free_duplicate_context(s->thread_context[i]);
1161 for (i = 1; i < s->slice_context_count; i++) {
1162 av_freep(&s->thread_context[i]);
1165 free_duplicate_context(s);
1167 if ((err = free_context_frame(s)) < 0)
/* Force reallocation of every picture buffer at the new size. */
1171 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1172 s->picture[i].needs_realloc = 1;
1175 s->last_picture_ptr =
1176 s->next_picture_ptr =
1177 s->current_picture_ptr = NULL;
/* Interlaced MPEG-2 rounds mb_height to a multiple of two 16-line field MBs. */
1180 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1181 s->mb_height = (s->height + 31) / 32 * 2;
1183 s->mb_height = (s->height + 15) / 16;
1185 if ((s->width || s->height) &&
1186 av_image_check_size(s->width, s->height, 0, s->avctx))
1187 return AVERROR_INVALIDDATA;
1189 if ((err = init_context_frame(s)))
1192 s->thread_context[0] = s;
1194 if (s->width && s->height) {
1195 int nb_slices = s->slice_context_count;
/* Recreate slice thread contexts as shallow copies of s, then assign each a
 * contiguous, rounded range of macroblock rows. */
1196 if (nb_slices > 1) {
1197 for (i = 1; i < nb_slices; i++) {
1198 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1199 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1202 for (i = 0; i < nb_slices; i++) {
1203 if (init_duplicate_context(s->thread_context[i]) < 0)
1205 s->thread_context[i]->start_mb_y =
1206 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1207 s->thread_context[i]->end_mb_y =
1208 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1211 err = init_duplicate_context(s);
1215 s->end_mb_y = s->mb_height;
1217 s->slice_context_count = nb_slices;
/* Error path (label not visible in excerpt): full teardown on failure. */
1222 ff_MPV_common_end(s);
1226 /* init common structure for both encoder and decoder */
/* Full teardown counterpart of the common init: releases slice contexts,
 * bitstream/parse buffers, every picture and its tables, and all
 * frame-size-dependent state. Safe to call on a partially initialized
 * context (av_freep / unref tolerate NULL).
 * NOTE(review): excerpt is line-sampled; a few braces are not visible here. */
1227 void ff_MPV_common_end(MpegEncContext *s)
1231 if (s->slice_context_count > 1) {
1232 for (i = 0; i < s->slice_context_count; i++) {
1233 free_duplicate_context(s->thread_context[i]);
/* thread_context[0] aliases s, so only indices >= 1 are freed. */
1235 for (i = 1; i < s->slice_context_count; i++) {
1236 av_freep(&s->thread_context[i]);
1238 s->slice_context_count = 1;
1239 } else free_duplicate_context(s);
1241 av_freep(&s->parse_context.buffer);
1242 s->parse_context.buffer_size = 0;
1244 av_freep(&s->bitstream_buffer);
1245 s->allocated_bitstream_buffer_size = 0;
/* Release the whole picture pool, then the three working Picture copies. */
1248 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1249 ff_free_picture_tables(&s->picture[i]);
1250 ff_mpeg_unref_picture(s, &s->picture[i]);
1253 av_freep(&s->picture);
1254 ff_free_picture_tables(&s->last_picture);
1255 ff_mpeg_unref_picture(s, &s->last_picture);
1256 ff_free_picture_tables(&s->current_picture);
1257 ff_mpeg_unref_picture(s, &s->current_picture);
1258 ff_free_picture_tables(&s->next_picture);
1259 ff_mpeg_unref_picture(s, &s->next_picture);
1261 free_context_frame(s);
1263 s->context_initialized = 0;
1264 s->last_picture_ptr =
1265 s->next_picture_ptr =
1266 s->current_picture_ptr = NULL;
1267 s->linesize = s->uvlinesize = 0;
/* Initialize an RLTable's derived lookup tables (max_level[], max_run[],
 * index_run[]) from its run/level tables. If static_store is non-NULL the
 * results live in that caller-provided static buffer (and init is skipped if
 * already done); otherwise they are av_malloc()'d.
 * NOTE(review): excerpt is line-sampled; the start/end setup for the two
 * last/not-last passes is not visible here. */
1270 av_cold void ff_init_rl(RLTable *rl,
1271 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1273 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1274 uint8_t index_run[MAX_RUN + 1];
1275 int last, run, level, start, end, i;
1277 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1278 if (static_store && rl->max_level[0])
1281 /* compute max_level[], max_run[] and index_run[] */
/* Two passes: last == 0 (codes before rl->last) and last == 1 (rest). */
1282 for (last = 0; last < 2; last++) {
/* rl->n doubles as the "unset" sentinel in index_run[]. */
1291 memset(max_level, 0, MAX_RUN + 1);
1292 memset(max_run, 0, MAX_LEVEL + 1);
1293 memset(index_run, rl->n, MAX_RUN + 1);
1294 for (i = start; i < end; i++) {
1295 run = rl->table_run[i];
1296 level = rl->table_level[i];
1297 if (index_run[run] == rl->n)
1299 if (level > max_level[run])
1300 max_level[run] = level;
1301 if (run > max_run[level])
1302 max_run[level] = run;
/* Copy the scratch arrays into static_store (packed at fixed offsets) or
 * into freshly allocated buffers. */
1305 rl->max_level[last] = static_store[last];
1307 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1308 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1310 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1312 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1313 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1315 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1317 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1318 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Build the per-qscale RL-VLC tables: for each q in [0,32) pre-apply the
 * dequantization (level * qmul + qadd) to every decodable VLC entry so the
 * decoder can skip that multiply at runtime.
 * NOTE(review): excerpt is line-sampled; the qmul setup and the
 * illegal/escape-code branch bodies are not visible here. */
1322 av_cold void ff_init_vlc_rl(RLTable *rl)
1326 for (q = 0; q < 32; q++) {
/* (q - 1) | 1 yields the odd rounding offset used by H.263-style dequant. */
1328 int qadd = (q - 1) | 1;
1334 for (i = 0; i < rl->vlc.table_size; i++) {
1335 int code = rl->vlc.table[i][0];
1336 int len = rl->vlc.table[i][1];
1339 if (len == 0) { // illegal code
1342 } else if (len < 0) { // more bits needed
1346 if (code == rl->n) { // esc
/* Normal code: precompute run (+1 bias), dequantized level, and fold the
 * "last coefficient" flag into run via the +192 offset. */
1350 run = rl->table_run[code] + 1;
1351 level = rl->table_level[code] * qmul + qadd;
1352 if (code >= rl->last) run += 192;
1355 rl->rl_vlc[q][i].len = len;
1356 rl->rl_vlc[q][i].level = level;
1357 rl->rl_vlc[q][i].run = run;
/* Drop every picture in the pool that is not currently a reference frame. */
1362 static void release_unused_pictures(MpegEncContext *s)
1366 /* release non reference frames */
1367 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1368 if (!s->picture[i].reference)
1369 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Return whether pic can be recycled: never the last picture, otherwise one
 * with no allocated buffer, or one flagged for realloc that is not held as a
 * delayed reference. (Return statements fall outside this sampled excerpt.) */
1373 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1375 if (pic == s->last_picture_ptr)
1377 if (pic->f.buf[0] == NULL)
1379 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Find a free slot in s->picture[]: first prefer a slot with no buffer at all
 * (avoiding a realloc), then fall back to any pic_is_unused() slot. Running
 * out of slots is treated as an internal error — see the rationale below.
 * NOTE(review): excerpt is line-sampled; the 'shared' handling and return
 * statements are not visible here. */
1384 static int find_unused_picture(MpegEncContext *s, int shared)
1389 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1390 if (s->picture[i].f.buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1394 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1395 if (pic_is_unused(s, &s->picture[i]))
1400 av_log(s->avctx, AV_LOG_FATAL,
1401 "Internal error, picture buffer overflow\n");
1402 /* We could return -1, but the codec would crash trying to draw into a
1403 * non-existing frame anyway. This is safer than waiting for a random crash.
1404 * Also the return of this is never useful, an encoder must only allocate
1405 * as much as allowed in the specification. This has no relationship to how
1406 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1407 * enough for such valid streams).
1408 * Plus, a decoder has to check stream validity and remove frames if too
1409 * many reference frames are around. Waiting for "OOM" is not correct at
1410 * all. Similarly, missing reference frames have to be replaced by
1411 * interpolated/MC frames, anything else is a bug in the codec ...
/* Public wrapper around find_unused_picture(): additionally performs the
 * deferred reallocation for slots flagged needs_realloc by freeing their
 * tables and buffers before handing the index back to the caller. */
1417 int ff_find_unused_picture(MpegEncContext *s, int shared)
1419 int ret = find_unused_picture(s, shared);
1421 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1422 if (s->picture[ret].needs_realloc) {
1423 s->picture[ret].needs_realloc = 0;
1424 ff_free_picture_tables(&s->picture[ret]);
1425 ff_mpeg_unref_picture(s, &s->picture[ret]);
/* Encoder noise reduction: refresh the per-coefficient DCT offset tables from
 * the accumulated error sums, separately for intra and inter blocks. */
1431 static void update_noise_reduction(MpegEncContext *s)
1435 for (intra = 0; intra < 2; intra++) {
/* Halve the accumulators once the sample count grows large, so the
 * statistics track recent frames instead of saturating. */
1436 if (s->dct_count[intra] > (1 << 16)) {
1437 for (i = 0; i < 64; i++) {
1438 s->dct_error_sum[intra][i] >>= 1;
1440 s->dct_count[intra] >>= 1;
/* offset = strength * count / error_sum, with rounding; +1 avoids /0. */
1443 for (i = 0; i < 64; i++) {
1444 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1445 s->dct_count[intra] +
1446 s->dct_error_sum[intra][i] / 2) /
1447 (s->dct_error_sum[intra][i] + 1);
1453 * generic function for encode/decode called after coding/decoding
1454 * the header and before a frame is coded/decoded.
/* Sets up current/last/next picture pointers and buffers for the frame about
 * to be coded/decoded, allocates dummy reference frames when references are
 * missing, selects the dequantizer, and updates noise-reduction tables.
 * NOTE(review): excerpt is line-sampled; many error-return lines, braces and
 * some declarations are not visible here. */
1456 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1462 if (!ff_thread_can_start_frame(avctx)) {
1463 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1467 /* mark & release old frames */
1468 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1469 s->last_picture_ptr != s->next_picture_ptr &&
1470 s->last_picture_ptr->f.buf[0]) {
1471 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1474 /* release forgotten pictures */
1475 /* if (mpeg124/h263) */
/* A referenced picture that is neither last nor next should not exist in
 * single-thread decoding — log and drop it ("zombie"). */
1477 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1478 if (&s->picture[i] != s->last_picture_ptr &&
1479 &s->picture[i] != s->next_picture_ptr &&
1480 s->picture[i].reference && !s->picture[i].needs_realloc) {
1481 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1482 av_log(avctx, AV_LOG_ERROR,
1483 "releasing zombie picture\n");
1484 ff_mpeg_unref_picture(s, &s->picture[i]);
1489 ff_mpeg_unref_picture(s, &s->current_picture);
1492 release_unused_pictures(s);
/* Pick the picture to decode/encode into: reuse a preset current_picture_ptr
 * without a buffer, else grab a fresh slot from the pool. */
1494 if (s->current_picture_ptr &&
1495 s->current_picture_ptr->f.buf[0] == NULL) {
1496 // we already have a unused image
1497 // (maybe it was set before reading the header)
1498 pic = s->current_picture_ptr;
1500 i = ff_find_unused_picture(s, 0);
1502 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1505 pic = &s->picture[i];
1509 if (!s->droppable) {
1510 if (s->pict_type != AV_PICTURE_TYPE_B)
1514 pic->f.coded_picture_number = s->coded_picture_number++;
1516 if (ff_alloc_picture(s, pic, 0) < 0)
1519 s->current_picture_ptr = pic;
1520 // FIXME use only the vars from current_pic
/* Propagate field/interlacing metadata onto the output frame. */
1521 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1522 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1523 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1524 if (s->picture_structure != PICT_FRAME)
1525 s->current_picture_ptr->f.top_field_first =
1526 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1528 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1529 !s->progressive_sequence;
1530 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1533 s->current_picture_ptr->f.pict_type = s->pict_type;
1534 // if (s->flags && CODEC_FLAG_QSCALE)
1535 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1536 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1538 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1539 s->current_picture_ptr)) < 0)
/* Non-B frames become references: rotate next -> last, current -> next. */
1542 if (s->pict_type != AV_PICTURE_TYPE_B) {
1543 s->last_picture_ptr = s->next_picture_ptr;
1545 s->next_picture_ptr = s->current_picture_ptr;
1547 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1548 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1549 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1550 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1551 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1552 s->pict_type, s->droppable);
/* Missing last reference (stream starts on a non-keyframe, a B frame, or a
 * field picture): synthesize a gray dummy so motion compensation has data. */
1554 if ((s->last_picture_ptr == NULL ||
1555 s->last_picture_ptr->f.buf[0] == NULL) &&
1556 (s->pict_type != AV_PICTURE_TYPE_I ||
1557 s->picture_structure != PICT_FRAME)) {
1558 int h_chroma_shift, v_chroma_shift;
1559 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1560 &h_chroma_shift, &v_chroma_shift);
1561 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f.buf[0])
1562 av_log(avctx, AV_LOG_DEBUG,
1563 "allocating dummy last picture for B frame\n");
1564 else if (s->pict_type != AV_PICTURE_TYPE_I)
1565 av_log(avctx, AV_LOG_ERROR,
1566 "warning: first frame is no keyframe\n");
1567 else if (s->picture_structure != PICT_FRAME)
1568 av_log(avctx, AV_LOG_DEBUG,
1569 "allocate dummy last picture for field based first keyframe\n");
1571 /* Allocate a dummy frame */
1572 i = ff_find_unused_picture(s, 0);
1574 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1577 s->last_picture_ptr = &s->picture[i];
1578 s->last_picture_ptr->f.key_frame = 0;
1579 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1580 s->last_picture_ptr = NULL;
/* 0x80 = mid-gray for both luma and chroma planes. */
1584 memset(s->last_picture_ptr->f.data[0], 0x80,
1585 avctx->height * s->last_picture_ptr->f.linesize[0]);
1586 memset(s->last_picture_ptr->f.data[1], 0x80,
1587 (avctx->height >> v_chroma_shift) *
1588 s->last_picture_ptr->f.linesize[1]);
1589 memset(s->last_picture_ptr->f.data[2], 0x80,
1590 (avctx->height >> v_chroma_shift) *
1591 s->last_picture_ptr->f.linesize[2]);
/* H.263/FLV use luma 16 (black in limited range) for the dummy frame. */
1593 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1594 for(i=0; i<avctx->height; i++)
1595 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
/* Mark both fields complete so frame-threaded consumers never wait on it. */
1598 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1599 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same dummy-frame fallback for a missing next (forward) reference of a B. */
1601 if ((s->next_picture_ptr == NULL ||
1602 s->next_picture_ptr->f.buf[0] == NULL) &&
1603 s->pict_type == AV_PICTURE_TYPE_B) {
1604 /* Allocate a dummy frame */
1605 i = ff_find_unused_picture(s, 0);
1607 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1610 s->next_picture_ptr = &s->picture[i];
1611 s->next_picture_ptr->f.key_frame = 0;
1612 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1613 s->next_picture_ptr = NULL;
1616 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1617 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1620 #if 0 // BUFREF-FIXME
1621 memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1622 memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
/* Refresh the working last/next Picture copies from their pointers. */
1624 if (s->last_picture_ptr) {
1625 ff_mpeg_unref_picture(s, &s->last_picture);
1626 if (s->last_picture_ptr->f.buf[0] &&
1627 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1628 s->last_picture_ptr)) < 0)
1631 if (s->next_picture_ptr) {
1632 ff_mpeg_unref_picture(s, &s->next_picture);
1633 if (s->next_picture_ptr->f.buf[0] &&
1634 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1635 s->next_picture_ptr)) < 0)
1639 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1640 s->last_picture_ptr->f.buf[0]));
/* Field pictures: offset data to the selected field and double linesizes so
 * plane accesses step over the other field's lines. */
1642 if (s->picture_structure!= PICT_FRAME) {
1644 for (i = 0; i < 4; i++) {
1645 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1646 s->current_picture.f.data[i] +=
1647 s->current_picture.f.linesize[i];
1649 s->current_picture.f.linesize[i] *= 2;
1650 s->last_picture.f.linesize[i] *= 2;
1651 s->next_picture.f.linesize[i] *= 2;
1655 s->err_recognition = avctx->err_recognition;
1657 /* set dequantizer, we can't do it during init as
1658 * it might change for mpeg4 and we can't do it in the header
1659 * decode as init is not called for mpeg4 there yet */
1660 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1661 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1662 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1663 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1664 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1665 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1667 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1668 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1671 if (s->dct_error_sum) {
1672 av_assert2(s->avctx->noise_reduction && s->encoding);
1673 update_noise_reduction(s);
/* Deprecated XvMC path (guarded by FF_API_XVMC in the full file). */
1677 FF_DISABLE_DEPRECATION_WARNINGS
1678 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1679 return ff_xvmc_field_start(s, avctx);
1680 FF_ENABLE_DEPRECATION_WARNINGS
1681 #endif /* FF_API_XVMC */
1686 /* called after a frame has been decoded. */
/* Finishes the frame: XvMC field end (deprecated path), optional edge
 * extension of the reconstructed picture for unrestricted MV codecs, and a
 * final progress report for frame threading.
 * NOTE(review): excerpt is line-sampled; parts of the draw-edges condition
 * are not visible here. */
1687 void ff_MPV_frame_end(MpegEncContext *s)
1690 FF_DISABLE_DEPRECATION_WARNINGS
1691 /* redraw edges for the frame if decoding didn't complete */
1692 // just to make sure that all data is rendered.
1693 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1694 ff_xvmc_field_end(s);
1696 FF_ENABLE_DEPRECATION_WARNINGS
1697 #endif /* FF_API_XVMC */
/* Extend picture borders (EDGE_WIDTH pels on all sides) so unrestricted
 * motion vectors can read outside the coded area; skipped for hwaccel,
 * VDPAU, and CODEC_FLAG_EMU_EDGE. */
1698 if ((s->er.error_count || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1699 !s->avctx->hwaccel &&
1700 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1701 s->unrestricted_mv &&
1702 s->current_picture.reference &&
1704 !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1707 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1708 int hshift = desc->log2_chroma_w;
1709 int vshift = desc->log2_chroma_h;
1710 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1711 s->h_edge_pos, s->v_edge_pos,
1712 EDGE_WIDTH, EDGE_WIDTH,
1713 EDGE_TOP | EDGE_BOTTOM);
/* Chroma planes use the same call with subsampled sizes/widths. */
1714 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1715 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1716 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1717 EDGE_TOP | EDGE_BOTTOM);
1718 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1719 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1720 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1721 EDGE_TOP | EDGE_BOTTOM);
1726 if (s->current_picture.reference)
1727 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1731 * Draw a line from (ex, ey) -> (sx, sy).
1732 * @param w width of the image
1733 * @param h height of the image
1734 * @param stride stride/linesize of the image
1735 * @param color color of the arrow
/* Anti-aliased-ish line plot used by the MV visualizer: endpoints are clipped
 * to the image, then the line is walked along its major axis in 16.16 fixed
 * point, splitting `color` between the two adjacent pixels by the fractional
 * part. NOTE(review): excerpt is line-sampled; swap conditions / variable
 * declarations between branches are not visible here. */
1737 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1738 int w, int h, int stride, int color)
1742 sx = av_clip(sx, 0, w - 1);
1743 sy = av_clip(sy, 0, h - 1);
1744 ex = av_clip(ex, 0, w - 1);
1745 ey = av_clip(ey, 0, h - 1);
1747 buf[sy * stride + sx] += color;
/* Mostly-horizontal case: step x, interpolate y. */
1749 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1751 FFSWAP(int, sx, ex);
1752 FFSWAP(int, sy, ey);
1754 buf += sx + sy * stride;
1756 f = ((ey - sy) << 16) / ex;
1757 for (x = 0; x <= ex; x++) {
1759 fr = (x * f) & 0xFFFF;
1760 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1761 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* Mostly-vertical case: step y, interpolate x. */
1765 FFSWAP(int, sx, ex);
1766 FFSWAP(int, sy, ey);
1768 buf += sx + sy * stride;
1771 f = ((ex - sx) << 16) / ey;
1774 for(y= 0; y <= ey; y++){
1776 fr = (y*f) & 0xFFFF;
1777 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1778 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1784 * Draw an arrow from (ex, ey) -> (sx, sy).
1785 * @param w width of the image
1786 * @param h height of the image
1787 * @param stride stride/linesize of the image
1788 * @param color color of the arrow
/* Draws the MV arrow: the shaft via draw_line() plus, for vectors longer than
 * 3 pixels, two short head strokes at the start point. Coordinates are
 * pre-clipped to a 100-pel margin around the image so draw_line's own clip
 * keeps the geometry sane. NOTE(review): excerpt is line-sampled; dx/dy
 * setup lines are not visible here. */
1790 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1791 int ey, int w, int h, int stride, int color)
1795 sx = av_clip(sx, -100, w + 100);
1796 sy = av_clip(sy, -100, h + 100);
1797 ex = av_clip(ex, -100, w + 100);
1798 ey = av_clip(ey, -100, h + 100);
1803 if (dx * dx + dy * dy > 3 * 3) {
1806 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1808 // FIXME subpixel accuracy
/* Normalize (rx,ry) to a fixed head length; the two strokes are the vector
 * rotated by roughly +/-45 degrees. */
1809 rx = ROUNDED_DIV(rx * 3 << 4, length);
1810 ry = ROUNDED_DIV(ry * 3 << 4, length);
1812 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1813 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1815 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1819 * Print debugging info for the given picture.
/* Two debug facilities in one function:
 *  1) textual per-macroblock dump (skip count, QP, MB type) to the log;
 *  2) visual overlays drawn directly into `pict` (motion vectors as arrows,
 *     QP / MB type as chroma tints, partition boundaries as XORed lines).
 * NOTE(review): excerpt is line-sampled; several guards, braces and `else`
 * arms are not visible here. */
1821 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1823 int mb_width, int mb_height, int mb_stride, int quarter_sample)
/* Nothing to do for hwaccel output or when no MB metadata is present. */
1825 if (avctx->hwaccel || !p || !p->mb_type
1826 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
/* --- Part 1: textual dump --- */
1830 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1833 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1834 av_get_picture_type_char(pict->pict_type));
1835 for (y = 0; y < mb_height; y++) {
1836 for (x = 0; x < mb_width; x++) {
1837 if (avctx->debug & FF_DEBUG_SKIP) {
1838 int count = mbskip_table[x + y * mb_stride];
1841 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1843 if (avctx->debug & FF_DEBUG_QP) {
1844 av_log(avctx, AV_LOG_DEBUG, "%2d",
1845 p->qscale_table[x + y * mb_stride]);
1847 if (avctx->debug & FF_DEBUG_MB_TYPE) {
1848 int mb_type = p->mb_type[x + y * mb_stride];
1849 // Type & MV direction
/* One character per MB type, most specific test first. */
1850 if (IS_PCM(mb_type))
1851 av_log(avctx, AV_LOG_DEBUG, "P");
1852 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1853 av_log(avctx, AV_LOG_DEBUG, "A");
1854 else if (IS_INTRA4x4(mb_type))
1855 av_log(avctx, AV_LOG_DEBUG, "i");
1856 else if (IS_INTRA16x16(mb_type))
1857 av_log(avctx, AV_LOG_DEBUG, "I");
1858 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1859 av_log(avctx, AV_LOG_DEBUG, "d");
1860 else if (IS_DIRECT(mb_type))
1861 av_log(avctx, AV_LOG_DEBUG, "D");
1862 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1863 av_log(avctx, AV_LOG_DEBUG, "g");
1864 else if (IS_GMC(mb_type))
1865 av_log(avctx, AV_LOG_DEBUG, "G");
1866 else if (IS_SKIP(mb_type))
1867 av_log(avctx, AV_LOG_DEBUG, "S");
1868 else if (!USES_LIST(mb_type, 1))
1869 av_log(avctx, AV_LOG_DEBUG, ">");
1870 else if (!USES_LIST(mb_type, 0))
1871 av_log(avctx, AV_LOG_DEBUG, "<");
1873 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1874 av_log(avctx, AV_LOG_DEBUG, "X");
/* Second character: partition shape. */
1878 if (IS_8X8(mb_type))
1879 av_log(avctx, AV_LOG_DEBUG, "+");
1880 else if (IS_16X8(mb_type))
1881 av_log(avctx, AV_LOG_DEBUG, "-");
1882 else if (IS_8X16(mb_type))
1883 av_log(avctx, AV_LOG_DEBUG, "|");
1884 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1885 av_log(avctx, AV_LOG_DEBUG, " ");
1887 av_log(avctx, AV_LOG_DEBUG, "?");
/* Third character: '=' marks interlaced MBs. */
1890 if (IS_INTERLACED(mb_type))
1891 av_log(avctx, AV_LOG_DEBUG, "=");
1893 av_log(avctx, AV_LOG_DEBUG, " ");
1896 av_log(avctx, AV_LOG_DEBUG, "\n");
/* --- Part 2: visual overlays written into the frame itself --- */
1900 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1901 (avctx->debug_mv)) {
1902 const int shift = 1 + quarter_sample;
1906 int h_chroma_shift, v_chroma_shift, block_height;
1907 const int width = avctx->width;
1908 const int height = avctx->height;
/* H.264/SVQ3 store MVs at 4x4 granularity, others at 8x8. */
1909 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1910 const int mv_stride = (mb_width << mv_sample_log2) +
1911 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1913 *low_delay = 0; // needed to see the vectors without trashing the buffers
1915 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1917 av_frame_make_writable(pict);
1919 pict->opaque = NULL;
1920 ptr = pict->data[0];
1921 block_height = 16 >> v_chroma_shift;
1923 for (mb_y = 0; mb_y < mb_height; mb_y++) {
1925 for (mb_x = 0; mb_x < mb_width; mb_x++) {
1926 const int mb_index = mb_x + mb_y * mb_stride;
/* Motion-vector arrows: type 0 = P forward, 1 = B forward, 2 = B backward;
 * each is gated on its FF_DEBUG_VIS_MV_* flag and the frame type. */
1927 if ((avctx->debug_mv) && p->motion_val[0]) {
1929 for (type = 0; type < 3; type++) {
1933 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1934 (pict->pict_type!= AV_PICTURE_TYPE_P))
1939 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1940 (pict->pict_type!= AV_PICTURE_TYPE_B))
1945 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1946 (pict->pict_type!= AV_PICTURE_TYPE_B))
1951 if (!USES_LIST(p->mb_type[mb_index], direction))
/* One arrow per partition: 4 for 8x8, 2 for 16x8 / 8x16, else one 16x16. */
1954 if (IS_8X8(p->mb_type[mb_index])) {
1956 for (i = 0; i < 4; i++) {
1957 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1958 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1959 int xy = (mb_x * 2 + (i & 1) +
1960 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1961 int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
1962 int my = (p->motion_val[direction][xy][1] >> shift) + sy;
1963 draw_arrow(ptr, sx, sy, mx, my, width,
1964 height, pict->linesize[0], 100);
1966 } else if (IS_16X8(p->mb_type[mb_index])) {
1968 for (i = 0; i < 2; i++) {
1969 int sx = mb_x * 16 + 8;
1970 int sy = mb_y * 16 + 4 + 8 * i;
1971 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1972 int mx = (p->motion_val[direction][xy][0] >> shift);
1973 int my = (p->motion_val[direction][xy][1] >> shift);
1975 if (IS_INTERLACED(p->mb_type[mb_index]))
1978 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1979 height, pict->linesize[0], 100);
1981 } else if (IS_8X16(p->mb_type[mb_index])) {
1983 for (i = 0; i < 2; i++) {
1984 int sx = mb_x * 16 + 4 + 8 * i;
1985 int sy = mb_y * 16 + 8;
1986 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1987 int mx = p->motion_val[direction][xy][0] >> shift;
1988 int my = p->motion_val[direction][xy][1] >> shift;
1990 if (IS_INTERLACED(p->mb_type[mb_index]))
1993 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1994 height, pict->linesize[0], 100);
1997 int sx= mb_x * 16 + 8;
1998 int sy= mb_y * 16 + 8;
1999 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2000 int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2001 int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2002 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
/* QP visualization: tint both chroma planes proportionally to qscale. */
2006 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2007 uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2008 0x0101010101010101ULL;
2010 for (y = 0; y < block_height; y++) {
2011 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2012 (block_height * mb_y + y) *
2013 pict->linesize[1]) = c;
2014 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2015 (block_height * mb_y + y) *
2016 pict->linesize[2]) = c;
/* MB-type visualization: pick a (u,v) color per type and fill the MB. */
2019 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2021 int mb_type = p->mb_type[mb_index];
2024 #define COLOR(theta, r) \
2025 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2026 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2030 if (IS_PCM(mb_type)) {
2032 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2033 IS_INTRA16x16(mb_type)) {
2035 } else if (IS_INTRA4x4(mb_type)) {
2037 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2039 } else if (IS_DIRECT(mb_type)) {
2041 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2043 } else if (IS_GMC(mb_type)) {
2045 } else if (IS_SKIP(mb_type)) {
2047 } else if (!USES_LIST(mb_type, 1)) {
2049 } else if (!USES_LIST(mb_type, 0)) {
2052 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* Replicate the byte across a 64-bit word to fill 8 chroma pels at once. */
2056 u *= 0x0101010101010101ULL;
2057 v *= 0x0101010101010101ULL;
2058 for (y = 0; y < block_height; y++) {
2059 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2060 (block_height * mb_y + y) * pict->linesize[1]) = u;
2061 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2062 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* Mark partition boundaries by XOR-inverting lines in the luma plane. */
2066 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2067 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2068 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2069 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2070 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2072 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2073 for (y = 0; y < 16; y++)
2074 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2075 pict->linesize[0]] ^= 0x80;
/* For 8x8 with sub-8x8 MV resolution, also mark 4x4 splits whose MVs differ. */
2077 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2078 int dm = 1 << (mv_sample_log2 - 2);
2079 for (i = 0; i < 4; i++) {
2080 int sx = mb_x * 16 + 8 * (i & 1);
2081 int sy = mb_y * 16 + 8 * (i >> 1);
2082 int xy = (mb_x * 2 + (i & 1) +
2083 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2085 int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2086 if (mv[0] != mv[dm] ||
2087 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2088 for (y = 0; y < 8; y++)
2089 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2090 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2091 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2092 pict->linesize[0]) ^= 0x8080808080808080ULL;
2096 if (IS_INTERLACED(mb_type) &&
2097 avctx->codec->id == AV_CODEC_ID_H264) {
2101 mbskip_table[mb_index] = 0;
/* Convenience wrapper: forwards MpegEncContext geometry/tables to
 * ff_print_debug_info2(). */
2107 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2109 ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2110 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/* Export the per-MB qscale table as frame side data: take a new reference to
 * the qscale buffer, skip the 2-row + 1 edge offset so data starts at the
 * first real MB, and attach it to f. Returns 0 or a negative AVERROR. */
2113 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2115 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2116 int offset = 2*s->mb_stride + 1;
2118 return AVERROR(ENOMEM);
2119 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
/* Rebase the buffer view past the edge rows; the underlying allocation is
 * still owned by the AVBufferRef. */
2120 ref->size -= offset;
2121 ref->data += offset;
2122 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/* Half-pel motion compensation for one block in lowres decoding: derive the
 * sub-pel phase and integer source position from the motion vector at the
 * current lowres shift, run edge emulation when the source touches the
 * picture border, then call the h264 chroma MC routine (which does the
 * bilinear interpolation at lowres).
 * NOTE(review): excerpt is line-sampled; the quarter_sample adjustment body
 * and the emulated-edge flag handling are not visible here. */
2125 static inline int hpel_motion_lowres(MpegEncContext *s,
2126 uint8_t *dest, uint8_t *src,
2127 int field_based, int field_select,
2128 int src_x, int src_y,
2129 int width, int height, ptrdiff_t stride,
2130 int h_edge_pos, int v_edge_pos,
2131 int w, int h, h264_chroma_mc_func *pix_op,
2132 int motion_x, int motion_y)
2134 const int lowres = s->avctx->lowres;
2135 const int op_index = FFMIN(lowres, 3);
/* s_mask isolates the sub-pel fraction at this lowres level. */
2136 const int s_mask = (2 << lowres) - 1;
2140 if (s->quarter_sample) {
2145 sx = motion_x & s_mask;
2146 sy = motion_y & s_mask;
/* note: '>> lowres + 1' parses as '>> (lowres + 1)'. */
2147 src_x += motion_x >> lowres + 1;
2148 src_y += motion_y >> lowres + 1;
2150 src += src_y * stride + src_x;
2152 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2153 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2154 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2155 s->linesize, s->linesize,
2156 w + 1, (h + 1) << field_based,
2157 src_x, src_y << field_based,
2158 h_edge_pos, v_edge_pos);
2159 src = s->edge_emu_buffer;
/* Rescale the sub-pel phase to the 1/8-pel units pix_op expects. */
2163 sx = (sx << 2) >> lowres;
2164 sy = (sy << 2) >> lowres;
2167 pix_op[op_index](dest, src, stride, h, sx, sy);
2171 /* apply one mpeg motion vector to the three components */
/* Lowres luma+chroma motion compensation for one macroblock (or field half):
 * computes integer/sub-pel source positions for Y, Cb and Cr under the
 * current lowres shift and chroma subsampling, handles H.263/H.261 chroma MV
 * rounding, runs edge emulation where needed, and applies the bottom-field
 * offset for field-based MC.
 * NOTE(review): excerpt is line-sampled; several parameter lines of the
 * signature, some else-branches and the mx/my setup are not visible here. */
2172 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2179 uint8_t **ref_picture,
2180 h264_chroma_mc_func *pix_op,
2181 int motion_x, int motion_y,
2184 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2185 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2186 ptrdiff_t uvlinesize, linesize;
2187 const int lowres = s->avctx->lowres;
2188 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2189 const int block_s = 8>>lowres;
2190 const int s_mask = (2 << lowres) - 1;
2191 const int h_edge_pos = s->h_edge_pos >> lowres;
2192 const int v_edge_pos = s->v_edge_pos >> lowres;
2193 linesize = s->current_picture.f.linesize[0] << field_based;
2194 uvlinesize = s->current_picture.f.linesize[1] << field_based;
2196 // FIXME obviously not perfect but qpel will not work in lowres anyway
2197 if (s->quarter_sample) {
/* Compensate the vertical position when sampling the other field. */
2203 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2206 sx = motion_x & s_mask;
2207 sy = motion_y & s_mask;
2208 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2209 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* Chroma source position depends on the bitstream format's chroma MV rules
 * and on the chroma subsampling shifts. */
2211 if (s->out_format == FMT_H263) {
2212 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2213 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2214 uvsrc_x = src_x >> 1;
2215 uvsrc_y = src_y >> 1;
2216 } else if (s->out_format == FMT_H261) {
2217 // even chroma mv's are full pel in H261
2220 uvsx = (2 * mx) & s_mask;
2221 uvsy = (2 * my) & s_mask;
2222 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2223 uvsrc_y = mb_y * block_s + (my >> lowres);
2225 if(s->chroma_y_shift){
2230 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2231 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2233 if(s->chroma_x_shift){
2237 uvsy = motion_y & s_mask;
2239 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2242 uvsx = motion_x & s_mask;
2243 uvsy = motion_y & s_mask;
2250 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2251 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2252 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* Border case: copy the needed area into edge_emu_buffer with replicated
 * edges, for luma and (unless CODEC_FLAG_GRAY) both chroma planes. */
2254 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2255 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2256 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2257 linesize >> field_based, linesize >> field_based,
2258 17, 17 + field_based,
2259 src_x, src_y << field_based, h_edge_pos,
2261 ptr_y = s->edge_emu_buffer;
2262 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2263 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2264 s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
2265 uvlinesize >> field_based, uvlinesize >> field_based,
2267 uvsrc_x, uvsrc_y << field_based,
2268 h_edge_pos >> 1, v_edge_pos >> 1);
2269 s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
2270 uvlinesize >> field_based,uvlinesize >> field_based,
2272 uvsrc_x, uvsrc_y << field_based,
2273 h_edge_pos >> 1, v_edge_pos >> 1);
2275 ptr_cr = uvbuf + 16;
2279 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
/* Bottom field: start one line down in both destination and source. */
2281 dest_y += s->linesize;
2282 dest_cb += s->uvlinesize;
2283 dest_cr += s->uvlinesize;
2287 ptr_y += s->linesize;
2288 ptr_cb += s->uvlinesize;
2289 ptr_cr += s->uvlinesize;
/* Rescale sub-pel phases to 1/8-pel and run the MC ops. */
2292 sx = (sx << 2) >> lowres;
2293 sy = (sy << 2) >> lowres;
2294 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2296 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2297 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2298 uvsx = (uvsx << 2) >> lowres;
2299 uvsy = (uvsy << 2) >> lowres;
2301 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2302 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2305 // FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV macroblocks in lowres decoding:
 * a single chroma vector is derived from the four luma MVs, then one
 * block_s x block_s prediction is done per chroma plane (Cb, then Cr).
 * NOTE(review): interior lines are missing from this excerpt; comments
 * describe only the code that is visible here. */
2308 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2309 uint8_t *dest_cb, uint8_t *dest_cr,
2310 uint8_t **ref_picture,
2311 h264_chroma_mc_func * pix_op,
/* block size halves per lowres level; op_index clamps the MC-table index */
2314 const int lowres = s->avctx->lowres;
2315 const int op_index = FFMIN(lowres, 3);
2316 const int block_s = 8 >> lowres;
2317 const int s_mask = (2 << lowres) - 1;
/* chroma edge positions: one extra shift versus luma (subsampled plane) */
2318 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2319 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2320 int emu = 0, src_x, src_y, sx, sy;
2324 if (s->quarter_sample) {
2329 /* In case of 8X8, we construct a single chroma motion vector
2330 with a special rounding */
2331 mx = ff_h263_round_chroma(mx);
2332 my = ff_h263_round_chroma(my);
2336 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2337 src_y = s->mb_y * block_s + (my >> lowres + 1);
2339 offset = src_y * s->uvlinesize + src_x;
2340 ptr = ref_picture[1] + offset;
/* if the read would cross the padded picture edge, go through the
 * edge-emulation buffer instead of reading out of bounds */
2341 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2342 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2343 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2344 s->uvlinesize, s->uvlinesize,
2346 src_x, src_y, h_edge_pos, v_edge_pos);
2347 ptr = s->edge_emu_buffer;
/* rescale the sub-pel fractions for the h264 chroma MC functions */
2350 sx = (sx << 2) >> lowres;
2351 sy = (sy << 2) >> lowres;
2352 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr plane: same offset and sub-pel position as Cb */
2354 ptr = ref_picture[2] + offset;
2356 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2357 s->uvlinesize, s->uvlinesize,
2359 src_x, src_y, h_edge_pos, v_edge_pos);
2360 ptr = s->edge_emu_buffer;
2362 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2366 * motion compensation of a single macroblock
2368 * @param dest_y luma destination pointer
2369 * @param dest_cb chroma cb/u destination pointer
2370 * @param dest_cr chroma cr/v destination pointer
2371 * @param dir direction (0->forward, 1->backward)
2372 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2373 * @param pix_op halfpel motion compensation function (average or put normally)
2374 * the motion vectors are taken from s->mv and the MV type from s->mv_type
2376 static inline void MPV_motion_lowres(MpegEncContext *s,
2377 uint8_t *dest_y, uint8_t *dest_cb,
2379 int dir, uint8_t **ref_picture,
2380 h264_chroma_mc_func *pix_op)
2384 const int lowres = s->avctx->lowres;
2385 const int block_s = 8 >>lowres;
/* dispatch on MV type; cases below handle 16x16, 8x8 (4MV), field,
 * 16x8 and dual-prime style prediction (some case labels are outside
 * this excerpt) */
2390 switch (s->mv_type) {
2392 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2394 ref_picture, pix_op,
2395 s->mv[dir][0][0], s->mv[dir][0][1],
/* 4MV: one half-pel luma prediction per 8x8 quadrant; the quadrant
 * offset is derived from the two low bits of i */
2401 for (i = 0; i < 4; i++) {
2402 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2403 s->linesize) * block_s,
2404 ref_picture[0], 0, 0,
2405 (2 * mb_x + (i & 1)) * block_s,
2406 (2 * mb_y + (i >> 1)) * block_s,
2407 s->width, s->height, s->linesize,
2408 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2409 block_s, block_s, pix_op,
2410 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate the four luma MVs for the merged chroma vector */
2412 mx += s->mv[dir][i][0];
2413 my += s->mv[dir][i][1];
2416 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2417 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field MVs inside a frame picture: predict top field then bottom field */
2421 if (s->picture_structure == PICT_FRAME) {
2423 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2424 1, 0, s->field_select[dir][0],
2425 ref_picture, pix_op,
2426 s->mv[dir][0][0], s->mv[dir][0][1],
2429 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2430 1, 1, s->field_select[dir][1],
2431 ref_picture, pix_op,
2432 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture referencing the opposite parity of the current frame:
 * use the current picture's planes as the reference */
2435 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2436 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2437 ref_picture = s->current_picture_ptr->f.data;
2440 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2441 0, 0, s->field_select[dir][0],
2442 ref_picture, pix_op,
2444 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two halves, each possibly referencing a different field/picture */
2448 for (i = 0; i < 2; i++) {
2449 uint8_t **ref2picture;
2451 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2452 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2453 ref2picture = ref_picture;
2455 ref2picture = s->current_picture_ptr->f.data;
2458 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2459 0, 0, s->field_select[dir][i],
2460 ref2picture, pix_op,
2461 s->mv[dir][i][0], s->mv[dir][i][1] +
2462 2 * block_s * i, block_s, mb_y >> 1);
/* advance destinations to the lower half of the macroblock */
2464 dest_y += 2 * block_s * s->linesize;
2465 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2466 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* dual-prime style: in frame pictures, two predictions per field are
 * averaged (put first, then avg of the same block) */
2470 if (s->picture_structure == PICT_FRAME) {
2471 for (i = 0; i < 2; i++) {
2473 for (j = 0; j < 2; j++) {
2474 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2476 ref_picture, pix_op,
2477 s->mv[dir][2 * i + j][0],
2478 s->mv[dir][2 * i + j][1],
2481 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2484 for (i = 0; i < 2; i++) {
2485 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2486 0, 0, s->picture_structure != i + 1,
2487 ref_picture, pix_op,
2488 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2489 2 * block_s, mb_y >> 1);
2491 // after put we make avg of the same block
2492 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2494 // opposite parity is always in the same
2495 // frame if this is second field
2496 if (!s->first_field) {
2497 ref_picture = s->current_picture_ptr->f.data;
2508 * find the lowest MB row referenced in the MVs
/* Used for frame-threaded decoding: returns the bottom-most macroblock
 * row of the reference picture that the current MB's MVs can touch, so
 * the caller can wait for that row before doing MC. */
2510 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2512 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2513 int my, off, i, mvs;
/* field pictures / GMC: be conservative (fall through to the bottom row) */
2515 if (s->picture_structure != PICT_FRAME || s->mcsel)
2518 switch (s->mv_type) {
/* scan all vertical MV components, normalized to quarter-pel units */
2532 for (i = 0; i < mvs; i++) {
2533 my = s->mv[dir][i][1]<<qpel_shift;
2534 my_max = FFMAX(my_max, my);
2535 my_min = FFMIN(my_min, my);
/* +63 >> 6: convert the largest qpel displacement to whole MB rows,
 * rounding up */
2538 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2540 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2542 return s->mb_height-1;
2545 /* put block[] to dest[] */
/* Dequantize an intra block with the codec's unquantizer, then IDCT and
 * overwrite (put) the result into dest. i is the block index within the
 * macroblock, used by the unquantizer for luma/chroma DC scaling. */
2546 static inline void put_dct(MpegEncContext *s,
2547 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2549 s->dct_unquantize_intra(s, block, i, qscale);
2550 s->dsp.idct_put (dest, line_size, block);
2553 /* add block[] to dest[] */
/* IDCT an already-dequantized residual block and add it to the
 * prediction in dest; skipped entirely when the block has no coded
 * coefficients (block_last_index < 0). */
2554 static inline void add_dct(MpegEncContext *s,
2555 int16_t *block, int i, uint8_t *dest, int line_size)
2557 if (s->block_last_index[i] >= 0) {
2558 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter residual block, then IDCT-add it onto the motion
 * compensated prediction in dest; no-op when the block is all zero. */
2562 static inline void add_dequant_dct(MpegEncContext *s,
2563 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2565 if (s->block_last_index[i] >= 0) {
2566 s->dct_unquantize_inter(s, block, i, qscale);
2568 s->dsp.idct_add (dest, line_size, block);
2573 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra prediction state (DC predictors to 1024, AC
 * predictors to 0, coded_block flags) for the current macroblock so a
 * following intra MB does not predict from stale non-intra data.
 * Luma uses the b8 grid (4 entries per MB), chroma the MB grid. */
2575 void ff_clean_intra_table_entries(MpegEncContext *s)
2577 int wrap = s->b8_stride;
2578 int xy = s->block_index[0];
/* luma DC predictors: reset all 4 8x8 positions of this MB to 1024 */
2581 s->dc_val[0][xy + 1 ] =
2582 s->dc_val[0][xy + wrap] =
2583 s->dc_val[0][xy + 1 + wrap] = 1024;
/* luma AC predictors: two rows of two blocks each (16 coeffs per block) */
2585 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2586 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2587 if (s->msmpeg4_version>=3) {
2588 s->coded_block[xy ] =
2589 s->coded_block[xy + 1 ] =
2590 s->coded_block[xy + wrap] =
2591 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one entry per MB on the mb_stride grid */
2594 wrap = s->mb_stride;
2595 xy = s->mb_x + s->mb_y * wrap;
2597 s->dc_val[2][xy] = 1024;
2599 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2600 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2602 s->mbintra_table[xy]= 0;
2605 /* generic function called after a macroblock has been parsed by the
2606 decoder or after it has been encoded by the encoder.
2608 Important variables used:
2609 s->mb_intra : true if intra macroblock
2610 s->mv_dir : motion vector direction
2611 s->mv_type : motion vector type
2612 s->mv : motion vector
2613 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* lowres_flag and is_mpeg12 are compile-time-constant template
 * parameters (function is av_always_inline), letting the compiler strip
 * dead branches per specialization.
 * NOTE(review): this excerpt is missing interior lines; comments below
 * cover only the visible code. */
2615 static av_always_inline
2616 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2617 int lowres_flag, int is_mpeg12)
2619 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2622 FF_DISABLE_DEPRECATION_WARNINGS
2623 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2624 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2627 FF_ENABLE_DEPRECATION_WARNINGS
2628 #endif /* FF_API_XVMC */
2630 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2631 /* print DCT coefficients */
2633 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2635 for(j=0; j<64; j++){
2636 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2638 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2642 s->current_picture.qscale_table[mb_xy] = s->qscale;
2644 /* update DC predictors for P macroblocks */
2646 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2647 if(s->mbintra_table[mb_xy])
2648 ff_clean_intra_table_entries(s);
2652 s->last_dc[2] = 128 << s->intra_dc_precision;
2655 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2656 s->mbintra_table[mb_xy]=1;
/* skip reconstruction entirely when encoding and the pixels are not
 * needed (no PSNR, no frame skipping, RD decision not active) */
2658 if ( (s->flags&CODEC_FLAG_PSNR)
2659 || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor
2660 || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2661 uint8_t *dest_y, *dest_cb, *dest_cr;
2662 int dct_linesize, dct_offset;
2663 op_pixels_func (*op_pix)[4];
2664 qpel_mc_func (*op_qpix)[16];
2665 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2666 const int uvlinesize = s->current_picture.f.linesize[1];
2667 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2668 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2670 /* avoid copy if macroblock skipped in last frame too */
2671 /* skip only during decoding as we might trash the buffers during encoding a bit */
2673 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2675 if (s->mb_skipped) {
2677 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2679 } else if(!s->current_picture.reference) {
2682 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: double the stride and offset by one line instead of
 * one block row */
2686 dct_linesize = linesize << s->interlaced_dct;
2687 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2691 dest_cb= s->dest[1];
2692 dest_cr= s->dest[2];
/* non-readable B-frame destinations: reconstruct into a scratchpad */
2694 dest_y = s->b_scratchpad;
2695 dest_cb= s->b_scratchpad+16*linesize;
2696 dest_cr= s->b_scratchpad+32*linesize;
2700 /* motion handling */
2701 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the referenced rows of the reference
 * pictures have been decoded */
2704 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2705 if (s->mv_dir & MV_DIR_FORWARD) {
2706 ff_thread_await_progress(&s->last_picture_ptr->tf,
2707 ff_MPV_lowest_referenced_row(s, 0),
2710 if (s->mv_dir & MV_DIR_BACKWARD) {
2711 ff_thread_await_progress(&s->next_picture_ptr->tf,
2712 ff_MPV_lowest_referenced_row(s, 1),
/* lowres path uses the h264 chroma MC functions for all planes;
 * bidirectional MC switches from put to avg after the first direction */
2718 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2720 if (s->mv_dir & MV_DIR_FORWARD) {
2721 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2722 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2724 if (s->mv_dir & MV_DIR_BACKWARD) {
2725 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
2728 op_qpix = s->me.qpel_put;
2729 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2730 op_pix = s->hdsp.put_pixels_tab;
2732 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2734 if (s->mv_dir & MV_DIR_FORWARD) {
2735 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2736 op_pix = s->hdsp.avg_pixels_tab;
2737 op_qpix= s->me.qpel_avg;
2739 if (s->mv_dir & MV_DIR_BACKWARD) {
2740 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2745 /* skip dequant / idct if we are really late ;) */
2746 if(s->avctx->skip_idct){
2747 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2748 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2749 || s->avctx->skip_idct >= AVDISCARD_ALL)
2753 /* add dct residue */
/* path 1: codecs whose blocks still need dequantization here */
2754 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2755 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2756 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2757 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2758 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2759 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2761 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2762 if (s->chroma_y_shift){
2763 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2764 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2768 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2769 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2770 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2771 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* path 2: blocks already dequantized, just IDCT-add */
2774 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2775 add_dct(s, block[0], 0, dest_y , dct_linesize);
2776 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2777 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2778 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2780 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2781 if(s->chroma_y_shift){//Chroma420
2782 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2783 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2786 dct_linesize = uvlinesize << s->interlaced_dct;
2787 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2789 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2790 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2791 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2792 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2793 if(!s->chroma_x_shift){//Chroma444
2794 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2795 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2796 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2797 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2802 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2803 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2806 /* dct only in intra block */
/* intra path: put (not add) the IDCT output, dequantizing when needed */
2807 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2808 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2809 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2810 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2811 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2813 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2814 if(s->chroma_y_shift){
2815 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2816 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2820 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2821 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2822 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2823 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2827 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2828 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2829 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2830 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2832 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2833 if(s->chroma_y_shift){
2834 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2835 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2838 dct_linesize = uvlinesize << s->interlaced_dct;
2839 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2841 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2842 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2843 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2844 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2845 if(!s->chroma_x_shift){//Chroma444
2846 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2847 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2848 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2849 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* scratchpad path: copy the reconstructed MB to its real destination */
2857 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2858 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2859 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: selects one of the four always-inlined
 * MPV_decode_mb_internal specializations (lowres x mpeg12), so each
 * combination compiles to straight-line code without runtime flags. */
2864 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2866 if(s->out_format == FMT_MPEG1) {
2867 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2868 else MPV_decode_mb_internal(s, block, 0, 1);
2871 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2872 else MPV_decode_mb_internal(s, block, 0, 0);
2876 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* Draws padding edges around the just-decoded rows (when edges are
 * requested and no hwaccel owns the buffers) and then invokes the
 * user's draw_horiz_band callback with per-plane offsets for row y,
 * height h. NOTE(review): some interior lines are missing from this
 * excerpt. */
2878 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2879 Picture *last, int y, int h, int picture_structure,
2880 int first_field, int draw_edges, int low_delay,
2881 int v_edge_pos, int h_edge_pos)
2883 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2884 int hshift = desc->log2_chroma_w;
2885 int vshift = desc->log2_chroma_h;
2886 const int field_pic = picture_structure != PICT_FRAME;
/* edge drawing is skipped for hwaccel/VDPAU and EMU_EDGE since those
 * buffers are not padded or not CPU-writable */
2892 if (!avctx->hwaccel &&
2893 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2896 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2897 int *linesize = cur->f.linesize;
2898 int sides = 0, edge_h;
2899 if (y==0) sides |= EDGE_TOP;
2900 if (y + h >= v_edge_pos)
2901 sides |= EDGE_BOTTOM;
2903 edge_h= FFMIN(h, v_edge_pos - y);
2905 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2906 linesize[0], h_edge_pos, edge_h,
2907 EDGE_WIDTH, EDGE_WIDTH, sides);
2908 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2909 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2910 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2911 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2912 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2913 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
/* clip h for the last (possibly partial) row of the picture */
2916 h = FFMIN(h, avctx->height - y);
2918 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2920 if (avctx->draw_horiz_band) {
2922 int offset[AV_NUM_DATA_POINTERS];
/* B frames / low_delay / coded-order slices can be shown directly;
 * otherwise the last picture is used (branch body outside excerpt) */
2925 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2926 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2933 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2934 picture_structure == PICT_FRAME &&
2935 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2936 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2939 offset[0]= y * src->linesize[0];
2941 offset[2]= (y >> vshift) * src->linesize[1];
2942 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2948 avctx->draw_horiz_band(avctx, src, offset,
2949 y, picture_structure, h);
/* Convenience wrapper around ff_draw_horiz_band using the decoder
 * context's current state; edges are drawn only for unrestricted-MV
 * streams that are not intra-only. */
2953 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2955 int draw_edges = s->unrestricted_mv && !s->intra_only;
2956 ff_draw_horiz_band(s->avctx, &s->dsp, s->current_picture_ptr,
2957 s->last_picture_ptr, y, h, s->picture_structure,
2958 s->first_field, draw_edges, s->low_delay,
2959 s->v_edge_pos, s->h_edge_pos);
/* Computes the per-macroblock block_index[] entries (positions on the
 * b8 luma grid and chroma grids) and the s->dest[] plane pointers for
 * the current mb_x/mb_y, accounting for lowres scaling, chroma
 * subsampling and field pictures. */
2962 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2963 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2964 const int uvlinesize = s->current_picture.f.linesize[1];
/* mb_size is the log2 of the macroblock width in pixels (4 => 16px),
 * reduced per lowres level */
2965 const int mb_size= 4 - s->avctx->lowres;
2967 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2968 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2969 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2970 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2971 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2972 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2973 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* horizontal offset: (mb_x - 1) because block_index above also points
 * one MB to the left */
2975 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2976 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2977 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2979 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2981 if(s->picture_structure==PICT_FRAME){
2982 s->dest[0] += s->mb_y * linesize << mb_size;
2983 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2984 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: every second MB row, hence mb_y>>1 */
2986 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2987 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2988 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2989 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2995 * Permute an 8x8 block.
2996 * @param block the block which will be permuted according to the given permutation vector
2997 * @param permutation the permutation vector
2998 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
2999 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3000 * (inverse) permutated to scantable order!
3002 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3008 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* first pass: gather the (up to last+1) coefficients into a temp
 * buffer in natural order, visiting only positions the scantable says
 * can be non-zero */
3010 for(i=0; i<=last; i++){
3011 const int j= scantable[i];
/* second pass: scatter back through the permutation vector */
3016 for(i=0; i<=last; i++){
3017 const int j= scantable[i];
3018 const int perm_j= permutation[j];
3019 block[perm_j]= temp[j];
/* avcodec flush callback: drops all reference pictures, resets decode
 * position, parser state and the bitstream buffer so decoding can
 * restart cleanly (e.g. after a seek). */
3023 void ff_mpeg_flush(AVCodecContext *avctx){
3025 MpegEncContext *s = avctx->priv_data;
3027 if(s==NULL || s->picture==NULL)
3030 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3031 ff_mpeg_unref_picture(s, &s->picture[i]);
3032 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3034 ff_mpeg_unref_picture(s, &s->current_picture);
3035 ff_mpeg_unref_picture(s, &s->last_picture);
3036 ff_mpeg_unref_picture(s, &s->next_picture);
3038 s->mb_x= s->mb_y= 0;
/* reset the byte-stream parser so no stale partial frame survives */
3041 s->parse_context.state= -1;
3042 s->parse_context.frame_start_found= 0;
3043 s->parse_context.overread= 0;
3044 s->parse_context.overread_index= 0;
3045 s->parse_context.index= 0;
3046 s->parse_context.last_index= 0;
3047 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantizer (C reference): DC is scaled by the
 * luma/chroma DC scale, AC coefficients by qscale * intra matrix with
 * odd-ification ((level-1)|1) as required by MPEG-1.
 * NOTE(review): the sign-handling branch lines are missing from this
 * excerpt; both visible arms apply the same scaling. */
3051 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
3052 int16_t *block, int n, int qscale)
3054 int i, level, nCoeffs;
3055 const uint16_t *quant_matrix;
3057 nCoeffs= s->block_last_index[n];
/* blocks 0-3 are luma, 4+ chroma */
3059 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3060 /* XXX: only mpeg1 */
3061 quant_matrix = s->intra_matrix;
3062 for(i=1;i<=nCoeffs;i++) {
3063 int j= s->intra_scantable.permutated[i];
3068 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3069 level = (level - 1) | 1;
3072 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3073 level = (level - 1) | 1;
/* MPEG-1 inter dequantizer (C reference): reconstruction is
 * ((2*level+1) * qscale * matrix) >> 4 with odd-ification, applied to
 * all coefficients including DC (loop starts at i=0).
 * NOTE(review): sign-handling lines are missing from this excerpt. */
3080 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
3081 int16_t *block, int n, int qscale)
3083 int i, level, nCoeffs;
3084 const uint16_t *quant_matrix;
3086 nCoeffs= s->block_last_index[n];
3088 quant_matrix = s->inter_matrix;
3089 for(i=0; i<=nCoeffs; i++) {
3090 int j= s->intra_scantable.permutated[i];
3095 level = (((level << 1) + 1) * qscale *
3096 ((int) (quant_matrix[j]))) >> 4;
3097 level = (level - 1) | 1;
3100 level = (((level << 1) + 1) * qscale *
3101 ((int) (quant_matrix[j]))) >> 4;
3102 level = (level - 1) | 1;
/* MPEG-2 intra dequantizer (C reference): like the MPEG-1 variant but
 * without odd-ification; with alternate scan all 64 coefficients are
 * processed regardless of block_last_index. */
3109 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
3110 int16_t *block, int n, int qscale)
3112 int i, level, nCoeffs;
3113 const uint16_t *quant_matrix;
3115 if(s->alternate_scan) nCoeffs= 63;
3116 else nCoeffs= s->block_last_index[n];
3118 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3119 quant_matrix = s->intra_matrix;
3120 for(i=1;i<=nCoeffs;i++) {
3121 int j= s->intra_scantable.permutated[i];
3126 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3129 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of the MPEG-2 intra dequantizer; the visible
 * scaling is identical to dct_unquantize_mpeg2_intra_c, the bit-exact
 * difference (mismatch-control accumulation) lies in lines missing
 * from this excerpt — TODO confirm against full source. */
3136 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
3137 int16_t *block, int n, int qscale)
3139 int i, level, nCoeffs;
3140 const uint16_t *quant_matrix;
3143 if(s->alternate_scan) nCoeffs= 63;
3144 else nCoeffs= s->block_last_index[n];
3146 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3148 quant_matrix = s->intra_matrix;
3149 for(i=1;i<=nCoeffs;i++) {
3150 int j= s->intra_scantable.permutated[i];
3155 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3158 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantizer (C reference): ((2*level+1)*qscale*matrix)>>4
 * without odd-ification; with alternate scan all 64 coefficients are
 * processed. */
3167 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
3168 int16_t *block, int n, int qscale)
3170 int i, level, nCoeffs;
3171 const uint16_t *quant_matrix;
3174 if(s->alternate_scan) nCoeffs= 63;
3175 else nCoeffs= s->block_last_index[n];
3177 quant_matrix = s->inter_matrix;
3178 for(i=0; i<=nCoeffs; i++) {
3179 int j= s->intra_scantable.permutated[i];
3184 level = (((level << 1) + 1) * qscale *
3185 ((int) (quant_matrix[j]))) >> 4;
3188 level = (((level << 1) + 1) * qscale *
3189 ((int) (quant_matrix[j]))) >> 4;
/* H.263 intra dequantizer (C reference): matrix-free reconstruction
 * level*qmul +/- qadd, with qadd = (qscale-1)|1; DC is scaled by the
 * luma/chroma DC scale. AIC mode tolerates an empty block. */
3198 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
3199 int16_t *block, int n, int qscale)
3201 int i, level, qmul, qadd;
3204 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
3209 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3210 qadd = (qscale - 1) | 1;
3217 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3219 for(i=1; i<=nCoeffs; i++) {
/* negative levels subtract qadd, positive add it (sign-preserving) */
3223 level = level * qmul - qadd;
3225 level = level * qmul + qadd;
/* H.263 inter dequantizer (C reference): same level*qmul +/- qadd
 * reconstruction as the intra variant but without DC special-casing
 * (loop starts at i=0). */
3232 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
3233 int16_t *block, int n, int qscale)
3235 int i, level, qmul, qadd;
3238 av_assert2(s->block_last_index[n]>=0);
3240 qadd = (qscale - 1) | 1;
3243 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3245 for(i=0; i<=nCoeffs; i++) {
3249 level = level * qmul - qadd;
3251 level = level * qmul + qadd;
3259 * set qscale and update qscale dependent variables.
/* Clamps qscale to the valid range (upper bound 31 visible here), then
 * refreshes the derived chroma qscale and DC scale factors from their
 * lookup tables. */
3261 void ff_set_qscale(MpegEncContext * s, int qscale)
3265 else if (qscale > 31)
3269 s->chroma_qscale= s->chroma_qscale_table[qscale];
3271 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3272 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Frame-threading: report that rows up to mb_y of the current picture
 * are decoded, so consumer threads may proceed. Skipped for B frames,
 * partitioned frames and after errors. */
3275 void ff_MPV_report_decode_progress(MpegEncContext *s)
3277 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3278 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3281 #if CONFIG_ERROR_RESILIENCE
/* Copies the per-frame state the error-resilience module needs
 * (picture pointers, timing, sampling/partitioning flags) into the
 * ERContext and starts ER tracking for the new frame. */
3282 void ff_mpeg_er_frame_start(MpegEncContext *s)
3284 ERContext *er = &s->er;
3286 er->cur_pic = s->current_picture_ptr;
3287 er->last_pic = s->last_picture_ptr;
3288 er->next_pic = s->next_picture_ptr;
3290 er->pp_time = s->pp_time;
3291 er->pb_time = s->pb_time;
3292 er->quarter_sample = s->quarter_sample;
3293 er->partitioned_frame = s->partitioned_frame;
3295 ff_er_frame_start(er);
3297 #endif /* CONFIG_ERROR_RESILIENCE */