2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
36 #include "h264chroma.h"
39 #include "mpegvideo.h"
42 #include "xvmc_internal.h"
/* Forward declarations for the per-codec C implementations of DCT coefficient
 * dequantization. ff_dct_common_init() below installs these as function
 * pointers on the MpegEncContext so arch-specific code can override them. */
46 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
47 int16_t *block, int n, int qscale);
48 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
49 int16_t *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
51 int16_t *block, int n, int qscale);
52 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
53 int16_t *block, int n, int qscale);
54 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
55 int16_t *block, int n, int qscale);
56 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
57 int16_t *block, int n, int qscale);
58 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
59 int16_t *block, int n, int qscale);
/* Identity mapping from luma qscale to chroma qscale (index i -> i);
 * used as the default when a codec does not supply its own table.
 * NOTE(review): closing brace appears lost in this extraction — verify. */
61 static const uint8_t ff_default_chroma_qscale_table[32] = {
62 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
63 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
64 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: a constant 8 for every qscale value. */
67 const uint8_t ff_mpeg1_dc_scale_table[128] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale for intra_dc_precision == 1: constant 4. */
79 static const uint8_t mpeg2_dc_scale_table1[128] = {
80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale for intra_dc_precision == 2: constant 2. */
91 static const uint8_t mpeg2_dc_scale_table2[128] = {
92 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale for intra_dc_precision == 3: constant 1 (full precision). */
103 static const uint8_t mpeg2_dc_scale_table3[128] = {
104 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* MPEG-2 DC scale tables indexed by intra_dc_precision (0..3); index 0
 * reuses the MPEG-1 constant-8 table. */
115 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
116 ff_mpeg1_dc_scale_table,
117 mpeg2_dc_scale_table1,
118 mpeg2_dc_scale_table2,
119 mpeg2_dc_scale_table3,
/* Default supported pixel format list (4:2:0).
 * NOTE(review): the initializer contents are missing from this extraction —
 * confirm against the upstream file. */
122 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback: reconstruct a single macroblock during error
 * concealment. Copies the given MV/MB parameters into the context, computes
 * the destination pointers for the current MB and invokes the regular MB
 * decoder. Registered as er->decode_mb in init_er(). */
127 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
129 int mb_x, int mb_y, int mb_intra, int mb_skipped)
131 MpegEncContext *s = opaque;
134 s->mv_type = mv_type;
135 s->mb_intra = mb_intra;
136 s->mb_skipped = mb_skipped;
139 memcpy(s->mv, mv, sizeof(*mv));
141 ff_init_block_index(s);
142 ff_update_block_index(s);
144 s->dsp.clear_blocks(s->block[0]);
/* Luma dest advances by 16 pixels per MB; chroma is scaled by the chroma
 * subsampling shifts (16 >> shift). */
146 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
147 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
148 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
151 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
152 ff_MPV_decode_mb(s, s->block);
155 /* init common dct for both encoder and decoder */
/* Initializes the DSP/chroma/half-pel/video DSP helpers, installs the C
 * dct_unquantize_* function pointers (arch-specific init calls may override
 * them), and builds the permuted scan tables for the active IDCT. */
156 av_cold int ff_dct_common_init(MpegEncContext *s)
158 ff_dsputil_init(&s->dsp, s->avctx);
159 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
160 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
161 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
163 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
164 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
165 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
166 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
167 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* Bit-exact mode uses a variant matching the reference decoder exactly. */
168 if (s->flags & CODEC_FLAG_BITEXACT)
169 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
170 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Arch-specific overrides; presumably wrapped in ARCH_* guards in the
 * original — the conditionals were lost in this extraction, verify. */
173 ff_MPV_common_init_axp(s);
175 ff_MPV_common_init_arm(s);
177 ff_MPV_common_init_bfin(s);
179 ff_MPV_common_init_ppc(s);
181 ff_MPV_common_init_x86(s);
183 /* load & permutate scantables
184 * note: only wmv uses different ones
186 if (s->alternate_scan) {
187 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
190 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
194 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge emulation and motion
 * estimation scratchpads). The ME scratchpad is shared with rd/b/obmc
 * scratch pointers. Returns 0 on success, AVERROR(ENOMEM) on failure
 * (freeing edge_emu_buffer on the error path). */
199 static int frame_size_alloc(MpegEncContext *s, int linesize)
201 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
203 // edge emu needs blocksize + filter length - 1
204 // (= 17x17 for halfpel / 21x21 for h264)
205 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
206 // at uvlinesize. It supports only YUV420 so 24x24 is enough
207 // linesize * interlaced * MBsize
208 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
211 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
/* All four scratch pointers alias the single ME scratchpad allocation;
 * obmc_scratchpad starts 16 bytes in. */
213 s->me.temp = s->me.scratchpad;
214 s->rd_scratchpad = s->me.scratchpad;
215 s->b_scratchpad = s->me.scratchpad;
216 s->obmc_scratchpad = s->me.scratchpad + 16;
220 av_freep(&s->edge_emu_buffer);
221 return AVERROR(ENOMEM);
225 * Allocate a frame buffer
/* Obtains pixel buffers for pic — via the (possibly threaded) get_buffer
 * path for normal codecs, or the default allocator for the WM image/screen
 * codecs whose internal buffers have different dimensions. Also allocates
 * hwaccel private data and validates that strides did not change. */
227 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
232 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
233 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
234 s->codec_id != AV_CODEC_ID_MSS2)
235 r = ff_thread_get_buffer(s->avctx, &pic->tf,
236 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* WM image/screen codecs: bypass user get_buffer callbacks and allocate
 * directly with the default allocator. */
238 pic->f.width = s->avctx->width;
239 pic->f.height = s->avctx->height;
240 pic->f.format = s->avctx->pix_fmt;
241 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
244 if (r < 0 || !pic->f.buf[0]) {
245 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
250 if (s->avctx->hwaccel) {
251 assert(!pic->hwaccel_picture_private);
252 if (s->avctx->hwaccel->priv_data_size) {
253 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
254 if (!pic->hwaccel_priv_buf) {
255 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
258 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* The context caches linesize/uvlinesize; a stride change here would
 * invalidate every derived pointer, so reject it. */
262 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
263 s->uvlinesize != pic->f.linesize[1])) {
264 av_log(s->avctx, AV_LOG_ERROR,
265 "get_buffer() failed (stride changed)\n");
266 ff_mpeg_unref_picture(s, pic);
270 if (pic->f.linesize[1] != pic->f.linesize[2]) {
271 av_log(s->avctx, AV_LOG_ERROR,
272 "get_buffer() failed (uv stride mismatch)\n");
273 ff_mpeg_unref_picture(s, pic);
/* Lazily allocate linesize-dependent scratch buffers on first frame. */
277 if (!s->edge_emu_buffer &&
278 (ret = frame_size_alloc(s, pic->f.linesize[0])) < 0) {
279 av_log(s->avctx, AV_LOG_ERROR,
280 "get_buffer() failed to allocate context scratch buffers.\n");
281 ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data table buffers (variance, skip, qscale,
 * mb_type, motion vectors, ref indices) and reset the recorded allocation
 * dimensions so the tables will be re-allocated on next use. */
288 void ff_free_picture_tables(Picture *pic)
292 pic->alloc_mb_width =
293 pic->alloc_mb_height = 0;
295 av_buffer_unref(&pic->mb_var_buf);
296 av_buffer_unref(&pic->mc_mb_var_buf);
297 av_buffer_unref(&pic->mb_mean_buf);
298 av_buffer_unref(&pic->mbskip_table_buf);
299 av_buffer_unref(&pic->qscale_table_buf);
300 av_buffer_unref(&pic->mb_type_buf);
302 for (i = 0; i < 2; i++) {
303 av_buffer_unref(&pic->motion_val_buf[i]);
304 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data tables sized from the current context
 * geometry. Encoder-only statistics tables (mb_var/mc_mb_var/mb_mean) and
 * the motion tables are allocated conditionally — the guarding conditions
 * appear partially lost in this extraction, verify against upstream.
 * Returns 0 on success, AVERROR(ENOMEM) on any failed allocation. */
308 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
310 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
311 const int mb_array_size = s->mb_stride * s->mb_height;
312 const int b8_array_size = s->b8_stride * s->mb_height * 2;
316 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
317 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
318 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
320 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
321 return AVERROR(ENOMEM);
/* Encoding statistics tables. */
324 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
325 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
326 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
327 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
328 return AVERROR(ENOMEM);
331 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
332 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
333 int ref_index_size = 4 * mb_array_size;
335 for (i = 0; mv_size && i < 2; i++) {
336 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
337 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
338 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
339 return AVERROR(ENOMEM);
/* Record the geometry these tables were sized for, so ff_alloc_picture()
 * can detect a resolution change and free/re-allocate. */
343 pic->alloc_mb_width = s->mb_width;
344 pic->alloc_mb_height = s->mb_height;
/* Ensure each of the picture's table buffers is uniquely owned (writable),
 * copying shared AVBuffers as needed. Returns 0 on success or a negative
 * AVERROR from av_buffer_make_writable(). */
349 static int make_tables_writable(Picture *pic)
352 #define MAKE_WRITABLE(table) \
355 (ret = av_buffer_make_writable(&pic->table)) < 0)\
359 MAKE_WRITABLE(mb_var_buf);
360 MAKE_WRITABLE(mc_mb_var_buf);
361 MAKE_WRITABLE(mb_mean_buf);
362 MAKE_WRITABLE(mbskip_table_buf);
363 MAKE_WRITABLE(qscale_table_buf);
364 MAKE_WRITABLE(mb_type_buf);
366 for (i = 0; i < 2; i++) {
367 MAKE_WRITABLE(motion_val_buf[i]);
368 MAKE_WRITABLE(ref_index_buf[i]);
375 * Allocate a Picture.
376 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Also (re)allocates the per-picture side-data tables, freeing them first
 * if the context geometry changed since they were allocated, then sets the
 * convenience data pointers into each table buffer. On failure everything
 * is unreferenced and AVERROR(ENOMEM) returned. */
378 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* Resolution changed since the tables were built: drop and re-allocate. */
382 if (pic->qscale_table_buf)
383 if ( pic->alloc_mb_width != s->mb_width
384 || pic->alloc_mb_height != s->mb_height)
385 ff_free_picture_tables(pic);
388 av_assert0(pic->f.data[0]);
391 av_assert0(!pic->f.buf[0]);
393 if (alloc_frame_buffer(s, pic) < 0)
/* Cache the strides the rest of the decoder/encoder derives offsets from. */
396 s->linesize = pic->f.linesize[0];
397 s->uvlinesize = pic->f.linesize[1];
400 if (!pic->qscale_table_buf)
401 ret = alloc_picture_tables(s, pic);
403 ret = make_tables_writable(pic);
408 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
409 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
410 pic->mb_mean = pic->mb_mean_buf->data;
413 pic->mbskip_table = pic->mbskip_table_buf->data;
/* qscale/mb_type pointers skip the 2-row + 1 guard margin at the start. */
414 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
415 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
417 if (pic->motion_val_buf[0]) {
418 for (i = 0; i < 2; i++) {
419 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
420 pic->ref_index[i] = pic->ref_index_buf[i]->data;
426 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
427 ff_mpeg_unref_picture(s, pic);
428 ff_free_picture_tables(pic);
429 return AVERROR(ENOMEM);
433 * Deallocate a picture.
/* Releases the frame buffer (through the threading layer for normal codecs),
 * drops hwaccel private data, frees the side tables if flagged for realloc,
 * then zeroes every Picture field past mb_mean so table pointers survive. */
435 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
/* Fields up to and including mb_mean are preserved; the rest is cleared. */
437 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
440 /* WM Image / Screen codecs allocate internal buffers with different
441 * dimensions / colorspaces; ignore user-defined callbacks for these. */
442 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
443 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
444 s->codec_id != AV_CODEC_ID_MSS2)
445 ff_thread_release_buffer(s->avctx, &pic->tf);
447 av_frame_unref(&pic->f);
449 av_buffer_unref(&pic->hwaccel_priv_buf);
451 if (pic->needs_realloc)
452 ff_free_picture_tables(pic);
454 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's side-data table buffers reference the same AVBuffers as src's
 * (re-referencing only when they differ), then copy the derived data
 * pointers and allocation dimensions. On a failed av_buffer_ref() all of
 * dst's tables are freed and AVERROR(ENOMEM) is returned. */
457 static int update_picture_tables(Picture *dst, Picture *src)
461 #define UPDATE_TABLE(table)\
464 (!dst->table || dst->table->buffer != src->table->buffer)) {\
465 av_buffer_unref(&dst->table);\
466 dst->table = av_buffer_ref(src->table);\
468 ff_free_picture_tables(dst);\
469 return AVERROR(ENOMEM);\
474 UPDATE_TABLE(mb_var_buf);
475 UPDATE_TABLE(mc_mb_var_buf);
476 UPDATE_TABLE(mb_mean_buf);
477 UPDATE_TABLE(mbskip_table_buf);
478 UPDATE_TABLE(qscale_table_buf);
479 UPDATE_TABLE(mb_type_buf);
480 for (i = 0; i < 2; i++) {
481 UPDATE_TABLE(motion_val_buf[i]);
482 UPDATE_TABLE(ref_index_buf[i]);
/* The raw pointers point into the shared buffers, so plain copies suffice. */
485 dst->mb_var = src->mb_var;
486 dst->mc_mb_var = src->mc_mb_var;
487 dst->mb_mean = src->mb_mean;
488 dst->mbskip_table = src->mbskip_table;
489 dst->qscale_table = src->qscale_table;
490 dst->mb_type = src->mb_type;
491 for (i = 0; i < 2; i++) {
492 dst->motion_val[i] = src->motion_val[i];
493 dst->ref_index[i] = src->ref_index[i];
496 dst->alloc_mb_width = src->alloc_mb_width;
497 dst->alloc_mb_height = src->alloc_mb_height;
/* Create a new reference in dst to the picture in src: frame buffer (via
 * the frame-threading layer), side tables, hwaccel private data, and the
 * scalar bookkeeping fields. dst must be empty on entry. On any failure
 * dst is fully unreferenced before returning the error. */
502 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
506 av_assert0(!dst->f.buf[0]);
507 av_assert0(src->f.buf[0]);
511 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
515 ret = update_picture_tables(dst, src);
519 if (src->hwaccel_picture_private) {
520 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
521 if (!dst->hwaccel_priv_buf)
523 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
526 dst->field_picture = src->field_picture;
527 dst->mb_var_sum = src->mb_var_sum;
528 dst->mc_mb_var_sum = src->mc_mb_var_sum;
529 dst->b_frame_score = src->b_frame_score;
530 dst->needs_realloc = src->needs_realloc;
531 dst->reference = src->reference;
532 dst->shared = src->shared;
/* Error path: drop whatever was referenced so far. */
536 ff_mpeg_unref_picture(s, dst);
/* Swap the U and V pblock pointers (used for the VCR2 codec tag below).
 * NOTE(review): only one assignment of the swap survives this extraction —
 * the temporary/second assignment is missing; verify against upstream. */
540 static void exchange_uv(MpegEncContext *s)
545 s->pblocks[4] = s->pblocks[5];
/* Allocate the per-slice-thread parts of an MpegEncContext: motion
 * estimation maps, optional noise-reduction error accumulator, DCT block
 * storage with pblocks pointers (U/V swapped for VCR2), and H.263 AC
 * prediction values. Returns 0 on success, -1 on allocation failure
 * (cleanup deferred to ff_MPV_common_end()). */
549 static int init_duplicate_context(MpegEncContext *s)
551 int y_size = s->b8_stride * (2 * s->mb_height + 1);
552 int c_size = s->mb_stride * (s->mb_height + 1);
553 int yc_size = y_size + 2 * c_size;
561 s->obmc_scratchpad = NULL;
564 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
565 ME_MAP_SIZE * sizeof(uint32_t), fail)
566 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
567 ME_MAP_SIZE * sizeof(uint32_t), fail)
568 if (s->avctx->noise_reduction) {
569 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
570 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered. */
573 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
574 s->block = s->blocks[0];
576 for (i = 0; i < 12; i++) {
577 s->pblocks[i] = &s->block[i];
/* VCR2 stores chroma planes in the opposite order. */
579 if (s->avctx->codec_tag == AV_RL32("VCR2"))
582 if (s->out_format == FMT_H263) {
584 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
585 yc_size * sizeof(int16_t) * 16, fail);
586 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
587 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
588 s->ac_val[2] = s->ac_val[1] + c_size;
593 return -1; // free() through ff_MPV_common_end()
/* Free everything allocated by init_duplicate_context()/frame_size_alloc()
 * for one slice-thread context. obmc_scratchpad aliases me.scratchpad, so
 * it is only NULLed, not freed separately. */
596 static void free_duplicate_context(MpegEncContext *s)
601 av_freep(&s->edge_emu_buffer);
602 av_freep(&s->me.scratchpad);
606 s->obmc_scratchpad = NULL;
608 av_freep(&s->dct_error_sum);
609 av_freep(&s->me.map);
610 av_freep(&s->me.score_map);
611 av_freep(&s->blocks);
612 av_freep(&s->ac_val_base);
/* Copy the per-thread pointer fields from src into bak so they can be
 * restored after a whole-struct memcpy in ff_update_duplicate_context().
 * NOTE(review): most COPY() lines are missing from this extraction —
 * verify the full field list against upstream. */
616 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
618 #define COPY(a) bak->a = src->a
619 COPY(edge_emu_buffer);
624 COPY(obmc_scratchpad);
631 COPY(me.map_generation);
/* Refresh a slice-thread context from the master: memcpy the whole struct,
 * then restore dst's own per-thread buffers via backup_duplicate_context()
 * and rebuild the pblocks pointers (U/V swapped for VCR2). Allocates the
 * linesize-dependent scratch buffers if dst does not have them yet. */
643 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
647 // FIXME copy only needed parts
649 backup_duplicate_context(&bak, dst);
650 memcpy(dst, src, sizeof(MpegEncContext));
651 backup_duplicate_context(dst, &bak);
652 for (i = 0; i < 12; i++) {
653 dst->pblocks[i] = &dst->block[i];
655 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
657 if (!dst->edge_emu_buffer &&
658 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
659 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
660 "scratch buffers.\n");
663 // STOP_TIMER("update_duplicate_context")
664 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading sync point: copy decoding state from the source thread's
 * context (s1) into the destination thread's context (s). Handles first-time
 * initialization, resolution changes, re-referencing all pictures, and
 * copying the bitstream buffer and assorted scalar state. */
668 int ff_mpeg_update_thread_context(AVCodecContext *dst,
669 const AVCodecContext *src)
672 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
679 // FIXME can parameters change on I-frames?
680 // in that case dst may need a reinit
681 if (!s->context_initialized) {
/* First update: clone the whole context, then re-init what must be owned
 * per-thread (bitstream buffer, pictures via ff_MPV_common_init). */
682 memcpy(s, s1, sizeof(MpegEncContext));
685 s->bitstream_buffer = NULL;
686 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
688 if (s1->context_initialized){
689 // s->picture_range_start += MAX_PICTURE_COUNT;
690 // s->picture_range_end += MAX_PICTURE_COUNT;
691 if((ret = ff_MPV_common_init(s)) < 0){
692 memset(s, 0, sizeof(MpegEncContext));
699 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
700 s->context_reinit = 0;
701 s->height = s1->height;
702 s->width = s1->width;
703 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
707 s->avctx->coded_height = s1->avctx->coded_height;
708 s->avctx->coded_width = s1->avctx->coded_width;
709 s->avctx->width = s1->avctx->width;
710 s->avctx->height = s1->avctx->height;
712 s->coded_picture_number = s1->coded_picture_number;
713 s->picture_number = s1->picture_number;
715 av_assert0(!s->picture || s->picture != s1->picture);
/* Re-reference the whole picture pool from the source thread. */
717 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
718 ff_mpeg_unref_picture(s, &s->picture[i]);
719 if (s1->picture[i].f.buf[0] &&
720 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
724 #define UPDATE_PICTURE(pic)\
726 ff_mpeg_unref_picture(s, &s->pic);\
727 if (s1->pic.f.buf[0])\
728 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
730 ret = update_picture_tables(&s->pic, &s1->pic);\
735 UPDATE_PICTURE(current_picture);
736 UPDATE_PICTURE(last_picture);
737 UPDATE_PICTURE(next_picture);
/* Translate s1's picture-pool pointers to the equivalent slots in s. */
739 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
740 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
741 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
743 // Error/bug resilience
744 s->next_p_frame_damaged = s1->next_p_frame_damaged;
745 s->workaround_bugs = s1->workaround_bugs;
746 s->padding_bug_score = s1->padding_bug_score;
/* Bulk-copy the contiguous struct range last_time_base..pb_field_time. */
749 memcpy(&s->last_time_base, &s1->last_time_base,
750 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
751 (char *) &s1->last_time_base);
754 s->max_b_frames = s1->max_b_frames;
755 s->low_delay = s1->low_delay;
756 s->droppable = s1->droppable;
758 // DivX handling (doesn't work)
759 s->divx_packed = s1->divx_packed;
761 if (s1->bitstream_buffer) {
762 if (s1->bitstream_buffer_size +
763 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
764 av_fast_malloc(&s->bitstream_buffer,
765 &s->allocated_bitstream_buffer_size,
766 s1->allocated_bitstream_buffer_size);
767 s->bitstream_buffer_size = s1->bitstream_buffer_size;
768 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
769 s1->bitstream_buffer_size);
770 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
771 FF_INPUT_BUFFER_PADDING_SIZE);
774 // linesize dependend scratch buffer allocation
775 if (!s->edge_emu_buffer)
777 if (frame_size_alloc(s, s1->linesize) < 0) {
778 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
779 "scratch buffers.\n");
780 return AVERROR(ENOMEM);
783 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
784 "be allocated due to unknown size.\n");
787 // MPEG2/interlacing info
788 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
789 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
791 if (!s1->first_field) {
792 s->last_pict_type = s1->pict_type;
793 if (s1->current_picture_ptr)
794 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
801 * Set the given MpegEncContext to common defaults
802 * (same for encoding and decoding).
803 * The changed fields will not depend upon the
804 * prior state of the MpegEncContext.
806 void ff_MPV_common_defaults(MpegEncContext *s)
/* Default scale/qscale tables; codecs override as needed. */
808 s->y_dc_scale_table =
809 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
810 s->chroma_qscale_table = ff_default_chroma_qscale_table;
811 s->progressive_frame = 1;
812 s->progressive_sequence = 1;
813 s->picture_structure = PICT_FRAME;
815 s->coded_picture_number = 0;
816 s->picture_number = 0;
821 s->slice_context_count = 1;
825 * Set the given MpegEncContext to defaults for decoding.
826 * the changed fields will not depend upon
827 * the prior state of the MpegEncContext.
829 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Currently just the common defaults; decoder-specific defaults, if any,
 * are not visible in this extraction. */
831 ff_MPV_common_defaults(s);
/* Wire up the error-resilience (ER) context: copy the geometry fields from
 * the MpegEncContext, allocate the ER work tables, share the skip/intra/DC
 * tables, and register mpeg_er_decode_mb as the concealment callback.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
834 static int init_er(MpegEncContext *s)
836 ERContext *er = &s->er;
837 int mb_array_size = s->mb_height * s->mb_stride;
840 er->avctx = s->avctx;
843 er->mb_index2xy = s->mb_index2xy;
844 er->mb_num = s->mb_num;
845 er->mb_width = s->mb_width;
846 er->mb_height = s->mb_height;
847 er->mb_stride = s->mb_stride;
848 er->b8_stride = s->b8_stride;
850 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
851 er->error_status_table = av_mallocz(mb_array_size);
852 if (!er->er_temp_buffer || !er->error_status_table)
/* These tables are owned by the MpegEncContext, only shared here. */
855 er->mbskip_table = s->mbskip_table;
856 er->mbintra_table = s->mbintra_table;
858 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
859 er->dc_val[i] = s->dc_val[i];
861 er->decode_mb = mpeg_er_decode_mb;
866 av_freep(&er->er_temp_buffer);
867 av_freep(&er->error_status_table);
868 return AVERROR(ENOMEM);
872 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Computes mb/b8/b4 strides from width, allocates the MV tables, MB type /
 * lambda / complexity tables, interlaced direct-mode tables (MPEG-4 or
 * interlaced ME), H.263 coded-block and cbp/pred_dir tables, DC prediction
 * values, and the intra/skip tables. All allocations jump to the shared
 * fail label which returns AVERROR(ENOMEM). */
874 static int init_context_frame(MpegEncContext *s)
876 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
878 s->mb_width = (s->width + 15) / 16;
/* Strides are one unit wider than the MB/block count for edge guards. */
879 s->mb_stride = s->mb_width + 1;
880 s->b8_stride = s->mb_width * 2 + 1;
881 s->b4_stride = s->mb_width * 4 + 1;
882 mb_array_size = s->mb_height * s->mb_stride;
883 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
885 /* set default edge pos, will be overriden
886 * in decode_header if needed */
887 s->h_edge_pos = s->mb_width * 16;
888 s->v_edge_pos = s->mb_height * 16;
890 s->mb_num = s->mb_width * s->mb_height;
/* NOTE(review): block_wrap[0..2]/[4] assignments appear lost in this
 * extraction — verify against upstream. */
895 s->block_wrap[3] = s->b8_stride;
897 s->block_wrap[5] = s->mb_stride;
899 y_size = s->b8_stride * (2 * s->mb_height + 1);
900 c_size = s->mb_stride * (s->mb_height + 1);
901 yc_size = y_size + 2 * c_size;
903 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
904 for (y = 0; y < s->mb_height; y++)
905 for (x = 0; x < s->mb_width; x++)
906 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
908 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
911 /* Allocate MV tables */
912 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
913 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
914 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
915 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
916 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
917 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
/* Working pointers skip the one-row + one guard margin in each base. */
918 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
919 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
920 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
921 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
922 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
923 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
925 /* Allocate MB type table */
926 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
928 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
930 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
931 mb_array_size * sizeof(float), fail);
932 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
933 mb_array_size * sizeof(float), fail);
937 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
938 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
939 /* interlaced direct mode decoding tables */
940 for (i = 0; i < 2; i++) {
942 for (j = 0; j < 2; j++) {
943 for (k = 0; k < 2; k++) {
944 FF_ALLOCZ_OR_GOTO(s->avctx,
945 s->b_field_mv_table_base[i][j][k],
946 mv_table_size * 2 * sizeof(int16_t),
948 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
951 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
952 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
953 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
955 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
958 if (s->out_format == FMT_H263) {
960 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
961 s->coded_block = s->coded_block_base + s->b8_stride + 1;
963 /* cbp, ac_pred, pred_dir */
964 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
965 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
968 if (s->h263_pred || s->h263_plus || !s->encoding) {
970 // MN: we need these for error resilience of intra-frames
971 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
972 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
973 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
974 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor (128 << 3). */
975 for (i = 0; i < yc_size; i++)
976 s->dc_val_base[i] = 1024;
979 /* which mb is a intra block */
980 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
981 memset(s->mbintra_table, 1, mb_array_size);
983 /* init macroblock skip table */
984 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
985 // Note the + 1 is for a quicker mpeg4 slice_end detection
989 return AVERROR(ENOMEM);
993 * init common structure for both encoder and decoder.
994 * this assumes that some variables like width/height are already set
/* Full context setup: decides slice-thread count, computes mb_height
 * (field-aligned for interlaced MPEG-2), validates dimensions and pixel
 * format, initializes DSP/scan tables, allocates the picture pool and the
 * resolution-dependent tables, then creates and splits the per-slice
 * thread contexts. On failure tears everything down via ff_MPV_common_end(). */
996 av_cold int ff_MPV_common_init(MpegEncContext *s)
999 int nb_slices = (HAVE_THREADS &&
1000 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1001 s->avctx->thread_count : 1;
1003 if (s->encoding && s->avctx->slices)
1004 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 needs mb_height rounded to a whole field-MB pair. */
1006 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1007 s->mb_height = (s->height + 31) / 32 * 2;
1009 s->mb_height = (s->height + 15) / 16;
1011 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1012 av_log(s->avctx, AV_LOG_ERROR,
1013 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp slice count to MAX_THREADS and to the number of MB rows. */
1017 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1020 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1022 max_slices = MAX_THREADS;
1023 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1024 " reducing to %d\n", nb_slices, max_slices);
1025 nb_slices = max_slices;
1028 if ((s->width || s->height) &&
1029 av_image_check_size(s->width, s->height, 0, s->avctx))
1032 ff_dct_common_init(s);
1034 s->flags = s->avctx->flags;
1035 s->flags2 = s->avctx->flags2;
1037 /* set chroma shifts */
1038 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1040 &s->chroma_y_shift);
1042 /* convert fourcc to upper case */
1043 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1045 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1047 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1048 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1049 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1050 avcodec_get_frame_defaults(&s->picture[i].f);
1052 memset(&s->next_picture, 0, sizeof(s->next_picture));
1053 memset(&s->last_picture, 0, sizeof(s->last_picture));
1054 memset(&s->current_picture, 0, sizeof(s->current_picture));
1055 avcodec_get_frame_defaults(&s->next_picture.f);
1056 avcodec_get_frame_defaults(&s->last_picture.f);
1057 avcodec_get_frame_defaults(&s->current_picture.f);
1059 if (init_context_frame(s))
1062 s->parse_context.state = -1;
1064 s->context_initialized = 1;
1065 s->thread_context[0] = s;
1067 // if (s->width && s->height) {
1068 if (nb_slices > 1) {
1069 for (i = 1; i < nb_slices; i++) {
1070 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1071 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1074 for (i = 0; i < nb_slices; i++) {
1075 if (init_duplicate_context(s->thread_context[i]) < 0)
/* Distribute MB rows evenly (with rounding) across slice threads. */
1077 s->thread_context[i]->start_mb_y =
1078 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1079 s->thread_context[i]->end_mb_y =
1080 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1083 if (init_duplicate_context(s) < 0)
1086 s->end_mb_y = s->mb_height;
1088 s->slice_context_count = nb_slices;
1093 ff_MPV_common_end(s);
1098 * Frees and resets MpegEncContext fields depending on the resolution.
1099 * Is used during resolution changes to avoid a full reinitialization of the
/* Frees every table allocated by init_context_frame() (and the ER tables),
 * NULLs the derived working pointers, and clears the cached linesizes so
 * they are re-detected from the next allocated frame. */
1102 static int free_context_frame(MpegEncContext *s)
1106 av_freep(&s->mb_type);
1107 av_freep(&s->p_mv_table_base);
1108 av_freep(&s->b_forw_mv_table_base);
1109 av_freep(&s->b_back_mv_table_base);
1110 av_freep(&s->b_bidir_forw_mv_table_base);
1111 av_freep(&s->b_bidir_back_mv_table_base);
1112 av_freep(&s->b_direct_mv_table_base);
/* The non-base pointers point into the freed bases — clear them too. */
1113 s->p_mv_table = NULL;
1114 s->b_forw_mv_table = NULL;
1115 s->b_back_mv_table = NULL;
1116 s->b_bidir_forw_mv_table = NULL;
1117 s->b_bidir_back_mv_table = NULL;
1118 s->b_direct_mv_table = NULL;
1119 for (i = 0; i < 2; i++) {
1120 for (j = 0; j < 2; j++) {
1121 for (k = 0; k < 2; k++) {
1122 av_freep(&s->b_field_mv_table_base[i][j][k]);
1123 s->b_field_mv_table[i][j][k] = NULL;
1125 av_freep(&s->b_field_select_table[i][j]);
1126 av_freep(&s->p_field_mv_table_base[i][j]);
1127 s->p_field_mv_table[i][j] = NULL;
1129 av_freep(&s->p_field_select_table[i]);
1132 av_freep(&s->dc_val_base);
1133 av_freep(&s->coded_block_base);
1134 av_freep(&s->mbintra_table);
1135 av_freep(&s->cbp_table);
1136 av_freep(&s->pred_dir_table);
1138 av_freep(&s->mbskip_table);
1140 av_freep(&s->er.error_status_table);
1141 av_freep(&s->er.er_temp_buffer);
1142 av_freep(&s->mb_index2xy);
1143 av_freep(&s->lambda_table);
1145 av_freep(&s->cplx_tab);
1146 av_freep(&s->bits_tab);
1148 s->linesize = s->uvlinesize = 0;
/* Reinitialize the context for a new frame size without a full teardown:
 * frees per-resolution state, recomputes mb dimensions, then rebuilds the
 * frame tables and slice thread contexts.  Returns 0 on success or a
 * negative AVERROR (error paths partially elided in this excerpt). */
1153 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Tear down all slice contexts; index 0 is the main context itself, so only
 * indices >= 1 were separately malloc'ed and are freed here. */
1157 if (s->slice_context_count > 1) {
1158 for (i = 0; i < s->slice_context_count; i++) {
1159 free_duplicate_context(s->thread_context[i]);
1161 for (i = 1; i < s->slice_context_count; i++) {
1162 av_freep(&s->thread_context[i]);
1165 free_duplicate_context(s);
1167 if ((err = free_context_frame(s)) < 0)
/* Mark every picture so its buffers get reallocated at the new size. */
1171 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1172 s->picture[i].needs_realloc = 1;
1175 s->last_picture_ptr =
1176 s->next_picture_ptr =
1177 s->current_picture_ptr = NULL;
/* Interlaced MPEG-2 needs mb_height rounded to a multiple of two 16-line
 * macroblock rows (32-pixel units); everything else uses plain 16. */
1180 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1181 s->mb_height = (s->height + 31) / 32 * 2;
1183 s->mb_height = (s->height + 15) / 16;
1185 if ((s->width || s->height) &&
1186 av_image_check_size(s->width, s->height, 0, s->avctx))
1187 return AVERROR_INVALIDDATA;
1189 if ((err = init_context_frame(s)))
1192 s->thread_context[0] = s;
/* Re-create the slice thread contexts as shallow copies of the main
 * context, then assign each one a contiguous band of macroblock rows. */
1194 if (s->width && s->height) {
1195 int nb_slices = s->slice_context_count;
1196 if (nb_slices > 1) {
1197 for (i = 1; i < nb_slices; i++) {
1198 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1199 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1202 for (i = 0; i < nb_slices; i++) {
1203 if (init_duplicate_context(s->thread_context[i]) < 0)
/* Rounded division spreads the rows as evenly as possible. */
1205 s->thread_context[i]->start_mb_y =
1206 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1207 s->thread_context[i]->end_mb_y =
1208 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1211 err = init_duplicate_context(s);
1215 s->end_mb_y = s->mb_height;
1217 s->slice_context_count = nb_slices;
/* Error path: a failed rebuild tears the whole context down. */
1222 ff_MPV_common_end(s);
1226 /* init common structure for both encoder and decoder */
/* Full teardown counterpart of ff_MPV_common_init(): frees slice contexts,
 * bitstream/parse buffers, all pictures and the per-resolution tables, and
 * resets the context to the "not initialized" state. */
1227 void ff_MPV_common_end(MpegEncContext *s)
/* Only indices >= 1 of thread_context[] were heap-allocated; index 0 is s. */
1231 if (s->slice_context_count > 1) {
1232 for (i = 0; i < s->slice_context_count; i++) {
1233 free_duplicate_context(s->thread_context[i]);
1235 for (i = 1; i < s->slice_context_count; i++) {
1236 av_freep(&s->thread_context[i]);
1238 s->slice_context_count = 1;
1239 } else free_duplicate_context(s);
1241 av_freep(&s->parse_context.buffer);
1242 s->parse_context.buffer_size = 0;
1244 av_freep(&s->bitstream_buffer);
1245 s->allocated_bitstream_buffer_size = 0;
/* Release every picture in the pool plus the three working copies. */
1248 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1249 ff_free_picture_tables(&s->picture[i]);
1250 ff_mpeg_unref_picture(s, &s->picture[i]);
1253 av_freep(&s->picture);
1254 ff_free_picture_tables(&s->last_picture);
1255 ff_mpeg_unref_picture(s, &s->last_picture);
1256 ff_free_picture_tables(&s->current_picture);
1257 ff_mpeg_unref_picture(s, &s->current_picture);
1258 ff_free_picture_tables(&s->next_picture);
1259 ff_mpeg_unref_picture(s, &s->next_picture);
1261 free_context_frame(s);
/* Clear pointers/strides so a stale context cannot be mistaken for live. */
1263 s->context_initialized = 0;
1264 s->last_picture_ptr =
1265 s->next_picture_ptr =
1266 s->current_picture_ptr = NULL;
1267 s->linesize = s->uvlinesize = 0;
/* Build the run/level lookup tables (max_level[], max_run[], index_run[])
 * for an RLTable.  If static_store is non-NULL the tables live in the
 * caller-provided static buffer; otherwise they are av_malloc'ed. */
1270 av_cold void ff_init_rl(RLTable *rl,
1271 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1273 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1274 uint8_t index_run[MAX_RUN + 1];
1275 int last, run, level, start, end, i;
1277 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1278 if (static_store && rl->max_level[0])
1281 /* compute max_level[], max_run[] and index_run[] */
/* Pass 0 covers "not last" codes, pass 1 the "last coefficient" codes —
 * NOTE(review): start/end assignment for each pass is elided here. */
1282 for (last = 0; last < 2; last++) {
/* index_run is filled with rl->n as the "no code for this run" sentinel. */
1291 memset(max_level, 0, MAX_RUN + 1);
1292 memset(max_run, 0, MAX_LEVEL + 1);
1293 memset(index_run, rl->n, MAX_RUN + 1);
1294 for (i = start; i < end; i++) {
1295 run = rl->table_run[i];
1296 level = rl->table_level[i];
1297 if (index_run[run] == rl->n)
1299 if (level > max_level[run])
1300 max_level[run] = level;
1301 if (run > max_run[level])
1302 max_run[level] = run;
/* Copy the scratch tables into static storage or fresh heap buffers; the
 * three tables are packed back-to-back inside static_store[last]. */
1305 rl->max_level[last] = static_store[last];
1307 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1308 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1310 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1312 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1313 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1315 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1317 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1318 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Pre-expand the RL VLC into per-qscale RL_VLC_ELEM tables so the decoder
 * can fetch run/level/len in a single lookup per quantizer value. */
1322 av_cold void ff_init_vlc_rl(RLTable *rl)
1326 for (q = 0; q < 32; q++) {
/* qadd = (q - 1) | 1 gives the standard H.263-style odd rounding offset —
 * NOTE(review): qmul derivation and the q==0 special case are elided here. */
1328 int qadd = (q - 1) | 1;
1334 for (i = 0; i < rl->vlc.table_size; i++) {
1335 int code = rl->vlc.table[i][0];
1336 int len = rl->vlc.table[i][1];
1339 if (len == 0) { // illegal code
1342 } else if (len < 0) { // more bits needed
1346 if (code == rl->n) { // esc
/* Normal code: dequantize the level and bias the run; codes at or past
 * rl->last belong to the "last coefficient" half (+192 run marker). */
1350 run = rl->table_run[code] + 1;
1351 level = rl->table_level[code] * qmul + qadd;
1352 if (code >= rl->last) run += 192;
1355 rl->rl_vlc[q][i].len = len;
1356 rl->rl_vlc[q][i].level = level;
1357 rl->rl_vlc[q][i].run = run;
/* Unreference every picture in the pool that is not marked as a reference
 * frame, returning its buffers for reuse. */
1362 static void release_unused_pictures(MpegEncContext *s)
1366 /* release non reference frames */
1367 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1368 if (!s->picture[i].reference)
1369 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Decide whether a picture slot can be recycled: the current last-picture
 * is never unused; a slot with no data buffer is free; a slot pending
 * reallocation is free unless it is still held as a delayed reference.
 * (Return statements elided in this excerpt.) */
1373 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1375 if (pic == s->last_picture_ptr)
1377 if (pic->f.buf[0] == NULL)
1379 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Find a free slot in s->picture[].  The first scan prefers completely
 * empty slots (no buffer at all); the second accepts any slot that
 * pic_is_unused() approves.  Running out of slots is treated as an
 * internal error rather than a recoverable condition — see the comment
 * below for the rationale. */
1384 static int find_unused_picture(MpegEncContext *s, int shared)
1389 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1390 if (s->picture[i].f.buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1394 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1395 if (pic_is_unused(s, &s->picture[i]))
1400 av_log(s->avctx, AV_LOG_FATAL,
1401 "Internal error, picture buffer overflow\n");
1402 /* We could return -1, but the codec would crash trying to draw into a
1403 * non-existing frame anyway. This is safer than waiting for a random crash.
1404 * Also the return of this is never useful, an encoder must only allocate
1405 * as much as allowed in the specification. This has no relationship to how
1406 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1407 * enough for such valid streams).
1408 * Plus, a decoder has to check stream validity and remove frames if too
1409 * many reference frames are around. Waiting for "OOM" is not correct at
1410 * all. Similarly, missing reference frames have to be replaced by
1411 * interpolated/MC frames, anything else is a bug in the codec ...
/* Public wrapper around find_unused_picture(): if the chosen slot was
 * flagged for reallocation, drop its old tables/buffers and reset the
 * frame so the caller gets a clean slot. */
1417 int ff_find_unused_picture(MpegEncContext *s, int shared)
1419 int ret = find_unused_picture(s, shared);
1421 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1422 if (s->picture[ret].needs_realloc) {
1423 s->picture[ret].needs_realloc = 0;
1424 ff_free_picture_tables(&s->picture[ret]);
1425 ff_mpeg_unref_picture(s, &s->picture[ret]);
1426 avcodec_get_frame_defaults(&s->picture[ret].f);
/* Refresh the per-coefficient noise-reduction offsets from the running
 * DCT error statistics, separately for intra and inter blocks. */
1432 static void update_noise_reduction(MpegEncContext *s)
1436 for (intra = 0; intra < 2; intra++) {
/* Halve the accumulators once the sample count exceeds 2^16 so the
 * statistics track recent content instead of growing without bound. */
1437 if (s->dct_count[intra] > (1 << 16)) {
1438 for (i = 0; i < 64; i++) {
1439 s->dct_error_sum[intra][i] >>= 1;
1441 s->dct_count[intra] >>= 1;
/* offset = strength * count / error_sum, with rounding (+sum/2) and a +1
 * in the divisor to avoid division by zero. */
1444 for (i = 0; i < 64; i++) {
1445 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1446 s->dct_count[intra] +
1447 s->dct_error_sum[intra][i] / 2) /
1448 (s->dct_error_sum[intra][i] + 1);
1454 * generic function for encode/decode called after coding/decoding
1455 * the header and before a frame is coded/decoded.
/* Sets up current/last/next picture state for one frame: releases stale
 * pictures, allocates (or reuses) the current picture, fabricates dummy
 * reference frames when the stream starts on a non-keyframe, adjusts
 * field-picture strides, selects the dequantizers, and updates noise-
 * reduction stats.  Returns 0 on success, negative on error (several
 * error-path lines are elided in this excerpt). */
1457 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1463 if (!ff_thread_can_start_frame(avctx)) {
1464 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1468 /* mark & release old frames */
/* A non-B frame shifts the reference window: the old last-picture is no
 * longer needed once it differs from next-picture. */
1469 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1470 s->last_picture_ptr != s->next_picture_ptr &&
1471 s->last_picture_ptr->f.buf[0]) {
1472 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1475 /* release forgotten pictures */
1476 /* if (mpeg124/h263) */
/* Reference-marked slots that are neither last nor next should not exist;
 * with frame threading they can linger legitimately, otherwise warn. */
1478 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1479 if (&s->picture[i] != s->last_picture_ptr &&
1480 &s->picture[i] != s->next_picture_ptr &&
1481 s->picture[i].reference && !s->picture[i].needs_realloc) {
1482 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1483 av_log(avctx, AV_LOG_ERROR,
1484 "releasing zombie picture\n");
1485 ff_mpeg_unref_picture(s, &s->picture[i]);
1490 ff_mpeg_unref_picture(s, &s->current_picture);
1493 release_unused_pictures(s);
/* Pick the picture to decode/encode into: either one pre-selected by the
 * header parser (buffer not yet allocated) or a fresh pool slot. */
1495 if (s->current_picture_ptr &&
1496 s->current_picture_ptr->f.buf[0] == NULL) {
1497 // we already have a unused image
1498 // (maybe it was set before reading the header)
1499 pic = s->current_picture_ptr;
1501 i = ff_find_unused_picture(s, 0);
1503 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1506 pic = &s->picture[i];
/* Droppable frames are never references; otherwise non-B frames are —
 * NOTE(review): the reference-flag assignments themselves are elided. */
1510 if (!s->droppable) {
1511 if (s->pict_type != AV_PICTURE_TYPE_B)
1515 pic->f.coded_picture_number = s->coded_picture_number++;
1517 if (ff_alloc_picture(s, pic, 0) < 0)
1520 s->current_picture_ptr = pic;
1521 // FIXME use only the vars from current_pic
1522 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1523 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1524 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* For field pictures, derive top_field_first from which field comes
 * first relative to the picture structure. */
1525 if (s->picture_structure != PICT_FRAME)
1526 s->current_picture_ptr->f.top_field_first =
1527 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1529 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1530 !s->progressive_sequence;
1531 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1534 s->current_picture_ptr->f.pict_type = s->pict_type;
1535 // if (s->flags && CODEC_FLAG_QSCALE)
1536 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1537 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1539 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1540 s->current_picture_ptr)) < 0)
/* Advance the reference window on P/I frames: last <- next <- current. */
1543 if (s->pict_type != AV_PICTURE_TYPE_B) {
1544 s->last_picture_ptr = s->next_picture_ptr;
1546 s->next_picture_ptr = s->current_picture_ptr;
1548 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1549 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1550 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1551 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1552 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1553 s->pict_type, s->droppable);
/* No usable backward reference (stream starts on P/B, or a field-based
 * keyframe): allocate a gray dummy last-picture so MC has valid pixels. */
1555 if ((s->last_picture_ptr == NULL ||
1556 s->last_picture_ptr->f.buf[0] == NULL) &&
1557 (s->pict_type != AV_PICTURE_TYPE_I ||
1558 s->picture_structure != PICT_FRAME)) {
1559 int h_chroma_shift, v_chroma_shift;
1560 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1561 &h_chroma_shift, &v_chroma_shift);
1562 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f.buf[0])
1563 av_log(avctx, AV_LOG_DEBUG,
1564 "allocating dummy last picture for B frame\n");
1565 else if (s->pict_type != AV_PICTURE_TYPE_I)
1566 av_log(avctx, AV_LOG_ERROR,
1567 "warning: first frame is no keyframe\n");
1568 else if (s->picture_structure != PICT_FRAME)
1569 av_log(avctx, AV_LOG_DEBUG,
1570 "allocate dummy last picture for field based first keyframe\n");
1572 /* Allocate a dummy frame */
1573 i = ff_find_unused_picture(s, 0);
1575 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1578 s->last_picture_ptr = &s->picture[i];
1579 s->last_picture_ptr->f.key_frame = 0;
1580 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1581 s->last_picture_ptr = NULL;
/* Fill the dummy frame mid-gray (0x80 in all planes). */
1585 memset(s->last_picture_ptr->f.data[0], 0x80,
1586 avctx->height * s->last_picture_ptr->f.linesize[0]);
1587 memset(s->last_picture_ptr->f.data[1], 0x80,
1588 (avctx->height >> v_chroma_shift) *
1589 s->last_picture_ptr->f.linesize[1]);
1590 memset(s->last_picture_ptr->f.data[2], 0x80,
1591 (avctx->height >> v_chroma_shift) *
1592 s->last_picture_ptr->f.linesize[2]);
/* FLV1/H.263 use luma 16 (black) for the dummy frame instead of gray. */
1594 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1595 for(i=0; i<avctx->height; i++)
1596 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
/* Mark both fields of the dummy as fully decoded for frame threading. */
1599 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1600 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same treatment for a missing forward reference before a B frame. */
1602 if ((s->next_picture_ptr == NULL ||
1603 s->next_picture_ptr->f.buf[0] == NULL) &&
1604 s->pict_type == AV_PICTURE_TYPE_B) {
1605 /* Allocate a dummy frame */
1606 i = ff_find_unused_picture(s, 0);
1608 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1611 s->next_picture_ptr = &s->picture[i];
1612 s->next_picture_ptr->f.key_frame = 0;
1613 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1614 s->next_picture_ptr = NULL;
1617 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1618 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1621 #if 0 // BUFREF-FIXME
1622 memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1623 memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
/* Re-take working references on last/next pictures for this frame. */
1625 if (s->last_picture_ptr) {
1626 ff_mpeg_unref_picture(s, &s->last_picture);
1627 if (s->last_picture_ptr->f.buf[0] &&
1628 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1629 s->last_picture_ptr)) < 0)
1632 if (s->next_picture_ptr) {
1633 ff_mpeg_unref_picture(s, &s->next_picture);
1634 if (s->next_picture_ptr->f.buf[0] &&
1635 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1636 s->next_picture_ptr)) < 0)
1640 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1641 s->last_picture_ptr->f.buf[0]));
/* Field pictures: double the strides and, for bottom field, offset the
 * data pointers one line so each field is addressed contiguously. */
1643 if (s->picture_structure!= PICT_FRAME) {
1645 for (i = 0; i < 4; i++) {
1646 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1647 s->current_picture.f.data[i] +=
1648 s->current_picture.f.linesize[i];
1650 s->current_picture.f.linesize[i] *= 2;
1651 s->last_picture.f.linesize[i] *= 2;
1652 s->next_picture.f.linesize[i] *= 2;
1656 s->err_recognition = avctx->err_recognition;
1658 /* set dequantizer, we can't do it during init as
1659 * it might change for mpeg4 and we can't do it in the header
1660 * decode as init is not called for mpeg4 there yet */
1661 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1662 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1663 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1664 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1665 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1666 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1668 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1669 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1672 if (s->dct_error_sum) {
1673 av_assert2(s->avctx->noise_reduction && s->encoding);
1674 update_noise_reduction(s);
/* Legacy XvMC hardware acceleration hook (deprecated API). */
1678 FF_DISABLE_DEPRECATION_WARNINGS
1679 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1680 return ff_xvmc_field_start(s, avctx);
1681 FF_ENABLE_DEPRECATION_WARNINGS
1682 #endif /* FF_API_XVMC */
1687 /* called after a frame has been decoded. */
/* Finishes one frame: lets XvMC flush, extends the picture edges for
 * unrestricted motion vectors (software path only), and reports full
 * decode progress for frame threading. */
1688 void ff_MPV_frame_end(MpegEncContext *s)
1691 FF_DISABLE_DEPRECATION_WARNINGS
1692 /* redraw edges for the frame if decoding didn't complete */
1693 // just to make sure that all data is rendered.
1694 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1695 ff_xvmc_field_end(s);
1697 FF_ENABLE_DEPRECATION_WARNINGS
1698 #endif /* FF_API_XVMC */
/* Edge extension only applies to software-decoded reference frames that
 * use unrestricted MVs and have no EMU_EDGE flag. */
1699 if ((s->er.error_count || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1700 !s->avctx->hwaccel &&
1701 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1702 s->unrestricted_mv &&
1703 s->current_picture.reference &&
1705 !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1708 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1709 int hshift = desc->log2_chroma_w;
1710 int vshift = desc->log2_chroma_h;
/* Luma plane uses full edge positions; chroma planes shift by subsampling. */
1711 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1712 s->h_edge_pos, s->v_edge_pos,
1713 EDGE_WIDTH, EDGE_WIDTH,
1714 EDGE_TOP | EDGE_BOTTOM);
1715 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1716 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1717 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1718 EDGE_TOP | EDGE_BOTTOM);
1719 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1720 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1721 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1722 EDGE_TOP | EDGE_BOTTOM);
1727 if (s->current_picture.reference)
1728 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1732 * Draw a line from (ex, ey) -> (sx, sy).
1733 * @param w width of the image
1734 * @param h height of the image
1735 * @param stride stride/linesize of the image
1736 * @param color color of the arrow
/* Anti-aliased line drawing into a single 8-bit plane, used by the motion-
 * vector visualizer.  Color is *added* to existing pixels, split between
 * two adjacent pixels by the 16.16 fractional slope. */
1738 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1739 int w, int h, int stride, int color)
/* Clamp both endpoints into the image so all writes stay in bounds. */
1743 sx = av_clip(sx, 0, w - 1);
1744 sy = av_clip(sy, 0, h - 1);
1745 ex = av_clip(ex, 0, w - 1);
1746 ey = av_clip(ey, 0, h - 1);
1748 buf[sy * stride + sx] += color;
/* Mostly-horizontal case: iterate over x, interpolate y.  Endpoints are
 * swapped (condition elided here) so iteration runs left-to-right. */
1750 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1752 FFSWAP(int, sx, ex);
1753 FFSWAP(int, sy, ey);
1755 buf += sx + sy * stride;
/* f is the slope in 16.16 fixed point (ex is the delta after rebasing). */
1757 f = ((ey - sy) << 16) / ex;
1758 for (x = 0; x <= ex; x++) {
1760 fr = (x * f) & 0xFFFF;
1761 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1762 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* Mostly-vertical case: iterate over y, interpolate x. */
1766 FFSWAP(int, sx, ex);
1767 FFSWAP(int, sy, ey);
1769 buf += sx + sy * stride;
1772 f = ((ex - sx) << 16) / ey;
1775 for(y= 0; y <= ey; y++){
1777 fr = (y*f) & 0xFFFF;
1778 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1779 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1785 * Draw an arrow from (ex, ey) -> (sx, sy).
1786 * @param w width of the image
1787 * @param h height of the image
1788 * @param stride stride/linesize of the image
1789 * @param color color of the arrow
/* Draws a motion-vector arrow: the shaft via draw_line() plus two short
 * head strokes at the start point when the vector is long enough. */
1791 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1792 int ey, int w, int h, int stride, int color)
/* Loose clamp (±100 px outside the frame) keeps the fixed-point math in
 * draw_line from overflowing on wild vectors; draw_line clips exactly. */
1796 sx = av_clip(sx, -100, w + 100);
1797 sy = av_clip(sy, -100, h + 100);
1798 ex = av_clip(ex, -100, w + 100);
1799 ey = av_clip(ey, -100, h + 100);
/* Only draw an arrowhead for vectors longer than 3 pixels. */
1804 if (dx * dx + dy * dy > 3 * 3) {
/* length in 4-bit fixed point; rx/ry become a 3-pixel head vector. */
1807 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1809 // FIXME subpixel accuracy
1810 rx = ROUNDED_DIV(rx * 3 << 4, length);
1811 ry = ROUNDED_DIV(ry * 3 << 4, length);
/* Two head strokes: one along the vector, one perpendicular to it. */
1813 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1814 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1816 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1820 * Print debugging info for the given picture.
/* Two debug facilities in one function: (1) a textual per-macroblock dump
 * (skip count, qscale, mb type) to the log, and (2) an in-place visual
 * overlay on the output frame (motion vectors, QP shading, mb-type
 * coloring, partition grid).  Skipped entirely for hwaccel frames, whose
 * per-mb tables are not populated. */
1822 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1824 int mb_width, int mb_height, int mb_stride, int quarter_sample)
1826 if (avctx->hwaccel || !p || !p->mb_type
1827 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
/* --- Part 1: textual per-macroblock dump --------------------------------- */
1831 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1834 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1835 av_get_picture_type_char(pict->pict_type));
1836 for (y = 0; y < mb_height; y++) {
1837 for (x = 0; x < mb_width; x++) {
1838 if (avctx->debug & FF_DEBUG_SKIP) {
1839 int count = mbskip_table[x + y * mb_stride];
1842 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1844 if (avctx->debug & FF_DEBUG_QP) {
1845 av_log(avctx, AV_LOG_DEBUG, "%2d",
1846 p->qscale_table[x + y * mb_stride]);
1848 if (avctx->debug & FF_DEBUG_MB_TYPE) {
1849 int mb_type = p->mb_type[x + y * mb_stride];
1850 // Type & MV direction
/* One character per mb: P=PCM, A=intra+acpred, i=intra4x4, I=intra16x16,
 * d/D=direct (skip/non-skip), g/G=GMC, S=skip, >/< = fwd/bwd only,
 * X=bidirectional. */
1851 if (IS_PCM(mb_type))
1852 av_log(avctx, AV_LOG_DEBUG, "P");
1853 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1854 av_log(avctx, AV_LOG_DEBUG, "A");
1855 else if (IS_INTRA4x4(mb_type))
1856 av_log(avctx, AV_LOG_DEBUG, "i");
1857 else if (IS_INTRA16x16(mb_type))
1858 av_log(avctx, AV_LOG_DEBUG, "I");
1859 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1860 av_log(avctx, AV_LOG_DEBUG, "d");
1861 else if (IS_DIRECT(mb_type))
1862 av_log(avctx, AV_LOG_DEBUG, "D");
1863 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1864 av_log(avctx, AV_LOG_DEBUG, "g");
1865 else if (IS_GMC(mb_type))
1866 av_log(avctx, AV_LOG_DEBUG, "G");
1867 else if (IS_SKIP(mb_type))
1868 av_log(avctx, AV_LOG_DEBUG, "S");
1869 else if (!USES_LIST(mb_type, 1))
1870 av_log(avctx, AV_LOG_DEBUG, ">");
1871 else if (!USES_LIST(mb_type, 0))
1872 av_log(avctx, AV_LOG_DEBUG, "<");
1874 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1875 av_log(avctx, AV_LOG_DEBUG, "X");
/* Second character: partition shape (+ 8x8, - 16x8, | 8x16, space 16x16). */
1879 if (IS_8X8(mb_type))
1880 av_log(avctx, AV_LOG_DEBUG, "+");
1881 else if (IS_16X8(mb_type))
1882 av_log(avctx, AV_LOG_DEBUG, "-");
1883 else if (IS_8X16(mb_type))
1884 av_log(avctx, AV_LOG_DEBUG, "|");
1885 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1886 av_log(avctx, AV_LOG_DEBUG, " ");
1888 av_log(avctx, AV_LOG_DEBUG, "?");
/* Third character: '=' marks interlaced macroblocks. */
1891 if (IS_INTERLACED(mb_type))
1892 av_log(avctx, AV_LOG_DEBUG, "=");
1894 av_log(avctx, AV_LOG_DEBUG, " ");
1897 av_log(avctx, AV_LOG_DEBUG, "\n");
/* --- Part 2: visual overlay drawn onto the frame itself ------------------ */
1901 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1902 (avctx->debug_mv)) {
1903 const int shift = 1 + quarter_sample;
1907 int h_chroma_shift, v_chroma_shift, block_height;
1908 const int width = avctx->width;
1909 const int height = avctx->height;
/* H.264/SVQ3 store MVs at 4x4 granularity (log2=2), others at 8x8. */
1910 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1911 const int mv_stride = (mb_width << mv_sample_log2) +
1912 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1914 *low_delay = 0; // needed to see the vectors without trashing the buffers
1916 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1918 av_frame_make_writable(pict);
1920 pict->opaque = NULL;
1921 ptr = pict->data[0];
1922 block_height = 16 >> v_chroma_shift;
1924 for (mb_y = 0; mb_y < mb_height; mb_y++) {
1926 for (mb_x = 0; mb_x < mb_width; mb_x++) {
1927 const int mb_index = mb_x + mb_y * mb_stride;
/* Motion-vector arrows: type selects which list/picture-type combination
 * (P forward, B forward, B backward) is visualized. */
1928 if ((avctx->debug_mv) && p->motion_val[0]) {
1930 for (type = 0; type < 3; type++) {
1934 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1935 (pict->pict_type!= AV_PICTURE_TYPE_P))
1940 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1941 (pict->pict_type!= AV_PICTURE_TYPE_B))
1946 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1947 (pict->pict_type!= AV_PICTURE_TYPE_B))
1952 if (!USES_LIST(p->mb_type[mb_index], direction))
/* Draw one arrow per partition: four for 8x8, two for 16x8/8x16, one for
 * 16x16; arrow origin is the partition center. */
1955 if (IS_8X8(p->mb_type[mb_index])) {
1957 for (i = 0; i < 4; i++) {
1958 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1959 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1960 int xy = (mb_x * 2 + (i & 1) +
1961 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1962 int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
1963 int my = (p->motion_val[direction][xy][1] >> shift) + sy;
1964 draw_arrow(ptr, sx, sy, mx, my, width,
1965 height, pict->linesize[0], 100);
1967 } else if (IS_16X8(p->mb_type[mb_index])) {
1969 for (i = 0; i < 2; i++) {
1970 int sx = mb_x * 16 + 8;
1971 int sy = mb_y * 16 + 4 + 8 * i;
1972 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1973 int mx = (p->motion_val[direction][xy][0] >> shift);
1974 int my = (p->motion_val[direction][xy][1] >> shift);
1976 if (IS_INTERLACED(p->mb_type[mb_index]))
1979 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1980 height, pict->linesize[0], 100);
1982 } else if (IS_8X16(p->mb_type[mb_index])) {
1984 for (i = 0; i < 2; i++) {
1985 int sx = mb_x * 16 + 4 + 8 * i;
1986 int sy = mb_y * 16 + 8;
1987 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1988 int mx = p->motion_val[direction][xy][0] >> shift;
1989 int my = p->motion_val[direction][xy][1] >> shift;
1991 if (IS_INTERLACED(p->mb_type[mb_index]))
1994 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1995 height, pict->linesize[0], 100);
1998 int sx= mb_x * 16 + 8;
1999 int sy= mb_y * 16 + 8;
2000 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2001 int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2002 int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2003 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
/* QP visualization: shade the chroma planes with a gray level scaled
 * from qscale (range 0..31 mapped to 0..~128), 8 bytes at a time. */
2007 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2008 uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2009 0x0101010101010101ULL;
2011 for (y = 0; y < block_height; y++) {
2012 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2013 (block_height * mb_y + y) *
2014 pict->linesize[1]) = c;
2015 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2016 (block_height * mb_y + y) *
2017 pict->linesize[2]) = c;
/* MB-type visualization: color the chroma by macroblock class using a
 * hue angle (COLOR macro), then mark partition boundaries in luma. */
2020 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2022 int mb_type = p->mb_type[mb_index];
2025 #define COLOR(theta, r) \
2026 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2027 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2031 if (IS_PCM(mb_type)) {
2033 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2034 IS_INTRA16x16(mb_type)) {
2036 } else if (IS_INTRA4x4(mb_type)) {
2038 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2040 } else if (IS_DIRECT(mb_type)) {
2042 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2044 } else if (IS_GMC(mb_type)) {
2046 } else if (IS_SKIP(mb_type)) {
2048 } else if (!USES_LIST(mb_type, 1)) {
2050 } else if (!USES_LIST(mb_type, 0)) {
2053 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* Replicate the single-byte u/v values across 8 bytes for fast fill. */
2057 u *= 0x0101010101010101ULL;
2058 v *= 0x0101010101010101ULL;
2059 for (y = 0; y < block_height; y++) {
2060 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2061 (block_height * mb_y + y) * pict->linesize[1]) = u;
2062 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2063 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* XOR 0x80 flips luma brightness to etch partition split lines. */
2067 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2068 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2069 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2070 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2071 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2073 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2074 for (y = 0; y < 16; y++)
2075 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2076 pict->linesize[0]] ^= 0x80;
/* For 4x4-granularity MV storage, draw sub-8x8 splits only where the
 * neighboring 4x4 vectors actually differ. */
2078 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2079 int dm = 1 << (mv_sample_log2 - 2);
2080 for (i = 0; i < 4; i++) {
2081 int sx = mb_x * 16 + 8 * (i & 1);
2082 int sy = mb_y * 16 + 8 * (i >> 1);
2083 int xy = (mb_x * 2 + (i & 1) +
2084 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2086 int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2087 if (mv[0] != mv[dm] ||
2088 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2089 for (y = 0; y < 8; y++)
2090 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2091 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2092 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2093 pict->linesize[0]) ^= 0x8080808080808080ULL;
2097 if (IS_INTERLACED(mb_type) &&
2098 avctx->codec->id == AV_CODEC_ID_H264) {
/* Reset the skip counter so the overlay is redrawn every frame. */
2102 mbskip_table[mb_index] = 0;
/* Convenience wrapper: forwards the MpegEncContext's own tables and
 * geometry to the generic ff_print_debug_info2(). */
2108 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2110 ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2111 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/* Export the per-macroblock qscale table as frame side data.  A new
 * reference to the underlying buffer is taken and its view is offset to
 * skip the table's border rows/column (2*mb_stride + 1).  Returns 0 or a
 * negative AVERROR. */
2114 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2116 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2117 int offset = 2*s->mb_stride + 1;
2119 return AVERROR(ENOMEM);
2120 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
/* Shift the buffer view; av_frame_set_qp_table takes ownership of ref. */
2121 ref->size -= offset;
2122 ref->data += offset;
2123 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/* Half-pel motion compensation for one block in lowres decoding mode:
 * computes the sub-pel phase, falls back to the edge-emulation buffer when
 * the source block hangs over the picture edge, and applies the chroma MC
 * function for the selected phase. */
2126 static inline int hpel_motion_lowres(MpegEncContext *s,
2127 uint8_t *dest, uint8_t *src,
2128 int field_based, int field_select,
2129 int src_x, int src_y,
2130 int width, int height, ptrdiff_t stride,
2131 int h_edge_pos, int v_edge_pos,
2132 int w, int h, h264_chroma_mc_func *pix_op,
2133 int motion_x, int motion_y)
2135 const int lowres = s->avctx->lowres;
2136 const int op_index = FFMIN(lowres, 3);
/* s_mask isolates the sub-pel fraction at this lowres factor. */
2137 const int s_mask = (2 << lowres) - 1;
2141 if (s->quarter_sample) {
/* Split the MV into integer position (src_x/src_y) and sub-pel phase. */
2146 sx = motion_x & s_mask;
2147 sy = motion_y & s_mask;
2148 src_x += motion_x >> lowres + 1;
2149 src_y += motion_y >> lowres + 1;
2151 src += src_y * stride + src_x;
/* Block crosses the padded edge: re-fetch through emulated_edge_mc. */
2153 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2154 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2155 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2156 s->linesize, s->linesize,
2157 w + 1, (h + 1) << field_based,
2158 src_x, src_y << field_based,
2159 h_edge_pos, v_edge_pos);
2160 src = s->edge_emu_buffer;
/* Rescale the phase to the 0..3 range expected by the chroma MC funcs. */
2164 sx = (sx << 2) >> lowres;
2165 sy = (sy << 2) >> lowres;
2168 pix_op[op_index](dest, src, stride, h, sx, sy);
2172 /* apply one mpeg motion vector to the three components */
/* Lowres MC for a full macroblock (Y, Cb, Cr): derives luma and chroma
 * source positions and sub-pel phases for the active chroma-subsampling
 * and field mode, handles edge emulation, and applies the MC functions.
 * NOTE(review): several branches are elided in this excerpt; paired
 * if/else bodies may be incomplete as shown. */
2173 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2180 uint8_t **ref_picture,
2181 h264_chroma_mc_func *pix_op,
2182 int motion_x, int motion_y,
2185 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2186 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2187 ptrdiff_t uvlinesize, linesize;
2188 const int lowres = s->avctx->lowres;
2189 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2190 const int block_s = 8>>lowres;
2191 const int s_mask = (2 << lowres) - 1;
2192 const int h_edge_pos = s->h_edge_pos >> lowres;
2193 const int v_edge_pos = s->v_edge_pos >> lowres;
/* Field pictures interleave fields, so strides double. */
2194 linesize = s->current_picture.f.linesize[0] << field_based;
2195 uvlinesize = s->current_picture.f.linesize[1] << field_based;
2197 // FIXME obviously not perfect but qpel will not work in lowres anyway
2198 if (s->quarter_sample) {
2204 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
/* Luma integer position + sub-pel phase. */
2207 sx = motion_x & s_mask;
2208 sy = motion_y & s_mask;
2209 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2210 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* Chroma positions depend on the output format's chroma MV convention. */
2212 if (s->out_format == FMT_H263) {
2213 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2214 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2215 uvsrc_x = src_x >> 1;
2216 uvsrc_y = src_y >> 1;
2217 } else if (s->out_format == FMT_H261) {
2218 // even chroma mv's are full pel in H261
2221 uvsx = (2 * mx) & s_mask;
2222 uvsy = (2 * my) & s_mask;
2223 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2224 uvsrc_y = mb_y * block_s + (my >> lowres);
/* MPEG-style chroma: halve MVs per axis according to subsampling. */
2226 if(s->chroma_y_shift){
2231 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2232 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2234 if(s->chroma_x_shift){
2238 uvsy = motion_y & s_mask;
2240 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2243 uvsx = motion_x & s_mask;
2244 uvsy = motion_y & s_mask;
2251 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2252 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2253 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* Edge overrun: re-fetch luma (17x17(+field)) and both chroma planes
 * through the emulation buffer. */
2255 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2256 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2257 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2258 linesize >> field_based, linesize >> field_based,
2259 17, 17 + field_based,
2260 src_x, src_y << field_based, h_edge_pos,
2262 ptr_y = s->edge_emu_buffer;
2263 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2264 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2265 s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
2266 uvlinesize >> field_based, uvlinesize >> field_based,
2268 uvsrc_x, uvsrc_y << field_based,
2269 h_edge_pos >> 1, v_edge_pos >> 1);
2270 s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
2271 uvlinesize >> field_based,uvlinesize >> field_based,
2273 uvsrc_x, uvsrc_y << field_based,
2274 h_edge_pos >> 1, v_edge_pos >> 1);
2276 ptr_cr = uvbuf + 16;
2280 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
/* Bottom field: advance dest and src pointers one line before MC. */
2282 dest_y += s->linesize;
2283 dest_cb += s->uvlinesize;
2284 dest_cr += s->uvlinesize;
2288 ptr_y += s->linesize;
2289 ptr_cb += s->uvlinesize;
2290 ptr_cr += s->uvlinesize;
/* Rescale phases and run the MC functions; chroma skipped in gray mode. */
2293 sx = (sx << 2) >> lowres;
2294 sy = (sy << 2) >> lowres;
2295 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2297 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2298 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2299 uvsx = (uvsx << 2) >> lowres;
2300 uvsy = (uvsy << 2) >> lowres;
2302 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2303 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2306 // FIXME h261 lowres loop filter
/* Chroma motion compensation for a 4MV macroblock at reduced ("lowres")
 * resolution. The four luma vectors are collapsed into a single chroma
 * vector (with the special H.263 rounding) and one block_s x block_s
 * prediction is written to each of dest_cb and dest_cr.
 * NOTE(review): interior lines are elided in this view; comments describe
 * only what the visible code shows. */
2309 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2310 uint8_t *dest_cb, uint8_t *dest_cr,
2311 uint8_t **ref_picture,
2312 h264_chroma_mc_func * pix_op,
/* All lowres geometry is derived from avctx->lowres: block size shrinks,
 * sub-pel mask and edge positions scale accordingly. */
2315 const int lowres = s->avctx->lowres;
2316 const int op_index = FFMIN(lowres, 3);
2317 const int block_s = 8 >> lowres;
2318 const int s_mask = (2 << lowres) - 1;
/* '>>' binds looser than '+', so this is >> (lowres + 1) — intentional,
 * chroma has half the resolution of luma here. */
2319 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2320 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2321 int emu = 0, src_x, src_y, sx, sy;
2325 if (s->quarter_sample) {
2330 /* In case of 8X8, we construct a single chroma motion vector
2331 with a special rounding */
2332 mx = ff_h263_round_chroma(mx);
2333 my = ff_h263_round_chroma(my);
2337 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2338 src_y = s->mb_y * block_s + (my >> lowres + 1);
/* Same offset is reused for the Cr plane below. */
2340 offset = src_y * s->uvlinesize + src_x;
2341 ptr = ref_picture[1] + offset;
/* If the read would fall outside the picture, synthesize the missing
 * edge pixels into edge_emu_buffer and read from there instead. */
2342 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2343 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2344 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2345 s->uvlinesize, s->uvlinesize,
2347 src_x, src_y, h_edge_pos, v_edge_pos);
2348 ptr = s->edge_emu_buffer;
/* Rescale the sub-pel fraction to the 1/8-pel domain the
 * h264_chroma_mc functions expect. */
2351 sx = (sx << 2) >> lowres;
2352 sy = (sy << 2) >> lowres;
2353 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr plane: same source coordinates, same emulation decision. */
2355 ptr = ref_picture[2] + offset;
2357 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2358 s->uvlinesize, s->uvlinesize,
2360 src_x, src_y, h_edge_pos, v_edge_pos);
2361 ptr = s->edge_emu_buffer;
2363 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2367 * motion compensation of a single macroblock
2369 * @param dest_y luma destination pointer
2370 * @param dest_cb chroma cb/u destination pointer
2371 * @param dest_cr chroma cr/v destination pointer
2372 * @param dir direction (0->forward, 1->backward)
2373 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2374 * @param pix_op halfpel motion compensation function (average or put normally)
2375 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Lowres counterpart of ff_MPV_motion(): performs motion compensation for
 * one macroblock, dispatching on s->mv_type (16x16, 8x8/4MV, field, 16x8,
 * dual-prime). MVs come from s->mv, selection flags from s->field_select.
 * NOTE(review): interior lines are elided in this view. */
2377 static inline void MPV_motion_lowres(MpegEncContext *s,
2378 uint8_t *dest_y, uint8_t *dest_cb,
2380 int dir, uint8_t **ref_picture,
2381 h264_chroma_mc_func *pix_op)
2385 const int lowres = s->avctx->lowres;
2386 const int block_s = 8 >>lowres;
2391 switch (s->mv_type) {
/* Single 16x16 vector: one call handles luma and chroma. */
2393 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2395 ref_picture, pix_op,
2396 s->mv[dir][0][0], s->mv[dir][0][1],
/* 4MV: four independent 8x8 luma predictions... */
2402 for (i = 0; i < 4; i++) {
2403 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2404 s->linesize) * block_s,
2405 ref_picture[0], 0, 0,
2406 (2 * mb_x + (i & 1)) * block_s,
2407 (2 * mb_y + (i >> 1)) * block_s,
2408 s->width, s->height, s->linesize,
2409 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2410 block_s, block_s, pix_op,
2411 s->mv[dir][i][0], s->mv[dir][i][1]);
/* ...while the four vectors are summed for the combined chroma MV. */
2413 mx += s->mv[dir][i][0];
2414 my += s->mv[dir][i][1];
2416 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2417 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* Field MC inside a frame picture: top field then bottom field. */
2422 if (s->picture_structure == PICT_FRAME) {
2424 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2425 1, 0, s->field_select[dir][0],
2426 ref_picture, pix_op,
2427 s->mv[dir][0][0], s->mv[dir][0][1],
2430 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2431 1, 1, s->field_select[dir][1],
2432 ref_picture, pix_op,
2433 s->mv[dir][1][0], s->mv[dir][1][1],
/* Field picture referencing the opposite parity of the current frame:
 * switch to the current picture's planes (not valid for B frames). */
2436 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2437 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2438 ref_picture = s->current_picture_ptr->f.data;
2441 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2442 0, 0, s->field_select[dir][0],
2443 ref_picture, pix_op,
2445 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two half-macroblock predictions, each may use a different
 * reference field. */
2449 for (i = 0; i < 2; i++) {
2450 uint8_t **ref2picture;
2452 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2453 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2454 ref2picture = ref_picture;
2456 ref2picture = s->current_picture_ptr->f.data;
2459 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2460 0, 0, s->field_select[dir][i],
2461 ref2picture, pix_op,
2462 s->mv[dir][i][0], s->mv[dir][i][1] +
2463 2 * block_s * i, block_s, mb_y >> 1);
/* Advance destinations to the lower 16x8 half. */
2465 dest_y += 2 * block_s * s->linesize;
2466 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2467 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* Dual prime (presumably — elided case label): frame pictures average
 * two field predictions per parity. */
2471 if (s->picture_structure == PICT_FRAME) {
2472 for (i = 0; i < 2; i++) {
2474 for (j = 0; j < 2; j++) {
2475 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2477 ref_picture, pix_op,
2478 s->mv[dir][2 * i + j][0],
2479 s->mv[dir][2 * i + j][1],
/* Second pass averages into the block produced by the first. */
2482 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2485 for (i = 0; i < 2; i++) {
2486 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2487 0, 0, s->picture_structure != i + 1,
2488 ref_picture, pix_op,
2489 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2490 2 * block_s, mb_y >> 1);
2492 // after put we make avg of the same block
2493 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2495 // opposite parity is always in the same
2496 // frame if this is second field
2497 if (!s->first_field) {
2498 ref_picture = s->current_picture_ptr->f.data;
2509 * find the lowest MB row referenced in the MVs
/* Return the lowest (largest-index) macroblock row of the reference picture
 * that the current MB's vectors in direction 'dir' can touch — used to wait
 * for frame-threaded decoding progress. Falls back to the last MB row for
 * field pictures / GMC (mcsel). */
2511 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2513 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2514 int my, off, i, mvs;
2516 if (s->picture_structure != PICT_FRAME || s->mcsel)
2519 switch (s->mv_type) {
/* Track the extreme vertical displacements over all 'mvs' vectors,
 * normalized to quarter-pel via qpel_shift. */
2533 for (i = 0; i < mvs; i++) {
2534 my = s->mv[dir][i][1]<<qpel_shift;
2535 my_max = FFMAX(my_max, my);
2536 my_min = FFMIN(my_min, my);
/* +63 >> 6: round up from qpel units to 16-pixel MB rows. */
2539 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2541 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2543 return s->mb_height-1;
2546 /* put block[] to dest[] */
/* Dequantize an intra block and write (not add) its IDCT to dest. */
2547 static inline void put_dct(MpegEncContext *s,
2548 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2550 s->dct_unquantize_intra(s, block, i, qscale);
2551 s->dsp.idct_put (dest, line_size, block);
2554 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block to dest; skipped entirely
 * when the block has no coded coefficients (block_last_index < 0). */
2555 static inline void add_dct(MpegEncContext *s,
2556 int16_t *block, int i, uint8_t *dest, int line_size)
2558 if (s->block_last_index[i] >= 0) {
2559 s->dsp.idct_add (dest, line_size, block);
/* Inter-dequantize a coded block, then add its IDCT residue to dest. */
2563 static inline void add_dequant_dct(MpegEncContext *s,
2564 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2566 if (s->block_last_index[i] >= 0) {
2567 s->dct_unquantize_inter(s, block, i, qscale);
2569 s->dsp.idct_add (dest, line_size, block);
2574 * Clean dc, ac, coded_block for the current non-intra MB.
/* Reset intra-prediction state (DC predictors, AC values, coded-block flags,
 * mbintra_table) for the current non-intra macroblock so a later intra MB
 * starts from the standard defaults. */
2576 void ff_clean_intra_table_entries(MpegEncContext *s)
2578 int wrap = s->b8_stride;
2579 int xy = s->block_index[0];
/* Luma DC predictors of all four 8x8 blocks reset to the mid value 1024. */
2582 s->dc_val[0][xy + 1 ] =
2583 s->dc_val[0][xy + wrap] =
2584 s->dc_val[0][xy + 1 + wrap] = 1024;
/* Clear the luma AC prediction rows (two 8x8 rows of 16 coeffs each). */
2586 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2587 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
/* coded_block flags only exist for MSMPEG4 v3+. */
2588 if (s->msmpeg4_version>=3) {
2589 s->coded_block[xy ] =
2590 s->coded_block[xy + 1 ] =
2591 s->coded_block[xy + wrap] =
2592 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma uses MB-granularity indexing. */
2595 wrap = s->mb_stride;
2596 xy = s->mb_x + s->mb_y * wrap;
2598 s->dc_val[2][xy] = 1024;
2600 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2601 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2603 s->mbintra_table[xy]= 0;
2606 /* generic function called after a macroblock has been parsed by the
2607 decoder or after it has been encoded by the encoder.
2609 Important variables used:
2610 s->mb_intra : true if intra macroblock
2611 s->mv_dir : motion vector direction
2612 s->mv_type : motion vector type
2613 s->mv : motion vector
2614 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Finish one macroblock after parsing/encoding: run motion compensation,
 * dequantize+IDCT the residue, and write the reconstructed pixels.
 * lowres_flag/is_mpeg12 are compile-time constants at each call site (the
 * function is av_always_inline) so the dead branches fold away.
 * NOTE(review): interior lines are elided in this view; comments describe
 * only the visible code. */
2616 static av_always_inline
2617 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2618 int lowres_flag, int is_mpeg12)
2620 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* Legacy XvMC path: hand the MB to the accelerator and (presumably) return
 * early — guarded by deprecated-API warnings. */
2623 FF_DISABLE_DEPRECATION_WARNINGS
2624 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2625 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2628 FF_ENABLE_DEPRECATION_WARNINGS
2629 #endif /* FF_API_XVMC */
/* Optional debug dump of all DCT coefficients in IDCT-permuted order. */
2631 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2632 /* print DCT coefficients */
2634 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2636 for(j=0; j<64; j++){
2637 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2639 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2643 s->current_picture.qscale_table[mb_xy] = s->qscale;
2645 /* update DC predictors for P macroblocks */
2647 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2648 if(s->mbintra_table[mb_xy])
2649 ff_clean_intra_table_entries(s);
/* Reset DC predictor to the mid value for the current precision. */
2653 s->last_dc[2] = 128 << s->intra_dc_precision;
2656 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2657 s->mbintra_table[mb_xy]=1;
2659 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2660 uint8_t *dest_y, *dest_cb, *dest_cr;
2661 int dct_linesize, dct_offset;
2662 op_pixels_func (*op_pix)[4];
2663 qpel_mc_func (*op_qpix)[16];
2664 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2665 const int uvlinesize = s->current_picture.f.linesize[1];
2666 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2667 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2669 /* avoid copy if macroblock skipped in last frame too */
2670 /* skip only during decoding as we might trash the buffers during encoding a bit */
2672 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2674 if (s->mb_skipped) {
2676 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2678 } else if(!s->current_picture.reference) {
2681 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT doubles the line stride and halves the row offset. */
2685 dct_linesize = linesize << s->interlaced_dct;
2686 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
/* Non-readable B-frame destinations go to a scratchpad first and are
 * copied out at the end (see put_pixels_tab calls below). */
2690 dest_cb= s->dest[1];
2691 dest_cr= s->dest[2];
2693 dest_y = s->b_scratchpad;
2694 dest_cb= s->b_scratchpad+16*linesize;
2695 dest_cr= s->b_scratchpad+32*linesize;
2699 /* motion handling */
2700 /* decoding or more than one mb_type (MC was already done otherwise) */
/* Frame threading: wait until the referenced rows of the reference
 * pictures have been decoded. */
2703 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2704 if (s->mv_dir & MV_DIR_FORWARD) {
2705 ff_thread_await_progress(&s->last_picture_ptr->tf,
2706 ff_MPV_lowest_referenced_row(s, 0),
2709 if (s->mv_dir & MV_DIR_BACKWARD) {
2710 ff_thread_await_progress(&s->next_picture_ptr->tf,
2711 ff_MPV_lowest_referenced_row(s, 1),
/* Lowres MC path: put for the first direction, avg for the second so
 * bidirectional prediction averages the two. */
2717 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2719 if (s->mv_dir & MV_DIR_FORWARD) {
2720 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2721 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2723 if (s->mv_dir & MV_DIR_BACKWARD) {
2724 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* Full-resolution MC path, with rounding control for P frames. */
2727 op_qpix = s->me.qpel_put;
2728 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2729 op_pix = s->hdsp.put_pixels_tab;
2731 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2733 if (s->mv_dir & MV_DIR_FORWARD) {
2734 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2735 op_pix = s->hdsp.avg_pixels_tab;
2736 op_qpix= s->me.qpel_avg;
2738 if (s->mv_dir & MV_DIR_BACKWARD) {
2739 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2744 /* skip dequant / idct if we are really late ;) */
2745 if(s->avctx->skip_idct){
2746 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2747 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2748 || s->avctx->skip_idct >= AVDISCARD_ALL)
2752 /* add dct residue */
/* Codecs whose dequantization was not already applied at parse time get
 * the dequant+add path; others (MPEG-1/2, MSMPEG4, quantized MPEG-4)
 * take the plain add_dct path below. */
2753 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2754 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2755 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2756 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2757 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2758 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2760 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2761 if (s->chroma_y_shift){
2762 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2763 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2767 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2768 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2769 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2770 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2773 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2774 add_dct(s, block[0], 0, dest_y , dct_linesize);
2775 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2776 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2777 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2779 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2780 if(s->chroma_y_shift){//Chroma420
2781 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2782 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2785 dct_linesize = uvlinesize << s->interlaced_dct;
2786 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2788 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2789 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2790 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2791 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2792 if(!s->chroma_x_shift){//Chroma444
2793 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2794 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2795 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2796 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2801 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2802 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2805 /* dct only in intra block */
/* Intra path: 'put' the IDCT output directly (no MC prediction to add). */
2806 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2807 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2808 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2809 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2810 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2812 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2813 if(s->chroma_y_shift){
2814 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2815 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2819 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2820 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2821 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2822 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra blocks were already dequantized: raw idct_put. */
2826 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2827 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2828 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2829 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2831 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2832 if(s->chroma_y_shift){
2833 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2834 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2837 dct_linesize = uvlinesize << s->interlaced_dct;
2838 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2840 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2841 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2842 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2843 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2844 if(!s->chroma_x_shift){//Chroma444
2845 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2846 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2847 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2848 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* Non-readable case: copy the scratchpad reconstruction to the real
 * destination planes. */
2856 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2857 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2858 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public wrapper: dispatch to MPV_decode_mb_internal with compile-time
 * lowres/is_mpeg12 flags so each of the four variants is specialized. */
2863 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2865 if(s->out_format == FMT_MPEG1) {
2866 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2867 else MPV_decode_mb_internal(s, block, 0, 1);
2870 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2871 else MPV_decode_mb_internal(s, block, 0, 0);
2875 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* Draw (and, when enabled, edge-extend) a horizontal band of the picture,
 * then invoke the user's draw_horiz_band callback with plane offsets for
 * the band starting at row y with height h (clipped at the bottom).
 * NOTE(review): interior lines are elided in this view. */
2877 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2878 Picture *last, int y, int h, int picture_structure,
2879 int first_field, int draw_edges, int low_delay,
2880 int v_edge_pos, int h_edge_pos)
2882 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2883 int hshift = desc->log2_chroma_w;
2884 int vshift = desc->log2_chroma_h;
2885 const int field_pic = picture_structure != PICT_FRAME;
/* Edge extension is skipped for hwaccel/VDPAU output and when the caller
 * asked for emulated edges. */
2891 if (!avctx->hwaccel &&
2892 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2895 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2896 int *linesize = cur->f.linesize;
2897 int sides = 0, edge_h;
2898 if (y==0) sides |= EDGE_TOP;
2899 if (y + h >= v_edge_pos)
2900 sides |= EDGE_BOTTOM;
2902 edge_h= FFMIN(h, v_edge_pos - y);
/* Extend luma, then both chroma planes scaled by the subsampling shifts. */
2904 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2905 linesize[0], h_edge_pos, edge_h,
2906 EDGE_WIDTH, EDGE_WIDTH, sides);
2907 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2908 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2909 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2910 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2911 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2912 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2915 h = FFMIN(h, avctx->height - y);
/* Field pictures only emit bands when the caller allows field slices. */
2917 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2919 if (avctx->draw_horiz_band) {
2921 int offset[AV_NUM_DATA_POINTERS];
/* Choose current vs. last picture depending on output order. */
2924 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2925 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2932 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2933 picture_structure == PICT_FRAME &&
2934 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2935 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2938 offset[0]= y * src->linesize[0];
2940 offset[2]= (y >> vshift) * src->linesize[1];
2941 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2947 avctx->draw_horiz_band(avctx, src, offset,
2948 y, picture_structure, h);
/* Convenience wrapper forwarding MpegEncContext state to ff_draw_horiz_band.
 * Edge drawing is enabled only for unrestricted-MV, non-intra-only streams. */
2952 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2954 int draw_edges = s->unrestricted_mv && !s->intra_only;
2955 ff_draw_horiz_band(s->avctx, &s->dsp, s->current_picture_ptr,
2956 s->last_picture_ptr, y, h, s->picture_structure,
2957 s->first_field, draw_edges, s->low_delay,
2958 s->v_edge_pos, s->h_edge_pos);
/* Set up s->block_index[] (8x8-block indices for the current MB) and the
 * destination plane pointers s->dest[] for the current MB position,
 * accounting for lowres scaling and field-picture addressing. */
2961 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2962 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2963 const int uvlinesize = s->current_picture.f.linesize[1];
/* mb_size is log2 of the MB pixel size: 4 (=16px) minus the lowres shift. */
2964 const int mb_size= 4 - s->avctx->lowres;
2966 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2967 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2968 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2969 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2970 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2971 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2972 //block_index is not used by mpeg2, so it is not affected by chroma_format
2974 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2975 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2976 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2978 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2980 if(s->picture_structure==PICT_FRAME){
2981 s->dest[0] += s->mb_y * linesize << mb_size;
2982 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2983 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* Field picture: rows are interleaved, so use mb_y/2 for the offset. */
2985 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2986 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2987 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2988 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2994 * Permute an 8x8 block.
2995 * @param block the block which will be permuted according to the given permutation vector
2996 * @param permutation the permutation vector
2997 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
2998 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
2999 * (inverse) permutated to scantable order!
/* Permute the coefficients of an 8x8 block in place according to
 * 'permutation'; 'scantable' and 'last' limit the work to the coefficients
 * that can actually be non-zero. NOTE(review): the temp-buffer setup lines
 * are elided in this view. */
3001 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3007 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* First pass (elided body) presumably copies the live coefficients out;
 * second pass writes them back at their permuted positions. */
3009 for(i=0; i<=last; i++){
3010 const int j= scantable[i];
3015 for(i=0; i<=last; i++){
3016 const int j= scantable[i];
3017 const int perm_j= permutation[j];
3018 block[perm_j]= temp[j];
/* Flush callback: drop all reference pictures and reset parser/bitstream
 * state so decoding can restart cleanly (e.g. after a seek). */
3022 void ff_mpeg_flush(AVCodecContext *avctx){
3024 MpegEncContext *s = avctx->priv_data;
3026 if(s==NULL || s->picture==NULL)
3029 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3030 ff_mpeg_unref_picture(s, &s->picture[i]);
3031 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3033 ff_mpeg_unref_picture(s, &s->current_picture);
3034 ff_mpeg_unref_picture(s, &s->last_picture);
3035 ff_mpeg_unref_picture(s, &s->next_picture);
3037 s->mb_x= s->mb_y= 0;
/* Reset the AVParser-style packet reassembly state. */
3040 s->parse_context.state= -1;
3041 s->parse_context.frame_start_found= 0;
3042 s->parse_context.overread= 0;
3043 s->parse_context.overread_index= 0;
3044 s->parse_context.index= 0;
3045 s->parse_context.last_index= 0;
3046 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantization: DC scaled by the plane DC scale, AC by
 * qscale * intra matrix with the MPEG-1 odd-ification ((level-1)|1).
 * The two elided branches presumably handle negative vs. positive levels. */
3050 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
3051 int16_t *block, int n, int qscale)
3053 int i, level, nCoeffs;
3054 const uint16_t *quant_matrix;
3056 nCoeffs= s->block_last_index[n];
/* n < 4 are luma blocks, others chroma. */
3058 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3059 /* XXX: only mpeg1 */
3060 quant_matrix = s->intra_matrix;
3061 for(i=1;i<=nCoeffs;i++) {
3062 int j= s->intra_scantable.permutated[i];
3067 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3068 level = (level - 1) | 1;
3071 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3072 level = (level - 1) | 1;
/* MPEG-1 inter dequantization: ((2*level + 1) * qscale * matrix) >> 4,
 * then odd-ified. Starts at coefficient 0 (no separate DC handling).
 * The intra scantable is used for the permutation here as well. */
3079 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
3080 int16_t *block, int n, int qscale)
3082 int i, level, nCoeffs;
3083 const uint16_t *quant_matrix;
3085 nCoeffs= s->block_last_index[n];
3087 quant_matrix = s->inter_matrix;
3088 for(i=0; i<=nCoeffs; i++) {
3089 int j= s->intra_scantable.permutated[i];
3094 level = (((level << 1) + 1) * qscale *
3095 ((int) (quant_matrix[j]))) >> 4;
3096 level = (level - 1) | 1;
3099 level = (((level << 1) + 1) * qscale *
3100 ((int) (quant_matrix[j]))) >> 4;
3101 level = (level - 1) | 1;
/* MPEG-2 intra dequantization: like MPEG-1 intra but without the
 * (level-1)|1 odd-ification. With alternate_scan all 64 coefficients
 * are processed regardless of block_last_index. */
3108 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
3109 int16_t *block, int n, int qscale)
3111 int i, level, nCoeffs;
3112 const uint16_t *quant_matrix;
3114 if(s->alternate_scan) nCoeffs= 63;
3115 else nCoeffs= s->block_last_index[n];
3117 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3118 quant_matrix = s->intra_matrix;
3119 for(i=1;i<=nCoeffs;i++) {
3120 int j= s->intra_scantable.permutated[i];
3125 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3128 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of MPEG-2 intra dequantization; the elided lines
 * presumably carry the mismatch-control accumulation that distinguishes
 * it from dct_unquantize_mpeg2_intra_c. */
3135 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
3136 int16_t *block, int n, int qscale)
3138 int i, level, nCoeffs;
3139 const uint16_t *quant_matrix;
3142 if(s->alternate_scan) nCoeffs= 63;
3143 else nCoeffs= s->block_last_index[n];
3145 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3147 quant_matrix = s->intra_matrix;
3148 for(i=1;i<=nCoeffs;i++) {
3149 int j= s->intra_scantable.permutated[i];
3154 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3157 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantization: ((2*level + 1) * qscale * matrix) >> 4,
 * no odd-ification (MPEG-2 uses mismatch control instead, in elided code). */
3166 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
3167 int16_t *block, int n, int qscale)
3169 int i, level, nCoeffs;
3170 const uint16_t *quant_matrix;
3173 if(s->alternate_scan) nCoeffs= 63;
3174 else nCoeffs= s->block_last_index[n];
3176 quant_matrix = s->inter_matrix;
3177 for(i=0; i<=nCoeffs; i++) {
3178 int j= s->intra_scantable.permutated[i];
3183 level = (((level << 1) + 1) * qscale *
3184 ((int) (quant_matrix[j]))) >> 4;
3187 level = (((level << 1) + 1) * qscale *
3188 ((int) (quant_matrix[j]))) >> 4;
/* H.263 intra dequantization: level * (2*qscale) +/- qadd depending on
 * sign, with qadd = (qscale-1)|1. DC is scaled separately; with AIC the
 * block may legitimately have no coded coefficients. */
3197 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
3198 int16_t *block, int n, int qscale)
3200 int i, level, qmul, qadd;
3203 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
3208 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3209 qadd = (qscale - 1) | 1;
/* raster_end maps the scan-order last index to raster order. */
3216 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3218 for(i=1; i<=nCoeffs; i++) {
3222 level = level * qmul - qadd;
3224 level = level * qmul + qadd;
/* H.263 inter dequantization: same formula as the intra variant but the
 * loop starts at coefficient 0 (DC is quantized like AC for inter). */
3231 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
3232 int16_t *block, int n, int qscale)
3234 int i, level, qmul, qadd;
3237 av_assert2(s->block_last_index[n]>=0);
3239 qadd = (qscale - 1) | 1;
3242 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3244 for(i=0; i<=nCoeffs; i++) {
3248 level = level * qmul - qadd;
3250 level = level * qmul + qadd;
3258 * set qscale and update qscale dependent variables.
/* Clamp qscale to the valid range (upper bound 31 visible here) and update
 * the derived chroma qscale and DC scale values. */
3260 void ff_set_qscale(MpegEncContext * s, int qscale)
3264 else if (qscale > 31)
3268 s->chroma_qscale= s->chroma_qscale_table[qscale];
3270 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3271 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report row-level decoding progress for frame threading; suppressed for
 * B frames, partitioned frames, and after an error (the row may be redone). */
3274 void ff_MPV_report_decode_progress(MpegEncContext *s)
3276 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3277 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
#if CONFIG_ERROR_RESILIENCE
/* Mirror the MpegEncContext state the error-resilience module needs into
 * its ERContext, then start error tracking for the new frame. */
3281 void ff_mpeg_er_frame_start(MpegEncContext *s)
3283 ERContext *er = &s->er;
3285 er->cur_pic = s->current_picture_ptr;
3286 er->last_pic = s->last_picture_ptr;
3287 er->next_pic = s->next_picture_ptr;
3289 er->pp_time = s->pp_time;
3290 er->pb_time = s->pb_time;
3291 er->quarter_sample = s->quarter_sample;
3292 er->partitioned_frame = s->partitioned_frame;
3294 ff_er_frame_start(er);
3296 #endif /* CONFIG_ERROR_RESILIENCE */