2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
36 #include "h264chroma.h"
39 #include "mpegvideo.h"
42 #include "xvmc_internal.h"
/* Forward declarations of the per-standard inverse-quantization helpers.
 * ff_dct_common_init() installs these as function pointers on the context;
 * each dequantizes one 8x8 coefficient block `block` for component `n`
 * at quantizer scale `qscale`. */
46 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
47 int16_t *block, int n, int qscale);
48 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
49 int16_t *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
51 int16_t *block, int n, int qscale);
52 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
53 int16_t *block, int n, int qscale);
54 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
55 int16_t *block, int n, int qscale);
56 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
57 int16_t *block, int n, int qscale);
58 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
59 int16_t *block, int n, int qscale);
/* Default luma-qscale -> chroma-qscale mapping: the identity (chroma uses
 * the same quantizer as luma). Installed via ff_MPV_common_defaults(). */
61 static const uint8_t ff_default_chroma_qscale_table[32] = {
62 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
63 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
64 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* Intra-DC scale table for MPEG-1: the DC scale is a constant 8 regardless
 * of qscale. Also used as entry 0 of ff_mpeg2_dc_scale_table[] below. */
67 const uint8_t ff_mpeg1_dc_scale_table[128] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* Constant DC scale of 4 for every qscale; entry 1 of
 * ff_mpeg2_dc_scale_table[] (presumably selected by the MPEG-2
 * intra_dc_precision field — confirm against the spec/decoder). */
79 static const uint8_t mpeg2_dc_scale_table1[128] = {
80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* Constant DC scale of 2 for every qscale; entry 2 of
 * ff_mpeg2_dc_scale_table[]. */
91 static const uint8_t mpeg2_dc_scale_table2[128] = {
92 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* Constant DC scale of 1 (i.e. no DC scaling) for every qscale; entry 3 of
 * ff_mpeg2_dc_scale_table[]. */
103 static const uint8_t mpeg2_dc_scale_table3[128] = {
104 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale tables indexed 0..3; index 0 reuses the MPEG-1 table (scale 8),
 * indices 1..3 select scales 4, 2 and 1 respectively. */
115 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
116 ff_mpeg1_dc_scale_table,
117 mpeg2_dc_scale_table1,
118 mpeg2_dc_scale_table2,
119 mpeg2_dc_scale_table3,
/* Shared pixel-format list for 4:2:0-only codecs.
 * NOTE(review): the initializer is elided in this view — presumably
 * AV_PIX_FMT_YUV420P terminated by AV_PIX_FMT_NONE; confirm in full file. */
122 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* ERContext.decode_mb callback (see init_er()): reconstructs one macroblock
 * during error concealment by loading the supplied motion information into
 * the context and re-running the regular MB decode path.
 * NOTE(review): some lines appear elided in this view (e.g. the `mv`
 * parameter used by the memcpy below) — verify against the full file. */
127 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
129 int mb_x, int mb_y, int mb_intra, int mb_skipped)
131 MpegEncContext *s = opaque;
134 s->mv_type = mv_type;
135 s->mb_intra = mb_intra;
136 s->mb_skipped = mb_skipped;
139 memcpy(s->mv, mv, sizeof(*mv));
141 ff_init_block_index(s);
142 ff_update_block_index(s);
144 s->dsp.clear_blocks(s->block[0]);
// Recompute destination pointers from the MB position; chroma offsets are
// scaled down by the chroma subsampling shifts.
146 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
147 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
148 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
151 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
152 ff_MPV_decode_mb(s, s->block);
155 /* init common dct for both encoder and decoder */
156 av_cold int ff_dct_common_init(MpegEncContext *s)
// Initialize the DSP sub-contexts the codec relies on.
158 ff_dsputil_init(&s->dsp, s->avctx);
159 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
160 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
161 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
// Install the portable C dequantizers; architecture-specific init below may
// override these with optimized versions.
163 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
164 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
165 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
166 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
167 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
168 if (s->flags & CODEC_FLAG_BITEXACT)
169 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
170 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
// Per-architecture initialization (guarded by ARCH_* conditionals in the
// full file — the #if lines are elided in this view).
173 ff_MPV_common_init_axp(s);
175 ff_MPV_common_init_arm(s);
177 ff_MPV_common_init_bfin(s);
179 ff_MPV_common_init_ppc(s);
181 ff_MPV_common_init_x86(s);
183 /* load & permutate scantables
184 * note: only wmv uses different ones
// Scan order depends on alternate_scan; the else branch marker for the
// zigzag default is elided in this view.
186 if (s->alternate_scan) {
187 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
190 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
194 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge emulation + motion
 * estimation scratchpads). Returns 0 on success, AVERROR(ENOMEM) on OOM. */
199 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
// Round |linesize|+64 up to a multiple of 32 for alignment.
201 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
203 // edge emu needs blocksize + filter length - 1
204 // (= 17x17 for halfpel / 21x21 for h264)
205 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
206 // at uvlinesize. It supports only YUV420 so 24x24 is enough
207 // linesize * interlaced * MBsize
208 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
211 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
// The remaining scratchpads alias (parts of) me.scratchpad — they are never
// freed individually.
213 s->me.temp = s->me.scratchpad;
214 s->rd_scratchpad = s->me.scratchpad;
215 s->b_scratchpad = s->me.scratchpad;
216 s->obmc_scratchpad = s->me.scratchpad + 16;
// fail: label elided in this view; failure path frees what was allocated.
220 av_freep(&s->edge_emu_buffer);
221 return AVERROR(ENOMEM);
225 * Allocate a frame buffer
227 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
// WM Image / Screen codecs use internal buffers of different dimensions, so
// the thread-aware getter is bypassed for them (default getter used instead).
232 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
233 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
234 s->codec_id != AV_CODEC_ID_MSS2)
235 r = ff_thread_get_buffer(s->avctx, &pic->tf,
236 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
238 pic->f.width = s->avctx->width;
239 pic->f.height = s->avctx->height;
240 pic->f.format = s->avctx->pix_fmt;
241 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
244 if (r < 0 || !pic->f.buf[0]) {
245 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
// Allocate per-frame private data for the hardware accelerator, if any.
250 if (s->avctx->hwaccel) {
251 assert(!pic->hwaccel_picture_private);
252 if (s->avctx->hwaccel->priv_data_size) {
253 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
254 if (!pic->hwaccel_priv_buf) {
255 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
258 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
// Strides must stay constant across frames once the context picked them up.
262 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
263 s->uvlinesize != pic->f.linesize[1])) {
264 av_log(s->avctx, AV_LOG_ERROR,
265 "get_buffer() failed (stride changed)\n");
266 ff_mpeg_unref_picture(s, pic);
// The code assumes U and V planes share one stride.
270 if (pic->f.linesize[1] != pic->f.linesize[2]) {
271 av_log(s->avctx, AV_LOG_ERROR,
272 "get_buffer() failed (uv stride mismatch)\n");
273 ff_mpeg_unref_picture(s, pic);
// Lazily allocate the stride-dependent scratch buffers on first frame.
277 if (!s->edge_emu_buffer &&
278 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
279 av_log(s->avctx, AV_LOG_ERROR,
280 "get_buffer() failed to allocate context scratch buffers.\n");
281 ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data buffer references (macroblock statistics,
 * skip/qscale/type tables, motion vectors, reference indices) and reset the
 * recorded allocation dimensions so the tables get reallocated next time. */
288 static void free_picture_tables(Picture *pic)
292 pic->alloc_mb_width =
293 pic->alloc_mb_height = 0;
295 av_buffer_unref(&pic->mb_var_buf);
296 av_buffer_unref(&pic->mc_mb_var_buf);
297 av_buffer_unref(&pic->mb_mean_buf);
298 av_buffer_unref(&pic->mbskip_table_buf);
299 av_buffer_unref(&pic->qscale_table_buf);
300 av_buffer_unref(&pic->mb_type_buf);
302 for (i = 0; i < 2; i++) {
303 av_buffer_unref(&pic->motion_val_buf[i]);
304 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data buffers sized from the current
 * macroblock geometry. Returns 0 on success or AVERROR(ENOMEM); partially
 * allocated buffers are left for the caller to release (see ff_alloc_picture's
 * fail path, which calls free_picture_tables()). */
308 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
310 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
311 const int mb_array_size = s->mb_stride * s->mb_height;
312 const int b8_array_size = s->b8_stride * s->mb_height * 2;
// Tables needed by both encoder and decoder.
316 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
317 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
318 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
320 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
321 return AVERROR(ENOMEM);
// Encoder-only statistics tables (condition guarding this appears elided
// in this view — in context these are only needed when encoding).
324 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
325 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
326 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
327 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
328 return AVERROR(ENOMEM);
// Motion vectors / reference indices: needed for H.263-family output,
// encoding, or motion-vector visualization.
331 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
332 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
333 int ref_index_size = 4 * mb_array_size;
335 for (i = 0; mv_size && i < 2; i++) {
336 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
337 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
338 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
339 return AVERROR(ENOMEM);
// Record the geometry these tables were sized for, so a resolution change
// can be detected later (see ff_alloc_picture()).
343 pic->alloc_mb_width = s->mb_width;
344 pic->alloc_mb_height = s->mb_height;
/* Ensure every per-picture side-data buffer is writable (unshared),
 * copy-on-write via av_buffer_make_writable(). Returns 0 on success or a
 * negative AVERROR (the macro's error path is elided in this view). */
349 static int make_tables_writable(Picture *pic)
352 #define MAKE_WRITABLE(table) \
355 (ret = av_buffer_make_writable(&pic->table)) < 0)\
359 MAKE_WRITABLE(mb_var_buf);
360 MAKE_WRITABLE(mc_mb_var_buf);
361 MAKE_WRITABLE(mb_mean_buf);
362 MAKE_WRITABLE(mbskip_table_buf);
363 MAKE_WRITABLE(qscale_table_buf);
364 MAKE_WRITABLE(mb_type_buf);
366 for (i = 0; i < 2; i++) {
367 MAKE_WRITABLE(motion_val_buf[i]);
368 MAKE_WRITABLE(ref_index_buf[i]);
375 * Allocate a Picture.
376 * The pixels are allocated/set by calling get_buffer() if shared = 0
378 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
// If the picture's tables were sized for a different MB geometry, drop them
// so they are reallocated below.
382 if (pic->qscale_table_buf)
383 if ( pic->alloc_mb_width != s->mb_width
384 || pic->alloc_mb_height != s->mb_height)
385 free_picture_tables(pic);
// shared: caller provides the pixel data; otherwise allocate a frame buffer.
388 av_assert0(pic->f.data[0]);
391 av_assert0(!pic->f.buf[0]);
393 if (alloc_frame_buffer(s, pic) < 0)
// Adopt the strides of the first allocated frame for the whole context.
396 s->linesize = pic->f.linesize[0];
397 s->uvlinesize = pic->f.linesize[1];
400 if (!pic->qscale_table_buf)
401 ret = alloc_picture_tables(s, pic);
403 ret = make_tables_writable(pic);
// Set up the convenience pointers into the (possibly freshly allocated)
// table buffers; qscale/mb_type skip the first row+1 for edge handling.
408 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
409 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
410 pic->mb_mean = pic->mb_mean_buf->data;
413 pic->mbskip_table = pic->mbskip_table_buf->data;
414 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
415 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
417 if (pic->motion_val_buf[0]) {
418 for (i = 0; i < 2; i++) {
419 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
420 pic->ref_index[i] = pic->ref_index_buf[i]->data;
// fail: label elided in this view; releases everything and reports OOM.
426 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
427 ff_mpeg_unref_picture(s, pic);
428 free_picture_tables(pic);
429 return AVERROR(ENOMEM);
433 * Deallocate a picture.
435 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
// Everything located after mb_mean in the Picture struct is preserved;
// the fields before it are cleared by the memset at the bottom.
437 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
440 /* WM Image / Screen codecs allocate internal buffers with different
441 * dimensions / colorspaces; ignore user-defined callbacks for these. */
442 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
443 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
444 s->codec_id != AV_CODEC_ID_MSS2)
445 ff_thread_release_buffer(s->avctx, &pic->tf);
447 av_frame_unref(&pic->f);
449 av_buffer_unref(&pic->hwaccel_priv_buf);
451 if (pic->needs_realloc)
452 free_picture_tables(pic);
454 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's side-data buffers share src's via new AVBufferRef references
 * (only re-referencing when the underlying buffer actually differs), then
 * copy the convenience pointers and geometry. On a failed ref, all of dst's
 * tables are freed and AVERROR(ENOMEM) is returned. */
457 static int update_picture_tables(Picture *dst, Picture *src)
461 #define UPDATE_TABLE(table)\
464 (!dst->table || dst->table->buffer != src->table->buffer)) {\
465 av_buffer_unref(&dst->table);\
466 dst->table = av_buffer_ref(src->table);\
468 free_picture_tables(dst);\
469 return AVERROR(ENOMEM);\
474 UPDATE_TABLE(mb_var_buf);
475 UPDATE_TABLE(mc_mb_var_buf);
476 UPDATE_TABLE(mb_mean_buf);
477 UPDATE_TABLE(mbskip_table_buf);
478 UPDATE_TABLE(qscale_table_buf);
479 UPDATE_TABLE(mb_type_buf);
480 for (i = 0; i < 2; i++) {
481 UPDATE_TABLE(motion_val_buf[i]);
482 UPDATE_TABLE(ref_index_buf[i]);
// The raw pointers can be copied directly: they point into the buffers
// that are now shared between src and dst.
485 dst->mb_var = src->mb_var;
486 dst->mc_mb_var = src->mc_mb_var;
487 dst->mb_mean = src->mb_mean;
488 dst->mbskip_table = src->mbskip_table;
489 dst->qscale_table = src->qscale_table;
490 dst->mb_type = src->mb_type;
491 for (i = 0; i < 2; i++) {
492 dst->motion_val[i] = src->motion_val[i];
493 dst->ref_index[i] = src->ref_index[i];
496 dst->alloc_mb_width = src->alloc_mb_width;
497 dst->alloc_mb_height = src->alloc_mb_height;
/* Reference src's frame, side-data tables and hwaccel private data into dst
 * (dst must be empty). On any failure dst is fully unreferenced via the
 * fail path at the bottom. */
502 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
506 av_assert0(!dst->f.buf[0]);
507 av_assert0(src->f.buf[0]);
511 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
515 ret = update_picture_tables(dst, src);
519 if (src->hwaccel_picture_private) {
520 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
521 if (!dst->hwaccel_priv_buf)
523 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
// Copy the scalar bookkeeping fields.
526 dst->field_picture = src->field_picture;
527 dst->mb_var_sum = src->mb_var_sum;
528 dst->mc_mb_var_sum = src->mc_mb_var_sum;
529 dst->b_frame_score = src->b_frame_score;
530 dst->needs_realloc = src->needs_realloc;
531 dst->reference = src->reference;
532 dst->shared = src->shared;
// fail: label elided in this view.
536 ff_mpeg_unref_picture(s, dst);
/* Swap the U and V block pointers (used for VCR2-tagged streams, see the
 * codec_tag checks in init_duplicate_context()/ff_update_duplicate_context()).
 * NOTE(review): the temporary-variable swap lines are elided in this view —
 * as shown this would merely copy pblocks[5] over pblocks[4]; confirm the
 * full three-line swap in the complete file. */
540 static void exchange_uv(MpegEncContext *s)
545 s->pblocks[4] = s->pblocks[5];
/* Initialize the per-slice-thread parts of a context: ME maps, DCT blocks
 * and (for H.263-family) AC prediction values. Scratch buffers are reset to
 * NULL here and allocated lazily by ff_mpv_frame_size_alloc(). Returns 0 on
 * success, -1 on OOM (cleanup deferred to ff_MPV_common_end()). */
549 static int init_duplicate_context(MpegEncContext *s)
551 int y_size = s->b8_stride * (2 * s->mb_height + 1);
552 int c_size = s->mb_stride * (s->mb_height + 1);
553 int yc_size = y_size + 2 * c_size;
561 s->obmc_scratchpad = NULL;
564 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
565 ME_MAP_SIZE * sizeof(uint32_t), fail)
566 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
567 ME_MAP_SIZE * sizeof(uint32_t), fail)
568 if (s->avctx->noise_reduction) {
569 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
570 2 * 64 * sizeof(int), fail)
573 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
574 s->block = s->blocks[0];
576 for (i = 0; i < 12; i++) {
577 s->pblocks[i] = &s->block[i];
// VCR2 streams carry U and V swapped (handled via exchange_uv()).
579 if (s->avctx->codec_tag == AV_RL32("VCR2"))
582 if (s->out_format == FMT_H263) {
584 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
585 yc_size * sizeof(int16_t) * 16, fail);
// ac_val[0] = luma plane, ac_val[1]/[2] = chroma planes, each offset past
// the first row/column used for prediction edges.
586 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
587 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
588 s->ac_val[2] = s->ac_val[1] + c_size;
593 return -1; // free() through ff_MPV_common_end()
/* Free the per-slice-thread buffers allocated by init_duplicate_context()
 * and ff_mpv_frame_size_alloc(). The aliasing scratchpads (rd/b/obmc, which
 * point into me.scratchpad) are only nulled, not freed individually. */
596 static void free_duplicate_context(MpegEncContext *s)
601 av_freep(&s->edge_emu_buffer);
602 av_freep(&s->me.scratchpad);
606 s->obmc_scratchpad = NULL;
608 av_freep(&s->dct_error_sum);
609 av_freep(&s->me.map);
610 av_freep(&s->me.score_map);
611 av_freep(&s->blocks);
612 av_freep(&s->ac_val_base);
/* Copy the per-thread fields from src into bak so that
 * ff_update_duplicate_context() can restore them after its wholesale memcpy.
 * NOTE(review): most COPY(...) lines are elided in this view. */
616 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
618 #define COPY(a) bak->a = src->a
619 COPY(edge_emu_buffer);
624 COPY(obmc_scratchpad);
631 COPY(me.map_generation);
/* Synchronize a slice-thread context with the main one: copy the whole
 * struct, then restore dst's private per-thread fields (saved in `bak`) and
 * re-point pblocks into dst's own block storage. Returns 0 on success or a
 * negative AVERROR if the scratch buffers could not be allocated. */
643 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
647 // FIXME copy only needed parts
// Save dst's per-thread state, overwrite everything, then restore it.
649 backup_duplicate_context(&bak, dst);
650 memcpy(dst, src, sizeof(MpegEncContext));
651 backup_duplicate_context(dst, &bak);
652 for (i = 0; i < 12; i++) {
653 dst->pblocks[i] = &dst->block[i];
// VCR2 streams carry U and V swapped (handled via exchange_uv()).
655 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
657 if (!dst->edge_emu_buffer &&
658 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
659 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
660 "scratch buffers.\n");
663 // STOP_TIMER("update_duplicate_context")
664 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: copy decoding state from the source thread's context
 * (s1) into this thread's context (s) — picture references, bitstream
 * buffer, interlacing info and assorted bookkeeping. Returns 0 on success
 * or a negative AVERROR. */
668 int ff_mpeg_update_thread_context(AVCodecContext *dst,
669 const AVCodecContext *src)
672 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
679 // FIXME can parameters change on I-frames?
680 // in that case dst may need a reinit
// First call for this thread: clone the whole context, then reset the
// fields that must be private to this thread.
681 if (!s->context_initialized) {
682 memcpy(s, s1, sizeof(MpegEncContext));
685 s->bitstream_buffer = NULL;
686 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
688 if (s1->context_initialized){
689 // s->picture_range_start += MAX_PICTURE_COUNT;
690 // s->picture_range_end += MAX_PICTURE_COUNT;
691 if((ret = ff_MPV_common_init(s)) < 0){
692 memset(s, 0, sizeof(MpegEncContext));
// Propagate a mid-stream resolution change.
699 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
700 s->context_reinit = 0;
701 s->height = s1->height;
702 s->width = s1->width;
703 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
707 s->avctx->coded_height = s1->avctx->coded_height;
708 s->avctx->coded_width = s1->avctx->coded_width;
709 s->avctx->width = s1->avctx->width;
710 s->avctx->height = s1->avctx->height;
712 s->coded_picture_number = s1->coded_picture_number;
713 s->picture_number = s1->picture_number;
714 s->input_picture_number = s1->input_picture_number;
716 av_assert0(!s->picture || s->picture != s1->picture);
// Re-reference every picture slot from the source thread.
718 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
719 ff_mpeg_unref_picture(s, &s->picture[i]);
720 if (s1->picture[i].f.buf[0] &&
721 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
725 #define UPDATE_PICTURE(pic)\
727 ff_mpeg_unref_picture(s, &s->pic);\
728 if (s1->pic.f.buf[0])\
729 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
731 ret = update_picture_tables(&s->pic, &s1->pic);\
736 UPDATE_PICTURE(current_picture);
737 UPDATE_PICTURE(last_picture);
738 UPDATE_PICTURE(next_picture);
// Translate s1's picture pointers into s's own picture[] array.
740 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
741 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
742 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
744 // Error/bug resilience
745 s->next_p_frame_damaged = s1->next_p_frame_damaged;
746 s->workaround_bugs = s1->workaround_bugs;
747 s->padding_bug_score = s1->padding_bug_score;
// Bulk-copy the contiguous field range [last_time_base, pb_field_time]
// (relies on the struct layout keeping these fields adjacent).
750 memcpy(&s->last_time_base, &s1->last_time_base,
751 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
752 (char *) &s1->last_time_base);
755 s->max_b_frames = s1->max_b_frames;
756 s->low_delay = s1->low_delay;
757 s->droppable = s1->droppable;
759 // DivX handling (doesn't work)
760 s->divx_packed = s1->divx_packed;
// Duplicate the pending bitstream buffer, including zeroed padding.
762 if (s1->bitstream_buffer) {
763 if (s1->bitstream_buffer_size +
764 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
765 av_fast_malloc(&s->bitstream_buffer,
766 &s->allocated_bitstream_buffer_size,
767 s1->allocated_bitstream_buffer_size);
768 s->bitstream_buffer_size = s1->bitstream_buffer_size;
769 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
770 s1->bitstream_buffer_size);
771 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
772 FF_INPUT_BUFFER_PADDING_SIZE);
775 // linesize dependend scratch buffer allocation
776 if (!s->edge_emu_buffer)
778 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
779 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
780 "scratch buffers.\n");
781 return AVERROR(ENOMEM);
// else branch (source linesize still unknown) — note elided lines here.
784 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
785 "be allocated due to unknown size.\n");
788 // MPEG2/interlacing info
// Another layout-dependent bulk copy: [progressive_sequence, rtp_mode).
789 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
790 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
792 if (!s1->first_field) {
793 s->last_pict_type = s1->pict_type;
794 if (s1->current_picture_ptr)
795 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
797 if (s1->pict_type != AV_PICTURE_TYPE_B) {
798 s->last_non_b_pict_type = s1->pict_type;
806 * Set the given MpegEncContext to common defaults
807 * (same for encoding and decoding).
808 * The changed fields will not depend upon the
809 * prior state of the MpegEncContext.
811 void ff_MPV_common_defaults(MpegEncContext *s)
// MPEG-1 DC scale and identity chroma-qscale tables are the baseline;
// codec-specific init may replace them later.
813 s->y_dc_scale_table =
814 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
815 s->chroma_qscale_table = ff_default_chroma_qscale_table;
816 s->progressive_frame = 1;
817 s->progressive_sequence = 1;
818 s->picture_structure = PICT_FRAME;
820 s->coded_picture_number = 0;
821 s->picture_number = 0;
822 s->input_picture_number = 0;
824 s->picture_in_gop_number = 0;
829 s->slice_context_count = 1;
833 * Set the given MpegEncContext to defaults for decoding.
834 * the changed fields will not depend upon
835 * the prior state of the MpegEncContext.
837 void ff_MPV_decode_defaults(MpegEncContext *s)
// Currently just the shared defaults; decoder-specific lines (if any) are
// elided in this view.
839 ff_MPV_common_defaults(s);
/* Initialize the error-resilience context from the MpegEncContext: mirrors
 * the MB geometry and shared tables, allocates the ER-private status/temp
 * buffers, and installs mpeg_er_decode_mb() as the concealment callback.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
842 static int init_er(MpegEncContext *s)
844 ERContext *er = &s->er;
845 int mb_array_size = s->mb_height * s->mb_stride;
848 er->avctx = s->avctx;
// Share the macroblock geometry/index tables with the main context.
851 er->mb_index2xy = s->mb_index2xy;
852 er->mb_num = s->mb_num;
853 er->mb_width = s->mb_width;
854 er->mb_height = s->mb_height;
855 er->mb_stride = s->mb_stride;
856 er->b8_stride = s->b8_stride;
858 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
859 er->error_status_table = av_mallocz(mb_array_size);
860 if (!er->er_temp_buffer || !er->error_status_table)
863 er->mbskip_table = s->mbskip_table;
864 er->mbintra_table = s->mbintra_table;
866 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
867 er->dc_val[i] = s->dc_val[i];
869 er->decode_mb = mpeg_er_decode_mb;
// fail: label elided in this view.
874 av_freep(&er->er_temp_buffer);
875 av_freep(&er->error_status_table);
876 return AVERROR(ENOMEM);
880 * Initialize and allocates MpegEncContext fields dependent on the resolution.
882 static int init_context_frame(MpegEncContext *s)
884 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
// Derive the macroblock geometry; strides include one extra column for
// edge/prediction handling.
886 s->mb_width = (s->width + 15) / 16;
887 s->mb_stride = s->mb_width + 1;
888 s->b8_stride = s->mb_width * 2 + 1;
889 s->b4_stride = s->mb_width * 4 + 1;
890 mb_array_size = s->mb_height * s->mb_stride;
891 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
893 /* set default edge pos, will be overriden
894 * in decode_header if needed */
895 s->h_edge_pos = s->mb_width * 16;
896 s->v_edge_pos = s->mb_height * 16;
898 s->mb_num = s->mb_width * s->mb_height;
// block_wrap[] strides for luma (b8) vs. chroma (mb) blocks; entries 0-2
// and 4 are elided in this view.
903 s->block_wrap[3] = s->b8_stride;
905 s->block_wrap[5] = s->mb_stride;
907 y_size = s->b8_stride * (2 * s->mb_height + 1);
908 c_size = s->mb_stride * (s->mb_height + 1);
909 yc_size = y_size + 2 * c_size;
911 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
912 for (y = 0; y < s->mb_height; y++)
913 for (x = 0; x < s->mb_width; x++)
914 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
916 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
// Encoder-only allocations (the `if (s->encoding)` guard appears elided).
919 /* Allocate MV tables */
920 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
921 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
922 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
923 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
924 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
925 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
// Working pointers skip the first row+1 of each base table.
926 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
927 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
928 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
929 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
930 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
931 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
933 /* Allocate MB type table */
934 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
936 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
938 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
939 mb_array_size * sizeof(float), fail);
940 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
941 mb_array_size * sizeof(float), fail);
945 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
946 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
947 /* interlaced direct mode decoding tables */
948 for (i = 0; i < 2; i++) {
950 for (j = 0; j < 2; j++) {
951 for (k = 0; k < 2; k++) {
952 FF_ALLOCZ_OR_GOTO(s->avctx,
953 s->b_field_mv_table_base[i][j][k],
954 mv_table_size * 2 * sizeof(int16_t),
956 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
959 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
960 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
961 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
963 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
966 if (s->out_format == FMT_H263) {
968 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
969 s->coded_block = s->coded_block_base + s->b8_stride + 1;
971 /* cbp, ac_pred, pred_dir */
972 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
973 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
976 if (s->h263_pred || s->h263_plus || !s->encoding) {
978 // MN: we need these for error resilience of intra-frames
979 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
980 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
981 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
982 s->dc_val[2] = s->dc_val[1] + c_size;
// 1024 is the reset value for DC prediction.
983 for (i = 0; i < yc_size; i++)
984 s->dc_val_base[i] = 1024;
987 /* which mb is a intra block */
988 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
989 memset(s->mbintra_table, 1, mb_array_size);
991 /* init macroblock skip table */
992 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
993 // Note the + 1 is for a quicker mpeg4 slice_end detection
997 return AVERROR(ENOMEM);
1001 * init common structure for both encoder and decoder.
1002 * this assumes that some variables like width/height are already set
1004 av_cold int ff_MPV_common_init(MpegEncContext *s)
// One slice context per slice thread (or per requested encoder slice).
1007 int nb_slices = (HAVE_THREADS &&
1008 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1009 s->avctx->thread_count : 1;
1011 if (s->encoding && s->avctx->slices)
1012 nb_slices = s->avctx->slices;
// Interlaced MPEG-2 needs mb_height rounded to a multiple of two MB rows.
1014 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1015 s->mb_height = (s->height + 31) / 32 * 2;
1017 s->mb_height = (s->height + 15) / 16;
1019 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1020 av_log(s->avctx, AV_LOG_ERROR,
1021 "decoding to AV_PIX_FMT_NONE is not supported.\n");
// Clamp the slice count to what threads/MB rows allow.
1025 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1028 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1030 max_slices = MAX_THREADS;
1031 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1032 " reducing to %d\n", nb_slices, max_slices);
1033 nb_slices = max_slices;
1036 if ((s->width || s->height) &&
1037 av_image_check_size(s->width, s->height, 0, s->avctx))
1040 ff_dct_common_init(s);
1042 s->flags = s->avctx->flags;
1043 s->flags2 = s->avctx->flags2;
1045 /* set chroma shifts */
1046 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1048 &s->chroma_y_shift);
1050 /* convert fourcc to upper case */
1051 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1053 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1055 s->avctx->coded_frame = &s->current_picture.f;
// Encoder-only allocations (the `if (s->encoding)` guard appears elided
// in this view): MSMPEG4 AC statistics, 2-pass stats and quant matrices.
1058 if (s->msmpeg4_version) {
1059 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1060 2 * 2 * (MAX_LEVEL + 1) *
1061 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1063 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1065 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
1066 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
1067 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
1068 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1069 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1070 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1071 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1072 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1074 if (s->avctx->noise_reduction) {
1075 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
// Allocate the picture pool and reset the fixed picture slots.
1079 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1080 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1081 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1082 avcodec_get_frame_defaults(&s->picture[i].f);
1084 memset(&s->next_picture, 0, sizeof(s->next_picture));
1085 memset(&s->last_picture, 0, sizeof(s->last_picture));
1086 memset(&s->current_picture, 0, sizeof(s->current_picture));
1087 avcodec_get_frame_defaults(&s->next_picture.f);
1088 avcodec_get_frame_defaults(&s->last_picture.f);
1089 avcodec_get_frame_defaults(&s->current_picture.f);
1091 if (init_context_frame(s))
1094 s->parse_context.state = -1;
1096 s->context_initialized = 1;
1097 s->thread_context[0] = s;
1099 // if (s->width && s->height) {
// Create one duplicated context per additional slice thread and hand each
// a contiguous band of MB rows.
1100 if (nb_slices > 1) {
1101 for (i = 1; i < nb_slices; i++) {
1102 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1103 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1106 for (i = 0; i < nb_slices; i++) {
1107 if (init_duplicate_context(s->thread_context[i]) < 0)
1109 s->thread_context[i]->start_mb_y =
1110 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1111 s->thread_context[i]->end_mb_y =
1112 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
// Single-slice path (else branch marker elided in this view).
1115 if (init_duplicate_context(s) < 0)
1118 s->end_mb_y = s->mb_height;
1120 s->slice_context_count = nb_slices;
// fail: label elided; full teardown on any allocation failure.
1125 ff_MPV_common_end(s);
1130 * Frees and resets MpegEncContext fields depending on the resolution.
1131 * Is used during resolution changes to avoid a full reinitialization of the
1134 static int free_context_frame(MpegEncContext *s)
// Frees all resolution-dependent tables of the context and NULLs the derived
// pointers so a later init_context_frame() can rebuild them. Used by
// ff_MPV_common_frame_size_change() and ff_MPV_common_end().
// NOTE(review): the listing elides braces/loop-counter declarations here.
1138 av_freep(&s->mb_type);
// Motion-vector table *_base pointers own the allocations; the non-base
// pointers below are offset views into them, so they are only NULLed.
1139 av_freep(&s->p_mv_table_base);
1140 av_freep(&s->b_forw_mv_table_base);
1141 av_freep(&s->b_back_mv_table_base);
1142 av_freep(&s->b_bidir_forw_mv_table_base);
1143 av_freep(&s->b_bidir_back_mv_table_base);
1144 av_freep(&s->b_direct_mv_table_base);
1145 s->p_mv_table = NULL;
1146 s->b_forw_mv_table = NULL;
1147 s->b_back_mv_table = NULL;
1148 s->b_bidir_forw_mv_table = NULL;
1149 s->b_bidir_back_mv_table = NULL;
1150 s->b_direct_mv_table = NULL;
// Per-field MV tables: indexed [list][field of current MB][field referenced].
1151 for (i = 0; i < 2; i++) {
1152 for (j = 0; j < 2; j++) {
1153 for (k = 0; k < 2; k++) {
1154 av_freep(&s->b_field_mv_table_base[i][j][k]);
1155 s->b_field_mv_table[i][j][k] = NULL;
1157 av_freep(&s->b_field_select_table[i][j]);
1158 av_freep(&s->p_field_mv_table_base[i][j]);
1159 s->p_field_mv_table[i][j] = NULL;
1161 av_freep(&s->p_field_select_table[i]);
// Prediction/coding side tables.
1164 av_freep(&s->dc_val_base);
1165 av_freep(&s->coded_block_base);
1166 av_freep(&s->mbintra_table);
1167 av_freep(&s->cbp_table);
1168 av_freep(&s->pred_dir_table);
1170 av_freep(&s->mbskip_table);
// Error-resilience scratch buffers.
1172 av_freep(&s->er.error_status_table);
1173 av_freep(&s->er.er_temp_buffer);
1174 av_freep(&s->mb_index2xy);
1175 av_freep(&s->lambda_table);
1177 av_freep(&s->cplx_tab);
1178 av_freep(&s->bits_tab);
// Zeroed linesizes signal "no frame geometry configured".
1180 s->linesize = s->uvlinesize = 0;
1185 int ff_MPV_common_frame_size_change(MpegEncContext *s)
// Reinitializes the context after a mid-stream resolution change without a
// full teardown: frees per-slice duplicates and frame-size tables, marks all
// pictures for reallocation, then rebuilds the frame tables and slice
// contexts. On failure falls through to ff_MPV_common_end() (full teardown).
// NOTE(review): error-handling lines (goto fail paths, 'else' branches) are
// elided in this listing.
1189 if (s->slice_context_count > 1) {
1190 for (i = 0; i < s->slice_context_count; i++) {
1191 free_duplicate_context(s->thread_context[i]);
// thread_context[0] aliases s itself, so only contexts 1..n-1 are freed.
1193 for (i = 1; i < s->slice_context_count; i++) {
1194 av_freep(&s->thread_context[i]);
1197 free_duplicate_context(s);
1199 if ((err = free_context_frame(s)) < 0)
// Force every cached picture to be reallocated at the new dimensions.
1203 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1204 s->picture[i].needs_realloc = 1;
1207 s->last_picture_ptr =
1208 s->next_picture_ptr =
1209 s->current_picture_ptr = NULL;
// Interlaced MPEG-2 rounds MB height to pairs of 16-line field macroblocks.
1212 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1213 s->mb_height = (s->height + 31) / 32 * 2;
1215 s->mb_height = (s->height + 15) / 16;
1217 if ((s->width || s->height) &&
1218 av_image_check_size(s->width, s->height, 0, s->avctx))
1219 return AVERROR_INVALIDDATA;
1221 if ((err = init_context_frame(s)))
1224 s->thread_context[0] = s;
1226 if (s->width && s->height) {
1227 int nb_slices = s->slice_context_count;
1228 if (nb_slices > 1) {
// Clone the main context for each additional slice thread.
1229 for (i = 1; i < nb_slices; i++) {
1230 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1231 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1234 for (i = 0; i < nb_slices; i++) {
1235 if (init_duplicate_context(s->thread_context[i]) < 0)
// Rounded partition of the MB rows across the slice contexts.
1237 s->thread_context[i]->start_mb_y =
1238 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1239 s->thread_context[i]->end_mb_y =
1240 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1243 err = init_duplicate_context(s);
1247 s->end_mb_y = s->mb_height;
1249 s->slice_context_count = nb_slices;
// Failure path: tear the whole context down.
1254 ff_MPV_common_end(s);
1258 /* init common structure for both encoder and decoder */
1259 void ff_MPV_common_end(MpegEncContext *s)
// Full teardown of an MpegEncContext, shared by encoder and decoder: frees
// slice-thread duplicates, bitstream/parse buffers, quantizer matrices,
// every Picture and its tables, and finally the frame-size tables. Leaves
// the context in a state where ff_MPV_common_init() may be called again.
1263 if (s->slice_context_count > 1) {
1264 for (i = 0; i < s->slice_context_count; i++) {
1265 free_duplicate_context(s->thread_context[i]);
// thread_context[0] is s itself; free only the cloned contexts.
1267 for (i = 1; i < s->slice_context_count; i++) {
1268 av_freep(&s->thread_context[i]);
1270 s->slice_context_count = 1;
1271 } else free_duplicate_context(s);
1273 av_freep(&s->parse_context.buffer);
1274 s->parse_context.buffer_size = 0;
1276 av_freep(&s->bitstream_buffer);
1277 s->allocated_bitstream_buffer_size = 0;
1279 av_freep(&s->avctx->stats_out);
1280 av_freep(&s->ac_stats);
// Chroma matrices may alias the luma ones (see allocation in common_init);
// free them only when they are separate allocations, then always NULL them.
1282 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1283 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1284 s->q_chroma_intra_matrix= NULL;
1285 s->q_chroma_intra_matrix16= NULL;
1286 av_freep(&s->q_intra_matrix);
1287 av_freep(&s->q_inter_matrix);
1288 av_freep(&s->q_intra_matrix16);
1289 av_freep(&s->q_inter_matrix16);
1290 av_freep(&s->input_picture);
1291 av_freep(&s->reordered_input_picture);
1292 av_freep(&s->dct_offset);
// Release all pooled pictures plus the embedded last/current/next/new copies.
1295 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1296 free_picture_tables(&s->picture[i]);
1297 ff_mpeg_unref_picture(s, &s->picture[i]);
1300 av_freep(&s->picture);
1301 free_picture_tables(&s->last_picture);
1302 ff_mpeg_unref_picture(s, &s->last_picture);
1303 free_picture_tables(&s->current_picture);
1304 ff_mpeg_unref_picture(s, &s->current_picture);
1305 free_picture_tables(&s->next_picture);
1306 ff_mpeg_unref_picture(s, &s->next_picture);
1307 free_picture_tables(&s->new_picture);
1308 ff_mpeg_unref_picture(s, &s->new_picture);
1310 free_context_frame(s);
1312 s->context_initialized = 0;
1313 s->last_picture_ptr =
1314 s->next_picture_ptr =
1315 s->current_picture_ptr = NULL;
1316 s->linesize = s->uvlinesize = 0;
1319 av_cold void ff_init_rl(RLTable *rl,
1320 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
// Builds the derived run/level lookup tables (max_level[], max_run[],
// index_run[]) of an RLTable, once for non-last (last=0) and once for last
// (last=1) coefficients. If static_store is given, the results live in that
// caller-provided buffer; otherwise they are av_malloc'ed.
1322 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1323 uint8_t index_run[MAX_RUN + 1];
1324 int last, run, level, start, end, i;
1326 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1327 if (static_store && rl->max_level[0])
1330 /* compute max_level[], max_run[] and index_run[] */
1331 for (last = 0; last < 2; last++) {
// NOTE(review): the start/end selection for this iteration is elided in
// this listing; the loop below scans [start, end) of the code table.
// rl->n is the "invalid" sentinel for index_run entries.
1340 memset(max_level, 0, MAX_RUN + 1);
1341 memset(max_run, 0, MAX_LEVEL + 1);
1342 memset(index_run, rl->n, MAX_RUN + 1);
1343 for (i = start; i < end; i++) {
1344 run = rl->table_run[i];
1345 level = rl->table_level[i];
// Record only the first code index seen for each run value.
1346 if (index_run[run] == rl->n)
1348 if (level > max_level[run])
1349 max_level[run] = level;
1350 if (run > max_run[level])
1351 max_run[level] = run;
// Static layout inside static_store[last]:
//   [0 .. MAX_RUN]                       max_level
//   [MAX_RUN+1 .. MAX_RUN+MAX_LEVEL+1]   max_run
//   [MAX_RUN+MAX_LEVEL+2 .. ]            index_run
1354 rl->max_level[last] = static_store[last];
1356 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1357 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1359 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1361 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1362 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1364 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1366 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1367 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1371 av_cold void ff_init_vlc_rl(RLTable *rl)
// Pre-expands the RLTable's VLC into per-qscale RL_VLC tables (rl_vlc[q]),
// folding the dequantization (level * qmul + qadd) into the stored level so
// the decoder can skip it per-coefficient.
1375 for (q = 0; q < 32; q++) {
// NOTE(review): the qmul derivation for this q is elided in the listing.
1377 int qadd = (q - 1) | 1;
1383 for (i = 0; i < rl->vlc.table_size; i++) {
1384 int code = rl->vlc.table[i][0];
1385 int len = rl->vlc.table[i][1];
// Special table entries: len==0 marks an illegal code, len<0 means the
// entry chains into a sub-table (more bits needed), code==rl->n is escape.
1388 if (len == 0) { // illegal code
1391 } else if (len < 0) { // more bits needed
1395 if (code == rl->n) { // esc
// Normal code: bake run (+1, +192 offset for "last" codes) and the
// dequantized level into the flat RL_VLC entry.
1399 run = rl->table_run[code] + 1;
1400 level = rl->table_level[code] * qmul + qadd;
1401 if (code >= rl->last) run += 192;
1404 rl->rl_vlc[q][i].len = len;
1405 rl->rl_vlc[q][i].level = level;
1406 rl->rl_vlc[q][i].run = run;
1411 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
// Unreferences every non-reference picture in the pool; the picture pointed
// to by current_picture_ptr is kept unless remove_current is set.
1415 /* release non reference frames */
1416 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1417 if (!s->picture[i].reference &&
1418 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1419 ff_mpeg_unref_picture(s, &s->picture[i]);
1424 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
// Tests whether a pool slot may be recycled. NOTE(review): the return
// statements for each test are elided in this listing; a slot counts as
// unused when it has no buffer, or needs realloc and is not a delayed ref,
// while the last-picture slot is never considered unused.
1426 if (pic == s->last_picture_ptr)
1428 if (pic->f.buf[0] == NULL)
1430 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1435 static int find_unused_picture(MpegEncContext *s, int shared)
// Returns the index of a recyclable slot in s->picture[]. First pass prefers
// completely empty slots (no buffer), second pass accepts anything
// pic_is_unused() allows. Exhaustion is treated as an internal error below.
1440 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1441 if (s->picture[i].f.buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1445 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1446 if (pic_is_unused(s, &s->picture[i]))
1451 av_log(s->avctx, AV_LOG_FATAL,
1452 "Internal error, picture buffer overflow\n");
1453 /* We could return -1, but the codec would crash trying to draw into a
1454 * non-existing frame anyway. This is safer than waiting for a random crash.
1455 * Also the return of this is never useful, an encoder must only allocate
1456 * as much as allowed in the specification. This has no relationship to how
1457 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1458 * enough for such valid streams).
1459 * Plus, a decoder has to check stream validity and remove frames if too
1460 * many reference frames are around. Waiting for "OOM" is not correct at
1461 * all. Similarly, missing reference frames have to be replaced by
1462 * interpolated/MC frames, anything else is a bug in the codec ...
1468 int ff_find_unused_picture(MpegEncContext *s, int shared)
// Public wrapper around find_unused_picture(): if the chosen slot is marked
// needs_realloc, it is fully reset (tables freed, buffers unreffed, frame
// defaults restored) before its index is returned.
1470 int ret = find_unused_picture(s, shared);
1472 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1473 if (s->picture[ret].needs_realloc) {
1474 s->picture[ret].needs_realloc = 0;
1475 free_picture_tables(&s->picture[ret]);
1476 ff_mpeg_unref_picture(s, &s->picture[ret]);
1477 avcodec_get_frame_defaults(&s->picture[ret].f);
1483 static void update_noise_reduction(MpegEncContext *s)
// Refreshes the encoder's per-coefficient noise-reduction offsets from the
// accumulated DCT error statistics, separately for intra and inter blocks.
1487 for (intra = 0; intra < 2; intra++) {
// Halve the accumulators periodically so the statistics stay adaptive
// and do not overflow.
1488 if (s->dct_count[intra] > (1 << 16)) {
1489 for (i = 0; i < 64; i++) {
1490 s->dct_error_sum[intra][i] >>= 1;
1492 s->dct_count[intra] >>= 1;
// offset = noise_reduction * count / avg_error, rounded; +1 avoids /0.
1495 for (i = 0; i < 64; i++) {
1496 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1497 s->dct_count[intra] +
1498 s->dct_error_sum[intra][i] / 2) /
1499 (s->dct_error_sum[intra][i] + 1);
1505 * generic function for encode/decode called after coding/decoding
1506 * the header and before a frame is coded/decoded.
1508 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
// Called after the header is parsed and before a frame is coded/decoded:
// releases stale pictures, selects/allocates the current picture, shuffles
// the last/next reference pointers, allocates gray dummy references when a
// stream starts on a non-key frame, and selects the dequantizer functions.
// Returns 0 on success, negative on error. NOTE(review): several error
// checks/returns are elided in this listing.
1514 if (!ff_thread_can_start_frame(avctx)) {
1515 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1519 /* mark & release old frames */
// A non-B frame consumes the previous "last" reference (unless it is also
// the "next" reference, which stays alive).
1520 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1521 s->last_picture_ptr != s->next_picture_ptr &&
1522 s->last_picture_ptr->f.buf[0]) {
1523 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1526 /* release forgotten pictures */
1527 /* if (mpeg124/h263) */
// "Zombie" pictures: still flagged as references but no longer reachable
// through last/next pointers; only legitimate with frame threading.
1529 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1530 if (&s->picture[i] != s->last_picture_ptr &&
1531 &s->picture[i] != s->next_picture_ptr &&
1532 s->picture[i].reference && !s->picture[i].needs_realloc) {
1533 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1534 av_log(avctx, AV_LOG_ERROR,
1535 "releasing zombie picture\n");
1536 ff_mpeg_unref_picture(s, &s->picture[i]);
1541 ff_mpeg_unref_picture(s, &s->current_picture);
1544 ff_release_unused_pictures(s, 1);
1546 if (s->current_picture_ptr &&
1547 s->current_picture_ptr->f.buf[0] == NULL) {
1548 // we already have a unused image
1549 // (maybe it was set before reading the header)
1550 pic = s->current_picture_ptr;
1552 i = ff_find_unused_picture(s, 0);
1554 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1557 pic = &s->picture[i];
// Droppable frames are never references; B frames likewise.
1561 if (!s->droppable) {
1562 if (s->pict_type != AV_PICTURE_TYPE_B)
1566 pic->f.coded_picture_number = s->coded_picture_number++;
1568 if (ff_alloc_picture(s, pic, 0) < 0)
1571 s->current_picture_ptr = pic;
1572 // FIXME use only the vars from current_pic
1573 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1574 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1575 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
// For field pictures, derive top_field_first from which field comes first.
1576 if (s->picture_structure != PICT_FRAME)
1577 s->current_picture_ptr->f.top_field_first =
1578 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1580 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1581 !s->progressive_sequence;
1582 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1585 s->current_picture_ptr->f.pict_type = s->pict_type;
1586 // if (s->flags && CODEC_FLAG_QSCALE)
1587 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1588 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1590 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1591 s->current_picture_ptr)) < 0)
// Reference shuffle: a non-B frame promotes next->last, current->next.
1594 if (s->pict_type != AV_PICTURE_TYPE_B) {
1595 s->last_picture_ptr = s->next_picture_ptr;
1597 s->next_picture_ptr = s->current_picture_ptr;
1599 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1600 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1601 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1602 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1603 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1604 s->pict_type, s->droppable);
// Missing backward reference (stream starts on P/B, or field-based start):
// fabricate a mid-gray dummy "last" picture so MC has something to read.
1606 if ((s->last_picture_ptr == NULL ||
1607 s->last_picture_ptr->f.buf[0] == NULL) &&
1608 (s->pict_type != AV_PICTURE_TYPE_I ||
1609 s->picture_structure != PICT_FRAME)) {
1610 int h_chroma_shift, v_chroma_shift;
1611 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1612 &h_chroma_shift, &v_chroma_shift);
1613 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f.buf[0])
1614 av_log(avctx, AV_LOG_DEBUG,
1615 "allocating dummy last picture for B frame\n");
1616 else if (s->pict_type != AV_PICTURE_TYPE_I)
1617 av_log(avctx, AV_LOG_ERROR,
1618 "warning: first frame is no keyframe\n");
1619 else if (s->picture_structure != PICT_FRAME)
1620 av_log(avctx, AV_LOG_DEBUG,
1621 "allocate dummy last picture for field based first keyframe\n");
1623 /* Allocate a dummy frame */
1624 i = ff_find_unused_picture(s, 0);
1626 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1629 s->last_picture_ptr = &s->picture[i];
1630 s->last_picture_ptr->f.key_frame = 0;
1631 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1632 s->last_picture_ptr = NULL;
// Fill the dummy with mid-gray (0x80 for luma and chroma planes).
1636 memset(s->last_picture_ptr->f.data[0], 0x80,
1637 avctx->height * s->last_picture_ptr->f.linesize[0]);
1638 memset(s->last_picture_ptr->f.data[1], 0x80,
1639 (avctx->height >> v_chroma_shift) *
1640 s->last_picture_ptr->f.linesize[1]);
1641 memset(s->last_picture_ptr->f.data[2], 0x80,
1642 (avctx->height >> v_chroma_shift) *
1643 s->last_picture_ptr->f.linesize[2]);
// FLV1/H263 use luma 16 ("black" in studio range) for the dummy instead.
1645 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1646 for(i=0; i<avctx->height; i++)
1647 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
// Mark both fields of the dummy as fully decoded for frame threading.
1650 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1651 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
// Same trick for a missing forward reference of a B frame.
1653 if ((s->next_picture_ptr == NULL ||
1654 s->next_picture_ptr->f.buf[0] == NULL) &&
1655 s->pict_type == AV_PICTURE_TYPE_B) {
1656 /* Allocate a dummy frame */
1657 i = ff_find_unused_picture(s, 0);
1659 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1662 s->next_picture_ptr = &s->picture[i];
1663 s->next_picture_ptr->f.key_frame = 0;
1664 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1665 s->next_picture_ptr = NULL;
1668 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1669 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1672 #if 0 // BUFREF-FIXME
1673 memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1674 memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
// Refresh the embedded last/next Picture copies from their pointers.
1676 if (s->last_picture_ptr) {
1677 ff_mpeg_unref_picture(s, &s->last_picture);
1678 if (s->last_picture_ptr->f.buf[0] &&
1679 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1680 s->last_picture_ptr)) < 0)
1683 if (s->next_picture_ptr) {
1684 ff_mpeg_unref_picture(s, &s->next_picture);
1685 if (s->next_picture_ptr->f.buf[0] &&
1686 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1687 s->next_picture_ptr)) < 0)
1691 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1692 s->last_picture_ptr->f.buf[0]));
// Field pictures: offset the bottom field by one line and double linesizes
// so the MC code addresses a single field of the frame buffers.
1694 if (s->picture_structure!= PICT_FRAME) {
1696 for (i = 0; i < 4; i++) {
1697 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1698 s->current_picture.f.data[i] +=
1699 s->current_picture.f.linesize[i];
1701 s->current_picture.f.linesize[i] *= 2;
1702 s->last_picture.f.linesize[i] *= 2;
1703 s->next_picture.f.linesize[i] *= 2;
1707 s->err_recognition = avctx->err_recognition;
1709 /* set dequantizer, we can't do it during init as
1710 * it might change for mpeg4 and we can't do it in the header
1711 * decode as init is not called for mpeg4 there yet */
1712 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1713 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1714 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1715 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1716 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1717 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1719 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1720 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1723 if (s->dct_error_sum) {
1724 av_assert2(s->avctx->noise_reduction && s->encoding);
1725 update_noise_reduction(s);
1729 FF_DISABLE_DEPRECATION_WARNINGS
// Deprecated XvMC hardware path takes over field setup when enabled.
1730 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1731 return ff_xvmc_field_start(s, avctx);
1732 FF_ENABLE_DEPRECATION_WARNINGS
1733 #endif /* FF_API_XVMC */
1738 /* generic function for encode/decode called after a
1739 * frame has been coded/decoded. */
1740 void ff_MPV_frame_end(MpegEncContext *s)
// Called after a frame is fully coded/decoded: finishes the XvMC field,
// draws edge padding for unrestricted-MV reference frames, updates
// last-frame bookkeeping, copies the current Picture back into the pool,
// and reports decode completion to frame threads.
1743 FF_DISABLE_DEPRECATION_WARNINGS
1744 /* redraw edges for the frame if decoding didn't complete */
1745 // just to make sure that all data is rendered.
1746 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1747 ff_xvmc_field_end(s);
1749 FF_ENABLE_DEPRECATION_WARNINGS
1750 #endif /* FF_API_XVMC */
// Edge drawing is only needed for software-decoded reference frames with
// unrestricted motion vectors and no EMU_EDGE mode.
1751 if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1752 !s->avctx->hwaccel &&
1753 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1754 s->unrestricted_mv &&
1755 s->current_picture.reference &&
1757 !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1760 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1761 int hshift = desc->log2_chroma_w;
1762 int vshift = desc->log2_chroma_h;
// Pad luma, then both chroma planes scaled by the subsampling shifts.
1763 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1764 s->h_edge_pos, s->v_edge_pos,
1765 EDGE_WIDTH, EDGE_WIDTH,
1766 EDGE_TOP | EDGE_BOTTOM);
1767 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1768 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1769 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1770 EDGE_TOP | EDGE_BOTTOM);
1771 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1772 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1773 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1774 EDGE_TOP | EDGE_BOTTOM);
1779 s->last_pict_type = s->pict_type;
1780 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1781 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1782 s->last_non_b_pict_type = s->pict_type;
1785 /* copy back current_picture variables */
// Locate the pool slot current_picture was taken from and sync it back.
1786 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1787 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1788 s->picture[i] = s->current_picture;
1792 av_assert0(i < MAX_PICTURE_COUNT);
1795 // clear copies, to avoid confusion
1797 memset(&s->last_picture, 0, sizeof(Picture));
1798 memset(&s->next_picture, 0, sizeof(Picture));
1799 memset(&s->current_picture, 0, sizeof(Picture));
1801 s->avctx->coded_frame = &s->current_picture_ptr->f;
1803 if (s->current_picture.reference)
1804 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1808 * Draw a line from (ex, ey) -> (sx, sy).
1809 * @param w width of the image
1810 * @param h height of the image
1811 * @param stride stride/linesize of the image
1812 * @param color color of the arrow
1814 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1815 int w, int h, int stride, int color)
// Additively draws an anti-aliased line between the two clipped endpoints
// into a single-plane buffer, using 16.16 fixed-point interpolation along
// the major axis. Used by the motion-vector debug visualization.
1819 sx = av_clip(sx, 0, w - 1);
1820 sy = av_clip(sy, 0, h - 1);
1821 ex = av_clip(ex, 0, w - 1);
1822 ey = av_clip(ey, 0, h - 1);
1824 buf[sy * stride + sx] += color;
// Mostly-horizontal case: step in x, interpolate y.
1826 if (FFABS(ex - sx) > FFABS(ey - sy)) {
// NOTE(review): the listing elides the endpoint-ordering condition; the
// swap normalizes so iteration runs in increasing major coordinate.
1828 FFSWAP(int, sx, ex);
1829 FFSWAP(int, sy, ey);
1831 buf += sx + sy * stride;
// f = slope in 16.16 fixed point; fr is the fractional coverage used to
// split the color between the two adjacent rows (anti-aliasing).
1833 f = ((ey - sy) << 16) / ex;
1834 for (x = 0; x <= ex; x++) {
1836 fr = (x * f) & 0xFFFF;
1837 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1838 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
// Mostly-vertical case: step in y, interpolate x.
1842 FFSWAP(int, sx, ex);
1843 FFSWAP(int, sy, ey);
1845 buf += sx + sy * stride;
1848 f = ((ex - sx) << 16) / ey;
1851 for(y= 0; y <= ey; y++){
1853 fr = (y*f) & 0xFFFF;
1854 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1855 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1861 * Draw an arrow from (ex, ey) -> (sx, sy).
1862 * @param w width of the image
1863 * @param h height of the image
1864 * @param stride stride/linesize of the image
1865 * @param color color of the arrow
1867 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1868 int ey, int w, int h, int stride, int color)
// Draws a motion-vector arrow: the shaft via draw_line() plus, for vectors
// longer than 3 pixels, two short head strokes at the (sx, sy) end.
// Endpoints may lie up to 100 px outside the image; draw_line clips.
1872 sx = av_clip(sx, -100, w + 100);
1873 sy = av_clip(sy, -100, h + 100);
1874 ex = av_clip(ex, -100, w + 100);
1875 ey = av_clip(ey, -100, h + 100);
1880 if (dx * dx + dy * dy > 3 * 3) {
// Normalize the head strokes to a fixed length (4.4 fixed point).
1883 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1885 // FIXME subpixel accuracy
1886 rx = ROUNDED_DIV(rx * 3 << 4, length);
1887 ry = ROUNDED_DIV(ry * 3 << 4, length);
1889 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1890 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1892 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1896 * Print debugging info for the given picture.
1898 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1900 int mb_width, int mb_height, int mb_stride, int quarter_sample)
// Prints and/or paints per-macroblock debug info for a decoded picture:
// a textual MB map (skip count, QP, MB type) on the log, and optional
// visual overlays (motion vectors, QP shading, MB-type coloring) drawn
// directly into the output frame. No-op for hwaccel/VDPAU pictures.
1902 if (avctx->hwaccel || !p || !p->mb_type
1903 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
// ---- Textual per-MB dump (FF_DEBUG_SKIP / QP / MB_TYPE) ----
1907 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1910 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1911 av_get_picture_type_char(pict->pict_type));
1912 for (y = 0; y < mb_height; y++) {
1913 for (x = 0; x < mb_width; x++) {
1914 if (avctx->debug & FF_DEBUG_SKIP) {
1915 int count = mbskip_table[x + y * mb_stride];
1918 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1920 if (avctx->debug & FF_DEBUG_QP) {
1921 av_log(avctx, AV_LOG_DEBUG, "%2d",
1922 p->qscale_table[x + y * mb_stride]);
1924 if (avctx->debug & FF_DEBUG_MB_TYPE) {
1925 int mb_type = p->mb_type[x + y * mb_stride];
1926 // Type & MV direction
1927 if (IS_PCM(mb_type))
1928 av_log(avctx, AV_LOG_DEBUG, "P");
1929 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1930 av_log(avctx, AV_LOG_DEBUG, "A");
1931 else if (IS_INTRA4x4(mb_type))
1932 av_log(avctx, AV_LOG_DEBUG, "i");
1933 else if (IS_INTRA16x16(mb_type))
1934 av_log(avctx, AV_LOG_DEBUG, "I");
1935 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1936 av_log(avctx, AV_LOG_DEBUG, "d");
1937 else if (IS_DIRECT(mb_type))
1938 av_log(avctx, AV_LOG_DEBUG, "D");
1939 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1940 av_log(avctx, AV_LOG_DEBUG, "g");
1941 else if (IS_GMC(mb_type))
1942 av_log(avctx, AV_LOG_DEBUG, "G");
1943 else if (IS_SKIP(mb_type))
1944 av_log(avctx, AV_LOG_DEBUG, "S");
1945 else if (!USES_LIST(mb_type, 1))
1946 av_log(avctx, AV_LOG_DEBUG, ">");
1947 else if (!USES_LIST(mb_type, 0))
1948 av_log(avctx, AV_LOG_DEBUG, "<");
1950 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1951 av_log(avctx, AV_LOG_DEBUG, "X");
// Second character: partitioning (+ 8x8, - 16x8, | 8x16, space 16x16).
1955 if (IS_8X8(mb_type))
1956 av_log(avctx, AV_LOG_DEBUG, "+");
1957 else if (IS_16X8(mb_type))
1958 av_log(avctx, AV_LOG_DEBUG, "-");
1959 else if (IS_8X16(mb_type))
1960 av_log(avctx, AV_LOG_DEBUG, "|");
1961 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1962 av_log(avctx, AV_LOG_DEBUG, " ");
1964 av_log(avctx, AV_LOG_DEBUG, "?");
// Third character: '=' marks interlaced coding.
1967 if (IS_INTERLACED(mb_type))
1968 av_log(avctx, AV_LOG_DEBUG, "=");
1970 av_log(avctx, AV_LOG_DEBUG, " ");
1973 av_log(avctx, AV_LOG_DEBUG, "\n");
// ---- Visual overlays drawn into the frame ----
1977 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1978 (avctx->debug_mv)) {
1979 const int shift = 1 + quarter_sample;
1983 int h_chroma_shift, v_chroma_shift, block_height;
1984 const int width = avctx->width;
1985 const int height = avctx->height;
// H.264/SVQ3 store MVs at 4x4 granularity (log2 = 2), others at 8x8.
1986 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1987 const int mv_stride = (mb_width << mv_sample_log2) +
1988 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1990 *low_delay = 0; // needed to see the vectors without trashing the buffers
1992 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
// The frame is modified in place, so it must be writable.
1994 av_frame_make_writable(pict);
1996 pict->opaque = NULL;
1997 ptr = pict->data[0];
1998 block_height = 16 >> v_chroma_shift;
2000 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2002 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2003 const int mb_index = mb_x + mb_y * mb_stride;
// Motion-vector arrows: type 0 = P forward, 1 = B forward, 2 = B backward.
2004 if ((avctx->debug_mv) && p->motion_val[0]) {
2006 for (type = 0; type < 3; type++) {
2010 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2011 (pict->pict_type!= AV_PICTURE_TYPE_P))
2016 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2017 (pict->pict_type!= AV_PICTURE_TYPE_B))
2022 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2023 (pict->pict_type!= AV_PICTURE_TYPE_B))
2028 if (!USES_LIST(p->mb_type[mb_index], direction))
// One arrow per partition, anchored at the partition center.
2031 if (IS_8X8(p->mb_type[mb_index])) {
2033 for (i = 0; i < 4; i++) {
2034 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2035 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2036 int xy = (mb_x * 2 + (i & 1) +
2037 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2038 int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
2039 int my = (p->motion_val[direction][xy][1] >> shift) + sy;
2040 draw_arrow(ptr, sx, sy, mx, my, width,
2041 height, pict->linesize[0], 100);
2043 } else if (IS_16X8(p->mb_type[mb_index])) {
2045 for (i = 0; i < 2; i++) {
2046 int sx = mb_x * 16 + 8;
2047 int sy = mb_y * 16 + 4 + 8 * i;
2048 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2049 int mx = (p->motion_val[direction][xy][0] >> shift);
2050 int my = (p->motion_val[direction][xy][1] >> shift);
2052 if (IS_INTERLACED(p->mb_type[mb_index]))
2055 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2056 height, pict->linesize[0], 100);
2058 } else if (IS_8X16(p->mb_type[mb_index])) {
2060 for (i = 0; i < 2; i++) {
2061 int sx = mb_x * 16 + 4 + 8 * i;
2062 int sy = mb_y * 16 + 8;
2063 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2064 int mx = p->motion_val[direction][xy][0] >> shift;
2065 int my = p->motion_val[direction][xy][1] >> shift;
2067 if (IS_INTERLACED(p->mb_type[mb_index]))
2070 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2071 height, pict->linesize[0], 100);
// Default: single 16x16 vector from the MB center.
2074 int sx= mb_x * 16 + 8;
2075 int sy= mb_y * 16 + 8;
2076 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2077 int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2078 int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2079 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
// QP visualization: shade the MB's chroma block by its qscale.
2083 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2084 uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2085 0x0101010101010101ULL;
2087 for (y = 0; y < block_height; y++) {
2088 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2089 (block_height * mb_y + y) *
2090 pict->linesize[1]) = c;
2091 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2092 (block_height * mb_y + y) *
2093 pict->linesize[2]) = c;
// MB-type visualization: color the chroma by type, mark partitions in luma.
2096 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2098 int mb_type = p->mb_type[mb_index];
// Map a hue angle and saturation radius to U/V values.
2101 #define COLOR(theta, r) \
2102 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2103 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2107 if (IS_PCM(mb_type)) {
2109 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2110 IS_INTRA16x16(mb_type)) {
2112 } else if (IS_INTRA4x4(mb_type)) {
2114 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2116 } else if (IS_DIRECT(mb_type)) {
2118 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2120 } else if (IS_GMC(mb_type)) {
2122 } else if (IS_SKIP(mb_type)) {
2124 } else if (!USES_LIST(mb_type, 1)) {
2126 } else if (!USES_LIST(mb_type, 0)) {
2129 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
// Replicate the single-byte U/V values across a 64-bit store.
2133 u *= 0x0101010101010101ULL;
2134 v *= 0x0101010101010101ULL;
2135 for (y = 0; y < block_height; y++) {
2136 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2137 (block_height * mb_y + y) * pict->linesize[1]) = u;
2138 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2139 (block_height * mb_y + y) * pict->linesize[2]) = v;
// Segmentation lines: XOR luma so the markers survive any background.
2143 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2144 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2145 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2146 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2147 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2149 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2150 for (y = 0; y < 16; y++)
2151 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2152 pict->linesize[0]] ^= 0x80;
// For 8x8 partitions with sub-8x8 MV storage, mark sub-blocks whose
// vectors differ from their neighbors.
2154 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2155 int dm = 1 << (mv_sample_log2 - 2);
2156 for (i = 0; i < 4; i++) {
2157 int sx = mb_x * 16 + 8 * (i & 1);
2158 int sy = mb_y * 16 + 8 * (i >> 1);
2159 int xy = (mb_x * 2 + (i & 1) +
2160 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2162 int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2163 if (mv[0] != mv[dm] ||
2164 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2165 for (y = 0; y < 8; y++)
2166 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2167 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2168 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2169 pict->linesize[0]) ^= 0x8080808080808080ULL;
2173 if (IS_INTERLACED(mb_type) &&
2174 avctx->codec->id == AV_CODEC_ID_H264) {
// Clear the skip flag so the overlay is redrawn next time.
2178 mbskip_table[mb_index] = 0;
2184 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
// Convenience wrapper: forwards the MpegEncContext geometry fields to
// ff_print_debug_info2().
2186 ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2187 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2190 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
// Attaches the picture's qscale table to the output frame as side data.
// The buffer reference is offset past the table's edge rows so the exported
// view starts at the first real macroblock row. Returns 0 or an AVERROR.
2192 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2193 int offset = 2*s->mb_stride + 1;
// NOTE(review): the NULL check on 'ref' guarding this return is elided
// in this listing.
2195 return AVERROR(ENOMEM);
2196 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2197 ref->size -= offset;
2198 ref->data += offset;
2199 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
2202 static inline int hpel_motion_lowres(MpegEncContext *s,
2203 uint8_t *dest, uint8_t *src,
2204 int field_based, int field_select,
2205 int src_x, int src_y,
2206 int width, int height, ptrdiff_t stride,
2207 int h_edge_pos, int v_edge_pos,
2208 int w, int h, h264_chroma_mc_func *pix_op,
2209 int motion_x, int motion_y)
// Applies one half-pel motion vector in lowres decoding mode: computes the
// subpel phase at the reduced resolution, falls back to the emulated-edge
// buffer when the reference block overlaps the picture border, and runs the
// chroma-style MC kernel. NOTE(review): return value lines are elided.
2211 const int lowres = s->avctx->lowres;
// MC function table has 4 entries; clamp for lowres > 3.
2212 const int op_index = FFMIN(lowres, 3);
2213 const int s_mask = (2 << lowres) - 1;
2217 if (s->quarter_sample) {
// Subpel phase and integer position at lowres scale.
2222 sx = motion_x & s_mask;
2223 sy = motion_y & s_mask;
2224 src_x += motion_x >> lowres + 1;
2225 src_y += motion_y >> lowres + 1;
2227 src += src_y * stride + src_x;
// Source block pokes outside the padded reference: use edge emulation.
2229 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2230 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2231 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2232 s->linesize, s->linesize,
2233 w + 1, (h + 1) << field_based,
2234 src_x, src_y << field_based,
2235 h_edge_pos, v_edge_pos);
2236 src = s->edge_emu_buffer;
// Rescale the subpel phase to the kernel's expected 1/8-pel range.
2240 sx = (sx << 2) >> lowres;
2241 sy = (sy << 2) >> lowres;
2244 pix_op[op_index](dest, src, stride, h, sx, sy);
2248 /* apply one mpeg motion vector to the three components */
/*
 * Apply one MPEG motion vector to all three planes (Y, Cb, Cr) at reduced
 * (lowres) resolution.  Handles frame- and field-based prediction and the
 * different chroma MV derivations of H.263, H.261 and MPEG-1/2.
 * NOTE(review): parts of the parameter list and several interior lines
 * (field handling, some else-branches, closing braces) are not visible in
 * this excerpt; only the visible statements are annotated.
 */
2249 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2256 uint8_t **ref_picture,
2257 h264_chroma_mc_func *pix_op,
2258 int motion_x, int motion_y,
2261 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2262 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2263 ptrdiff_t uvlinesize, linesize;
2264 const int lowres = s->avctx->lowres;
2265 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2266 const int block_s = 8>>lowres;
2267 const int s_mask = (2 << lowres) - 1;
2268 const int h_edge_pos = s->h_edge_pos >> lowres;
2269 const int v_edge_pos = s->v_edge_pos >> lowres;
/* for field pictures every second line is skipped -> double the stride */
2270 linesize = s->current_picture.f.linesize[0] << field_based;
2271 uvlinesize = s->current_picture.f.linesize[1] << field_based;
2273 // FIXME obviously not perfect but qpel will not work in lowres anyway
2274 if (s->quarter_sample) {
2280 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
/* split luma MV into sub-pel fraction and integer source position
 * ('>> lowres + 1' parses as '>> (lowres + 1)') */
2283 sx = motion_x & s_mask;
2284 sy = motion_y & s_mask;
2285 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2286 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* derive the chroma MV; the rule depends on the output format */
2288 if (s->out_format == FMT_H263) {
2289 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2290 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2291 uvsrc_x = src_x >> 1;
2292 uvsrc_y = src_y >> 1;
2293 } else if (s->out_format == FMT_H261) {
2294 // even chroma mv's are full pel in H261
2297 uvsx = (2 * mx) & s_mask;
2298 uvsy = (2 * my) & s_mask;
2299 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2300 uvsrc_y = mb_y * block_s + (my >> lowres);
/* MPEG-1/2 style chroma, 4:2:0 vs 4:2:2/4:4:4 split below */
2302 if(s->chroma_y_shift){
2307 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2308 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2310 if(s->chroma_x_shift){
2314 uvsy = motion_y & s_mask;
2316 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2319 uvsx = motion_x & s_mask;
2320 uvsy = motion_y & s_mask;
2327 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2328 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2329 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* out-of-picture source area -> fall back to edge emulation buffers */
2331 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2332 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2333 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2334 linesize >> field_based, linesize >> field_based,
2335 17, 17 + field_based,
2336 src_x, src_y << field_based, h_edge_pos,
2338 ptr_y = s->edge_emu_buffer;
/* chroma is skipped entirely in gray-only decoding */
2339 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2340 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2341 s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
2342 uvlinesize >> field_based, uvlinesize >> field_based,
2344 uvsrc_x, uvsrc_y << field_based,
2345 h_edge_pos >> 1, v_edge_pos >> 1);
2346 s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
2347 uvlinesize >> field_based,uvlinesize >> field_based,
2349 uvsrc_x, uvsrc_y << field_based,
2350 h_edge_pos >> 1, v_edge_pos >> 1);
2352 ptr_cr = uvbuf + 16;
2356 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
/* bottom field: start output (and source) one line down */
2358 dest_y += s->linesize;
2359 dest_cb += s->uvlinesize;
2360 dest_cr += s->uvlinesize;
2364 ptr_y += s->linesize;
2365 ptr_cb += s->uvlinesize;
2366 ptr_cr += s->uvlinesize;
/* rescale sub-pel fractions to 1/8-pel units and run the MC functions */
2369 sx = (sx << 2) >> lowres;
2370 sy = (sy << 2) >> lowres;
2371 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2373 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2374 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2375 uvsx = (uvsx << 2) >> lowres;
2376 uvsy = (uvsy << 2) >> lowres;
2378 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2379 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2382 // FIXME h261 lowres loop filter
/*
 * Chroma motion compensation for a 4MV (four 8x8 luma vectors) macroblock
 * at lowres: the four luma MVs are averaged into one chroma MV with the
 * special H.263 rounding, then Cb and Cr are predicted with it.
 * NOTE(review): some interior lines (local declarations, MV averaging,
 * closing braces) are not visible in this excerpt.
 */
2385 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2386 uint8_t *dest_cb, uint8_t *dest_cr,
2387 uint8_t **ref_picture,
2388 h264_chroma_mc_func * pix_op,
2391 const int lowres = s->avctx->lowres;
2392 const int op_index = FFMIN(lowres, 3);
2393 const int block_s = 8 >> lowres;
2394 const int s_mask = (2 << lowres) - 1;
/* chroma edges are at half the luma resolution ('>> (lowres + 1)') */
2395 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2396 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2397 int emu = 0, src_x, src_y, sx, sy;
2401 if (s->quarter_sample) {
2406 /* In case of 8X8, we construct a single chroma motion vector
2407 with a special rounding */
2408 mx = ff_h263_round_chroma(mx);
2409 my = ff_h263_round_chroma(my);
2413 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2414 src_y = s->mb_y * block_s + (my >> lowres + 1);
2416 offset = src_y * s->uvlinesize + src_x;
2417 ptr = ref_picture[1] + offset;
/* out-of-picture chroma source -> edge emulation */
2418 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2419 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2420 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2421 s->uvlinesize, s->uvlinesize,
2423 src_x, src_y, h_edge_pos, v_edge_pos);
2424 ptr = s->edge_emu_buffer;
2427 sx = (sx << 2) >> lowres;
2428 sy = (sy << 2) >> lowres;
2429 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr uses the same offset and (if emu was set) the same emulated edge */
2431 ptr = ref_picture[2] + offset;
2433 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2434 s->uvlinesize, s->uvlinesize,
2436 src_x, src_y, h_edge_pos, v_edge_pos);
2437 ptr = s->edge_emu_buffer;
2439 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2443 * motion compensation of a single macroblock
2445 * @param dest_y luma destination pointer
2446 * @param dest_cb chroma cb/u destination pointer
2447 * @param dest_cr chroma cr/v destination pointer
2448 * @param dir direction (0->forward, 1->backward)
2449 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2450 * @param pix_op halfpel motion compensation function (average or put normally)
2451 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/*
 * Dispatch lowres motion compensation for one macroblock according to
 * s->mv_type (16x16, 8x8/4MV, field, 16x8, dual-prime), taking the motion
 * vectors from s->mv.
 * NOTE(review): case labels, some braces and a few statements are not
 * visible in this excerpt; only the visible statements are annotated.
 */
2453 static inline void MPV_motion_lowres(MpegEncContext *s,
2454 uint8_t *dest_y, uint8_t *dest_cb,
2456 int dir, uint8_t **ref_picture,
2457 h264_chroma_mc_func *pix_op)
2461 const int lowres = s->avctx->lowres;
2462 const int block_s = 8 >>lowres;
2467 switch (s->mv_type) {
/* one MV for the whole 16x16 macroblock */
2469 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2471 ref_picture, pix_op,
2472 s->mv[dir][0][0], s->mv[dir][0][1],
/* 4MV: one vector per 8x8 luma block; chroma is handled separately below */
2478 for (i = 0; i < 4; i++) {
2479 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2480 s->linesize) * block_s,
2481 ref_picture[0], 0, 0,
2482 (2 * mb_x + (i & 1)) * block_s,
2483 (2 * mb_y + (i >> 1)) * block_s,
2484 s->width, s->height, s->linesize,
2485 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2486 block_s, block_s, pix_op,
2487 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate the four luma MVs for the averaged chroma MV */
2489 mx += s->mv[dir][i][0];
2490 my += s->mv[dir][i][1];
2493 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2494 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field prediction inside a frame picture: do top then bottom field */
2498 if (s->picture_structure == PICT_FRAME) {
2500 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2501 1, 0, s->field_select[dir][0],
2502 ref_picture, pix_op,
2503 s->mv[dir][0][0], s->mv[dir][0][1],
2506 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2507 1, 1, s->field_select[dir][1],
2508 ref_picture, pix_op,
2509 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture referencing the opposite parity of the current frame */
2512 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2513 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2514 ref_picture = s->current_picture_ptr->f.data;
2517 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2518 0, 0, s->field_select[dir][0],
2519 ref_picture, pix_op,
2521 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two vectors, upper and lower half of the macroblock */
2525 for (i = 0; i < 2; i++) {
2526 uint8_t **ref2picture;
2528 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2529 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2530 ref2picture = ref_picture;
2532 ref2picture = s->current_picture_ptr->f.data;
2535 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2536 0, 0, s->field_select[dir][i],
2537 ref2picture, pix_op,
2538 s->mv[dir][i][0], s->mv[dir][i][1] +
2539 2 * block_s * i, block_s, mb_y >> 1);
2541 dest_y += 2 * block_s * s->linesize;
2542 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2543 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* dual prime: predict from both parities and average */
2547 if (s->picture_structure == PICT_FRAME) {
2548 for (i = 0; i < 2; i++) {
2550 for (j = 0; j < 2; j++) {
2551 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2553 ref_picture, pix_op,
2554 s->mv[dir][2 * i + j][0],
2555 s->mv[dir][2 * i + j][1],
2558 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2561 for (i = 0; i < 2; i++) {
2562 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2563 0, 0, s->picture_structure != i + 1,
2564 ref_picture, pix_op,
2565 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2566 2 * block_s, mb_y >> 1);
2568 // after put we make avg of the same block
2569 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2571 // opposite parity is always in the same
2572 // frame if this is second field
2573 if (!s->first_field) {
2574 ref_picture = s->current_picture_ptr->f.data;
2585 * find the lowest MB row referenced in the MVs
2587 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* track the extreme vertical MV components; qpel MVs are already in
 * quarter-pel units, half-pel ones are shifted up by one to match */
2589 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2590 int my, off, i, mvs;
/* field pictures / global MC: fall through to the conservative answer
 * (NOTE(review): the 'goto'/return target lines are not visible here) */
2592 if (s->picture_structure != PICT_FRAME || s->mcsel)
/* number of MVs to scan depends on the MV type (cases not visible) */
2595 switch (s->mv_type) {
2609 for (i = 0; i < mvs; i++) {
2610 my = s->mv[dir][i][1]<<qpel_shift;
2611 my_max = FFMAX(my_max, my);
2612 my_min = FFMIN(my_min, my);
/* convert the worst-case quarter-pel offset to macroblock rows (16 pel
 * = 64 quarter-pel), rounding up */
2615 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2617 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* conservative fallback: the whole reference frame may be needed */
2619 return s->mb_height-1;
2622 /* put block[] to dest[] */
/* Dequantize block i as intra and write (overwrite) the IDCT result to dest. */
2623 static inline void put_dct(MpegEncContext *s,
2624 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2626 s->dct_unquantize_intra(s, block, i, qscale);
2627 s->dsp.idct_put (dest, line_size, block);
2630 /* add block[] to dest[] */
/* IDCT block i and add the residual to dest; no-op if the block is empty
 * (block_last_index < 0 means no coded coefficients). */
2631 static inline void add_dct(MpegEncContext *s,
2632 int16_t *block, int i, uint8_t *dest, int line_size)
2634 if (s->block_last_index[i] >= 0) {
2635 s->dsp.idct_add (dest, line_size, block);
/* Dequantize block i as inter, IDCT it and add the residual to dest;
 * no-op for empty blocks. */
2639 static inline void add_dequant_dct(MpegEncContext *s,
2640 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2642 if (s->block_last_index[i] >= 0) {
2643 s->dct_unquantize_inter(s, block, i, qscale);
2645 s->dsp.idct_add (dest, line_size, block);
2650 * Clean dc, ac, coded_block for the current non-intra MB.
2652 void ff_clean_intra_table_entries(MpegEncContext *s)
2654 int wrap = s->b8_stride;
2655 int xy = s->block_index[0];
/* reset luma DC predictors of the four 8x8 blocks to the neutral value */
2658 s->dc_val[0][xy + 1 ] =
2659 s->dc_val[0][xy + wrap] =
2660 s->dc_val[0][xy + 1 + wrap] = 1024;
/* clear luma AC prediction values (two rows of two blocks) */
2662 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2663 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2664 if (s->msmpeg4_version>=3) {
2665 s->coded_block[xy ] =
2666 s->coded_block[xy + 1 ] =
2667 s->coded_block[xy + wrap] =
2668 s->coded_block[xy + 1 + wrap] = 0;
/* chroma predictors: one entry per macroblock */
2671 wrap = s->mb_stride;
2672 xy = s->mb_x + s->mb_y * wrap;
2674 s->dc_val[2][xy] = 1024;
2676 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2677 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
/* mark this MB as no longer intra */
2679 s->mbintra_table[xy]= 0;
2682 /* generic function called after a macroblock has been parsed by the
2683 decoder or after it has been encoded by the encoder.
2685 Important variables used:
2686 s->mb_intra : true if intra macroblock
2687 s->mv_dir : motion vector direction
2688 s->mv_type : motion vector type
2689 s->mv : motion vector
2690 s->interlaced_dct : true if interlaced dct used (mpeg2)
/*
 * Reconstruct one macroblock: motion compensation (normal or lowres path)
 * followed by dequantization + IDCT of the residual (inter) or of the intra
 * coefficients.  Called after the MB has been parsed (decoder) or encoded
 * (encoder); inputs are s->mb_intra, s->mv_dir, s->mv_type, s->mv and
 * s->interlaced_dct.
 * NOTE(review): this excerpt omits many interior lines (if/else heads,
 * braces, some assignments); annotations cover only the visible code.
 */
2692 static av_always_inline
2693 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2694 int lowres_flag, int is_mpeg12)
2696 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2699 FF_DISABLE_DEPRECATION_WARNINGS
/* XvMC accelerates the whole MB reconstruction; nothing else to do here */
2700 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2701 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2704 FF_ENABLE_DEPRECATION_WARNINGS
2705 #endif /* FF_API_XVMC */
2707 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2708 /* print DCT coefficients */
2710 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2712 for(j=0; j<64; j++){
2713 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2715 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2719 s->current_picture.qscale_table[mb_xy] = s->qscale;
2721 /* update DC predictors for P macroblocks */
2723 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2724 if(s->mbintra_table[mb_xy])
2725 ff_clean_intra_table_entries(s);
/* reset intra DC predictors to the neutral value for this precision */
2729 s->last_dc[2] = 128 << s->intra_dc_precision;
2732 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2733 s->mbintra_table[mb_xy]=1;
2735 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2736 uint8_t *dest_y, *dest_cb, *dest_cr;
2737 int dct_linesize, dct_offset;
2738 op_pixels_func (*op_pix)[4];
2739 qpel_mc_func (*op_qpix)[16];
2740 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2741 const int uvlinesize = s->current_picture.f.linesize[1];
2742 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2743 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2745 /* avoid copy if macroblock skipped in last frame too */
2746 /* skip only during decoding as we might trash the buffers during encoding a bit */
2748 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2750 if (s->mb_skipped) {
2752 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2754 } else if(!s->current_picture.reference) {
2757 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: interleave the two fields -> double stride */
2761 dct_linesize = linesize << s->interlaced_dct;
2762 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2766 dest_cb= s->dest[1];
2767 dest_cr= s->dest[2];
/* non-readable destination (e.g. B frame about to be referenced by
 * draw_horiz_band): reconstruct into a scratchpad first */
2769 dest_y = s->b_scratchpad;
2770 dest_cb= s->b_scratchpad+16*linesize;
2771 dest_cr= s->b_scratchpad+32*linesize;
2775 /* motion handling */
2776 /* decoding or more than one mb_type (MC was already done otherwise) */
/* with frame threading, wait until the reference rows we need exist */
2779 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2780 if (s->mv_dir & MV_DIR_FORWARD) {
2781 ff_thread_await_progress(&s->last_picture_ptr->tf,
2782 ff_MPV_lowest_referenced_row(s, 0),
2785 if (s->mv_dir & MV_DIR_BACKWARD) {
2786 ff_thread_await_progress(&s->next_picture_ptr->tf,
2787 ff_MPV_lowest_referenced_row(s, 1),
/* lowres path: put forward prediction, then average in backward */
2793 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2795 if (s->mv_dir & MV_DIR_FORWARD) {
2796 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2797 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2799 if (s->mv_dir & MV_DIR_BACKWARD) {
2800 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* full-resolution path, with or without rounding as the codec needs */
2803 op_qpix = s->me.qpel_put;
2804 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2805 op_pix = s->hdsp.put_pixels_tab;
2807 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2809 if (s->mv_dir & MV_DIR_FORWARD) {
2810 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2811 op_pix = s->hdsp.avg_pixels_tab;
2812 op_qpix= s->me.qpel_avg;
2814 if (s->mv_dir & MV_DIR_BACKWARD) {
2815 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2820 /* skip dequant / idct if we are really late ;) */
2821 if(s->avctx->skip_idct){
2822 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2823 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2824 || s->avctx->skip_idct >= AVDISCARD_ALL)
2828 /* add dct residue */
/* codecs whose unquantize is not folded into the bitstream decoder */
2829 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2830 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2831 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2832 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2833 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2834 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2836 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2837 if (s->chroma_y_shift){
2838 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2839 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2843 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2844 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2845 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2846 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* residuals already dequantized by the parser: just IDCT-add */
2849 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2850 add_dct(s, block[0], 0, dest_y , dct_linesize);
2851 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2852 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2853 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2855 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2856 if(s->chroma_y_shift){//Chroma420
2857 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2858 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2861 dct_linesize = uvlinesize << s->interlaced_dct;
2862 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2864 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2865 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2866 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2867 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2868 if(!s->chroma_x_shift){//Chroma444
2869 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2870 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2871 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2872 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2877 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2878 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2881 /* dct only in intra block */
2882 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2883 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2884 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2885 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2886 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2888 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2889 if(s->chroma_y_shift){
2890 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2891 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2895 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2896 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2897 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2898 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* intra blocks already dequantized: plain IDCT-put */
2902 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2903 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2904 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2905 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2907 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2908 if(s->chroma_y_shift){
2909 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2910 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2913 dct_linesize = uvlinesize << s->interlaced_dct;
2914 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2916 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2917 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2918 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2919 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2920 if(!s->chroma_x_shift){//Chroma444
2921 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2922 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2923 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2924 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* scratchpad was used above: copy the finished MB to the real frame */
2932 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2933 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2934 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatch to the internal MB reconstruction with the
 * lowres and is_mpeg12 flags resolved to compile-time-friendly constants. */
2939 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2941 if(s->out_format == FMT_MPEG1) {
2942 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2943 else MPV_decode_mb_internal(s, block, 0, 1);
2946 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2947 else MPV_decode_mb_internal(s, block, 0, 0);
2951 * @param h is the normal height, this will be reduced automatically if needed for the last row
/*
 * Draw the edges of a newly decoded band of rows and notify the user via
 * avctx->draw_horiz_band.  h is clipped automatically for the last row.
 * NOTE(review): some interior lines (extra conditions, the src selection,
 * offset[1] assignment) are not visible in this excerpt.
 */
2953 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2954 Picture *last, int y, int h, int picture_structure,
2955 int first_field, int draw_edges, int low_delay,
2956 int v_edge_pos, int h_edge_pos)
2958 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2959 int hshift = desc->log2_chroma_w;
2960 int vshift = desc->log2_chroma_h;
2961 const int field_pic = picture_structure != PICT_FRAME;
/* edge drawing is only done when we own the buffer layout ourselves */
2967 if (!avctx->hwaccel &&
2968 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2971 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2972 int *linesize = cur->f.linesize;
2973 int sides = 0, edge_h;
2974 if (y==0) sides |= EDGE_TOP;
2975 if (y + h >= v_edge_pos)
2976 sides |= EDGE_BOTTOM;
2978 edge_h= FFMIN(h, v_edge_pos - y);
/* replicate border pixels for all three planes of this band */
2980 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2981 linesize[0], h_edge_pos, edge_h,
2982 EDGE_WIDTH, EDGE_WIDTH, sides);
2983 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2984 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2985 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2986 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2987 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2988 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2991 h = FFMIN(h, avctx->height - y);
2993 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2995 if (avctx->draw_horiz_band) {
2997 int offset[AV_NUM_DATA_POINTERS];
/* B frames / low delay are output in coded order */
3000 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
3001 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
3008 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
3009 picture_structure == PICT_FRAME &&
3010 avctx->codec_id != AV_CODEC_ID_SVQ3) {
3011 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
3014 offset[0]= y * src->linesize[0];
3016 offset[2]= (y >> vshift) * src->linesize[1];
3017 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
3023 avctx->draw_horiz_band(avctx, src, offset,
3024 y, picture_structure, h);
/* Convenience wrapper: call ff_draw_horiz_band with parameters taken from
 * the MpegEncContext; edges are drawn only for unrestricted-MV decoding. */
3028 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3030 int draw_edges = s->unrestricted_mv && !s->intra_only;
3031 ff_draw_horiz_band(s->avctx, &s->dsp, s->current_picture_ptr,
3032 s->last_picture_ptr, y, h, s->picture_structure,
3033 s->first_field, draw_edges, s->low_delay,
3034 s->v_edge_pos, s->h_edge_pos);
3037 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3038 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
3039 const int uvlinesize = s->current_picture.f.linesize[1];
/* log2 of the macroblock size at the current lowres setting (16 >> lowres) */
3040 const int mb_size= 4 - s->avctx->lowres;
/* indices of the four 8x8 luma blocks of the current MB in the b8 grid */
3042 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3043 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3044 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3045 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* chroma block indices live after the luma area, one per MB */
3046 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3047 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3048 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* destination pointers start one MB to the left; advanced per-MB later */
3050 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
3051 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3052 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3054 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3056 if(s->picture_structure==PICT_FRAME){
3057 s->dest[0] += s->mb_y * linesize << mb_size;
3058 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3059 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: only every second MB row belongs to this field */
3061 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3062 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3063 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3064 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3070 * Permute an 8x8 block.
3071 * @param block the block which will be permuted according to the given permutation vector
3072 * @param permutation the permutation vector
3073 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3074 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3075 * (inverse) permutated to scantable order!
3077 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3083 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* copy the nonzero coefficients (in scan order) into a temp buffer
 * (NOTE(review): the temp declaration and the copy statement body are not
 * fully visible in this excerpt) */
3085 for(i=0; i<=last; i++){
3086 const int j= scantable[i];
/* write them back through the permutation vector */
3091 for(i=0; i<=last; i++){
3092 const int j= scantable[i];
3093 const int perm_j= permutation[j];
3094 block[perm_j]= temp[j];
/* Flush decoder state: drop all reference pictures and reset the parser so
 * decoding can restart cleanly (e.g. after a seek). */
3098 void ff_mpeg_flush(AVCodecContext *avctx){
3100 MpegEncContext *s = avctx->priv_data;
3102 if(s==NULL || s->picture==NULL)
/* release every picture in the pool and forget the ref/cur pointers */
3105 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3106 ff_mpeg_unref_picture(s, &s->picture[i]);
3107 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3109 ff_mpeg_unref_picture(s, &s->current_picture);
3110 ff_mpeg_unref_picture(s, &s->last_picture);
3111 ff_mpeg_unref_picture(s, &s->next_picture);
3113 s->mb_x= s->mb_y= 0;
/* reset the bitstream parser state */
3116 s->parse_context.state= -1;
3117 s->parse_context.frame_start_found= 0;
3118 s->parse_context.overread= 0;
3119 s->parse_context.overread_index= 0;
3120 s->parse_context.index= 0;
3121 s->parse_context.last_index= 0;
3122 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantization: DC is scaled by the DC scaler, AC by
 * qscale * intra matrix, with odd-ification ((level-1)|1) per the spec. */
3126 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
3127 int16_t *block, int n, int qscale)
3129 int i, level, nCoeffs;
3130 const uint16_t *quant_matrix;
3132 nCoeffs= s->block_last_index[n];
/* blocks 0-3 are luma, 4+ chroma -> different DC scalers */
3134 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3135 /* XXX: only mpeg1 */
3136 quant_matrix = s->intra_matrix;
3137 for(i=1;i<=nCoeffs;i++) {
3138 int j= s->intra_scantable.permutated[i];
/* negative branch (sign handling lines not visible in this excerpt) */
3143 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3144 level = (level - 1) | 1;
/* positive branch */
3147 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3148 level = (level - 1) | 1;
/* MPEG-1 inter dequantization: ((2*level+1) * qscale * matrix) >> 4 with
 * odd-ification; DC is treated like any other coefficient (loop from 0). */
3155 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
3156 int16_t *block, int n, int qscale)
3158 int i, level, nCoeffs;
3159 const uint16_t *quant_matrix;
3161 nCoeffs= s->block_last_index[n];
3163 quant_matrix = s->inter_matrix;
3164 for(i=0; i<=nCoeffs; i++) {
3165 int j= s->intra_scantable.permutated[i];
/* negative branch (sign handling lines not visible in this excerpt) */
3170 level = (((level << 1) + 1) * qscale *
3171 ((int) (quant_matrix[j]))) >> 4;
3172 level = (level - 1) | 1;
/* positive branch */
3175 level = (((level << 1) + 1) * qscale *
3176 ((int) (quant_matrix[j]))) >> 4;
3177 level = (level - 1) | 1;
/* MPEG-2 intra dequantization: like MPEG-1 intra but without the
 * odd-ification step; alternate scan forces all 64 coefficients. */
3184 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
3185 int16_t *block, int n, int qscale)
3187 int i, level, nCoeffs;
3188 const uint16_t *quant_matrix;
3190 if(s->alternate_scan) nCoeffs= 63;
3191 else nCoeffs= s->block_last_index[n];
3193 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3194 quant_matrix = s->intra_matrix;
3195 for(i=1;i<=nCoeffs;i++) {
3196 int j= s->intra_scantable.permutated[i];
/* negative branch (sign handling lines not visible in this excerpt) */
3201 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* positive branch */
3204 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of MPEG-2 intra dequantization (keeps the mismatch-
 * control behavior exact; the accumulator lines are not visible here). */
3211 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
3212 int16_t *block, int n, int qscale)
3214 int i, level, nCoeffs;
3215 const uint16_t *quant_matrix;
3218 if(s->alternate_scan) nCoeffs= 63;
3219 else nCoeffs= s->block_last_index[n];
3221 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3223 quant_matrix = s->intra_matrix;
3224 for(i=1;i<=nCoeffs;i++) {
3225 int j= s->intra_scantable.permutated[i];
/* negative branch (sign handling lines not visible in this excerpt) */
3230 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* positive branch */
3233 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantization: ((2*level+1) * qscale * matrix) >> 4,
 * no odd-ification (mismatch control lines are not visible here). */
3242 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
3243 int16_t *block, int n, int qscale)
3245 int i, level, nCoeffs;
3246 const uint16_t *quant_matrix;
3249 if(s->alternate_scan) nCoeffs= 63;
3250 else nCoeffs= s->block_last_index[n];
3252 quant_matrix = s->inter_matrix;
3253 for(i=0; i<=nCoeffs; i++) {
3254 int j= s->intra_scantable.permutated[i];
/* negative branch (sign handling lines not visible in this excerpt) */
3259 level = (((level << 1) + 1) * qscale *
3260 ((int) (quant_matrix[j]))) >> 4;
/* positive branch */
3263 level = (((level << 1) + 1) * qscale *
3264 ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra dequantization: level * qmul +/- qadd with a separate
 * DC scaler.  (qmul assignment and AIC special-casing not visible here.) */
3273 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
3274 int16_t *block, int n, int qscale)
3276 int i, level, qmul, qadd;
3279 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
3284 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
/* qadd is forced odd, as the spec requires */
3285 qadd = (qscale - 1) | 1;
3292 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3294 for(i=1; i<=nCoeffs; i++) {
/* negative coefficients get -qadd, positive get +qadd */
3298 level = level * qmul - qadd;
3300 level = level * qmul + qadd;
/* H.263-style inter dequantization: level * qmul +/- qadd, DC included
 * (loop starts at 0; qmul assignment not visible in this excerpt). */
3307 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
3308 int16_t *block, int n, int qscale)
3310 int i, level, qmul, qadd;
3313 av_assert2(s->block_last_index[n]>=0);
/* qadd is forced odd, as the spec requires */
3315 qadd = (qscale - 1) | 1;
3318 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3320 for(i=0; i<=nCoeffs; i++) {
/* negative coefficients get -qadd, positive get +qadd */
3324 level = level * qmul - qadd;
3326 level = level * qmul + qadd;
3334 * set qscale and update qscale dependent variables.
3336 void ff_set_qscale(MpegEncContext * s, int qscale)
/* clamp qscale to the legal 1..31 range (lower-bound branch not visible) */
3339 else if (qscale > 31)
/* derive the chroma qscale and both DC scalers from lookup tables */
3344 s->chroma_qscale= s->chroma_qscale_table[qscale];
3346 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3347 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report row s->mb_y as decoded to waiting frame threads; suppressed for
 * B frames, partitioned frames and after an error (rows may be revisited). */
3350 void ff_MPV_report_decode_progress(MpegEncContext *s)
3352 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3353 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3356 #if CONFIG_ERROR_RESILIENCE
/* Copy the per-frame state the error-resilience module needs from the
 * MpegEncContext into its ERContext, then start ER for this frame. */
3357 void ff_mpeg_er_frame_start(MpegEncContext *s)
3359 ERContext *er = &s->er;
3361 er->cur_pic = s->current_picture_ptr;
3362 er->last_pic = s->last_picture_ptr;
3363 er->next_pic = s->next_picture_ptr;
3365 er->pp_time = s->pp_time;
3366 er->pb_time = s->pb_time;
3367 er->quarter_sample = s->quarter_sample;
3368 er->partitioned_frame = s->partitioned_frame;
3370 ff_er_frame_start(er);
3372 #endif /* CONFIG_ERROR_RESILIENCE */