2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
36 #include "h264chroma.h"
39 #include "mpegvideo.h"
42 #include "xvmc_internal.h"
/* Forward declarations: the per-standard inverse-quantization ("dequant")
 * implementations. ff_dct_common_init() installs these as function pointers
 * on the MpegEncContext (see s->dct_unquantize_* assignments below). */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                int16_t *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale);
/* Identity luma-qscale -> chroma-qscale mapping, used when the codec does
 * not define its own chroma scaling table.
 * (Restored the closing brace lost to listing truncation.) */
static const uint8_t ff_default_chroma_qscale_table[32] = {
//   0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
};
/* MPEG-1 DC coefficient scale: constant 8 for every qscale.
 * Indexed by qscale (full 7-bit range for safety).
 * (Restored the closing brace lost to listing truncation.) */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
//  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
    8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8
};
/* MPEG-2 DC scale, intra_dc_precision = 1: constant 4.
 * (Restored the closing brace lost to listing truncation.) */
static const uint8_t mpeg2_dc_scale_table1[128] = {
//  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
    4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4
};
/* MPEG-2 DC scale, intra_dc_precision = 2: constant 2.
 * (Restored the closing brace lost to listing truncation.) */
static const uint8_t mpeg2_dc_scale_table2[128] = {
//  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,
    2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2
};
/* MPEG-2 DC scale, intra_dc_precision = 3: constant 1 (no scaling).
 * (Restored the closing brace lost to listing truncation.) */
static const uint8_t mpeg2_dc_scale_table3[128] = {
//  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,
    1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1
};
115 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
116 ff_mpeg1_dc_scale_table,
117 mpeg2_dc_scale_table1,
118 mpeg2_dc_scale_table2,
119 mpeg2_dc_scale_table3,
122 const enum AVPixelFormat ff_pixfmt_list_420[] = {
127 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
129 int mb_x, int mb_y, int mb_intra, int mb_skipped)
131 MpegEncContext *s = opaque;
134 s->mv_type = mv_type;
135 s->mb_intra = mb_intra;
136 s->mb_skipped = mb_skipped;
139 memcpy(s->mv, mv, sizeof(*mv));
141 ff_init_block_index(s);
142 ff_update_block_index(s);
144 s->dsp.clear_blocks(s->block[0]);
146 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
147 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
148 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
151 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
152 ff_MPV_decode_mb(s, s->block);
155 /* init common dct for both encoder and decoder */
156 av_cold int ff_dct_common_init(MpegEncContext *s)
158 ff_dsputil_init(&s->dsp, s->avctx);
159 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
160 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
161 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
163 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
164 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
165 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
166 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
167 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
168 if (s->flags & CODEC_FLAG_BITEXACT)
169 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
170 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
173 ff_MPV_common_init_axp(s);
175 ff_MPV_common_init_arm(s);
177 ff_MPV_common_init_bfin(s);
179 ff_MPV_common_init_ppc(s);
181 ff_MPV_common_init_x86(s);
183 /* load & permutate scantables
184 * note: only wmv uses different ones
186 if (s->alternate_scan) {
187 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
190 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
194 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
199 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
201 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
203 // edge emu needs blocksize + filter length - 1
204 // (= 17x17 for halfpel / 21x21 for h264)
205 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
206 // at uvlinesize. It supports only YUV420 so 24x24 is enough
207 // linesize * interlaced * MBsize
208 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
211 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
213 s->me.temp = s->me.scratchpad;
214 s->rd_scratchpad = s->me.scratchpad;
215 s->b_scratchpad = s->me.scratchpad;
216 s->obmc_scratchpad = s->me.scratchpad + 16;
220 av_freep(&s->edge_emu_buffer);
221 return AVERROR(ENOMEM);
225 * Allocate a frame buffer
227 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
232 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
233 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
234 s->codec_id != AV_CODEC_ID_MSS2)
235 r = ff_thread_get_buffer(s->avctx, &pic->tf,
236 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
238 pic->f.width = s->avctx->width;
239 pic->f.height = s->avctx->height;
240 pic->f.format = s->avctx->pix_fmt;
241 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
244 if (r < 0 || !pic->f.buf[0]) {
245 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
250 if (s->avctx->hwaccel) {
251 assert(!pic->hwaccel_picture_private);
252 if (s->avctx->hwaccel->priv_data_size) {
253 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
254 if (!pic->hwaccel_priv_buf) {
255 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
258 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
262 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
263 s->uvlinesize != pic->f.linesize[1])) {
264 av_log(s->avctx, AV_LOG_ERROR,
265 "get_buffer() failed (stride changed)\n");
266 ff_mpeg_unref_picture(s, pic);
270 if (pic->f.linesize[1] != pic->f.linesize[2]) {
271 av_log(s->avctx, AV_LOG_ERROR,
272 "get_buffer() failed (uv stride mismatch)\n");
273 ff_mpeg_unref_picture(s, pic);
277 if (!s->edge_emu_buffer &&
278 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
279 av_log(s->avctx, AV_LOG_ERROR,
280 "get_buffer() failed to allocate context scratch buffers.\n");
281 ff_mpeg_unref_picture(s, pic);
288 static void free_picture_tables(Picture *pic)
292 pic->alloc_mb_width =
293 pic->alloc_mb_height = 0;
295 av_buffer_unref(&pic->mb_var_buf);
296 av_buffer_unref(&pic->mc_mb_var_buf);
297 av_buffer_unref(&pic->mb_mean_buf);
298 av_buffer_unref(&pic->mbskip_table_buf);
299 av_buffer_unref(&pic->qscale_table_buf);
300 av_buffer_unref(&pic->mb_type_buf);
302 for (i = 0; i < 2; i++) {
303 av_buffer_unref(&pic->motion_val_buf[i]);
304 av_buffer_unref(&pic->ref_index_buf[i]);
308 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
310 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
311 const int mb_array_size = s->mb_stride * s->mb_height;
312 const int b8_array_size = s->b8_stride * s->mb_height * 2;
316 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
317 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
318 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
320 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
321 return AVERROR(ENOMEM);
324 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
325 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
326 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
327 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
328 return AVERROR(ENOMEM);
331 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
332 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
333 int ref_index_size = 4 * mb_array_size;
335 for (i = 0; mv_size && i < 2; i++) {
336 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
337 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
338 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
339 return AVERROR(ENOMEM);
343 pic->alloc_mb_width = s->mb_width;
344 pic->alloc_mb_height = s->mb_height;
349 static int make_tables_writable(Picture *pic)
352 #define MAKE_WRITABLE(table) \
355 (ret = av_buffer_make_writable(&pic->table)) < 0)\
359 MAKE_WRITABLE(mb_var_buf);
360 MAKE_WRITABLE(mc_mb_var_buf);
361 MAKE_WRITABLE(mb_mean_buf);
362 MAKE_WRITABLE(mbskip_table_buf);
363 MAKE_WRITABLE(qscale_table_buf);
364 MAKE_WRITABLE(mb_type_buf);
366 for (i = 0; i < 2; i++) {
367 MAKE_WRITABLE(motion_val_buf[i]);
368 MAKE_WRITABLE(ref_index_buf[i]);
375 * Allocate a Picture.
376 * The pixels are allocated/set by calling get_buffer() if shared = 0
378 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
382 if (pic->qscale_table_buf)
383 if ( pic->alloc_mb_width != s->mb_width
384 || pic->alloc_mb_height != s->mb_height)
385 free_picture_tables(pic);
388 av_assert0(pic->f.data[0]);
391 av_assert0(!pic->f.buf[0]);
393 if (alloc_frame_buffer(s, pic) < 0)
396 s->linesize = pic->f.linesize[0];
397 s->uvlinesize = pic->f.linesize[1];
400 if (!pic->qscale_table_buf)
401 ret = alloc_picture_tables(s, pic);
403 ret = make_tables_writable(pic);
408 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
409 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
410 pic->mb_mean = pic->mb_mean_buf->data;
413 pic->mbskip_table = pic->mbskip_table_buf->data;
414 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
415 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
417 if (pic->motion_val_buf[0]) {
418 for (i = 0; i < 2; i++) {
419 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
420 pic->ref_index[i] = pic->ref_index_buf[i]->data;
426 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
427 ff_mpeg_unref_picture(s, pic);
428 free_picture_tables(pic);
429 return AVERROR(ENOMEM);
433 * Deallocate a picture.
435 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
437 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
440 /* WM Image / Screen codecs allocate internal buffers with different
441 * dimensions / colorspaces; ignore user-defined callbacks for these. */
442 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
443 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
444 s->codec_id != AV_CODEC_ID_MSS2)
445 ff_thread_release_buffer(s->avctx, &pic->tf);
447 av_frame_unref(&pic->f);
449 av_buffer_unref(&pic->hwaccel_priv_buf);
451 if (pic->needs_realloc)
452 free_picture_tables(pic);
454 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
457 static int update_picture_tables(Picture *dst, Picture *src)
461 #define UPDATE_TABLE(table)\
464 (!dst->table || dst->table->buffer != src->table->buffer)) {\
465 av_buffer_unref(&dst->table);\
466 dst->table = av_buffer_ref(src->table);\
468 free_picture_tables(dst);\
469 return AVERROR(ENOMEM);\
474 UPDATE_TABLE(mb_var_buf);
475 UPDATE_TABLE(mc_mb_var_buf);
476 UPDATE_TABLE(mb_mean_buf);
477 UPDATE_TABLE(mbskip_table_buf);
478 UPDATE_TABLE(qscale_table_buf);
479 UPDATE_TABLE(mb_type_buf);
480 for (i = 0; i < 2; i++) {
481 UPDATE_TABLE(motion_val_buf[i]);
482 UPDATE_TABLE(ref_index_buf[i]);
485 dst->mb_var = src->mb_var;
486 dst->mc_mb_var = src->mc_mb_var;
487 dst->mb_mean = src->mb_mean;
488 dst->mbskip_table = src->mbskip_table;
489 dst->qscale_table = src->qscale_table;
490 dst->mb_type = src->mb_type;
491 for (i = 0; i < 2; i++) {
492 dst->motion_val[i] = src->motion_val[i];
493 dst->ref_index[i] = src->ref_index[i];
496 dst->alloc_mb_width = src->alloc_mb_width;
497 dst->alloc_mb_height = src->alloc_mb_height;
502 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
506 av_assert0(!dst->f.buf[0]);
507 av_assert0(src->f.buf[0]);
511 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
515 ret = update_picture_tables(dst, src);
519 if (src->hwaccel_picture_private) {
520 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
521 if (!dst->hwaccel_priv_buf)
523 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
526 dst->field_picture = src->field_picture;
527 dst->mb_var_sum = src->mb_var_sum;
528 dst->mc_mb_var_sum = src->mc_mb_var_sum;
529 dst->b_frame_score = src->b_frame_score;
530 dst->needs_realloc = src->needs_realloc;
531 dst->reference = src->reference;
532 dst->shared = src->shared;
536 ff_mpeg_unref_picture(s, dst);
540 static void exchange_uv(MpegEncContext *s)
545 s->pblocks[4] = s->pblocks[5];
549 static int init_duplicate_context(MpegEncContext *s)
551 int y_size = s->b8_stride * (2 * s->mb_height + 1);
552 int c_size = s->mb_stride * (s->mb_height + 1);
553 int yc_size = y_size + 2 * c_size;
561 s->obmc_scratchpad = NULL;
564 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
565 ME_MAP_SIZE * sizeof(uint32_t), fail)
566 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
567 ME_MAP_SIZE * sizeof(uint32_t), fail)
568 if (s->avctx->noise_reduction) {
569 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
570 2 * 64 * sizeof(int), fail)
573 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
574 s->block = s->blocks[0];
576 for (i = 0; i < 12; i++) {
577 s->pblocks[i] = &s->block[i];
579 if (s->avctx->codec_tag == AV_RL32("VCR2"))
582 if (s->out_format == FMT_H263) {
584 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
585 yc_size * sizeof(int16_t) * 16, fail);
586 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
587 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
588 s->ac_val[2] = s->ac_val[1] + c_size;
593 return -1; // free() through ff_MPV_common_end()
596 static void free_duplicate_context(MpegEncContext *s)
601 av_freep(&s->edge_emu_buffer);
602 av_freep(&s->me.scratchpad);
606 s->obmc_scratchpad = NULL;
608 av_freep(&s->dct_error_sum);
609 av_freep(&s->me.map);
610 av_freep(&s->me.score_map);
611 av_freep(&s->blocks);
612 av_freep(&s->ac_val_base);
/* Copy the per-slice-thread fields of src into bak; used by
 * ff_update_duplicate_context() to save/restore a thread context around a
 * wholesale memcpy of the MpegEncContext.
 * NOTE(review): this listing appears truncated — most COPY() entries of the
 * original function (scratchpads, ME maps, block pointers, mb_y ranges,
 * dct_error_sum, ac_val, ...) and the #undef are missing here; only comments
 * were added, no code tokens were altered. */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(obmc_scratchpad);
    COPY(me.map_generation);
643 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
647 // FIXME copy only needed parts
649 backup_duplicate_context(&bak, dst);
650 memcpy(dst, src, sizeof(MpegEncContext));
651 backup_duplicate_context(dst, &bak);
652 for (i = 0; i < 12; i++) {
653 dst->pblocks[i] = &dst->block[i];
655 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
657 if (!dst->edge_emu_buffer &&
658 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
659 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
660 "scratch buffers.\n");
663 // STOP_TIMER("update_duplicate_context")
664 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/**
 * Frame-multithreading support: transfer decoding state from the source
 * thread's context into this thread's context — picture references,
 * buffered bitstream data, timing/interlacing info and assorted
 * inter-frame fields.
 *
 * NOTE(review): this listing appears truncated — several structural lines
 * of the original (opening/closing braces, else branches, early returns,
 * macro do/while wrappers) are missing. Only comments were added below; no
 * code tokens were altered.
 */
int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        memcpy(s, s1, sizeof(MpegEncContext));

        /* the bitstream buffer is per-context; never share s1's */
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        if (s1->context_initialized){
//     s->picture_range_start  += MAX_PICTURE_COUNT;
//     s->picture_range_end    += MAX_PICTURE_COUNT;
            if((ret = ff_MPV_common_init(s)) < 0){
                memset(s, 0, sizeof(MpegEncContext));

    /* geometry changed (or a reinit was requested): resize frame tables */
    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->context_reinit = 0;
        s->height = s1->height;
        s->width  = s1->width;
        if ((ret = ff_MPV_common_frame_size_change(s)) < 0)

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;
    s->input_picture_number = s1->input_picture_number;

    av_assert0(!s->picture || s->picture != s1->picture);

    /* re-reference every picture the source thread holds */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        ff_mpeg_unref_picture(s, &s->picture[i]);
        if (s1->picture[i].f.buf[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)

#define UPDATE_PICTURE(pic)\
    ff_mpeg_unref_picture(s, &s->pic);\
    if (s1->pic.f.buf[0])\
        ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
        ret = update_picture_tables(&s->pic, &s1->pic);\

    UPDATE_PICTURE(current_picture);
    UPDATE_PICTURE(last_picture);
    UPDATE_PICTURE(next_picture);

    /* translate s1's picture pointers into s's picture array */
    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;
    s->padding_bug_score    = s1->padding_bug_score;

    /* bulk-copy the MPEG-4 timing fields between the two marker members */
    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
           (char *) &s1->shape - (char *) &s1->time_increment_bits);

    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->droppable    = s1->droppable;

    // DivX handling (doesn't work)
    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               FF_INPUT_BUFFER_PADDING_SIZE);

    // linesize dependend scratch buffer allocation
    if (!s->edge_emu_buffer)
        if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                   "scratch buffers.\n");
            return AVERROR(ENOMEM);
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
                   "be allocated due to unknown size.\n");

    // MPEG2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;

        if (s1->pict_type != AV_PICTURE_TYPE_B) {
            s->last_non_b_pict_type = s1->pict_type;
805 * Set the given MpegEncContext to common defaults
806 * (same for encoding and decoding).
807 * The changed fields will not depend upon the
808 * prior state of the MpegEncContext.
810 void ff_MPV_common_defaults(MpegEncContext *s)
812 s->y_dc_scale_table =
813 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
814 s->chroma_qscale_table = ff_default_chroma_qscale_table;
815 s->progressive_frame = 1;
816 s->progressive_sequence = 1;
817 s->picture_structure = PICT_FRAME;
819 s->coded_picture_number = 0;
820 s->picture_number = 0;
821 s->input_picture_number = 0;
823 s->picture_in_gop_number = 0;
828 s->slice_context_count = 1;
832 * Set the given MpegEncContext to defaults for decoding.
833 * the changed fields will not depend upon
834 * the prior state of the MpegEncContext.
836 void ff_MPV_decode_defaults(MpegEncContext *s)
838 ff_MPV_common_defaults(s);
841 static int init_er(MpegEncContext *s)
843 ERContext *er = &s->er;
844 int mb_array_size = s->mb_height * s->mb_stride;
847 er->avctx = s->avctx;
850 er->mb_index2xy = s->mb_index2xy;
851 er->mb_num = s->mb_num;
852 er->mb_width = s->mb_width;
853 er->mb_height = s->mb_height;
854 er->mb_stride = s->mb_stride;
855 er->b8_stride = s->b8_stride;
857 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
858 er->error_status_table = av_mallocz(mb_array_size);
859 if (!er->er_temp_buffer || !er->error_status_table)
862 er->mbskip_table = s->mbskip_table;
863 er->mbintra_table = s->mbintra_table;
865 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
866 er->dc_val[i] = s->dc_val[i];
868 er->decode_mb = mpeg_er_decode_mb;
873 av_freep(&er->er_temp_buffer);
874 av_freep(&er->error_status_table);
875 return AVERROR(ENOMEM);
/**
 * Initialize and allocates MpegEncContext fields dependent on the resolution.
 *
 * Derives the macroblock geometry from width/height and allocates every
 * resolution-sized table (mb_index2xy, MV tables, MB type, DC/AC
 * prediction, skip/intra tables).
 *
 * NOTE(review): this listing appears truncated — several structural lines
 * of the original (opening/closing braces, the `if (s->encoding)` guard
 * around the MV-table section, inner-loop locals, `return init_er(s);` and
 * the fail label) are missing. Only comments were added; no code tokens
 * were altered.
 */
static int init_context_frame(MpegEncContext *s)
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;      /* +1 guard column */
    s->b8_stride  = s->mb_width * 2 + 1;  /* 8x8-block stride */
    s->b4_stride  = s->mb_width * 4 + 1;  /* 4x4-block stride */
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overriden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num     = s->mb_width * s->mb_height;

    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

        /* Allocate MV tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,            mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,     mv_table_size * 2 * sizeof(int16_t), fail)
        /* +stride+1: skip the guard row/column */
        s->p_mv_table            = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base     + s->mb_stride + 1;

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
    if (s->out_format == FMT_H263) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        // MN: we need these for  error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        /* 1024 is the neutral DC predictor value */
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;

    /* which mb is a intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for  a quicker mpeg4 slice_end detection

    return AVERROR(ENOMEM);
1000 * init common structure for both encoder and decoder.
1001 * this assumes that some variables like width/height are already set
1003 av_cold int ff_MPV_common_init(MpegEncContext *s)
1006 int nb_slices = (HAVE_THREADS &&
1007 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1008 s->avctx->thread_count : 1;
1010 if (s->encoding && s->avctx->slices)
1011 nb_slices = s->avctx->slices;
1013 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1014 s->mb_height = (s->height + 31) / 32 * 2;
1016 s->mb_height = (s->height + 15) / 16;
1018 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1019 av_log(s->avctx, AV_LOG_ERROR,
1020 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1024 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1027 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1029 max_slices = MAX_THREADS;
1030 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1031 " reducing to %d\n", nb_slices, max_slices);
1032 nb_slices = max_slices;
1035 if ((s->width || s->height) &&
1036 av_image_check_size(s->width, s->height, 0, s->avctx))
1039 ff_dct_common_init(s);
1041 s->flags = s->avctx->flags;
1042 s->flags2 = s->avctx->flags2;
1044 /* set chroma shifts */
1045 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1047 &s->chroma_y_shift);
1049 /* convert fourcc to upper case */
1050 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1052 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1054 s->avctx->coded_frame = &s->current_picture.f;
1057 if (s->msmpeg4_version) {
1058 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1059 2 * 2 * (MAX_LEVEL + 1) *
1060 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1062 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1064 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
1065 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
1066 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
1067 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1068 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1069 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1070 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1071 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1073 if (s->avctx->noise_reduction) {
1074 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
1078 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1079 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1080 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1081 avcodec_get_frame_defaults(&s->picture[i].f);
1083 memset(&s->next_picture, 0, sizeof(s->next_picture));
1084 memset(&s->last_picture, 0, sizeof(s->last_picture));
1085 memset(&s->current_picture, 0, sizeof(s->current_picture));
1086 avcodec_get_frame_defaults(&s->next_picture.f);
1087 avcodec_get_frame_defaults(&s->last_picture.f);
1088 avcodec_get_frame_defaults(&s->current_picture.f);
1090 if (init_context_frame(s))
1093 s->parse_context.state = -1;
1095 s->context_initialized = 1;
1096 s->thread_context[0] = s;
1098 // if (s->width && s->height) {
1099 if (nb_slices > 1) {
1100 for (i = 1; i < nb_slices; i++) {
1101 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1102 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1105 for (i = 0; i < nb_slices; i++) {
1106 if (init_duplicate_context(s->thread_context[i]) < 0)
1108 s->thread_context[i]->start_mb_y =
1109 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1110 s->thread_context[i]->end_mb_y =
1111 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1114 if (init_duplicate_context(s) < 0)
1117 s->end_mb_y = s->mb_height;
1119 s->slice_context_count = nb_slices;
1124 ff_MPV_common_end(s);
1129 * Frees and resets MpegEncContext fields depending on the resolution.
1130 * Is used during resolution changes to avoid a full reinitialization of the
/**
 * Free and reset the MpegEncContext fields that depend on the frame
 * resolution (motion-vector tables, per-macroblock tables, error
 * resilience buffers).  Used during resolution changes to avoid a full
 * reinitialization of the codec context (see header comment above).
 */
1133 static int free_context_frame(MpegEncContext *s)
/* Frame-level MV tables: free the *_base allocations, then clear the
 * derived pointers that pointed into them so nothing dangles. */
1137 av_freep(&s->mb_type);
1138 av_freep(&s->p_mv_table_base);
1139 av_freep(&s->b_forw_mv_table_base);
1140 av_freep(&s->b_back_mv_table_base);
1141 av_freep(&s->b_bidir_forw_mv_table_base);
1142 av_freep(&s->b_bidir_back_mv_table_base);
1143 av_freep(&s->b_direct_mv_table_base);
1144 s->p_mv_table = NULL;
1145 s->b_forw_mv_table = NULL;
1146 s->b_back_mv_table = NULL;
1147 s->b_bidir_forw_mv_table = NULL;
1148 s->b_bidir_back_mv_table = NULL;
1149 s->b_direct_mv_table = NULL;
/* Field-prediction MV/select tables (2x2(x2) indexed; exact index
 * semantics not visible in this view -- see mpegvideo.h). */
1150 for (i = 0; i < 2; i++) {
1151 for (j = 0; j < 2; j++) {
1152 for (k = 0; k < 2; k++) {
1153 av_freep(&s->b_field_mv_table_base[i][j][k]);
1154 s->b_field_mv_table[i][j][k] = NULL;
1156 av_freep(&s->b_field_select_table[i][j]);
1157 av_freep(&s->p_field_mv_table_base[i][j]);
1158 s->p_field_mv_table[i][j] = NULL;
1160 av_freep(&s->p_field_select_table[i]);
/* Per-macroblock prediction/coding state. */
1163 av_freep(&s->dc_val_base);
1164 av_freep(&s->coded_block_base);
1165 av_freep(&s->mbintra_table);
1166 av_freep(&s->cbp_table);
1167 av_freep(&s->pred_dir_table);
1169 av_freep(&s->mbskip_table);
/* Error-resilience and rate-control helper tables. */
1171 av_freep(&s->er.error_status_table);
1172 av_freep(&s->er.er_temp_buffer);
1173 av_freep(&s->mb_index2xy);
1174 av_freep(&s->lambda_table);
1176 av_freep(&s->cplx_tab);
1177 av_freep(&s->bits_tab);
/* Mark the linesizes unset so the next frame init recomputes them. */
1179 s->linesize = s->uvlinesize = 0;
/**
 * Reinitialize the resolution-dependent parts of an already-initialized
 * context after a frame size change: tear down slice/thread duplicate
 * contexts and frame tables, recompute mb_height, then rebuild both.
 * Returns 0 on success, a negative AVERROR code on failure (error paths
 * not fully visible in this view).
 */
1184 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Tear down the per-slice duplicate contexts (context 0 is s itself,
 * so only indices >= 1 are freed as separate allocations). */
1188 if (s->slice_context_count > 1) {
1189 for (i = 0; i < s->slice_context_count; i++) {
1190 free_duplicate_context(s->thread_context[i]);
1192 for (i = 1; i < s->slice_context_count; i++) {
1193 av_freep(&s->thread_context[i]);
1196 free_duplicate_context(s);
1198 if ((err = free_context_frame(s)) < 0)
/* Existing picture slots keep their buffers but must be reallocated at
 * the new size before reuse. */
1202 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1203 s->picture[i].needs_realloc = 1;
1206 s->last_picture_ptr =
1207 s->next_picture_ptr =
1208 s->current_picture_ptr = NULL;
/* Interlaced MPEG-2 rounds the MB height up to a multiple of two
 * (32-pixel units); everything else uses plain 16-pixel macroblocks. */
1211 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1212 s->mb_height = (s->height + 31) / 32 * 2;
1214 s->mb_height = (s->height + 15) / 16;
1216 if ((s->width || s->height) &&
1217 av_image_check_size(s->width, s->height, 0, s->avctx))
1218 return AVERROR_INVALIDDATA;
1220 if ((err = init_context_frame(s)))
1223 s->thread_context[0] = s;
/* Rebuild the duplicate contexts and re-partition the MB rows across
 * slices (rounded division keeps the split balanced). */
1225 if (s->width && s->height) {
1226 int nb_slices = s->slice_context_count;
1227 if (nb_slices > 1) {
1228 for (i = 1; i < nb_slices; i++) {
1229 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1230 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1233 for (i = 0; i < nb_slices; i++) {
1234 if (init_duplicate_context(s->thread_context[i]) < 0)
1236 s->thread_context[i]->start_mb_y =
1237 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1238 s->thread_context[i]->end_mb_y =
1239 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1242 err = init_duplicate_context(s);
1246 s->end_mb_y = s->mb_height;
1248 s->slice_context_count = nb_slices;
/* fail path: release everything on error. */
1253 ff_MPV_common_end(s);
1257 /* free the common structure for both encoder and decoder */
/**
 * Release everything owned by the common MpegEncContext: slice/thread
 * duplicate contexts, parser and bitstream buffers, quantization
 * matrices, all picture slots and the context-frame tables, then mark
 * the context uninitialized.  Safe to call on a partially initialized
 * context (av_freep on NULL is a no-op).
 */
1258 void ff_MPV_common_end(MpegEncContext *s)
1262 if (s->slice_context_count > 1) {
1263 for (i = 0; i < s->slice_context_count; i++) {
1264 free_duplicate_context(s->thread_context[i]);
1266 for (i = 1; i < s->slice_context_count; i++) {
1267 av_freep(&s->thread_context[i]);
1269 s->slice_context_count = 1;
1270 } else free_duplicate_context(s);
1272 av_freep(&s->parse_context.buffer);
1273 s->parse_context.buffer_size = 0;
1275 av_freep(&s->bitstream_buffer);
1276 s->allocated_bitstream_buffer_size = 0;
1278 av_freep(&s->avctx->stats_out);
1279 av_freep(&s->ac_stats);
/* The chroma intra matrices may alias the luma ones; only free them
 * separately when they are distinct allocations, then clear both
 * pointers unconditionally. */
1281 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1282 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1283 s->q_chroma_intra_matrix= NULL;
1284 s->q_chroma_intra_matrix16= NULL;
1285 av_freep(&s->q_intra_matrix);
1286 av_freep(&s->q_inter_matrix);
1287 av_freep(&s->q_intra_matrix16);
1288 av_freep(&s->q_inter_matrix16);
1289 av_freep(&s->input_picture);
1290 av_freep(&s->reordered_input_picture);
1291 av_freep(&s->dct_offset);
/* Unreference every picture slot and its side-data tables before
 * freeing the slot array itself. */
1294 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1295 free_picture_tables(&s->picture[i]);
1296 ff_mpeg_unref_picture(s, &s->picture[i]);
1299 av_freep(&s->picture);
1300 free_picture_tables(&s->last_picture);
1301 ff_mpeg_unref_picture(s, &s->last_picture);
1302 free_picture_tables(&s->current_picture);
1303 ff_mpeg_unref_picture(s, &s->current_picture);
1304 free_picture_tables(&s->next_picture);
1305 ff_mpeg_unref_picture(s, &s->next_picture);
1306 free_picture_tables(&s->new_picture);
1307 ff_mpeg_unref_picture(s, &s->new_picture);
1309 free_context_frame(s);
/* Reset state so a later init starts from a clean slate. */
1311 s->context_initialized = 0;
1312 s->last_picture_ptr =
1313 s->next_picture_ptr =
1314 s->current_picture_ptr = NULL;
1315 s->linesize = s->uvlinesize = 0;
/**
 * Initialize the derived lookup tables of an RLTable: max_level[],
 * max_run[] and index_run[] for both the "not last" and "last"
 * coefficient groups.  If static_store is non-NULL the tables live in
 * the caller-provided static buffer (and a second call is a no-op);
 * otherwise they are av_malloc()ed.
 */
1318 av_cold void ff_init_rl(RLTable *rl,
1319 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1321 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1322 uint8_t index_run[MAX_RUN + 1];
1323 int last, run, level, start, end, i;
1325 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1326 if (static_store && rl->max_level[0])
1329 /* compute max_level[], max_run[] and index_run[] */
1330 for (last = 0; last < 2; last++) {
/* rl->n is used as the "unset" sentinel in index_run[] since no valid
 * table index equals it. (start/end selection for this group is not
 * visible in this view.) */
1339 memset(max_level, 0, MAX_RUN + 1);
1340 memset(max_run, 0, MAX_LEVEL + 1);
1341 memset(index_run, rl->n, MAX_RUN + 1);
1342 for (i = start; i < end; i++) {
1343 run = rl->table_run[i];
1344 level = rl->table_level[i];
/* index_run[run] records the FIRST table index with that run value. */
1345 if (index_run[run] == rl->n)
1347 if (level > max_level[run])
1348 max_level[run] = level;
1349 if (run > max_run[level])
1350 max_run[level] = run;
/* Publish the three tables: either slices of the static buffer laid
 * out back-to-back (max_level | max_run | index_run), or fresh heap
 * copies of the scratch arrays. */
1353 rl->max_level[last] = static_store[last];
1355 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1356 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1358 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1360 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1361 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1363 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1365 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1366 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Precompute the per-qscale RL_VLC tables (rl_vlc[q]) from the generic
 * VLC table, folding dequantization (qmul/qadd) into the stored level
 * so the decoder can skip it at runtime.
 */
1370 av_cold void ff_init_vlc_rl(RLTable *rl)
/* One table per qscale value 0..31. qadd is the standard odd rounding
 * term of H.263-style dequant; qmul's derivation from q is not visible
 * in this view. */
1374 for (q = 0; q < 32; q++) {
1376 int qadd = (q - 1) | 1;
1382 for (i = 0; i < rl->vlc.table_size; i++) {
1383 int code = rl->vlc.table[i][0];
1384 int len = rl->vlc.table[i][1];
/* Three special entries are passed through: invalid codes, subtable
 * links (negative length), and the escape symbol (code == rl->n). */
1387 if (len == 0) { // illegal code
1390 } else if (len < 0) { // more bits needed
1394 if (code == rl->n) { // esc
/* Normal symbol: store run+1 and the dequantized level; codes at or
 * beyond rl->last belong to the "last coefficient" group and are
 * flagged by the +192 run offset. */
1398 run = rl->table_run[code] + 1;
1399 level = rl->table_level[code] * qmul + qadd;
1400 if (code >= rl->last) run += 192;
1403 rl->rl_vlc[q][i].len = len;
1404 rl->rl_vlc[q][i].level = level;
1405 rl->rl_vlc[q][i].run = run;
/**
 * Unreference all picture slots that are not reference frames.
 * @param remove_current if zero, the slot pointed to by
 *                       current_picture_ptr is preserved as well.
 */
1410 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1414 /* release non reference frames */
1415 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1416 if (!s->picture[i].reference &&
1417 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1418 ff_mpeg_unref_picture(s, &s->picture[i]);
/**
 * Check whether a picture slot may be reused: it must not be the
 * current last_picture, and is reusable when it has no data buffer or
 * is flagged for reallocation without being a delayed reference.
 * (The return statements for each branch are not visible in this view.)
 */
1423 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1425 if (pic == s->last_picture_ptr)
1427 if (pic->f.buf[0] == NULL)
1429 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/**
 * Find a free slot in s->picture[].  Two passes: first prefer slots
 * with no data buffer at all, then fall back to any slot that
 * pic_is_unused() accepts.  Running out of slots is treated as an
 * internal error (rationale in the comment below).
 */
1434 static int find_unused_picture(MpegEncContext *s, int shared)
1439 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1440 if (s->picture[i].f.buf[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1444 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1445 if (pic_is_unused(s, &s->picture[i]))
1450 av_log(s->avctx, AV_LOG_FATAL,
1451 "Internal error, picture buffer overflow\n");
1452 /* We could return -1, but the codec would crash trying to draw into a
1453 * non-existing frame anyway. This is safer than waiting for a random crash.
1454 * Also the return of this is never useful, an encoder must only allocate
1455 * as much as allowed in the specification. This has no relationship to how
1456 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1457 * enough for such valid streams).
1458 * Plus, a decoder has to check stream validity and remove frames if too
1459 * many reference frames are around. Waiting for "OOM" is not correct at
1460 * all. Similarly, missing reference frames have to be replaced by
1461 * interpolated/MC frames, anything else is a bug in the codec ...
/**
 * Public wrapper around find_unused_picture(): if the chosen slot is
 * flagged needs_realloc, fully reset it (side-data tables, buffer
 * references, frame defaults) before handing it out, so the caller
 * always receives a clean slot index.
 */
1467 int ff_find_unused_picture(MpegEncContext *s, int shared)
1469 int ret = find_unused_picture(s, shared);
1471 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1472 if (s->picture[ret].needs_realloc) {
1473 s->picture[ret].needs_realloc = 0;
1474 free_picture_tables(&s->picture[ret]);
1475 ff_mpeg_unref_picture(s, &s->picture[ret]);
1476 avcodec_get_frame_defaults(&s->picture[ret].f);
/**
 * Encoder noise reduction: refresh the per-coefficient DCT offsets for
 * intra and inter blocks from the running error statistics.  When the
 * sample count for a class exceeds 2^16, both the error sums and the
 * count are halved, giving exponential decay of old statistics.
 */
1482 static void update_noise_reduction(MpegEncContext *s)
1486 for (intra = 0; intra < 2; intra++) {
1487 if (s->dct_count[intra] > (1 << 16)) {
1488 for (i = 0; i < 64; i++) {
1489 s->dct_error_sum[intra][i] >>= 1;
1491 s->dct_count[intra] >>= 1;
/* offset = strength * count / avg_error, with rounding (+sum/2) and a
 * +1 in the divisor to avoid division by zero. */
1494 for (i = 0; i < 64; i++) {
1495 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1496 s->dct_count[intra] +
1497 s->dct_error_sum[intra][i] / 2) /
1498 (s->dct_error_sum[intra][i] + 1);
1504 * generic function for encode/decode called after coding/decoding
1505 * the header and before a frame is coded/decoded.
/**
 * Generic per-frame setup for encode/decode, called after the header is
 * parsed and before the frame is coded/decoded: rotates the
 * last/next/current picture pointers, allocates the current picture
 * (and dummy reference frames when references are missing), fixes up
 * field-picture pointers and selects the dequantizer functions.
 * Returns 0 on success, negative on error (several error paths are not
 * visible in this view).
 */
1507 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1513 if (!ff_thread_can_start_frame(avctx)) {
1514 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1518 /* mark & release old frames */
1519 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1520 s->last_picture_ptr != s->next_picture_ptr &&
1521 s->last_picture_ptr->f.buf[0]) {
1522 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1525 /* release forgotten pictures */
1526 /* if (mpeg124/h263) */
/* A referenced slot that is neither last nor next should not exist;
 * with frame threading it can legitimately linger, otherwise warn. */
1528 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1529 if (&s->picture[i] != s->last_picture_ptr &&
1530 &s->picture[i] != s->next_picture_ptr &&
1531 s->picture[i].reference && !s->picture[i].needs_realloc) {
1532 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1533 av_log(avctx, AV_LOG_ERROR,
1534 "releasing zombie picture\n");
1535 ff_mpeg_unref_picture(s, &s->picture[i]);
1540 ff_mpeg_unref_picture(s, &s->current_picture);
1543 ff_release_unused_pictures(s, 1);
/* Pick the Picture slot for the frame being started: reuse a
 * pre-selected empty current_picture_ptr, else grab a free slot. */
1545 if (s->current_picture_ptr &&
1546 s->current_picture_ptr->f.buf[0] == NULL) {
1547 // we already have a unused image
1548 // (maybe it was set before reading the header)
1549 pic = s->current_picture_ptr;
1551 i = ff_find_unused_picture(s, 0);
1553 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1556 pic = &s->picture[i];
1560 if (!s->droppable) {
1561 if (s->pict_type != AV_PICTURE_TYPE_B)
1565 pic->f.coded_picture_number = s->coded_picture_number++;
1567 if (ff_alloc_picture(s, pic, 0) < 0)
1570 s->current_picture_ptr = pic;
1571 // FIXME use only the vars from current_pic
/* Interlacing flags: for MPEG-1/2 field pictures, top_field_first is
 * derived from picture_structure and first_field. */
1572 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1573 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1574 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1575 if (s->picture_structure != PICT_FRAME)
1576 s->current_picture_ptr->f.top_field_first =
1577 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1579 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1580 !s->progressive_sequence;
1581 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1584 s->current_picture_ptr->f.pict_type = s->pict_type;
1585 // if (s->flags && CODEC_FLAG_QSCALE)
1586 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1587 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1589 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1590 s->current_picture_ptr)) < 0)
/* Reference rotation: non-B frames push next -> last, current -> next. */
1593 if (s->pict_type != AV_PICTURE_TYPE_B) {
1594 s->last_picture_ptr = s->next_picture_ptr;
1596 s->next_picture_ptr = s->current_picture_ptr;
1598 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1599 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1600 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1601 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1602 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1603 s->pict_type, s->droppable);
/* Missing last reference (stream starts on a non-keyframe, a B frame,
 * or a field keyframe): allocate a grey dummy picture so motion
 * compensation has something valid to read from. */
1605 if ((s->last_picture_ptr == NULL ||
1606 s->last_picture_ptr->f.buf[0] == NULL) &&
1607 (s->pict_type != AV_PICTURE_TYPE_I ||
1608 s->picture_structure != PICT_FRAME)) {
1609 int h_chroma_shift, v_chroma_shift;
1610 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1611 &h_chroma_shift, &v_chroma_shift);
1612 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f.buf[0])
1613 av_log(avctx, AV_LOG_DEBUG,
1614 "allocating dummy last picture for B frame\n");
1615 else if (s->pict_type != AV_PICTURE_TYPE_I)
1616 av_log(avctx, AV_LOG_ERROR,
1617 "warning: first frame is no keyframe\n");
1618 else if (s->picture_structure != PICT_FRAME)
1619 av_log(avctx, AV_LOG_DEBUG,
1620 "allocate dummy last picture for field based first keyframe\n");
1622 /* Allocate a dummy frame */
1623 i = ff_find_unused_picture(s, 0);
1625 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1628 s->last_picture_ptr = &s->picture[i];
1629 s->last_picture_ptr->f.key_frame = 0;
1630 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1631 s->last_picture_ptr = NULL;
/* Fill the dummy with mid-grey (0x80 in all planes); for H.263/FLV1
 * luma uses 16 (black in limited range) instead. */
1635 memset(s->last_picture_ptr->f.data[0], 0x80,
1636 avctx->height * s->last_picture_ptr->f.linesize[0]);
1637 memset(s->last_picture_ptr->f.data[1], 0x80,
1638 (avctx->height >> v_chroma_shift) *
1639 s->last_picture_ptr->f.linesize[1]);
1640 memset(s->last_picture_ptr->f.data[2], 0x80,
1641 (avctx->height >> v_chroma_shift) *
1642 s->last_picture_ptr->f.linesize[2]);
1644 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1645 for(i=0; i<avctx->height; i++)
1646 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
/* Mark both fields complete so frame threads never wait on the dummy. */
1649 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1650 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same for a missing next reference of a B frame. */
1652 if ((s->next_picture_ptr == NULL ||
1653 s->next_picture_ptr->f.buf[0] == NULL) &&
1654 s->pict_type == AV_PICTURE_TYPE_B) {
1655 /* Allocate a dummy frame */
1656 i = ff_find_unused_picture(s, 0);
1658 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1661 s->next_picture_ptr = &s->picture[i];
1662 s->next_picture_ptr->f.key_frame = 0;
1663 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1664 s->next_picture_ptr = NULL;
1667 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1668 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1671 #if 0 // BUFREF-FIXME
1672 memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1673 memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
/* Refresh the local last/next Picture copies from the pointers. */
1675 if (s->last_picture_ptr) {
1676 ff_mpeg_unref_picture(s, &s->last_picture);
1677 if (s->last_picture_ptr->f.buf[0] &&
1678 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1679 s->last_picture_ptr)) < 0)
1682 if (s->next_picture_ptr) {
1683 ff_mpeg_unref_picture(s, &s->next_picture);
1684 if (s->next_picture_ptr->f.buf[0] &&
1685 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1686 s->next_picture_ptr)) < 0)
1690 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1691 s->last_picture_ptr->f.buf[0]));
/* Field pictures: offset the bottom field by one line and double the
 * linesizes so the code addresses a single field. */
1693 if (s->picture_structure!= PICT_FRAME) {
1695 for (i = 0; i < 4; i++) {
1696 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1697 s->current_picture.f.data[i] +=
1698 s->current_picture.f.linesize[i];
1700 s->current_picture.f.linesize[i] *= 2;
1701 s->last_picture.f.linesize[i] *= 2;
1702 s->next_picture.f.linesize[i] *= 2;
1706 s->err_recognition = avctx->err_recognition;
1708 /* set dequantizer, we can't do it during init as
1709 * it might change for mpeg4 and we can't do it in the header
1710 * decode as init is not called for mpeg4 there yet */
1711 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1712 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1713 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1714 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1715 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1716 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1718 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1719 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1722 if (s->dct_error_sum) {
1723 av_assert2(s->avctx->noise_reduction && s->encoding);
1724 update_noise_reduction(s);
1728 FF_DISABLE_DEPRECATION_WARNINGS
1729 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1730 return ff_xvmc_field_start(s, avctx);
1731 FF_ENABLE_DEPRECATION_WARNINGS
1732 #endif /* FF_API_XVMC */
1737 /* generic function for encode/decode called after a
1738 * frame has been coded/decoded. */
/**
 * Generic per-frame teardown after a frame has been coded/decoded:
 * draws the replicated edge borders used by unrestricted MV prediction,
 * updates encoder bookkeeping, copies the current_picture back into its
 * slot, clears the local Picture copies and reports decode progress.
 */
1739 void ff_MPV_frame_end(MpegEncContext *s)
1742 FF_DISABLE_DEPRECATION_WARNINGS
1743 /* redraw edges for the frame if decoding didn't complete */
1744 // just to make sure that all data is rendered.
1745 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1746 ff_xvmc_field_end(s);
1748 FF_ENABLE_DEPRECATION_WARNINGS
1749 #endif /* FF_API_XVMC */
/* Edge drawing is skipped for hwaccel/VDPAU output, non-reference
 * frames, restricted-MV codecs and EMU_EDGE mode, where borders are
 * either unavailable or never read. */
1750 if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1751 !s->avctx->hwaccel &&
1752 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1753 s->unrestricted_mv &&
1754 s->current_picture.reference &&
1756 !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1759 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1760 int hshift = desc->log2_chroma_w;
1761 int vshift = desc->log2_chroma_h;
/* Chroma planes use edge sizes scaled down by the subsampling shifts. */
1762 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1763 s->h_edge_pos, s->v_edge_pos,
1764 EDGE_WIDTH, EDGE_WIDTH,
1765 EDGE_TOP | EDGE_BOTTOM);
1766 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1767 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1768 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1769 EDGE_TOP | EDGE_BOTTOM);
1770 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1771 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1772 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1773 EDGE_TOP | EDGE_BOTTOM);
/* Rate-control / picture-type bookkeeping for the next frame. */
1778 s->last_pict_type = s->pict_type;
1779 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1780 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1781 s->last_non_b_pict_type = s->pict_type;
1784 /* copy back current_picture variables */
1785 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1786 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1787 s->picture[i] = s->current_picture;
1791 av_assert0(i < MAX_PICTURE_COUNT);
1794 // clear copies, to avoid confusion
1796 memset(&s->last_picture, 0, sizeof(Picture));
1797 memset(&s->next_picture, 0, sizeof(Picture));
1798 memset(&s->current_picture, 0, sizeof(Picture));
1800 s->avctx->coded_frame = &s->current_picture_ptr->f;
/* Unblock frame-threaded consumers waiting on this reference frame. */
1802 if (s->current_picture.reference)
1803 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1807 * Draw a line from (ex, ey) -> (sx, sy).
1808 * @param w width of the image
1809 * @param h height of the image
1810 * @param stride stride/linesize of the image
1811 * @param color color of the arrow
/**
 * Draw an anti-aliased line into an 8-bit plane by ADDING color to the
 * pixels (debug visualization; see doc comment above).  Uses 16.16
 * fixed-point stepping along the major axis, splitting the color
 * between the two adjacent pixels by the fractional part.
 */
1813 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1814 int w, int h, int stride, int color)
/* Clamp both endpoints into the image. */
1818 sx = av_clip(sx, 0, w - 1);
1819 sy = av_clip(sy, 0, h - 1);
1820 ex = av_clip(ex, 0, w - 1);
1821 ey = av_clip(ey, 0, h - 1);
1823 buf[sy * stride + sx] += color;
/* Mostly-horizontal case: iterate over x; swap endpoints (when needed,
 * swap lines not visible here) so iteration runs in +x direction. */
1825 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1827 FFSWAP(int, sx, ex);
1828 FFSWAP(int, sy, ey);
1830 buf += sx + sy * stride;
/* f = slope in 16.16 fixed point; after the swap/offset, ex/ey are
 * relative to the start point. */
1832 f = ((ey - sy) << 16) / ex;
1833 for (x = 0; x <= ex; x++) {
1835 fr = (x * f) & 0xFFFF;
1836 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1837 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* Mostly-vertical case: same scheme with x and y exchanged. */
1841 FFSWAP(int, sx, ex);
1842 FFSWAP(int, sy, ey);
1844 buf += sx + sy * stride;
1847 f = ((ex - sx) << 16) / ey;
1850 for(y= 0; y <= ey; y++){
1852 fr = (y*f) & 0xFFFF;
1853 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1854 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1860 * Draw an arrow from (ex, ey) -> (sx, sy).
1861 * @param w width of the image
1862 * @param h height of the image
1863 * @param stride stride/linesize of the image
1864 * @param color color of the arrow
/**
 * Draw an arrow for motion-vector visualization (see doc comment
 * above): the shaft from (sx, sy) to (ex, ey), plus two short head
 * segments at (sx, sy) when the vector is long enough.
 */
1866 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1867 int ey, int w, int h, int stride, int color)
/* Loose clip: allow endpoints up to 100 px outside so draw_line's own
 * clamping produces a visible partial arrow at the border. */
1871 sx = av_clip(sx, -100, w + 100);
1872 sy = av_clip(sy, -100, h + 100);
1873 ex = av_clip(ex, -100, w + 100);
1874 ey = av_clip(ey, -100, h + 100);
/* Only vectors longer than 3 px get an arrow head. */
1879 if (dx * dx + dy * dy > 3 * 3) {
1882 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1884 // FIXME subpixel accuracy
/* Normalize (rx, ry) to a fixed head length; the two head strokes are
 * the normalized vector and its perpendicular. */
1885 rx = ROUNDED_DIV(rx * 3 << 4, length);
1886 ry = ROUNDED_DIV(ry * 3 << 4, length);
1888 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1889 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1891 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1895 * Print debugging info for the given picture.
/**
 * Print/visualize per-macroblock debug info for a decoded picture.
 * Two independent parts gated by avctx->debug / avctx->debug_mv:
 *  - a textual per-MB dump (skip count, QP, MB type) to the log;
 *  - an in-picture overlay (motion vectors as arrows, QP / MB type as
 *    chroma coloring, partition boundaries as XOR patterns), which
 *    MODIFIES the output frame.
 * Skipped entirely for hwaccel/VDPAU frames or when no mb_type table
 * is available.
 */
1897 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1899 int mb_width, int mb_height, int mb_stride, int quarter_sample)
1901 if (avctx->hwaccel || !p || !p->mb_type
1902 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
/* ---- Part 1: textual per-MB dump ---- */
1906 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1909 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1910 av_get_picture_type_char(pict->pict_type));
1911 for (y = 0; y < mb_height; y++) {
1912 for (x = 0; x < mb_width; x++) {
1913 if (avctx->debug & FF_DEBUG_SKIP) {
1914 int count = mbskip_table[x + y * mb_stride];
1917 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1919 if (avctx->debug & FF_DEBUG_QP) {
1920 av_log(avctx, AV_LOG_DEBUG, "%2d",
1921 p->qscale_table[x + y * mb_stride]);
1923 if (avctx->debug & FF_DEBUG_MB_TYPE) {
1924 int mb_type = p->mb_type[x + y * mb_stride];
1925 // Type & MV direction
1926 if (IS_PCM(mb_type))
1927 av_log(avctx, AV_LOG_DEBUG, "P");
1928 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1929 av_log(avctx, AV_LOG_DEBUG, "A");
1930 else if (IS_INTRA4x4(mb_type))
1931 av_log(avctx, AV_LOG_DEBUG, "i");
1932 else if (IS_INTRA16x16(mb_type))
1933 av_log(avctx, AV_LOG_DEBUG, "I");
1934 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1935 av_log(avctx, AV_LOG_DEBUG, "d");
1936 else if (IS_DIRECT(mb_type))
1937 av_log(avctx, AV_LOG_DEBUG, "D");
1938 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1939 av_log(avctx, AV_LOG_DEBUG, "g");
1940 else if (IS_GMC(mb_type))
1941 av_log(avctx, AV_LOG_DEBUG, "G");
1942 else if (IS_SKIP(mb_type))
1943 av_log(avctx, AV_LOG_DEBUG, "S");
1944 else if (!USES_LIST(mb_type, 1))
1945 av_log(avctx, AV_LOG_DEBUG, ">");
1946 else if (!USES_LIST(mb_type, 0))
1947 av_log(avctx, AV_LOG_DEBUG, "<");
1949 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1950 av_log(avctx, AV_LOG_DEBUG, "X");
/* Second character: partition shape. */
1954 if (IS_8X8(mb_type))
1955 av_log(avctx, AV_LOG_DEBUG, "+");
1956 else if (IS_16X8(mb_type))
1957 av_log(avctx, AV_LOG_DEBUG, "-");
1958 else if (IS_8X16(mb_type))
1959 av_log(avctx, AV_LOG_DEBUG, "|");
1960 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1961 av_log(avctx, AV_LOG_DEBUG, " ");
1963 av_log(avctx, AV_LOG_DEBUG, "?");
/* Third character: interlaced flag. */
1966 if (IS_INTERLACED(mb_type))
1967 av_log(avctx, AV_LOG_DEBUG, "=");
1969 av_log(avctx, AV_LOG_DEBUG, " ");
1972 av_log(avctx, AV_LOG_DEBUG, "\n");
/* ---- Part 2: draw overlays into the frame itself ---- */
1976 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1977 (avctx->debug_mv)) {
1978 const int shift = 1 + quarter_sample;
1982 int h_chroma_shift, v_chroma_shift, block_height;
1983 const int width = avctx->width;
1984 const int height = avctx->height;
/* H.264/SVQ3 store MVs at 4x4 granularity, others at 8x8. */
1985 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1986 const int mv_stride = (mb_width << mv_sample_log2) +
1987 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1989 *low_delay = 0; // needed to see the vectors without trashing the buffers
1991 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1993 av_frame_make_writable(pict);
1995 pict->opaque = NULL;
1996 ptr = pict->data[0];
1997 block_height = 16 >> v_chroma_shift;
1999 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2001 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2002 const int mb_index = mb_x + mb_y * mb_stride;
/* Motion vectors: three passes select P-forward, B-forward and
 * B-backward vectors according to the debug_mv flags. */
2003 if ((avctx->debug_mv) && p->motion_val[0]) {
2005 for (type = 0; type < 3; type++) {
2009 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2010 (pict->pict_type!= AV_PICTURE_TYPE_P))
2015 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2016 (pict->pict_type!= AV_PICTURE_TYPE_B))
2021 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2022 (pict->pict_type!= AV_PICTURE_TYPE_B))
2027 if (!USES_LIST(p->mb_type[mb_index], direction))
/* One arrow per partition: 4 for 8x8, 2 for 16x8 / 8x16,
 * 1 for 16x16; start points are the partition centers. */
2030 if (IS_8X8(p->mb_type[mb_index])) {
2032 for (i = 0; i < 4; i++) {
2033 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2034 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2035 int xy = (mb_x * 2 + (i & 1) +
2036 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2037 int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
2038 int my = (p->motion_val[direction][xy][1] >> shift) + sy;
2039 draw_arrow(ptr, sx, sy, mx, my, width,
2040 height, pict->linesize[0], 100);
2042 } else if (IS_16X8(p->mb_type[mb_index])) {
2044 for (i = 0; i < 2; i++) {
2045 int sx = mb_x * 16 + 8;
2046 int sy = mb_y * 16 + 4 + 8 * i;
2047 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2048 int mx = (p->motion_val[direction][xy][0] >> shift);
2049 int my = (p->motion_val[direction][xy][1] >> shift);
2051 if (IS_INTERLACED(p->mb_type[mb_index]))
2054 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2055 height, pict->linesize[0], 100);
2057 } else if (IS_8X16(p->mb_type[mb_index])) {
2059 for (i = 0; i < 2; i++) {
2060 int sx = mb_x * 16 + 4 + 8 * i;
2061 int sy = mb_y * 16 + 8;
2062 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2063 int mx = p->motion_val[direction][xy][0] >> shift;
2064 int my = p->motion_val[direction][xy][1] >> shift;
2066 if (IS_INTERLACED(p->mb_type[mb_index]))
2069 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2070 height, pict->linesize[0], 100);
2073 int sx= mb_x * 16 + 8;
2074 int sy= mb_y * 16 + 8;
2075 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2076 int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2077 int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2078 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
/* QP visualization: flood the MB's chroma block with a grey level
 * proportional to the quantizer (replicated via the 0x0101... trick). */
2082 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2083 uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2084 0x0101010101010101ULL;
2086 for (y = 0; y < block_height; y++) {
2087 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2088 (block_height * mb_y + y) *
2089 pict->linesize[1]) = c;
2090 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2091 (block_height * mb_y + y) *
2092 pict->linesize[2]) = c;
/* MB type visualization: pick a U/V color per type on a hue circle. */
2095 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2097 int mb_type = p->mb_type[mb_index];
2100 #define COLOR(theta, r) \
2101 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2102 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2106 if (IS_PCM(mb_type)) {
2108 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2109 IS_INTRA16x16(mb_type)) {
2111 } else if (IS_INTRA4x4(mb_type)) {
2113 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2115 } else if (IS_DIRECT(mb_type)) {
2117 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2119 } else if (IS_GMC(mb_type)) {
2121 } else if (IS_SKIP(mb_type)) {
2123 } else if (!USES_LIST(mb_type, 1)) {
2125 } else if (!USES_LIST(mb_type, 0)) {
2128 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2132 u *= 0x0101010101010101ULL;
2133 v *= 0x0101010101010101ULL;
2134 for (y = 0; y < block_height; y++) {
2135 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2136 (block_height * mb_y + y) * pict->linesize[1]) = u;
2137 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2138 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* Partition boundaries: XOR luma with 0x80 along the internal
 * horizontal/vertical split lines of the macroblock. */
2142 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2143 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2144 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2145 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2146 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2148 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2149 for (y = 0; y < 16; y++)
2150 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2151 pict->linesize[0]] ^= 0x80;
/* For 4x4-granularity MV codecs, also mark 4x4 sub-splits of an 8x8
 * block when its sub-blocks carry different vectors. */
2153 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2154 int dm = 1 << (mv_sample_log2 - 2);
2155 for (i = 0; i < 4; i++) {
2156 int sx = mb_x * 16 + 8 * (i & 1);
2157 int sy = mb_y * 16 + 8 * (i >> 1);
2158 int xy = (mb_x * 2 + (i & 1) +
2159 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2161 int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2162 if (mv[0] != mv[dm] ||
2163 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2164 for (y = 0; y < 8; y++)
2165 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2166 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2167 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2168 pict->linesize[0]) ^= 0x8080808080808080ULL;
2172 if (IS_INTERLACED(mb_type) &&
2173 avctx->codec->id == AV_CODEC_ID_H264) {
/* Reset the skip count so next frame's dump starts fresh. */
2177 mbskip_table[mb_index] = 0;
/**
 * Convenience wrapper: forward the MpegEncContext geometry and tables
 * to ff_print_debug_info2().
 */
2183 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2185 ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2186 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/**
 * Export the picture's qscale table as AVFrame side data: take a new
 * reference on the table buffer, skip the leading padding (one full MB
 * row plus one entry) so the data starts at the first real MB, and
 * attach it via av_frame_set_qp_table().  Returns 0 on success or a
 * negative AVERROR (ENOMEM when the buffer ref fails).
 */
2189 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2191 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2192 int offset = 2*s->mb_stride + 1;
2194 return AVERROR(ENOMEM);
/* Sanity: the buffer must cover the offset plus one row per 16 pixels
 * of frame height. */
2195 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2196 ref->size -= offset;
2197 ref->data += offset;
2198 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/**
 * Half-pel motion compensation of one block in lowres decoding mode:
 * split the motion vector into an integer source offset and a
 * sub-pel fraction (s_mask bits), emulate picture edges when the
 * source area extends outside the valid region, and let the
 * h264_chroma-style pix_op do the sub-pel interpolation.
 */
2201 static inline int hpel_motion_lowres(MpegEncContext *s,
2202 uint8_t *dest, uint8_t *src,
2203 int field_based, int field_select,
2204 int src_x, int src_y,
2205 int width, int height, ptrdiff_t stride,
2206 int h_edge_pos, int v_edge_pos,
2207 int w, int h, h264_chroma_mc_func *pix_op,
2208 int motion_x, int motion_y)
2210 const int lowres = s->avctx->lowres;
/* pix_op variants only exist up to index 3. */
2211 const int op_index = FFMIN(lowres, 3);
/* Mask selecting the sub-pel fraction bits of the MV at this lowres. */
2212 const int s_mask = (2 << lowres) - 1;
/* Quarter-sample MVs are reduced to half-sample first (reduction code
 * not visible in this view). */
2216 if (s->quarter_sample) {
2221 sx = motion_x & s_mask;
2222 sy = motion_y & s_mask;
/* NB: ">> lowres + 1" parses as ">> (lowres + 1)" -- integer part of
 * the vector at lowres scale. */
2223 src_x += motion_x >> lowres + 1;
2224 src_y += motion_y >> lowres + 1;
2226 src += src_y * stride + src_x;
/* If any sampled pixel would fall outside the decoded area, build an
 * edge-replicated copy in edge_emu_buffer and read from that. */
2228 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2229 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2230 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
2231 s->linesize, s->linesize,
2232 w + 1, (h + 1) << field_based,
2233 src_x, src_y << field_based,
2234 h_edge_pos, v_edge_pos);
2235 src = s->edge_emu_buffer;
/* Rescale the sub-pel fraction to the 1/8-pel units pix_op expects. */
2239 sx = (sx << 2) >> lowres;
2240 sy = (sy << 2) >> lowres;
2243 pix_op[op_index](dest, src, stride, h, sx, sy);
2247 /* apply one mpeg motion vector to the three components */
/*
 * Motion-compensate one macroblock (luma + both chroma planes) from a
 * single MPEG motion vector while decoding at reduced resolution.
 * Chroma vector/position derivation depends on the output format
 * (H.263, H.261, 4:2:0 / 4:2:2 / 4:4:4 paths below).
 * NOTE(review): listing has gaps; comments only were added.
 */
2248 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2255 uint8_t **ref_picture,
2256 h264_chroma_mc_func *pix_op,
2257 int motion_x, int motion_y,
2260 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2261 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2262 ptrdiff_t uvlinesize, linesize;
2263 const int lowres = s->avctx->lowres;
2264 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2265 const int block_s = 8>>lowres;
2266 const int s_mask = (2 << lowres) - 1;
2267 const int h_edge_pos = s->h_edge_pos >> lowres;
2268 const int v_edge_pos = s->v_edge_pos >> lowres;
/* use the picture's linesize, doubled for field-based MC */
2269 linesize = s->current_picture.f.linesize[0] << field_based;
2270 uvlinesize = s->current_picture.f.linesize[1] << field_based;
2272 // FIXME obviously not perfect but qpel will not work in lowres anyway
2273 if (s->quarter_sample) {
2279 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
/* luma: split vector into sub-pel fraction and integer source position */
2281 sx = motion_x & s_mask;
2283 sy = motion_y & s_mask;
2284 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2285 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2287 if (s->out_format == FMT_H263) {
2288 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2289 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2290 uvsrc_x = src_x >> 1;
2291 uvsrc_y = src_y >> 1;
2292 } else if (s->out_format == FMT_H261) {
2293 // even chroma mv's are full pel in H261
2296 uvsx = (2 * mx) & s_mask;
2297 uvsy = (2 * my) & s_mask;
2298 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2299 uvsrc_y = mb_y * block_s + (my >> lowres);
/* MPEG-like formats: branch on chroma subsampling */
2301 if(s->chroma_y_shift){
2306 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2307 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2309 if(s->chroma_x_shift){
2313 uvsy = motion_y & s_mask;
2315 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
/* 4:4:4: chroma uses the full-resolution luma vector */
2318 uvsx = motion_x & s_mask;
2319 uvsy = motion_y & s_mask;
2326 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2327 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2328 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* out-of-picture reads go through the edge-emulation buffer */
2330 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2331 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2332 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2333 linesize >> field_based, linesize >> field_based,
2334 17, 17 + field_based,
2335 src_x, src_y << field_based, h_edge_pos,
2337 ptr_y = s->edge_emu_buffer;
2338 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2339 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2340 s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
2341 uvlinesize >> field_based, uvlinesize >> field_based,
2343 uvsrc_x, uvsrc_y << field_based,
2344 h_edge_pos >> 1, v_edge_pos >> 1);
2345 s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
2346 uvlinesize >> field_based,uvlinesize >> field_based,
2348 uvsrc_x, uvsrc_y << field_based,
2349 h_edge_pos >> 1, v_edge_pos >> 1);
2351 ptr_cr = uvbuf + 16;
2355 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
/* bottom field: advance dest and src by one line */
2357 dest_y += s->linesize;
2358 dest_cb += s->uvlinesize;
2359 dest_cr += s->uvlinesize;
2363 ptr_y += s->linesize;
2364 ptr_cb += s->uvlinesize;
2365 ptr_cr += s->uvlinesize;
/* rescale sub-pel fractions into pix_op's 1/8-pel domain */
2368 sx = (sx << 2) >> lowres;
2369 sy = (sy << 2) >> lowres;
2370 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2372 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2373 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2374 uvsx = (uvsx << 2) >> lowres;
2375 uvsy = (uvsy << 2) >> lowres;
2377 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2378 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2381 // FIXME h261 lowres loop filter
/*
 * Chroma motion compensation for a 4MV (four 8x8 luma vectors)
 * macroblock in lowres mode: the four luma vectors are reduced to one
 * chroma vector with H.263 rounding, then both chroma planes are
 * compensated with the h264 chroma MC routine.
 * NOTE(review): listing has gaps; comments only were added.
 */
2384 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2385 uint8_t *dest_cb, uint8_t *dest_cr,
2386 uint8_t **ref_picture,
2387 h264_chroma_mc_func * pix_op,
2390 const int lowres = s->avctx->lowres;
2391 const int op_index = FFMIN(lowres, 3);
2392 const int block_s = 8 >> lowres;
2393 const int s_mask = (2 << lowres) - 1;
/* ">> lowres + 1" == ">> (lowres + 1)": chroma is half luma resolution */
2394 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2395 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2396 int emu = 0, src_x, src_y, sx, sy;
2400 if (s->quarter_sample) {
2405 /* In case of 8X8, we construct a single chroma motion vector
2406 with a special rounding */
2407 mx = ff_h263_round_chroma(mx);
2408 my = ff_h263_round_chroma(my);
2412 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2413 src_y = s->mb_y * block_s + (my >> lowres + 1);
2415 offset = src_y * s->uvlinesize + src_x;
2416 ptr = ref_picture[1] + offset;
/* Cb: detour through the edge-emulation buffer if we read off-picture */
2417 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2418 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2419 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2420 s->uvlinesize, s->uvlinesize,
2422 src_x, src_y, h_edge_pos, v_edge_pos);
2423 ptr = s->edge_emu_buffer;
2426 sx = (sx << 2) >> lowres;
2427 sy = (sy << 2) >> lowres;
2428 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr: same source offset and sub-pel position as Cb */
2430 ptr = ref_picture[2] + offset;
2432 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
2433 s->uvlinesize, s->uvlinesize,
2435 src_x, src_y, h_edge_pos, v_edge_pos);
2436 ptr = s->edge_emu_buffer;
2438 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2442 * motion compensation of a single macroblock
2444 * @param dest_y luma destination pointer
2445 * @param dest_cb chroma cb/u destination pointer
2446 * @param dest_cr chroma cr/v destination pointer
2447 * @param dir direction (0->forward, 1->backward)
2448 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2449 * @param pix_op halfpel motion compensation function (average or put normally)
2450 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/*
 * Lowres counterpart of ff_MPV_motion(): dispatches on s->mv_type
 * (16x16, 8x8/4MV, field, 16x8, dual-prime paths visible below) and
 * performs the motion compensation via mpeg_motion_lowres() /
 * hpel_motion_lowres() / chroma_4mv_motion_lowres().
 * NOTE(review): listing has gaps (case labels etc. missing); comments
 * only were added.
 */
2452 static inline void MPV_motion_lowres(MpegEncContext *s,
2453 uint8_t *dest_y, uint8_t *dest_cb,
2455 int dir, uint8_t **ref_picture,
2456 h264_chroma_mc_func *pix_op)
2460 const int lowres = s->avctx->lowres;
2461 const int block_s = 8 >>lowres;
2466 switch (s->mv_type) {
/* single 16x16 vector for the whole macroblock */
2468 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2470 ref_picture, pix_op,
2471 s->mv[dir][0][0], s->mv[dir][0][1],
/* 4MV: one vector per 8x8 luma block, chroma handled separately */
2477 for (i = 0; i < 4; i++) {
2478 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2479 s->linesize) * block_s,
2480 ref_picture[0], 0, 0,
2481 (2 * mb_x + (i & 1)) * block_s,
2482 (2 * mb_y + (i >> 1)) * block_s,
2483 s->width, s->height, s->linesize,
2484 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2485 block_s, block_s, pix_op,
2486 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate the four luma vectors for the derived chroma vector */
2488 mx += s->mv[dir][i][0];
2489 my += s->mv[dir][i][1];
2492 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2493 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field motion: two half-height compensations in a frame picture */
2497 if (s->picture_structure == PICT_FRAME) {
2499 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2500 1, 0, s->field_select[dir][0],
2501 ref_picture, pix_op,
2502 s->mv[dir][0][0], s->mv[dir][0][1],
2505 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2506 1, 1, s->field_select[dir][1],
2507 ref_picture, pix_op,
2508 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture: same-parity reference may live in the current frame */
2511 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2512 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2513 ref_picture = s->current_picture_ptr->f.data;
2516 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2517 0, 0, s->field_select[dir][0],
2518 ref_picture, pix_op,
2520 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two vectors, each covering half the macroblock vertically */
2524 for (i = 0; i < 2; i++) {
2525 uint8_t **ref2picture;
2527 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2528 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2529 ref2picture = ref_picture;
2531 ref2picture = s->current_picture_ptr->f.data;
2534 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2535 0, 0, s->field_select[dir][i],
2536 ref2picture, pix_op,
2537 s->mv[dir][i][0], s->mv[dir][i][1] +
2538 2 * block_s * i, block_s, mb_y >> 1);
2540 dest_y += 2 * block_s * s->linesize;
2541 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2542 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* dual prime: put the first prediction, average in the second */
2546 if (s->picture_structure == PICT_FRAME) {
2547 for (i = 0; i < 2; i++) {
2549 for (j = 0; j < 2; j++) {
2550 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2552 ref_picture, pix_op,
2553 s->mv[dir][2 * i + j][0],
2554 s->mv[dir][2 * i + j][1],
2557 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2560 for (i = 0; i < 2; i++) {
2561 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2562 0, 0, s->picture_structure != i + 1,
2563 ref_picture, pix_op,
2564 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2565 2 * block_s, mb_y >> 1);
2567 // after put we make avg of the same block
2568 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2570 // opposite parity is always in the same
2571 // frame if this is second field
2572 if (!s->first_field) {
2573 ref_picture = s->current_picture_ptr->f.data;
2584 * find the lowest MB row referenced in the MVs
/*
 * Used by frame-threaded decoding to know how far the reference frame
 * must have been decoded before this MB's MC can run.  Returns the MB
 * row clamped to [0, mb_height-1]; for field pictures / mcsel the
 * conservative last row is returned (see the early return below).
 */
2586 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2588 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2589 int my, off, i, mvs;
2591 if (s->picture_structure != PICT_FRAME || s->mcsel)
2594 switch (s->mv_type) {
/* scan all vertical MV components and track their extremes */
2608 for (i = 0; i < mvs; i++) {
2609 my = s->mv[dir][i][1]<<qpel_shift;
2610 my_max = FFMAX(my_max, my);
2611 my_min = FFMIN(my_min, my);
/* worst-case vertical reach in MB rows (1/4-pel units, +63 rounds up) */
2614 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2616 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2618 return s->mb_height-1;
2621 /* put block[] to dest[] */
/* Dequantize an intra block and write (overwrite) its IDCT to dest. */
2622 static inline void put_dct(MpegEncContext *s,
2623 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2625 s->dct_unquantize_intra(s, block, i, qscale);
2626 s->dsp.idct_put (dest, line_size, block);
2629 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block to dest (skip if empty). */
2630 static inline void add_dct(MpegEncContext *s,
2631 int16_t *block, int i, uint8_t *dest, int line_size)
2633 if (s->block_last_index[i] >= 0) {
2634 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block and add its IDCT to dest (skip if empty). */
2638 static inline void add_dequant_dct(MpegEncContext *s,
2639 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2641 if (s->block_last_index[i] >= 0) {
2642 s->dct_unquantize_inter(s, block, i, qscale);
2644 s->dsp.idct_add (dest, line_size, block);
2649 * Clean dc, ac, coded_block for the current non-intra MB.
/*
 * Resets the DC predictors to 1024 and zeroes the AC prediction and
 * coded_block state for the current macroblock, so a following intra MB
 * starts from neutral predictors.  Luma uses b8_stride indexing (4
 * blocks), chroma uses mb_stride indexing (1 block per plane).
 */
2651 void ff_clean_intra_table_entries(MpegEncContext *s)
2653 int wrap = s->b8_stride;
2654 int xy = s->block_index[0];
/* luma DC predictors for the 2x2 group of 8x8 blocks */
2657 s->dc_val[0][xy + 1 ] =
2658 s->dc_val[0][xy + wrap] =
2659 s->dc_val[0][xy + 1 + wrap] = 1024;
/* luma AC predictors (two rows of two blocks) */
2661 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2662 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2663 if (s->msmpeg4_version>=3) {
2664 s->coded_block[xy ] =
2665 s->coded_block[xy + 1 ] =
2666 s->coded_block[xy + wrap] =
2667 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: switch to MB-granular indexing */
2670 wrap = s->mb_stride;
2671 xy = s->mb_x + s->mb_y * wrap;
2673 s->dc_val[2][xy] = 1024;
2675 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2676 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2678 s->mbintra_table[xy]= 0;
2681 /* generic function called after a macroblock has been parsed by the
2682 decoder or after it has been encoded by the encoder.
2684 Important variables used:
2685 s->mb_intra : true if intra macroblock
2686 s->mv_dir : motion vector direction
2687 s->mv_type : motion vector type
2688 s->mv : motion vector
2689 s->interlaced_dct : true if interlaced dct used (mpeg2)
/*
 * Reconstruct one macroblock: motion compensation (normal or lowres),
 * then dequant + IDCT of the residual, then optional write-back of the
 * reconstructed pixels.  Always inlined so the lowres_flag/is_mpeg12
 * template parameters fold to constants in each instantiation (see
 * ff_MPV_decode_mb below).
 * NOTE(review): this listing has gaps (missing original lines inside
 * several branches); code is byte-identical, comments only were added.
 */
2691 static av_always_inline
2692 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2693 int lowres_flag, int is_mpeg12)
2695 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2698 FF_DISABLE_DEPRECATION_WARNINGS
2699 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2700 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2703 FF_ENABLE_DEPRECATION_WARNINGS
2704 #endif /* FF_API_XVMC */
2706 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2707 /* print DCT coefficients */
2709 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2711 for(j=0; j<64; j++){
2712 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2714 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2718 s->current_picture.qscale_table[mb_xy] = s->qscale;
2720 /* update DC predictors for P macroblocks */
2722 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2723 if(s->mbintra_table[mb_xy])
2724 ff_clean_intra_table_entries(s);
2728 s->last_dc[2] = 128 << s->intra_dc_precision;
2731 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2732 s->mbintra_table[mb_xy]=1;
/* skip full reconstruction while encoding unless the pixels are needed */
2734 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2735 uint8_t *dest_y, *dest_cb, *dest_cr;
2736 int dct_linesize, dct_offset;
2737 op_pixels_func (*op_pix)[4];
2738 qpel_mc_func (*op_qpix)[16];
2739 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2740 const int uvlinesize = s->current_picture.f.linesize[1];
2741 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2742 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2744 /* avoid copy if macroblock skipped in last frame too */
2745 /* skip only during decoding as we might trash the buffers during encoding a bit */
2747 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2749 if (s->mb_skipped) {
2751 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2753 } else if(!s->current_picture.reference) {
2756 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT halves the block height but doubles the stride */
2760 dct_linesize = linesize << s->interlaced_dct;
2761 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2765 dest_cb= s->dest[1];
2766 dest_cr= s->dest[2];
/* not readable: reconstruct into a scratchpad, copied out at the end */
2768 dest_y = s->b_scratchpad;
2769 dest_cb= s->b_scratchpad+16*linesize;
2770 dest_cr= s->b_scratchpad+32*linesize;
2774 /* motion handling */
2775 /* decoding or more than one mb_type (MC was already done otherwise) */
2778 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
/* frame threading: wait until the referenced rows are decoded */
2779 if (s->mv_dir & MV_DIR_FORWARD) {
2780 ff_thread_await_progress(&s->last_picture_ptr->tf,
2781 ff_MPV_lowest_referenced_row(s, 0),
2784 if (s->mv_dir & MV_DIR_BACKWARD) {
2785 ff_thread_await_progress(&s->next_picture_ptr->tf,
2786 ff_MPV_lowest_referenced_row(s, 1),
/* lowres path: put forward prediction, average in backward */
2792 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2794 if (s->mv_dir & MV_DIR_FORWARD) {
2795 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2796 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2798 if (s->mv_dir & MV_DIR_BACKWARD) {
2799 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* full-resolution path */
2802 op_qpix = s->me.qpel_put;
2803 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2804 op_pix = s->hdsp.put_pixels_tab;
2806 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2808 if (s->mv_dir & MV_DIR_FORWARD) {
2809 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2810 op_pix = s->hdsp.avg_pixels_tab;
2811 op_qpix= s->me.qpel_avg;
2813 if (s->mv_dir & MV_DIR_BACKWARD) {
2814 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2819 /* skip dequant / idct if we are really late ;) */
2820 if(s->avctx->skip_idct){
2821 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2822 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2823 || s->avctx->skip_idct >= AVDISCARD_ALL)
2827 /* add dct residue */
/* codecs that keep quantized coefficients: dequantize while adding */
2828 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2829 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2830 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2831 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2832 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2833 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2835 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2836 if (s->chroma_y_shift){
2837 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2838 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2842 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2843 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2844 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2845 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* coefficients already dequantized by the bitstream parser */
2848 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2849 add_dct(s, block[0], 0, dest_y , dct_linesize);
2850 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2851 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2852 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2854 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2855 if(s->chroma_y_shift){//Chroma420
2856 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2857 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2860 dct_linesize = uvlinesize << s->interlaced_dct;
2861 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2863 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2864 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2865 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2866 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2867 if(!s->chroma_x_shift){//Chroma444
2868 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2869 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2870 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2871 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2876 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2877 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2880 /* dct only in intra block */
2881 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2882 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2883 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2884 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2885 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2887 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2888 if(s->chroma_y_shift){
2889 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2890 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2894 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2895 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2896 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2897 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* intra, coefficients already dequantized: straight IDCT put */
2901 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2902 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2903 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2904 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2906 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2907 if(s->chroma_y_shift){
2908 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2909 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2912 dct_linesize = uvlinesize << s->interlaced_dct;
2913 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2915 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2916 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2917 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2918 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2919 if(!s->chroma_x_shift){//Chroma444
2920 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2921 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2922 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2923 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* copy the scratchpad reconstruction into the real picture */
2931 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2932 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2933 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/*
 * Public MB-decode entry point: selects one of the four inlined
 * MPV_decode_mb_internal() instantiations (lowres x mpeg12) so the
 * template flags become compile-time constants.
 */
2938 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2940 if(s->out_format == FMT_MPEG1) {
2941 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2942 else MPV_decode_mb_internal(s, block, 0, 1);
2945 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2946 else MPV_decode_mb_internal(s, block, 0, 0);
2950 * @param h is the normal height, this will be reduced automatically if needed for the last row
/*
 * Draw the edge padding for a finished band of rows and, if the caller
 * registered draw_horiz_band, hand the band to the application.  For B
 * frames / low delay the current picture is shown, otherwise the last
 * one (selection logic partly in lines missing from this listing).
 */
2952 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2953 Picture *last, int y, int h, int picture_structure,
2954 int first_field, int draw_edges, int low_delay,
2955 int v_edge_pos, int h_edge_pos)
2957 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2958 int hshift = desc->log2_chroma_w;
2959 int vshift = desc->log2_chroma_h;
2960 const int field_pic = picture_structure != PICT_FRAME;
/* pad the picture edges unless a hw path or EMU_EDGE makes it pointless */
2966 if (!avctx->hwaccel &&
2967 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2970 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2971 int *linesize = cur->f.linesize;
2972 int sides = 0, edge_h;
2973 if (y==0) sides |= EDGE_TOP;
2974 if (y + h >= v_edge_pos)
2975 sides |= EDGE_BOTTOM;
2977 edge_h= FFMIN(h, v_edge_pos - y);
2979 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2980 linesize[0], h_edge_pos, edge_h,
2981 EDGE_WIDTH, EDGE_WIDTH, sides);
2982 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2983 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2984 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2985 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2986 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2987 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
/* clip band height at the bottom of the visible picture */
2990 h = FFMIN(h, avctx->height - y);
2992 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2994 if (avctx->draw_horiz_band) {
2996 int offset[AV_NUM_DATA_POINTERS];
2999 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
3000 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
3007 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
3008 picture_structure == PICT_FRAME &&
3009 avctx->codec_id != AV_CODEC_ID_SVQ3) {
3010 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
3013 offset[0]= y * src->linesize[0];
3015 offset[2]= (y >> vshift) * src->linesize[1];
3016 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
3022 avctx->draw_horiz_band(avctx, src, offset,
3023 y, picture_structure, h);
/* Convenience wrapper: forward the MpegEncContext state to
 * ff_draw_horiz_band(); edges are drawn only for unrestricted-MV,
 * non-intra-only streams. */
3027 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3029 int draw_edges = s->unrestricted_mv && !s->intra_only;
3030 ff_draw_horiz_band(s->avctx, &s->dsp, s->current_picture_ptr,
3031 s->last_picture_ptr, y, h, s->picture_structure,
3032 s->first_field, draw_edges, s->low_delay,
3033 s->v_edge_pos, s->h_edge_pos);
/*
 * Set up per-MB block indexes and the three destination pointers
 * (s->dest[0..2]) for the current mb_x/mb_y.  mb_size folds the lowres
 * shift into the pointer arithmetic; field pictures use mb_y>>1 and an
 * interleaved-line hack handled elsewhere.
 */
3036 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3037 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
3038 const int uvlinesize = s->current_picture.f.linesize[1];
3039 const int mb_size= 4 - s->avctx->lowres;
/* 4 luma block indexes (b8 granularity), then Cb and Cr */
3041 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3042 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3043 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3044 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3045 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3046 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3047 //block_index is not used by mpeg2, so it is not affected by chroma_format
3049 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
3050 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3051 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3053 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3055 if(s->picture_structure==PICT_FRAME){
3056 s->dest[0] += s->mb_y * linesize << mb_size;
3057 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3058 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: each field holds every other MB row */
3060 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3061 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3062 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3063 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3069 * Permute an 8x8 block.
3070 * @param block the block which will be permuted according to the given permutation vector
3071 * @param permutation the permutation vector
3072 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3073 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3074 * (inverse) permutated to scantable order!
/* Two passes over coefficients 0..last in scan order: first copy them
 * into a temp array (pass partly missing from this listing), then write
 * each back at its permuted position. */
3076 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3082 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
3084 for(i=0; i<=last; i++){
3085 const int j= scantable[i];
3090 for(i=0; i<=last; i++){
3091 const int j= scantable[i];
3092 const int perm_j= permutation[j];
3093 block[perm_j]= temp[j];
/*
 * Flush all decoder state on seek: drop every picture reference, reset
 * MB position, and clear the parser / bitstream buffer so decoding can
 * restart cleanly from the next keyframe.
 */
3097 void ff_mpeg_flush(AVCodecContext *avctx){
3099 MpegEncContext *s = avctx->priv_data;
3101 if(s==NULL || s->picture==NULL)
3104 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3105 ff_mpeg_unref_picture(s, &s->picture[i]);
3106 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3108 ff_mpeg_unref_picture(s, &s->current_picture);
3109 ff_mpeg_unref_picture(s, &s->last_picture);
3110 ff_mpeg_unref_picture(s, &s->next_picture);
3112 s->mb_x= s->mb_y= 0;
/* reset the frame-boundary parser state */
3115 s->parse_context.state= -1;
3116 s->parse_context.frame_start_found= 0;
3117 s->parse_context.overread= 0;
3118 s->parse_context.overread_index= 0;
3119 s->parse_context.index= 0;
3120 s->parse_context.last_index= 0;
3121 s->bitstream_buffer_size=0;
/*
 * MPEG-1 intra dequantizer: DC is scaled by the y/c DC scale, AC
 * coefficients by qscale * intra matrix (>>3) with the MPEG-1
 * oddification "(level - 1) | 1".  Sign handling lives in lines missing
 * from this listing.
 */
3125 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
3126 int16_t *block, int n, int qscale)
3128 int i, level, nCoeffs;
3129 const uint16_t *quant_matrix;
3131 nCoeffs= s->block_last_index[n];
/* blocks 0-3 are luma, 4+ chroma */
3133 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3134 /* XXX: only mpeg1 */
3135 quant_matrix = s->intra_matrix;
3136 for(i=1;i<=nCoeffs;i++) {
3137 int j= s->intra_scantable.permutated[i];
3142 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3143 level = (level - 1) | 1;
3146 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3147 level = (level - 1) | 1;
/*
 * MPEG-1 inter dequantizer: all coefficients (including index 0) use
 * the inter matrix with the (2*level+1) reconstruction and MPEG-1
 * oddification.
 */
3154 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
3155 int16_t *block, int n, int qscale)
3157 int i, level, nCoeffs;
3158 const uint16_t *quant_matrix;
3160 nCoeffs= s->block_last_index[n];
3162 quant_matrix = s->inter_matrix;
3163 for(i=0; i<=nCoeffs; i++) {
3164 int j= s->intra_scantable.permutated[i];
3169 level = (((level << 1) + 1) * qscale *
3170 ((int) (quant_matrix[j]))) >> 4;
3171 level = (level - 1) | 1;
3174 level = (((level << 1) + 1) * qscale *
3175 ((int) (quant_matrix[j]))) >> 4;
3176 level = (level - 1) | 1;
/*
 * MPEG-2 intra dequantizer: like the MPEG-1 variant but without the
 * oddification step; with alternate scan all 64 coefficients are
 * processed regardless of block_last_index.
 */
3183 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
3184 int16_t *block, int n, int qscale)
3186 int i, level, nCoeffs;
3187 const uint16_t *quant_matrix;
3189 if(s->alternate_scan) nCoeffs= 63;
3190 else nCoeffs= s->block_last_index[n];
3192 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3193 quant_matrix = s->intra_matrix;
3194 for(i=1;i<=nCoeffs;i++) {
3195 int j= s->intra_scantable.permutated[i];
3200 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3203 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * Bitexact variant of the MPEG-2 intra dequantizer; presumably tracks
 * a mismatch-control sum required for spec-exact output (the extra
 * state lives in lines missing from this listing) — verify against the
 * full source.
 */
3210 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
3211 int16_t *block, int n, int qscale)
3213 int i, level, nCoeffs;
3214 const uint16_t *quant_matrix;
3217 if(s->alternate_scan) nCoeffs= 63;
3218 else nCoeffs= s->block_last_index[n];
3220 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3222 quant_matrix = s->intra_matrix;
3223 for(i=1;i<=nCoeffs;i++) {
3224 int j= s->intra_scantable.permutated[i];
3229 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3232 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * MPEG-2 inter dequantizer: (2*level+1) * qscale * inter matrix >> 4,
 * without MPEG-1 oddification; alternate scan forces all 64
 * coefficients.
 */
3241 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
3242 int16_t *block, int n, int qscale)
3244 int i, level, nCoeffs;
3245 const uint16_t *quant_matrix;
3248 if(s->alternate_scan) nCoeffs= 63;
3249 else nCoeffs= s->block_last_index[n];
3251 quant_matrix = s->inter_matrix;
3252 for(i=0; i<=nCoeffs; i++) {
3253 int j= s->intra_scantable.permutated[i];
3258 level = (((level << 1) + 1) * qscale *
3259 ((int) (quant_matrix[j]))) >> 4;
3262 level = (((level << 1) + 1) * qscale *
3263 ((int) (quant_matrix[j]))) >> 4;
/*
 * H.263-style intra dequantizer: uniform quantizer, reconstruction is
 * level*qmul +/- qadd depending on sign (qmul setup in lines missing
 * from this listing); DC is scaled separately.
 */
3272 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
3273 int16_t *block, int n, int qscale)
3275 int i, level, qmul, qadd;
3278 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
3283 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
/* (qscale - 1) | 1 makes qadd odd, matching the H.263 reconstruction */
3284 qadd = (qscale - 1) | 1;
3291 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3293 for(i=1; i<=nCoeffs; i++) {
3297 level = level * qmul - qadd;
3299 level = level * qmul + qadd;
/*
 * H.263-style inter dequantizer: same level*qmul +/- qadd
 * reconstruction as the intra variant but starting at coefficient 0
 * (no separately-scaled DC).
 */
3306 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
3307 int16_t *block, int n, int qscale)
3309 int i, level, qmul, qadd;
3312 av_assert2(s->block_last_index[n]>=0);
3314 qadd = (qscale - 1) | 1;
3317 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3319 for(i=0; i<=nCoeffs; i++) {
3323 level = level * qmul - qadd;
3325 level = level * qmul + qadd;
3333 * set qscale and update qscale dependent variables.
/* Clamps qscale to the valid range (clamp lines partly missing here),
 * then refreshes the chroma qscale and the DC scale factors derived
 * from it. */
3335 void ff_set_qscale(MpegEncContext * s, int qscale)
3339 else if (qscale > 31)
3343 s->chroma_qscale= s->chroma_qscale_table[qscale];
3345 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3346 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report the current MB row as decoded for frame-threading consumers;
 * suppressed for B frames, partitioned frames, and after errors. */
3349 void ff_MPV_report_decode_progress(MpegEncContext *s)
3351 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3352 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3355 #if CONFIG_ERROR_RESILIENCE
/* Mirror the per-frame MpegEncContext state into the error-resilience
 * context and begin error tracking for the new frame. */
3356 void ff_mpeg_er_frame_start(MpegEncContext *s)
3358 ERContext *er = &s->er;
3360 er->cur_pic = s->current_picture_ptr;
3361 er->last_pic = s->last_picture_ptr;
3362 er->next_pic = s->next_picture_ptr;
3364 er->pp_time = s->pp_time;
3365 er->pb_time = s->pb_time;
3366 er->quarter_sample = s->quarter_sample;
3367 er->partitioned_frame = s->partitioned_frame;
3369 ff_er_frame_start(er);
3371 #endif /* CONFIG_ERROR_RESILIENCE */