2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
36 #include "h264chroma.h"
39 #include "mpegvideo.h"
42 #include "xvmc_internal.h"
/* Forward declarations of the C reference dequantizers. ff_dct_common_init()
 * installs these into the MpegEncContext function pointers; arch-specific
 * init code may later override them with optimized versions. */
46 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
47 int16_t *block, int n, int qscale);
48 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
49 int16_t *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
51 int16_t *block, int n, int qscale);
52 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
53 int16_t *block, int n, int qscale);
54 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
55 int16_t *block, int n, int qscale);
56 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
57 int16_t *block, int n, int qscale);
58 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
59 int16_t *block, int n, int qscale);
/* Identity mapping: chroma qscale equals luma qscale (index == value).
 * Used as the default when a codec does not supply its own table.
 * NOTE(review): the closing brace of this initializer is not visible in
 * this excerpt (original line numbering is non-contiguous). */
61 static const uint8_t ff_default_chroma_qscale_table[32] = {
62 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
63 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
64 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale value.
 * Also serves as entry 0 (8-bit intra DC precision) of
 * ff_mpeg2_dc_scale_table[] below. */
67 const uint8_t ff_mpeg1_dc_scale_table[128] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale for 9-bit intra DC precision: constant 4. */
79 static const uint8_t mpeg2_dc_scale_table1[128] = {
80 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale for 10-bit intra DC precision: constant 2. */
91 static const uint8_t mpeg2_dc_scale_table2[128] = {
92 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale for 11-bit intra DC precision: constant 1 (no scaling). */
103 static const uint8_t mpeg2_dc_scale_table3[128] = {
104 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale table indexed by the MPEG-2 intra_dc_precision field (0..3):
 * 8/9/10/11-bit precision maps to divisor 8/4/2/1 respectively. */
115 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
116 ff_mpeg1_dc_scale_table,
117 mpeg2_dc_scale_table1,
118 mpeg2_dc_scale_table2,
119 mpeg2_dc_scale_table3,
/* Supported pixel-format list for plain 4:2:0 codecs.
 * NOTE(review): the initializer entries of this array are not visible in
 * this excerpt. */
122 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback (installed as ERContext.decode_mb in init_er()):
 * re-decodes one macroblock at (mb_x, mb_y) with the supplied motion
 * parameters so the error concealment code can paint over damaged areas.
 * NOTE(review): several lines of this function are missing from the excerpt
 * (original line numbering is non-contiguous). */
127 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
129 int mb_x, int mb_y, int mb_intra, int mb_skipped)
131 MpegEncContext *s = opaque;
134 s->mv_type = mv_type;
135 s->mb_intra = mb_intra;
136 s->mb_skipped = mb_skipped;
139 memcpy(s->mv, mv, sizeof(*mv));
141 ff_init_block_index(s);
142 ff_update_block_index(s);
144 s->dsp.clear_blocks(s->block[0]);
/* Recompute destination pointers directly from the current picture planes;
 * chroma uses the plane-shifted 16>>shift block size. */
146 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
147 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
148 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
151 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
152 ff_MPV_decode_mb(s, s->block);
155 /* init common dct for both encoder and decoder */
/* Initializes the DSP sub-contexts, installs the C reference dequantizer
 * function pointers (bitexact variant for MPEG-2 intra when
 * CODEC_FLAG_BITEXACT is set), lets arch-specific init override them, and
 * builds the permutated scan tables for the chosen IDCT permutation. */
156 av_cold int ff_dct_common_init(MpegEncContext *s)
158 ff_dsputil_init(&s->dsp, s->avctx);
159 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
160 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
161 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
163 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
164 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
165 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
166 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
167 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
168 if (s->flags & CODEC_FLAG_BITEXACT)
169 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
170 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture overrides; the surrounding ARCH_* #if guards are not
 * visible in this excerpt. */
173 ff_MPV_common_init_axp(s);
175 ff_MPV_common_init_arm(s);
177 ff_MPV_common_init_bfin(s);
179 ff_MPV_common_init_ppc(s);
181 ff_MPV_common_init_x86(s);
183 /* load & permutate scantables
184 * note: only wmv uses different ones
186 if (s->alternate_scan) {
187 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
190 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
194 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocates the linesize-dependent scratch buffers (edge emulation buffer
 * and motion-estimation scratchpad). The ME scratchpad is shared: temp,
 * rd_scratchpad, b_scratchpad and obmc_scratchpad all alias into it.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure (freeing the
 * edge_emu_buffer on the failure path). */
199 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
201 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
203 // edge emu needs blocksize + filter length - 1
204 // (= 17x17 for halfpel / 21x21 for h264)
205 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
206 // at uvlinesize. It supports only YUV420 so 24x24 is enough
207 // linesize * interlaced * MBsize
208 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
211 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
213 s->me.temp = s->me.scratchpad;
214 s->rd_scratchpad = s->me.scratchpad;
215 s->b_scratchpad = s->me.scratchpad;
216 s->obmc_scratchpad = s->me.scratchpad + 16;
220 av_freep(&s->edge_emu_buffer);
221 return AVERROR(ENOMEM);
225 * Allocate a frame buffer
/* Obtains pixel buffers for pic via the (possibly user-supplied) get_buffer
 * mechanism, except for the WMV3IMAGE/VC1IMAGE/MSS2 codecs which use
 * internal buffers with different dimensions/colorspaces and therefore call
 * avcodec_default_get_buffer2() directly. Also allocates hwaccel private
 * data, validates that linesizes did not change mid-stream, and lazily
 * allocates the linesize-dependent scratch buffers. */
227 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
232 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
233 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
234 s->codec_id != AV_CODEC_ID_MSS2)
235 r = ff_thread_get_buffer(s->avctx, &pic->tf,
236 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
238 pic->f.width = s->avctx->width;
239 pic->f.height = s->avctx->height;
240 pic->f.format = s->avctx->pix_fmt;
241 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
244 if (r < 0 || !pic->f.data[0]) {
245 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
250 if (s->avctx->hwaccel) {
251 assert(!pic->hwaccel_picture_private);
252 if (s->avctx->hwaccel->priv_data_size) {
253 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
254 if (!pic->hwaccel_priv_buf) {
255 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
258 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* A stride change after the first allocation would invalidate all the
 * stride-derived tables, so treat it as a hard error. */
262 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
263 s->uvlinesize != pic->f.linesize[1])) {
264 av_log(s->avctx, AV_LOG_ERROR,
265 "get_buffer() failed (stride changed)\n");
266 ff_mpeg_unref_picture(s, pic);
270 if (pic->f.linesize[1] != pic->f.linesize[2]) {
271 av_log(s->avctx, AV_LOG_ERROR,
272 "get_buffer() failed (uv stride mismatch)\n");
273 ff_mpeg_unref_picture(s, pic);
277 if (!s->edge_emu_buffer &&
278 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
279 av_log(s->avctx, AV_LOG_ERROR,
280 "get_buffer() failed to allocate context scratch buffers.\n");
281 ff_mpeg_unref_picture(s, pic);
/* Releases all per-picture side-data buffers (variance, skip, qscale,
 * mb_type, motion vectors, reference indices) and resets the recorded
 * allocation dimensions so the tables will be re-allocated on next use. */
288 static void free_picture_tables(Picture *pic)
292 pic->alloc_mb_width =
293 pic->alloc_mb_height = 0;
295 av_buffer_unref(&pic->mb_var_buf);
296 av_buffer_unref(&pic->mc_mb_var_buf);
297 av_buffer_unref(&pic->mb_mean_buf);
298 av_buffer_unref(&pic->mbskip_table_buf);
299 av_buffer_unref(&pic->qscale_table_buf);
300 av_buffer_unref(&pic->mb_type_buf);
302 for (i = 0; i < 2; i++) {
303 av_buffer_unref(&pic->motion_val_buf[i]);
304 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocates the per-picture side-data buffers sized from the current
 * macroblock geometry. Encoder-only variance/mean tables and the motion
 * tables are allocated conditionally (the enclosing condition lines for the
 * encoder-only part are not visible in this excerpt). On any failure
 * returns AVERROR(ENOMEM); the caller is responsible for cleanup. */
308 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
310 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
311 const int mb_array_size = s->mb_stride * s->mb_height;
312 const int b8_array_size = s->b8_stride * s->mb_height * 2;
316 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
317 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
318 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
320 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
321 return AVERROR(ENOMEM);
324 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
325 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
326 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
327 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
328 return AVERROR(ENOMEM);
331 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
332 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
333 int ref_index_size = 4 * mb_array_size;
335 for (i = 0; mv_size && i < 2; i++) {
336 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
337 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
338 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
339 return AVERROR(ENOMEM);
/* Record the geometry the tables were sized for, so ff_alloc_picture()
 * can detect stale tables after a resolution change. */
343 pic->alloc_mb_width = s->mb_width;
344 pic->alloc_mb_height = s->mb_height;
/* Ensures each (possibly shared, refcounted) per-picture table buffer is
 * writable, performing a copy-on-write via av_buffer_make_writable() where
 * needed. The MAKE_WRITABLE macro body is partially elided in this excerpt;
 * it returns on error for each table. */
349 static int make_tables_writable(Picture *pic)
352 #define MAKE_WRITABLE(table) \
355 (ret = av_buffer_make_writable(&pic->table)) < 0)\
359 MAKE_WRITABLE(mb_var_buf);
360 MAKE_WRITABLE(mc_mb_var_buf);
361 MAKE_WRITABLE(mb_mean_buf);
362 MAKE_WRITABLE(mbskip_table_buf);
363 MAKE_WRITABLE(qscale_table_buf);
364 MAKE_WRITABLE(mb_type_buf);
366 for (i = 0; i < 2; i++) {
367 MAKE_WRITABLE(motion_val_buf[i]);
368 MAKE_WRITABLE(ref_index_buf[i]);
375 * Allocate a Picture.
376 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Frees stale side tables if the macroblock geometry changed, obtains pixel
 * buffers (unless shared), (re)allocates and makes writable the side-data
 * tables, then sets up the convenience pointers into those buffers (qscale
 * and mb_type pointers are offset past the 2*mb_stride+1 edge padding). */
378 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
382 if (pic->qscale_table_buf)
383 if ( pic->alloc_mb_width != s->mb_width
384 || pic->alloc_mb_height != s->mb_height)
385 free_picture_tables(pic);
388 av_assert0(pic->f.data[0]);
391 av_assert0(!pic->f.data[0]);
393 if (alloc_frame_buffer(s, pic) < 0)
396 s->linesize = pic->f.linesize[0];
397 s->uvlinesize = pic->f.linesize[1];
400 if (!pic->qscale_table_buf)
401 ret = alloc_picture_tables(s, pic);
403 ret = make_tables_writable(pic);
408 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
409 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
410 pic->mb_mean = pic->mb_mean_buf->data;
413 pic->mbskip_table = pic->mbskip_table_buf->data;
414 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
415 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
417 if (pic->motion_val_buf[0]) {
418 for (i = 0; i < 2; i++) {
/* +4 skips the guard elements at the start of the motion_val buffer. */
419 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
420 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* Shared error path (label elided in this excerpt): undo everything. */
426 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
427 ff_mpeg_unref_picture(s, pic);
428 free_picture_tables(pic);
429 return AVERROR(ENOMEM);
433 * Deallocate a picture.
/* Releases the pixel buffers and hwaccel data of pic, then zeroes every
 * Picture field located after mb_mean (the fields up to and including
 * mb_mean survive so the refcounted table buffers can be reused). */
435 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
437 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
440 /* WM Image / Screen codecs allocate internal buffers with different
441 * dimensions / colorspaces; ignore user-defined callbacks for these. */
442 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
443 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
444 s->codec_id != AV_CODEC_ID_MSS2)
445 ff_thread_release_buffer(s->avctx, &pic->tf);
447 av_frame_unref(&pic->f);
449 av_buffer_unref(&pic->hwaccel_priv_buf);
451 if (pic->needs_realloc)
452 free_picture_tables(pic);
454 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Makes dst's side-data table buffers reference the same underlying
 * AVBuffers as src's (frame-threading table sharing). The UPDATE_TABLE
 * macro re-references a buffer only when dst does not already share src's
 * storage; on av_buffer_ref() failure it frees dst's tables and returns
 * AVERROR(ENOMEM). Plain data pointers are then copied over directly. */
461 #define UPDATE_TABLE(table)\
464 (!dst->table || dst->table->buffer != src->table->buffer)) {\
465 av_buffer_unref(&dst->table);\
466 dst->table = av_buffer_ref(src->table);\
468 free_picture_tables(dst);\
469 return AVERROR(ENOMEM);\
474 UPDATE_TABLE(mb_var_buf);
475 UPDATE_TABLE(mc_mb_var_buf);
476 UPDATE_TABLE(mb_mean_buf);
477 UPDATE_TABLE(mbskip_table_buf);
478 UPDATE_TABLE(qscale_table_buf);
479 UPDATE_TABLE(mb_type_buf);
480 for (i = 0; i < 2; i++) {
481 UPDATE_TABLE(motion_val_buf[i]);
482 UPDATE_TABLE(ref_index_buf[i]);
485 dst->mb_var = src->mb_var;
486 dst->mc_mb_var = src->mc_mb_var;
487 dst->mb_mean = src->mb_mean;
488 dst->mbskip_table = src->mbskip_table;
489 dst->qscale_table = src->qscale_table;
490 dst->mb_type = src->mb_type;
491 for (i = 0; i < 2; i++) {
492 dst->motion_val[i] = src->motion_val[i];
493 dst->ref_index[i] = src->ref_index[i];
496 dst->alloc_mb_width = src->alloc_mb_width;
497 dst->alloc_mb_height = src->alloc_mb_height;
/* Makes dst a new reference to src: refs the frame buffers, shares the side
 * tables, refs the hwaccel private buffer, and copies the scalar metadata.
 * On any failure dst is fully unreferenced (error path at the bottom) —
 * the intermediate goto targets are elided in this excerpt. */
502 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
506 av_assert0(!dst->f.buf[0]);
507 av_assert0(src->f.buf[0]);
511 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
515 ret = update_picture_tables(dst, src);
519 if (src->hwaccel_picture_private) {
520 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
521 if (!dst->hwaccel_priv_buf)
523 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
526 dst->field_picture = src->field_picture;
527 dst->mb_var_sum = src->mb_var_sum;
528 dst->mc_mb_var_sum = src->mc_mb_var_sum;
529 dst->b_frame_score = src->b_frame_score;
530 dst->needs_realloc = src->needs_realloc;
531 dst->reference = src->reference;
532 dst->shared = src->shared;
536 ff_mpeg_unref_picture(s, dst);
/* Swaps the U and V block pointers in pblocks[] (used for the "VCR2"
 * codec tag). NOTE(review): most of this function's body is missing from
 * this excerpt — only one assignment of the swap is visible. */
540 static void exchange_uv(MpegEncContext *s)
545 s->pblocks[4] = s->pblocks[5];
/* Initializes the per-slice-thread parts of an MpegEncContext: ME maps,
 * optional noise-reduction error sums, the DCT block array and pblocks[]
 * pointers (UV-swapped for the VCR2 codec tag), and H.263 AC prediction
 * values. Returns -1 on allocation failure; partially allocated buffers
 * are later freed through ff_MPV_common_end(). */
549 static int init_duplicate_context(MpegEncContext *s)
551 int y_size = s->b8_stride * (2 * s->mb_height + 1);
552 int c_size = s->mb_stride * (s->mb_height + 1);
553 int yc_size = y_size + 2 * c_size;
561 s->obmc_scratchpad = NULL;
564 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
565 ME_MAP_SIZE * sizeof(uint32_t), fail)
566 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
567 ME_MAP_SIZE * sizeof(uint32_t), fail)
568 if (s->avctx->noise_reduction) {
569 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
570 2 * 64 * sizeof(int), fail)
573 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
574 s->block = s->blocks[0];
576 for (i = 0; i < 12; i++) {
577 s->pblocks[i] = &s->block[i];
579 if (s->avctx->codec_tag == AV_RL32("VCR2"))
582 if (s->out_format == FMT_H263) {
/* ac values: one plane of y_size plus two chroma planes of c_size,
 * offset past the edge rows (b8_stride/mb_stride + 1). */
584 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
585 yc_size * sizeof(int16_t) * 16, fail);
586 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
587 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
588 s->ac_val[2] = s->ac_val[1] + c_size;
593 return -1; // free() through ff_MPV_common_end()
/* Frees everything allocated by init_duplicate_context() and
 * ff_mpv_frame_size_alloc() for one slice context. obmc_scratchpad is only
 * NULLed, not freed, because it aliases into me.scratchpad. */
596 static void free_duplicate_context(MpegEncContext *s)
601 av_freep(&s->edge_emu_buffer);
602 av_freep(&s->me.scratchpad);
606 s->obmc_scratchpad = NULL;
608 av_freep(&s->dct_error_sum);
609 av_freep(&s->me.map);
610 av_freep(&s->me.score_map);
611 av_freep(&s->blocks);
612 av_freep(&s->ac_val_base);
/* Copies the per-thread fields from src into bak so that
 * ff_update_duplicate_context() can restore them after a whole-struct
 * memcpy. NOTE(review): most of the COPY(...) list is elided in this
 * excerpt. */
616 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
618 #define COPY(a) bak->a = src->a
619 COPY(edge_emu_buffer);
624 COPY(obmc_scratchpad);
631 COPY(me.map_generation);
/* Refreshes a slice-thread context from the master: saves dst's per-thread
 * fields, memcpys the whole master context over dst, restores the saved
 * fields, rebuilds the pblocks[] pointers (VCR2 UV swap condition visible),
 * and lazily allocates the scratch buffers if missing. */
643 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
647 // FIXME copy only needed parts
649 backup_duplicate_context(&bak, dst);
650 memcpy(dst, src, sizeof(MpegEncContext));
651 backup_duplicate_context(dst, &bak);
652 for (i = 0; i < 12; i++) {
653 dst->pblocks[i] = &dst->block[i];
655 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
657 if (!dst->edge_emu_buffer &&
658 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
659 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
660 "scratch buffers.\n");
663 // STOP_TIMER("update_duplicate_context")
664 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading update callback: synchronizes the destination decoder
 * context with the source thread's context. Handles first-time init,
 * resolution changes, re-referencing of all pictures, error-resilience
 * state, MPEG-4/MPEG-2 field blocks (copied as raw byte ranges between
 * struct members), the packed-divx bitstream buffer, and the
 * linesize-dependent scratch buffers. Returns 0 or an AVERROR code. */
668 int ff_mpeg_update_thread_context(AVCodecContext *dst,
669 const AVCodecContext *src)
672 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
679 // FIXME can parameters change on I-frames?
680 // in that case dst may need a reinit
681 if (!s->context_initialized) {
682 memcpy(s, s1, sizeof(MpegEncContext));
/* Pointers copied by the memcpy above would alias s1's buffers; reset
 * the ones this context must own itself. */
685 s->bitstream_buffer = NULL;
686 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
688 if (s1->context_initialized){
689 // s->picture_range_start += MAX_PICTURE_COUNT;
690 // s->picture_range_end += MAX_PICTURE_COUNT;
691 if((ret = ff_MPV_common_init(s)) < 0){
692 memset(s, 0, sizeof(MpegEncContext));
699 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
700 s->context_reinit = 0;
701 s->height = s1->height;
702 s->width = s1->width;
703 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
707 s->avctx->coded_height = s1->avctx->coded_height;
708 s->avctx->coded_width = s1->avctx->coded_width;
709 s->avctx->width = s1->avctx->width;
710 s->avctx->height = s1->avctx->height;
712 s->coded_picture_number = s1->coded_picture_number;
713 s->picture_number = s1->picture_number;
714 s->input_picture_number = s1->input_picture_number;
716 av_assert0(!s->picture || s->picture != s1->picture);
718 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
719 ff_mpeg_unref_picture(s, &s->picture[i]);
720 if (s1->picture[i].f.data[0] &&
721 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
725 #define UPDATE_PICTURE(pic)\
727 ff_mpeg_unref_picture(s, &s->pic);\
728 if (s1->pic.f.data[0])\
729 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
731 ret = update_picture_tables(&s->pic, &s1->pic);\
736 UPDATE_PICTURE(current_picture);
737 UPDATE_PICTURE(last_picture);
738 UPDATE_PICTURE(next_picture);
/* The *_ptr members point into s1->picture[]; rebase them into s's own
 * picture array (REBASE_PICTURE macro defined elsewhere in the file). */
740 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
741 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
742 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
744 // Error/bug resilience
745 s->next_p_frame_damaged = s1->next_p_frame_damaged;
746 s->workaround_bugs = s1->workaround_bugs;
747 s->padding_bug_score = s1->padding_bug_score;
/* MPEG-4 state block: copied as the raw byte range between two struct
 * members; relies on the member layout of MpegEncContext. */
750 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
751 (char *) &s1->shape - (char *) &s1->time_increment_bits);
754 s->max_b_frames = s1->max_b_frames;
755 s->low_delay = s1->low_delay;
756 s->droppable = s1->droppable;
758 // DivX handling (doesn't work)
759 s->divx_packed = s1->divx_packed;
761 if (s1->bitstream_buffer) {
762 if (s1->bitstream_buffer_size +
763 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
764 av_fast_malloc(&s->bitstream_buffer,
765 &s->allocated_bitstream_buffer_size,
766 s1->allocated_bitstream_buffer_size);
767 s->bitstream_buffer_size = s1->bitstream_buffer_size;
768 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
769 s1->bitstream_buffer_size);
770 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
771 FF_INPUT_BUFFER_PADDING_SIZE);
774 // linesize dependend scratch buffer allocation
775 if (!s->edge_emu_buffer)
777 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
778 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
779 "scratch buffers.\n");
780 return AVERROR(ENOMEM);
783 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
784 "be allocated due to unknown size.\n");
787 // MPEG2/interlacing info
788 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
789 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
791 if (!s1->first_field) {
792 s->last_pict_type = s1->pict_type;
793 if (s1->current_picture_ptr)
794 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
796 if (s1->pict_type != AV_PICTURE_TYPE_B) {
797 s->last_non_b_pict_type = s1->pict_type;
805 * Set the given MpegEncContext to common defaults
806 * (same for encoding and decoding).
807 * The changed fields will not depend upon the
808 * prior state of the MpegEncContext.
810 void ff_MPV_common_defaults(MpegEncContext *s)
812 s->y_dc_scale_table =
813 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
814 s->chroma_qscale_table = ff_default_chroma_qscale_table;
815 s->progressive_frame = 1;
816 s->progressive_sequence = 1;
817 s->picture_structure = PICT_FRAME;
819 s->coded_picture_number = 0;
820 s->picture_number = 0;
821 s->input_picture_number = 0;
823 s->picture_in_gop_number = 0;
828 s->slice_context_count = 1;
832 * Set the given MpegEncContext to defaults for decoding.
833 * the changed fields will not depend upon
834 * the prior state of the MpegEncContext.
836 void ff_MPV_decode_defaults(MpegEncContext *s)
838 ff_MPV_common_defaults(s);
/* Initializes the error-resilience context: mirrors the macroblock geometry
 * from s, allocates the temp/status tables, links the shared skip/intra/DC
 * tables, and installs mpeg_er_decode_mb as the concealment callback.
 * Returns AVERROR(ENOMEM) on allocation failure (error label elided). */
841 static int init_er(MpegEncContext *s)
843 ERContext *er = &s->er;
844 int mb_array_size = s->mb_height * s->mb_stride;
847 er->avctx = s->avctx;
850 er->mb_index2xy = s->mb_index2xy;
851 er->mb_num = s->mb_num;
852 er->mb_width = s->mb_width;
853 er->mb_height = s->mb_height;
854 er->mb_stride = s->mb_stride;
855 er->b8_stride = s->b8_stride;
857 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
858 er->error_status_table = av_mallocz(mb_array_size);
859 if (!er->er_temp_buffer || !er->error_status_table)
862 er->mbskip_table = s->mbskip_table;
863 er->mbintra_table = s->mbintra_table;
865 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
866 er->dc_val[i] = s->dc_val[i];
868 er->decode_mb = mpeg_er_decode_mb;
873 av_freep(&er->er_temp_buffer);
874 av_freep(&er->error_status_table);
875 return AVERROR(ENOMEM);
879 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Computes the macroblock geometry (strides, counts, edge positions),
 * then allocates every resolution-dependent table: mb_index2xy, the MV
 * tables (with +mb_stride+1 edge offset on the usable pointers), MB type,
 * lambda/complexity tables, interlaced direct-mode tables, H.263 coded
 * block / cbp / pred_dir tables, DC prediction values (reset to 1024),
 * and the intra/skip tables. Returns AVERROR(ENOMEM) via the fail label. */
881 static int init_context_frame(MpegEncContext *s)
883 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
885 s->mb_width = (s->width + 15) / 16;
886 s->mb_stride = s->mb_width + 1;
887 s->b8_stride = s->mb_width * 2 + 1;
888 s->b4_stride = s->mb_width * 4 + 1;
889 mb_array_size = s->mb_height * s->mb_stride;
890 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
892 /* set default edge pos, will be overriden
893 * in decode_header if needed */
894 s->h_edge_pos = s->mb_width * 16;
895 s->v_edge_pos = s->mb_height * 16;
897 s->mb_num = s->mb_width * s->mb_height;
902 s->block_wrap[3] = s->b8_stride;
904 s->block_wrap[5] = s->mb_stride;
906 y_size = s->b8_stride * (2 * s->mb_height + 1);
907 c_size = s->mb_stride * (s->mb_height + 1);
908 yc_size = y_size + 2 * c_size;
910 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
911 for (y = 0; y < s->mb_height; y++)
912 for (x = 0; x < s->mb_width; x++)
913 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
915 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
918 /* Allocate MV tables */
919 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
920 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
921 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
922 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
923 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
924 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
925 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
926 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
927 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
928 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
929 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
930 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
932 /* Allocate MB type table */
933 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
935 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
937 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
938 mb_array_size * sizeof(float), fail);
939 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
940 mb_array_size * sizeof(float), fail);
944 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
945 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
946 /* interlaced direct mode decoding tables */
947 for (i = 0; i < 2; i++) {
949 for (j = 0; j < 2; j++) {
950 for (k = 0; k < 2; k++) {
951 FF_ALLOCZ_OR_GOTO(s->avctx,
952 s->b_field_mv_table_base[i][j][k],
953 mv_table_size * 2 * sizeof(int16_t),
955 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
958 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
959 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
960 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
962 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
965 if (s->out_format == FMT_H263) {
967 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
968 s->coded_block = s->coded_block_base + s->b8_stride + 1;
970 /* cbp, ac_pred, pred_dir */
971 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
972 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
975 if (s->h263_pred || s->h263_plus || !s->encoding) {
977 // MN: we need these for error resilience of intra-frames
978 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
979 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
980 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
981 s->dc_val[2] = s->dc_val[1] + c_size;
982 for (i = 0; i < yc_size; i++)
983 s->dc_val_base[i] = 1024;
986 /* which mb is a intra block */
987 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
988 memset(s->mbintra_table, 1, mb_array_size);
990 /* init macroblock skip table */
991 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
992 // Note the + 1 is for a quicker mpeg4 slice_end detection
996 return AVERROR(ENOMEM);
1000 * init common structure for both encoder and decoder.
1001 * this assumes that some variables like width/height are already set
/* Full context init: decides the slice-thread count, computes mb_height
 * (field-macroblock aware for interlaced MPEG-2), validates the pixel
 * format and image size, initializes DSP/scan tables, allocates
 * encoder-side quantizer matrices and picture arrays, calls
 * init_context_frame(), and finally sets up one duplicate context per
 * slice thread. Cleans up through ff_MPV_common_end() on failure. */
1003 av_cold int ff_MPV_common_init(MpegEncContext *s)
1006 int nb_slices = (HAVE_THREADS &&
1007 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
1008 s->avctx->thread_count : 1;
1010 if (s->encoding && s->avctx->slices)
1011 nb_slices = s->avctx->slices;
1013 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1014 s->mb_height = (s->height + 31) / 32 * 2;
1016 s->mb_height = (s->height + 15) / 16;
1018 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1019 av_log(s->avctx, AV_LOG_ERROR,
1020 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1024 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1027 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1029 max_slices = MAX_THREADS;
1030 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1031 " reducing to %d\n", nb_slices, max_slices);
1032 nb_slices = max_slices;
1035 if ((s->width || s->height) &&
1036 av_image_check_size(s->width, s->height, 0, s->avctx))
1039 ff_dct_common_init(s);
1041 s->flags = s->avctx->flags;
1042 s->flags2 = s->avctx->flags2;
1044 /* set chroma shifts */
1045 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1047 &s->chroma_y_shift);
1049 /* convert fourcc to upper case */
1050 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1052 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1054 s->avctx->coded_frame = &s->current_picture.f;
/* Encoder-only allocations (the enclosing s->encoding condition is not
 * visible in this excerpt). */
1057 if (s->msmpeg4_version) {
1058 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1059 2 * 2 * (MAX_LEVEL + 1) *
1060 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1062 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1064 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
1065 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
1066 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
1067 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1068 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1069 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1070 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1071 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1073 if (s->avctx->noise_reduction) {
1074 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
1078 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1079 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1080 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1081 avcodec_get_frame_defaults(&s->picture[i].f);
1083 memset(&s->next_picture, 0, sizeof(s->next_picture));
1084 memset(&s->last_picture, 0, sizeof(s->last_picture));
1085 memset(&s->current_picture, 0, sizeof(s->current_picture));
1086 avcodec_get_frame_defaults(&s->next_picture.f);
1087 avcodec_get_frame_defaults(&s->last_picture.f);
1088 avcodec_get_frame_defaults(&s->current_picture.f);
1090 if (init_context_frame(s))
1093 s->parse_context.state = -1;
1095 s->context_initialized = 1;
1096 s->thread_context[0] = s;
1098 // if (s->width && s->height) {
1099 if (nb_slices > 1) {
1100 for (i = 1; i < nb_slices; i++) {
1101 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1102 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1105 for (i = 0; i < nb_slices; i++) {
1106 if (init_duplicate_context(s->thread_context[i]) < 0)
/* Partition the macroblock rows as evenly as possible across slices. */
1108 s->thread_context[i]->start_mb_y =
1109 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1110 s->thread_context[i]->end_mb_y =
1111 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1114 if (init_duplicate_context(s) < 0)
1117 s->end_mb_y = s->mb_height;
1119 s->slice_context_count = nb_slices;
1124 ff_MPV_common_end(s);
1129 * Frees and resets MpegEncContext fields depending on the resolution.
1130 * Is used during resolution changes to avoid a full reinitialization of the codec.
/*
 * Free and reset the MpegEncContext fields that depend on frame resolution
 * (motion-vector tables, per-MB side tables, error-resilience buffers).
 * Called on resolution change and from ff_MPV_common_end().
 * NOTE(review): this excerpt elides lines (gaps in the embedded numbering);
 * closing braces and some statements are not visible here.
 */
1133 static int free_context_frame(MpegEncContext *s)
// av_freep() frees and NULLs the *_base allocations; the non-_base
// pointers below are aliases into those buffers, so they are only
// reset to NULL rather than freed.
1137 av_freep(&s->mb_type);
1138 av_freep(&s->p_mv_table_base);
1139 av_freep(&s->b_forw_mv_table_base);
1140 av_freep(&s->b_back_mv_table_base);
1141 av_freep(&s->b_bidir_forw_mv_table_base);
1142 av_freep(&s->b_bidir_back_mv_table_base);
1143 av_freep(&s->b_direct_mv_table_base);
1144 s->p_mv_table = NULL;
1145 s->b_forw_mv_table = NULL;
1146 s->b_back_mv_table = NULL;
1147 s->b_bidir_forw_mv_table = NULL;
1148 s->b_bidir_back_mv_table = NULL;
1149 s->b_direct_mv_table = NULL;
// Field-based MV tables: indexed [list][field of current MB][field referenced].
1150 for (i = 0; i < 2; i++) {
1151 for (j = 0; j < 2; j++) {
1152 for (k = 0; k < 2; k++) {
1153 av_freep(&s->b_field_mv_table_base[i][j][k]);
1154 s->b_field_mv_table[i][j][k] = NULL;
1156 av_freep(&s->b_field_select_table[i][j]);
1157 av_freep(&s->p_field_mv_table_base[i][j]);
1158 s->p_field_mv_table[i][j] = NULL;
1160 av_freep(&s->p_field_select_table[i]);
// Per-block / per-MB prediction state.
1163 av_freep(&s->dc_val_base);
1164 av_freep(&s->coded_block_base);
1165 av_freep(&s->mbintra_table);
1166 av_freep(&s->cbp_table);
1167 av_freep(&s->pred_dir_table);
1169 av_freep(&s->mbskip_table);
// Error-resilience state.
1171 av_freep(&s->er.error_status_table);
1172 av_freep(&s->er.er_temp_buffer);
1173 av_freep(&s->mb_index2xy);
1174 av_freep(&s->lambda_table);
1176 av_freep(&s->cplx_tab);
1177 av_freep(&s->bits_tab);
// Reset so the next init recomputes strides for the new resolution.
1179 s->linesize = s->uvlinesize = 0;
/*
 * Re-initialize the context after a mid-stream frame size change:
 * tear down slice/thread contexts and resolution-dependent tables,
 * then rebuild them for the new dimensions.
 * Returns 0 on success or a negative AVERROR on failure (error paths
 * jump to a fail label elided from this excerpt).
 * NOTE(review): this excerpt elides lines (gaps in the embedded numbering).
 */
1184 int ff_MPV_common_frame_size_change(MpegEncContext *s)
// Free per-slice duplicate contexts; thread_context[0] is s itself,
// so only indices >= 1 are freed.
1188 if (s->slice_context_count > 1) {
1189 for (i = 0; i < s->slice_context_count; i++) {
1190 free_duplicate_context(s->thread_context[i]);
1192 for (i = 1; i < s->slice_context_count; i++) {
1193 av_freep(&s->thread_context[i]);
1196 free_duplicate_context(s);
1198 if ((err = free_context_frame(s)) < 0)
// Force reallocation of all picture buffers at the new size.
1202 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1203 s->picture[i].needs_realloc = 1;
1206 s->last_picture_ptr =
1207 s->next_picture_ptr =
1208 s->current_picture_ptr = NULL;
// Interlaced MPEG-2 rounds mb_height to a multiple of 2 macroblock rows.
1211 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1212 s->mb_height = (s->height + 31) / 32 * 2;
1214 s->mb_height = (s->height + 15) / 16;
1216 if ((s->width || s->height) &&
1217 av_image_check_size(s->width, s->height, 0, s->avctx))
1218 return AVERROR_INVALIDDATA;
1220 if ((err = init_context_frame(s)))
1223 s->thread_context[0] = s;
1225 if (s->width && s->height) {
1226 int nb_slices = s->slice_context_count;
// Rebuild one duplicate context per slice thread; each gets an
// even share of macroblock rows via rounded division.
1227 if (nb_slices > 1) {
1228 for (i = 1; i < nb_slices; i++) {
// NOTE(review): av_malloc result is not checked in the visible lines —
// presumably handled (or missing) in the elided error path; verify upstream.
1229 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1230 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1233 for (i = 0; i < nb_slices; i++) {
1234 if (init_duplicate_context(s->thread_context[i]) < 0)
1236 s->thread_context[i]->start_mb_y =
1237 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1238 s->thread_context[i]->end_mb_y =
1239 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1242 err = init_duplicate_context(s);
1246 s->end_mb_y = s->mb_height;
1248 s->slice_context_count = nb_slices;
// Failure path: release everything (fail label elided from excerpt).
1253 ff_MPV_common_end(s);
1257 /* init common structure for both encoder and decoder */
/*
 * Free everything allocated by ff_MPV_common_init(): slice contexts,
 * bitstream/parse buffers, quantization matrices, picture arrays and
 * resolution-dependent tables. Safe to call on a partially
 * initialized context (av_freep() tolerates NULL).
 * NOTE(review): this excerpt elides lines (gaps in the embedded numbering).
 */
1258 void ff_MPV_common_end(MpegEncContext *s)
1262 if (s->slice_context_count > 1) {
1263 for (i = 0; i < s->slice_context_count; i++) {
1264 free_duplicate_context(s->thread_context[i]);
// thread_context[0] aliases s; only the heap-allocated duplicates are freed.
1266 for (i = 1; i < s->slice_context_count; i++) {
1267 av_freep(&s->thread_context[i]);
1269 s->slice_context_count = 1;
1270 } else free_duplicate_context(s);
1272 av_freep(&s->parse_context.buffer);
1273 s->parse_context.buffer_size = 0;
1275 av_freep(&s->bitstream_buffer);
1276 s->allocated_bitstream_buffer_size = 0;
1278 av_freep(&s->avctx->stats_out);
1279 av_freep(&s->ac_stats);
// The chroma matrices may alias the luma ones; free them only when
// they are distinct allocations, then drop the (possibly dangling) aliases.
1281 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1282 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1283 s->q_chroma_intra_matrix= NULL;
1284 s->q_chroma_intra_matrix16= NULL;
1285 av_freep(&s->q_intra_matrix);
1286 av_freep(&s->q_inter_matrix);
1287 av_freep(&s->q_intra_matrix16);
1288 av_freep(&s->q_inter_matrix16);
1289 av_freep(&s->input_picture);
1290 av_freep(&s->reordered_input_picture);
1291 av_freep(&s->dct_offset);
// Unref every slot before freeing the picture array itself.
1294 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1295 free_picture_tables(&s->picture[i]);
1296 ff_mpeg_unref_picture(s, &s->picture[i]);
1299 av_freep(&s->picture);
1300 free_picture_tables(&s->last_picture);
1301 ff_mpeg_unref_picture(s, &s->last_picture);
1302 free_picture_tables(&s->current_picture);
1303 ff_mpeg_unref_picture(s, &s->current_picture);
1304 free_picture_tables(&s->next_picture);
1305 ff_mpeg_unref_picture(s, &s->next_picture);
1306 free_picture_tables(&s->new_picture);
1307 ff_mpeg_unref_picture(s, &s->new_picture);
1309 free_context_frame(s);
1311 s->context_initialized = 0;
1312 s->last_picture_ptr =
1313 s->next_picture_ptr =
1314 s->current_picture_ptr = NULL;
1315 s->linesize = s->uvlinesize = 0;
/*
 * Build the derived run/level lookup tables (max_level[], max_run[],
 * index_run[]) for an RLTable, for both the "not last" and "last"
 * coefficient groups. When static_store is non-NULL the tables are
 * placed into that static buffer, otherwise they are av_malloc()ed.
 * NOTE(review): this excerpt elides lines (gaps in the embedded numbering),
 * including the start/end setup for each group and the else branches.
 */
1318 av_cold void ff_init_rl(RLTable *rl,
1319 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1321 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1322 uint8_t index_run[MAX_RUN + 1];
1323 int last, run, level, start, end, i;
1325 /* If the table is static we can quit early: rl->max_level[0] being
1326    non-NULL means a previous call already initialized it. */
1326 if (static_store && rl->max_level[0])
1329 /* compute max_level[], max_run[] and index_run[] */
1330 for (last = 0; last < 2; last++) {
// rl->n is used as the "unset" sentinel for index_run[].
1339 memset(max_level, 0, MAX_RUN + 1);
1340 memset(max_run, 0, MAX_LEVEL + 1);
1341 memset(index_run, rl->n, MAX_RUN + 1);
1342 for (i = start; i < end; i++) {
1343 run = rl->table_run[i];
1344 level = rl->table_level[i];
// Record the first table index seen for each run value.
1345 if (index_run[run] == rl->n)
1347 if (level > max_level[run])
1348 max_level[run] = level;
1349 if (run > max_run[level])
1350 max_run[level] = run;
// Static path: the three tables are packed consecutively into
// static_store[last]; dynamic path allocates each separately.
1353 rl->max_level[last] = static_store[last];
1355 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1356 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1358 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1360 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1361 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1363 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1365 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1366 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/*
 * Precompute rl_vlc[q] — the combined VLC + run/level decode tables —
 * for each of the 32 qscale values, so the decoder can look up
 * (len, run, level) in a single step without a separate dequantization.
 * NOTE(review): this excerpt elides lines (gaps in the embedded numbering),
 * including qmul setup and the illegal/escape-code handling bodies.
 */
1370 av_cold void ff_init_vlc_rl(RLTable *rl)
1374 for (q = 0; q < 32; q++) {
// qadd = (q - 1) | 1: standard H.263-style dequant rounding term.
1376 int qadd = (q - 1) | 1;
1382 for (i = 0; i < rl->vlc.table_size; i++) {
1383 int code = rl->vlc.table[i][0];
1384 int len = rl->vlc.table[i][1];
1387 if (len == 0) { // illegal code
1390 } else if (len < 0) { // more bits needed
1394 if (code == rl->n) { // esc
// Regular code: bake the dequantized level and the run (+1, biased
// by 192 for "last" codes) into the table entry.
1398 run = rl->table_run[code] + 1;
1399 level = rl->table_level[code] * qmul + qadd;
1400 if (code >= rl->last) run += 192;
1403 rl->rl_vlc[q][i].len = len;
1404 rl->rl_vlc[q][i].level = level;
1405 rl->rl_vlc[q][i].run = run;
/*
 * Unreference all non-reference pictures in the pool. When
 * remove_current is 0 the picture pointed to by current_picture_ptr
 * is kept; when non-zero it is released too.
 * NOTE(review): closing braces elided in this numbered excerpt.
 */
1410 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1414 /* release non reference frames */
1415 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1416 if (!s->picture[i].reference &&
1417 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1418 ff_mpeg_unref_picture(s, &s->picture[i]);
/*
 * Return non-zero when a picture slot can be reused: it is not the
 * current last_picture, and either has no data allocated or is marked
 * for reallocation without being a delayed reference.
 * NOTE(review): the return statements are elided from this excerpt;
 * only the conditions are visible.
 */
1423 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1425 if (pic == s->last_picture_ptr)
1427 if (pic->f.data[0] == NULL)
1429 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/*
 * Find the index of a reusable slot in s->picture[]. The shared path
 * (branching elided in this excerpt) only needs an empty slot; the
 * non-shared path accepts any slot that pic_is_unused() approves.
 * Falling through both loops is an internal error — see the comment
 * below for why aborting is preferred over returning -1.
 */
1434 static int find_unused_picture(MpegEncContext *s, int shared)
// Shared case: a slot with no data that is not last_picture.
1439 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1440 if (s->picture[i].f.data[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1444 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1445 if (pic_is_unused(s, &s->picture[i]))
1450 av_log(s->avctx, AV_LOG_FATAL,
1451 "Internal error, picture buffer overflow\n");
1452 /* We could return -1, but the codec would crash trying to draw into a
1453 * non-existing frame anyway. This is safer than waiting for a random crash.
1454 * Also the return of this is never useful, an encoder must only allocate
1455 * as much as allowed in the specification. This has no relationship to how
1456 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1457 * enough for such valid streams).
1458 * Plus, a decoder has to check stream validity and remove frames if too
1459 * many reference frames are around. Waiting for "OOM" is not correct at
1460 * all. Similarly, missing reference frames have to be replaced by
1461 * interpolated/MC frames, anything else is a bug in the codec ...
/*
 * Public wrapper around find_unused_picture(): when the chosen slot is
 * flagged needs_realloc, fully reset it (tables, references, frame
 * defaults) before handing it out.
 * NOTE(review): the final return and closing braces are elided here.
 */
1467 int ff_find_unused_picture(MpegEncContext *s, int shared)
1469 int ret = find_unused_picture(s, shared);
1471 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1472 if (s->picture[ret].needs_realloc) {
1473 s->picture[ret].needs_realloc = 0;
1474 free_picture_tables(&s->picture[ret]);
1475 ff_mpeg_unref_picture(s, &s->picture[ret]);
1476 avcodec_get_frame_defaults(&s->picture[ret].f);
/*
 * Encoder-side: refresh the per-coefficient DCT noise-reduction
 * offsets from the accumulated error statistics. Counts are halved
 * once they exceed 2^16 so the statistics decay over time.
 * NOTE(review): closing braces elided in this numbered excerpt.
 */
1482 static void update_noise_reduction(MpegEncContext *s)
1486 for (intra = 0; intra < 2; intra++) {
// Decay: halve both the error sums and the sample count.
1487 if (s->dct_count[intra] > (1 << 16)) {
1488 for (i = 0; i < 64; i++) {
1489 s->dct_error_sum[intra][i] >>= 1;
1491 s->dct_count[intra] >>= 1;
// offset = noise_reduction * count / error_sum, rounded; +1 avoids
// division by zero for coefficients with no recorded error.
1494 for (i = 0; i < 64; i++) {
1495 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1496 s->dct_count[intra] +
1497 s->dct_error_sum[intra][i] / 2) /
1498 (s->dct_error_sum[intra][i] + 1);
1504 * generic function for encode/decode called after coding/decoding
1505 * the header and before a frame is coded/decoded.
/*
 * Generic per-frame setup for both encoder and decoder, run after the
 * header is parsed and before coding/decoding starts: recycles old
 * pictures, allocates the current picture, rotates the
 * last/next/current reference pointers, allocates dummy references
 * when the stream starts on a non-keyframe, adjusts data pointers for
 * field pictures, and selects the dequantizer for the frame.
 * Returns 0 on success, negative on error (some error returns are on
 * lines elided from this numbered excerpt).
 */
1507 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1513 if (!ff_thread_can_start_frame(avctx)) {
1514 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1518 /* mark & release old frames */
1519 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1520 s->last_picture_ptr != s->next_picture_ptr &&
1521 s->last_picture_ptr->f.data[0]) {
1522 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1525 /* release forgotten pictures */
1526 /* if (mpeg124/h263) */
// A referenced picture that is neither last nor next should not exist
// in single-threaded decoding — treat it as a zombie and release it.
1528 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1529 if (&s->picture[i] != s->last_picture_ptr &&
1530 &s->picture[i] != s->next_picture_ptr &&
1531 s->picture[i].reference && !s->picture[i].needs_realloc) {
1532 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1533 av_log(avctx, AV_LOG_ERROR,
1534 "releasing zombie picture\n");
1535 ff_mpeg_unref_picture(s, &s->picture[i]);
1540 ff_mpeg_unref_picture(s, &s->current_picture);
1543 ff_release_unused_pictures(s, 1);
1545 if (s->current_picture_ptr &&
1546 s->current_picture_ptr->f.data[0] == NULL) {
1547 // we already have an unused image
1548 // (maybe it was set before reading the header)
1549 pic = s->current_picture_ptr;
1551 i = ff_find_unused_picture(s, 0);
1553 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1556 pic = &s->picture[i];
// Reference flag assignment elided here; droppable frames are never references.
1560 if (!s->droppable) {
1561 if (s->pict_type != AV_PICTURE_TYPE_B)
1565 pic->f.coded_picture_number = s->coded_picture_number++;
1567 if (ff_alloc_picture(s, pic, 0) < 0)
1570 s->current_picture_ptr = pic;
1571 // FIXME use only the vars from current_pic
1572 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1573 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1574 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
// For field pictures MPEG-1/2 derive top_field_first from which field
// is coded first rather than from the sequence flag.
1575 if (s->picture_structure != PICT_FRAME)
1576 s->current_picture_ptr->f.top_field_first =
1577 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1579 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1580 !s->progressive_sequence;
1581 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1584 s->current_picture_ptr->f.pict_type = s->pict_type;
1585 // if (s->flags && CODEC_FLAG_QSCALE)
1586 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1587 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1589 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1590 s->current_picture_ptr)) < 0)
// Reference rotation: non-B frames push next -> last, current -> next.
1593 if (s->pict_type != AV_PICTURE_TYPE_B) {
1594 s->last_picture_ptr = s->next_picture_ptr;
1596 s->next_picture_ptr = s->current_picture_ptr;
1598 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1599 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1600 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1601 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1602 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1603 s->pict_type, s->droppable);
// No usable last picture but one is needed (non-I frame, or a
// field-coded keyframe): allocate a gray dummy reference.
1605 if ((s->last_picture_ptr == NULL ||
1606 s->last_picture_ptr->f.data[0] == NULL) &&
1607 (s->pict_type != AV_PICTURE_TYPE_I ||
1608 s->picture_structure != PICT_FRAME)) {
1609 int h_chroma_shift, v_chroma_shift;
1610 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1611 &h_chroma_shift, &v_chroma_shift);
1612 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f.data[0])
1613 av_log(avctx, AV_LOG_DEBUG,
1614 "allocating dummy last picture for B frame\n");
1615 else if (s->pict_type != AV_PICTURE_TYPE_I)
1616 av_log(avctx, AV_LOG_ERROR,
1617 "warning: first frame is no keyframe\n");
1618 else if (s->picture_structure != PICT_FRAME)
1619 av_log(avctx, AV_LOG_DEBUG,
1620 "allocate dummy last picture for field based first keyframe\n");
1622 /* Allocate a dummy frame */
1623 i = ff_find_unused_picture(s, 0);
1625 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1628 s->last_picture_ptr = &s->picture[i];
1629 s->last_picture_ptr->f.key_frame = 0;
1630 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1631 s->last_picture_ptr = NULL;
// Fill with mid-gray (0x80) so predictions from the dummy are neutral.
1635 memset(s->last_picture_ptr->f.data[0], 0x80,
1636 avctx->height * s->last_picture_ptr->f.linesize[0]);
1637 memset(s->last_picture_ptr->f.data[1], 0x80,
1638 (avctx->height >> v_chroma_shift) *
1639 s->last_picture_ptr->f.linesize[1]);
1640 memset(s->last_picture_ptr->f.data[2], 0x80,
1641 (avctx->height >> v_chroma_shift) *
1642 s->last_picture_ptr->f.linesize[2]);
// FLV1/H.263 use luma 16 for the dummy instead of 0x80.
1644 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1645 for(i=0; i<avctx->height; i++)
1646 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
// Mark both fields complete so frame-threaded consumers never wait on it.
1649 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1650 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
// Same dummy-allocation dance for the missing next (forward) reference
// of a B frame.
1652 if ((s->next_picture_ptr == NULL ||
1653 s->next_picture_ptr->f.data[0] == NULL) &&
1654 s->pict_type == AV_PICTURE_TYPE_B) {
1655 /* Allocate a dummy frame */
1656 i = ff_find_unused_picture(s, 0);
1658 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1661 s->next_picture_ptr = &s->picture[i];
1662 s->next_picture_ptr->f.key_frame = 0;
1663 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1664 s->next_picture_ptr = NULL;
1667 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1668 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1671 #if 0 // BUFREF-FIXME
1672 memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1673 memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
// Refresh the local last/next Picture copies from the pointers.
1675 if (s->last_picture_ptr) {
1676 ff_mpeg_unref_picture(s, &s->last_picture);
1677 if (s->last_picture_ptr->f.data[0] &&
1678 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1679 s->last_picture_ptr)) < 0)
1682 if (s->next_picture_ptr) {
1683 ff_mpeg_unref_picture(s, &s->next_picture);
1684 if (s->next_picture_ptr->f.data[0] &&
1685 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1686 s->next_picture_ptr)) < 0)
1690 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1691 s->last_picture_ptr->f.data[0]));
// Field picture: offset data to the coded field and double the strides
// so one field is addressed as a full frame.
1693 if (s->picture_structure!= PICT_FRAME) {
1695 for (i = 0; i < 4; i++) {
1696 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1697 s->current_picture.f.data[i] +=
1698 s->current_picture.f.linesize[i];
1700 s->current_picture.f.linesize[i] *= 2;
1701 s->last_picture.f.linesize[i] *= 2;
1702 s->next_picture.f.linesize[i] *= 2;
1706 s->err_recognition = avctx->err_recognition;
1708 /* set dequantizer, we can't do it during init as
1709 * it might change for mpeg4 and we can't do it in the header
1710 * decode as init is not called for mpeg4 there yet */
1711 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1712 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1713 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1714 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1715 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1716 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1718 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1719 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1722 if (s->dct_error_sum) {
1723 av_assert2(s->avctx->noise_reduction && s->encoding);
1724 update_noise_reduction(s);
// Deprecated XvMC hardware path (guarded by FF_API_XVMC; #if elided above).
1728 FF_DISABLE_DEPRECATION_WARNINGS
1729 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1730 return ff_xvmc_field_start(s, avctx);
1731 FF_ENABLE_DEPRECATION_WARNINGS
1732 #endif /* FF_API_XVMC */
1737 /* generic function for encode/decode called after a
1738 * frame has been coded/decoded. */
/*
 * Generic per-frame teardown after a frame is coded/decoded: draws
 * replicated edge pixels around reference frames (needed for
 * unrestricted motion vectors), records last-frame statistics, copies
 * the current_picture back into the pool, clears the local Picture
 * copies, and reports decode completion to frame threads.
 * NOTE(review): this excerpt elides lines (gaps in the embedded numbering).
 */
1739 void ff_MPV_frame_end(MpegEncContext *s)
// Deprecated XvMC path (FF_API_XVMC guard elided above).
1742 FF_DISABLE_DEPRECATION_WARNINGS
1743 /* redraw edges for the frame if decoding didn't complete */
1744 // just to make sure that all data is rendered.
1745 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1746 ff_xvmc_field_end(s);
1748 FF_ENABLE_DEPRECATION_WARNINGS
1749 #endif /* FF_API_XVMC */
// Edge extension is only done for software-decoded reference frames
// that actually need it (unrestricted MV, no EMU_EDGE, no hwaccel).
1750 if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1751 !s->avctx->hwaccel &&
1752 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1753 s->unrestricted_mv &&
1754 s->current_picture.reference &&
1756 !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1759 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1760 int hshift = desc->log2_chroma_w;
1761 int vshift = desc->log2_chroma_h;
1762 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1763 s->h_edge_pos, s->v_edge_pos,
1764 EDGE_WIDTH, EDGE_WIDTH,
1765 EDGE_TOP | EDGE_BOTTOM);
// Chroma planes use the same edge width scaled by the subsampling shifts.
1766 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1767 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1768 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1769 EDGE_TOP | EDGE_BOTTOM);
1770 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1771 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1772 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1773 EDGE_TOP | EDGE_BOTTOM);
1778 s->last_pict_type = s->pict_type;
1779 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1780 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1781 s->last_non_b_pict_type = s->pict_type;
1784 /* copy back current_picture variables */
1785 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1786 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1787 s->picture[i] = s->current_picture;
1791 av_assert0(i < MAX_PICTURE_COUNT);
1794 // clear copies, to avoid confusion
// These are plain struct copies of pool entries; zeroing (not
// unref'ing) is intentional here — the pool slots own the references.
1796 memset(&s->last_picture, 0, sizeof(Picture));
1797 memset(&s->next_picture, 0, sizeof(Picture));
1798 memset(&s->current_picture, 0, sizeof(Picture));
1800 s->avctx->coded_frame = &s->current_picture_ptr->f;
1802 if (s->current_picture.reference)
1803 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1807 * Draw a line from (sx, sy) -> (ex, ey).
1808 * @param w width of the image
1809 * @param h height of the image
1810 * @param stride stride/linesize of the image
1811 * @param color color (brightness contribution) of the line
/*
 * Additively plot an anti-aliased line between (sx, sy) and (ex, ey)
 * into a single plane, using 16.16 fixed-point interpolation. The
 * brightness contribution `color` is split between the two pixels
 * adjacent to the ideal line position.
 * NOTE(review): some lines (e.g. the vertical/short-line special cases)
 * are elided from this numbered excerpt.
 */
1813 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1814 int w, int h, int stride, int color)
1818 sx = av_clip(sx, 0, w - 1);
1819 sy = av_clip(sy, 0, h - 1);
1820 ex = av_clip(ex, 0, w - 1);
1821 ey = av_clip(ey, 0, h - 1);
1823 buf[sy * stride + sx] += color;
// Mostly-horizontal case: iterate over x, interpolate y.
1825 if (FFABS(ex - sx) > FFABS(ey - sy)) {
// Swap endpoints so the loop always walks left-to-right.
1827 FFSWAP(int, sx, ex);
1828 FFSWAP(int, sy, ey);
1830 buf += sx + sy * stride;
// f: slope in 16.16 fixed point (relative to translated origin).
1832 f = ((ey - sy) << 16) / ex;
1833 for (x = 0; x <= ex; x++) {
1835 fr = (x * f) & 0xFFFF;
// Split `color` between the two vertically adjacent pixels.
1836 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1837 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
// Mostly-vertical case: iterate over y, interpolate x.
1841 FFSWAP(int, sx, ex);
1842 FFSWAP(int, sy, ey);
1844 buf += sx + sy * stride;
1847 f = ((ex - sx) << 16) / ey;
1850 for(y= 0; y <= ey; y++){
1852 fr = (y*f) & 0xFFFF;
1853 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1854 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1860 * Draw an arrow from (ex, ey) -> (sx, sy).
1861 * @param w width of the image
1862 * @param h height of the image
1863 * @param stride stride/linesize of the image
1864 * @param color color of the arrow
/*
 * Draw a motion-vector arrow into a plane: the shaft from (sx, sy) to
 * (ex, ey) plus, for vectors longer than 3 pixels, two short head
 * strokes at the (sx, sy) end.
 * NOTE(review): the dx/dy/rx/ry setup lines are elided from this excerpt.
 */
1866 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1867 int ey, int w, int h, int stride, int color)
// Clip loosely (±100 px outside the frame); draw_line clips exactly.
1871 sx = av_clip(sx, -100, w + 100);
1872 sy = av_clip(sy, -100, h + 100);
1873 ex = av_clip(ex, -100, w + 100);
1874 ey = av_clip(ey, -100, h + 100);
// Only draw an arrowhead when the vector is longer than 3 pixels.
1879 if (dx * dx + dy * dy > 3 * 3) {
1882 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1884 // FIXME subpixel accuracy
// Normalize (rx, ry) to a fixed head length of ~3 px (4.4 fixed point).
1885 rx = ROUNDED_DIV(rx * 3 << 4, length);
1886 ry = ROUNDED_DIV(ry * 3 << 4, length);
// Two head strokes: one along the vector, one perpendicular to it.
1888 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1889 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1891 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1895 * Print debugging info for the given picture.
/*
 * Dump per-macroblock debug information for a decoded picture. Two
 * independent modes, both controlled by avctx->debug / debug_mv:
 *  1) a textual per-MB map (skip count, qscale, MB type) via av_log();
 *  2) a visual overlay written into `pict` itself: motion-vector
 *     arrows on luma, QP and MB-type colorization on chroma, and XOR
 *     markers for partitioned blocks.
 * Skipped entirely for hwaccel/VDPAU frames or when no mb_type table
 * is present.
 * NOTE(review): this excerpt elides lines (gaps in the embedded
 * numbering) — several braces, `continue`s and variable declarations
 * are not visible here.
 */
1897 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1899 int mb_width, int mb_height, int mb_stride, int quarter_sample)
1901 if (avctx->hwaccel || !p || !p->mb_type
1902 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
// --- Mode 1: textual per-MB map -------------------------------------
1906 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1909 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1910 av_get_picture_type_char(pict->pict_type));
1911 for (y = 0; y < mb_height; y++) {
1912 for (x = 0; x < mb_width; x++) {
1913 if (avctx->debug & FF_DEBUG_SKIP) {
1914 int count = mbskip_table[x + y * mb_stride];
1917 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1919 if (avctx->debug & FF_DEBUG_QP) {
1920 av_log(avctx, AV_LOG_DEBUG, "%2d",
1921 p->qscale_table[x + y * mb_stride]);
1923 if (avctx->debug & FF_DEBUG_MB_TYPE) {
1924 int mb_type = p->mb_type[x + y * mb_stride];
1925 // Type & MV direction: one character per MB.
1926 if (IS_PCM(mb_type))
1927 av_log(avctx, AV_LOG_DEBUG, "P");
1928 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1929 av_log(avctx, AV_LOG_DEBUG, "A");
1930 else if (IS_INTRA4x4(mb_type))
1931 av_log(avctx, AV_LOG_DEBUG, "i");
1932 else if (IS_INTRA16x16(mb_type))
1933 av_log(avctx, AV_LOG_DEBUG, "I");
1934 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1935 av_log(avctx, AV_LOG_DEBUG, "d");
1936 else if (IS_DIRECT(mb_type))
1937 av_log(avctx, AV_LOG_DEBUG, "D");
1938 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1939 av_log(avctx, AV_LOG_DEBUG, "g");
1940 else if (IS_GMC(mb_type))
1941 av_log(avctx, AV_LOG_DEBUG, "G");
1942 else if (IS_SKIP(mb_type))
1943 av_log(avctx, AV_LOG_DEBUG, "S");
1944 else if (!USES_LIST(mb_type, 1))
1945 av_log(avctx, AV_LOG_DEBUG, ">");
1946 else if (!USES_LIST(mb_type, 0))
1947 av_log(avctx, AV_LOG_DEBUG, "<");
1949 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1950 av_log(avctx, AV_LOG_DEBUG, "X");
// Second character: partitioning.
1954 if (IS_8X8(mb_type))
1955 av_log(avctx, AV_LOG_DEBUG, "+");
1956 else if (IS_16X8(mb_type))
1957 av_log(avctx, AV_LOG_DEBUG, "-");
1958 else if (IS_8X16(mb_type))
1959 av_log(avctx, AV_LOG_DEBUG, "|");
1960 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1961 av_log(avctx, AV_LOG_DEBUG, " ");
1963 av_log(avctx, AV_LOG_DEBUG, "?");
// Third character: interlacing.
1966 if (IS_INTERLACED(mb_type))
1967 av_log(avctx, AV_LOG_DEBUG, "=");
1969 av_log(avctx, AV_LOG_DEBUG, " ");
1972 av_log(avctx, AV_LOG_DEBUG, "\n");
// --- Mode 2: visual overlay drawn into the output frame -------------
1976 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1977 (avctx->debug_mv)) {
1978 const int shift = 1 + quarter_sample;
1982 int h_chroma_shift, v_chroma_shift, block_height;
1983 const int width = avctx->width;
1984 const int height = avctx->height;
// H.264/SVQ3 store MVs at 4x4 granularity (log2 = 2), others at 8x8.
1985 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1986 const int mv_stride = (mb_width << mv_sample_log2) +
1987 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1989 *low_delay = 0; // needed to see the vectors without trashing the buffers
1991 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1993 av_frame_make_writable(pict);
1995 pict->opaque = NULL;
1996 ptr = pict->data[0];
1997 block_height = 16 >> v_chroma_shift;
1999 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2001 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2002 const int mb_index = mb_x + mb_y * mb_stride;
// Motion-vector arrows: type 0 = P forward, 1 = B forward, 2 = B backward.
2003 if ((avctx->debug_mv) && p->motion_val[0]) {
2005 for (type = 0; type < 3; type++) {
2009 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2010 (pict->pict_type!= AV_PICTURE_TYPE_P))
2015 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2016 (pict->pict_type!= AV_PICTURE_TYPE_B))
2021 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2022 (pict->pict_type!= AV_PICTURE_TYPE_B))
2027 if (!USES_LIST(p->mb_type[mb_index], direction))
// One arrow per 8x8 sub-block, from the block center.
2030 if (IS_8X8(p->mb_type[mb_index])) {
2032 for (i = 0; i < 4; i++) {
2033 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2034 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2035 int xy = (mb_x * 2 + (i & 1) +
2036 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2037 int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
2038 int my = (p->motion_val[direction][xy][1] >> shift) + sy;
2039 draw_arrow(ptr, sx, sy, mx, my, width,
2040 height, pict->linesize[0], 100);
2042 } else if (IS_16X8(p->mb_type[mb_index])) {
2044 for (i = 0; i < 2; i++) {
2045 int sx = mb_x * 16 + 8;
2046 int sy = mb_y * 16 + 4 + 8 * i;
2047 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2048 int mx = (p->motion_val[direction][xy][0] >> shift);
2049 int my = (p->motion_val[direction][xy][1] >> shift);
// Field MBs: vertical MV component is in field units (scaling elided).
2051 if (IS_INTERLACED(p->mb_type[mb_index]))
2054 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2055 height, pict->linesize[0], 100);
2057 } else if (IS_8X16(p->mb_type[mb_index])) {
2059 for (i = 0; i < 2; i++) {
2060 int sx = mb_x * 16 + 4 + 8 * i;
2061 int sy = mb_y * 16 + 8;
2062 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2063 int mx = p->motion_val[direction][xy][0] >> shift;
2064 int my = p->motion_val[direction][xy][1] >> shift;
2066 if (IS_INTERLACED(p->mb_type[mb_index]))
2069 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2070 height, pict->linesize[0], 100);
// 16x16 (default) case: single arrow from the MB center.
2073 int sx= mb_x * 16 + 8;
2074 int sy= mb_y * 16 + 8;
2075 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2076 int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2077 int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2078 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
// Visual QP: paint both chroma planes with a gray level derived from qscale.
2082 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2083 uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2084 0x0101010101010101ULL;
2086 for (y = 0; y < block_height; y++) {
2087 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2088 (block_height * mb_y + y) *
2089 pict->linesize[1]) = c;
2090 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2091 (block_height * mb_y + y) *
2092 pict->linesize[2]) = c;
// Visual MB type: color-code chroma by MB type via a hue angle.
2095 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2097 int mb_type = p->mb_type[mb_index];
2100 #define COLOR(theta, r) \
2101 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2102 v = (int)(128 + r * sin(theta * 3.141592 / 180));
// COLOR(...) invocations for each branch are elided from this excerpt.
2106 if (IS_PCM(mb_type)) {
2108 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2109 IS_INTRA16x16(mb_type)) {
2111 } else if (IS_INTRA4x4(mb_type)) {
2113 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2115 } else if (IS_DIRECT(mb_type)) {
2117 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2119 } else if (IS_GMC(mb_type)) {
2121 } else if (IS_SKIP(mb_type)) {
2123 } else if (!USES_LIST(mb_type, 1)) {
2125 } else if (!USES_LIST(mb_type, 0)) {
2128 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
// Replicate the u/v bytes across a full 8-byte store.
2132 u *= 0x0101010101010101ULL;
2133 v *= 0x0101010101010101ULL;
2134 for (y = 0; y < block_height; y++) {
2135 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2136 (block_height * mb_y + y) * pict->linesize[1]) = u;
2137 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2138 (block_height * mb_y + y) * pict->linesize[2]) = v;
// XOR partition markers into luma: horizontal split line...
2142 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2143 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2144 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2145 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2146 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
// ...and vertical split line.
2148 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2149 for (y = 0; y < 16; y++)
2150 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2151 pict->linesize[0]] ^= 0x80;
// 4x4 sub-partitions (H.264): mark 8x8 blocks whose inner MVs differ.
2153 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2154 int dm = 1 << (mv_sample_log2 - 2);
2155 for (i = 0; i < 4; i++) {
2156 int sx = mb_x * 16 + 8 * (i & 1);
2157 int sy = mb_y * 16 + 8 * (i >> 1);
2158 int xy = (mb_x * 2 + (i & 1) +
2159 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
// Compare both MV components at once through a 32-bit view.
2161 int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2162 if (mv[0] != mv[dm] ||
2163 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2164 for (y = 0; y < 8; y++)
2165 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2166 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2167 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2168 pict->linesize[0]) ^= 0x8080808080808080ULL;
2172 if (IS_INTERLACED(mb_type) &&
2173 avctx->codec->id == AV_CODEC_ID_H264) {
// Reset so the skip count restarts for the next frame.
2177 mbskip_table[mb_index] = 0;
/*
 * Convenience wrapper: forwards the MpegEncContext's geometry and
 * tables to ff_print_debug_info2().
 */
2183 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2185 ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2186 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/*
 * Export the picture's qscale table to the output AVFrame as frame
 * side data. A new reference to the table buffer is taken and offset
 * past the top/left padding rows before being attached.
 * Returns 0 on success or AVERROR(ENOMEM) if the buffer ref fails
 * (the NULL check line is elided from this excerpt).
 */
2189 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2191 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
// Skip the two padding MB rows plus one column at the start of the table.
2192 int offset = 2*s->mb_stride + 1;
2194 return AVERROR(ENOMEM);
2195 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2196 ref->size -= offset;
2197 ref->data += offset;
2198 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/*
 * Half-pel motion compensation for one block in low-resolution
 * (lowres) decoding: computes the sub-pel phase and integer source
 * position at the reduced resolution, falls back to
 * emulated_edge_mc() when the source block crosses the picture edge,
 * and applies the h264 chroma-style MC function for the interpolation.
 * NOTE(review): this excerpt elides lines (gaps in the embedded
 * numbering), including the quarter-sample adjustment and the return.
 */
2201 static inline int hpel_motion_lowres(MpegEncContext *s,
2202 uint8_t *dest, uint8_t *src,
2203 int field_based, int field_select,
2204 int src_x, int src_y,
2205 int width, int height, ptrdiff_t stride,
2206 int h_edge_pos, int v_edge_pos,
2207 int w, int h, h264_chroma_mc_func *pix_op,
2208 int motion_x, int motion_y)
2210 const int lowres = s->avctx->lowres;
2211 const int op_index = FFMIN(lowres, 3);
// s_mask isolates the sub-pel fraction at this lowres level.
2212 const int s_mask = (2 << lowres) - 1;
2216 if (s->quarter_sample) {
2221 sx = motion_x & s_mask;
2222 sy = motion_y & s_mask;
// Note: ">> lowres + 1" parses as ">> (lowres + 1)" — intentional.
2223 src_x += motion_x >> lowres + 1;
2224 src_y += motion_y >> lowres + 1;
2226 src += src_y * stride + src_x;
// Edge emulation when the (w+1)x(h+1) source block leaves the picture.
2228 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2229 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2230 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->linesize,
2231 src, s->linesize, w + 1,
2232 (h + 1) << field_based, src_x,
2233 src_y << field_based,
2236 src = s->edge_emu_buffer;
// Rescale the sub-pel phase to the 1/8-pel range pix_op expects.
2240 sx = (sx << 2) >> lowres;
2241 sy = (sy << 2) >> lowres;
2244 pix_op[op_index](dest, src, stride, h, sx, sy);
2248 /* apply one mpeg motion vector to the three components */
/* Lowres MC of one macroblock (or field half): derives luma and chroma
 * source positions + sub-pel phases from a single MV, with per-out_format
 * chroma MV derivation (H.263 vs H.261 vs MPEG 4:2:0/4:2:2/4:4:4), then
 * filters Y, Cb and Cr.  NOTE(review): interior lines are missing from this
 * excerpt (e.g. the parameter list and some branches are truncated). */
2249 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2256 uint8_t **ref_picture,
2257 h264_chroma_mc_func *pix_op,
2258 int motion_x, int motion_y,
2261 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2262 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2263 ptrdiff_t uvlinesize, linesize;
2264 const int lowres = s->avctx->lowres;
/* chroma interpolator index depends on horizontal chroma subsampling */
2265 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2266 const int block_s = 8>>lowres;
2267 const int s_mask = (2 << lowres) - 1;
2268 const int h_edge_pos = s->h_edge_pos >> lowres;
2269 const int v_edge_pos = s->v_edge_pos >> lowres;
/* field pictures use doubled line strides */
2270 linesize = s->current_picture.f.linesize[0] << field_based;
2271 uvlinesize = s->current_picture.f.linesize[1] << field_based;
2273 // FIXME obviously not perfect but qpel will not work in lowres anyway
2274 if (s->quarter_sample) {
2280 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2283 sx = motion_x & s_mask;
2284 sy = motion_y & s_mask;
2285 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2286 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* chroma MV derivation differs per container format */
2288 if (s->out_format == FMT_H263) {
2289 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2290 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2291 uvsrc_x = src_x >> 1;
2292 uvsrc_y = src_y >> 1;
2293 } else if (s->out_format == FMT_H261) {
2294 // even chroma mv's are full pel in H261
2297 uvsx = (2 * mx) & s_mask;
2298 uvsy = (2 * my) & s_mask;
2299 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2300 uvsrc_y = mb_y * block_s + (my >> lowres);
/* MPEG-style: 4:2:0 vs 4:2:2 vs 4:4:4 handled by the shift flags */
2302 if(s->chroma_y_shift){
2307 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2308 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2310 if(s->chroma_x_shift){
2314 uvsy = motion_y & s_mask;
2316 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2319 uvsx = motion_x & s_mask;
2320 uvsy = motion_y & s_mask;
2327 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2328 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2329 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* out-of-picture reference: go through the edge-emulation buffer */
2331 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2332 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2333 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, linesize >> field_based, ptr_y,
2334 linesize >> field_based, 17, 17 + field_based,
2335 src_x, src_y << field_based, h_edge_pos,
2337 ptr_y = s->edge_emu_buffer;
/* chroma is skipped entirely in gray-only decoding */
2338 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2339 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2340 s->vdsp.emulated_edge_mc(uvbuf, uvlinesize >> field_based,
2341 ptr_cb, uvlinesize >> field_based, 9,
2343 uvsrc_x, uvsrc_y << field_based,
2344 h_edge_pos >> 1, v_edge_pos >> 1);
2345 s->vdsp.emulated_edge_mc(uvbuf + 16, uvlinesize >> field_based,
2346 ptr_cr, uvlinesize >> field_based, 9,
2348 uvsrc_x, uvsrc_y << field_based,
2349 h_edge_pos >> 1, v_edge_pos >> 1);
2351 ptr_cr = uvbuf + 16;
2355 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
/* bottom field: advance both destination and source by one line */
2357 dest_y += s->linesize;
2358 dest_cb += s->uvlinesize;
2359 dest_cr += s->uvlinesize;
2363 ptr_y += s->linesize;
2364 ptr_cb += s->uvlinesize;
2365 ptr_cr += s->uvlinesize;
/* rescale sub-pel phases to the interpolator's domain and filter */
2368 sx = (sx << 2) >> lowres;
2369 sy = (sy << 2) >> lowres;
2370 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2372 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2373 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2374 uvsx = (uvsx << 2) >> lowres;
2375 uvsy = (uvsy << 2) >> lowres;
2377 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2378 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2381 // FIXME h261 lowres loop filter
/**
 * Chroma MC for 4MV (four luma vectors per MB) macroblocks at lowres:
 * the four luma MVs are averaged into one chroma MV with the H.263
 * special rounding, then Cb and Cr are filtered with that single vector.
 * NOTE(review): some statements (e.g. the emu-edge condition for the
 * second plane) are not visible in this excerpt.
 */
2384 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2385 uint8_t *dest_cb, uint8_t *dest_cr,
2386 uint8_t **ref_picture,
2387 h264_chroma_mc_func * pix_op,
2390 const int lowres = s->avctx->lowres;
2391 const int op_index = FFMIN(lowres, 3);
2392 const int block_s = 8 >> lowres;
2393 const int s_mask = (2 << lowres) - 1;
/* chroma plane is half-size -> one extra shift on the edge positions */
2394 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2395 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2396 int emu = 0, src_x, src_y, sx, sy;
2400 if (s->quarter_sample) {
2405 /* In case of 8X8, we construct a single chroma motion vector
2406 with a special rounding */
2407 mx = ff_h263_round_chroma(mx);
2408 my = ff_h263_round_chroma(my);
2412 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2413 src_y = s->mb_y * block_s + (my >> lowres + 1);
/* same offset is reused for both the Cb and the Cr plane */
2415 offset = src_y * s->uvlinesize + src_x;
2416 ptr = ref_picture[1] + offset;
2417 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2418 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2419 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->uvlinesize, ptr, s->uvlinesize,
2420 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2421 ptr = s->edge_emu_buffer;
2424 sx = (sx << 2) >> lowres;
2425 sy = (sy << 2) >> lowres;
2426 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr plane: same position; emu-edge copy presumably guarded by 'emu' set
 * above -- the guard line is not visible here */
2428 ptr = ref_picture[2] + offset;
2430 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->uvlinesize,
2431 ptr, s->uvlinesize, 9, 9,
2432 src_x, src_y, h_edge_pos, v_edge_pos);
2433 ptr = s->edge_emu_buffer;
2435 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2439 * motion compensation of a single macroblock
2441 * @param dest_y luma destination pointer
2442 * @param dest_cb chroma cb/u destination pointer
2443 * @param dest_cr chroma cr/v destination pointer
2444 * @param dir direction (0->forward, 1->backward)
2445 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2446 * @param pix_op halfpel motion compensation function (average or put normally)
2447 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Lowres counterpart of ff_MPV_motion(): dispatches on s->mv_type
 * (16x16 / 8x8 / field / 16x8 / dual-prime) to the lowres MC helpers.
 * NOTE(review): the switch labels themselves fall outside this excerpt;
 * branch grouping below is inferred from the visible calls. */
2449 static inline void MPV_motion_lowres(MpegEncContext *s,
2450 uint8_t *dest_y, uint8_t *dest_cb,
2452 int dir, uint8_t **ref_picture,
2453 h264_chroma_mc_func *pix_op)
2457 const int lowres = s->avctx->lowres;
2458 const int block_s = 8 >>lowres;
2463 switch (s->mv_type) {
/* single 16x16 vector */
2465 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2467 ref_picture, pix_op,
2468 s->mv[dir][0][0], s->mv[dir][0][1],
/* 4MV: one hpel block per 8x8 luma quadrant, chroma from averaged MV */
2474 for (i = 0; i < 4; i++) {
2475 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2476 s->linesize) * block_s,
2477 ref_picture[0], 0, 0,
2478 (2 * mb_x + (i & 1)) * block_s,
2479 (2 * mb_y + (i >> 1)) * block_s,
2480 s->width, s->height, s->linesize,
2481 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2482 block_s, block_s, pix_op,
2483 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate luma MVs for the combined chroma vector */
2485 mx += s->mv[dir][i][0];
2486 my += s->mv[dir][i][1];
2489 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2490 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field MVs inside a frame picture: top then bottom field */
2494 if (s->picture_structure == PICT_FRAME) {
2496 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2497 1, 0, s->field_select[dir][0],
2498 ref_picture, pix_op,
2499 s->mv[dir][0][0], s->mv[dir][0][1],
2502 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2503 1, 1, s->field_select[dir][1],
2504 ref_picture, pix_op,
2505 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture referencing the opposite parity of the current frame */
2508 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2509 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2510 ref_picture = s->current_picture_ptr->f.data;
2513 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2514 0, 0, s->field_select[dir][0],
2515 ref_picture, pix_op,
2517 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two vectors, upper and lower half of the MB */
2521 for (i = 0; i < 2; i++) {
2522 uint8_t **ref2picture;
2524 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2525 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2526 ref2picture = ref_picture;
2528 ref2picture = s->current_picture_ptr->f.data;
2531 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2532 0, 0, s->field_select[dir][i],
2533 ref2picture, pix_op,
2534 s->mv[dir][i][0], s->mv[dir][i][1] +
2535 2 * block_s * i, block_s, mb_y >> 1);
2537 dest_y += 2 * block_s * s->linesize;
2538 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2539 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* dual prime: put the first prediction, then average in the second */
2543 if (s->picture_structure == PICT_FRAME) {
2544 for (i = 0; i < 2; i++) {
2546 for (j = 0; j < 2; j++) {
2547 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2549 ref_picture, pix_op,
2550 s->mv[dir][2 * i + j][0],
2551 s->mv[dir][2 * i + j][1],
2554 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2557 for (i = 0; i < 2; i++) {
2558 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2559 0, 0, s->picture_structure != i + 1,
2560 ref_picture, pix_op,
2561 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2562 2 * block_s, mb_y >> 1);
2564 // after put we make avg of the same block
2565 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2567 // opposite parity is always in the same
2568 // frame if this is second field
2569 if (!s->first_field) {
2570 ref_picture = s->current_picture_ptr->f.data;
2581 * find the lowest MB row referenced in the MVs
/* Used by frame-threading to know how far the reference frame must have
 * been decoded before this MB can be reconstructed. */
2583 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2585 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2586 int my, off, i, mvs;
/* field pictures / GMC: be conservative (fall through to mb_height-1) */
2588 if (s->picture_structure != PICT_FRAME || s->mcsel)
2591 switch (s->mv_type) {
/* track the extreme vertical MV components in qpel units */
2605 for (i = 0; i < mvs; i++) {
2606 my = s->mv[dir][i][1]<<qpel_shift;
2607 my_max = FFMAX(my_max, my);
2608 my_min = FFMIN(my_min, my);
/* convert the worst-case qpel displacement into whole MB rows (>>6) */
2611 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2613 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2615 return s->mb_height-1;
2618 /* put block[] to dest[] */
/* Intra path: dequantize the block, then overwrite dest with its IDCT. */
2619 static inline void put_dct(MpegEncContext *s,
2620 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2622 s->dct_unquantize_intra(s, block, i, qscale);
2623 s->dsp.idct_put (dest, line_size, block);
2626 /* add block[] to dest[] */
/* Already-dequantized residual: IDCT and add onto the prediction in dest.
 * Skipped entirely when the block has no coded coefficients. */
2627 static inline void add_dct(MpegEncContext *s,
2628 int16_t *block, int i, uint8_t *dest, int line_size)
2630 if (s->block_last_index[i] >= 0) {
2631 s->dsp.idct_add (dest, line_size, block);
/* Inter path: dequantize the residual, then IDCT-add it onto dest.
 * No-op for blocks without coded coefficients. */
2635 static inline void add_dequant_dct(MpegEncContext *s,
2636 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2638 if (s->block_last_index[i] >= 0) {
2639 s->dct_unquantize_inter(s, block, i, qscale);
2641 s->dsp.idct_add (dest, line_size, block);
2646 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra-prediction state of the current macroblock so that a
 * following intra MB does not predict from stale values: luma DC back to
 * the neutral 1024, AC coefficients zeroed, coded_block flags cleared,
 * chroma DC/AC likewise, and the mbintra_table entry cleared. */
2648 void ff_clean_intra_table_entries(MpegEncContext *s)
2650 int wrap = s->b8_stride;
2651 int xy = s->block_index[0];
/* luma DC predictors of the 4 8x8 blocks -> neutral value */
2654 s->dc_val[0][xy + 1 ] =
2655 s->dc_val[0][xy + wrap] =
2656 s->dc_val[0][xy + 1 + wrap] = 1024;
/* luma AC prediction rows (two b8 rows of 16 int16 each) */
2658 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2659 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2660 if (s->msmpeg4_version>=3) {
2661 s->coded_block[xy ] =
2662 s->coded_block[xy + 1 ] =
2663 s->coded_block[xy + wrap] =
2664 s->coded_block[xy + 1 + wrap] = 0;
/* switch to MB-granular indexing for the chroma planes */
2667 wrap = s->mb_stride;
2668 xy = s->mb_x + s->mb_y * wrap;
2670 s->dc_val[2][xy] = 1024;
2672 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2673 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2675 s->mbintra_table[xy]= 0;
2678 /* generic function called after a macroblock has been parsed by the
2679 decoder or after it has been encoded by the encoder.
2681 Important variables used:
2682 s->mb_intra : true if intra macroblock
2683 s->mv_dir : motion vector direction
2684 s->mv_type : motion vector type
2685 s->mv : motion vector
2686 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstructs one macroblock: XVMC hand-off, DC-predictor maintenance,
 * motion compensation (normal or lowres), then dequant+IDCT of the
 * residual.  lowres_flag/is_mpeg12 are compile-time-constant template
 * parameters (av_always_inline) so dead branches are eliminated.
 * NOTE(review): many interior lines are missing from this excerpt. */
2688 static av_always_inline
2689 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2690 int lowres_flag, int is_mpeg12)
2692 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2695 FF_DISABLE_DEPRECATION_WARNINGS
/* XVMC does its own IDCT/MC with pblocks -- nothing more to do here */
2696 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2697 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2700 FF_ENABLE_DEPRECATION_WARNINGS
2701 #endif /* FF_API_XVMC */
2703 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2704 /* print DCT coefficients */
2706 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2708 for(j=0; j<64; j++){
2709 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2711 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2715 s->current_picture.qscale_table[mb_xy] = s->qscale;
2717 /* update DC predictors for P macroblocks */
2719 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2720 if(s->mbintra_table[mb_xy])
2721 ff_clean_intra_table_entries(s);
/* 128 << precision is the neutral DC predictor value */
2725 s->last_dc[2] = 128 << s->intra_dc_precision;
2728 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2729 s->mbintra_table[mb_xy]=1;
2731 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2732 uint8_t *dest_y, *dest_cb, *dest_cr;
2733 int dct_linesize, dct_offset;
2734 op_pixels_func (*op_pix)[4];
2735 qpel_mc_func (*op_qpix)[16];
2736 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2737 const int uvlinesize = s->current_picture.f.linesize[1];
2738 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2739 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2741 /* avoid copy if macroblock skipped in last frame too */
2742 /* skip only during decoding as we might trash the buffers during encoding a bit */
2744 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2746 if (s->mb_skipped) {
2748 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2750 } else if(!s->current_picture.reference) {
2753 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT interleaves the two fields: double stride, offset 1 line */
2757 dct_linesize = linesize << s->interlaced_dct;
2758 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2762 dest_cb= s->dest[1];
2763 dest_cr= s->dest[2];
/* non-readable B-frame path: reconstruct into a scratchpad first */
2765 dest_y = s->b_scratchpad;
2766 dest_cb= s->b_scratchpad+16*linesize;
2767 dest_cr= s->b_scratchpad+32*linesize;
2771 /* motion handling */
2772 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the reference rows we need are decoded */
2775 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2776 if (s->mv_dir & MV_DIR_FORWARD) {
2777 ff_thread_await_progress(&s->last_picture_ptr->tf,
2778 ff_MPV_lowest_referenced_row(s, 0),
2781 if (s->mv_dir & MV_DIR_BACKWARD) {
2782 ff_thread_await_progress(&s->next_picture_ptr->tf,
2783 ff_MPV_lowest_referenced_row(s, 1),
/* lowres MC: forward put, then backward averaged in for B blocks */
2789 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2791 if (s->mv_dir & MV_DIR_FORWARD) {
2792 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2793 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2795 if (s->mv_dir & MV_DIR_BACKWARD) {
2796 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* full-resolution MC */
2799 op_qpix = s->me.qpel_put;
2800 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2801 op_pix = s->hdsp.put_pixels_tab;
2803 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2805 if (s->mv_dir & MV_DIR_FORWARD) {
2806 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2807 op_pix = s->hdsp.avg_pixels_tab;
2808 op_qpix= s->me.qpel_avg;
2810 if (s->mv_dir & MV_DIR_BACKWARD) {
2811 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2816 /* skip dequant / idct if we are really late ;) */
2817 if(s->avctx->skip_idct){
2818 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2819 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2820 || s->avctx->skip_idct >= AVDISCARD_ALL)
2824 /* add dct residue */
/* codecs whose coefficients are still quantized at this point */
2825 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2826 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2827 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2828 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2829 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2830 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2832 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2833 if (s->chroma_y_shift){
2834 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2835 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2839 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2840 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2841 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2842 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* coefficients already dequantized during parsing */
2845 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2846 add_dct(s, block[0], 0, dest_y , dct_linesize);
2847 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2848 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2849 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2851 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2852 if(s->chroma_y_shift){//Chroma420
2853 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2854 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2857 dct_linesize = uvlinesize << s->interlaced_dct;
2858 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2860 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2861 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2862 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2863 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2864 if(!s->chroma_x_shift){//Chroma444
2865 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2866 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2867 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2868 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2873 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2874 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2877 /* dct only in intra block */
2878 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2879 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2880 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2881 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2882 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2884 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2885 if(s->chroma_y_shift){
2886 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2887 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2891 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2892 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2893 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2894 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra: coefficients already dequantized, plain idct_put */
2898 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2899 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2900 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2901 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2903 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2904 if(s->chroma_y_shift){
2905 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2906 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2909 dct_linesize = uvlinesize << s->interlaced_dct;
2910 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2912 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2913 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2914 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2915 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2916 if(!s->chroma_x_shift){//Chroma444
2917 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2918 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2919 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2920 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* scratchpad path: copy the finished MB into the real picture */
2928 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2929 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2930 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: picks one of the four compile-time specializations
 * of MPV_decode_mb_internal() based on out_format (MPEG-1/2 vs other)
 * and whether lowres decoding is enabled. */
2935 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2937 if(s->out_format == FMT_MPEG1) {
2938 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2939 else MPV_decode_mb_internal(s, block, 0, 1);
2942 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2943 else MPV_decode_mb_internal(s, block, 0, 0);
2947 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* Draws the edge padding for a finished band of the current picture and,
 * if the application registered a draw_horiz_band callback, invokes it
 * with the right source picture (cur or last) and per-plane offsets. */
2949 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2950 Picture *last, int y, int h, int picture_structure,
2951 int first_field, int draw_edges, int low_delay,
2952 int v_edge_pos, int h_edge_pos)
2954 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2955 int hshift = desc->log2_chroma_w;
2956 int vshift = desc->log2_chroma_h;
2957 const int field_pic = picture_structure != PICT_FRAME;
/* edge drawing is skipped for hw-accelerated decoding / EMU_EDGE */
2963 if (!avctx->hwaccel &&
2964 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2967 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2968 int *linesize = cur->f.linesize;
2969 int sides = 0, edge_h;
2970 if (y==0) sides |= EDGE_TOP;
2971 if (y + h >= v_edge_pos)
2972 sides |= EDGE_BOTTOM;
2974 edge_h= FFMIN(h, v_edge_pos - y);
/* replicate borders for luma then both (subsampled) chroma planes */
2976 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2977 linesize[0], h_edge_pos, edge_h,
2978 EDGE_WIDTH, EDGE_WIDTH, sides);
2979 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2980 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2981 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2982 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2983 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2984 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2987 h = FFMIN(h, avctx->height - y);
2989 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2991 if (avctx->draw_horiz_band) {
2993 int offset[AV_NUM_DATA_POINTERS];
/* B frames / low delay / coded order: show the current picture,
 * otherwise the band comes from the last (display-order) picture */
2996 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2997 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
3004 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
3005 picture_structure == PICT_FRAME &&
3006 avctx->codec_id != AV_CODEC_ID_SVQ3) {
3007 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
3010 offset[0]= y * src->linesize[0];
3012 offset[2]= (y >> vshift) * src->linesize[1];
3013 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
3019 avctx->draw_horiz_band(avctx, src, offset,
3020 y, picture_structure, h);
/* Convenience wrapper: forwards the band to ff_draw_horiz_band() with all
 * parameters taken from the MpegEncContext state. */
3024 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3026 int draw_edges = s->unrestricted_mv && !s->intra_only;
3027 ff_draw_horiz_band(s->avctx, &s->dsp, s->current_picture_ptr,
3028 s->last_picture_ptr, y, h, s->picture_structure,
3029 s->first_field, draw_edges, s->low_delay,
3030 s->v_edge_pos, s->h_edge_pos);
/* Computes the per-MB block_index[] entries (DC/AC prediction indexing)
 * and the s->dest[] plane pointers for the current MB position, honoring
 * lowres scaling (mb_size), chroma subsampling and field pictures. */
3033 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3034 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
3035 const int uvlinesize = s->current_picture.f.linesize[1];
/* log2 of the MB edge length at this lowres level (16 >> lowres) */
3036 const int mb_size= 4 - s->avctx->lowres;
/* 4 luma b8 indices (2x2), then the two chroma block indices */
3038 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3039 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3040 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3041 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3042 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3043 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3044 //block_index is not used by mpeg2, so it is not affected by chroma_format
3046 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
3047 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3048 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3050 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3052 if(s->picture_structure==PICT_FRAME){
3053 s->dest[0] += s->mb_y * linesize << mb_size;
3054 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3055 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: rows belong alternately to each field -> mb_y/2 */
3057 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3058 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3059 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3060 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3066 * Permute an 8x8 block.
3067 * @param block the block which will be permuted according to the given permutation vector
3068 * @param permutation the permutation vector
3069 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3070 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3071 * (inverse) permutated to scantable order!
3073 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3079 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* first pass: gather the (at most last+1) non-zero coefficients into a
 * temp array -- temp's declaration is not visible in this excerpt */
3081 for(i=0; i<=last; i++){
3082 const int j= scantable[i];
/* second pass: scatter them back to their permuted positions */
3087 for(i=0; i<=last; i++){
3088 const int j= scantable[i];
3089 const int perm_j= permutation[j];
3090 block[perm_j]= temp[j];
/* Flush callback: drop every reference held by the decoder (picture pool
 * plus the current/last/next working pictures), reset the MB position,
 * the bitstream parser state and the bitstream buffer.  Safe to call on a
 * context that was never fully initialized (NULL checks below). */
3094 void ff_mpeg_flush(AVCodecContext *avctx){
3096 MpegEncContext *s = avctx->priv_data;
3098 if(s==NULL || s->picture==NULL)
3101 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3102 ff_mpeg_unref_picture(s, &s->picture[i]);
3103 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3105 ff_mpeg_unref_picture(s, &s->current_picture);
3106 ff_mpeg_unref_picture(s, &s->last_picture);
3107 ff_mpeg_unref_picture(s, &s->next_picture);
3109 s->mb_x= s->mb_y= 0;
/* -1 = "no start code seen yet" */
3112 s->parse_context.state= -1;
3113 s->parse_context.frame_start_found= 0;
3114 s->parse_context.overread= 0;
3115 s->parse_context.overread_index= 0;
3116 s->parse_context.index= 0;
3117 s->parse_context.last_index= 0;
3118 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantizer: DC is scaled by the y/c DC scale, AC terms by
 * qscale * intra_matrix with the MPEG-1 "oddification" (level-1)|1 that
 * forces results to odd magnitudes (mismatch control). */
3122 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
3123 int16_t *block, int n, int qscale)
3125 int i, level, nCoeffs;
3126 const uint16_t *quant_matrix;
3128 nCoeffs= s->block_last_index[n];
/* n < 4 are luma blocks, the rest chroma */
3130 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3131 /* XXX: only mpeg1 */
3132 quant_matrix = s->intra_matrix;
3133 for(i=1;i<=nCoeffs;i++) {
3134 int j= s->intra_scantable.permutated[i];
/* negative branch (sign handling lines not visible in this excerpt) */
3139 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3140 level = (level - 1) | 1;
/* positive branch */
3143 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3144 level = (level - 1) | 1;
/* MPEG-1 inter dequantizer: ((2*level+1) * qscale * inter_matrix) >> 4
 * with the same odd-forcing (level-1)|1 mismatch control; DC is treated
 * like any other coefficient (loop starts at i=0). */
3151 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
3152 int16_t *block, int n, int qscale)
3154 int i, level, nCoeffs;
3155 const uint16_t *quant_matrix;
3157 nCoeffs= s->block_last_index[n];
3159 quant_matrix = s->inter_matrix;
3160 for(i=0; i<=nCoeffs; i++) {
3161 int j= s->intra_scantable.permutated[i];
/* negative branch (sign handling lines not visible in this excerpt) */
3166 level = (((level << 1) + 1) * qscale *
3167 ((int) (quant_matrix[j]))) >> 4;
3168 level = (level - 1) | 1;
/* positive branch */
3171 level = (((level << 1) + 1) * qscale *
3172 ((int) (quant_matrix[j]))) >> 4;
3173 level = (level - 1) | 1;
/* MPEG-2 intra dequantizer: like the MPEG-1 version but without the
 * odd-forcing step (MPEG-2 uses a different mismatch control), and with
 * all 63 AC coefficients processed when alternate scan is in use. */
3180 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
3181 int16_t *block, int n, int qscale)
3183 int i, level, nCoeffs;
3184 const uint16_t *quant_matrix;
/* alternate scan order makes block_last_index unusable as a bound */
3186 if(s->alternate_scan) nCoeffs= 63;
3187 else nCoeffs= s->block_last_index[n];
3189 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3190 quant_matrix = s->intra_matrix;
3191 for(i=1;i<=nCoeffs;i++) {
3192 int j= s->intra_scantable.permutated[i];
3197 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3200 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of the MPEG-2 intra dequantizer; visible arithmetic
 * matches dct_unquantize_mpeg2_intra_c.  NOTE(review): the lines that
 * differentiate it (presumably the spec's sum/mismatch accumulation) are
 * not visible in this excerpt -- confirm against the full source. */
3207 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
3208 int16_t *block, int n, int qscale)
3210 int i, level, nCoeffs;
3211 const uint16_t *quant_matrix;
3214 if(s->alternate_scan) nCoeffs= 63;
3215 else nCoeffs= s->block_last_index[n];
3217 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3219 quant_matrix = s->intra_matrix;
3220 for(i=1;i<=nCoeffs;i++) {
3221 int j= s->intra_scantable.permutated[i];
3226 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3229 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantizer: ((2*level+1) * qscale * inter_matrix) >> 4,
 * no odd-forcing; full 63-coefficient sweep under alternate scan. */
3238 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
3239 int16_t *block, int n, int qscale)
3241 int i, level, nCoeffs;
3242 const uint16_t *quant_matrix;
3245 if(s->alternate_scan) nCoeffs= 63;
3246 else nCoeffs= s->block_last_index[n];
3248 quant_matrix = s->inter_matrix;
3249 for(i=0; i<=nCoeffs; i++) {
3250 int j= s->intra_scantable.permutated[i];
/* negative branch (sign handling lines not visible in this excerpt) */
3255 level = (((level << 1) + 1) * qscale *
3256 ((int) (quant_matrix[j]))) >> 4;
/* positive branch */
3259 level = (((level << 1) + 1) * qscale *
3260 ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra dequantizer: uniform quantizer, level*qmul +/- qadd
 * depending on sign, with qadd = (qscale-1)|1 (always odd). */
3269 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
3270 int16_t *block, int n, int qscale)
3272 int i, level, qmul, qadd;
/* with AIC the last index may legitimately be < 0 */
3275 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
3280 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3281 qadd = (qscale - 1) | 1;
/* raster_end[] converts the scan-order last index to a raster bound */
3288 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3290 for(i=1; i<=nCoeffs; i++) {
3294 level = level * qmul - qadd;
3296 level = level * qmul + qadd;
/* H.263-style inter dequantizer: same level*qmul +/- qadd reconstruction
 * as the intra variant but including the DC coefficient (loop from 0). */
3303 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
3304 int16_t *block, int n, int qscale)
3306 int i, level, qmul, qadd;
3309 av_assert2(s->block_last_index[n]>=0);
3311 qadd = (qscale - 1) | 1;
3314 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3316 for(i=0; i<=nCoeffs; i++) {
3320 level = level * qmul - qadd;
3322 level = level * qmul + qadd;
3330 * set qscale and update qscale dependent variables.
/* Clamps qscale (upper bound 31 visible; lower-bound branch not visible
 * in this excerpt) and refreshes the derived chroma_qscale and the
 * luma/chroma DC scale factors from their lookup tables. */
3332 void ff_set_qscale(MpegEncContext * s, int qscale)
3336 else if (qscale > 31)
3340 s->chroma_qscale= s->chroma_qscale_table[qscale];
3342 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3343 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Frame-threading: report the current MB row as decoded so consumer
 * threads waiting in ff_thread_await_progress() can proceed.  Not done
 * for B frames, partitioned frames, or after an error. */
3346 void ff_MPV_report_decode_progress(MpegEncContext *s)
3348 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3349 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3352 #if CONFIG_ERROR_RESILIENCE
/* Copies the state the error-resilience module needs (picture pointers,
 * timing, sampling/partitioning flags) from the decoder context into the
 * ERContext, then starts ER for the frame. */
3353 void ff_mpeg_er_frame_start(MpegEncContext *s)
3355 ERContext *er = &s->er;
3357 er->cur_pic = s->current_picture_ptr;
3358 er->last_pic = s->last_picture_ptr;
3359 er->next_pic = s->next_picture_ptr;
3361 er->pp_time = s->pp_time;
3362 er->pb_time = s->pb_time;
3363 er->quarter_sample = s->quarter_sample;
3364 er->partitioned_frame = s->partitioned_frame;
3366 ff_er_frame_start(er);
3368 #endif /* CONFIG_ERROR_RESILIENCE */