2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
35 #include "h264chroma.h"
38 #include "mpegvideo.h"
41 #include "xvmc_internal.h"
/* Forward declarations of the per-standard C dequantization routines;
 * ff_dct_common_init() below installs these as function pointers in
 * MpegEncContext (arch-specific init may override them afterwards). */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                int16_t *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                        int16_t *block, int n, int qscale);
/* Default luma->chroma qscale mapping: the identity (chroma qscale equals
 * luma qscale for every value 0..31). Codecs with a nonlinear mapping
 * replace s->chroma_qscale_table with their own table. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 intra-DC scale: constant 8 for every index.
 * NOTE(review): presumably indexed by qscale in the dequantizers — the
 * indexing site is not visible in this view; confirm there. */
const uint8_t ff_mpeg1_dc_scale_table[128] = {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 intra-DC scale, constant 4 (second entry of
 * ff_mpeg2_dc_scale_table[] below). */
static const uint8_t mpeg2_dc_scale_table1[128] = {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 intra-DC scale, constant 2 (third entry of
 * ff_mpeg2_dc_scale_table[] below). */
static const uint8_t mpeg2_dc_scale_table2[128] = {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 intra-DC scale, constant 1 (fourth entry of
 * ff_mpeg2_dc_scale_table[] below). */
static const uint8_t mpeg2_dc_scale_table3[128] = {
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Table-of-tables: index 0..3 selects a constant DC scale of 8, 4, 2 or 1.
 * NOTE(review): index is presumably the MPEG-2 intra_dc_precision field —
 * the lookup site is outside this view; confirm at the consumer. */
const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
/* Pixel-format list exported for 4:2:0-only codecs; the initializer body
 * (and its AV_PIX_FMT_NONE terminator) is not visible in this view. */
const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback (installed in init_er()): reconstructs one
 * macroblock from concealment parameters supplied by the ER code, then
 * hands it to the regular MB decoder. Some parameter/brace lines are not
 * visible in this view. */
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
    MpegEncContext *s = opaque;
    /* Copy the concealment MB state into the context. */
    s->mv_type = mv_type;
    s->mb_intra = mb_intra;
    s->mb_skipped = mb_skipped;
    memcpy(s->mv, mv, sizeof(*mv));
    ff_init_block_index(s);
    ff_update_block_index(s);
    /* Concealed MBs carry no residual: clear the coefficient blocks. */
    s->dsp.clear_blocks(s->block[0]);
    /* Point dest[] at this MB's position in the current frame; chroma
     * planes are offset by the chroma subsampling shifts. */
    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    ff_MPV_decode_mb(s, s->block);
/* init common dct for both encoder and decoder */
av_cold int ff_dct_common_init(MpegEncContext *s)
    /* Generic DSP / chroma MC / half-pel / video DSP contexts. */
    ff_dsputil_init(&s->dsp, s->avctx);
    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
    /* Install the plain-C dequantizers; arch-specific init below may
     * replace them with optimized versions. */
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    /* Bit-exact mode uses a dedicated MPEG-2 intra dequantizer. */
    if (s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
    /* Per-architecture overrides (the surrounding ARCH_* guards are not
     * visible in this view). */
    ff_MPV_common_init_x86(s);
    ff_MPV_common_init_axp(s);
    ff_MPV_common_init_arm(s);
    ff_MPV_common_init_bfin(s);
    ff_MPV_common_init_ppc(s);
    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/**
 * Allocate the linesize-dependent scratch buffers: the edge-emulation
 * buffer and the shared motion-estimation / RD / OBMC scratchpads.
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure.
 */
int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
    /* 64 extra bytes of slack, rounded up to a 32-byte alignment. */
    int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for halfpel / 21x21 for h264)
    // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
    // linesize * interlaced * MBsize
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
    /* All scratchpads alias one allocation; obmc starts 16 bytes in. */
    s->me.temp = s->me.scratchpad;
    s->rd_scratchpad = s->me.scratchpad;
    s->b_scratchpad = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;
    /* failure path (the 'fail:' label line is not visible in this view) */
    av_freep(&s->edge_emu_buffer);
    return AVERROR(ENOMEM);
 * Allocate a frame buffer
 * via ff_thread_get_buffer() for normal codecs, or via the default
 * get_buffer2 path for the WM image/screen codecs, then validate strides
 * and set up hwaccel private data and scratch buffers.
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
    /* WM Image/Screen codecs use internal buffers with other dimensions,
     * so user get_buffer callbacks are bypassed for them. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        r = ff_thread_get_buffer(s->avctx, &pic->tf,
                                 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
    pic->f.width = s->avctx->width;
    pic->f.height = s->avctx->height;
    pic->f.format = s->avctx->pix_fmt;
    r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
    if (r < 0 || !pic->f.data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->hwaccel_priv_buf) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
    /* Strides must not change mid-stream relative to the context. */
    if (s->linesize && (s->linesize != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (stride changed)\n");
        ff_mpeg_unref_picture(s, pic);
    /* Both chroma planes must share one stride. */
    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed (uv stride mismatch)\n");
        ff_mpeg_unref_picture(s, pic);
    /* Lazily allocate the linesize-dependent scratch buffers. */
    if (!s->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "get_buffer() failed to allocate context scratch buffers.\n");
        ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data buffers (AVBuffer refs) and reset the
 * recorded allocation dimensions so a later alloc re-creates them. */
static void free_picture_tables(Picture *pic)
    pic->alloc_mb_width =
    pic->alloc_mb_height = 0;
    av_buffer_unref(&pic->mb_var_buf);
    av_buffer_unref(&pic->mc_mb_var_buf);
    av_buffer_unref(&pic->mb_mean_buf);
    av_buffer_unref(&pic->mbskip_table_buf);
    av_buffer_unref(&pic->qscale_table_buf);
    av_buffer_unref(&pic->mb_type_buf);
    /* Per-direction (forward/backward) motion/ref-index buffers. */
    for (i = 0; i < 2; i++) {
        av_buffer_unref(&pic->motion_val_buf[i]);
        av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data buffers sized from the current
 * mb/b8 geometry; records the dimensions they were allocated for.
 * Returns 0 on success, AVERROR(ENOMEM) on any allocation failure
 * (caller is expected to free partially allocated tables). */
static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
    const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    /* Tables needed by both encoder and decoder. */
    pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
    pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
    pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
    if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
        return AVERROR(ENOMEM);
    /* Encoder-side variance/mean statistics (guard not visible here). */
    pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
    pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
    pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
    if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
        return AVERROR(ENOMEM);
    /* Motion vectors / reference indices: needed for H.263-family output,
     * when encoding, or when MV debugging/visualization is requested. */
    if (s->out_format == FMT_H263 || s->encoding ||
        (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
        int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;
        for (i = 0; mv_size && i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
            pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
                return AVERROR(ENOMEM);
    /* Remember the geometry so ff_alloc_picture() can detect mismatches. */
    pic->alloc_mb_width = s->mb_width;
    pic->alloc_mb_height = s->mb_height;
/* Ensure every per-picture table buffer is writable (copy-on-write via
 * av_buffer_make_writable); returns 0 or a negative AVERROR. */
static int make_tables_writable(Picture *pic)
/* Helper: make one buffer writable; the guard/condition lines of the
 * macro body are not visible in this view. */
#define MAKE_WRITABLE(table) \
        (ret = av_buffer_make_writable(&pic->table)) < 0)\
    MAKE_WRITABLE(mb_var_buf);
    MAKE_WRITABLE(mc_mb_var_buf);
    MAKE_WRITABLE(mb_mean_buf);
    MAKE_WRITABLE(mbskip_table_buf);
    MAKE_WRITABLE(qscale_table_buf);
    MAKE_WRITABLE(mb_type_buf);
    for (i = 0; i < 2; i++) {
        MAKE_WRITABLE(motion_val_buf[i]);
        MAKE_WRITABLE(ref_index_buf[i]);
 * Allocate a Picture.
 * The pixels are allocated/set by calling get_buffer() if shared = 0
 * Side-data tables are (re)allocated when missing or sized for a
 * different mb geometry, then made writable and re-pointed.
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
    /* Drop stale tables if the mb geometry changed since allocation. */
    if (pic->qscale_table_buf)
        if (   pic->alloc_mb_width  != s->mb_width
            || pic->alloc_mb_height != s->mb_height)
            free_picture_tables(pic);
    /* shared: pixels must already exist; otherwise they must not. */
        assert(pic->f.data[0]);
        assert(!pic->f.data[0]);
        if (alloc_frame_buffer(s, pic) < 0)
        /* Adopt the frame's strides as the context strides. */
        s->linesize = pic->f.linesize[0];
        s->uvlinesize = pic->f.linesize[1];
    if (!pic->qscale_table_buf)
        ret = alloc_picture_tables(s, pic);
        ret = make_tables_writable(pic);
    /* Re-derive the convenience pointers from the (possibly new) buffers;
     * qscale_table/mb_type are offset past the top padding row. */
    pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
    pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
    pic->mb_mean = pic->mb_mean_buf->data;
    pic->mbskip_table = pic->mbskip_table_buf->data;
    pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
    pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
    if (pic->motion_val_buf[0]) {
        for (i = 0; i < 2; i++) {
            pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
            pic->ref_index[i] = pic->ref_index_buf[i]->data;
    /* failure path (label line not visible in this view) */
    av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
    ff_mpeg_unref_picture(s, pic);
    free_picture_tables(pic);
    return AVERROR(ENOMEM);
 * Deallocate a picture.
 * Releases frame/hwaccel buffers and zeroes everything in the Picture
 * past the mb_mean field (the buffer-ref fields before it survive unless
 * needs_realloc forces the tables to be freed too).
void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
    /* Offset of the first field to be memset below. */
    int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        s->codec_id != AV_CODEC_ID_VC1IMAGE  &&
        s->codec_id != AV_CODEC_ID_MSS2)
        ff_thread_release_buffer(s->avctx, &pic->tf);
    av_frame_unref(&pic->f);
    av_buffer_unref(&pic->hwaccel_priv_buf);
    if (pic->needs_realloc)
        free_picture_tables(pic);
    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's side-data table refs point at src's buffers (re-referencing
 * only where the underlying buffer differs) and copy the derived
 * pointers/dimensions. Returns 0 or AVERROR(ENOMEM). */
static int update_picture_tables(Picture *dst, Picture *src)
/* Re-reference one buffer if dst's ref is missing or points elsewhere;
 * the macro's opening condition lines are not visible in this view. */
#define UPDATE_TABLE(table)\
        (!dst->table || dst->table->buffer != src->table->buffer)) {\
        av_buffer_unref(&dst->table);\
        dst->table = av_buffer_ref(src->table);\
            free_picture_tables(dst);\
            return AVERROR(ENOMEM);\
    UPDATE_TABLE(mb_var_buf);
    UPDATE_TABLE(mc_mb_var_buf);
    UPDATE_TABLE(mb_mean_buf);
    UPDATE_TABLE(mbskip_table_buf);
    UPDATE_TABLE(qscale_table_buf);
    UPDATE_TABLE(mb_type_buf);
    for (i = 0; i < 2; i++) {
        UPDATE_TABLE(motion_val_buf[i]);
        UPDATE_TABLE(ref_index_buf[i]);
    /* Copy the derived data pointers (they alias the shared buffers). */
    dst->mb_var = src->mb_var;
    dst->mc_mb_var = src->mc_mb_var;
    dst->mb_mean = src->mb_mean;
    dst->mbskip_table = src->mbskip_table;
    dst->qscale_table = src->qscale_table;
    dst->mb_type = src->mb_type;
    for (i = 0; i < 2; i++) {
        dst->motion_val[i] = src->motion_val[i];
        dst->ref_index[i] = src->ref_index[i];
    dst->alloc_mb_width = src->alloc_mb_width;
    dst->alloc_mb_height = src->alloc_mb_height;
/* Reference src's frame, tables and hwaccel data into dst (dst must be
 * empty). On any failure everything referenced so far is unreferenced. */
int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
    av_assert0(!dst->f.buf[0]);
    av_assert0(src->f.buf[0]);
    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    ret = update_picture_tables(dst, src);
    if (src->hwaccel_picture_private) {
        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
        if (!dst->hwaccel_priv_buf)
        dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
    /* Copy the plain (non-refcounted) per-picture scalars. */
    dst->field_picture = src->field_picture;
    dst->mb_var_sum = src->mb_var_sum;
    dst->mc_mb_var_sum = src->mc_mb_var_sum;
    dst->b_frame_score = src->b_frame_score;
    dst->needs_realloc = src->needs_realloc;
    dst->reference = src->reference;
    dst->shared = src->shared;
    /* failure path (label line not visible in this view) */
    ff_mpeg_unref_picture(s, dst);
/* Allocate the per-slice-context buffers: ME maps, optional DCT error
 * sums, coefficient blocks, and (for H.263-family) AC prediction values.
 * Returns 0 on success, -1 on allocation failure. */
static int init_duplicate_context(MpegEncContext *s)
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    s->obmc_scratchpad = NULL;
    /* Motion-estimation candidate/score maps. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                      ME_MAP_SIZE * sizeof(uint32_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                      ME_MAP_SIZE * sizeof(uint32_t), fail)
    if (s->avctx->noise_reduction) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                          2 * 64 * sizeof(int), fail)
    /* 12 blocks of 64 coefficients, double-buffered. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];
    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    if (s->out_format == FMT_H263) {
        /* AC prediction values: luma plane followed by the two chroma
         * planes, each offset past a one-row/one-col border. */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    return -1; // free() through ff_MPV_common_end()
/* Free everything init_duplicate_context()/ff_mpv_frame_size_alloc()
 * allocated for one slice context (safe on partially-initialized state). */
static void free_duplicate_context(MpegEncContext *s)
    av_freep(&s->edge_emu_buffer);
    av_freep(&s->me.scratchpad);
    /* The other scratchpads alias me.scratchpad — pointers only reset. */
    s->obmc_scratchpad = NULL;
    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->ac_val_base);
/* Save the per-thread fields of src into bak so ff_update_duplicate_context()
 * can restore them after a whole-struct memcpy. Most COPY() lines are not
 * visible in this view. */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(obmc_scratchpad);
    COPY(me.map_generation);
/* Refresh a slice-thread context from the master: copy the whole struct,
 * restore dst's own per-thread buffers, and re-point pblocks at dst's
 * block array. Returns 0 or a negative AVERROR. */
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
    // FIXME copy only needed parts
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks must point into dst's own blocks, not src's. */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    if (!dst->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
/* Frame-threading state transfer: copy decoding state from the source
 * thread's context (s1) into this thread's context (s), initializing or
 * resizing s as needed. Returns 0 or a negative AVERROR. */
int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        /* First use: bootstrap this context from the source one, but do
         * not share the bitstream buffer — it is per-thread. */
        memcpy(s, s1, sizeof(MpegEncContext));
        s->bitstream_buffer = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
        if (s1->context_initialized){
            // s->picture_range_start += MAX_PICTURE_COUNT;
            // s->picture_range_end += MAX_PICTURE_COUNT;
            if((ret = ff_MPV_common_init(s)) < 0){
                memset(s, 0, sizeof(MpegEncContext));
    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->context_reinit = 0;
        s->height = s1->height;
        s->width = s1->width;
        if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
    s->avctx->coded_height = s1->avctx->coded_height;
    s->avctx->coded_width = s1->avctx->coded_width;
    s->avctx->width = s1->avctx->width;
    s->avctx->height = s1->avctx->height;
    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number = s1->picture_number;
    s->input_picture_number = s1->input_picture_number;
    av_assert0(!s->picture || s->picture != s1->picture);
    /* Re-reference every picture the source thread holds. */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        ff_mpeg_unref_picture(s, &s->picture[i]);
        if (s1->picture[i].f.data[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
/* Transfer one of the current/last/next pictures; falls back to table-only
 * update when the source picture has no pixel data. */
#define UPDATE_PICTURE(pic)\
    ff_mpeg_unref_picture(s, &s->pic);\
    if (s1->pic.f.data[0])\
        ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
        ret = update_picture_tables(&s->pic, &s1->pic);\
    UPDATE_PICTURE(current_picture);
    UPDATE_PICTURE(last_picture);
    UPDATE_PICTURE(next_picture);
    /* Rebase the pointer fields from s1's picture array onto s's. */
    s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs = s1->workaround_bugs;
    s->padding_bug_score = s1->padding_bug_score;
    /* Bulk-copy the MPEG-4 field range [time_increment_bits, shape). */
    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
           (char *) &s1->shape - (char *) &s1->time_increment_bits);
    s->max_b_frames = s1->max_b_frames;
    s->low_delay = s1->low_delay;
    s->droppable = s1->droppable;
    // DivX handling (doesn't work)
    s->divx_packed = s1->divx_packed;
    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        /* Zero the padding so the bit reader never reads garbage. */
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               FF_INPUT_BUFFER_PADDING_SIZE);
    // linesize dependend scratch buffer allocation
    if (!s->edge_emu_buffer)
        if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                   "scratch buffers.\n");
            return AVERROR(ENOMEM);
        av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
               "be allocated due to unknown size.\n");
    // MPEG2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
        if (s1->pict_type != AV_PICTURE_TYPE_B) {
            s->last_non_b_pict_type = s1->pict_type;
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
void ff_MPV_common_defaults(MpegEncContext *s)
    /* MPEG-1-style DC scaling and identity chroma qscale by default. */
    s->y_dc_scale_table =
    s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table = ff_default_chroma_qscale_table;
    s->progressive_frame = 1;
    s->progressive_sequence = 1;
    s->picture_structure = PICT_FRAME;
    s->coded_picture_number = 0;
    s->picture_number = 0;
    s->input_picture_number = 0;
    s->picture_in_gop_number = 0;
    s->slice_context_count = 1;
 * Set the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon
 * the prior state of the MpegEncContext.
void ff_MPV_decode_defaults(MpegEncContext *s)
    /* Currently identical to the common defaults. */
    ff_MPV_common_defaults(s);
/* Initialize the error-resilience context from the MpegEncContext
 * geometry, sharing its skip/intra/DC tables, and install the
 * macroblock-concealment callback. Returns 0 or AVERROR(ENOMEM). */
static int init_er(MpegEncContext *s)
    ERContext *er = &s->er;
    int mb_array_size = s->mb_height * s->mb_stride;
    er->avctx = s->avctx;
    /* Mirror the macroblock geometry into the ER context. */
    er->mb_index2xy = s->mb_index2xy;
    er->mb_num = s->mb_num;
    er->mb_width = s->mb_width;
    er->mb_height = s->mb_height;
    er->mb_stride = s->mb_stride;
    er->b8_stride = s->b8_stride;
    er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
    er->error_status_table = av_mallocz(mb_array_size);
    if (!er->er_temp_buffer || !er->error_status_table)
    /* Shared (not owned) tables from the main context. */
    er->mbskip_table = s->mbskip_table;
    er->mbintra_table = s->mbintra_table;
    for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
        er->dc_val[i] = s->dc_val[i];
    er->decode_mb = mpeg_er_decode_mb;
    /* failure path (label line not visible in this view) */
    av_freep(&er->er_temp_buffer);
    av_freep(&er->error_status_table);
    return AVERROR(ENOMEM);
 * Initialize and allocates MpegEncContext fields dependent on the resolution.
static int init_context_frame(MpegEncContext *s)
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
    /* Derive macroblock / 8x8-block / 4x4-block geometry from the width;
     * each stride has one column of padding. */
    s->mb_width = (s->width + 15) / 16;
    s->mb_stride = s->mb_width + 1;
    s->b8_stride = s->mb_width * 2 + 1;
    s->b4_stride = s->mb_width * 4 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
    /* set default edge pos, will be overridden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;
    s->mb_num = s->mb_width * s->mb_height;
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[5] = s->mb_stride;
    y_size = s->b8_stride * (2 * s->mb_height + 1);
    c_size = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error resilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
    /* Allocate MV tables */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
    /* The usable table pointers skip the one-MB-row + one-column border. */
    s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
    s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
    s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
    s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
    s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
    s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
    /* Allocate MB type table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
    FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
    FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                     mb_array_size * sizeof(float), fail);
    FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                     mb_array_size * sizeof(float), fail);
    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->flags & CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
    if (s->out_format == FMT_H263) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;
        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
    if (s->h263_pred || s->h263_plus || !s->encoding) {
        // MN: we need these for error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        /* 1024 is the neutral DC predictor (128 << 3). */
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    /* which mb is a intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);
    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for a quicker mpeg4 slice_end detection
    return AVERROR(ENOMEM);
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
av_cold int ff_MPV_common_init(MpegEncContext *s)
    /* One slice context per slice thread, unless overridden below. */
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;
    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;
    /* Interlaced MPEG-2 needs mb_height rounded to 32-line macroblock
     * pairs; everything else rounds to 16 lines. */
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
        s->mb_height = (s->height + 15) / 16;
    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
    /* Clamp the slice count to MAX_THREADS and to the MB height. */
    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
    ff_dct_common_init(s);
    s->flags = s->avctx->flags;
    s->flags2 = s->avctx->flags2;
    /* set chroma shifts */
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
    /* convert fourcc to upper case */
    s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
    s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
    s->avctx->coded_frame = &s->current_picture.f;
    /* Encoder-only allocations (the enclosing guard is not visible in
     * this view): statistics and quantization matrices. */
    if (s->msmpeg4_version) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
                          2 * 2 * (MAX_LEVEL + 1) *
                          (MAX_RUN + 1) * 2 * sizeof(int), fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
    if (s->avctx->noise_reduction) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
    /* Picture pool plus the three working Picture slots. */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      MAX_PICTURE_COUNT * sizeof(Picture), fail);
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        avcodec_get_frame_defaults(&s->picture[i].f);
    memset(&s->next_picture, 0, sizeof(s->next_picture));
    memset(&s->last_picture, 0, sizeof(s->last_picture));
    memset(&s->current_picture, 0, sizeof(s->current_picture));
    avcodec_get_frame_defaults(&s->next_picture.f);
    avcodec_get_frame_defaults(&s->last_picture.f);
    avcodec_get_frame_defaults(&s->current_picture.f);
    if (init_context_frame(s))
    s->parse_context.state = -1;
    s->context_initialized = 1;
    s->thread_context[0] = s;
//     if (s->width && s->height) {
    if (nb_slices > 1) {
        /* Clone the master context for each additional slice thread and
         * assign each thread its MB-row range. */
        for (i = 1; i < nb_slices; i++) {
            s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
            memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
        for (i = 0; i < nb_slices; i++) {
            if (init_duplicate_context(s->thread_context[i]) < 0)
            s->thread_context[i]->start_mb_y =
                (s->mb_height * (i) + nb_slices / 2) / nb_slices;
            s->thread_context[i]->end_mb_y =
                (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        if (init_duplicate_context(s) < 0)
        s->end_mb_y = s->mb_height;
    s->slice_context_count = nb_slices;
    /* failure path (label line not visible in this view) */
    ff_MPV_common_end(s);
 * Frees and resets MpegEncContext fields depending on the resolution.
 * Is used during resolution changes to avoid a full reinitialization of the
static int free_context_frame(MpegEncContext *s)
    /* Free the MV tables allocated in init_context_frame() and clear the
     * derived (offset) pointers that aliased them. */
    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table = NULL;
    s->b_forw_mv_table = NULL;
    s->b_back_mv_table = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        av_freep(&s->p_field_select_table[i]);
    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);
    av_freep(&s->mbskip_table);
    av_freep(&s->er.error_status_table);
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);
    /* Strides are geometry-dependent; reset so they get re-derived. */
    s->linesize = s->uvlinesize = 0;
/**
 * Reinitialize the resolution-dependent parts of the context after a
 * mid-stream frame size change: tear down slice contexts and per-frame
 * tables, recompute mb_height, then rebuild everything at the new size.
 * Existing Picture buffers are kept but flagged for reallocation.
 */
1168 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Tear down per-slice duplicate contexts (index 0 is s itself and is
 * only freed of its duplicated resources, never av_freep()'d). */
1172 if (s->slice_context_count > 1) {
1173 for (i = 0; i < s->slice_context_count; i++) {
1174 free_duplicate_context(s->thread_context[i]);
1176 for (i = 1; i < s->slice_context_count; i++) {
1177 av_freep(&s->thread_context[i]);
1180 free_duplicate_context(s);
1182 if ((err = free_context_frame(s)) < 0)
/* Keep the picture pool, but force each entry to be reallocated at the
 * new dimensions the next time it is picked by ff_find_unused_picture(). */
1186 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1187 s->picture[i].needs_realloc = 1;
1190 s->last_picture_ptr =
1191 s->next_picture_ptr =
1192 s->current_picture_ptr = NULL;
/* Interlaced MPEG-2 needs mb_height rounded to a multiple of two
 * 16-line macroblock rows (32-line units); otherwise plain 16-line rows. */
1195 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1196 s->mb_height = (s->height + 31) / 32 * 2;
1198 s->mb_height = (s->height + 15) / 16;
1200 if ((s->width || s->height) &&
1201 av_image_check_size(s->width, s->height, 0, s->avctx))
1202 return AVERROR_INVALIDDATA;
1204 if ((err = init_context_frame(s)))
1207 s->thread_context[0] = s;
/* Recreate the slice thread contexts as shallow copies of s and assign
 * each a balanced [start_mb_y, end_mb_y) row range. */
1209 if (s->width && s->height) {
1210 int nb_slices = s->slice_context_count;
1211 if (nb_slices > 1) {
1212 for (i = 1; i < nb_slices; i++) {
1213 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1214 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1217 for (i = 0; i < nb_slices; i++) {
1218 if (init_duplicate_context(s->thread_context[i]) < 0)
1220 s->thread_context[i]->start_mb_y =
1221 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1222 s->thread_context[i]->end_mb_y =
1223 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1226 if (init_duplicate_context(s) < 0)
1229 s->end_mb_y = s->mb_height;
1231 s->slice_context_count = nb_slices;
/* Error path (elided labels in this view): full teardown on failure. */
1236 ff_MPV_common_end(s);
1240 /* init common structure for both encoder and decoder */
/* NOTE(review): despite the comment above, this is the *teardown* companion
 * of ff_MPV_common_init(): it frees everything the init path allocated. */
1241 void ff_MPV_common_end(MpegEncContext *s)
/* Destroy slice thread contexts; index 0 is s itself (not av_freep()'d). */
1245 if (s->slice_context_count > 1) {
1246 for (i = 0; i < s->slice_context_count; i++) {
1247 free_duplicate_context(s->thread_context[i]);
1249 for (i = 1; i < s->slice_context_count; i++) {
1250 av_freep(&s->thread_context[i]);
1252 s->slice_context_count = 1;
1253 } else free_duplicate_context(s);
1255 av_freep(&s->parse_context.buffer);
1256 s->parse_context.buffer_size = 0;
1258 av_freep(&s->bitstream_buffer);
1259 s->allocated_bitstream_buffer_size = 0;
1261 av_freep(&s->avctx->stats_out);
1262 av_freep(&s->ac_stats);
/* The chroma intra matrices may alias the luma ones (shared allocation);
 * only free them when they are a distinct buffer, then clear both pointers. */
1264 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1265 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1266 s->q_chroma_intra_matrix= NULL;
1267 s->q_chroma_intra_matrix16= NULL;
1268 av_freep(&s->q_intra_matrix);
1269 av_freep(&s->q_inter_matrix);
1270 av_freep(&s->q_intra_matrix16);
1271 av_freep(&s->q_inter_matrix16);
1272 av_freep(&s->input_picture);
1273 av_freep(&s->reordered_input_picture);
1274 av_freep(&s->dct_offset);
/* Release every Picture in the pool plus the standalone picture copies. */
1277 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1278 free_picture_tables(&s->picture[i]);
1279 ff_mpeg_unref_picture(s, &s->picture[i]);
1282 av_freep(&s->picture);
1283 free_picture_tables(&s->last_picture);
1284 ff_mpeg_unref_picture(s, &s->last_picture);
1285 free_picture_tables(&s->current_picture);
1286 ff_mpeg_unref_picture(s, &s->current_picture);
1287 free_picture_tables(&s->next_picture);
1288 ff_mpeg_unref_picture(s, &s->next_picture);
1289 free_picture_tables(&s->new_picture);
1290 ff_mpeg_unref_picture(s, &s->new_picture);
1292 free_context_frame(s);
/* Mark the context as uninitialized so a later init starts from scratch. */
1294 s->context_initialized = 0;
1295 s->last_picture_ptr =
1296 s->next_picture_ptr =
1297 s->current_picture_ptr = NULL;
1298 s->linesize = s->uvlinesize = 0;
/**
 * Initialize an RLTable's derived lookup tables (max_level, max_run,
 * index_run) from its table_run/table_level arrays, for both the
 * not-last (last=0) and last (last=1) coefficient partitions.
 * If static_store is non-NULL the tables live in that static buffer and
 * the call is idempotent; otherwise they are av_malloc()'d per table.
 */
1301 av_cold void ff_init_rl(RLTable *rl,
1302 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1304 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1305 uint8_t index_run[MAX_RUN + 1];
1306 int last, run, level, start, end, i;
1308 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1309 if (static_store && rl->max_level[0])
1312 /* compute max_level[], max_run[] and index_run[] */
1313 for (last = 0; last < 2; last++) {
/* NOTE(review): the start/end selection for the two partitions is elided
 * in this view; index_run defaults to rl->n ("no entry for this run"). */
1322 memset(max_level, 0, MAX_RUN + 1);
1323 memset(max_run, 0, MAX_LEVEL + 1);
1324 memset(index_run, rl->n, MAX_RUN + 1);
1325 for (i = start; i < end; i++) {
1326 run = rl->table_run[i];
1327 level = rl->table_level[i];
1328 if (index_run[run] == rl->n)
1330 if (level > max_level[run])
1331 max_level[run] = level;
1332 if (run > max_run[level])
1333 max_run[level] = run;
/* Publish the tables: carve them out of static_store when provided,
 * otherwise allocate each one separately. Layout in static_store is
 * [max_level | max_run | index_run] back to back. */
1336 rl->max_level[last] = static_store[last];
1338 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1339 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1341 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1343 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1344 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1346 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1348 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1349 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Build the per-qscale RL_VLC decode tables (rl->rl_vlc[q]) from the
 * generic VLC table, pre-multiplying each level by qmul/qadd so the
 * decoder can skip dequantization for these coefficients.
 */
1353 av_cold void ff_init_vlc_rl(RLTable *rl)
1357 for (q = 0; q < 32; q++) {
/* (q - 1) | 1 forces qadd odd; the qmul setup for this q is elided here. */
1359 int qadd = (q - 1) | 1;
1365 for (i = 0; i < rl->vlc.table_size; i++) {
1366 int code = rl->vlc.table[i][0];
1367 int len = rl->vlc.table[i][1];
1370 if (len == 0) { // illegal code
1373 } else if (len < 0) { // more bits needed
1377 if (code == rl->n) { // esc
/* Normal (run, level) symbol: codes >= rl->last belong to the "last
 * coefficient" partition, flagged by adding 192 to run. */
1381 run = rl->table_run[code] + 1;
1382 level = rl->table_level[code] * qmul + qadd;
1383 if (code >= rl->last) run += 192;
1386 rl->rl_vlc[q][i].len = len;
1387 rl->rl_vlc[q][i].level = level;
1388 rl->rl_vlc[q][i].run = run;
/**
 * Unreference every non-reference picture in the pool.
 * @param remove_current if 0, the current_picture_ptr entry is spared.
 */
1393 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1397 /* release non reference frames */
1398 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1399 if (!s->picture[i].reference &&
1400 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1401 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Return whether this pool slot may be reused: never the last picture,
 * any slot without allocated data, or an allocated slot that is flagged
 * needs_realloc and is not held as a delayed reference.
 * (The return statements themselves are elided in this view.) */
1406 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1408 if (pic == s->last_picture_ptr)
1410 if (pic->f.data[0] == NULL)
1412 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/**
 * Find a free slot in s->picture[]. First pass prefers completely empty
 * slots (no data allocated); second pass accepts any pic_is_unused() slot.
 * Running out of slots is treated as an internal error, not OOM — see the
 * rationale comment below.
 */
1417 static int find_unused_picture(MpegEncContext *s, int shared)
1422 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1423 if (s->picture[i].f.data[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1427 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1428 if (pic_is_unused(s, &s->picture[i]))
1433 av_log(s->avctx, AV_LOG_FATAL,
1434 "Internal error, picture buffer overflow\n");
1435 /* We could return -1, but the codec would crash trying to draw into a
1436 * non-existing frame anyway. This is safer than waiting for a random crash.
1437 * Also the return of this is never useful, an encoder must only allocate
1438 * as much as allowed in the specification. This has no relationship to how
1439 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1440 * enough for such valid streams).
1441 * Plus, a decoder has to check stream validity and remove frames if too
1442 * many reference frames are around. Waiting for "OOM" is not correct at
1443 * all. Similarly, missing reference frames have to be replaced by
1444 * interpolated/MC frames, anything else is a bug in the codec ...
/**
 * Public wrapper around find_unused_picture(): if the chosen slot is
 * flagged needs_realloc (e.g. after a resolution change), release its old
 * buffers and tables and reset the frame so the caller reallocates fresh.
 */
1450 int ff_find_unused_picture(MpegEncContext *s, int shared)
1452 int ret = find_unused_picture(s, shared);
1454 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1455 if (s->picture[ret].needs_realloc) {
1456 s->picture[ret].needs_realloc = 0;
1457 free_picture_tables(&s->picture[ret]);
1458 ff_mpeg_unref_picture(s, &s->picture[ret]);
1459 avcodec_get_frame_defaults(&s->picture[ret].f);
/**
 * Recompute the per-coefficient noise-reduction DCT offsets from the
 * accumulated error sums, separately for intra and inter blocks.
 * Counters are halved once dct_count exceeds 2^16 to keep a rolling average.
 */
1465 static void update_noise_reduction(MpegEncContext *s)
1469 for (intra = 0; intra < 2; intra++) {
1470 if (s->dct_count[intra] > (1 << 16)) {
1471 for (i = 0; i < 64; i++) {
1472 s->dct_error_sum[intra][i] >>= 1;
1474 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, rounded; +1 avoids /0. */
1477 for (i = 0; i < 64; i++) {
1478 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1479 s->dct_count[intra] +
1480 s->dct_error_sum[intra][i] / 2) /
1481 (s->dct_error_sum[intra][i] + 1);
1487 * generic function for encode/decode called after coding/decoding
1488 * the header and before a frame is coded/decoded.
1490 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1496 if (!ff_thread_can_start_frame(avctx)) {
1497 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1501 /* mark & release old frames */
1502 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1503 s->last_picture_ptr != s->next_picture_ptr &&
1504 s->last_picture_ptr->f.data[0]) {
1505 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1508 /* release forgotten pictures */
1509 /* if (mpeg124/h263) */
1511 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1512 if (&s->picture[i] != s->last_picture_ptr &&
1513 &s->picture[i] != s->next_picture_ptr &&
1514 s->picture[i].reference && !s->picture[i].needs_realloc) {
1515 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1516 av_log(avctx, AV_LOG_ERROR,
1517 "releasing zombie picture\n");
1518 ff_mpeg_unref_picture(s, &s->picture[i]);
1523 ff_mpeg_unref_picture(s, &s->current_picture);
1526 ff_release_unused_pictures(s, 1);
/* Pick the Picture that will receive this frame: either one pre-selected
 * before header parsing, or a fresh slot from the pool. */
1528 if (s->current_picture_ptr &&
1529 s->current_picture_ptr->f.data[0] == NULL) {
1530 // we already have a unused image
1531 // (maybe it was set before reading the header)
1532 pic = s->current_picture_ptr;
1534 i = ff_find_unused_picture(s, 0);
1536 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1539 pic = &s->picture[i];
1543 if (!s->droppable) {
1544 if (s->pict_type != AV_PICTURE_TYPE_B)
1548 pic->f.coded_picture_number = s->coded_picture_number++;
1550 if (ff_alloc_picture(s, pic, 0) < 0)
1553 s->current_picture_ptr = pic;
1554 // FIXME use only the vars from current_pic
/* Propagate interlacing/field metadata onto the output frame. */
1555 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1556 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1557 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1558 if (s->picture_structure != PICT_FRAME)
1559 s->current_picture_ptr->f.top_field_first =
1560 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1562 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1563 !s->progressive_sequence;
1564 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1567 s->current_picture_ptr->f.pict_type = s->pict_type;
1568 // if (s->flags && CODEC_FLAG_QSCALE)
1569 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1570 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1572 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1573 s->current_picture_ptr)) < 0)
/* Non-B frames advance the reference chain: last <- next <- current. */
1576 if (s->pict_type != AV_PICTURE_TYPE_B) {
1577 s->last_picture_ptr = s->next_picture_ptr;
1579 s->next_picture_ptr = s->current_picture_ptr;
1581 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1582 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1583 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1584 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1585 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1586 s->pict_type, s->droppable);
/* Missing backward reference (stream starts on a P/B frame, or a field
 * sequence starts on a keyframe): synthesize a gray dummy last picture so
 * motion compensation has something to read from. */
1588 if ((s->last_picture_ptr == NULL ||
1589 s->last_picture_ptr->f.data[0] == NULL) &&
1590 (s->pict_type != AV_PICTURE_TYPE_I ||
1591 s->picture_structure != PICT_FRAME)) {
1592 int h_chroma_shift, v_chroma_shift;
1593 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1594 &h_chroma_shift, &v_chroma_shift);
1595 if (s->pict_type != AV_PICTURE_TYPE_I)
1596 av_log(avctx, AV_LOG_ERROR,
1597 "warning: first frame is no keyframe\n");
1598 else if (s->picture_structure != PICT_FRAME)
1599 av_log(avctx, AV_LOG_INFO,
1600 "allocate dummy last picture for field based first keyframe\n");
1602 /* Allocate a dummy frame */
1603 i = ff_find_unused_picture(s, 0);
1605 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1608 s->last_picture_ptr = &s->picture[i];
1609 s->last_picture_ptr->f.key_frame = 0;
1610 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1611 s->last_picture_ptr = NULL;
/* 0x80 = mid-gray in both luma and chroma planes. */
1615 memset(s->last_picture_ptr->f.data[0], 0x80,
1616 avctx->height * s->last_picture_ptr->f.linesize[0]);
1617 memset(s->last_picture_ptr->f.data[1], 0x80,
1618 (avctx->height >> v_chroma_shift) *
1619 s->last_picture_ptr->f.linesize[1]);
1620 memset(s->last_picture_ptr->f.data[2], 0x80,
1621 (avctx->height >> v_chroma_shift) *
1622 s->last_picture_ptr->f.linesize[2]);
/* FLV1/H.263 concealment looks better against black (luma 16). */
1624 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1625 for(i=0; i<avctx->height; i++)
1626 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
1629 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1630 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same trick for a missing forward reference before a B frame. */
1632 if ((s->next_picture_ptr == NULL ||
1633 s->next_picture_ptr->f.data[0] == NULL) &&
1634 s->pict_type == AV_PICTURE_TYPE_B) {
1635 /* Allocate a dummy frame */
1636 i = ff_find_unused_picture(s, 0);
1638 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1641 s->next_picture_ptr = &s->picture[i];
1642 s->next_picture_ptr->f.key_frame = 0;
1643 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1644 s->next_picture_ptr = NULL;
1647 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1648 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1651 #if 0 // BUFREF-FIXME
1652 memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1653 memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
/* Refresh the local last/next Picture copies from their pointers. */
1655 if (s->last_picture_ptr) {
1656 ff_mpeg_unref_picture(s, &s->last_picture);
1657 if (s->last_picture_ptr->f.data[0] &&
1658 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1659 s->last_picture_ptr)) < 0)
1662 if (s->next_picture_ptr) {
1663 ff_mpeg_unref_picture(s, &s->next_picture);
1664 if (s->next_picture_ptr->f.data[0] &&
1665 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1666 s->next_picture_ptr)) < 0)
1670 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1671 s->last_picture_ptr->f.data[0]));
/* Field pictures: offset to the bottom field and double the strides so
 * the per-field decode walks every other line. */
1673 if (s->picture_structure!= PICT_FRAME) {
1675 for (i = 0; i < 4; i++) {
1676 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1677 s->current_picture.f.data[i] +=
1678 s->current_picture.f.linesize[i];
1680 s->current_picture.f.linesize[i] *= 2;
1681 s->last_picture.f.linesize[i] *= 2;
1682 s->next_picture.f.linesize[i] *= 2;
1686 s->err_recognition = avctx->err_recognition;
1688 /* set dequantizer, we can't do it during init as
1689 * it might change for mpeg4 and we can't do it in the header
1690 * decode as init is not called for mpeg4 there yet */
1691 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1692 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1693 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1694 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1695 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1696 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1698 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1699 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1702 if (s->dct_error_sum) {
1703 av_assert2(s->avctx->noise_reduction && s->encoding);
1704 update_noise_reduction(s);
1707 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1708 return ff_xvmc_field_start(s, avctx);
1713 /* generic function for encode/decode called after a
1714 * frame has been coded/decoded. */
1715 void ff_MPV_frame_end(MpegEncContext *s)
1717 /* redraw edges for the frame if decoding didn't complete */
1718 // just to make sure that all data is rendered.
1719 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1720 ff_xvmc_field_end(s);
/* Pad out the picture edges (needed by unrestricted-MV motion comp on the
 * next frame) unless a hwaccel owns the surfaces or EMU_EDGE is set. */
1721 } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1722 !s->avctx->hwaccel &&
1723 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1724 s->unrestricted_mv &&
1725 s->current_picture.reference &&
1727 !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1730 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1731 int hshift = desc->log2_chroma_w;
1732 int vshift = desc->log2_chroma_h;
1733 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1734 s->h_edge_pos, s->v_edge_pos,
1735 EDGE_WIDTH, EDGE_WIDTH,
1736 EDGE_TOP | EDGE_BOTTOM);
1737 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1738 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1739 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1740 EDGE_TOP | EDGE_BOTTOM);
1741 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1742 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1743 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1744 EDGE_TOP | EDGE_BOTTOM);
/* Remember picture type / lambda for the next frame's rate control. */
1749 s->last_pict_type = s->pict_type;
1750 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1751 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1752 s->last_non_b_pict_type = s->pict_type;
1755 /* copy back current_picture variables */
1756 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1757 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1758 s->picture[i] = s->current_picture;
1762 assert(i < MAX_PICTURE_COUNT);
1765 // clear copies, to avoid confusion
1767 memset(&s->last_picture, 0, sizeof(Picture));
1768 memset(&s->next_picture, 0, sizeof(Picture));
1769 memset(&s->current_picture, 0, sizeof(Picture));
1771 s->avctx->coded_frame = &s->current_picture_ptr->f;
/* Unblock frame-threaded consumers waiting on this reference frame. */
1773 if (s->current_picture.reference)
1774 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1778 * Draw a line from (ex, ey) -> (sx, sy).
1779 * @param w width of the image
1780 * @param h height of the image
1781 * @param stride stride/linesize of the image
1782 * @param color color of the arrow
1784 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1785 int w, int h, int stride, int color)
/* Clamp both endpoints into the image before any pixel writes. */
1789 sx = av_clip(sx, 0, w - 1);
1790 sy = av_clip(sy, 0, h - 1);
1791 ex = av_clip(ex, 0, w - 1);
1792 ey = av_clip(ey, 0, h - 1);
1794 buf[sy * stride + sx] += color;
/* Step along the major axis; blend across the minor axis with 16.16
 * fixed-point fractions (anti-aliased additive drawing). */
1796 if (FFABS(ex - sx) > FFABS(ey - sy)) {
/* Mostly-horizontal: ensure sx <= ex, then walk x. */
1798 FFSWAP(int, sx, ex);
1799 FFSWAP(int, sy, ey);
1801 buf += sx + sy * stride;
1803 f = ((ey - sy) << 16) / ex;
1804 for (x = 0; x <= ex; x++) {
1806 fr = (x * f) & 0xFFFF;
1807 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1808 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* Mostly-vertical: ensure sy <= ey, then walk y. */
1812 FFSWAP(int, sx, ex);
1813 FFSWAP(int, sy, ey);
1815 buf += sx + sy * stride;
1818 f = ((ex - sx) << 16) / ey;
1821 for(y= 0; y <= ey; y++){
1823 fr = (y*f) & 0xFFFF;
1824 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1825 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1831 * Draw an arrow from (ex, ey) -> (sx, sy).
1832 * @param w width of the image
1833 * @param h height of the image
1834 * @param stride stride/linesize of the image
1835 * @param color color of the arrow
1837 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1838 int ey, int w, int h, int stride, int color)
/* Loose clamp (±100 px) keeps the arithmetic sane while still letting
 * draw_line() clip the final coordinates to the image. */
1842 sx = av_clip(sx, -100, w + 100);
1843 sy = av_clip(sy, -100, h + 100);
1844 ex = av_clip(ex, -100, w + 100);
1845 ey = av_clip(ey, -100, h + 100);
/* Only draw a head when the vector is longer than 3 pixels. */
1850 if (dx * dx + dy * dy > 3 * 3) {
1853 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1855 // FIXME subpixel accuracy
1856 rx = ROUNDED_DIV(rx * 3 << 4, length);
1857 ry = ROUNDED_DIV(ry * 3 << 4, length);
/* Two short strokes rotated ±90° around the tip form the arrowhead. */
1859 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1860 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1862 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1866 * Print debugging info for the given picture.
1868 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1870 int mb_width, int mb_height, int mb_stride, int quarter_sample)
/* Nothing to print without CPU-side MB metadata (hwaccel owns the data). */
1872 if (avctx->hwaccel || !p || !p->mb_type
1873 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
/* Textual dump: one character cell per macroblock on the log. */
1877 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1880 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1881 av_get_picture_type_char(pict->pict_type));
1882 for (y = 0; y < mb_height; y++) {
1883 for (x = 0; x < mb_width; x++) {
1884 if (avctx->debug & FF_DEBUG_SKIP) {
1885 int count = mbskip_table[x + y * mb_stride];
1888 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1890 if (avctx->debug & FF_DEBUG_QP) {
1891 av_log(avctx, AV_LOG_DEBUG, "%2d",
1892 p->qscale_table[x + y * mb_stride]);
1894 if (avctx->debug & FF_DEBUG_MB_TYPE) {
1895 int mb_type = p->mb_type[x + y * mb_stride];
1896 // Type & MV direction
1897 if (IS_PCM(mb_type))
1898 av_log(avctx, AV_LOG_DEBUG, "P");
1899 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1900 av_log(avctx, AV_LOG_DEBUG, "A");
1901 else if (IS_INTRA4x4(mb_type))
1902 av_log(avctx, AV_LOG_DEBUG, "i");
1903 else if (IS_INTRA16x16(mb_type))
1904 av_log(avctx, AV_LOG_DEBUG, "I");
1905 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1906 av_log(avctx, AV_LOG_DEBUG, "d");
1907 else if (IS_DIRECT(mb_type))
1908 av_log(avctx, AV_LOG_DEBUG, "D");
1909 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1910 av_log(avctx, AV_LOG_DEBUG, "g");
1911 else if (IS_GMC(mb_type))
1912 av_log(avctx, AV_LOG_DEBUG, "G");
1913 else if (IS_SKIP(mb_type))
1914 av_log(avctx, AV_LOG_DEBUG, "S");
1915 else if (!USES_LIST(mb_type, 1))
1916 av_log(avctx, AV_LOG_DEBUG, ">");
1917 else if (!USES_LIST(mb_type, 0))
1918 av_log(avctx, AV_LOG_DEBUG, "<");
1920 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1921 av_log(avctx, AV_LOG_DEBUG, "X");
/* Second character: partition shape (+ 8x8, - 16x8, | 8x16, space 16x16). */
1925 if (IS_8X8(mb_type))
1926 av_log(avctx, AV_LOG_DEBUG, "+");
1927 else if (IS_16X8(mb_type))
1928 av_log(avctx, AV_LOG_DEBUG, "-");
1929 else if (IS_8X16(mb_type))
1930 av_log(avctx, AV_LOG_DEBUG, "|");
1931 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1932 av_log(avctx, AV_LOG_DEBUG, " ");
1934 av_log(avctx, AV_LOG_DEBUG, "?");
/* Third character: '=' marks interlaced macroblocks. */
1937 if (IS_INTERLACED(mb_type))
1938 av_log(avctx, AV_LOG_DEBUG, "=");
1940 av_log(avctx, AV_LOG_DEBUG, " ");
1943 av_log(avctx, AV_LOG_DEBUG, "\n");
/* Visual overlays drawn directly into the output frame's pixels. */
1947 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1948 (avctx->debug_mv)) {
1949 const int shift = 1 + quarter_sample;
1953 int h_chroma_shift, v_chroma_shift, block_height;
1954 const int width = avctx->width;
1955 const int height = avctx->height;
/* H.264/SVQ3 store MVs at 4x4 granularity (log2=2), others at 8x8 (log2=1). */
1956 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1957 const int mv_stride = (mb_width << mv_sample_log2) +
1958 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1960 *low_delay = 0; // needed to see the vectors without trashing the buffers
1962 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1964 av_frame_make_writable(pict);
1966 pict->opaque = NULL;
1967 ptr = pict->data[0];
1968 block_height = 16 >> v_chroma_shift;
1970 for (mb_y = 0; mb_y < mb_height; mb_y++) {
1972 for (mb_x = 0; mb_x < mb_width; mb_x++) {
1973 const int mb_index = mb_x + mb_y * mb_stride;
/* Motion-vector arrows: type 0 = P forward, 1 = B forward, 2 = B backward;
 * each type is skipped unless its debug_mv bit and picture type match. */
1974 if ((avctx->debug_mv) && p->motion_val[0]) {
1976 for (type = 0; type < 3; type++) {
1980 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1981 (pict->pict_type!= AV_PICTURE_TYPE_P))
1986 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1987 (pict->pict_type!= AV_PICTURE_TYPE_B))
1992 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1993 (pict->pict_type!= AV_PICTURE_TYPE_B))
1998 if (!USES_LIST(p->mb_type[mb_index], direction))
/* One arrow per partition, anchored at each partition's center. */
2001 if (IS_8X8(p->mb_type[mb_index])) {
2003 for (i = 0; i < 4; i++) {
2004 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2005 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2006 int xy = (mb_x * 2 + (i & 1) +
2007 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2008 int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
2009 int my = (p->motion_val[direction][xy][1] >> shift) + sy;
2010 draw_arrow(ptr, sx, sy, mx, my, width,
2011 height, pict->linesize[0], 100);
2013 } else if (IS_16X8(p->mb_type[mb_index])) {
2015 for (i = 0; i < 2; i++) {
2016 int sx = mb_x * 16 + 8;
2017 int sy = mb_y * 16 + 4 + 8 * i;
2018 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2019 int mx = (p->motion_val[direction][xy][0] >> shift);
2020 int my = (p->motion_val[direction][xy][1] >> shift);
2022 if (IS_INTERLACED(p->mb_type[mb_index]))
2025 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2026 height, pict->linesize[0], 100);
2028 } else if (IS_8X16(p->mb_type[mb_index])) {
2030 for (i = 0; i < 2; i++) {
2031 int sx = mb_x * 16 + 4 + 8 * i;
2032 int sy = mb_y * 16 + 8;
2033 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2034 int mx = p->motion_val[direction][xy][0] >> shift;
2035 int my = p->motion_val[direction][xy][1] >> shift;
2037 if (IS_INTERLACED(p->mb_type[mb_index]))
2040 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2041 height, pict->linesize[0], 100);
/* Fallback: single 16x16 arrow from the macroblock center. */
2044 int sx= mb_x * 16 + 8;
2045 int sy= mb_y * 16 + 8;
2046 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2047 int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2048 int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2049 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
/* QP visualization: paint both chroma planes with a gray level
 * proportional to the macroblock's qscale (max 31 -> 128). */
2053 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2054 uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2055 0x0101010101010101ULL;
2057 for (y = 0; y < block_height; y++) {
2058 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2059 (block_height * mb_y + y) *
2060 pict->linesize[1]) = c;
2061 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2062 (block_height * mb_y + y) *
2063 pict->linesize[2]) = c;
/* MB-type visualization: pick a hue (U,V) per macroblock class via the
 * COLOR(theta, r) macro below, then overlay partition boundary marks. */
2066 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2068 int mb_type = p->mb_type[mb_index];
2071 #define COLOR(theta, r) \
2072 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2073 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2077 if (IS_PCM(mb_type)) {
2079 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2080 IS_INTRA16x16(mb_type)) {
2082 } else if (IS_INTRA4x4(mb_type)) {
2084 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2086 } else if (IS_DIRECT(mb_type)) {
2088 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2090 } else if (IS_GMC(mb_type)) {
2092 } else if (IS_SKIP(mb_type)) {
2094 } else if (!USES_LIST(mb_type, 1)) {
2096 } else if (!USES_LIST(mb_type, 0)) {
2099 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* Replicate the chosen chroma byte across a 64-bit store. */
2103 u *= 0x0101010101010101ULL;
2104 v *= 0x0101010101010101ULL;
2105 for (y = 0; y < block_height; y++) {
2106 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2107 (block_height * mb_y + y) * pict->linesize[1]) = u;
2108 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2109 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* XOR luma with 0x80 along partition split lines to outline them. */
2113 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2114 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2115 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2116 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2117 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2119 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2120 for (y = 0; y < 16; y++)
2121 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2122 pict->linesize[0]] ^= 0x80;
/* Sub-8x8 splits: mark a 4x4 boundary only where adjacent MVs differ. */
2124 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2125 int dm = 1 << (mv_sample_log2 - 2);
2126 for (i = 0; i < 4; i++) {
2127 int sx = mb_x * 16 + 8 * (i & 1);
2128 int sy = mb_y * 16 + 8 * (i >> 1);
2129 int xy = (mb_x * 2 + (i & 1) +
2130 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2132 int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2133 if (mv[0] != mv[dm] ||
2134 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2135 for (y = 0; y < 8; y++)
2136 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2137 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2138 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2139 pict->linesize[0]) ^= 0x8080808080808080ULL;
2143 if (IS_INTERLACED(mb_type) &&
2144 avctx->codec->id == AV_CODEC_ID_H264) {
/* Reset the skip counter once the macroblock has been visualized. */
2148 mbskip_table[mb_index] = 0;
/* Convenience wrapper: forward the MpegEncContext's geometry and tables
 * to the codec-independent ff_print_debug_info2(). */
2154 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2156 ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2157 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/**
 * Attach the picture's qscale table to the output AVFrame as side data.
 * A new reference to the table buffer is taken and trimmed (offset past
 * the 2-row + 1 border padding) so the frame owns its own view of it.
 * @return 0 on success, AVERROR(ENOMEM) if the buffer ref fails.
 */
2160 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2162 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2163 int offset = 2*s->mb_stride + 1;
2165 return AVERROR(ENOMEM);
2166 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2167 ref->size -= offset;
2168 ref->data += offset;
2169 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/**
 * Half-pel motion compensation for one block in lowres decoding mode.
 * Scales the motion vector by the lowres shift, clips the source region
 * (spilling into edge_emu_buffer when it crosses the picture edge) and
 * applies the h264 chroma MC kernel with the sub-pel fractions sx/sy.
 */
2172 static inline int hpel_motion_lowres(MpegEncContext *s,
2173 uint8_t *dest, uint8_t *src,
2174 int field_based, int field_select,
2175 int src_x, int src_y,
2176 int width, int height, int stride,
2177 int h_edge_pos, int v_edge_pos,
2178 int w, int h, h264_chroma_mc_func *pix_op,
2179 int motion_x, int motion_y)
2181 const int lowres = s->avctx->lowres;
2182 const int op_index = FFMIN(lowres, 3);
/* s_mask isolates the sub-pel fraction bits for this lowres factor. */
2183 const int s_mask = (2 << lowres) - 1;
2187 if (s->quarter_sample) {
/* Split the MV into integer position (src_x/src_y) and fraction (sx/sy).
 * Note ">> lowres + 1" parses as ">> (lowres + 1)". */
2192 sx = motion_x & s_mask;
2193 sy = motion_y & s_mask;
2194 src_x += motion_x >> lowres + 1;
2195 src_y += motion_y >> lowres + 1;
2197 src += src_y * stride + src_x;
/* Out-of-picture reads go through the emulated-edge scratch buffer. */
2199 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2200 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2201 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
2202 (h + 1) << field_based, src_x,
2203 src_y << field_based,
2206 src = s->edge_emu_buffer;
/* Rescale fractions to the 1/8-pel domain expected by pix_op. */
2210 sx = (sx << 2) >> lowres;
2211 sy = (sy << 2) >> lowres;
2214 pix_op[op_index](dest, src, stride, h, sx, sy);
2218 /* apply one mpeg motion vector to the three components */
/*
 * Low-resolution (lowres) full-pel MPEG motion compensation: applies one
 * motion vector to the luma plane and both chroma planes, with per-format
 * (H.263 / H.261 / MPEG) chroma MV derivation and edge emulation.
 * NOTE(review): this listing is elided — intermediate source lines are
 * missing, so the comments below describe only the visible statements.
 */
2219 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2226 uint8_t **ref_picture,
2227 h264_chroma_mc_func *pix_op,
2228 int motion_x, int motion_y,
2231 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2232 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
/* op_index selects the chroma MC function by effective chroma lowres level */
2234 const int lowres = s->avctx->lowres;
2235 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2236 const int block_s = 8>>lowres;
/* s_mask extracts the sub-pel fraction of an MV at this lowres level */
2237 const int s_mask = (2 << lowres) - 1;
2238 const int h_edge_pos = s->h_edge_pos >> lowres;
2239 const int v_edge_pos = s->v_edge_pos >> lowres;
2240 linesize = s->current_picture.f.linesize[0] << field_based;
2241 uvlinesize = s->current_picture.f.linesize[1] << field_based;
2243 // FIXME obviously not perfect but qpel will not work in lowres anyway
2244 if (s->quarter_sample) {
2250 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
/* split the MV into sub-pel fraction (sx/sy) and integer source position */
2253 sx = motion_x & s_mask;
2254 sy = motion_y & s_mask;
2255 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2256 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2258 if (s->out_format == FMT_H263) {
2259 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2260 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2261 uvsrc_x = src_x >> 1;
2262 uvsrc_y = src_y >> 1;
2263 } else if (s->out_format == FMT_H261) {
2264 // even chroma mv's are full pel in H261
2267 uvsx = (2 * mx) & s_mask;
2268 uvsy = (2 * my) & s_mask;
2269 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2270 uvsrc_y = mb_y * block_s + (my >> lowres);
2272 if(s->chroma_y_shift){
2277 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2278 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2280 if(s->chroma_x_shift){
2284 uvsy = motion_y & s_mask;
2286 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2289 uvsx = motion_x & s_mask;
2290 uvsy = motion_y & s_mask;
2297 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2298 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2299 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* if the MC read would leave the picture, replicate edges into a scratch
 * buffer and read from there instead */
2301 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2302 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2303 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2304 linesize >> field_based, 17, 17 + field_based,
2305 src_x, src_y << field_based, h_edge_pos,
2307 ptr_y = s->edge_emu_buffer;
2308 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2309 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2310 s->vdsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9,
2312 uvsrc_x, uvsrc_y << field_based,
2313 h_edge_pos >> 1, v_edge_pos >> 1);
2314 s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9,
2316 uvsrc_x, uvsrc_y << field_based,
2317 h_edge_pos >> 1, v_edge_pos >> 1);
2319 ptr_cr = uvbuf + 16;
2323 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
/* presumably the bottom-field path: skip one line on dest and src —
 * TODO confirm, the guarding condition is elided from this listing */
2325 dest_y += s->linesize;
2326 dest_cb += s->uvlinesize;
2327 dest_cr += s->uvlinesize;
2331 ptr_y += s->linesize;
2332 ptr_cb += s->uvlinesize;
2333 ptr_cr += s->uvlinesize;
/* rescale sub-pel fractions to the 1/8-pel domain the MC functions expect */
2336 sx = (sx << 2) >> lowres;
2337 sy = (sy << 2) >> lowres;
2338 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2340 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2341 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2342 uvsx = (uvsx << 2) >> lowres;
2343 uvsy = (uvsy << 2) >> lowres;
2345 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2346 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2349 // FIXME h261 lowres loop filter
/*
 * Chroma motion compensation for 4MV macroblocks in lowres decoding:
 * the four luma MVs are combined into one chroma MV (H.263-style rounding)
 * and applied to both the Cb and Cr planes.
 * NOTE(review): elided listing — some source lines are missing.
 */
2352 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2353 uint8_t *dest_cb, uint8_t *dest_cr,
2354 uint8_t **ref_picture,
2355 h264_chroma_mc_func * pix_op,
2358 const int lowres = s->avctx->lowres;
2359 const int op_index = FFMIN(lowres, 3);
2360 const int block_s = 8 >> lowres;
2361 const int s_mask = (2 << lowres) - 1;
/* chroma is half resolution, hence the extra >> 1 on the edge positions */
2362 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2363 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2364 int emu = 0, src_x, src_y, offset, sx, sy;
2367 if (s->quarter_sample) {
2372 /* In case of 8X8, we construct a single chroma motion vector
2373 with a special rounding */
2374 mx = ff_h263_round_chroma(mx);
2375 my = ff_h263_round_chroma(my);
2379 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2380 src_y = s->mb_y * block_s + (my >> lowres + 1);
2382 offset = src_y * s->uvlinesize + src_x;
2383 ptr = ref_picture[1] + offset;
2384 if (s->flags & CODEC_FLAG_EMU_EDGE) {
2385 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2386 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2387 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
2388 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2389 ptr = s->edge_emu_buffer;
/* convert sub-pel fraction to the 1/8-pel scale used by the MC functions */
2393 sx = (sx << 2) >> lowres;
2394 sy = (sy << 2) >> lowres;
2395 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr uses the same offset as Cb; edge emulation repeated if needed
 * (the guarding condition for this second call is elided here) */
2397 ptr = ref_picture[2] + offset;
2399 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
2400 src_x, src_y, h_edge_pos, v_edge_pos);
2401 ptr = s->edge_emu_buffer;
2403 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2407 * motion compensation of a single macroblock
2409 * @param dest_y luma destination pointer
2410 * @param dest_cb chroma cb/u destination pointer
2411 * @param dest_cr chroma cr/v destination pointer
2412 * @param dir direction (0->forward, 1->backward)
2413 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2414 * @param pix_op halfpel motion compensation function (average or put normally)
2415 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/*
 * Lowres counterpart of ff_MPV_motion(): dispatches on s->mv_type
 * (16x16 / 8x8 / field / 16x8 / dual-prime-style cases) to the lowres MC
 * helpers above. NOTE(review): elided listing — the case labels and some
 * argument lines are missing from this view.
 */
2417 static inline void MPV_motion_lowres(MpegEncContext *s,
2418 uint8_t *dest_y, uint8_t *dest_cb,
2420 int dir, uint8_t **ref_picture,
2421 h264_chroma_mc_func *pix_op)
2425 const int lowres = s->avctx->lowres;
2426 const int block_s = 8 >>lowres;
2431 switch (s->mv_type) {
/* single 16x16 vector for the whole macroblock */
2433 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2435 ref_picture, pix_op,
2436 s->mv[dir][0][0], s->mv[dir][0][1],
/* 4MV: one vector per 8x8 luma block; chroma handled afterwards from the
 * accumulated mx/my */
2442 for (i = 0; i < 4; i++) {
2443 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2444 s->linesize) * block_s,
2445 ref_picture[0], 0, 0,
2446 (2 * mb_x + (i & 1)) * block_s,
2447 (2 * mb_y + (i >> 1)) * block_s,
2448 s->width, s->height, s->linesize,
2449 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2450 block_s, block_s, pix_op,
2451 s->mv[dir][i][0], s->mv[dir][i][1]);
2453 mx += s->mv[dir][i][0];
2454 my += s->mv[dir][i][1];
2457 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2458 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field MVs inside a frame picture: one MC per field */
2462 if (s->picture_structure == PICT_FRAME) {
2464 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2465 1, 0, s->field_select[dir][0],
2466 ref_picture, pix_op,
2467 s->mv[dir][0][0], s->mv[dir][0][1],
2470 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2471 1, 1, s->field_select[dir][1],
2472 ref_picture, pix_op,
2473 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture referencing the opposite parity of the current frame:
 * use current_picture as the reference instead */
2476 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2477 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2478 ref_picture = s->current_picture_ptr->f.data;
2481 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2482 0, 0, s->field_select[dir][0],
2483 ref_picture, pix_op,
2485 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two vectors, one per macroblock half */
2489 for (i = 0; i < 2; i++) {
2490 uint8_t **ref2picture;
2492 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2493 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2494 ref2picture = ref_picture;
2496 ref2picture = s->current_picture_ptr->f.data;
2499 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2500 0, 0, s->field_select[dir][i],
2501 ref2picture, pix_op,
2502 s->mv[dir][i][0], s->mv[dir][i][1] +
2503 2 * block_s * i, block_s, mb_y >> 1);
2505 dest_y += 2 * block_s * s->linesize;
2506 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2507 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2511 if (s->picture_structure == PICT_FRAME) {
2512 for (i = 0; i < 2; i++) {
2514 for (j = 0; j < 2; j++) {
2515 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2517 ref_picture, pix_op,
2518 s->mv[dir][2 * i + j][0],
2519 s->mv[dir][2 * i + j][1],
2522 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2525 for (i = 0; i < 2; i++) {
2526 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2527 0, 0, s->picture_structure != i + 1,
2528 ref_picture, pix_op,
2529 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2530 2 * block_s, mb_y >> 1);
2532 // after put we make avg of the same block
2533 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2535 // opposite parity is always in the same
2536 // frame if this is second field
2537 if (!s->first_field) {
2538 ref_picture = s->current_picture_ptr->f.data;
2549 * find the lowest MB row referenced in the MVs
/*
 * Used for frame-threaded decoding: the caller waits until the reference
 * frame has decoded at least this MB row before doing MC.
 * Returns mb_height-1 (i.e. the whole frame) for field pictures / mcsel.
 * NOTE(review): the switch cases setting `mvs` are elided from this listing.
 */
2551 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2553 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2554 int my, off, i, mvs;
2556 if (s->picture_structure != PICT_FRAME || s->mcsel)
2559 switch (s->mv_type) {
/* scan all vertical MV components, normalized to qpel units */
2573 for (i = 0; i < mvs; i++) {
2574 my = s->mv[dir][i][1]<<qpel_shift;
2575 my_max = FFMAX(my_max, my);
2576 my_min = FFMIN(my_min, my);
/* +63 >> 6: round the qpel displacement up to whole 16-pixel MB rows */
2579 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2581 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2583 return s->mb_height-1;
2586 /* put block[] to dest[] */
/* Dequantize an intra block in place, then IDCT and store (overwrite) it
 * into dest. i is the block index (selects luma/chroma DC scale etc.). */
2587 static inline void put_dct(MpegEncContext *s,
2588 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2590 s->dct_unquantize_intra(s, block, i, qscale);
2591 s->dsp.idct_put (dest, line_size, block);
2594 /* add block[] to dest[] */
/* IDCT the (already dequantized) block and add it to dest; skipped
 * entirely when the block has no coded coefficients. */
2595 static inline void add_dct(MpegEncContext *s,
2596 int16_t *block, int i, uint8_t *dest, int line_size)
2598 if (s->block_last_index[i] >= 0) {
2599 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block, IDCT it, and add the residual to dest;
 * no-op when the block has no coded coefficients. */
2603 static inline void add_dequant_dct(MpegEncContext *s,
2604 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2606 if (s->block_last_index[i] >= 0) {
2607 s->dct_unquantize_inter(s, block, i, qscale);
2609 s->dsp.idct_add (dest, line_size, block);
2614 * Clean dc, ac, coded_block for the current non-intra MB.
/*
 * Resets the intra prediction state (luma + chroma DC predictors to 1024,
 * AC prediction values to 0, coded_block flags for MSMPEG4v3+) so that a
 * following intra MB does not predict from stale data, then clears the
 * mbintra_table entry for this MB.
 */
2616 void ff_clean_intra_table_entries(MpegEncContext *s)
2618 int wrap = s->b8_stride;
2619 int xy = s->block_index[0];
/* luma: 4 DC predictors (2x2 blocks of the MB) reset to the mid value */
2622 s->dc_val[0][xy + 1 ] =
2623 s->dc_val[0][xy + wrap] =
2624 s->dc_val[0][xy + 1 + wrap] = 1024;
2626 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2627 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2628 if (s->msmpeg4_version>=3) {
2629 s->coded_block[xy ] =
2630 s->coded_block[xy + 1 ] =
2631 s->coded_block[xy + wrap] =
2632 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one entry per MB, indexed on the MB grid */
2635 wrap = s->mb_stride;
2636 xy = s->mb_x + s->mb_y * wrap;
2638 s->dc_val[2][xy] = 1024;
2640 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2641 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2643 s->mbintra_table[xy]= 0;
2646 /* generic function called after a macroblock has been parsed by the
2647 decoder or after it has been encoded by the encoder.
2649 Important variables used:
2650 s->mb_intra : true if intra macroblock
2651 s->mv_dir : motion vector direction
2652 s->mv_type : motion vector type
2653 s->mv : motion vector
2654 s->interlaced_dct : true if interlaced dct used (mpeg2)
/*
 * Reconstructs one macroblock: motion compensation (lowres or full-res),
 * then residual dequant/IDCT (add for inter, put for intra). lowres_flag
 * and is_mpeg12 are compile-time-constant template parameters (the function
 * is av_always_inline and instantiated from ff_MPV_decode_mb).
 * NOTE(review): elided listing — several lines (loop headers, some closing
 * braces, branch conditions) are missing from this view.
 */
2656 static av_always_inline
2657 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2658 int lowres_flag, int is_mpeg12)
2660 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2661 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2662 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2666 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2667 /* print DCT coefficients */
2669 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2671 for(j=0; j<64; j++){
2672 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2674 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2678 s->current_picture.qscale_table[mb_xy] = s->qscale;
2680 /* update DC predictors for P macroblocks */
2682 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2683 if(s->mbintra_table[mb_xy])
2684 ff_clean_intra_table_entries(s);
2688 s->last_dc[2] = 128 << s->intra_dc_precision;
2691 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2692 s->mbintra_table[mb_xy]=1;
2694 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2695 uint8_t *dest_y, *dest_cb, *dest_cr;
2696 int dct_linesize, dct_offset;
2697 op_pixels_func (*op_pix)[4];
2698 qpel_mc_func (*op_qpix)[16];
2699 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2700 const int uvlinesize = s->current_picture.f.linesize[1];
2701 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2702 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2704 /* avoid copy if macroblock skipped in last frame too */
2705 /* skip only during decoding as we might trash the buffers during encoding a bit */
2707 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2709 if (s->mb_skipped) {
2711 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2713 } else if(!s->current_picture.reference) {
2716 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: rows of one field are interleaved, so double the stride
 * and start the second block one line (not one block) below */
2720 dct_linesize = linesize << s->interlaced_dct;
2721 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2725 dest_cb= s->dest[1];
2726 dest_cr= s->dest[2];
/* non-readable destination: reconstruct into the scratchpad and copy out
 * at the end (see put_pixels_tab calls below) */
2728 dest_y = s->b_scratchpad;
2729 dest_cb= s->b_scratchpad+16*linesize;
2730 dest_cr= s->b_scratchpad+32*linesize;
2734 /* motion handling */
2735 /* decoding or more than one mb_type (MC was already done otherwise) */
2738 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
/* frame threading: block until the reference rows we need are decoded */
2739 if (s->mv_dir & MV_DIR_FORWARD) {
2740 ff_thread_await_progress(&s->last_picture_ptr->tf,
2741 ff_MPV_lowest_referenced_row(s, 0),
2744 if (s->mv_dir & MV_DIR_BACKWARD) {
2745 ff_thread_await_progress(&s->next_picture_ptr->tf,
2746 ff_MPV_lowest_referenced_row(s, 1),
/* lowres MC path: forward pred "puts", backward pred then "avg"s for B */
2752 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2754 if (s->mv_dir & MV_DIR_FORWARD) {
2755 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2756 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2758 if (s->mv_dir & MV_DIR_BACKWARD) {
2759 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* full-resolution MC path, same put-then-avg pattern */
2762 op_qpix= s->me.qpel_put;
2763 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2764 op_pix = s->hdsp.put_pixels_tab;
2766 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2768 if (s->mv_dir & MV_DIR_FORWARD) {
2769 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2770 op_pix = s->hdsp.avg_pixels_tab;
2771 op_qpix= s->me.qpel_avg;
2773 if (s->mv_dir & MV_DIR_BACKWARD) {
2774 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2779 /* skip dequant / idct if we are really late ;) */
2780 if(s->avctx->skip_idct){
2781 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2782 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2783 || s->avctx->skip_idct >= AVDISCARD_ALL)
2787 /* add dct residue */
/* codecs whose dequant is not folded into the bitstream decoder:
 * dequantize here while adding the residual */
2788 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2789 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2790 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2791 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2792 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2793 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2795 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2796 if (s->chroma_y_shift){
2797 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2798 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2802 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2803 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2804 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2805 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2808 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
/* residual already dequantized by the bitstream decoder: just IDCT-add */
2809 add_dct(s, block[0], 0, dest_y , dct_linesize);
2810 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2811 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2812 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2814 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2815 if(s->chroma_y_shift){//Chroma420
2816 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2817 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2820 dct_linesize = uvlinesize << s->interlaced_dct;
2821 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2823 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2824 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2825 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2826 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2827 if(!s->chroma_x_shift){//Chroma444
2828 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2829 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2830 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2831 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2836 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2837 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2840 /* dct only in intra block */
2841 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2842 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2843 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2844 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2845 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2847 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2848 if(s->chroma_y_shift){
2849 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2850 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2854 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2855 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2856 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2857 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* intra blocks already dequantized: plain IDCT-put */
2861 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2862 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2863 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2864 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2866 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2867 if(s->chroma_y_shift){
2868 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2869 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2872 dct_linesize = uvlinesize << s->interlaced_dct;
2873 dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
2875 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2876 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2877 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2878 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2879 if(!s->chroma_x_shift){//Chroma444
2880 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2881 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2882 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2883 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* scratchpad path: copy the reconstructed MB back to the real frame */
2891 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2892 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2893 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: picks the (lowres_flag, is_mpeg12) instantiation of
 * MPV_decode_mb_internal so each combination gets its own optimized copy. */
2898 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2900 if(s->out_format == FMT_MPEG1) {
2901 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2902 else MPV_decode_mb_internal(s, block, 0, 1);
2905 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2906 else MPV_decode_mb_internal(s, block, 0, 0);
2910 * @param h is the normal height, this will be reduced automatically if needed for the last row
/*
 * Draws padding edges around a freshly decoded band (unless hwaccel/VDPAU/
 * EMU_EDGE make that unnecessary) and then invokes the user's
 * draw_horiz_band callback with plane offsets for rows [y, y+h).
 * NOTE(review): elided listing — a few lines (e.g. the `src` selection
 * between cur/last) are missing from this view.
 */
2912 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2913 Picture *last, int y, int h, int picture_structure,
2914 int first_field, int draw_edges, int low_delay,
2915 int v_edge_pos, int h_edge_pos)
2917 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2918 int hshift = desc->log2_chroma_w;
2919 int vshift = desc->log2_chroma_h;
2920 const int field_pic = picture_structure != PICT_FRAME;
2926 if (!avctx->hwaccel &&
2927 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2930 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2931 int *linesize = cur->f.linesize;
2932 int sides = 0, edge_h;
2933 if (y==0) sides |= EDGE_TOP;
2934 if (y + h >= v_edge_pos)
2935 sides |= EDGE_BOTTOM;
2937 edge_h= FFMIN(h, v_edge_pos - y);
/* replicate the band's outer pixels into the EDGE_WIDTH border for MC */
2939 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2940 linesize[0], h_edge_pos, edge_h,
2941 EDGE_WIDTH, EDGE_WIDTH, sides);
2942 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2943 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2944 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2945 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2946 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2947 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2950 h = FFMIN(h, avctx->height - y);
2952 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2954 if (avctx->draw_horiz_band) {
2956 int offset[AV_NUM_DATA_POINTERS];
2959 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2960 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2967 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2968 picture_structure == PICT_FRAME &&
2969 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2970 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2973 offset[0]= y * src->linesize[0];
2975 offset[2]= (y >> vshift) * src->linesize[1];
2976 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2982 avctx->draw_horiz_band(avctx, src, offset,
2983 y, picture_structure, h);
/* Thin MpegEncContext wrapper around ff_draw_horiz_band(): forwards the
 * context's current/last pictures, field state and edge positions.
 * Edges are only drawn for unrestricted-MV, non-intra-only streams. */
2987 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2989 int draw_edges = s->unrestricted_mv && !s->intra_only;
2990 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2991 &s->last_picture, y, h, s->picture_structure,
2992 s->first_field, draw_edges, s->low_delay,
2993 s->v_edge_pos, s->h_edge_pos);
/*
 * Sets up per-macroblock indices (block_index[0..5] into the 8x8-block
 * arrays) and the s->dest[0..2] plane pointers for the current MB
 * position, accounting for lowres scaling and field pictures.
 */
2996 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2997 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2998 const int uvlinesize = s->current_picture.f.linesize[1];
/* mb_size is log2 of the MB width in pixels (16 >> lowres) */
2999 const int mb_size= 4 - s->avctx->lowres;
3001 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3002 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3003 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3004 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3005 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3006 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3007 //block_index is not used by mpeg2, so it is not affected by chroma_format
3009 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
3010 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3011 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3013 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3015 if(s->picture_structure==PICT_FRAME){
3016 s->dest[0] += s->mb_y * linesize << mb_size;
3017 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3018 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: mb_y counts both fields, hence the >> 1 */
3020 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3021 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3022 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3023 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3029 * Permute an 8x8 block.
3030 * @param block the block which will be permuted according to the given permutation vector
3031 * @param permutation the permutation vector
3032 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3033 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3034 * (inverse) permutated to scantable order!
3036 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3042 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* first pass: copy the nonzero coefficients (in scan order) to a temp
 * buffer; elided lines presumably clear those source positions in block —
 * TODO confirm against the full source */
3044 for(i=0; i<=last; i++){
3045 const int j= scantable[i];
/* second pass: write each saved coefficient back at its permuted index */
3050 for(i=0; i<=last; i++){
3051 const int j= scantable[i];
3052 const int perm_j= permutation[j];
3053 block[perm_j]= temp[j];
/*
 * Flush callback: drops every reference the context holds (all picture
 * slots plus the current/last/next working pictures), resets MB position,
 * and clears the bitstream parse context so decoding can restart cleanly
 * (e.g. after a seek).
 */
3057 void ff_mpeg_flush(AVCodecContext *avctx){
3059 MpegEncContext *s = avctx->priv_data;
3061 if(s==NULL || s->picture==NULL)
3064 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3065 ff_mpeg_unref_picture(s, &s->picture[i]);
3066 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3068 ff_mpeg_unref_picture(s, &s->current_picture);
3069 ff_mpeg_unref_picture(s, &s->last_picture);
3070 ff_mpeg_unref_picture(s, &s->next_picture);
3072 s->mb_x= s->mb_y= 0;
3075 s->parse_context.state= -1;
3076 s->parse_context.frame_start_found= 0;
3077 s->parse_context.overread= 0;
3078 s->parse_context.overread_index= 0;
3079 s->parse_context.index= 0;
3080 s->parse_context.last_index= 0;
3081 s->bitstream_buffer_size=0;
/*
 * MPEG-1 intra dequantization: DC is scaled by the y/c DC scale, AC
 * coefficients by qscale * intra matrix, with the MPEG-1 "oddification"
 * (level-1)|1 applied to both signs (sign handling lines are elided here).
 */
3085 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
3086 int16_t *block, int n, int qscale)
3088 int i, level, nCoeffs;
3089 const uint16_t *quant_matrix;
3091 nCoeffs= s->block_last_index[n];
/* n < 4 are luma blocks, others chroma */
3093 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3094 /* XXX: only mpeg1 */
3095 quant_matrix = s->intra_matrix;
3096 for(i=1;i<=nCoeffs;i++) {
3097 int j= s->intra_scantable.permutated[i];
3102 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3103 level = (level - 1) | 1;
3106 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3107 level = (level - 1) | 1;
/*
 * MPEG-1 inter dequantization: level' = ((2*|level|+1) * qscale * matrix)
 * >> 4, forced odd via (level-1)|1; processes coefficient 0 too (no DC
 * special case for inter blocks). Sign handling lines are elided here.
 */
3114 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
3115 int16_t *block, int n, int qscale)
3117 int i, level, nCoeffs;
3118 const uint16_t *quant_matrix;
3120 nCoeffs= s->block_last_index[n];
3122 quant_matrix = s->inter_matrix;
3123 for(i=0; i<=nCoeffs; i++) {
3124 int j= s->intra_scantable.permutated[i];
3129 level = (((level << 1) + 1) * qscale *
3130 ((int) (quant_matrix[j]))) >> 4;
3131 level = (level - 1) | 1;
3134 level = (((level << 1) + 1) * qscale *
3135 ((int) (quant_matrix[j]))) >> 4;
3136 level = (level - 1) | 1;
/*
 * MPEG-2 intra dequantization: like the MPEG-1 variant but without the
 * odd-forcing step; alternate_scan forces a full 64-coefficient pass.
 */
3143 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
3144 int16_t *block, int n, int qscale)
3146 int i, level, nCoeffs;
3147 const uint16_t *quant_matrix;
3149 if(s->alternate_scan) nCoeffs= 63;
3150 else nCoeffs= s->block_last_index[n];
3152 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3153 quant_matrix = s->intra_matrix;
3154 for(i=1;i<=nCoeffs;i++) {
3155 int j= s->intra_scantable.permutated[i];
3160 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3163 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * Bit-exact variant of MPEG-2 intra dequantization; presumably tracks a
 * mismatch-control sum over the coefficients (those lines are elided from
 * this listing — TODO confirm against the full source).
 */
3170 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
3171 int16_t *block, int n, int qscale)
3173 int i, level, nCoeffs;
3174 const uint16_t *quant_matrix;
3177 if(s->alternate_scan) nCoeffs= 63;
3178 else nCoeffs= s->block_last_index[n];
3180 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3182 quant_matrix = s->intra_matrix;
3183 for(i=1;i<=nCoeffs;i++) {
3184 int j= s->intra_scantable.permutated[i];
3189 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3192 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * MPEG-2 inter dequantization: ((2*|level|+1) * qscale * matrix) >> 4,
 * no odd-forcing; alternate_scan forces a full 64-coefficient pass.
 * Sign handling lines are elided from this listing.
 */
3201 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
3202 int16_t *block, int n, int qscale)
3204 int i, level, nCoeffs;
3205 const uint16_t *quant_matrix;
3208 if(s->alternate_scan) nCoeffs= 63;
3209 else nCoeffs= s->block_last_index[n];
3211 quant_matrix = s->inter_matrix;
3212 for(i=0; i<=nCoeffs; i++) {
3213 int j= s->intra_scantable.permutated[i];
3218 level = (((level << 1) + 1) * qscale *
3219 ((int) (quant_matrix[j]))) >> 4;
3222 level = (((level << 1) + 1) * qscale *
3223 ((int) (quant_matrix[j]))) >> 4;
/*
 * H.263-style intra dequantization: uniform quantizer, level' = level *
 * qmul ± qadd with qadd = (qscale-1)|1 (qmul assignment is elided here).
 * DC is scaled by the appropriate luma/chroma DC scale.
 */
3232 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
3233 int16_t *block, int n, int qscale)
3235 int i, level, qmul, qadd;
3238 av_assert2(s->block_last_index[n]>=0);
3243 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3244 qadd = (qscale - 1) | 1;
/* raster_end converts the scan-order last index to a raster-order bound */
3251 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3253 for(i=1; i<=nCoeffs; i++) {
3257 level = level * qmul - qadd;
3259 level = level * qmul + qadd;
/*
 * H.263-style inter dequantization: same uniform quantizer as the intra
 * variant but starting at coefficient 0 (no DC special case).
 */
3266 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
3267 int16_t *block, int n, int qscale)
3269 int i, level, qmul, qadd;
3272 av_assert2(s->block_last_index[n]>=0);
3274 qadd = (qscale - 1) | 1;
3277 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3279 for(i=0; i<=nCoeffs; i++) {
3283 level = level * qmul - qadd;
3285 level = level * qmul + qadd;
3293 * set qscale and update qscale dependent variables.
/* Clamps qscale to the valid range (the low clamp is elided here) and
 * refreshes the derived chroma qscale and DC scale factors. */
3295 void ff_set_qscale(MpegEncContext * s, int qscale)
3299 else if (qscale > 31)
3303 s->chroma_qscale= s->chroma_qscale_table[qscale];
3305 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3306 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Frame-threading: report that decoding has reached row s->mb_y, so other
 * threads waiting in ff_thread_await_progress() may proceed. Suppressed
 * for B frames, partitioned frames, and after an ER-detected error. */
3309 void ff_MPV_report_decode_progress(MpegEncContext *s)
3311 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3312 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3315 #if CONFIG_ERROR_RESILIENCE
/* Copies the per-frame state the error-resilience module needs out of the
 * MpegEncContext into its ERContext, then starts ER for this frame. */
3316 void ff_mpeg_er_frame_start(MpegEncContext *s)
3318 ERContext *er = &s->er;
3320 er->cur_pic = s->current_picture_ptr;
3321 er->last_pic = s->last_picture_ptr;
3322 er->next_pic = s->next_picture_ptr;
3324 er->pp_time = s->pp_time;
3325 er->pb_time = s->pb_time;
3326 er->quarter_sample = s->quarter_sample;
3327 er->partitioned_frame = s->partitioned_frame;
3329 ff_er_frame_start(er);
3331 #endif /* CONFIG_ERROR_RESILIENCE */