2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/avassert.h"
31 #include "libavutil/imgutils.h"
34 #include "h264chroma.h"
37 #include "mpegvideo.h"
40 #include "xvmc_internal.h"
/* Per-standard DCT coefficient dequantizers (plain-C reference versions).
 * ff_dct_common_init() below installs the appropriate set into the
 * MpegEncContext dct_unquantize_* function pointers; arch-specific code
 * may later override them. */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 int16_t *block, int n, int qscale);
/* Default luma-qscale -> chroma-qscale mapping: the identity mapping
 * (chroma uses the same qscale as luma). Codecs that need a non-linear
 * mapping install their own table instead. */
66 static const uint8_t ff_default_chroma_qscale_table[32] = {
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
68 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
69 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* DC coefficient scale table indexed by qscale: a constant 8 for every
 * entry, i.e. the MPEG-1 DC scale does not depend on qscale. */
72 const uint8_t ff_mpeg1_dc_scale_table[128] = {
73 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* DC scale table holding a constant 4 for all 128 qscale indices
 * (selected via ff_mpeg2_dc_scale_table[] below). */
84 static const uint8_t mpeg2_dc_scale_table1[128] = {
85 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* DC scale table holding a constant 2 for all 128 qscale indices
 * (selected via ff_mpeg2_dc_scale_table[] below). */
96 static const uint8_t mpeg2_dc_scale_table2[128] = {
97 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* DC scale table holding a constant 1 for all 128 qscale indices
 * (selected via ff_mpeg2_dc_scale_table[] below). */
108 static const uint8_t mpeg2_dc_scale_table3[128] = {
109 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Runtime-selectable DC scale tables; the four entries hold the constants
 * 8, 4, 2 and 1 respectively. Presumably indexed by the MPEG-2
 * intra_dc_precision field — confirm against callers. */
120 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
121 ff_mpeg1_dc_scale_table,
122 mpeg2_dc_scale_table1,
123 mpeg2_dc_scale_table2,
124 mpeg2_dc_scale_table3,
/* Pixel format list for plain 4:2:0 codecs.
 * NOTE(review): the initializer entries are elided in this listing. */
127 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback: reconstruct one macroblock from the motion
 * parameters supplied by the error concealment code. Registered as
 * er->decode_mb in init_er() below. */
132 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
134 int mb_x, int mb_y, int mb_intra, int mb_skipped)
136 MpegEncContext *s = opaque;
139 s->mv_type = mv_type;
140 s->mb_intra = mb_intra;
141 s->mb_skipped = mb_skipped;
/* adopt the motion vectors handed in by the concealment code */
144 memcpy(s->mv, mv, sizeof(*mv));
146 ff_init_block_index(s);
147 ff_update_block_index(s);
149 s->dsp.clear_blocks(s->block[0]);
/* point dest[] at the current MB inside the current picture's planes;
 * chroma offsets are scaled by the chroma subsampling shifts */
151 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
152 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
153 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
156 ff_MPV_decode_mb(s, s->block);
159 /* init common dct for both encoder and decoder */
160 av_cold int ff_dct_common_init(MpegEncContext *s)
/* initialize the generic DSP helper contexts */
162 ff_dsputil_init(&s->dsp, s->avctx);
163 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
164 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
165 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* install the C reference dequantizers declared above */
167 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
168 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
169 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
170 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
171 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* bit-exact output requested: use the bitexact MPEG-2 intra variant */
172 if (s->flags & CODEC_FLAG_BITEXACT)
173 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
174 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* arch-specific overrides.
 * NOTE(review): each of these calls is normally wrapped in per-arch
 * ARCH_* preprocessor guards, elided in this listing. */
177 ff_MPV_common_init_x86(s);
179 ff_MPV_common_init_axp(s);
181 ff_MPV_common_init_arm(s);
183 ff_MPV_common_init_altivec(s);
185 ff_MPV_common_init_bfin(s);
188 /* load & permute scantables
189 * note: only wmv uses different ones
/* alternate_scan selects vertical-alternate scan for both inter and intra */
191 if (s->alternate_scan) {
192 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
195 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
196 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
198 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
199 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers: the edge emulation
 * buffer and the motion-estimation scratchpad. Several scratch pointers
 * alias into the single me.scratchpad allocation. Returns
 * AVERROR(ENOMEM) on failure (via the fail label, elided here). */
204 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
206 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
208 // edge emu needs blocksize + filter length - 1
209 // (= 17x17 for halfpel / 21x21 for h264)
210 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
211 // at uvlinesize. It supports only YUV420 so 24x24 is enough
212 // linesize * interlaced * MBsize
213 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
216 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
/* the remaining scratch pointers share the me.scratchpad allocation */
218 s->me.temp = s->me.scratchpad;
219 s->rd_scratchpad = s->me.scratchpad;
220 s->b_scratchpad = s->me.scratchpad;
221 s->obmc_scratchpad = s->me.scratchpad + 16;
/* failure path: release what was allocated and report out-of-memory */
225 av_freep(&s->edge_emu_buffer);
226 return AVERROR(ENOMEM);
230 * Allocate a frame buffer
232 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* the WM image/screen codecs use internal buffers with different
 * dimensions, so they bypass the (possibly user-supplied) threaded
 * get_buffer path and use the default allocator below instead */
237 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
238 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
239 s->codec_id != AV_CODEC_ID_MSS2)
240 r = ff_thread_get_buffer(s->avctx, &pic->tf,
241 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* fallback path for the codecs excluded above: fill in the frame
 * parameters and allocate with the default get_buffer2 */
243 pic->f.width = s->avctx->width;
244 pic->f.height = s->avctx->height;
245 pic->f.format = s->avctx->pix_fmt;
246 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
249 if (r < 0 || !pic->f.data[0]) {
250 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* allocate per-picture hwaccel private data if the hwaccel needs it */
255 if (s->avctx->hwaccel) {
256 assert(!pic->hwaccel_picture_private);
257 if (s->avctx->hwaccel->priv_data_size) {
258 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
259 if (!pic->hwaccel_priv_buf) {
260 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
263 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* the context caches linesize/uvlinesize; a stride change mid-stream
 * would invalidate all derived pointers, so reject it */
267 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
268 s->uvlinesize != pic->f.linesize[1])) {
269 av_log(s->avctx, AV_LOG_ERROR,
270 "get_buffer() failed (stride changed)\n");
271 ff_mpeg_unref_picture(s, pic);
/* U and V planes must share one stride */
275 if (pic->f.linesize[1] != pic->f.linesize[2]) {
276 av_log(s->avctx, AV_LOG_ERROR,
277 "get_buffer() failed (uv stride mismatch)\n");
278 ff_mpeg_unref_picture(s, pic);
/* lazily allocate the linesize-dependent scratch buffers */
282 if (!s->edge_emu_buffer &&
283 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
284 av_log(s->avctx, AV_LOG_ERROR,
285 "get_buffer() failed to allocate context scratch buffers.\n");
286 ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data table buffers and reset the recorded
 * allocation dimensions so the tables get reallocated on next use. */
293 static void free_picture_tables(Picture *pic)
297 pic->alloc_mb_width =
298 pic->alloc_mb_height = 0;
300 av_buffer_unref(&pic->mb_var_buf);
301 av_buffer_unref(&pic->mc_mb_var_buf);
302 av_buffer_unref(&pic->mb_mean_buf);
303 av_buffer_unref(&pic->mbskip_table_buf);
304 av_buffer_unref(&pic->qscale_table_buf);
305 av_buffer_unref(&pic->mb_type_buf);
/* per-direction (forward/backward) motion vector and ref-index buffers */
307 for (i = 0; i < 2; i++) {
308 av_buffer_unref(&pic->motion_val_buf[i]);
309 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data tables (skip/qscale/mb_type, and
 * conditionally variance and motion tables), sized from the current
 * macroblock geometry. Returns 0 on success, AVERROR(ENOMEM) on any
 * allocation failure. */
313 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
315 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
316 const int mb_array_size = s->mb_stride * s->mb_height;
317 const int b8_array_size = s->b8_stride * s->mb_height * 2;
321 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
322 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
323 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
325 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
326 return AVERROR(ENOMEM);
/* variance/mean tables — presumably encoder-only; the guarding
 * condition is elided in this listing */
329 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
330 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
331 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
332 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
333 return AVERROR(ENOMEM);
/* motion vector / ref index tables, needed for H.263-family output,
 * encoding, or motion-vector debugging */
336 if (s->out_format == FMT_H263 || s->encoding ||
337 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
338 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
339 int ref_index_size = 4 * mb_array_size;
341 for (i = 0; mv_size && i < 2; i++) {
342 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
343 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
344 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
345 return AVERROR(ENOMEM);
/* remember the geometry these tables were sized for, so a later
 * resolution change can trigger reallocation */
349 pic->alloc_mb_width = s->mb_width;
350 pic->alloc_mb_height = s->mb_height;
/* Ensure every per-picture table buffer is writable (un-shared),
 * copying shared buffers via av_buffer_make_writable(). */
355 static int make_tables_writable(Picture *pic)
358 #define MAKE_WRITABLE(table) \
361 (ret = av_buffer_make_writable(&pic->table)) < 0)\
365 MAKE_WRITABLE(mb_var_buf);
366 MAKE_WRITABLE(mc_mb_var_buf);
367 MAKE_WRITABLE(mb_mean_buf);
368 MAKE_WRITABLE(mbskip_table_buf);
369 MAKE_WRITABLE(qscale_table_buf);
370 MAKE_WRITABLE(mb_type_buf);
372 for (i = 0; i < 2; i++) {
373 MAKE_WRITABLE(motion_val_buf[i]);
374 MAKE_WRITABLE(ref_index_buf[i]);
381 * Allocate a Picture.
382 * The pixels are allocated/set by calling get_buffer() if shared = 0
384 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* if the cached tables were sized for a different MB geometry,
 * drop them so they get reallocated below */
388 if (pic->qscale_table_buf)
389 if ( pic->alloc_mb_width != s->mb_width
390 || pic->alloc_mb_height != s->mb_height)
391 free_picture_tables(pic);
/* shared path expects pixel data to already exist; otherwise the
 * frame buffer must not yet be allocated */
394 assert(pic->f.data[0]);
397 assert(!pic->f.data[0]);
399 if (alloc_frame_buffer(s, pic) < 0)
/* cache the strides the rest of the context derives pointers from */
402 s->linesize = pic->f.linesize[0];
403 s->uvlinesize = pic->f.linesize[1];
406 if (!pic->qscale_table_buf)
407 ret = alloc_picture_tables(s, pic);
409 ret = make_tables_writable(pic);
/* publish convenience pointers into the (now writable) table buffers */
414 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
415 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
416 pic->mb_mean = pic->mb_mean_buf->data;
419 pic->mbskip_table = pic->mbskip_table_buf->data;
/* qscale/mb_type pointers are offset so that index 0 maps to the first
 * real macroblock (the buffers carry edge padding) */
420 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
421 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
423 if (pic->motion_val_buf[0]) {
424 for (i = 0; i < 2; i++) {
425 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
426 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* failure path: release everything and report out-of-memory */
432 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
433 ff_mpeg_unref_picture(s, pic);
434 free_picture_tables(pic);
435 return AVERROR(ENOMEM);
439 * Deallocate a picture.
441 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
/* everything in Picture after (and excluding) mb_mean is cleared by the
 * memset below; fields up to mb_mean survive the unref */
443 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
446 /* WM Image / Screen codecs allocate internal buffers with different
447 * dimensions / colorspaces; ignore user-defined callbacks for these. */
448 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
449 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
450 s->codec_id != AV_CODEC_ID_MSS2)
451 ff_thread_release_buffer(s->avctx, &pic->tf);
453 av_frame_unref(&pic->f);
455 av_buffer_unref(&pic->hwaccel_priv_buf);
457 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's per-picture tables reference the same underlying buffers as
 * src's, re-referencing only buffers that actually differ, then copy the
 * derived data pointers and allocation dimensions. On a failed ref the
 * macro frees all of dst's tables and returns AVERROR(ENOMEM). */
460 static int update_picture_tables(Picture *dst, Picture *src)
464 #define UPDATE_TABLE(table)\
467 (!dst->table || dst->table->buffer != src->table->buffer)) {\
468 av_buffer_unref(&dst->table);\
469 dst->table = av_buffer_ref(src->table);\
471 free_picture_tables(dst);\
472 return AVERROR(ENOMEM);\
477 UPDATE_TABLE(mb_var_buf);
478 UPDATE_TABLE(mc_mb_var_buf);
479 UPDATE_TABLE(mb_mean_buf);
480 UPDATE_TABLE(mbskip_table_buf);
481 UPDATE_TABLE(qscale_table_buf);
482 UPDATE_TABLE(mb_type_buf);
483 for (i = 0; i < 2; i++) {
484 UPDATE_TABLE(motion_val_buf[i]);
485 UPDATE_TABLE(ref_index_buf[i]);
/* the raw data pointers can be copied verbatim since both pictures now
 * share the same buffers */
488 dst->mb_var = src->mb_var;
489 dst->mc_mb_var = src->mc_mb_var;
490 dst->mb_mean = src->mb_mean;
491 dst->mbskip_table = src->mbskip_table;
492 dst->qscale_table = src->qscale_table;
493 dst->mb_type = src->mb_type;
494 for (i = 0; i < 2; i++) {
495 dst->motion_val[i] = src->motion_val[i];
496 dst->ref_index[i] = src->ref_index[i];
499 dst->alloc_mb_width = src->alloc_mb_width;
500 dst->alloc_mb_height = src->alloc_mb_height;
/* Create a new reference to src in dst: frame data (via thread-aware
 * frame ref), side-data tables, hwaccel private data, and the scalar
 * bookkeeping fields. dst must be empty on entry; on failure dst is
 * unreferenced again (via the fail label, elided here). */
505 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
509 av_assert0(!dst->f.buf[0]);
510 av_assert0(src->f.buf[0]);
514 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
518 ret = update_picture_tables(dst, src);
/* hwaccel private data is reference-counted alongside the frame */
522 if (src->hwaccel_picture_private) {
523 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
524 if (!dst->hwaccel_priv_buf)
526 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
/* plain value fields are copied directly */
529 dst->field_picture = src->field_picture;
530 dst->mb_var_sum = src->mb_var_sum;
531 dst->mc_mb_var_sum = src->mc_mb_var_sum;
532 dst->b_frame_score = src->b_frame_score;
533 dst->needs_realloc = src->needs_realloc;
534 dst->reference = src->reference;
535 dst->shared = src->shared;
/* failure path: drop any partial reference */
539 ff_mpeg_unref_picture(s, dst);
/* Allocate the per-slice-thread state: motion-estimation maps, optional
 * noise-reduction error sums, DCT block storage, and (for H.263-family
 * codecs) the AC prediction values. Returns 0 on success, -1 on
 * allocation failure (cleanup happens later in ff_MPV_common_end()). */
543 static int init_duplicate_context(MpegEncContext *s)
545 int y_size = s->b8_stride * (2 * s->mb_height + 1);
546 int c_size = s->mb_stride * (s->mb_height + 1);
547 int yc_size = y_size + 2 * c_size;
555 s->obmc_scratchpad = NULL;
558 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
559 ME_MAP_SIZE * sizeof(uint32_t), fail)
560 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
561 ME_MAP_SIZE * sizeof(uint32_t), fail)
/* dct_error_sum is only needed when noise reduction is enabled */
562 if (s->avctx->noise_reduction) {
563 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
564 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered */
567 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
568 s->block = s->blocks[0];
570 for (i = 0; i < 12; i++) {
571 s->pblocks[i] = &s->block[i];
/* AC prediction values for H.263-family codecs; ac_val[0] is luma,
 * ac_val[1]/ac_val[2] are the chroma planes */
574 if (s->out_format == FMT_H263) {
576 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
577 yc_size * sizeof(int16_t) * 16, fail);
578 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
579 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
580 s->ac_val[2] = s->ac_val[1] + c_size;
585 return -1; // free() through ff_MPV_common_end()
/* Free the per-slice-thread buffers allocated by init_duplicate_context()
 * and ff_mpv_frame_size_alloc(). obmc_scratchpad aliases into
 * me.scratchpad, so it is only reset, not freed separately. */
588 static void free_duplicate_context(MpegEncContext *s)
593 av_freep(&s->edge_emu_buffer);
594 av_freep(&s->me.scratchpad);
598 s->obmc_scratchpad = NULL;
600 av_freep(&s->dct_error_sum);
601 av_freep(&s->me.map);
602 av_freep(&s->me.score_map);
603 av_freep(&s->blocks);
604 av_freep(&s->ac_val_base);
/* Save the per-thread fields of src into bak, so they survive the
 * wholesale memcpy done in ff_update_duplicate_context().
 * NOTE(review): most COPY(...) lines are elided in this listing. */
608 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
610 #define COPY(a) bak->a = src->a
611 COPY(edge_emu_buffer);
616 COPY(obmc_scratchpad);
623 COPY(me.map_generation);
/* Refresh a slice-thread context from the main context: copy the whole
 * struct, then restore the thread-private fields saved beforehand and
 * re-derive the pblocks pointers (which would otherwise point into the
 * source context's block storage). */
635 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
639 // FIXME copy only needed parts
641 backup_duplicate_context(&bak, dst);
642 memcpy(dst, src, sizeof(MpegEncContext));
643 backup_duplicate_context(dst, &bak);
644 for (i = 0; i < 12; i++) {
645 dst->pblocks[i] = &dst->block[i];
/* lazily (re)allocate the linesize-dependent scratch buffers */
647 if (!dst->edge_emu_buffer &&
648 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
649 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
650 "scratch buffers.\n");
653 // STOP_TIMER("update_duplicate_context")
654 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: synchronize the destination decoder context with the
 * source thread's context — (re)initialize if needed, re-reference all
 * pictures, and copy over the bitstream buffer and assorted state. */
658 int ff_mpeg_update_thread_context(AVCodecContext *dst,
659 const AVCodecContext *src)
662 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
669 // FIXME can parameters change on I-frames?
670 // in that case dst may need a reinit
/* first call on this thread: clone the struct wholesale, then reset the
 * pointers that must be thread-private before re-initializing */
671 if (!s->context_initialized) {
672 memcpy(s, s1, sizeof(MpegEncContext));
675 s->bitstream_buffer = NULL;
676 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
678 if (s1->context_initialized){
679 // s->picture_range_start += MAX_PICTURE_COUNT;
680 // s->picture_range_end += MAX_PICTURE_COUNT;
681 if((ret = ff_MPV_common_init(s)) < 0){
682 memset(s, 0, sizeof(MpegEncContext));
/* mid-stream resolution change: redo the size-dependent allocations */
689 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
690 s->context_reinit = 0;
691 s->height = s1->height;
692 s->width = s1->width;
693 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
697 s->avctx->coded_height = s1->avctx->coded_height;
698 s->avctx->coded_width = s1->avctx->coded_width;
699 s->avctx->width = s1->avctx->width;
700 s->avctx->height = s1->avctx->height;
702 s->coded_picture_number = s1->coded_picture_number;
703 s->picture_number = s1->picture_number;
704 s->input_picture_number = s1->input_picture_number;
706 av_assert0(!s->picture || s->picture != s1->picture);
/* re-reference the whole picture pool from the source thread */
708 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
709 ff_mpeg_unref_picture(s, &s->picture[i]);
710 if (s1->picture[i].f.data[0] &&
711 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
715 #define UPDATE_PICTURE(pic)\
717 ff_mpeg_unref_picture(s, &s->pic);\
718 if (s1->pic.f.data[0])\
719 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
721 ret = update_picture_tables(&s->pic, &s1->pic);\
726 UPDATE_PICTURE(current_picture);
727 UPDATE_PICTURE(last_picture);
728 UPDATE_PICTURE(next_picture);
/* the _ptr members point into s1's picture array; rebase them into s's */
730 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
731 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
732 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
734 // Error/bug resilience
735 s->next_p_frame_damaged = s1->next_p_frame_damaged;
736 s->workaround_bugs = s1->workaround_bugs;
737 s->padding_bug_score = s1->padding_bug_score;
/* bulk-copy the field range [time_increment_bits, shape) — relies on
 * the struct layout keeping these fields contiguous */
740 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
741 (char *) &s1->shape - (char *) &s1->time_increment_bits);
744 s->max_b_frames = s1->max_b_frames;
745 s->low_delay = s1->low_delay;
746 s->droppable = s1->droppable;
748 // DivX handling (doesn't work)
749 s->divx_packed = s1->divx_packed;
/* duplicate the pending bitstream data, growing our buffer if needed
 * and zeroing the required input padding */
751 if (s1->bitstream_buffer) {
752 if (s1->bitstream_buffer_size +
753 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
754 av_fast_malloc(&s->bitstream_buffer,
755 &s->allocated_bitstream_buffer_size,
756 s1->allocated_bitstream_buffer_size);
757 s->bitstream_buffer_size = s1->bitstream_buffer_size;
758 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
759 s1->bitstream_buffer_size);
760 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
761 FF_INPUT_BUFFER_PADDING_SIZE);
764 // linesize dependend scratch buffer allocation
765 if (!s->edge_emu_buffer)
767 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
768 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
769 "scratch buffers.\n");
770 return AVERROR(ENOMEM);
773 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
774 "be allocated due to unknown size.\n");
777 // MPEG2/interlacing info
/* bulk-copy [progressive_sequence, rtp_mode) — same contiguity
 * assumption as above */
778 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
779 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
781 if (!s1->first_field) {
782 s->last_pict_type = s1->pict_type;
783 if (s1->current_picture_ptr)
784 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
786 if (s1->pict_type != AV_PICTURE_TYPE_B) {
787 s->last_non_b_pict_type = s1->pict_type;
795 * Set the given MpegEncContext to common defaults
796 * (same for encoding and decoding).
797 * The changed fields will not depend upon the
798 * prior state of the MpegEncContext.
800 void ff_MPV_common_defaults(MpegEncContext *s)
/* default scale tables: MPEG-1 DC scaling, identity chroma qscale */
802 s->y_dc_scale_table =
803 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
804 s->chroma_qscale_table = ff_default_chroma_qscale_table;
805 s->progressive_frame = 1;
806 s->progressive_sequence = 1;
807 s->picture_structure = PICT_FRAME;
809 s->coded_picture_number = 0;
810 s->picture_number = 0;
811 s->input_picture_number = 0;
813 s->picture_in_gop_number = 0;
818 s->slice_context_count = 1;
822 * Set the given MpegEncContext to defaults for decoding.
823 * the changed fields will not depend upon
824 * the prior state of the MpegEncContext.
826 void ff_MPV_decode_defaults(MpegEncContext *s)
/* decoding currently needs nothing beyond the common defaults */
828 ff_MPV_common_defaults(s);
/* Initialize the error-resilience context: mirror the macroblock geometry
 * and shared tables from the MpegEncContext, allocate the ER work buffers,
 * and register mpeg_er_decode_mb() as the concealment callback. Returns
 * AVERROR(ENOMEM) if either work buffer cannot be allocated. */
831 static int init_er(MpegEncContext *s)
833 ERContext *er = &s->er;
834 int mb_array_size = s->mb_height * s->mb_stride;
837 er->avctx = s->avctx;
840 er->mb_index2xy = s->mb_index2xy;
841 er->mb_num = s->mb_num;
842 er->mb_width = s->mb_width;
843 er->mb_height = s->mb_height;
844 er->mb_stride = s->mb_stride;
845 er->b8_stride = s->b8_stride;
847 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
848 er->error_status_table = av_mallocz(mb_array_size);
849 if (!er->er_temp_buffer || !er->error_status_table)
/* these tables are shared with (owned by) the MpegEncContext */
852 er->mbskip_table = s->mbskip_table;
853 er->mbintra_table = s->mbintra_table;
855 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
856 er->dc_val[i] = s->dc_val[i];
858 er->decode_mb = mpeg_er_decode_mb;
/* failure path: free both work buffers (freeing NULL is a no-op) */
863 av_freep(&er->er_temp_buffer);
864 av_freep(&er->error_status_table);
865 return AVERROR(ENOMEM);
869 * Initialize and allocate MpegEncContext fields dependent on the resolution.
871 static int init_context_frame(MpegEncContext *s)
873 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* derive the macroblock geometry from the frame width; the strides carry
 * one extra column of edge padding */
875 s->mb_width = (s->width + 15) / 16;
876 s->mb_stride = s->mb_width + 1;
877 s->b8_stride = s->mb_width * 2 + 1;
878 s->b4_stride = s->mb_width * 4 + 1;
879 mb_array_size = s->mb_height * s->mb_stride;
880 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
882 /* set default edge pos, will be overridden
883 * in decode_header if needed */
884 s->h_edge_pos = s->mb_width * 16;
885 s->v_edge_pos = s->mb_height * 16;
887 s->mb_num = s->mb_width * s->mb_height;
892 s->block_wrap[3] = s->b8_stride;
894 s->block_wrap[5] = s->mb_stride;
896 y_size = s->b8_stride * (2 * s->mb_height + 1);
897 c_size = s->mb_stride * (s->mb_height + 1);
898 yc_size = y_size + 2 * c_size;
900 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error resilience code looks cleaner with this
901 for (y = 0; y < s->mb_height; y++)
902 for (x = 0; x < s->mb_width; x++)
903 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
905 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
908 /* Allocate MV tables */
909 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
910 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
911 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
912 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
913 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
914 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
/* the working pointers skip the edge padding row/column of each base */
915 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
916 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
917 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
918 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
919 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
920 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
922 /* Allocate MB type table */
923 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
925 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
927 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
928 mb_array_size * sizeof(float), fail);
929 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
930 mb_array_size * sizeof(float), fail);
934 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
935 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
936 /* interlaced direct mode decoding tables */
937 for (i = 0; i < 2; i++) {
939 for (j = 0; j < 2; j++) {
940 for (k = 0; k < 2; k++) {
941 FF_ALLOCZ_OR_GOTO(s->avctx,
942 s->b_field_mv_table_base[i][j][k],
943 mv_table_size * 2 * sizeof(int16_t),
945 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
948 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
949 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
950 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
952 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
955 if (s->out_format == FMT_H263) {
957 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
958 s->coded_block = s->coded_block_base + s->b8_stride + 1;
960 /* cbp, ac_pred, pred_dir */
961 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
962 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
965 if (s->h263_pred || s->h263_plus || !s->encoding) {
967 // MN: we need these for error resilience of intra-frames
968 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
969 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
970 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
971 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor value */
972 for (i = 0; i < yc_size; i++)
973 s->dc_val_base[i] = 1024;
976 /* which mb is an intra block */
977 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
978 memset(s->mbintra_table, 1, mb_array_size);
980 /* init macroblock skip table */
981 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
982 // Note the + 1 is for a quicker mpeg4 slice_end detection
986 return AVERROR(ENOMEM);
990 * init common structure for both encoder and decoder.
991 * this assumes that some variables like width/height are already set
993 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* number of slice contexts: one per slice thread when slice threading
 * is active, otherwise 1 */
996 int nb_slices = (HAVE_THREADS &&
997 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
998 s->avctx->thread_count : 1;
1000 if (s->encoding && s->avctx->slices)
1001 nb_slices = s->avctx->slices;
/* interlaced MPEG-2 needs mb_height rounded to a multiple of two
 * 16-line macroblock rows (32-line units) */
1003 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1004 s->mb_height = (s->height + 31) / 32 * 2;
1006 s->mb_height = (s->height + 15) / 16;
1008 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1009 av_log(s->avctx, AV_LOG_ERROR,
1010 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* clamp the slice count to what the thread pool and MB rows allow */
1014 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1017 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1019 max_slices = MAX_THREADS;
1020 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1021 " reducing to %d\n", nb_slices, max_slices);
1022 nb_slices = max_slices;
1025 if ((s->width || s->height) &&
1026 av_image_check_size(s->width, s->height, 0, s->avctx))
1029 ff_dct_common_init(s);
1031 s->flags = s->avctx->flags;
1032 s->flags2 = s->avctx->flags2;
1034 /* set chroma shifts */
1035 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
1037 /* convert fourcc to upper case */
1038 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1039 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1041 s->avctx->coded_frame = &s->current_picture.f;
/* encoder-side allocations.
 * NOTE(review): the guarding if (s->encoding) and related #if lines
 * appear to be elided in this listing. */
1044 if (s->msmpeg4_version) {
1045 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1046 2 * 2 * (MAX_LEVEL + 1) *
1047 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1049 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1051 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
1052 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
1053 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
1054 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1055 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1056 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1057 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1058 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1060 if (s->avctx->noise_reduction) {
1061 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
/* allocate and reset the picture pool */
1065 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1066 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1067 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1068 avcodec_get_frame_defaults(&s->picture[i].f);
1070 memset(&s->next_picture, 0, sizeof(s->next_picture));
1071 memset(&s->last_picture, 0, sizeof(s->last_picture));
1072 memset(&s->current_picture, 0, sizeof(s->current_picture));
1073 avcodec_get_frame_defaults(&s->next_picture.f);
1074 avcodec_get_frame_defaults(&s->last_picture.f);
1075 avcodec_get_frame_defaults(&s->current_picture.f);
1077 if (init_context_frame(s))
1080 s->parse_context.state = -1;
1082 s->context_initialized = 1;
1083 s->thread_context[0] = s;
1085 // if (s->width && s->height) {
/* set up one duplicated context per slice; each covers a contiguous
 * band of MB rows [start_mb_y, end_mb_y) */
1086 if (nb_slices > 1) {
1087 for (i = 1; i < nb_slices; i++) {
1088 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1089 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1092 for (i = 0; i < nb_slices; i++) {
1093 if (init_duplicate_context(s->thread_context[i]) < 0)
1095 s->thread_context[i]->start_mb_y =
1096 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1097 s->thread_context[i]->end_mb_y =
1098 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
/* single-slice path: the main context is the only slice context */
1101 if (init_duplicate_context(s) < 0)
1104 s->end_mb_y = s->mb_height;
1106 s->slice_context_count = nb_slices;
/* failure path: tear down everything allocated so far */
1111 ff_MPV_common_end(s);
1116 * Frees and resets MpegEncContext fields depending on the resolution.
1117 * Is used during resolution changes to avoid a full reinitialization of the
/* counterpart of init_context_frame(): free every resolution-dependent
 * table and null the derived pointers */
1120 static int free_context_frame(MpegEncContext *s)
1124 av_freep(&s->mb_type);
1125 av_freep(&s->p_mv_table_base);
1126 av_freep(&s->b_forw_mv_table_base);
1127 av_freep(&s->b_back_mv_table_base);
1128 av_freep(&s->b_bidir_forw_mv_table_base);
1129 av_freep(&s->b_bidir_back_mv_table_base);
1130 av_freep(&s->b_direct_mv_table_base);
/* the working pointers aliased into the *_base buffers above */
1131 s->p_mv_table = NULL;
1132 s->b_forw_mv_table = NULL;
1133 s->b_back_mv_table = NULL;
1134 s->b_bidir_forw_mv_table = NULL;
1135 s->b_bidir_back_mv_table = NULL;
1136 s->b_direct_mv_table = NULL;
1137 for (i = 0; i < 2; i++) {
1138 for (j = 0; j < 2; j++) {
1139 for (k = 0; k < 2; k++) {
1140 av_freep(&s->b_field_mv_table_base[i][j][k]);
1141 s->b_field_mv_table[i][j][k] = NULL;
1143 av_freep(&s->b_field_select_table[i][j]);
1144 av_freep(&s->p_field_mv_table_base[i][j]);
1145 s->p_field_mv_table[i][j] = NULL;
1147 av_freep(&s->p_field_select_table[i]);
1150 av_freep(&s->dc_val_base);
1151 av_freep(&s->coded_block_base);
1152 av_freep(&s->mbintra_table);
1153 av_freep(&s->cbp_table);
1154 av_freep(&s->pred_dir_table);
1156 av_freep(&s->mbskip_table);
1158 av_freep(&s->er.error_status_table);
1159 av_freep(&s->er.er_temp_buffer);
1160 av_freep(&s->mb_index2xy);
1161 av_freep(&s->lambda_table);
1163 av_freep(&s->cplx_tab);
1164 av_freep(&s->bits_tab);
/* strides are rederived on the next frame-buffer allocation */
1166 s->linesize = s->uvlinesize = 0;
/**
 * Re-initialize the size-dependent parts of the context after a mid-stream
 * resolution change, without tearing down the whole MpegEncContext.
 * Frees the per-slice thread contexts and size-dependent tables, marks all
 * pictures for reallocation, recomputes mb_height, then rebuilds everything.
 * On failure it falls through to ff_MPV_common_end() (full teardown).
 */
1171 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Tear down duplicated slice-thread contexts (index 0 is s itself and is
 * only free_duplicate_context()'d, never av_freep()'d). */
1175 if (s->slice_context_count > 1) {
1176 for (i = 0; i < s->slice_context_count; i++) {
1177 free_duplicate_context(s->thread_context[i]);
1179 for (i = 1; i < s->slice_context_count; i++) {
1180 av_freep(&s->thread_context[i]);
1183 free_duplicate_context(s);
1185 if ((err = free_context_frame(s)) < 0)
/* Existing picture buffers have the old geometry; force reallocation the
 * next time each slot is picked by ff_find_unused_picture(). */
1189 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1190 s->picture[i].needs_realloc = 1;
1193 s->last_picture_ptr =
1194 s->next_picture_ptr =
1195 s->current_picture_ptr = NULL;
/* Interlaced MPEG-2 rounds the MB height to a multiple of two field
 * macroblock rows (32 luma lines); everything else uses 16-line MBs. */
1198 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1199 s->mb_height = (s->height + 31) / 32 * 2;
1201 s->mb_height = (s->height + 15) / 16;
1203 if ((s->width || s->height) &&
1204 av_image_check_size(s->width, s->height, 0, s->avctx))
1205 return AVERROR_INVALIDDATA;
1207 if ((err = init_context_frame(s)))
1210 s->thread_context[0] = s;
/* Recreate the slice-thread contexts for the new geometry and split the MB
 * rows evenly (with rounding) across the slices. */
1212 if (s->width && s->height) {
1213 int nb_slices = s->slice_context_count;
1214 if (nb_slices > 1) {
1215 for (i = 1; i < nb_slices; i++) {
1216 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1217 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1220 for (i = 0; i < nb_slices; i++) {
1221 if (init_duplicate_context(s->thread_context[i]) < 0)
1223 s->thread_context[i]->start_mb_y =
1224 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1225 s->thread_context[i]->end_mb_y =
1226 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1229 if (init_duplicate_context(s) < 0)
1232 s->end_mb_y = s->mb_height;
1234 s->slice_context_count = nb_slices;
/* Error path (label not visible here): full teardown. */
1239 ff_MPV_common_end(s);
1243 /* init common structure for both encoder and decoder */
/* Full teardown counterpart of ff_MPV_common_init(): releases slice-thread
 * contexts, bitstream/parse buffers, quantizer matrices, all Pictures and
 * the size-dependent tables, then resets the context to "uninitialized". */
1244 void ff_MPV_common_end(MpegEncContext *s)
1248 if (s->slice_context_count > 1) {
1249 for (i = 0; i < s->slice_context_count; i++) {
1250 free_duplicate_context(s->thread_context[i]);
1252 for (i = 1; i < s->slice_context_count; i++) {
1253 av_freep(&s->thread_context[i]);
1255 s->slice_context_count = 1;
1256 } else free_duplicate_context(s);
1258 av_freep(&s->parse_context.buffer);
1259 s->parse_context.buffer_size = 0;
1261 av_freep(&s->bitstream_buffer);
1262 s->allocated_bitstream_buffer_size = 0;
1264 av_freep(&s->avctx->stats_out);
1265 av_freep(&s->ac_stats);
/* The chroma matrices may alias the luma ones; only free them when they are
 * separate allocations, then clear the aliases unconditionally. */
1267 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1268 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1269 s->q_chroma_intra_matrix= NULL;
1270 s->q_chroma_intra_matrix16= NULL;
1271 av_freep(&s->q_intra_matrix);
1272 av_freep(&s->q_inter_matrix);
1273 av_freep(&s->q_intra_matrix16);
1274 av_freep(&s->q_inter_matrix16);
1275 av_freep(&s->input_picture);
1276 av_freep(&s->reordered_input_picture);
1277 av_freep(&s->dct_offset);
/* Release every Picture slot plus the standalone last/current/next/new
 * copies (each needs both its side tables and its frame refs dropped). */
1280 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1281 free_picture_tables(&s->picture[i]);
1282 ff_mpeg_unref_picture(s, &s->picture[i]);
1285 av_freep(&s->picture);
1286 free_picture_tables(&s->last_picture);
1287 ff_mpeg_unref_picture(s, &s->last_picture);
1288 free_picture_tables(&s->current_picture);
1289 ff_mpeg_unref_picture(s, &s->current_picture);
1290 free_picture_tables(&s->next_picture);
1291 ff_mpeg_unref_picture(s, &s->next_picture);
1292 free_picture_tables(&s->new_picture);
1293 ff_mpeg_unref_picture(s, &s->new_picture);
1295 free_context_frame(s);
1297 s->context_initialized = 0;
1298 s->last_picture_ptr =
1299 s->next_picture_ptr =
1300 s->current_picture_ptr = NULL;
1301 s->linesize = s->uvlinesize = 0;
/* Build the derived lookup tables of an RLTable: for each "last" class
 * (0 = not last coefficient, 1 = last), compute per-run maximum level,
 * per-level maximum run, and the index of the first table entry for each
 * run. Tables live either in caller-provided static storage or on the heap. */
1304 void ff_init_rl(RLTable *rl,
1305 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1307 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1308 uint8_t index_run[MAX_RUN + 1];
1309 int last, run, level, start, end, i;
1311 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1312 if (static_store && rl->max_level[0])
1315 /* compute max_level[], max_run[] and index_run[] */
1316 for (last = 0; last < 2; last++) {
/* index_run entries default to rl->n, the "no entry for this run" marker. */
1325 memset(max_level, 0, MAX_RUN + 1);
1326 memset(max_run, 0, MAX_LEVEL + 1);
1327 memset(index_run, rl->n, MAX_RUN + 1);
1328 for (i = start; i < end; i++) {
1329 run = rl->table_run[i];
1330 level = rl->table_level[i];
1331 if (index_run[run] == rl->n)
1333 if (level > max_level[run])
1334 max_level[run] = level;
1335 if (run > max_run[level])
1336 max_run[level] = run;
/* Static storage layout per "last": [0..MAX_RUN] max_level,
 * [MAX_RUN+1 ..] max_run, then index_run; otherwise av_malloc'd copies. */
1339 rl->max_level[last] = static_store[last];
1341 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1342 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1344 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1346 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1347 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1349 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1351 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1352 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute, for every qscale 0..31, an RL_VLC table whose levels are
 * already dequantized (level * qmul + qadd), so the decoder's inner loop
 * avoids the multiply. Escape, illegal and continuation codes are kept
 * as-is (handled in the branches whose bodies are elided here). */
1356 void ff_init_vlc_rl(RLTable *rl)
1360 for (q = 0; q < 32; q++) {
/* Classic H.263-style dequant rounding: qadd = (q - 1) | 1 (odd). */
1362 int qadd = (q - 1) | 1;
1368 for (i = 0; i < rl->vlc.table_size; i++) {
1369 int code = rl->vlc.table[i][0];
1370 int len = rl->vlc.table[i][1];
1373 if (len == 0) { // illegal code
1376 } else if (len < 0) { // more bits needed
1380 if (code == rl->n) { // esc
/* run is stored +1; codes past rl->last get +192 to flag "last" runs. */
1384 run = rl->table_run[code] + 1;
1385 level = rl->table_level[code] * qmul + qadd;
1386 if (code >= rl->last) run += 192;
1389 rl->rl_vlc[q][i].len = len;
1390 rl->rl_vlc[q][i].level = level;
1391 rl->rl_vlc[q][i].run = run;
/* Unreference every non-reference picture in the pool; the picture the
 * decoder is currently writing to is kept unless remove_current is set. */
1396 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1400 /* release non reference frames */
1401 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1402 if (!s->picture[i].reference &&
1403 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1404 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Whether a Picture slot may be recycled: the last reference picture is
 * never considered free; an unallocated slot (no data[0]) is; a slot marked
 * needs_realloc is free unless it is still held as a delayed reference. */
1409 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1411 if (pic == s->last_picture_ptr)
1413 if (pic->f.data[0] == NULL)
1415 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Pick an index into s->picture[] to reuse. First pass prefers slots that
 * are completely unallocated; second pass accepts anything pic_is_unused()
 * approves. Running out is treated as an internal error (see the long
 * rationale below) rather than a recoverable condition. */
1420 static int find_unused_picture(MpegEncContext *s, int shared)
1425 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1426 if (s->picture[i].f.data[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1430 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1431 if (pic_is_unused(s, &s->picture[i]))
1436 av_log(s->avctx, AV_LOG_FATAL,
1437 "Internal error, picture buffer overflow\n");
1438 /* We could return -1, but the codec would crash trying to draw into a
1439 * non-existing frame anyway. This is safer than waiting for a random crash.
1440 * Also the return of this is never useful, an encoder must only allocate
1441 * as much as allowed in the specification. This has no relationship to how
1442 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1443 * enough for such valid streams).
1444 * Plus, a decoder has to check stream validity and remove frames if too
1445 * many reference frames are around. Waiting for "OOM" is not correct at
1446 * all. Similarly, missing reference frames have to be replaced by
1447 * interpolated/MC frames, anything else is a bug in the codec ...
/* Public wrapper around find_unused_picture(): if the chosen slot was
 * flagged needs_realloc (e.g. after a resolution change), drop its old
 * tables and references and reset its AVFrame before handing it out. */
1453 int ff_find_unused_picture(MpegEncContext *s, int shared)
1455 int ret = find_unused_picture(s, shared);
1457 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1458 if (s->picture[ret].needs_realloc) {
1459 s->picture[ret].needs_realloc = 0;
1460 free_picture_tables(&s->picture[ret]);
1461 ff_mpeg_unref_picture(s, &s->picture[ret]);
1462 avcodec_get_frame_defaults(&s->picture[ret].f);
/* Encoder noise reduction: refresh the per-coefficient DCT offsets from the
 * accumulated error statistics, separately for intra and inter blocks.
 * Statistics are halved once the sample count exceeds 2^16 so the window
 * stays bounded (a simple exponential decay). */
1468 static void update_noise_reduction(MpegEncContext *s)
1472 for (intra = 0; intra < 2; intra++) {
1473 if (s->dct_count[intra] > (1 << 16)) {
1474 for (i = 0; i < 64; i++) {
1475 s->dct_error_sum[intra][i] >>= 1;
1477 s->dct_count[intra] >>= 1;
/* offset = strength * count / error_sum, rounded; +1 avoids div-by-zero. */
1480 for (i = 0; i < 64; i++) {
1481 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1482 s->dct_count[intra] +
1483 s->dct_error_sum[intra][i] / 2) /
1484 (s->dct_error_sum[intra][i] + 1);
1490 * generic function for encode/decode called after coding/decoding
1491 * the header and before a frame is coded/decoded.
/* Sets up current/last/next picture pointers for the upcoming frame:
 * releases stale pictures, allocates the current picture, allocates grey
 * dummy reference frames when references are missing, doubles linesizes for
 * field pictures and selects the dequantizer functions. Returns <0 (in
 * elided branches) on failure. */
1493 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1499 if (!ff_thread_can_start_frame(avctx)) {
1500 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1504 /* mark & release old frames */
1505 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1506 s->last_picture_ptr != s->next_picture_ptr &&
1507 s->last_picture_ptr->f.data[0]) {
1508 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1511 /* release forgotten pictures */
1512 /* if (mpeg124/h263) */
/* A referenced picture that is neither last nor next should not exist in
 * single-threaded decoding ("zombie"); with frame threads it is normal. */
1514 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1515 if (&s->picture[i] != s->last_picture_ptr &&
1516 &s->picture[i] != s->next_picture_ptr &&
1517 s->picture[i].reference && !s->picture[i].needs_realloc) {
1518 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1519 av_log(avctx, AV_LOG_ERROR,
1520 "releasing zombie picture\n");
1521 ff_mpeg_unref_picture(s, &s->picture[i]);
1527 ff_release_unused_pictures(s, 1);
1529 if (s->current_picture_ptr &&
1530 s->current_picture_ptr->f.data[0] == NULL) {
1531 // we already have a unused image
1532 // (maybe it was set before reading the header)
1533 pic = s->current_picture_ptr;
1535 i = ff_find_unused_picture(s, 0);
1537 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1540 pic = &s->picture[i];
1544 if (!s->droppable) {
1545 if (s->pict_type != AV_PICTURE_TYPE_B)
1549 pic->f.coded_picture_number = s->coded_picture_number++;
1551 if (ff_alloc_picture(s, pic, 0) < 0)
1554 s->current_picture_ptr = pic;
1555 // FIXME use only the vars from current_pic
/* Field-coded MPEG-1/2 derives top_field_first from the picture structure
 * and which field arrived first, rather than from the header flag alone. */
1556 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1557 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1558 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1559 if (s->picture_structure != PICT_FRAME)
1560 s->current_picture_ptr->f.top_field_first =
1561 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1563 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1564 !s->progressive_sequence;
1565 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1568 s->current_picture_ptr->f.pict_type = s->pict_type;
1569 // if (s->flags && CODEC_FLAG_QSCALE)
1570 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1571 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1573 ff_mpeg_unref_picture(s, &s->current_picture);
1574 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1575 s->current_picture_ptr)) < 0)
/* Non-B pictures advance the reference chain: next becomes last, current
 * becomes next. B pictures reference but never become references here. */
1578 if (s->pict_type != AV_PICTURE_TYPE_B) {
1579 s->last_picture_ptr = s->next_picture_ptr;
1581 s->next_picture_ptr = s->current_picture_ptr;
1583 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1584 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1585 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1586 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1587 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1588 s->pict_type, s->droppable);
/* Missing last reference (stream starts on a non-keyframe, or a field-based
 * first keyframe): fabricate a grey dummy frame so MC has valid data. */
1590 if ((s->last_picture_ptr == NULL ||
1591 s->last_picture_ptr->f.data[0] == NULL) &&
1592 (s->pict_type != AV_PICTURE_TYPE_I ||
1593 s->picture_structure != PICT_FRAME)) {
1594 int h_chroma_shift, v_chroma_shift;
1595 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1596 &h_chroma_shift, &v_chroma_shift);
1597 if (s->pict_type != AV_PICTURE_TYPE_I)
1598 av_log(avctx, AV_LOG_ERROR,
1599 "warning: first frame is no keyframe\n");
1600 else if (s->picture_structure != PICT_FRAME)
1601 av_log(avctx, AV_LOG_INFO,
1602 "allocate dummy last picture for field based first keyframe\n");
1604 /* Allocate a dummy frame */
1605 i = ff_find_unused_picture(s, 0);
1607 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1610 s->last_picture_ptr = &s->picture[i];
1611 s->last_picture_ptr->f.key_frame = 0;
1612 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1613 s->last_picture_ptr = NULL;
/* 0x80 = mid grey for both luma and chroma planes. */
1617 memset(s->last_picture_ptr->f.data[0], 0x80,
1618 avctx->height * s->last_picture_ptr->f.linesize[0]);
1619 memset(s->last_picture_ptr->f.data[1], 0x80,
1620 (avctx->height >> v_chroma_shift) *
1621 s->last_picture_ptr->f.linesize[1]);
1622 memset(s->last_picture_ptr->f.data[2], 0x80,
1623 (avctx->height >> v_chroma_shift) *
1624 s->last_picture_ptr->f.linesize[2]);
/* FLV1/H.263 use luma 16 ("black" in their convention) instead of grey. */
1626 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1627 for(i=0; i<avctx->height; i++)
1628 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
/* Mark both fields of the dummy frame as fully decoded for frame threads. */
1631 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1632 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1634 if ((s->next_picture_ptr == NULL ||
1635 s->next_picture_ptr->f.data[0] == NULL) &&
1636 s->pict_type == AV_PICTURE_TYPE_B) {
1637 /* Allocate a dummy frame */
1638 i = ff_find_unused_picture(s, 0);
1640 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1643 s->next_picture_ptr = &s->picture[i];
1644 s->next_picture_ptr->f.key_frame = 0;
1645 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1646 s->next_picture_ptr = NULL;
1649 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1650 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1653 #if 0 // BUFREF-FIXME
1654 memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1655 memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
/* Refresh the by-value last/next Picture copies from the pointers. */
1657 if (s->last_picture_ptr) {
1658 ff_mpeg_unref_picture(s, &s->last_picture);
1659 if (s->last_picture_ptr->f.data[0] &&
1660 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1661 s->last_picture_ptr)) < 0)
1664 if (s->next_picture_ptr) {
1665 ff_mpeg_unref_picture(s, &s->next_picture);
1666 if (s->next_picture_ptr->f.data[0] &&
1667 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1668 s->next_picture_ptr)) < 0)
1672 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1673 s->last_picture_ptr->f.data[0]));
/* Field pictures: address the selected field by offsetting data[] and
 * doubling the linesizes so MC walks every other line. */
1675 if (s->picture_structure!= PICT_FRAME) {
1677 for (i = 0; i < 4; i++) {
1678 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1679 s->current_picture.f.data[i] +=
1680 s->current_picture.f.linesize[i];
1682 s->current_picture.f.linesize[i] *= 2;
1683 s->last_picture.f.linesize[i] *= 2;
1684 s->next_picture.f.linesize[i] *= 2;
1688 s->err_recognition = avctx->err_recognition;
1690 /* set dequantizer, we can't do it during init as
1691 * it might change for mpeg4 and we can't do it in the header
1692 * decode as init is not called for mpeg4 there yet */
1693 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1694 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1695 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1696 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1697 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1698 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1700 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1701 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1704 if (s->dct_error_sum) {
1705 assert(s->avctx->noise_reduction && s->encoding);
1706 update_noise_reduction(s);
1709 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1710 return ff_xvmc_field_start(s, avctx);
1715 /* generic function for encode/decode called after a
1716 * frame has been coded/decoded. */
/* Per-frame epilogue: draws replicated edge pixels around the decoded
 * reference frame (needed for unrestricted MV prediction), records the last
 * picture type / lambda, syncs the current Picture back into the pool,
 * drops non-reference frames and reports decode completion to frame
 * threads. */
1717 void ff_MPV_frame_end(MpegEncContext *s)
1720 /* redraw edges for the frame if decoding didn't complete */
1721 // just to make sure that all data is rendered.
1722 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1723 ff_xvmc_field_end(s);
/* Edge extension is skipped for hwaccel/VDPAU surfaces and EMU_EDGE mode,
 * and only needed for reference frames with unrestricted MVs. */
1724 } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1725 !s->avctx->hwaccel &&
1726 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1727 s->unrestricted_mv &&
1728 s->current_picture.reference &&
1730 !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1733 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1734 int hshift = desc->log2_chroma_w;
1735 int vshift = desc->log2_chroma_h;
1736 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1737 s->h_edge_pos, s->v_edge_pos,
1738 EDGE_WIDTH, EDGE_WIDTH,
1739 EDGE_TOP | EDGE_BOTTOM);
1740 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1741 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1742 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1743 EDGE_TOP | EDGE_BOTTOM);
1744 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1745 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1746 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1747 EDGE_TOP | EDGE_BOTTOM);
1752 s->last_pict_type = s->pict_type;
1753 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1754 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1755 s->last_non_b_pict_type = s->pict_type;
1758 /* copy back current_picture variables */
1759 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1760 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1761 s->picture[i] = s->current_picture;
1765 assert(i < MAX_PICTURE_COUNT);
1769 /* release non-reference frames */
1770 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1771 if (!s->picture[i].reference)
1772 ff_mpeg_unref_picture(s, &s->picture[i]);
1775 // clear copies, to avoid confusion
1777 memset(&s->last_picture, 0, sizeof(Picture));
1778 memset(&s->next_picture, 0, sizeof(Picture));
1779 memset(&s->current_picture, 0, sizeof(Picture));
1781 s->avctx->coded_frame = &s->current_picture_ptr->f;
1783 if (s->current_picture.reference)
1784 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1788 * Draw a line from (ex, ey) -> (sx, sy).
1789 * @param w width of the image
1790 * @param h height of the image
1791 * @param stride stride/linesize of the image
1792 * @param color color of the line (added to the existing pixel values)
/* Anti-aliased line drawing into a single plane, used by the motion-vector
 * visualisation. Walks the major axis with 16.16 fixed-point interpolation
 * along the minor axis, splitting `color` between the two adjacent pixels. */
1794 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1795 int w, int h, int stride, int color)
1799 sx = av_clip(sx, 0, w - 1);
1800 sy = av_clip(sy, 0, h - 1);
1801 ex = av_clip(ex, 0, w - 1);
1802 ey = av_clip(ey, 0, h - 1);
1804 buf[sy * stride + sx] += color;
/* Mostly-horizontal case: iterate over x, interpolate y. */
1806 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1808 FFSWAP(int, sx, ex);
1809 FFSWAP(int, sy, ey);
1811 buf += sx + sy * stride;
1813 f = ((ey - sy) << 16) / ex;
1814 for (x = 0; x <= ex; x++) {
1816 fr = (x * f) & 0xFFFF;
1817 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1818 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* Mostly-vertical case: iterate over y, interpolate x. */
1822 FFSWAP(int, sx, ex);
1823 FFSWAP(int, sy, ey);
1825 buf += sx + sy * stride;
1828 f = ((ex - sx) << 16) / ey;
1831 for(y= 0; y <= ey; y++){
1833 fr = (y*f) & 0xFFFF;
1834 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1835 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1841 * Draw an arrow from (ex, ey) -> (sx, sy).
1842 * @param w width of the image
1843 * @param h height of the image
1844 * @param stride stride/linesize of the image
1845 * @param color color of the arrow
/* Draws the shaft with draw_line() and, when the vector is long enough
 * (> 3 pixels), adds two head strokes perpendicular/oblique to the shaft.
 * Coordinates are clipped loosely (+/-100) so slightly off-screen vectors
 * still render their visible portion. */
1847 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1848 int ey, int w, int h, int stride, int color)
1852 sx = av_clip(sx, -100, w + 100);
1853 sy = av_clip(sy, -100, h + 100);
1854 ex = av_clip(ex, -100, w + 100);
1855 ey = av_clip(ey, -100, h + 100);
1860 if (dx * dx + dy * dy > 3 * 3) {
/* length in 12.4 fixed point; rx/ry become the head stroke offsets. */
1863 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1865 // FIXME subpixel accuracy
1866 rx = ROUNDED_DIV(rx * 3 << 4, length);
1867 ry = ROUNDED_DIV(ry * 3 << 4, length);
1869 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1870 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1872 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1876 * Print debugging info for the given picture.
/* Two independent visualisations, gated on avctx->debug / debug_mv:
 *  1. a textual per-macroblock dump (skip count, QP, MB type glyphs) to the
 *     log, and
 *  2. an in-place overlay on `pict` (motion-vector arrows, QP shading,
 *     MB-type coloring of the chroma planes, partition markers).
 * Skipped entirely for hwaccel/VDPAU frames, which have no accessible
 * pixel data or MB metadata. */
1878 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1880 int mb_width, int mb_height, int mb_stride, int quarter_sample)
1882 if (avctx->hwaccel || !p || !p->mb_type
1883 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
/* --- textual per-MB dump --- */
1887 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1890 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1891 av_get_picture_type_char(pict->pict_type));
1892 for (y = 0; y < mb_height; y++) {
1893 for (x = 0; x < mb_width; x++) {
1894 if (avctx->debug & FF_DEBUG_SKIP) {
1895 int count = mbskip_table[x + y * mb_stride];
1898 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1900 if (avctx->debug & FF_DEBUG_QP) {
1901 av_log(avctx, AV_LOG_DEBUG, "%2d",
1902 p->qscale_table[x + y * mb_stride]);
1904 if (avctx->debug & FF_DEBUG_MB_TYPE) {
1905 int mb_type = p->mb_type[x + y * mb_stride];
1906 // Type & MV direction
1907 if (IS_PCM(mb_type))
1908 av_log(avctx, AV_LOG_DEBUG, "P");
1909 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1910 av_log(avctx, AV_LOG_DEBUG, "A");
1911 else if (IS_INTRA4x4(mb_type))
1912 av_log(avctx, AV_LOG_DEBUG, "i");
1913 else if (IS_INTRA16x16(mb_type))
1914 av_log(avctx, AV_LOG_DEBUG, "I");
1915 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1916 av_log(avctx, AV_LOG_DEBUG, "d");
1917 else if (IS_DIRECT(mb_type))
1918 av_log(avctx, AV_LOG_DEBUG, "D");
1919 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1920 av_log(avctx, AV_LOG_DEBUG, "g");
1921 else if (IS_GMC(mb_type))
1922 av_log(avctx, AV_LOG_DEBUG, "G");
1923 else if (IS_SKIP(mb_type))
1924 av_log(avctx, AV_LOG_DEBUG, "S");
1925 else if (!USES_LIST(mb_type, 1))
1926 av_log(avctx, AV_LOG_DEBUG, ">");
1927 else if (!USES_LIST(mb_type, 0))
1928 av_log(avctx, AV_LOG_DEBUG, "<");
1930 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1931 av_log(avctx, AV_LOG_DEBUG, "X");
/* Second glyph: partition shape (+ 8x8, - 16x8, | 8x16, blank 16x16/intra). */
1935 if (IS_8X8(mb_type))
1936 av_log(avctx, AV_LOG_DEBUG, "+");
1937 else if (IS_16X8(mb_type))
1938 av_log(avctx, AV_LOG_DEBUG, "-");
1939 else if (IS_8X16(mb_type))
1940 av_log(avctx, AV_LOG_DEBUG, "|");
1941 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1942 av_log(avctx, AV_LOG_DEBUG, " ");
1944 av_log(avctx, AV_LOG_DEBUG, "?");
1947 if (IS_INTERLACED(mb_type))
1948 av_log(avctx, AV_LOG_DEBUG, "=");
1950 av_log(avctx, AV_LOG_DEBUG, " ");
1953 av_log(avctx, AV_LOG_DEBUG, "\n");
/* --- visual overlay drawn into the frame itself --- */
1957 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1958 (avctx->debug_mv)) {
1959 const int shift = 1 + quarter_sample;
1963 int h_chroma_shift, v_chroma_shift, block_height;
1964 const int width = avctx->width;
1965 const int height = avctx->height;
/* H.264/SVQ3 store MVs at 4x4 granularity (log2 = 2), others at 8x8. */
1966 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1967 const int mv_stride = (mb_width << mv_sample_log2) +
1968 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1970 *low_delay = 0; // needed to see the vectors without trashing the buffers
1972 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1974 av_frame_make_writable(pict);
1976 pict->opaque = NULL;
1977 ptr = pict->data[0];
1978 block_height = 16 >> v_chroma_shift;
1980 for (mb_y = 0; mb_y < mb_height; mb_y++) {
1982 for (mb_x = 0; mb_x < mb_width; mb_x++) {
1983 const int mb_index = mb_x + mb_y * mb_stride;
/* Motion vector arrows: type selects P-forward / B-forward / B-backward,
 * each gated by its own FF_DEBUG_VIS_MV_* flag (checks partly elided). */
1984 if ((avctx->debug_mv) && p->motion_val[0]) {
1986 for (type = 0; type < 3; type++) {
1990 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1991 (pict->pict_type!= AV_PICTURE_TYPE_P))
1996 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1997 (pict->pict_type!= AV_PICTURE_TYPE_B))
2002 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2003 (pict->pict_type!= AV_PICTURE_TYPE_B))
2008 if (!USES_LIST(p->mb_type[mb_index], direction))
/* One arrow per partition: 4 for 8x8, 2 for 16x8 / 8x16, 1 for 16x16. */
2011 if (IS_8X8(p->mb_type[mb_index])) {
2013 for (i = 0; i < 4; i++) {
2014 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2015 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2016 int xy = (mb_x * 2 + (i & 1) +
2017 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2018 int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
2019 int my = (p->motion_val[direction][xy][1] >> shift) + sy;
2020 draw_arrow(ptr, sx, sy, mx, my, width,
2021 height, pict->linesize[0], 100);
2023 } else if (IS_16X8(p->mb_type[mb_index])) {
2025 for (i = 0; i < 2; i++) {
2026 int sx = mb_x * 16 + 8;
2027 int sy = mb_y * 16 + 4 + 8 * i;
2028 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2029 int mx = (p->motion_val[direction][xy][0] >> shift);
2030 int my = (p->motion_val[direction][xy][1] >> shift);
2032 if (IS_INTERLACED(p->mb_type[mb_index]))
2035 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2036 height, pict->linesize[0], 100);
2038 } else if (IS_8X16(p->mb_type[mb_index])) {
2040 for (i = 0; i < 2; i++) {
2041 int sx = mb_x * 16 + 4 + 8 * i;
2042 int sy = mb_y * 16 + 8;
2043 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2044 int mx = p->motion_val[direction][xy][0] >> shift;
2045 int my = p->motion_val[direction][xy][1] >> shift;
2047 if (IS_INTERLACED(p->mb_type[mb_index]))
2050 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2051 height, pict->linesize[0], 100);
2054 int sx= mb_x * 16 + 8;
2055 int sy= mb_y * 16 + 8;
2056 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2057 int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2058 int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2059 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
/* QP visualisation: paint both chroma planes of the MB with a grey level
 * proportional to qscale (broadcast to 8 bytes via the 0x0101... constant). */
2063 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2064 uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2065 0x0101010101010101ULL;
2067 for (y = 0; y < block_height; y++) {
2068 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2069 (block_height * mb_y + y) *
2070 pict->linesize[1]) = c;
2071 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2072 (block_height * mb_y + y) *
2073 pict->linesize[2]) = c;
2076 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2078 int mb_type = p->mb_type[mb_index];
/* Map MB type to a chroma (u,v) hue via a point on a color circle. */
2081 #define COLOR(theta, r) \
2082 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2083 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2087 if (IS_PCM(mb_type)) {
2089 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2090 IS_INTRA16x16(mb_type)) {
2092 } else if (IS_INTRA4x4(mb_type)) {
2094 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2096 } else if (IS_DIRECT(mb_type)) {
2098 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2100 } else if (IS_GMC(mb_type)) {
2102 } else if (IS_SKIP(mb_type)) {
2104 } else if (!USES_LIST(mb_type, 1)) {
2106 } else if (!USES_LIST(mb_type, 0)) {
2109 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2113 u *= 0x0101010101010101ULL;
2114 v *= 0x0101010101010101ULL;
2115 for (y = 0; y < block_height; y++) {
2116 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2117 (block_height * mb_y + y) * pict->linesize[1]) = u;
2118 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2119 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* Partition boundary markers: XOR 0x80 flips pixels, keeping them visible
 * over any background. Horizontal split for 16x8/8x8, vertical for 8x16/8x8. */
2123 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2124 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2125 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2126 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2127 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2129 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2130 for (y = 0; y < 16; y++)
2131 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2132 pict->linesize[0]] ^= 0x80;
/* 8x8 blocks with 4x4 MV granularity: mark sub-splits where the four
 * 4x4 vectors inside an 8x8 block actually differ. */
2134 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2135 int dm = 1 << (mv_sample_log2 - 2);
2136 for (i = 0; i < 4; i++) {
2137 int sx = mb_x * 16 + 8 * (i & 1);
2138 int sy = mb_y * 16 + 8 * (i >> 1);
2139 int xy = (mb_x * 2 + (i & 1) +
2140 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2142 int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2143 if (mv[0] != mv[dm] ||
2144 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2145 for (y = 0; y < 8; y++)
2146 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2147 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2148 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2149 pict->linesize[0]) ^= 0x8080808080808080ULL;
2153 if (IS_INTERLACED(mb_type) &&
2154 avctx->codec->id == AV_CODEC_ID_H264) {
2158 mbskip_table[mb_index] = 0;
/* Convenience wrapper feeding the MpegEncContext geometry and skip table
 * into the generic ff_print_debug_info2(). */
2164 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2166 ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2167 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/* Export the picture's qscale table as AVFrame side data. A new reference
 * to the underlying buffer is taken and its view offset past the first
 * two MB rows + 1 (the table's guard border) before attaching it. */
2170 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2172 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2173 int offset = 2*s->mb_stride + 1;
/* NOTE(review): the ENOMEM return presumably follows a `!ref` check on an
 * elided line — av_buffer_ref() returns NULL on allocation failure. */
2175 return AVERROR(ENOMEM);
2176 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2177 ref->size -= offset;
2178 ref->data += offset;
2179 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/* Half-pel motion compensation for one block when decoding at reduced
 * resolution (lowres). Subpel fractions are kept in `lowres+1` extra bits
 * (s_mask) and rescaled to the 1/8-pel positions expected by the
 * h264_chroma MC functions; out-of-picture reads go through the
 * emulated-edge buffer. Returns the emulated-edge flag (visible `return`
 * elided). */
2182 static inline int hpel_motion_lowres(MpegEncContext *s,
2183 uint8_t *dest, uint8_t *src,
2184 int field_based, int field_select,
2185 int src_x, int src_y,
2186 int width, int height, int stride,
2187 int h_edge_pos, int v_edge_pos,
2188 int w, int h, h264_chroma_mc_func *pix_op,
2189 int motion_x, int motion_y)
2191 const int lowres = s->avctx->lowres;
2192 const int op_index = FFMIN(lowres, 2);
/* s_mask keeps the fractional (sub-sample) bits of the vector. */
2193 const int s_mask = (2 << lowres) - 1;
/* Quarter-sample vectors are halved first (handling elided). */
2197 if (s->quarter_sample) {
2202 sx = motion_x & s_mask;
2203 sy = motion_y & s_mask;
/* Note: ">> lowres + 1" parses as ">> (lowres + 1)" — integer part of MV. */
2204 src_x += motion_x >> lowres + 1;
2205 src_y += motion_y >> lowres + 1;
2207 src += src_y * stride + src_x;
2209 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2210 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2211 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
2212 (h + 1) << field_based, src_x,
2213 src_y << field_based,
2216 src = s->edge_emu_buffer;
/* Rescale the fraction to the 0..7 range used by pix_op. */
2220 sx = (sx << 2) >> lowres;
2221 sy = (sy << 2) >> lowres;
2224 pix_op[op_index](dest, src, stride, h, sx, sy);
2228 /* apply one mpeg motion vector to the three components */
/**
 * Low-resolution (lowres) motion compensation of one macroblock: compensates
 * the luma plane and, unless CODEC_FLAG_GRAY is set, both chroma planes,
 * using the h264 chroma MC functions in pix_op as interpolation primitives.
 * NOTE(review): interior source lines are elided in this extract; the
 * comments below describe only the statements that are visible here.
 */
2229 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2236 uint8_t **ref_picture,
2237 h264_chroma_mc_func *pix_op,
2238 int motion_x, int motion_y,
2241 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2242 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
2244 const int lowres = s->avctx->lowres;
// index into pix_op[]: depends on lowres level and horizontal chroma subsampling
2245 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2);
2246 const int block_s = 8>>lowres;
// mask selecting the sub-pel fraction bits kept at this lowres level
2247 const int s_mask = (2 << lowres) - 1;
2248 const int h_edge_pos = s->h_edge_pos >> lowres;
2249 const int v_edge_pos = s->v_edge_pos >> lowres;
2250 linesize = s->current_picture.f.linesize[0] << field_based;
2251 uvlinesize = s->current_picture.f.linesize[1] << field_based;
2253 // FIXME obviously not perfect but qpel will not work in lowres anyway
2254 if (s->quarter_sample) {
2260 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
// split MV into sub-pel fraction (sx/sy) and integer source position; note
// ">>" binds looser than "+", so this is motion_x >> (lowres + 1) (intended)
2263 sx = motion_x & s_mask;
2264 sy = motion_y & s_mask;
2265 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2266 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2268 if (s->out_format == FMT_H263) {
2269 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2270 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2271 uvsrc_x = src_x >> 1;
2272 uvsrc_y = src_y >> 1;
2273 } else if (s->out_format == FMT_H261) {
2274 // even chroma mv's are full pel in H261
2277 uvsx = (2 * mx) & s_mask;
2278 uvsy = (2 * my) & s_mask;
2279 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2280 uvsrc_y = mb_y * block_s + (my >> lowres);
2282 if(s->chroma_y_shift){
2287 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2288 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2290 if(s->chroma_x_shift){
2294 uvsy = motion_y & s_mask;
2296 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2299 uvsx = motion_x & s_mask;
2300 uvsy = motion_y & s_mask;
// source pointers into the three reference planes
2307 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2308 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2309 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
// if the MV reaches outside the padded picture, MC from a temporary buffer
// that emulated_edge_mc() fills with replicated edge pixels
2311 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
2312 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2313 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2314 linesize >> field_based, 17, 17 + field_based,
2315 src_x, src_y << field_based, h_edge_pos,
2317 ptr_y = s->edge_emu_buffer;
2318 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2319 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2320 s->vdsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9,
2322 uvsrc_x, uvsrc_y << field_based,
2323 h_edge_pos >> 1, v_edge_pos >> 1);
2324 s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9,
2326 uvsrc_x, uvsrc_y << field_based,
2327 h_edge_pos >> 1, v_edge_pos >> 1);
2329 ptr_cr = uvbuf + 16;
2333 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
// bottom field: start one (uv)line further down in both source and destination
2335 dest_y += s->linesize;
2336 dest_cb += s->uvlinesize;
2337 dest_cr += s->uvlinesize;
2341 ptr_y += s->linesize;
2342 ptr_cb += s->uvlinesize;
2343 ptr_cr += s->uvlinesize;
// rescale the sub-pel fraction to the units the h264 chroma MC functions expect
2346 sx = (sx << 2) >> lowres;
2347 sy = (sy << 2) >> lowres;
2348 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2350 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2351 uvsx = (uvsx << 2) >> lowres;
2352 uvsy = (uvsy << 2) >> lowres;
2353 if (h >> s->chroma_y_shift) {
2354 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
2355 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
2358 // FIXME h261 lowres loop filter
/**
 * Lowres chroma motion compensation for 4MV (four luma vectors) macroblocks:
 * derives a single chroma MV from the four luma MVs and compensates both
 * chroma planes. NOTE(review): interior source lines are elided in this
 * extract; comments describe only the visible statements.
 */
2361 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2362 uint8_t *dest_cb, uint8_t *dest_cr,
2363 uint8_t **ref_picture,
2364 h264_chroma_mc_func * pix_op,
2367 const int lowres = s->avctx->lowres;
2368 const int op_index = FFMIN(lowres, 2);
2369 const int block_s = 8 >> lowres;
2370 const int s_mask = (2 << lowres) - 1;
// chroma edges: half the luma edge positions, hence ">> (lowres + 1)"
// ("+" binds tighter than ">>", so the shift count is lowres + 1)
2371 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2372 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2373 int emu = 0, src_x, src_y, offset, sx, sy;
2376 if (s->quarter_sample) {
2381 /* In case of 8X8, we construct a single chroma motion vector
2382 with a special rounding */
2383 mx = ff_h263_round_chroma(mx);
2384 my = ff_h263_round_chroma(my);
2388 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2389 src_y = s->mb_y * block_s + (my >> lowres + 1);
// both chroma planes share the same source offset
2391 offset = src_y * s->uvlinesize + src_x;
2392 ptr = ref_picture[1] + offset;
2393 if (s->flags & CODEC_FLAG_EMU_EDGE) {
2394 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2395 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2396 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
2397 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2398 ptr = s->edge_emu_buffer;
// rescale sub-pel fraction for the h264 chroma MC primitive
2402 sx = (sx << 2) >> lowres;
2403 sy = (sy << 2) >> lowres;
2404 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
// Cr plane: same offset and fractions, edge emulation redone if needed
2406 ptr = ref_picture[2] + offset;
2408 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
2409 src_x, src_y, h_edge_pos, v_edge_pos);
2410 ptr = s->edge_emu_buffer;
2412 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2416 * motion compensation of a single macroblock
2418 * @param dest_y luma destination pointer
2419 * @param dest_cb chroma cb/u destination pointer
2420 * @param dest_cr chroma cr/v destination pointer
2421 * @param dir direction (0->forward, 1->backward)
2422 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2423 * @param pix_op halfpel motion compensation function (average or put normally)
2424 * the motion vectors are taken from s->mv and the MV type from s->mv_type
2426 static inline void MPV_motion_lowres(MpegEncContext *s,
2427 uint8_t *dest_y, uint8_t *dest_cb,
2429 int dir, uint8_t **ref_picture,
2430 h264_chroma_mc_func *pix_op)
2434 const int lowres = s->avctx->lowres;
2435 const int block_s = 8 >>lowres;
// dispatch on MV type; the case labels themselves are elided in this extract
2440 switch (s->mv_type) {
// one MV for the whole 16x16 macroblock
2442 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2444 ref_picture, pix_op,
2445 s->mv[dir][0][0], s->mv[dir][0][1],
// four MVs, one per 8x8 luma block; chroma handled separately below
2451 for (i = 0; i < 4; i++) {
2452 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2453 s->linesize) * block_s,
2454 ref_picture[0], 0, 0,
2455 (2 * mb_x + (i & 1)) * block_s,
2456 (2 * mb_y + (i >> 1)) * block_s,
2457 s->width, s->height, s->linesize,
2458 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2459 block_s, block_s, pix_op,
2460 s->mv[dir][i][0], s->mv[dir][i][1]);
// accumulate luma MVs to derive the single chroma MV
2462 mx += s->mv[dir][i][0];
2463 my += s->mv[dir][i][1];
2466 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2467 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
// field-based MC: two half-height compensations, one per field
2471 if (s->picture_structure == PICT_FRAME) {
2473 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2474 1, 0, s->field_select[dir][0],
2475 ref_picture, pix_op,
2476 s->mv[dir][0][0], s->mv[dir][0][1],
2479 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2480 1, 1, s->field_select[dir][1],
2481 ref_picture, pix_op,
2482 s->mv[dir][1][0], s->mv[dir][1][1],
// opposite-parity field of a non-B picture lives in the current picture
2485 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2486 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2487 ref_picture = s->current_picture_ptr->f.data;
2490 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2491 0, 0, s->field_select[dir][0],
2492 ref_picture, pix_op,
2494 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
// 16x8 MC: two compensations, selecting the proper reference per half
2498 for (i = 0; i < 2; i++) {
2499 uint8_t **ref2picture;
2501 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2502 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2503 ref2picture = ref_picture;
2505 ref2picture = s->current_picture_ptr->f.data;
2508 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2509 0, 0, s->field_select[dir][i],
2510 ref2picture, pix_op,
2511 s->mv[dir][i][0], s->mv[dir][i][1] +
2512 2 * block_s * i, block_s, mb_y >> 1);
2514 dest_y += 2 * block_s * s->linesize;
2515 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2516 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
// dual-prime MC: put the first prediction, then average the second on top
2520 if (s->picture_structure == PICT_FRAME) {
2521 for (i = 0; i < 2; i++) {
2523 for (j = 0; j < 2; j++) {
2524 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2526 ref_picture, pix_op,
2527 s->mv[dir][2 * i + j][0],
2528 s->mv[dir][2 * i + j][1],
2531 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2534 for (i = 0; i < 2; i++) {
2535 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2536 0, 0, s->picture_structure != i + 1,
2537 ref_picture, pix_op,
2538 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2539 2 * block_s, mb_y >> 1);
2541 // after put we make avg of the same block
2542 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2544 // opposite parity is always in the same
2545 // frame if this is second field
2546 if (!s->first_field) {
2547 ref_picture = s->current_picture_ptr->f.data;
2558 * find the lowest MB row referenced in the MVs
/**
 * Used by frame-threaded decoding to know how far the reference frame must
 * have been decoded before this macroblock can be motion-compensated.
 * Returns the referenced MB row, clamped to [0, mb_height - 1].
 */
2560 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
// MVs are scaled up to quarter-pel units so both half- and quarter-pel
// streams are handled uniformly
2562 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2563 int my, off, i, mvs;
// field pictures / GMC: fall through to the conservative answer below
2565 if (s->picture_structure != PICT_FRAME || s->mcsel)
// mvs (number of vectors to scan) is set per MV type; case labels elided here
2568 switch (s->mv_type) {
2582 for (i = 0; i < mvs; i++) {
2583 my = s->mv[dir][i][1]<<qpel_shift;
2584 my_max = FFMAX(my_max, my);
2585 my_min = FFMIN(my_min, my);
// worst vertical reach in MB rows: quarter-pel units / 64 = 16-pel rows
2588 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2590 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
// conservative fallback: wait for the whole reference frame
2592 return s->mb_height-1;
2595 /* put block[] to dest[] */
/* Dequantize an intra block with the codec's unquantizer, then IDCT and
 * overwrite (put) the result into dest with stride line_size. */
2596 static inline void put_dct(MpegEncContext *s,
2597 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2599 s->dct_unquantize_intra(s, block, i, qscale);
2600 s->dsp.idct_put (dest, line_size, block);
2603 /* add block[] to dest[] */
/* IDCT the (already dequantized) block i and add the residue onto dest,
 * but only if the block has at least one coded coefficient. */
2604 static inline void add_dct(MpegEncContext *s,
2605 int16_t *block, int i, uint8_t *dest, int line_size)
2607 if (s->block_last_index[i] >= 0) {
2608 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block with the codec's unquantizer, then IDCT and add
 * the residue onto dest; skipped entirely when the block is not coded. */
2612 static inline void add_dequant_dct(MpegEncContext *s,
2613 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2615 if (s->block_last_index[i] >= 0) {
2616 s->dct_unquantize_inter(s, block, i, qscale);
2618 s->dsp.idct_add (dest, line_size, block);
2623 * Clean dc, ac, coded_block for the current non-intra MB.
/**
 * Resets the intra prediction state (DC predictors to 1024, AC prediction
 * buffers to 0, msmpeg4 coded_block flags) for the current macroblock so a
 * later intra MB does not predict from stale non-intra data, then clears the
 * MB's entry in mbintra_table.
 */
2625 void ff_clean_intra_table_entries(MpegEncContext *s)
2627 int wrap = s->b8_stride;
2628 int xy = s->block_index[0];
// luma: reset the DC predictors of the four 8x8 blocks to the reset value 1024
2631 s->dc_val[0][xy + 1 ] =
2632 s->dc_val[0][xy + wrap] =
2633 s->dc_val[0][xy + 1 + wrap] = 1024;
/* ac pred: clear two rows of AC coefficient predictors */
2635 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2636 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2637 if (s->msmpeg4_version>=3) {
2638 s->coded_block[xy ] =
2639 s->coded_block[xy + 1 ] =
2640 s->coded_block[xy + wrap] =
2641 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one predictor per plane, indexed at MB resolution */
2644 wrap = s->mb_stride;
2645 xy = s->mb_x + s->mb_y * wrap;
2647 s->dc_val[2][xy] = 1024;
/* ac pred */
2649 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2650 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2652 s->mbintra_table[xy]= 0;
2655 /* generic function called after a macroblock has been parsed by the
2656 decoder or after it has been encoded by the encoder.
2658 Important variables used:
2659 s->mb_intra : true if intra macroblock
2660 s->mv_dir : motion vector direction
2661 s->mv_type : motion vector type
2662 s->mv : motion vector
2663 s->interlaced_dct : true if interlaced dct used (mpeg2)
2664 NOTE(review): interior source lines are elided in this extract; the
2664 comments below describe only the visible statements. */
2665 static av_always_inline
2666 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2667 int lowres_flag, int is_mpeg12)
2669 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
// XvMC hardware acceleration: the hw layer consumes the pblocks itself
2670 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2671 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2675 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2676 /* print DCT coefficients */
2678 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2680 for(j=0; j<64; j++){
2681 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2683 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2687 s->current_picture.qscale_table[mb_xy] = s->qscale;
2689 /* update DC predictors for P macroblocks */
2691 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2692 if(s->mbintra_table[mb_xy])
2693 ff_clean_intra_table_entries(s);
2697 s->last_dc[2] = 128 << s->intra_dc_precision;
2700 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2701 s->mbintra_table[mb_xy]=1;
// reconstruct the MB unless this is a pure-encode path where the pixels
// will never be read back (no PSNR, intra-only/B with non-RD decision)
2703 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2704 uint8_t *dest_y, *dest_cb, *dest_cr;
2705 int dct_linesize, dct_offset;
2706 op_pixels_func (*op_pix)[4];
2707 qpel_mc_func (*op_qpix)[16];
2708 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2709 const int uvlinesize = s->current_picture.f.linesize[1];
2710 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2711 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2713 /* avoid copy if macroblock skipped in last frame too */
2714 /* skip only during decoding as we might trash the buffers during encoding a bit */
2716 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2718 if (s->mb_skipped) {
2720 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2722 } else if(!s->current_picture.reference) {
2725 *mbskip_ptr = 0; /* not skipped */
// interlaced DCT: blocks are stored field-wise -> double stride, offset one line
2729 dct_linesize = linesize << s->interlaced_dct;
2730 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2734 dest_cb= s->dest[1];
2735 dest_cr= s->dest[2];
// non-readable destination: reconstruct into the scratchpad instead
2737 dest_y = s->b_scratchpad;
2738 dest_cb= s->b_scratchpad+16*linesize;
2739 dest_cr= s->b_scratchpad+32*linesize;
2743 /* motion handling */
2744 /* decoding or more than one mb_type (MC was already done otherwise) */
// frame threading: wait until the reference rows this MB needs are decoded
2747 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2748 if (s->mv_dir & MV_DIR_FORWARD) {
2749 ff_thread_await_progress(&s->last_picture_ptr->tf,
2750 ff_MPV_lowest_referenced_row(s, 0),
2753 if (s->mv_dir & MV_DIR_BACKWARD) {
2754 ff_thread_await_progress(&s->next_picture_ptr->tf,
2755 ff_MPV_lowest_referenced_row(s, 1),
// lowres MC path: put forward prediction, then average backward on top
2761 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2763 if (s->mv_dir & MV_DIR_FORWARD) {
2764 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2765 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2767 if (s->mv_dir & MV_DIR_BACKWARD) {
2768 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
// full-resolution MC path; rounding choice depends on no_rounding/B-frame
2771 op_qpix= s->me.qpel_put;
2772 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2773 op_pix = s->hdsp.put_pixels_tab;
2775 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2777 if (s->mv_dir & MV_DIR_FORWARD) {
2778 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2779 op_pix = s->hdsp.avg_pixels_tab;
2780 op_qpix= s->me.qpel_avg;
2782 if (s->mv_dir & MV_DIR_BACKWARD) {
2783 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2788 /* skip dequant / idct if we are really late ;) */
2789 if(s->avctx->skip_idct){
2790 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2791 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2792 || s->avctx->skip_idct >= AVDISCARD_ALL)
2796 /* add dct residue */
// codecs whose blocks are still quantized here: dequantize while adding
2797 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2798 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2799 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2800 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2801 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2802 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2804 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2805 if (s->chroma_y_shift){
2806 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2807 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2811 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2812 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2813 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2814 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
// blocks already dequantized by the codec-specific decoder: just add
2817 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2818 add_dct(s, block[0], 0, dest_y , dct_linesize);
2819 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2820 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2821 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2823 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2824 if(s->chroma_y_shift){//Chroma420
2825 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2826 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2829 dct_linesize = uvlinesize << s->interlaced_dct;
2830 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2832 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2833 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2834 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2835 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2836 if(!s->chroma_x_shift){//Chroma444
2837 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2838 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2839 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2840 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2845 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2846 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2849 /* dct only in intra block */
// intra MBs: blocks overwrite (put) the destination instead of adding
2850 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2851 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2852 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2853 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2854 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2856 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2857 if(s->chroma_y_shift){
2858 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2859 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2863 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2864 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2865 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2866 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
// MPEG-1/2 intra blocks arrive already dequantized: plain idct_put
2870 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2871 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2872 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2873 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2875 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2876 if(s->chroma_y_shift){
2877 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2878 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2881 dct_linesize = uvlinesize << s->interlaced_dct;
2882 dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
2884 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2885 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2886 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2887 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2888 if(!s->chroma_x_shift){//Chroma444
2889 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2890 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2891 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2892 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
// scratchpad path: copy the reconstructed MB back into the real picture
2900 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2901 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2902 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatches to MPV_decode_mb_internal with compile-time
 * constant lowres/is_mpeg12 flags so each combination gets a specialized,
 * fully inlined body. */
2907 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2909 if(s->out_format == FMT_MPEG1) {
2910 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2911 else MPV_decode_mb_internal(s, block, 0, 1);
2914 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2915 else MPV_decode_mb_internal(s, block, 0, 0);
2919 * @param h is the normal height, this will be reduced automatically if needed for the last row
/**
 * Draws the EDGE_WIDTH padding around the just-decoded band (when edge
 * emulation is not in use) and then invokes the user's draw_horiz_band
 * callback, choosing the picture to display according to reordering rules.
 */
2921 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2922 Picture *last, int y, int h, int picture_structure,
2923 int first_field, int draw_edges, int low_delay,
2924 int v_edge_pos, int h_edge_pos)
2926 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2927 int hshift = desc->log2_chroma_w;
2928 int vshift = desc->log2_chroma_h;
2929 const int field_pic = picture_structure != PICT_FRAME;
// edges are only drawn for software decoding without CODEC_FLAG_EMU_EDGE
2935 if (!avctx->hwaccel &&
2936 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2939 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2940 int *linesize = cur->f.linesize;
2941 int sides = 0, edge_h;
2942 if (y==0) sides |= EDGE_TOP;
2943 if (y + h >= v_edge_pos)
2944 sides |= EDGE_BOTTOM;
2946 edge_h= FFMIN(h, v_edge_pos - y);
2948 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2949 linesize[0], h_edge_pos, edge_h,
2950 EDGE_WIDTH, EDGE_WIDTH, sides);
2951 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2952 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2953 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2954 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2955 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2956 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
// clip band height at the bottom of the visible picture
2959 h = FFMIN(h, avctx->height - y);
2961 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2963 if (avctx->draw_horiz_band) {
2965 int offset[AV_NUM_DATA_POINTERS];
// display order vs coding order: B frames / low delay show the current pic
2968 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2969 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2976 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2977 picture_structure == PICT_FRAME &&
2978 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2979 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
// plane offsets of the band within the source frame
2982 offset[0]= y * src->linesize[0];
2984 offset[2]= (y >> vshift) * src->linesize[1];
2985 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2991 avctx->draw_horiz_band(avctx, src, offset,
2992 y, picture_structure, h);
/* Convenience wrapper around ff_draw_horiz_band() that pulls all the
 * parameters out of the MpegEncContext; edges are drawn only for
 * unrestricted-MV, non-intra-only streams. */
2996 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2998 int draw_edges = s->unrestricted_mv && !s->intra_only;
2999 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
3000 &s->last_picture, y, h, s->picture_structure,
3001 s->first_field, draw_edges, s->low_delay,
3002 s->v_edge_pos, s->h_edge_pos);
/* Set up s->block_index[] (per-8x8-block indices for the current MB) and
 * s->dest[] (destination pointers into the current picture planes).
 * Indices/pointers are positioned one MB to the LEFT of mb_x, matching how
 * the per-MB decode loop advances them. */
3005 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3006 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
3007 const int uvlinesize = s->current_picture.f.linesize[1];
// mb_size = log2 of MB width in pixels, reduced by lowres
3008 const int mb_size= 4 - s->avctx->lowres;
3010 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3011 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3012 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3013 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
// chroma indices live after the 2*mb_height rows of luma 8x8 indices
3014 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3015 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3016 //block_index is not used by mpeg2, so it is not affected by chroma_format
3018 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
3019 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3020 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3022 if(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3024 if(s->picture_structure==PICT_FRAME){
3025 s->dest[0] += s->mb_y * linesize << mb_size;
3026 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3027 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
// field picture: two MB rows share one picture row, fields interleave
3029 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3030 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3031 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3032 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Permute an 8x8 block of coefficients in place.
 *
 * As visible in this extract the block reads temp[j] without ever declaring
 * or filling a temp buffer (it would not compile); this restores the complete
 * two-pass implementation: first save the coefficients touched by the
 * scantable (and clear their slots), then write each one back at its
 * permuted position.
 *
 * @param block       the block which will be permuted according to
 *                    the given permutation vector
 * @param permutation the permutation vector (maps raster index -> permuted index)
 * @param scantable   the used scantable; only used to limit work to the
 *                    coefficients that can be non-zero, the block is NOT
 *                    (inverse) permuted to scantable order!
 * @param last        the last non-zero coefficient in scantable order,
 *                    used to speed the permutation up
 */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int i;
    int16_t temp[64];

    /* nothing beyond the DC coefficient -> nothing to permute */
    if (last <= 0)
        return;
    //if (permutation[1] == 1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* pass 1: stash every coefficient that can be non-zero and clear its slot,
     * so pass 2 cannot read an already-overwritten value */
    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
        temp[j]  = block[j];
        block[j] = 0;
    }

    /* pass 2: write each saved coefficient to its permuted position */
    for (i = 0; i <= last; i++) {
        const int j      = scantable[i];
        const int perm_j = permutation[j];
        block[perm_j] = temp[j];
    }
}
/* Flush the decoder state: drop all picture references, reset the current
 * MB position, the bitstream parser state and the pending bitstream buffer.
 * Safe to call before the context is fully initialized (NULL checks). */
3066 void ff_mpeg_flush(AVCodecContext *avctx){
3068 MpegEncContext *s = avctx->priv_data;
3070 if(s==NULL || s->picture==NULL)
3073 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3074 ff_mpeg_unref_picture(s, &s->picture[i]);
3075 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3077 s->mb_x= s->mb_y= 0;
// reset the start-code parser so stale partial frames are discarded
3080 s->parse_context.state= -1;
3081 s->parse_context.frame_start_found= 0;
3082 s->parse_context.overread= 0;
3083 s->parse_context.overread_index= 0;
3084 s->parse_context.index= 0;
3085 s->parse_context.last_index= 0;
3086 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantizer: DC is scaled by the y/c DC scale, AC
 * coefficients by (level * qscale * intra_matrix[j]) >> 3 with the
 * MPEG-1 "oddification" (level - 1) | 1 applied to the magnitude.
 * NOTE(review): the sign-handling/clipping lines are elided in this extract. */
3090 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
3091 int16_t *block, int n, int qscale)
3093 int i, level, nCoeffs;
3094 const uint16_t *quant_matrix;
3096 nCoeffs= s->block_last_index[n];
// DC: blocks 0-3 are luma, 4+ chroma
3098 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3099 /* XXX: only mpeg1 */
3100 quant_matrix = s->intra_matrix;
3101 for(i=1;i<=nCoeffs;i++) {
3102 int j= s->intra_scantable.permutated[i];
3107 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3108 level = (level - 1) | 1;
3111 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3112 level = (level - 1) | 1;
/* MPEG-1 inter dequantizer: ((2*level + 1) * qscale * inter_matrix[j]) >> 4,
 * again with the odd-value rounding (level - 1) | 1; starts at i = 0 since
 * inter blocks have no specially-coded DC.
 * NOTE(review): the sign-handling/clipping lines are elided in this extract. */
3119 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
3120 int16_t *block, int n, int qscale)
3122 int i, level, nCoeffs;
3123 const uint16_t *quant_matrix;
3125 nCoeffs= s->block_last_index[n];
3127 quant_matrix = s->inter_matrix;
3128 for(i=0; i<=nCoeffs; i++) {
3129 int j= s->intra_scantable.permutated[i];
3134 level = (((level << 1) + 1) * qscale *
3135 ((int) (quant_matrix[j]))) >> 4;
3136 level = (level - 1) | 1;
3139 level = (((level << 1) + 1) * qscale *
3140 ((int) (quant_matrix[j]))) >> 4;
3141 level = (level - 1) | 1;
/* MPEG-2 intra dequantizer: like the MPEG-1 variant but WITHOUT the
 * odd-value rounding, and with nCoeffs forced to 63 when alternate scan is
 * used (block_last_index is not reliable then).
 * NOTE(review): the sign-handling lines are elided in this extract. */
3148 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
3149 int16_t *block, int n, int qscale)
3151 int i, level, nCoeffs;
3152 const uint16_t *quant_matrix;
3154 if(s->alternate_scan) nCoeffs= 63;
3155 else nCoeffs= s->block_last_index[n];
3157 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3158 quant_matrix = s->intra_matrix;
3159 for(i=1;i<=nCoeffs;i++) {
3160 int j= s->intra_scantable.permutated[i];
3165 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3168 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of the MPEG-2 intra dequantizer, used when exact
 * conformance is required (presumably tracks the mismatch-control sum;
 * the accumulation lines are elided in this extract — verify against the
 * full source). */
3175 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
3176 int16_t *block, int n, int qscale)
3178 int i, level, nCoeffs;
3179 const uint16_t *quant_matrix;
3182 if(s->alternate_scan) nCoeffs= 63;
3183 else nCoeffs= s->block_last_index[n];
3185 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3187 quant_matrix = s->intra_matrix;
3188 for(i=1;i<=nCoeffs;i++) {
3189 int j= s->intra_scantable.permutated[i];
3194 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3197 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantizer: ((2*level + 1) * qscale * inter_matrix[j]) >> 4,
 * no odd-value rounding (unlike MPEG-1); nCoeffs forced to 63 for alternate
 * scan. NOTE(review): sign handling and the mismatch-control update are
 * elided in this extract. */
3206 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
3207 int16_t *block, int n, int qscale)
3209 int i, level, nCoeffs;
3210 const uint16_t *quant_matrix;
3213 if(s->alternate_scan) nCoeffs= 63;
3214 else nCoeffs= s->block_last_index[n];
3216 quant_matrix = s->inter_matrix;
3217 for(i=0; i<=nCoeffs; i++) {
3218 int j= s->intra_scantable.permutated[i];
3223 level = (((level << 1) + 1) * qscale *
3224 ((int) (quant_matrix[j]))) >> 4;
3227 level = (((level << 1) + 1) * qscale *
3228 ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra dequantizer: level' = level * qmul ± qadd (sign-dependent;
 * the qmul assignment and the sign branch's surrounding lines are elided in
 * this extract). The DC coefficient is scaled by the y/c DC scale. */
3237 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
3238 int16_t *block, int n, int qscale)
3240 int i, level, qmul, qadd;
3243 assert(s->block_last_index[n]>=0);
3248 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
// qadd is forced odd, per the H.263 reconstruction formula
3249 qadd = (qscale - 1) | 1;
// raster_end[] converts the scan-order last index into a raster-order bound
3256 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3258 for(i=1; i<=nCoeffs; i++) {
3262 level = level * qmul - qadd;
3264 level = level * qmul + qadd;
/* H.263-style inter dequantizer: identical reconstruction to the intra
 * variant but with no special DC handling, so the loop starts at i = 0.
 * (The qmul assignment is elided in this extract.) */
3271 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
3272 int16_t *block, int n, int qscale)
3274 int i, level, qmul, qadd;
3277 assert(s->block_last_index[n]>=0);
// qadd is forced odd, per the H.263 reconstruction formula
3279 qadd = (qscale - 1) | 1;
3282 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3284 for(i=0; i<=nCoeffs; i++) {
3288 level = level * qmul - qadd;
3290 level = level * qmul + qadd;
3298 * set qscale and update qscale dependent variables.
/* Clamps qscale to the valid range (the lower clamp is elided in this
 * extract; the visible upper bound is 31), then refreshes the derived
 * chroma qscale and the luma/chroma DC scale factors from their tables. */
3300 void ff_set_qscale(MpegEncContext * s, int qscale)
3304 else if (qscale > 31)
3308 s->chroma_qscale= s->chroma_qscale_table[qscale];
3310 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3311 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report to waiting frame-decoding threads that rows up to s->mb_y of the
 * current picture are decoded; skipped for B frames, partitioned frames and
 * after an error (their row progress is not monotonically reliable). */
3314 void ff_MPV_report_decode_progress(MpegEncContext *s)
3316 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3317 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3320 #if CONFIG_ERROR_RESILIENCE
/* Copy the per-frame state the error-resilience module needs (picture
 * pointers, timing, MV precision, partitioning) into the ERContext and
 * start error tracking for the new frame. */
3321 void ff_mpeg_er_frame_start(MpegEncContext *s)
3323 ERContext *er = &s->er;
3325 er->cur_pic = s->current_picture_ptr;
3326 er->last_pic = s->last_picture_ptr;
3327 er->next_pic = s->next_picture_ptr;
3329 er->pp_time = s->pp_time;
3330 er->pb_time = s->pb_time;
3331 er->quarter_sample = s->quarter_sample;
3332 er->partitioned_frame = s->partitioned_frame;
3334 ff_er_frame_start(er);
3336 #endif /* CONFIG_ERROR_RESILIENCE */