2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
35 #include "h264chroma.h"
38 #include "mpegvideo.h"
41 #include "xvmc_internal.h"
/* Forward declarations of the per-standard C dequantization routines;
 * ff_dct_common_init() below installs them into the MpegEncContext
 * dct_unquantize_* function pointers. */
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
/* Default chroma qscale mapping: the identity — chroma uses the same
 * qscale value as luma (installed in ff_MPV_common_defaults()). */
60 static const uint8_t ff_default_chroma_qscale_table[32] = {
61 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
62 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
63 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale value.
 * Also reused as entry 0 of ff_mpeg2_dc_scale_table[] below. */
66 const uint8_t ff_mpeg1_dc_scale_table[128] = {
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
68 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table, constant 4 for all qscale values
 * (entry 1 of ff_mpeg2_dc_scale_table[] below). */
78 static const uint8_t mpeg2_dc_scale_table1[128] = {
79 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
80 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table, constant 2 for all qscale values
 * (entry 2 of ff_mpeg2_dc_scale_table[] below). */
90 static const uint8_t mpeg2_dc_scale_table2[128] = {
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table, constant 1 for all qscale values
 * (entry 3 of ff_mpeg2_dc_scale_table[] below). */
102 static const uint8_t mpeg2_dc_scale_table3[128] = {
103 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
104 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale tables in decreasing scale (8, 4, 2, 1); entry 0 reuses the
 * MPEG-1 table. NOTE(review): presumably indexed by the stream's
 * intra_dc_precision — confirm against callers. */
114 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
115 ff_mpeg1_dc_scale_table,
116 mpeg2_dc_scale_table1,
117 mpeg2_dc_scale_table2,
118 mpeg2_dc_scale_table3,
/* Pixel-format list exported for 4:2:0-only codecs.
 * NOTE(review): initializer entries are not visible in this fragment. */
121 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback (installed as er->decode_mb in init_er()):
 * reconstructs one macroblock by loading the concealment motion data into
 * the context and re-running the normal MB decode path. */
126 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
128 int mb_x, int mb_y, int mb_intra, int mb_skipped)
130 MpegEncContext *s = opaque;
133 s->mv_type = mv_type;
134 s->mb_intra = mb_intra;
135 s->mb_skipped = mb_skipped;
138 memcpy(s->mv, mv, sizeof(*mv));
140 ff_init_block_index(s);
141 ff_update_block_index(s);
143 s->dsp.clear_blocks(s->block[0]);
/* Recompute the luma/chroma destination pointers from the MB position,
 * the strides, and the chroma subsampling shifts. */
145 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
146 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
147 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
150 ff_MPV_decode_mb(s, s->block);
153 /* init common dct for both encoder and decoder */
154 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Initialize the generic DSP sub-contexts used by both paths. */
156 ff_dsputil_init(&s->dsp, s->avctx);
157 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
158 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
159 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* Install the C dequantizers; the bit-exact MPEG-2 intra variant is
 * selected when CODEC_FLAG_BITEXACT is set. */
161 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
162 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
163 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
164 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
165 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
166 if (s->flags & CODEC_FLAG_BITEXACT)
167 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
168 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Architecture-specific overrides (guarded by ARCH_* in the full file). */
171 ff_MPV_common_init_x86(s);
173 ff_MPV_common_init_axp(s);
175 ff_MPV_common_init_arm(s);
177 ff_MPV_common_init_bfin(s);
179 ff_MPV_common_init_ppc(s);
182 /* load & permutate scantables
183 * note: only wmv uses different ones
/* Scan tables are permuted to match the IDCT's coefficient ordering. */
185 if (s->alternate_scan) {
186 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
187 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
189 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
190 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
192 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads). Returns 0 on success or
 * AVERROR(ENOMEM); on failure the edge_emu_buffer is freed again. */
198 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
/* Pad to filter length and align the per-line allocation size. */
200 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
202 // edge emu needs blocksize + filter length - 1
203 // (= 17x17 for halfpel / 21x21 for h264)
204 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
205 // at uvlinesize. It supports only YUV420 so 24x24 is enough
206 // linesize * interlaced * MBsize
207 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
/* The ME/RD/B scratchpads share one allocation; obmc is offset by 16. */
210 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
212 s->me.temp = s->me.scratchpad;
213 s->rd_scratchpad = s->me.scratchpad;
214 s->b_scratchpad = s->me.scratchpad;
215 s->obmc_scratchpad = s->me.scratchpad + 16;
219 av_freep(&s->edge_emu_buffer);
220 return AVERROR(ENOMEM);
224 * Allocate a frame buffer
226 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* WM image/screen codecs use internal buffers with different dimensions,
 * so the user get_buffer callback is bypassed for them. */
231 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
232 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
233 s->codec_id != AV_CODEC_ID_MSS2)
234 r = ff_thread_get_buffer(s->avctx, &pic->tf,
235 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
237 pic->f.width = s->avctx->width;
238 pic->f.height = s->avctx->height;
239 pic->f.format = s->avctx->pix_fmt;
240 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
243 if (r < 0 || !pic->f.data[0]) {
244 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Allocate per-picture hwaccel private data when the hwaccel needs it. */
249 if (s->avctx->hwaccel) {
250 assert(!pic->hwaccel_picture_private);
251 if (s->avctx->hwaccel->priv_data_size) {
252 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
253 if (!pic->hwaccel_priv_buf) {
254 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
257 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* Strides must stay constant across pictures; reject any change. */
261 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
262 s->uvlinesize != pic->f.linesize[1])) {
263 av_log(s->avctx, AV_LOG_ERROR,
264 "get_buffer() failed (stride changed)\n");
265 ff_mpeg_unref_picture(s, pic);
269 if (pic->f.linesize[1] != pic->f.linesize[2]) {
270 av_log(s->avctx, AV_LOG_ERROR,
271 "get_buffer() failed (uv stride mismatch)\n");
272 ff_mpeg_unref_picture(s, pic);
/* Lazily (re)create the linesize-dependent scratch buffers. */
276 if (!s->edge_emu_buffer &&
277 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
278 av_log(s->avctx, AV_LOG_ERROR,
279 "get_buffer() failed to allocate context scratch buffers.\n");
280 ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data buffers (variance, skip, qscale,
 * mb_type, motion vectors, ref indices) and reset the recorded
 * allocation dimensions. */
287 static void free_picture_tables(Picture *pic)
291 pic->alloc_mb_width =
292 pic->alloc_mb_height = 0;
294 av_buffer_unref(&pic->mb_var_buf);
295 av_buffer_unref(&pic->mc_mb_var_buf);
296 av_buffer_unref(&pic->mb_mean_buf);
297 av_buffer_unref(&pic->mbskip_table_buf);
298 av_buffer_unref(&pic->qscale_table_buf);
299 av_buffer_unref(&pic->mb_type_buf);
301 for (i = 0; i < 2; i++) {
302 av_buffer_unref(&pic->motion_val_buf[i]);
303 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data buffers sized from the current
 * macroblock geometry. Returns 0 or AVERROR(ENOMEM); callers clean up
 * via free_picture_tables() on failure. */
307 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
309 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
310 const int mb_array_size = s->mb_stride * s->mb_height;
311 const int b8_array_size = s->b8_stride * s->mb_height * 2;
315 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
316 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
317 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
319 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
320 return AVERROR(ENOMEM);
/* Encoder-side statistics tables (guarded by s->encoding in the full file). */
323 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
324 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
325 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
326 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
327 return AVERROR(ENOMEM);
/* Motion tables are only needed for H.263-family output, encoding, or
 * motion-vector debugging/visualization. */
330 if (s->out_format == FMT_H263 || s->encoding ||
331 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
332 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
333 int ref_index_size = 4 * mb_array_size;
335 for (i = 0; mv_size && i < 2; i++) {
336 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
337 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
338 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
339 return AVERROR(ENOMEM);
/* Remember the geometry the tables were sized for, so a resolution
 * change can trigger reallocation. */
343 pic->alloc_mb_width = s->mb_width;
344 pic->alloc_mb_height = s->mb_height;
/* Ensure every per-picture table buffer is writable (copy-on-write via
 * av_buffer_make_writable). On failure the macro frees all tables and
 * returns AVERROR(ENOMEM). */
349 static int make_tables_writable(Picture *pic)
352 #define MAKE_WRITABLE(table) \
355 (ret = av_buffer_make_writable(&pic->table)) < 0)\
359 MAKE_WRITABLE(mb_var_buf);
360 MAKE_WRITABLE(mc_mb_var_buf);
361 MAKE_WRITABLE(mb_mean_buf);
362 MAKE_WRITABLE(mbskip_table_buf);
363 MAKE_WRITABLE(qscale_table_buf);
364 MAKE_WRITABLE(mb_type_buf);
366 for (i = 0; i < 2; i++) {
367 MAKE_WRITABLE(motion_val_buf[i]);
368 MAKE_WRITABLE(ref_index_buf[i]);
375 * Allocate a Picture.
376 * The pixels are allocated/set by calling get_buffer() if shared = 0
378 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* If the tables were sized for different MB dimensions, drop them so
 * they are reallocated below. */
382 if (pic->qscale_table_buf)
383 if ( pic->alloc_mb_width != s->mb_width
384 || pic->alloc_mb_height != s->mb_height)
385 free_picture_tables(pic);
388 assert(pic->f.data[0]);
391 assert(!pic->f.data[0]);
393 if (alloc_frame_buffer(s, pic) < 0)
/* Record the strides of the first allocated frame as the canonical ones. */
396 s->linesize = pic->f.linesize[0];
397 s->uvlinesize = pic->f.linesize[1];
400 if (!pic->qscale_table_buf)
401 ret = alloc_picture_tables(s, pic);
403 ret = make_tables_writable(pic);
408 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
409 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
410 pic->mb_mean = pic->mb_mean_buf->data;
/* The +2*mb_stride+1 offsets skip the guard rows/column at the start of
 * the qscale/mb_type tables. */
413 pic->mbskip_table = pic->mbskip_table_buf->data;
414 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
415 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
417 if (pic->motion_val_buf[0]) {
418 for (i = 0; i < 2; i++) {
419 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
420 pic->ref_index[i] = pic->ref_index_buf[i]->data;
426 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
427 ff_mpeg_unref_picture(s, pic);
428 free_picture_tables(pic);
429 return AVERROR(ENOMEM);
433 * Deallocate a picture.
435 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
/* Everything past mb_mean in the Picture struct is wiped below; fields
 * up to and including mb_mean survive the unref. */
437 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
440 /* WM Image / Screen codecs allocate internal buffers with different
441 * dimensions / colorspaces; ignore user-defined callbacks for these. */
442 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
443 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
444 s->codec_id != AV_CODEC_ID_MSS2)
445 ff_thread_release_buffer(s->avctx, &pic->tf);
447 av_frame_unref(&pic->f);
449 av_buffer_unref(&pic->hwaccel_priv_buf);
451 if (pic->needs_realloc)
452 free_picture_tables(pic);
454 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst share src's per-picture table buffers via new AVBuffer
 * references, then copy the derived data pointers. On reference failure
 * all dst tables are freed and AVERROR(ENOMEM) is returned. */
457 static int update_picture_tables(Picture *dst, Picture *src)
461 #define UPDATE_TABLE(table)\
/* Only re-reference when dst does not already share src's buffer. */
464 (!dst->table || dst->table->buffer != src->table->buffer)) {\
465 av_buffer_unref(&dst->table);\
466 dst->table = av_buffer_ref(src->table);\
468 free_picture_tables(dst);\
469 return AVERROR(ENOMEM);\
474 UPDATE_TABLE(mb_var_buf);
475 UPDATE_TABLE(mc_mb_var_buf);
476 UPDATE_TABLE(mb_mean_buf);
477 UPDATE_TABLE(mbskip_table_buf);
478 UPDATE_TABLE(qscale_table_buf);
479 UPDATE_TABLE(mb_type_buf);
480 for (i = 0; i < 2; i++) {
481 UPDATE_TABLE(motion_val_buf[i]);
482 UPDATE_TABLE(ref_index_buf[i]);
/* Copy the raw pointers derived from the buffers above. */
485 dst->mb_var = src->mb_var;
486 dst->mc_mb_var = src->mc_mb_var;
487 dst->mb_mean = src->mb_mean;
488 dst->mbskip_table = src->mbskip_table;
489 dst->qscale_table = src->qscale_table;
490 dst->mb_type = src->mb_type;
491 for (i = 0; i < 2; i++) {
492 dst->motion_val[i] = src->motion_val[i];
493 dst->ref_index[i] = src->ref_index[i];
496 dst->alloc_mb_width = src->alloc_mb_width;
497 dst->alloc_mb_height = src->alloc_mb_height;
/* Create dst as a new reference to src: frame buffer (via frame-thread
 * aware ref), side-data tables, hwaccel private data, and scalar state.
 * On any failure dst is fully unreferenced before returning. */
502 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
506 av_assert0(!dst->f.buf[0]);
507 av_assert0(src->f.buf[0]);
511 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
515 ret = update_picture_tables(dst, src);
519 if (src->hwaccel_picture_private) {
520 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
521 if (!dst->hwaccel_priv_buf)
523 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
/* Plain-value state copied directly. */
526 dst->field_picture = src->field_picture;
527 dst->mb_var_sum = src->mb_var_sum;
528 dst->mc_mb_var_sum = src->mc_mb_var_sum;
529 dst->b_frame_score = src->b_frame_score;
530 dst->needs_realloc = src->needs_realloc;
531 dst->reference = src->reference;
532 dst->shared = src->shared;
536 ff_mpeg_unref_picture(s, dst);
/* Allocate the per-slice-context state: ME maps, optional noise-reduction
 * error accumulator, DCT block storage, and (for H.263-family) the AC
 * prediction values. Returns 0 or -1; partial allocations are released
 * later by ff_MPV_common_end(). */
540 static int init_duplicate_context(MpegEncContext *s)
542 int y_size = s->b8_stride * (2 * s->mb_height + 1);
543 int c_size = s->mb_stride * (s->mb_height + 1);
544 int yc_size = y_size + 2 * c_size;
552 s->obmc_scratchpad = NULL;
555 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
556 ME_MAP_SIZE * sizeof(uint32_t), fail)
557 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
558 ME_MAP_SIZE * sizeof(uint32_t), fail)
559 if (s->avctx->noise_reduction) {
560 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
561 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered. */
564 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
565 s->block = s->blocks[0];
567 for (i = 0; i < 12; i++) {
568 s->pblocks[i] = &s->block[i];
571 if (s->out_format == FMT_H263) {
573 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
574 yc_size * sizeof(int16_t) * 16, fail);
/* ac_val[0] is luma; [1]/[2] are the two chroma planes, offset past the
 * guard row/column. */
575 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
576 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
577 s->ac_val[2] = s->ac_val[1] + c_size;
582 return -1; // free() through ff_MPV_common_end()
/* Free everything init_duplicate_context() and
 * ff_mpv_frame_size_alloc() allocated for one slice context. */
585 static void free_duplicate_context(MpegEncContext *s)
590 av_freep(&s->edge_emu_buffer);
591 av_freep(&s->me.scratchpad);
/* obmc_scratchpad aliases me.scratchpad + 16, so it is only cleared,
 * never freed separately. */
595 s->obmc_scratchpad = NULL;
597 av_freep(&s->dct_error_sum);
598 av_freep(&s->me.map);
599 av_freep(&s->me.score_map);
600 av_freep(&s->blocks);
601 av_freep(&s->ac_val_base);
/* Copy the per-thread pointer fields from src into bak so a wholesale
 * memcpy of the context can be undone for those fields.
 * NOTE(review): most COPY() lines are not visible in this fragment. */
605 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
607 #define COPY(a) bak->a = src->a
608 COPY(edge_emu_buffer);
613 COPY(obmc_scratchpad);
620 COPY(me.map_generation);
/* Synchronize a slice-thread context with the main context: bulk-copy
 * the whole struct, then restore dst's own per-thread pointers and
 * rebuild the pblocks table. Re-allocates the scratch buffers if dst
 * has none yet. */
632 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
636 // FIXME copy only needed parts
638 backup_duplicate_context(&bak, dst);
639 memcpy(dst, src, sizeof(MpegEncContext));
640 backup_duplicate_context(dst, &bak);
/* pblocks must point into dst's own block storage, not src's. */
641 for (i = 0; i < 12; i++) {
642 dst->pblocks[i] = &dst->block[i];
644 if (!dst->edge_emu_buffer &&
645 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
646 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
647 "scratch buffers.\n");
650 // STOP_TIMER("update_duplicate_context")
651 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading state transfer: copy decoding state from the source
 * thread's context (s1) into this thread's context (s). Handles first-
 * time initialization, resolution changes, picture references, the
 * leftover bitstream buffer, and assorted scalar state. */
655 int ff_mpeg_update_thread_context(AVCodecContext *dst,
656 const AVCodecContext *src)
659 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
666 // FIXME can parameters change on I-frames?
667 // in that case dst may need a reinit
668 if (!s->context_initialized) {
/* First update: clone the whole context, then give this thread its own
 * (empty) bitstream buffer and run common init. */
669 memcpy(s, s1, sizeof(MpegEncContext));
672 s->bitstream_buffer = NULL;
673 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
675 if (s1->context_initialized){
676 // s->picture_range_start += MAX_PICTURE_COUNT;
677 // s->picture_range_end += MAX_PICTURE_COUNT;
678 if((ret = ff_MPV_common_init(s)) < 0){
679 memset(s, 0, sizeof(MpegEncContext));
/* Propagate a resolution change / pending reinit from the source. */
686 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
687 s->context_reinit = 0;
688 s->height = s1->height;
689 s->width = s1->width;
690 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
694 s->avctx->coded_height = s1->avctx->coded_height;
695 s->avctx->coded_width = s1->avctx->coded_width;
696 s->avctx->width = s1->avctx->width;
697 s->avctx->height = s1->avctx->height;
699 s->coded_picture_number = s1->coded_picture_number;
700 s->picture_number = s1->picture_number;
701 s->input_picture_number = s1->input_picture_number;
703 av_assert0(!s->picture || s->picture != s1->picture);
/* Re-reference every picture slot from the source thread. */
705 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
706 ff_mpeg_unref_picture(s, &s->picture[i]);
707 if (s1->picture[i].f.data[0] &&
708 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
712 #define UPDATE_PICTURE(pic)\
714 ff_mpeg_unref_picture(s, &s->pic);\
715 if (s1->pic.f.data[0])\
716 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
718 ret = update_picture_tables(&s->pic, &s1->pic);\
723 UPDATE_PICTURE(current_picture);
724 UPDATE_PICTURE(last_picture);
725 UPDATE_PICTURE(next_picture);
/* Rebase the picture pointers from s1's array into s's array. */
727 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
728 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
729 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
731 // Error/bug resilience
732 s->next_p_frame_damaged = s1->next_p_frame_damaged;
733 s->workaround_bugs = s1->workaround_bugs;
734 s->padding_bug_score = s1->padding_bug_score;
/* Bulk-copy the MPEG-4 field range [time_increment_bits, shape). */
737 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
738 (char *) &s1->shape - (char *) &s1->time_increment_bits);
741 s->max_b_frames = s1->max_b_frames;
742 s->low_delay = s1->low_delay;
743 s->droppable = s1->droppable;
745 // DivX handling (doesn't work)
746 s->divx_packed = s1->divx_packed;
/* Copy over any leftover bitstream bytes, growing our buffer if needed
 * and zero-padding the tail for the bitstream reader. */
748 if (s1->bitstream_buffer) {
749 if (s1->bitstream_buffer_size +
750 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
751 av_fast_malloc(&s->bitstream_buffer,
752 &s->allocated_bitstream_buffer_size,
753 s1->allocated_bitstream_buffer_size);
754 s->bitstream_buffer_size = s1->bitstream_buffer_size;
755 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
756 s1->bitstream_buffer_size);
757 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
758 FF_INPUT_BUFFER_PADDING_SIZE);
761 // linesize dependend scratch buffer allocation
762 if (!s->edge_emu_buffer)
764 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
765 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
766 "scratch buffers.\n");
767 return AVERROR(ENOMEM);
770 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
771 "be allocated due to unknown size.\n");
774 // MPEG2/interlacing info
775 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
776 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
778 if (!s1->first_field) {
779 s->last_pict_type = s1->pict_type;
780 if (s1->current_picture_ptr)
781 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
783 if (s1->pict_type != AV_PICTURE_TYPE_B) {
784 s->last_non_b_pict_type = s1->pict_type;
792 * Set the given MpegEncContext to common defaults
793 * (same for encoding and decoding).
794 * The changed fields will not depend upon the
795 * prior state of the MpegEncContext.
797 void ff_MPV_common_defaults(MpegEncContext *s)
/* MPEG-1 DC scaling and identity chroma qscale are the baseline;
 * codec-specific init overrides these where needed. */
799 s->y_dc_scale_table =
800 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
801 s->chroma_qscale_table = ff_default_chroma_qscale_table;
802 s->progressive_frame = 1;
803 s->progressive_sequence = 1;
804 s->picture_structure = PICT_FRAME;
806 s->coded_picture_number = 0;
807 s->picture_number = 0;
808 s->input_picture_number = 0;
810 s->picture_in_gop_number = 0;
815 s->slice_context_count = 1;
819 * Set the given MpegEncContext to defaults for decoding.
820 * the changed fields will not depend upon
821 * the prior state of the MpegEncContext.
823 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Decoding currently needs nothing beyond the common defaults. */
825 ff_MPV_common_defaults(s);
/* Initialize the error-resilience context from the MpegEncContext
 * geometry, allocate its temp/status tables, and install the MB decode
 * callback. Returns 0 or AVERROR(ENOMEM). */
828 static int init_er(MpegEncContext *s)
830 ERContext *er = &s->er;
831 int mb_array_size = s->mb_height * s->mb_stride;
834 er->avctx = s->avctx;
/* Mirror the macroblock geometry into the ER context. */
837 er->mb_index2xy = s->mb_index2xy;
838 er->mb_num = s->mb_num;
839 er->mb_width = s->mb_width;
840 er->mb_height = s->mb_height;
841 er->mb_stride = s->mb_stride;
842 er->b8_stride = s->b8_stride;
844 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
845 er->error_status_table = av_mallocz(mb_array_size);
846 if (!er->er_temp_buffer || !er->error_status_table)
849 er->mbskip_table = s->mbskip_table;
850 er->mbintra_table = s->mbintra_table;
852 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
853 er->dc_val[i] = s->dc_val[i];
855 er->decode_mb = mpeg_er_decode_mb;
859 av_freep(&er->er_temp_buffer);
860 av_freep(&er->error_status_table);
861 return AVERROR(ENOMEM);
866 * Initialize and allocates MpegEncContext fields dependent on the resolution.
868 static int init_context_frame(MpegEncContext *s)
870 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Derive macroblock geometry from the frame width; all strides include
 * one extra column for edge handling. */
872 s->mb_width = (s->width + 15) / 16;
873 s->mb_stride = s->mb_width + 1;
874 s->b8_stride = s->mb_width * 2 + 1;
875 s->b4_stride = s->mb_width * 4 + 1;
876 mb_array_size = s->mb_height * s->mb_stride;
877 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
879 /* set default edge pos, will be overriden
880 * in decode_header if needed */
881 s->h_edge_pos = s->mb_width * 16;
882 s->v_edge_pos = s->mb_height * 16;
884 s->mb_num = s->mb_width * s->mb_height;
889 s->block_wrap[3] = s->b8_stride;
891 s->block_wrap[5] = s->mb_stride;
893 y_size = s->b8_stride * (2 * s->mb_height + 1);
894 c_size = s->mb_stride * (s->mb_height + 1);
895 yc_size = y_size + 2 * c_size;
897 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
898 for (y = 0; y < s->mb_height; y++)
899 for (x = 0; x < s->mb_width; x++)
900 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
902 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
905 /* Allocate MV tables */
906 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
907 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
908 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
909 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
910 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
911 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
/* Working pointers skip the first guard row/column of each MV table. */
912 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
913 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
914 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
915 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
916 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
917 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
919 /* Allocate MB type table */
920 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
922 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
924 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
925 mb_array_size * sizeof(float), fail);
926 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
927 mb_array_size * sizeof(float), fail);
931 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
932 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
933 /* interlaced direct mode decoding tables */
934 for (i = 0; i < 2; i++) {
936 for (j = 0; j < 2; j++) {
937 for (k = 0; k < 2; k++) {
938 FF_ALLOCZ_OR_GOTO(s->avctx,
939 s->b_field_mv_table_base[i][j][k],
940 mv_table_size * 2 * sizeof(int16_t),
942 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
945 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
946 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
947 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
949 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
952 if (s->out_format == FMT_H263) {
954 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
955 s->coded_block = s->coded_block_base + s->b8_stride + 1;
957 /* cbp, ac_pred, pred_dir */
958 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
959 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
962 if (s->h263_pred || s->h263_plus || !s->encoding) {
964 // MN: we need these for error resilience of intra-frames
965 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
966 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
967 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
968 s->dc_val[2] = s->dc_val[1] + c_size;
/* DC predictors start at the mid value 1024 (= 128 << 3). */
969 for (i = 0; i < yc_size; i++)
970 s->dc_val_base[i] = 1024;
973 /* which mb is a intra block */
974 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
975 memset(s->mbintra_table, 1, mb_array_size);
977 /* init macroblock skip table */
978 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
979 // Note the + 1 is for a quicker mpeg4 slice_end detection
983 return AVERROR(ENOMEM);
987 * init common structure for both encoder and decoder.
988 * this assumes that some variables like width/height are already set
990 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* Number of slice contexts: thread count when slice threading is
 * active, otherwise 1 (encoder may force avctx->slices). */
993 int nb_slices = (HAVE_THREADS &&
994 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
995 s->avctx->thread_count : 1;
997 if (s->encoding && s->avctx->slices)
998 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 needs mb_height rounded to a multiple of two
 * 16-line macroblock rows. */
1000 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1001 s->mb_height = (s->height + 31) / 32 * 2;
1003 s->mb_height = (s->height + 15) / 16;
1005 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1006 av_log(s->avctx, AV_LOG_ERROR,
1007 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp slice count to what the frame height and MAX_THREADS allow. */
1011 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1014 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1016 max_slices = MAX_THREADS;
1017 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1018 " reducing to %d\n", nb_slices, max_slices);
1019 nb_slices = max_slices;
1022 if ((s->width || s->height) &&
1023 av_image_check_size(s->width, s->height, 0, s->avctx))
1026 ff_dct_common_init(s);
1028 s->flags = s->avctx->flags;
1029 s->flags2 = s->avctx->flags2;
1031 /* set chroma shifts */
1032 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
1034 /* convert fourcc to upper case */
1035 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1036 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1038 s->avctx->coded_frame = &s->current_picture.f;
/* Encoder-only allocations (stats, quant matrices, reordering queues). */
1041 if (s->msmpeg4_version) {
1042 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1043 2 * 2 * (MAX_LEVEL + 1) *
1044 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1046 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1048 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
1049 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
1050 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
1051 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1052 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1053 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1054 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1055 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1057 if (s->avctx->noise_reduction) {
1058 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
/* Picture pool plus the three working Picture slots. */
1062 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1063 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1064 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1065 avcodec_get_frame_defaults(&s->picture[i].f);
1067 memset(&s->next_picture, 0, sizeof(s->next_picture));
1068 memset(&s->last_picture, 0, sizeof(s->last_picture));
1069 memset(&s->current_picture, 0, sizeof(s->current_picture));
1070 avcodec_get_frame_defaults(&s->next_picture.f);
1071 avcodec_get_frame_defaults(&s->last_picture.f);
1072 avcodec_get_frame_defaults(&s->current_picture.f);
1074 if (init_context_frame(s))
1077 s->parse_context.state = -1;
1079 s->context_initialized = 1;
1080 s->thread_context[0] = s;
1082 // if (s->width && s->height) {
/* Clone the context for each extra slice thread and split the MB rows
 * evenly across all slice contexts. */
1083 if (nb_slices > 1) {
1084 for (i = 1; i < nb_slices; i++) {
1085 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1086 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1089 for (i = 0; i < nb_slices; i++) {
1090 if (init_duplicate_context(s->thread_context[i]) < 0)
1092 s->thread_context[i]->start_mb_y =
1093 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1094 s->thread_context[i]->end_mb_y =
1095 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1098 if (init_duplicate_context(s) < 0)
1101 s->end_mb_y = s->mb_height;
1103 s->slice_context_count = nb_slices;
1108 ff_MPV_common_end(s);
1113 * Frees and resets MpegEncContext fields depending on the resolution.
1114 * Is used during resolution changes to avoid a full reinitialization of the
1117 static int free_context_frame(MpegEncContext *s)
/* Free the MV tables allocated in init_context_frame() and clear the
 * derived working pointers that aliased into them. */
1121 av_freep(&s->mb_type);
1122 av_freep(&s->p_mv_table_base);
1123 av_freep(&s->b_forw_mv_table_base);
1124 av_freep(&s->b_back_mv_table_base);
1125 av_freep(&s->b_bidir_forw_mv_table_base);
1126 av_freep(&s->b_bidir_back_mv_table_base);
1127 av_freep(&s->b_direct_mv_table_base);
1128 s->p_mv_table = NULL;
1129 s->b_forw_mv_table = NULL;
1130 s->b_back_mv_table = NULL;
1131 s->b_bidir_forw_mv_table = NULL;
1132 s->b_bidir_back_mv_table = NULL;
1133 s->b_direct_mv_table = NULL;
1134 for (i = 0; i < 2; i++) {
1135 for (j = 0; j < 2; j++) {
1136 for (k = 0; k < 2; k++) {
1137 av_freep(&s->b_field_mv_table_base[i][j][k]);
1138 s->b_field_mv_table[i][j][k] = NULL;
1140 av_freep(&s->b_field_select_table[i][j]);
1141 av_freep(&s->p_field_mv_table_base[i][j]);
1142 s->p_field_mv_table[i][j] = NULL;
1144 av_freep(&s->p_field_select_table[i]);
1147 av_freep(&s->dc_val_base);
1148 av_freep(&s->coded_block_base);
1149 av_freep(&s->mbintra_table);
1150 av_freep(&s->cbp_table);
1151 av_freep(&s->pred_dir_table);
1153 av_freep(&s->mbskip_table);
1155 av_freep(&s->er.error_status_table);
1156 av_freep(&s->er.er_temp_buffer);
1157 av_freep(&s->mb_index2xy);
1158 av_freep(&s->lambda_table);
1160 av_freep(&s->cplx_tab);
1161 av_freep(&s->bits_tab);
/* Zero strides so the next allocated frame re-establishes them. */
1163 s->linesize = s->uvlinesize = 0;
1168 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Re-initializes the context for a new frame size without a full
 * ff_MPV_common_end()/ff_MPV_common_init() cycle: tears down the
 * per-slice duplicate contexts and the resolution-dependent tables,
 * then rebuilds both at the new dimensions.
 * On failure the whole context is torn down via ff_MPV_common_end(). */
1172     if (s->slice_context_count > 1) {
1173         for (i = 0; i < s->slice_context_count; i++) {
1174             free_duplicate_context(s->thread_context[i]);
/* thread_context[0] is s itself, so only indices >= 1 were malloc'ed. */
1176         for (i = 1; i < s->slice_context_count; i++) {
1177             av_freep(&s->thread_context[i]);
1180         free_duplicate_context(s);
1182     if ((err = free_context_frame(s)) < 0)
/* Force every cached picture to be reallocated at the new size on next use. */
1186     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1187         s->picture[i].needs_realloc = 1;
1190     s->last_picture_ptr         =
1191     s->next_picture_ptr         =
1192     s->current_picture_ptr      = NULL;
/* Interlaced MPEG-2 rounds mb_height to a multiple of two macroblock rows
 * (32 luma lines) so field pictures always get an even row count. */
1195     if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1196         s->mb_height = (s->height + 31) / 32 * 2;
1198         s->mb_height = (s->height + 15) / 16;
1200     if ((s->width || s->height) &&
1201         av_image_check_size(s->width, s->height, 0, s->avctx))
1202         return AVERROR_INVALIDDATA;
1204     if ((err = init_context_frame(s)))
1207     s->thread_context[0]        = s;
1209     if (s->width && s->height) {
1210         int nb_slices = s->slice_context_count;
1211         if (nb_slices > 1) {
1212             for (i = 1; i < nb_slices; i++) {
/* NOTE(review): av_malloc return is not checked before the memcpy here —
 * an OOM would crash; upstream later hardened this path. */
1213                 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1214                 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1217             for (i = 0; i < nb_slices; i++) {
1218                 if (init_duplicate_context(s->thread_context[i]) < 0)
/* Rounded partition of the MB rows across slice contexts. */
1220                 s->thread_context[i]->start_mb_y =
1221                     (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1222                 s->thread_context[i]->end_mb_y   =
1223                     (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1226             err = init_duplicate_context(s);
1230             s->end_mb_y   = s->mb_height;
1232         s->slice_context_count = nb_slices;
/* Error path: give up and free everything. */
1237     ff_MPV_common_end(s);
1241 /* init common structure for both encoder and decoder */
1242 void ff_MPV_common_end(MpegEncContext *s)
/* Full teardown of an MpegEncContext: slice duplicate contexts, bitstream
 * and parse buffers, quantization matrices, all Picture storage and the
 * resolution-dependent tables. Safe to call on a partially initialized
 * context because av_freep()/free on NULL is a no-op. */
1246     if (s->slice_context_count > 1) {
1247         for (i = 0; i < s->slice_context_count; i++) {
1248             free_duplicate_context(s->thread_context[i]);
/* Index 0 aliases s, so only 1..count-1 are owned allocations. */
1250         for (i = 1; i < s->slice_context_count; i++) {
1251             av_freep(&s->thread_context[i]);
1253         s->slice_context_count = 1;
1254     } else free_duplicate_context(s);
1256     av_freep(&s->parse_context.buffer);
1257     s->parse_context.buffer_size = 0;
1259     av_freep(&s->bitstream_buffer);
1260     s->allocated_bitstream_buffer_size = 0;
1262     av_freep(&s->avctx->stats_out);
1263     av_freep(&s->ac_stats);
/* The chroma intra matrices may alias the luma ones; only free them when
 * they are separate allocations, then clear the pointers either way. */
1265     if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
1266     if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1267     s->q_chroma_intra_matrix=   NULL;
1268     s->q_chroma_intra_matrix16= NULL;
1269     av_freep(&s->q_intra_matrix);
1270     av_freep(&s->q_inter_matrix);
1271     av_freep(&s->q_intra_matrix16);
1272     av_freep(&s->q_inter_matrix16);
1273     av_freep(&s->input_picture);
1274     av_freep(&s->reordered_input_picture);
1275     av_freep(&s->dct_offset);
/* Release every slot in the picture pool before freeing the pool itself. */
1278     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1279         free_picture_tables(&s->picture[i]);
1280         ff_mpeg_unref_picture(s, &s->picture[i]);
1283     av_freep(&s->picture);
/* The embedded copies (last/current/next/new) hold their own references. */
1284     free_picture_tables(&s->last_picture);
1285     ff_mpeg_unref_picture(s, &s->last_picture);
1286     free_picture_tables(&s->current_picture);
1287     ff_mpeg_unref_picture(s, &s->current_picture);
1288     free_picture_tables(&s->next_picture);
1289     ff_mpeg_unref_picture(s, &s->next_picture);
1290     free_picture_tables(&s->new_picture);
1291     ff_mpeg_unref_picture(s, &s->new_picture);
1293     free_context_frame(s);
1295     s->context_initialized      = 0;
1296     s->last_picture_ptr         =
1297     s->next_picture_ptr         =
1298     s->current_picture_ptr      = NULL;
1299     s->linesize = s->uvlinesize = 0;
1302 av_cold void ff_init_rl(RLTable *rl,
1303                         uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
/* Builds the derived lookup tables (max_level[], max_run[], index_run[])
 * for a run-length table, once per "last" flag (0 = not last coeff,
 * 1 = last coeff). When static_store is non-NULL the results are placed
 * into that caller-provided static buffer instead of heap allocations. */
1305     int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1306     uint8_t index_run[MAX_RUN + 1];
1307     int     last, run, level, start, end, i;
1309     /* If table is static, we can quit if rl->max_level[0] is not NULL */
1310     if (static_store && rl->max_level[0])
1313     /* compute max_level[], max_run[] and index_run[] */
1314     for (last = 0; last < 2; last++) {
/* rl->n is used as the "unset" sentinel in index_run[]. */
1323         memset(max_level, 0, MAX_RUN + 1);
1324         memset(max_run, 0, MAX_LEVEL + 1);
1325         memset(index_run, rl->n, MAX_RUN + 1);
1326         for (i = start; i < end; i++) {
1327             run   = rl->table_run[i];
1328             level = rl->table_level[i];
1329             if (index_run[run] == rl->n)
1331             if (level > max_level[run])
1332                 max_level[run] = level;
1333             if (run > max_run[level])
1334                 max_run[level] = run;
/* Carve the three sub-tables out of static_store[last] at fixed offsets,
 * or allocate each independently in the dynamic case. */
1337             rl->max_level[last] = static_store[last];
1339             rl->max_level[last] = av_malloc(MAX_RUN + 1);
1340         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1342             rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
1344             rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
1345         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1347             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1349             rl->index_run[last] = av_malloc(MAX_RUN + 1);
1350         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1354 av_cold void ff_init_vlc_rl(RLTable *rl)
/* Pre-dequantizes the RL VLC table for each of the 32 qscale values so the
 * decoder can read (len, level, run) in one lookup without multiplying by
 * qscale per coefficient. */
1358     for (q = 0; q < 32; q++) {
/* H.263-style dequant: level' = level * qmul + qadd, qadd odd. */
1360         int qadd = (q - 1) | 1;
1366         for (i = 0; i < rl->vlc.table_size; i++) {
1367             int code = rl->vlc.table[i][0];
1368             int len  = rl->vlc.table[i][1];
1371             if (len == 0) { // illegal code
1374             } else if (len < 0) { // more bits needed
1378                 if (code == rl->n) { // esc
/* run is stored off-by-one; codes past rl->last mark "last coeff" via +192. */
1382                     run   = rl->table_run[code] + 1;
1383                     level = rl->table_level[code] * qmul + qadd;
1384                     if (code >= rl->last) run += 192;
1387             rl->rl_vlc[q][i].len   = len;
1388             rl->rl_vlc[q][i].level = level;
1389             rl->rl_vlc[q][i].run   = run;
1394 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
/* Unreferences every non-reference picture in the pool; the picture the
 * decoder is currently writing to is spared unless remove_current is set. */
1398     /* release non reference frames */
1399     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1400         if (!s->picture[i].reference &&
1401             (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1402             ff_mpeg_unref_picture(s, &s->picture[i]);
/* Returns whether this pool slot can be handed out: never the last picture,
 * and otherwise either an unallocated slot (data[0] == NULL) or one flagged
 * for reallocation that is not held as a delayed reference. */
1407 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1409     if (pic == s->last_picture_ptr)
1411     if (pic->f.data[0] == NULL)
1413     if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1418 static int find_unused_picture(MpegEncContext *s, int shared)
/* Scans the picture pool for a free slot and returns its index.
 * First pass prefers completely empty slots (so shared/realloc-pending
 * slots are kept intact when possible); second pass accepts anything
 * pic_is_unused() allows. Pool exhaustion is a hard internal error. */
1423         for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1424             if (s->picture[i].f.data[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1428         for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1429             if (pic_is_unused(s, &s->picture[i]))
1434     av_log(s->avctx, AV_LOG_FATAL,
1435            "Internal error, picture buffer overflow\n");
1436     /* We could return -1, but the codec would crash trying to draw into a
1437      * non-existing frame anyway. This is safer than waiting for a random crash.
1438      * Also the return of this is never useful, an encoder must only allocate
1439      * as much as allowed in the specification. This has no relationship to how
1440      * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1441      * enough for such valid streams).
1442      * Plus, a decoder has to check stream validity and remove frames if too
1443      * many reference frames are around. Waiting for "OOM" is not correct at
1444      * all. Similarly, missing reference frames have to be replaced by
1445      * interpolated/MC frames, anything else is a bug in the codec ...
1451 int ff_find_unused_picture(MpegEncContext *s, int shared)
/* Public wrapper around find_unused_picture(): if the chosen slot was
 * flagged needs_realloc (e.g. after a size change), drop its old buffers
 * and reset its AVFrame before returning the index to the caller. */
1453     int ret = find_unused_picture(s, shared);
1455     if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1456         if (s->picture[ret].needs_realloc) {
1457             s->picture[ret].needs_realloc = 0;
1458             free_picture_tables(&s->picture[ret]);
1459             ff_mpeg_unref_picture(s, &s->picture[ret]);
1460             avcodec_get_frame_defaults(&s->picture[ret].f);
1466 static void update_noise_reduction(MpegEncContext *s)
/* Encoder-side: refresh the per-coefficient DCT noise-reduction offsets
 * from the accumulated error statistics, separately for intra and inter
 * blocks. Counters are halved once they pass 2^16 to keep a decaying
 * average instead of an unbounded sum. */
1470     for (intra = 0; intra < 2; intra++) {
1471         if (s->dct_count[intra] > (1 << 16)) {
1472             for (i = 0; i < 64; i++) {
1473                 s->dct_error_sum[intra][i] >>= 1;
1475             s->dct_count[intra] >>= 1;
1478         for (i = 0; i < 64; i++) {
/* offset = noise_reduction * count / error_sum, rounded; +1 avoids /0. */
1479             s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1480                                        s->dct_count[intra] +
1481                                        s->dct_error_sum[intra][i] / 2) /
1482                                       (s->dct_error_sum[intra][i] + 1);
1488  * generic function for encode/decode called after coding/decoding
1489  * the header and before a frame is coded/decoded.
1491 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* Prepares current/last/next picture state for one frame:
 * recycles dead pool entries, allocates the current picture, fabricates
 * grey dummy reference frames when a stream starts on a non-keyframe,
 * fixes up field-picture pointers/linesizes, and selects the dequantizer
 * set for the codec in use. Returns < 0 on failure (error paths elided
 * here propagate through the usual -1/AVERROR returns). */
1497     if (!ff_thread_can_start_frame(avctx)) {
1498         av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1502     /* mark & release old frames */
1503     if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1504         s->last_picture_ptr != s->next_picture_ptr &&
1505         s->last_picture_ptr->f.data[0]) {
1506         ff_mpeg_unref_picture(s, s->last_picture_ptr);
1509     /* release forgotten pictures */
1510     /* if (mpeg124/h263) */
1512         for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1513             if (&s->picture[i] != s->last_picture_ptr &&
1514                 &s->picture[i] != s->next_picture_ptr &&
1515                 s->picture[i].reference && !s->picture[i].needs_realloc) {
/* A referenced picture that is neither last nor next should not exist in
 * single-thread decoding — report it as a zombie before dropping it. */
1516                 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1517                     av_log(avctx, AV_LOG_ERROR,
1518                            "releasing zombie picture\n");
1519                 ff_mpeg_unref_picture(s, &s->picture[i]);
1524     ff_mpeg_unref_picture(s, &s->current_picture);
1527         ff_release_unused_pictures(s, 1);
1529     if (s->current_picture_ptr &&
1530         s->current_picture_ptr->f.data[0] == NULL) {
1531         // we already have a unused image
1532         // (maybe it was set before reading the header)
1533         pic = s->current_picture_ptr;
1535         i = ff_find_unused_picture(s, 0);
1537             av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1540         pic = &s->picture[i];
/* Droppable frames are never used as references. */
1544     if (!s->droppable) {
1545         if (s->pict_type != AV_PICTURE_TYPE_B)
1549     pic->f.coded_picture_number = s->coded_picture_number++;
1551     if (ff_alloc_picture(s, pic, 0) < 0)
1554     s->current_picture_ptr = pic;
1555     // FIXME use only the vars from current_pic
1556     s->current_picture_ptr->f.top_field_first = s->top_field_first;
1557     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1558         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* For field pictures, TFF is derived from which field arrives first. */
1559         if (s->picture_structure != PICT_FRAME)
1560             s->current_picture_ptr->f.top_field_first =
1561                 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1563     s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1564                                                  !s->progressive_sequence;
1565     s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
1568     s->current_picture_ptr->f.pict_type = s->pict_type;
1569     // if (s->flags && CODEC_FLAG_QSCALE)
1570     //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1571     s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1573     if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1574                                    s->current_picture_ptr)) < 0)
/* Reference rotation: for non-B frames, next becomes last and the new
 * current picture becomes next. */
1577     if (s->pict_type != AV_PICTURE_TYPE_B) {
1578         s->last_picture_ptr = s->next_picture_ptr;
1580             s->next_picture_ptr = s->current_picture_ptr;
1582     av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1583             s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1584             s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1585             s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1586             s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1587             s->pict_type, s->droppable);
/* Missing backward reference (stream starts on P/B or on a field): make a
 * grey dummy frame so motion compensation has something to read from. */
1589     if ((s->last_picture_ptr == NULL ||
1590          s->last_picture_ptr->f.data[0] == NULL) &&
1591         (s->pict_type != AV_PICTURE_TYPE_I ||
1592          s->picture_structure != PICT_FRAME)) {
1593         int h_chroma_shift, v_chroma_shift;
1594         av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1595                                          &h_chroma_shift, &v_chroma_shift);
1596         if (s->pict_type != AV_PICTURE_TYPE_I)
1597             av_log(avctx, AV_LOG_ERROR,
1598                    "warning: first frame is no keyframe\n");
1599         else if (s->picture_structure != PICT_FRAME)
1600             av_log(avctx, AV_LOG_INFO,
1601                    "allocate dummy last picture for field based first keyframe\n");
1603         /* Allocate a dummy frame */
1604         i = ff_find_unused_picture(s, 0);
1606             av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1609         s->last_picture_ptr = &s->picture[i];
1610         s->last_picture_ptr->f.key_frame = 0;
1611         if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1612             s->last_picture_ptr = NULL;
/* 0x80 = mid grey for both luma and chroma planes. */
1616             memset(s->last_picture_ptr->f.data[0], 0x80,
1617                    avctx->height * s->last_picture_ptr->f.linesize[0]);
1618             memset(s->last_picture_ptr->f.data[1], 0x80,
1619                    (avctx->height >> v_chroma_shift) *
1620                    s->last_picture_ptr->f.linesize[1]);
1621             memset(s->last_picture_ptr->f.data[2], 0x80,
1622                    (avctx->height >> v_chroma_shift) *
1623                    s->last_picture_ptr->f.linesize[2]);
/* H.263/FLV use luma 16 (black) for the dummy instead of grey. */
1625             if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1626                 for(i=0; i<avctx->height; i++)
1627                     memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
/* Mark both fields of the dummy as fully decoded for frame threading. */
1630         ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1631         ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same trick for a missing forward reference when decoding a B frame. */
1633     if ((s->next_picture_ptr == NULL ||
1634          s->next_picture_ptr->f.data[0] == NULL) &&
1635         s->pict_type == AV_PICTURE_TYPE_B) {
1636         /* Allocate a dummy frame */
1637         i = ff_find_unused_picture(s, 0);
1639             av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1642         s->next_picture_ptr = &s->picture[i];
1643         s->next_picture_ptr->f.key_frame = 0;
1644         if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1645             s->next_picture_ptr = NULL;
1648         ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1649         ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1652 #if 0 // BUFREF-FIXME
1653     memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1654     memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
/* Refresh the embedded last/next copies from the pool pointers. */
1656     if (s->last_picture_ptr) {
1657         ff_mpeg_unref_picture(s, &s->last_picture);
1658         if (s->last_picture_ptr->f.data[0] &&
1659             (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1660                                        s->last_picture_ptr)) < 0)
1663     if (s->next_picture_ptr) {
1664         ff_mpeg_unref_picture(s, &s->next_picture);
1665         if (s->next_picture_ptr->f.data[0] &&
1666             (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1667                                        s->next_picture_ptr)) < 0)
1671     assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1672                                                  s->last_picture_ptr->f.data[0]));
/* Field picture hack: point data at the selected field and double the
 * linesizes so the field is addressed like a half-height frame. */
1674     if (s->picture_structure!= PICT_FRAME) {
1676         for (i = 0; i < 4; i++) {
1677             if (s->picture_structure == PICT_BOTTOM_FIELD) {
1678                 s->current_picture.f.data[i] +=
1679                     s->current_picture.f.linesize[i];
1681             s->current_picture.f.linesize[i] *= 2;
1682             s->last_picture.f.linesize[i]    *= 2;
1683             s->next_picture.f.linesize[i]    *= 2;
1687     s->err_recognition = avctx->err_recognition;
1689     /* set dequantizer, we can't do it during init as
1690      * it might change for mpeg4 and we can't do it in the header
1691      * decode as init is not called for mpeg4 there yet */
1692     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1693         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1694         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1695     } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1696         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1697         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1699         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1700         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1703     if (s->dct_error_sum) {
1704         av_assert2(s->avctx->noise_reduction && s->encoding);
1705         update_noise_reduction(s);
1708     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1709         return ff_xvmc_field_start(s, avctx);
1714 /* generic function for encode/decode called after a
1715  * frame has been coded/decoded. */
1716 void ff_MPV_frame_end(MpegEncContext *s)
/* Per-frame teardown: draws the replicated edge borders around the
 * reconstructed reference picture (needed for unrestricted MVs), records
 * last-frame statistics, clears the temporary picture copies, and reports
 * decode completion to frame threads. */
1718     /* redraw edges for the frame if decoding didn't complete */
1719     // just to make sure that all data is rendered.
1720     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1721         ff_xvmc_field_end(s);
/* Edge extension only for software-decoded reference frames that MC may
 * read outside the picture (unrestricted_mv), skipping hwaccel/VDPAU and
 * EMU_EDGE mode where the edges are emulated on the fly instead. */
1722     } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1723                !s->avctx->hwaccel &&
1724                !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1725                s->unrestricted_mv &&
1726                s->current_picture.reference &&
1728                !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1731         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1732         int hshift = desc->log2_chroma_w;
1733         int vshift = desc->log2_chroma_h;
1734         s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1735                           s->h_edge_pos, s->v_edge_pos,
1736                           EDGE_WIDTH, EDGE_WIDTH,
1737                           EDGE_TOP | EDGE_BOTTOM);
1738         s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1739                           s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1740                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1741                           EDGE_TOP | EDGE_BOTTOM);
1742         s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1743                           s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1744                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1745                           EDGE_TOP | EDGE_BOTTOM);
1750     s->last_pict_type                 = s->pict_type;
1751     s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1752     if (s->pict_type!= AV_PICTURE_TYPE_B) {
1753         s->last_non_b_pict_type = s->pict_type;
1756     /* copy back current_picture variables */
1757     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1758         if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1759             s->picture[i] = s->current_picture;
1763     assert(i < MAX_PICTURE_COUNT);
1766     // clear copies, to avoid confusion
1768     memset(&s->last_picture,    0, sizeof(Picture));
1769     memset(&s->next_picture,    0, sizeof(Picture));
1770     memset(&s->current_picture, 0, sizeof(Picture));
1772     s->avctx->coded_frame = &s->current_picture_ptr->f;
1774     if (s->current_picture.reference)
1775         ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1779  * Draw a line from (ex, ey) -> (sx, sy).
1780  * @param w width of the image
1781  * @param h height of the image
1782  * @param stride stride/linesize of the image
1783  * @param color color of the arrow
1785 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1786                       int w, int h, int stride, int color)
/* Anti-aliased line via 16.16 fixed-point interpolation: the color is
 * split between the two pixels straddling the ideal line, weighted by the
 * fractional position. Used only by the motion-vector debug overlay. */
1790     sx = av_clip(sx, 0, w - 1);
1791     sy = av_clip(sy, 0, h - 1);
1792     ex = av_clip(ex, 0, w - 1);
1793     ey = av_clip(ey, 0, h - 1);
1795     buf[sy * stride + sx] += color;
/* Iterate along the major axis; swap endpoints so we always step forward. */
1797     if (FFABS(ex - sx) > FFABS(ey - sy)) {
1799             FFSWAP(int, sx, ex);
1800             FFSWAP(int, sy, ey);
1802         buf += sx + sy * stride;
1804             f = ((ey - sy) << 16) / ex;
1805         for (x = 0; x <= ex; x++) {
1807             fr = (x * f) & 0xFFFF;
1808             buf[y * stride + x]       += (color * (0x10000 - fr)) >> 16;
1809             if(fr) buf[(y + 1) * stride + x] += (color *            fr ) >> 16;
/* Mostly-vertical case: step along y instead. */
1813             FFSWAP(int, sx, ex);
1814             FFSWAP(int, sy, ey);
1816         buf += sx + sy * stride;
1819             f = ((ex - sx) << 16) / ey;
1822         for(y= 0; y <= ey; y++){
1824             fr = (y*f) & 0xFFFF;
1825             buf[y * stride + x]     += (color * (0x10000 - fr)) >> 16;
1826             if(fr) buf[y * stride + x + 1] += (color *            fr ) >> 16;
1832  * Draw an arrow from (ex, ey) -> (sx, sy).
1833  * @param w width of the image
1834  * @param h height of the image
1835  * @param stride stride/linesize of the image
1836  * @param color color of the arrow
1838 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1839                        int ey, int w, int h, int stride, int color)
/* Draws the shaft plus two short head strokes at the start point; the head
 * is only added when the vector is long enough (> 3 px) to be visible. */
1843     sx = av_clip(sx, -100, w + 100);
1844     sy = av_clip(sy, -100, h + 100);
1845     ex = av_clip(ex, -100, w + 100);
1846     ey = av_clip(ey, -100, h + 100);
1851     if (dx * dx + dy * dy > 3 * 3) {
/* Normalize the direction to a fixed head length using integer sqrt. */
1854         int length = ff_sqrt((rx * rx + ry * ry) << 8);
1856         // FIXME subpixel accuracy
1857         rx = ROUNDED_DIV(rx * 3 << 4, length);
1858         ry = ROUNDED_DIV(ry * 3 << 4, length);
1860         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1861         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1863     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1867  * Print debugging info for the given picture.
1869 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1871                          int mb_width, int mb_height, int mb_stride, int quarter_sample)
/* Two debug facilities in one function:
 * 1) textual per-macroblock dumps (skip counts, qscale, MB-type glyphs)
 *    when FF_DEBUG_SKIP/QP/MB_TYPE are set;
 * 2) visual overlays drawn directly into `pict` (motion-vector arrows,
 *    qscale as chroma shading, MB types as colors/markers) when
 *    debug_mv / FF_DEBUG_VIS_* are set.
 * Skipped entirely for hwaccel frames, where p->mb_type is unavailable. */
1873     if (avctx->hwaccel || !p || !p->mb_type
1874         || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
1878     if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1881         av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1882                av_get_picture_type_char(pict->pict_type));
1883         for (y = 0; y < mb_height; y++) {
1884             for (x = 0; x < mb_width; x++) {
1885                 if (avctx->debug & FF_DEBUG_SKIP) {
1886                     int count = mbskip_table[x + y * mb_stride];
1889                     av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1891                 if (avctx->debug & FF_DEBUG_QP) {
1892                     av_log(avctx, AV_LOG_DEBUG, "%2d",
1893                            p->qscale_table[x + y * mb_stride]);
1895                 if (avctx->debug & FF_DEBUG_MB_TYPE) {
1896                     int mb_type = p->mb_type[x + y * mb_stride];
1897                     // Type & MV direction
/* One glyph per MB: P=PCM, A=intra+ACpred, i/I=intra4x4/16x16, d/D=direct,
 * g/G=GMC, S=skip, >/< = fwd/bwd only, X=bidirectional. */
1898                     if (IS_PCM(mb_type))
1899                         av_log(avctx, AV_LOG_DEBUG, "P");
1900                     else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1901                         av_log(avctx, AV_LOG_DEBUG, "A");
1902                     else if (IS_INTRA4x4(mb_type))
1903                         av_log(avctx, AV_LOG_DEBUG, "i");
1904                     else if (IS_INTRA16x16(mb_type))
1905                         av_log(avctx, AV_LOG_DEBUG, "I");
1906                     else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1907                         av_log(avctx, AV_LOG_DEBUG, "d");
1908                     else if (IS_DIRECT(mb_type))
1909                         av_log(avctx, AV_LOG_DEBUG, "D");
1910                     else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1911                         av_log(avctx, AV_LOG_DEBUG, "g");
1912                     else if (IS_GMC(mb_type))
1913                         av_log(avctx, AV_LOG_DEBUG, "G");
1914                     else if (IS_SKIP(mb_type))
1915                         av_log(avctx, AV_LOG_DEBUG, "S");
1916                     else if (!USES_LIST(mb_type, 1))
1917                         av_log(avctx, AV_LOG_DEBUG, ">");
1918                     else if (!USES_LIST(mb_type, 0))
1919                         av_log(avctx, AV_LOG_DEBUG, "<");
1921                         av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1922                         av_log(avctx, AV_LOG_DEBUG, "X");
/* Second glyph: partitioning (+ - | or blank), third: = for interlaced. */
1926                     if (IS_8X8(mb_type))
1927                         av_log(avctx, AV_LOG_DEBUG, "+");
1928                     else if (IS_16X8(mb_type))
1929                         av_log(avctx, AV_LOG_DEBUG, "-");
1930                     else if (IS_8X16(mb_type))
1931                         av_log(avctx, AV_LOG_DEBUG, "|");
1932                     else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1933                         av_log(avctx, AV_LOG_DEBUG, " ");
1935                         av_log(avctx, AV_LOG_DEBUG, "?");
1938                     if (IS_INTERLACED(mb_type))
1939                         av_log(avctx, AV_LOG_DEBUG, "=");
1941                         av_log(avctx, AV_LOG_DEBUG, " ");
1944             av_log(avctx, AV_LOG_DEBUG, "\n");
1948     if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1949         (avctx->debug_mv)) {
1950         const int shift = 1 + quarter_sample;
1954         int h_chroma_shift, v_chroma_shift, block_height;
1955         const int width          = avctx->width;
1956         const int height         = avctx->height;
/* H.264/SVQ3 store MVs at 4x4 granularity (log2 = 2), others at 8x8. */
1957         const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1958         const int mv_stride      = (mb_width << mv_sample_log2) +
1959                                    (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1961         *low_delay = 0; // needed to see the vectors without trashing the buffers
1963         avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* The overlays scribble on the output frame, so make it writable first. */
1965         av_frame_make_writable(pict);
1967         pict->opaque = NULL;
1968         ptr          = pict->data[0];
1969         block_height = 16 >> v_chroma_shift;
1971         for (mb_y = 0; mb_y < mb_height; mb_y++) {
1973             for (mb_x = 0; mb_x < mb_width; mb_x++) {
1974                 const int mb_index = mb_x + mb_y * mb_stride;
1975                 if ((avctx->debug_mv) && p->motion_val[0]) {
/* type: 0 = P forward, 1 = B forward, 2 = B backward. */
1977                     for (type = 0; type < 3; type++) {
1981                             if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1982                                 (pict->pict_type!= AV_PICTURE_TYPE_P))
1987                             if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1988                                 (pict->pict_type!= AV_PICTURE_TYPE_B))
1993                             if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1994                                 (pict->pict_type!= AV_PICTURE_TYPE_B))
1999                         if (!USES_LIST(p->mb_type[mb_index], direction))
/* One arrow per partition: 4 for 8x8, 2 for 16x8 / 8x16, else 1 per MB. */
2002                         if (IS_8X8(p->mb_type[mb_index])) {
2004                             for (i = 0; i < 4; i++) {
2005                                 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2006                                 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2007                                 int xy = (mb_x * 2 + (i & 1) +
2008                                          (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2009                                 int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
2010                                 int my = (p->motion_val[direction][xy][1] >> shift) + sy;
2011                                 draw_arrow(ptr, sx, sy, mx, my, width,
2012                                            height, pict->linesize[0], 100);
2014                         } else if (IS_16X8(p->mb_type[mb_index])) {
2016                             for (i = 0; i < 2; i++) {
2017                                 int sx = mb_x * 16 + 8;
2018                                 int sy = mb_y * 16 + 4 + 8 * i;
2019                                 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2020                                 int mx = (p->motion_val[direction][xy][0] >> shift);
2021                                 int my = (p->motion_val[direction][xy][1] >> shift);
2023                                 if (IS_INTERLACED(p->mb_type[mb_index]))
2026                                 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2027                                            height, pict->linesize[0], 100);
2029                         } else if (IS_8X16(p->mb_type[mb_index])) {
2031                             for (i = 0; i < 2; i++) {
2032                                 int sx = mb_x * 16 + 4 + 8 * i;
2033                                 int sy = mb_y * 16 + 8;
2034                                 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2035                                 int mx = p->motion_val[direction][xy][0] >> shift;
2036                                 int my = p->motion_val[direction][xy][1] >> shift;
2038                                 if (IS_INTERLACED(p->mb_type[mb_index]))
2041                                 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2042                                            height, pict->linesize[0], 100);
2045                               int sx= mb_x * 16 + 8;
2046                               int sy= mb_y * 16 + 8;
2047                               int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2048                               int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2049                               int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2050                               draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
/* Visual QP: paint the MB's chroma block with qscale scaled to 0..127. */
2054                 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2055                     uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2056                                  0x0101010101010101ULL;
2058                     for (y = 0; y < block_height; y++) {
2059                         *(uint64_t *)(pict->data[1] + 8 * mb_x +
2060                                       (block_height * mb_y + y) *
2061                                       pict->linesize[1]) = c;
2062                         *(uint64_t *)(pict->data[2] + 8 * mb_x +
2063                                       (block_height * mb_y + y) *
2064                                       pict->linesize[2]) = c;
2067                 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2069                     int mb_type = p->mb_type[mb_index];
/* Map each MB category onto a hue via the U/V plane (polar color wheel). */
2072 #define COLOR(theta, r) \
2073     u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2074     v = (int)(128 + r * sin(theta * 3.141592 / 180));
2078                     if (IS_PCM(mb_type)) {
2080                     } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2081                                IS_INTRA16x16(mb_type)) {
2083                     } else if (IS_INTRA4x4(mb_type)) {
2085                     } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2087                     } else if (IS_DIRECT(mb_type)) {
2089                     } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2091                     } else if (IS_GMC(mb_type)) {
2093                     } else if (IS_SKIP(mb_type)) {
2095                     } else if (!USES_LIST(mb_type, 1)) {
2097                     } else if (!USES_LIST(mb_type, 0)) {
2100                         av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* Broadcast the single byte across a 64-bit word for 8-pixel stores. */
2104                     u *= 0x0101010101010101ULL;
2105                     v *= 0x0101010101010101ULL;
2106                     for (y = 0; y < block_height; y++) {
2107                         *(uint64_t *)(pict->data[1] + 8 * mb_x +
2108                                       (block_height * mb_y + y) * pict->linesize[1]) = u;
2109                         *(uint64_t *)(pict->data[2] + 8 * mb_x +
2110                                       (block_height * mb_y + y) * pict->linesize[2]) = v;
/* Segmentation markers: XOR 0x80 lines show partition boundaries in luma. */
2114                     if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2115                         *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2116                                       (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2117                         *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2118                                       (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2120                     if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2121                         for (y = 0; y < 16; y++)
2122                             pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2123                                           pict->linesize[0]] ^= 0x80;
2125                     if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2126                         int dm = 1 << (mv_sample_log2 - 2);
2127                         for (i = 0; i < 4; i++) {
2128                             int sx = mb_x * 16 + 8 * (i & 1);
2129                             int sy = mb_y * 16 + 8 * (i >> 1);
2130                             int xy = (mb_x * 2 + (i & 1) +
2131                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2133                             int32_t *mv = (int32_t *) &p->motion_val[0][xy];
/* Only mark sub-8x8 splits when the MVs inside actually differ. */
2134                             if (mv[0] != mv[dm] ||
2135                                 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2136                                 for (y = 0; y < 8; y++)
2137                                     pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2138                             if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2139                                 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2140                                               pict->linesize[0]) ^= 0x8080808080808080ULL;
2144                     if (IS_INTERLACED(mb_type) &&
2145                         avctx->codec->id == AV_CODEC_ID_H264) {
2149                 mbskip_table[mb_index] = 0;
/* Convenience wrapper: forwards the MpegEncContext's own geometry and
 * tables to the generic ff_print_debug_info2(). */
2155 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2157     ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2158                          s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2161 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
/* Exports the per-MB qscale table as frame side data: takes a new
 * reference on the qscale buffer, skips the one-row + one-column edge
 * padding via `offset`, and attaches it to the frame.
 * Returns 0 on success or AVERROR(ENOMEM). */
2163     AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
/* Skip two padded MB rows plus one column so data[0] is the first real MB. */
2164     int offset = 2*s->mb_stride + 1;
2166         return AVERROR(ENOMEM);
2167     av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2168     ref->size -= offset;
2169     ref->data += offset;
2170     return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
2173 static inline int hpel_motion_lowres(MpegEncContext *s,
2174                                      uint8_t *dest, uint8_t *src,
2175                                      int field_based, int field_select,
2176                                      int src_x, int src_y,
2177                                      int width, int height, int stride,
2178                                      int h_edge_pos, int v_edge_pos,
2179                                      int w, int h, h264_chroma_mc_func *pix_op,
2180                                      int motion_x, int motion_y)
/* Half-pel motion compensation for lowres decoding: splits the motion
 * vector into an integer source offset plus a sub-pel fraction (sx/sy),
 * emulates out-of-picture edges into edge_emu_buffer when needed, and
 * applies the h264 chroma MC kernel for the bilinear interpolation. */
2182     const int lowres   = s->avctx->lowres;
2183     const int op_index = FFMIN(lowres, 3);
/* s_mask keeps the sub-pel bits: lowres+1 fractional bits per axis. */
2184     const int s_mask   = (2 << lowres) - 1;
2188     if (s->quarter_sample) {
2193     sx = motion_x & s_mask;
2194     sy = motion_y & s_mask;
/* note: ">> lowres + 1" parses as ">> (lowres + 1)" — integer-pel part. */
2195     src_x += motion_x >> lowres + 1;
2196     src_y += motion_y >> lowres + 1;
2198     src += src_y * stride + src_x;
2200     if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
2201         (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2202         s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
2203                                  (h + 1) << field_based, src_x,
2204                                  src_y   << field_based,
2207         src = s->edge_emu_buffer;
/* Rescale the sub-pel fraction to the 1/8-pel range the MC kernel expects. */
2211         sx = (sx << 2) >> lowres;
2212         sy = (sy << 2) >> lowres;
2215     pix_op[op_index](dest, src, stride, h, sx, sy);
2219 /* apply one mpeg motion vector to the three components */
/* NOTE(review): this extract appears to have source lines elided (the embedded
 * line numbers jump), so parts of the signature and body are not visible here.
 * From what is visible: applies a single MPEG motion vector to the Y, Cb and
 * Cr planes at reduced resolution (s->avctx->lowres), computing the integer
 * source position and the sub-pel fraction per plane, falling back to
 * emulated_edge_mc when the reference read would cross the picture edge. */
2220 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2227 uint8_t **ref_picture,
2228 h264_chroma_mc_func *pix_op,
2229 int motion_x, int motion_y,
2232 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2233 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
2235 const int lowres = s->avctx->lowres;
2236 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2237 const int block_s = 8>>lowres;
/* s_mask isolates the sub-pel fraction of a motion vector at this lowres. */
2238 const int s_mask = (2 << lowres) - 1;
2239 const int h_edge_pos = s->h_edge_pos >> lowres;
2240 const int v_edge_pos = s->v_edge_pos >> lowres;
/* field pictures use doubled line strides so each field is addressed alone */
2241 linesize = s->current_picture.f.linesize[0] << field_based;
2242 uvlinesize = s->current_picture.f.linesize[1] << field_based;
2244 // FIXME obviously not perfect but qpel will not work in lowres anyway
2245 if (s->quarter_sample) {
2251 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
/* split the MV into integer source position and sub-pel fraction */
2254 sx = motion_x & s_mask;
2255 sy = motion_y & s_mask;
2256 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2257 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* chroma MV derivation differs per output format (H.263 / H.261 / MPEG) */
2259 if (s->out_format == FMT_H263) {
2260 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2261 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2262 uvsrc_x = src_x >> 1;
2263 uvsrc_y = src_y >> 1;
2264 } else if (s->out_format == FMT_H261) {
2265 // even chroma mv's are full pel in H261
2268 uvsx = (2 * mx) & s_mask;
2269 uvsy = (2 * my) & s_mask;
2270 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2271 uvsrc_y = mb_y * block_s + (my >> lowres);
2273 if(s->chroma_y_shift){
2278 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2279 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2281 if(s->chroma_x_shift){
2285 uvsy = motion_y & s_mask;
2287 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2290 uvsx = motion_x & s_mask;
2291 uvsy = motion_y & s_mask;
2298 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2299 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2300 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* if the (w+1)x(h+1) interpolation read would cross the picture edge,
 * copy via the edge-emulation buffer instead of reading out of bounds */
2302 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2303 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2304 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2305 linesize >> field_based, 17, 17 + field_based,
2306 src_x, src_y << field_based, h_edge_pos,
2308 ptr_y = s->edge_emu_buffer;
2309 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2310 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2311 s->vdsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9,
2313 uvsrc_x, uvsrc_y << field_based,
2314 h_edge_pos >> 1, v_edge_pos >> 1);
2315 s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9,
2317 uvsrc_x, uvsrc_y << field_based,
2318 h_edge_pos >> 1, v_edge_pos >> 1);
2320 ptr_cr = uvbuf + 16;
2324 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
/* bottom field: advance dest/src by one line so the field lines interleave */
2326 dest_y += s->linesize;
2327 dest_cb += s->uvlinesize;
2328 dest_cr += s->uvlinesize;
2332 ptr_y += s->linesize;
2333 ptr_cb += s->uvlinesize;
2334 ptr_cr += s->uvlinesize;
/* rescale sub-pel fractions to the 1/8-pel range the chroma MC ops expect */
2337 sx = (sx << 2) >> lowres;
2338 sy = (sy << 2) >> lowres;
2339 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2341 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2342 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2343 uvsx = (uvsx << 2) >> lowres;
2344 uvsy = (uvsy << 2) >> lowres;
2346 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2347 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2350 // FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV macroblocks at reduced resolution:
 * the four luma MVs are averaged into one chroma MV (with the special H.263
 * rounding), then both chroma planes are motion-compensated as one block.
 * NOTE(review): source lines appear elided in this extract (mx/my summation
 * and some braces are not visible). */
2353 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2354 uint8_t *dest_cb, uint8_t *dest_cr,
2355 uint8_t **ref_picture,
2356 h264_chroma_mc_func * pix_op,
2359 const int lowres = s->avctx->lowres;
2360 const int op_index = FFMIN(lowres, 3);
2361 const int block_s = 8 >> lowres;
2362 const int s_mask = (2 << lowres) - 1;
/* chroma edges are half the luma edges (4:2:0), hence the extra >> 1 */
2363 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2364 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2365 int emu = 0, src_x, src_y, offset, sx, sy;
2368 if (s->quarter_sample) {
2373 /* In case of 8X8, we construct a single chroma motion vector
2374 with a special rounding */
2375 mx = ff_h263_round_chroma(mx);
2376 my = ff_h263_round_chroma(my);
2380 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2381 src_y = s->mb_y * block_s + (my >> lowres + 1);
2383 offset = src_y * s->uvlinesize + src_x;
2384 ptr = ref_picture[1] + offset;
2385 if (s->flags & CODEC_FLAG_EMU_EDGE) {
2386 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2387 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2388 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
2389 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2390 ptr = s->edge_emu_buffer;
/* rescale sub-pel fractions for the chroma MC function table */
2394 sx = (sx << 2) >> lowres;
2395 sy = (sy << 2) >> lowres;
2396 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr uses the same offset/fraction as Cb; re-check edge emulation for it */
2398 ptr = ref_picture[2] + offset;
2400 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
2401 src_x, src_y, h_edge_pos, v_edge_pos);
2402 ptr = s->edge_emu_buffer;
2404 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2408 * motion compensation of a single macroblock
2410 * @param dest_y luma destination pointer
2411 * @param dest_cb chroma cb/u destination pointer
2412 * @param dest_cr chroma cr/v destination pointer
2413 * @param dir direction (0->forward, 1->backward)
2414 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2415 * @param pix_op halfpel motion compensation function (average or put normally)
2416 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Low-resolution counterpart of ff_MPV_motion(): dispatches on s->mv_type
 * (16x16 / 8x8 / field / 16x8 / dual-prime) and calls mpeg_motion_lowres /
 * hpel_motion_lowres / chroma_4mv_motion_lowres accordingly.
 * NOTE(review): several case labels and braces are elided in this extract. */
2418 static inline void MPV_motion_lowres(MpegEncContext *s,
2419 uint8_t *dest_y, uint8_t *dest_cb,
2421 int dir, uint8_t **ref_picture,
2422 h264_chroma_mc_func *pix_op)
2426 const int lowres = s->avctx->lowres;
2427 const int block_s = 8 >>lowres;
2432 switch (s->mv_type) {
2434 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2436 ref_picture, pix_op,
2437 s->mv[dir][0][0], s->mv[dir][0][1],
/* 8x8: four luma blocks, each with its own MV */
2443 for (i = 0; i < 4; i++) {
2444 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2445 s->linesize) * block_s,
2446 ref_picture[0], 0, 0,
2447 (2 * mb_x + (i & 1)) * block_s,
2448 (2 * mb_y + (i >> 1)) * block_s,
2449 s->width, s->height, s->linesize,
2450 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2451 block_s, block_s, pix_op,
2452 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate the four luma MVs for the averaged chroma MV */
2454 mx += s->mv[dir][i][0];
2455 my += s->mv[dir][i][1];
2458 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2459 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
2463 if (s->picture_structure == PICT_FRAME) {
/* field MC in a frame picture: compensate top and bottom field separately */
2465 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2466 1, 0, s->field_select[dir][0],
2467 ref_picture, pix_op,
2468 s->mv[dir][0][0], s->mv[dir][0][1],
2471 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2472 1, 1, s->field_select[dir][1],
2473 ref_picture, pix_op,
2474 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture: opposite-parity references come from the current picture
 * unless this is a B frame or the first field */
2477 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2478 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2479 ref_picture = s->current_picture_ptr->f.data;
2482 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2483 0, 0, s->field_select[dir][0],
2484 ref_picture, pix_op,
2486 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
2490 for (i = 0; i < 2; i++) {
2491 uint8_t **ref2picture;
2493 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2494 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2495 ref2picture = ref_picture;
2497 ref2picture = s->current_picture_ptr->f.data;
2500 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2501 0, 0, s->field_select[dir][i],
2502 ref2picture, pix_op,
2503 s->mv[dir][i][0], s->mv[dir][i][1] +
2504 2 * block_s * i, block_s, mb_y >> 1);
2506 dest_y += 2 * block_s * s->linesize;
2507 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2508 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2512 if (s->picture_structure == PICT_FRAME) {
2513 for (i = 0; i < 2; i++) {
2515 for (j = 0; j < 2; j++) {
2516 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2518 ref_picture, pix_op,
2519 s->mv[dir][2 * i + j][0],
2520 s->mv[dir][2 * i + j][1],
2523 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2526 for (i = 0; i < 2; i++) {
2527 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2528 0, 0, s->picture_structure != i + 1,
2529 ref_picture, pix_op,
2530 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2531 2 * block_s, mb_y >> 1);
2533 // after put we make avg of the same block
2534 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2536 // opposite parity is always in the same
2537 // frame if this is second field
2538 if (!s->first_field) {
2539 ref_picture = s->current_picture_ptr->f.data;
2550 * find the lowest MB row referenced in the MVs
/* Returns the largest (bottom-most) macroblock row of the reference picture
 * that the current MB's motion vectors can touch, clamped to the picture;
 * used to wait on frame-threaded reference decoding progress.
 * NOTE(review): the switch cases setting `mvs`/`off` are elided here. */
2552 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2554 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2555 int my, off, i, mvs;
/* field pictures / GMC: be conservative and report the last row */
2557 if (s->picture_structure != PICT_FRAME || s->mcsel)
2560 switch (s->mv_type) {
2574 for (i = 0; i < mvs; i++) {
/* normalize half-pel MVs to quarter-pel units before comparing */
2575 my = s->mv[dir][i][1]<<qpel_shift;
2576 my_max = FFMAX(my_max, my);
2577 my_min = FFMIN(my_min, my);
/* convert the largest vertical displacement (qpel) to whole MB rows */
2580 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2582 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2584 return s->mb_height-1;
2587 /* put block[] to dest[] */
/* Dequantize an intra block in place, then overwrite dest with its IDCT. */
2588 static inline void put_dct(MpegEncContext *s,
2589 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2591 s->dct_unquantize_intra(s, block, i, qscale);
2592 s->dsp.idct_put (dest, line_size, block);
2595 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block to dest; skipped when the
 * block has no coded coefficients (block_last_index < 0). */
2596 static inline void add_dct(MpegEncContext *s,
2597 int16_t *block, int i, uint8_t *dest, int line_size)
2599 if (s->block_last_index[i] >= 0) {
2600 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block in place, then add its IDCT to dest; no-op for
 * blocks with no coded coefficients. */
2604 static inline void add_dequant_dct(MpegEncContext *s,
2605 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2607 if (s->block_last_index[i] >= 0) {
2608 s->dct_unquantize_inter(s, block, i, qscale);
2610 s->dsp.idct_add (dest, line_size, block);
2615 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra prediction state (DC predictors to 1024, AC predictors
 * to 0, and for MSMPEG4 v3+ the coded_block flags) for the current MB so a
 * following intra MB does not predict from stale data, then clears the
 * MB's entry in mbintra_table. */
2617 void ff_clean_intra_table_entries(MpegEncContext *s)
2619 int wrap = s->b8_stride;
2620 int xy = s->block_index[0];
/* luma: four 8x8 blocks at xy, xy+1, xy+wrap, xy+1+wrap */
2623 s->dc_val[0][xy + 1 ] =
2624 s->dc_val[0][xy + wrap] =
2625 s->dc_val[0][xy + 1 + wrap] = 1024;
2627 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2628 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2629 if (s->msmpeg4_version>=3) {
2630 s->coded_block[xy ] =
2631 s->coded_block[xy + 1 ] =
2632 s->coded_block[xy + wrap] =
2633 s->coded_block[xy + 1 + wrap] = 0;
/* chroma uses the per-MB stride, one block per plane */
2636 wrap = s->mb_stride;
2637 xy = s->mb_x + s->mb_y * wrap;
2639 s->dc_val[2][xy] = 1024;
2641 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2642 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2644 s->mbintra_table[xy]= 0;
2647 /* generic function called after a macroblock has been parsed by the
2648 decoder or after it has been encoded by the encoder.
2650 Important variables used:
2651 s->mb_intra : true if intra macroblock
2652 s->mv_dir : motion vector direction
2653 s->mv_type : motion vector type
2654 s->mv : motion vector
2655 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstruct one macroblock: motion compensation (normal or lowres path),
 * then dequant+IDCT of the residual (inter: add; intra: put), honoring
 * skip_idct, GRAY-only decoding and interlaced DCT addressing. Compiled in
 * lowres/is_mpeg12 specializations via av_always_inline.
 * NOTE(review): many lines (branch headers, braces) are elided in this
 * extract; comments below describe only what is visible. */
2657 static av_always_inline
2658 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2659 int lowres_flag, int is_mpeg12)
2661 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XvMC does its own reconstruction from pblocks; bail out early */
2662 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2663 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2667 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2668 /* print DCT coefficients */
2670 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2672 for(j=0; j<64; j++){
2673 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2675 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2679 s->current_picture.qscale_table[mb_xy] = s->qscale;
2681 /* update DC predictors for P macroblocks */
2683 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2684 if(s->mbintra_table[mb_xy])
2685 ff_clean_intra_table_entries(s);
2689 s->last_dc[2] = 128 << s->intra_dc_precision;
2692 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2693 s->mbintra_table[mb_xy]=1;
2695 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2696 uint8_t *dest_y, *dest_cb, *dest_cr;
2697 int dct_linesize, dct_offset;
2698 op_pixels_func (*op_pix)[4];
2699 qpel_mc_func (*op_qpix)[16];
2700 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2701 const int uvlinesize = s->current_picture.f.linesize[1];
2702 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2703 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2705 /* avoid copy if macroblock skipped in last frame too */
2706 /* skip only during decoding as we might trash the buffers during encoding a bit */
2708 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2710 if (s->mb_skipped) {
2712 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2714 } else if(!s->current_picture.reference) {
2717 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: double the line stride and offset by one line */
2721 dct_linesize = linesize << s->interlaced_dct;
2722 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2726 dest_cb= s->dest[1];
2727 dest_cr= s->dest[2];
/* non-readable B-frame destination: reconstruct into a scratchpad */
2729 dest_y = s->b_scratchpad;
2730 dest_cb= s->b_scratchpad+16*linesize;
2731 dest_cr= s->b_scratchpad+32*linesize;
2735 /* motion handling */
2736 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the referenced rows of the reference
 * pictures have been decoded before reading them */
2739 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2740 if (s->mv_dir & MV_DIR_FORWARD) {
2741 ff_thread_await_progress(&s->last_picture_ptr->tf,
2742 ff_MPV_lowest_referenced_row(s, 0),
2745 if (s->mv_dir & MV_DIR_BACKWARD) {
2746 ff_thread_await_progress(&s->next_picture_ptr->tf,
2747 ff_MPV_lowest_referenced_row(s, 1),
/* lowres MC path: put forward prediction, then average in backward */
2753 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2755 if (s->mv_dir & MV_DIR_FORWARD) {
2756 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2757 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2759 if (s->mv_dir & MV_DIR_BACKWARD) {
2760 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* full-resolution MC path */
2763 op_qpix= s->me.qpel_put;
2764 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2765 op_pix = s->hdsp.put_pixels_tab;
2767 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2769 if (s->mv_dir & MV_DIR_FORWARD) {
2770 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2771 op_pix = s->hdsp.avg_pixels_tab;
2772 op_qpix= s->me.qpel_avg;
2774 if (s->mv_dir & MV_DIR_BACKWARD) {
2775 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2780 /* skip dequant / idct if we are really late ;) */
2781 if(s->avctx->skip_idct){
2782 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2783 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2784 || s->avctx->skip_idct >= AVDISCARD_ALL)
2788 /* add dct residue */
/* codecs without per-block unquantized residue need dequant+add here */
2789 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2790 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2791 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2792 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2793 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2794 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2796 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2797 if (s->chroma_y_shift){
2798 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2799 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2803 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2804 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2805 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2806 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2809 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
/* residue is already dequantized: plain IDCT-add */
2810 add_dct(s, block[0], 0, dest_y , dct_linesize);
2811 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2812 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2813 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2815 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2816 if(s->chroma_y_shift){//Chroma420
2817 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2818 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2821 dct_linesize = uvlinesize << s->interlaced_dct;
2822 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2824 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2825 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2826 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2827 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2828 if(!s->chroma_x_shift){//Chroma444
2829 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2830 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2831 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2832 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2837 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2838 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2841 /* dct only in intra block */
2842 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2843 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2844 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2845 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2846 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2848 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2849 if(s->chroma_y_shift){
2850 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2851 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2855 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2856 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2857 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2858 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* intra blocks already dequantized: direct IDCT-put */
2862 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2863 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2864 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2865 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2867 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2868 if(s->chroma_y_shift){
2869 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2870 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2873 dct_linesize = uvlinesize << s->interlaced_dct;
2874 dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
2876 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2877 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2878 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2879 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2880 if(!s->chroma_x_shift){//Chroma444
2881 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2882 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2883 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2884 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* scratchpad path: copy the reconstructed MB back to the real picture */
2892 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2893 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2894 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatch to the (lowres, is_mpeg12) specialization of
 * MPV_decode_mb_internal selected by the output format and lowres setting. */
2899 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2901 if(s->out_format == FMT_MPEG1) {
2902 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2903 else MPV_decode_mb_internal(s, block, 0, 1);
2906 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2907 else MPV_decode_mb_internal(s, block, 0, 0);
2911 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* Draw the edge padding for a finished band of rows and, if the application
 * registered a draw_horiz_band callback, hand the band to it (choosing the
 * current or last picture depending on picture type / delay). */
2913 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2914 Picture *last, int y, int h, int picture_structure,
2915 int first_field, int draw_edges, int low_delay,
2916 int v_edge_pos, int h_edge_pos)
2918 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2919 int hshift = desc->log2_chroma_w;
2920 int vshift = desc->log2_chroma_h;
2921 const int field_pic = picture_structure != PICT_FRAME;
/* skip edge drawing for hwaccel/VDPAU output or when EMU_EDGE is set */
2927 if (!avctx->hwaccel &&
2928 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2931 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2932 int *linesize = cur->f.linesize;
2933 int sides = 0, edge_h;
2934 if (y==0) sides |= EDGE_TOP;
2935 if (y + h >= v_edge_pos)
2936 sides |= EDGE_BOTTOM;
2938 edge_h= FFMIN(h, v_edge_pos - y);
2940 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2941 linesize[0], h_edge_pos, edge_h,
2942 EDGE_WIDTH, EDGE_WIDTH, sides);
2943 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2944 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2945 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2946 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2947 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2948 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
/* clip the band height for the last row of the picture */
2951 h = FFMIN(h, avctx->height - y);
2953 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2955 if (avctx->draw_horiz_band) {
2957 int offset[AV_NUM_DATA_POINTERS];
2960 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2961 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2968 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2969 picture_structure == PICT_FRAME &&
2970 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2971 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
/* per-plane byte offsets of the band within the picture */
2974 offset[0]= y * src->linesize[0];
2976 offset[2]= (y >> vshift) * src->linesize[1];
2977 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2983 avctx->draw_horiz_band(avctx, src, offset,
2984 y, picture_structure, h);
/* Convenience wrapper: forward the band to ff_draw_horiz_band() with the
 * MpegEncContext's current state; edges are drawn only when the codec uses
 * unrestricted MVs and the stream is not intra-only. */
2988 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2990 int draw_edges = s->unrestricted_mv && !s->intra_only;
2991 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2992 &s->last_picture, y, h, s->picture_structure,
2993 s->first_field, draw_edges, s->low_delay,
2994 s->v_edge_pos, s->h_edge_pos);
/* Set up s->block_index[] (per-8x8-block indices for the current MB) and
 * s->dest[] (per-plane destination pointers), accounting for lowres
 * (mb_size = 4 - lowres), chroma subsampling and field pictures. */
2997 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2998 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2999 const int uvlinesize = s->current_picture.f.linesize[1];
3000 const int mb_size= 4 - s->avctx->lowres;
3002 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3003 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3004 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3005 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3006 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3007 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3008 //block_index is not used by mpeg2, so it is not affected by chroma_format
3010 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
3011 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3012 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3014 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3016 if(s->picture_structure==PICT_FRAME){
3017 s->dest[0] += s->mb_y * linesize << mb_size;
3018 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3019 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: rows alternate between fields, hence mb_y >> 1 */
3021 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3022 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3023 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3024 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3030 * Permute an 8x8 block.
3031 * @param block the block which will be permuted according to the given permutation vector
3032 * @param permutation the permutation vector
3033 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3034 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3035 * (inverse) permutated to scantable order!
/* Apply `permutation` to the coefficients of `block`, touching only the
 * positions reachable through `scantable` up to `last` (visiting the rest is
 * unnecessary since they are zero). NOTE(review): the copy-to-temp loop body
 * and an early-exit check appear elided in this extract. */
3037 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3043 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* first pass: stash the affected coefficients */
3045 for(i=0; i<=last; i++){
3046 const int j= scantable[i];
/* second pass: write them back at their permuted positions */
3051 for(i=0; i<=last; i++){
3052 const int j= scantable[i];
3053 const int perm_j= permutation[j];
3054 block[perm_j]= temp[j];
/* Flush callback: drop all picture references, reset the MB position,
 * parser state and buffered bitstream so decoding can restart cleanly
 * (e.g. after a seek). */
3058 void ff_mpeg_flush(AVCodecContext *avctx){
3060 MpegEncContext *s = avctx->priv_data;
3062 if(s==NULL || s->picture==NULL)
3065 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3066 ff_mpeg_unref_picture(s, &s->picture[i]);
3067 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3069 ff_mpeg_unref_picture(s, &s->current_picture);
3070 ff_mpeg_unref_picture(s, &s->last_picture);
3071 ff_mpeg_unref_picture(s, &s->next_picture);
3073 s->mb_x= s->mb_y= 0;
3076 s->parse_context.state= -1;
3077 s->parse_context.frame_start_found= 0;
3078 s->parse_context.overread= 0;
3079 s->parse_context.overread_index= 0;
3080 s->parse_context.index= 0;
3081 s->parse_context.last_index= 0;
3082 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantizer (C reference): DC is scaled by the y/c DC scale,
 * AC coefficients by qscale * intra_matrix with MPEG-1 odd-ification
 * ((level-1)|1). NOTE(review): the sign-handling branches are partly elided
 * in this extract. */
3086 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
3087 int16_t *block, int n, int qscale)
3089 int i, level, nCoeffs;
3090 const uint16_t *quant_matrix;
3092 nCoeffs= s->block_last_index[n];
/* blocks 0-3 are luma, 4+ chroma */
3094 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3095 /* XXX: only mpeg1 */
3096 quant_matrix = s->intra_matrix;
3097 for(i=1;i<=nCoeffs;i++) {
3098 int j= s->intra_scantable.permutated[i];
3103 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* force the result odd, as MPEG-1 mismatch control requires */
3104 level = (level - 1) | 1;
3107 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3108 level = (level - 1) | 1;
/* MPEG-1 inter dequantizer (C reference): level' = ((2*level+1) * qscale *
 * inter_matrix[j]) >> 4, forced odd; iterates from coefficient 0 since
 * inter blocks have no separately-coded DC. */
3115 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
3116 int16_t *block, int n, int qscale)
3118 int i, level, nCoeffs;
3119 const uint16_t *quant_matrix;
3121 nCoeffs= s->block_last_index[n];
3123 quant_matrix = s->inter_matrix;
3124 for(i=0; i<=nCoeffs; i++) {
3125 int j= s->intra_scantable.permutated[i];
3130 level = (((level << 1) + 1) * qscale *
3131 ((int) (quant_matrix[j]))) >> 4;
/* odd-ification for MPEG-1 mismatch control */
3132 level = (level - 1) | 1;
3135 level = (((level << 1) + 1) * qscale *
3136 ((int) (quant_matrix[j]))) >> 4;
3137 level = (level - 1) | 1;
/* MPEG-2 intra dequantizer (C reference): like the MPEG-1 version but
 * without the odd-ification step; alternate scan forces processing all 63
 * AC coefficients. */
3144 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
3145 int16_t *block, int n, int qscale)
3147 int i, level, nCoeffs;
3148 const uint16_t *quant_matrix;
3150 if(s->alternate_scan) nCoeffs= 63;
3151 else nCoeffs= s->block_last_index[n];
3153 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3154 quant_matrix = s->intra_matrix;
3155 for(i=1;i<=nCoeffs;i++) {
3156 int j= s->intra_scantable.permutated[i];
3161 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3164 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of the MPEG-2 intra dequantizer; visible arithmetic
 * matches dct_unquantize_mpeg2_intra_c (NOTE(review): the lines that
 * differentiate it — presumably mismatch-control accumulation — are elided
 * in this extract). */
3171 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
3172 int16_t *block, int n, int qscale)
3174 int i, level, nCoeffs;
3175 const uint16_t *quant_matrix;
3178 if(s->alternate_scan) nCoeffs= 63;
3179 else nCoeffs= s->block_last_index[n];
3181 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3183 quant_matrix = s->intra_matrix;
3184 for(i=1;i<=nCoeffs;i++) {
3185 int j= s->intra_scantable.permutated[i];
3190 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3193 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantizer (C reference): ((2*level+1) * qscale *
 * inter_matrix[j]) >> 4, no odd-ification (MPEG-2 uses a different
 * mismatch-control scheme). */
3202 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
3203 int16_t *block, int n, int qscale)
3205 int i, level, nCoeffs;
3206 const uint16_t *quant_matrix;
3209 if(s->alternate_scan) nCoeffs= 63;
3210 else nCoeffs= s->block_last_index[n];
3212 quant_matrix = s->inter_matrix;
3213 for(i=0; i<=nCoeffs; i++) {
3214 int j= s->intra_scantable.permutated[i];
3219 level = (((level << 1) + 1) * qscale *
3220 ((int) (quant_matrix[j]))) >> 4;
3223 level = (((level << 1) + 1) * qscale *
3224 ((int) (quant_matrix[j]))) >> 4;
/* H.263 intra dequantizer (C reference): level' = level*qmul ± qadd, with
 * DC scaled separately; coefficient count comes from raster_end of the
 * scantable. NOTE(review): the qmul assignment and some branches are elided
 * in this extract. */
3233 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
3234 int16_t *block, int n, int qscale)
3236 int i, level, qmul, qadd;
3239 av_assert2(s->block_last_index[n]>=0);
3244 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
/* qadd is forced odd, per the H.263 inverse-quantization formula */
3245 qadd = (qscale - 1) | 1;
3252 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3254 for(i=1; i<=nCoeffs; i++) {
/* sign-dependent rounding: subtract qadd for negative levels */
3258 level = level * qmul - qadd;
3260 level = level * qmul + qadd;
/* H.263 inter dequantizer (C reference): same level*qmul ± qadd formula as
 * the intra variant but without DC special-casing, starting at
 * coefficient 0. */
3267 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
3268 int16_t *block, int n, int qscale)
3270 int i, level, qmul, qadd;
3273 av_assert2(s->block_last_index[n]>=0);
3275 qadd = (qscale - 1) | 1;
3278 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3280 for(i=0; i<=nCoeffs; i++) {
3284 level = level * qmul - qadd;
3286 level = level * qmul + qadd;
3294 * set qscale and update qscale dependent variables.
/* Clamps qscale (upper bound 31 visible here; the lower-bound line is
 * elided in this extract) and refreshes the derived chroma-qscale and
 * DC-scale values. */
3296 void ff_set_qscale(MpegEncContext * s, int qscale)
3300 else if (qscale > 31)
3304 s->chroma_qscale= s->chroma_qscale_table[qscale];
3306 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3307 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report the last fully-decoded MB row to the frame-threading machinery so
 * dependent threads can proceed; suppressed for B frames, partitioned
 * frames and after decode errors. */
3310 void ff_MPV_report_decode_progress(MpegEncContext *s)
3312 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3313 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3316 #if CONFIG_ERROR_RESILIENCE
/* Copy the current frame's state (picture pointers, timing, prediction
 * parameters) into the error-resilience context and start its per-frame
 * bookkeeping. */
3317 void ff_mpeg_er_frame_start(MpegEncContext *s)
3319 ERContext *er = &s->er;
3321 er->cur_pic = s->current_picture_ptr;
3322 er->last_pic = s->last_picture_ptr;
3323 er->next_pic = s->next_picture_ptr;
3325 er->pp_time = s->pp_time;
3326 er->pb_time = s->pb_time;
3327 er->quarter_sample = s->quarter_sample;
3328 er->partitioned_frame = s->partitioned_frame;
3330 ff_er_frame_start(er);
3332 #endif /* CONFIG_ERROR_RESILIENCE */