2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
35 #include "h264chroma.h"
38 #include "mpegvideo.h"
41 #include "xvmc_internal.h"
/* Forward declarations of the per-standard DCT dequantizers.
 * The matching implementation is installed into the context's function
 * pointers by ff_dct_common_init() below. */
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
/* Default chroma qscale mapping: identity (chroma qscale == luma qscale). */
60 static const uint8_t ff_default_chroma_qscale_table[32] = {
61 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
62 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
63 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale value. */
66 const uint8_t ff_mpeg1_dc_scale_table[128] = {
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
68 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale, intra_dc_precision = 1: constant 4. */
78 static const uint8_t mpeg2_dc_scale_table1[128] = {
79 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
80 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale, intra_dc_precision = 2: constant 2. */
90 static const uint8_t mpeg2_dc_scale_table2[128] = {
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale, intra_dc_precision = 3: constant 1 (full precision). */
102 static const uint8_t mpeg2_dc_scale_table3[128] = {
103 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
104 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Lookup of the four DC scale tables above, indexed by
 * intra_dc_precision (0..3); index 0 reuses the MPEG-1 table. */
114 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
115 ff_mpeg1_dc_scale_table,
116 mpeg2_dc_scale_table1,
117 mpeg2_dc_scale_table2,
118 mpeg2_dc_scale_table3,
121 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback: re-decode one macroblock with the supplied
 * motion/intra state. Installed as er->decode_mb in init_er(). */
126 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
128 int mb_x, int mb_y, int mb_intra, int mb_skipped)
130 MpegEncContext *s = opaque;
/* Restore the macroblock state the ER code wants re-decoded. */
133 s->mv_type = mv_type;
134 s->mb_intra = mb_intra;
135 s->mb_skipped = mb_skipped;
138 memcpy(s->mv, mv, sizeof(*mv));
140 ff_init_block_index(s);
141 ff_update_block_index(s);
143 s->dsp.clear_blocks(s->block[0]);
/* Recompute destination pointers for this MB; chroma planes are offset
 * by the chroma subsampling shifts. */
145 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
146 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
147 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
150 ff_MPV_decode_mb(s, s->block);
153 /* init common dct for both encoder and decoder */
154 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Initialize the shared DSP helpers used by both code paths. */
156 ff_dsputil_init(&s->dsp, s->avctx);
157 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
158 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
159 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* Install the C dequantizers declared above; platform init below may
 * override them with optimized versions. */
161 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
162 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
163 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
164 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
165 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
166 if (s->flags & CODEC_FLAG_BITEXACT)
167 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
168 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture overrides (presumably guarded by ARCH_* #ifs in the
 * full file — the guards are not visible here). */
171 ff_MPV_common_init_x86(s);
173 ff_MPV_common_init_axp(s);
175 ff_MPV_common_init_arm(s);
177 ff_MPV_common_init_bfin(s);
179 ff_MPV_common_init_ppc(s);
182 /* load & permutate scantables
183 * note: only wmv uses different ones
185 if (s->alternate_scan) {
186 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
187 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
189 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
190 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
192 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate linesize-dependent scratch buffers (edge emulation and the
 * motion-estimation/rate-distortion scratchpads).
 * Returns AVERROR(ENOMEM) on allocation failure. */
198 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
200 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
202 // edge emu needs blocksize + filter length - 1
203 // (= 17x17 for halfpel / 21x21 for h264)
204 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
205 // at uvlinesize. It supports only YUV420 so 24x24 is enough
206 // linesize * interlaced * MBsize
207 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
210 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
/* The remaining scratchpads alias regions of me.scratchpad rather than
 * being separate allocations. */
212 s->me.temp = s->me.scratchpad;
213 s->rd_scratchpad = s->me.scratchpad;
214 s->b_scratchpad = s->me.scratchpad;
215 s->obmc_scratchpad = s->me.scratchpad + 16;
/* fail: partial allocation is rolled back before reporting OOM. */
219 av_freep(&s->edge_emu_buffer);
220 return AVERROR(ENOMEM);
224 * Allocate a frame buffer
226 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* WMV3IMAGE/VC1IMAGE/MSS2 use internal buffers with their own
 * dimensions, so the user get_buffer callback is bypassed for them. */
231 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
232 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
233 s->codec_id != AV_CODEC_ID_MSS2)
234 r = ff_thread_get_buffer(s->avctx, &pic->tf,
235 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* Fallback path (presumably the else branch — not visible here):
 * allocate via the default get_buffer2 with the codec dimensions. */
237 pic->f.width = s->avctx->width;
238 pic->f.height = s->avctx->height;
239 pic->f.format = s->avctx->pix_fmt;
240 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
243 if (r < 0 || !pic->f.data[0]) {
244 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Allocate hwaccel private data when a hardware accelerator is active. */
249 if (s->avctx->hwaccel) {
250 assert(!pic->hwaccel_picture_private);
251 if (s->avctx->hwaccel->priv_data_size) {
252 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
253 if (!pic->hwaccel_priv_buf) {
254 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
257 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* The context caches linesize/uvlinesize; a stride change between
 * frames is an error because scratch buffers depend on it. */
261 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
262 s->uvlinesize != pic->f.linesize[1])) {
263 av_log(s->avctx, AV_LOG_ERROR,
264 "get_buffer() failed (stride changed)\n");
265 ff_mpeg_unref_picture(s, pic);
269 if (pic->f.linesize[1] != pic->f.linesize[2]) {
270 av_log(s->avctx, AV_LOG_ERROR,
271 "get_buffer() failed (uv stride mismatch)\n");
272 ff_mpeg_unref_picture(s, pic);
/* Lazily allocate the linesize-dependent scratch buffers on first use. */
276 if (!s->edge_emu_buffer &&
277 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
278 av_log(s->avctx, AV_LOG_ERROR,
279 "get_buffer() failed to allocate context scratch buffers.\n");
280 ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data buffers (AVBuffer refs) and reset
 * the recorded allocation dimensions. */
287 static void free_picture_tables(Picture *pic)
291 pic->alloc_mb_width =
292 pic->alloc_mb_height = 0;
294 av_buffer_unref(&pic->mb_var_buf);
295 av_buffer_unref(&pic->mc_mb_var_buf);
296 av_buffer_unref(&pic->mb_mean_buf);
297 av_buffer_unref(&pic->mbskip_table_buf);
298 av_buffer_unref(&pic->qscale_table_buf);
299 av_buffer_unref(&pic->mb_type_buf);
/* Motion vectors and reference indices exist per direction (fwd/back). */
301 for (i = 0; i < 2; i++) {
302 av_buffer_unref(&pic->motion_val_buf[i]);
303 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data tables sized from the current
 * macroblock geometry. Returns AVERROR(ENOMEM) on any failure
 * (partial allocations are left for the caller to free). */
307 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
309 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
310 const int mb_array_size = s->mb_stride * s->mb_height;
311 const int b8_array_size = s->b8_stride * s->mb_height * 2;
315 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
316 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
317 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
319 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
320 return AVERROR(ENOMEM);
/* Encoder-only statistics tables (presumably guarded by s->encoding in
 * the full file — the guard line is not visible here). */
323 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
324 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
325 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
326 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
327 return AVERROR(ENOMEM);
/* Motion vector / ref index tables: needed for H.263-family output,
 * when encoding, or when MV debugging/visualization is requested. */
330 if (s->out_format == FMT_H263 || s->encoding ||
331 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
332 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
333 int ref_index_size = 4 * mb_array_size;
335 for (i = 0; mv_size && i < 2; i++) {
336 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
337 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
338 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
339 return AVERROR(ENOMEM);
/* Record the geometry these tables were sized for, so a later
 * resolution change can detect stale tables (see ff_alloc_picture). */
343 pic->alloc_mb_width = s->mb_width;
344 pic->alloc_mb_height = s->mb_height;
/* Ensure every per-picture table buffer is writable (un-shared),
 * copying shared AVBuffers as needed via av_buffer_make_writable(). */
349 static int make_tables_writable(Picture *pic)
352 #define MAKE_WRITABLE(table) \
355 (ret = av_buffer_make_writable(&pic->table)) < 0)\
359 MAKE_WRITABLE(mb_var_buf);
360 MAKE_WRITABLE(mc_mb_var_buf);
361 MAKE_WRITABLE(mb_mean_buf);
362 MAKE_WRITABLE(mbskip_table_buf);
363 MAKE_WRITABLE(qscale_table_buf);
364 MAKE_WRITABLE(mb_type_buf);
366 for (i = 0; i < 2; i++) {
367 MAKE_WRITABLE(motion_val_buf[i]);
368 MAKE_WRITABLE(ref_index_buf[i]);
375 * Allocate a Picture.
376 * The pixels are allocated/set by calling get_buffer() if shared = 0
378 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* Drop tables that were sized for a different MB geometry. */
382 if (pic->qscale_table_buf)
383 if ( pic->alloc_mb_width != s->mb_width
384 || pic->alloc_mb_height != s->mb_height)
385 free_picture_tables(pic);
/* shared path: pixel data must already be present;
 * non-shared path: data must be absent and is allocated here. */
388 assert(pic->f.data[0]);
391 assert(!pic->f.data[0]);
393 if (alloc_frame_buffer(s, pic) < 0)
/* Cache the strides; alloc_frame_buffer() verifies they never change. */
396 s->linesize = pic->f.linesize[0];
397 s->uvlinesize = pic->f.linesize[1];
400 if (!pic->qscale_table_buf)
401 ret = alloc_picture_tables(s, pic);
403 ret = make_tables_writable(pic);
/* Publish convenience pointers into the table buffers. */
408 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
409 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
410 pic->mb_mean = pic->mb_mean_buf->data;
/* qscale/mb_type point past a 2-row + 1 margin used for edge MBs. */
413 pic->mbskip_table = pic->mbskip_table_buf->data;
414 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
415 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
417 if (pic->motion_val_buf[0]) {
418 for (i = 0; i < 2; i++) {
419 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
420 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* fail: release everything allocated so far before reporting OOM. */
426 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
427 ff_mpeg_unref_picture(s, pic);
428 free_picture_tables(pic);
429 return AVERROR(ENOMEM);
433 * Deallocate a picture.
435 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
/* Everything past mb_mean in Picture is cleared wholesale below;
 * fields up to and including mb_mean are released individually. */
437 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
440 /* WM Image / Screen codecs allocate internal buffers with different
441 * dimensions / colorspaces; ignore user-defined callbacks for these. */
442 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
443 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
444 s->codec_id != AV_CODEC_ID_MSS2)
445 ff_thread_release_buffer(s->avctx, &pic->tf);
447 av_frame_unref(&pic->f);
449 av_buffer_unref(&pic->hwaccel_priv_buf);
/* Zero the tail of the struct so the Picture is reusable. */
451 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's side-data tables reference src's underlying AVBuffers
 * (refcounted sharing, no deep copy), then mirror the derived pointers
 * and geometry. On a failed av_buffer_ref, dst's tables are freed and
 * AVERROR(ENOMEM) is returned. */
454 static int update_picture_tables(Picture *dst, Picture *src)
458 #define UPDATE_TABLE(table)\
461 (!dst->table || dst->table->buffer != src->table->buffer)) {\
462 av_buffer_unref(&dst->table);\
463 dst->table = av_buffer_ref(src->table);\
465 free_picture_tables(dst);\
466 return AVERROR(ENOMEM);\
471 UPDATE_TABLE(mb_var_buf);
472 UPDATE_TABLE(mc_mb_var_buf);
473 UPDATE_TABLE(mb_mean_buf);
474 UPDATE_TABLE(mbskip_table_buf);
475 UPDATE_TABLE(qscale_table_buf);
476 UPDATE_TABLE(mb_type_buf);
477 for (i = 0; i < 2; i++) {
478 UPDATE_TABLE(motion_val_buf[i]);
479 UPDATE_TABLE(ref_index_buf[i]);
/* Copy the plain pointers derived from the buffers above. */
482 dst->mb_var = src->mb_var;
483 dst->mc_mb_var = src->mc_mb_var;
484 dst->mb_mean = src->mb_mean;
485 dst->mbskip_table = src->mbskip_table;
486 dst->qscale_table = src->qscale_table;
487 dst->mb_type = src->mb_type;
488 for (i = 0; i < 2; i++) {
489 dst->motion_val[i] = src->motion_val[i];
490 dst->ref_index[i] = src->ref_index[i];
493 dst->alloc_mb_width = src->alloc_mb_width;
494 dst->alloc_mb_height = src->alloc_mb_height;
/* Create a new reference dst of picture src: frame data (via the
 * thread-aware frame ref), side-data tables, hwaccel private data and
 * scalar metadata. dst must be empty on entry. On failure dst is
 * unreferenced again. */
499 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
503 av_assert0(!dst->f.buf[0]);
504 av_assert0(src->f.buf[0]);
508 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
512 ret = update_picture_tables(dst, src);
516 if (src->hwaccel_picture_private) {
517 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
518 if (!dst->hwaccel_priv_buf)
520 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
/* Plain scalar state is copied by value. */
523 dst->field_picture = src->field_picture;
524 dst->mb_var_sum = src->mb_var_sum;
525 dst->mc_mb_var_sum = src->mc_mb_var_sum;
526 dst->b_frame_score = src->b_frame_score;
527 dst->needs_realloc = src->needs_realloc;
528 dst->reference = src->reference;
529 dst->shared = src->shared;
/* fail: undo the partial reference. */
533 ff_mpeg_unref_picture(s, dst);
/* Allocate the per-slice-thread state of an MpegEncContext: ME maps,
 * optional noise-reduction accumulator, DCT block storage and (for
 * H.263-family) AC prediction values. Returns -1 on OOM; the caller
 * frees via ff_MPV_common_end(). */
537 static int init_duplicate_context(MpegEncContext *s)
539 int y_size = s->b8_stride * (2 * s->mb_height + 1);
540 int c_size = s->mb_stride * (s->mb_height + 1);
541 int yc_size = y_size + 2 * c_size;
549 s->obmc_scratchpad = NULL;
552 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
553 ME_MAP_SIZE * sizeof(uint32_t), fail)
554 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
555 ME_MAP_SIZE * sizeof(uint32_t), fail)
556 if (s->avctx->noise_reduction) {
557 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
558 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered. */
561 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
562 s->block = s->blocks[0];
564 for (i = 0; i < 12; i++) {
565 s->pblocks[i] = &s->block[i];
/* AC prediction values: one Y plane region followed by two chroma
 * regions, each with a one-row/one-column margin. */
568 if (s->out_format == FMT_H263) {
570 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
571 yc_size * sizeof(int16_t) * 16, fail);
572 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
573 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
574 s->ac_val[2] = s->ac_val[1] + c_size;
579 return -1; // free() through ff_MPV_common_end()
/* Free everything init_duplicate_context()/ff_mpv_frame_size_alloc()
 * allocated for one slice-thread context. Safe on partially
 * initialized contexts (av_freep on NULL is a no-op). */
582 static void free_duplicate_context(MpegEncContext *s)
587 av_freep(&s->edge_emu_buffer);
588 av_freep(&s->me.scratchpad);
/* obmc_scratchpad aliases me.scratchpad, so it is only cleared. */
592 s->obmc_scratchpad = NULL;
594 av_freep(&s->dct_error_sum);
595 av_freep(&s->me.map);
596 av_freep(&s->me.score_map);
597 av_freep(&s->blocks);
598 av_freep(&s->ac_val_base);
/* Save the per-thread fields of src into bak so they survive the
 * wholesale memcpy in ff_update_duplicate_context(). */
602 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
604 #define COPY(a) bak->a = src->a
605 COPY(edge_emu_buffer);
610 COPY(obmc_scratchpad);
617 COPY(me.map_generation);
/* Refresh a slice-thread context from the main context: copy the whole
 * struct, then restore dst's own per-thread buffers and re-point the
 * pblocks array into dst's block storage. */
629 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
633 // FIXME copy only needed parts
635 backup_duplicate_context(&bak, dst);
636 memcpy(dst, src, sizeof(MpegEncContext));
637 backup_duplicate_context(dst, &bak);
638 for (i = 0; i < 12; i++) {
639 dst->pblocks[i] = &dst->block[i];
/* Lazily allocate linesize-dependent scratch buffers if missing. */
641 if (!dst->edge_emu_buffer &&
642 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
643 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
644 "scratch buffers.\n");
647 // STOP_TIMER("update_duplicate_context")
648 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: synchronize decoder context s (dst) from s1 (src)
 * before decoding the next frame. Copies/refs pictures, bitstream
 * buffer, and assorted decoding state. */
652 int ff_mpeg_update_thread_context(AVCodecContext *dst,
653 const AVCodecContext *src)
656 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
663 // FIXME can parameters change on I-frames?
664 // in that case dst may need a reinit
665 if (!s->context_initialized) {
/* First sync: clone the whole context, then re-initialize the
 * fields that must be private to this thread. */
666 memcpy(s, s1, sizeof(MpegEncContext));
669 s->bitstream_buffer = NULL;
670 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
672 if (s1->context_initialized){
673 // s->picture_range_start += MAX_PICTURE_COUNT;
674 // s->picture_range_end += MAX_PICTURE_COUNT;
675 if((ret = ff_MPV_common_init(s)) < 0){
676 memset(s, 0, sizeof(MpegEncContext));
/* Geometry changed (or an explicit reinit was requested): resize. */
683 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
684 s->context_reinit = 0;
685 s->height = s1->height;
686 s->width = s1->width;
687 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
691 s->avctx->coded_height = s1->avctx->coded_height;
692 s->avctx->coded_width = s1->avctx->coded_width;
693 s->avctx->width = s1->avctx->width;
694 s->avctx->height = s1->avctx->height;
696 s->coded_picture_number = s1->coded_picture_number;
697 s->picture_number = s1->picture_number;
698 s->input_picture_number = s1->input_picture_number;
700 av_assert0(!s->picture || s->picture != s1->picture);
/* Re-reference every picture from the source thread's pool. */
702 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
703 ff_mpeg_unref_picture(s, &s->picture[i]);
704 if (s1->picture[i].f.data[0] &&
705 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
709 #define UPDATE_PICTURE(pic)\
711 ff_mpeg_unref_picture(s, &s->pic);\
712 if (s1->pic.f.data[0])\
713 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
715 ret = update_picture_tables(&s->pic, &s1->pic);\
720 UPDATE_PICTURE(current_picture);
721 UPDATE_PICTURE(last_picture);
722 UPDATE_PICTURE(next_picture);
/* Translate s1's picture pointers into s's own picture array. */
724 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
725 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
726 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
728 // Error/bug resilience
729 s->next_p_frame_damaged = s1->next_p_frame_damaged;
730 s->workaround_bugs = s1->workaround_bugs;
731 s->padding_bug_score = s1->padding_bug_score;
/* Bulk-copy the MPEG-4 field range [time_increment_bits, shape). */
734 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
735 (char *) &s1->shape - (char *) &s1->time_increment_bits);
738 s->max_b_frames = s1->max_b_frames;
739 s->low_delay = s1->low_delay;
740 s->droppable = s1->droppable;
742 // DivX handling (doesn't work)
743 s->divx_packed = s1->divx_packed;
/* Copy any leftover bitstream data, padded per the bitstream-reader
 * contract (FF_INPUT_BUFFER_PADDING_SIZE zero bytes at the end). */
745 if (s1->bitstream_buffer) {
746 if (s1->bitstream_buffer_size +
747 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
748 av_fast_malloc(&s->bitstream_buffer,
749 &s->allocated_bitstream_buffer_size,
750 s1->allocated_bitstream_buffer_size);
751 s->bitstream_buffer_size = s1->bitstream_buffer_size;
752 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
753 s1->bitstream_buffer_size);
754 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
755 FF_INPUT_BUFFER_PADDING_SIZE);
758 // linesize dependend scratch buffer allocation
759 if (!s->edge_emu_buffer)
761 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
762 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
763 "scratch buffers.\n");
764 return AVERROR(ENOMEM);
767 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
768 "be allocated due to unknown size.\n");
771 // MPEG2/interlacing info
772 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
773 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
775 if (!s1->first_field) {
776 s->last_pict_type = s1->pict_type;
777 if (s1->current_picture_ptr)
778 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
780 if (s1->pict_type != AV_PICTURE_TYPE_B) {
781 s->last_non_b_pict_type = s1->pict_type;
789 * Set the given MpegEncContext to common defaults
790 * (same for encoding and decoding).
791 * The changed fields will not depend upon the
792 * prior state of the MpegEncContext.
794 void ff_MPV_common_defaults(MpegEncContext *s)
/* MPEG-1 DC scale and identity chroma qscale as baseline tables. */
796 s->y_dc_scale_table =
797 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
798 s->chroma_qscale_table = ff_default_chroma_qscale_table;
799 s->progressive_frame = 1;
800 s->progressive_sequence = 1;
801 s->picture_structure = PICT_FRAME;
803 s->coded_picture_number = 0;
804 s->picture_number = 0;
805 s->input_picture_number = 0;
807 s->picture_in_gop_number = 0;
/* Single slice context until threading is configured. */
812 s->slice_context_count = 1;
816 * Set the given MpegEncContext to defaults for decoding.
817 * the changed fields will not depend upon
818 * the prior state of the MpegEncContext.
820 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Decoder defaults are currently just the common defaults. */
822 ff_MPV_common_defaults(s);
/* Wire up the shared error-resilience context: mirror the MB geometry
 * and tables from s, allocate ER-private buffers, and install the
 * decode callback. Returns AVERROR(ENOMEM) on allocation failure. */
825 static int init_er(MpegEncContext *s)
827 ERContext *er = &s->er;
828 int mb_array_size = s->mb_height * s->mb_stride;
831 er->avctx = s->avctx;
834 er->mb_index2xy = s->mb_index2xy;
835 er->mb_num = s->mb_num;
836 er->mb_width = s->mb_width;
837 er->mb_height = s->mb_height;
838 er->mb_stride = s->mb_stride;
839 er->b8_stride = s->b8_stride;
841 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
842 er->error_status_table = av_mallocz(mb_array_size);
843 if (!er->er_temp_buffer || !er->error_status_table)
/* Borrowed pointers — owned by the MpegEncContext, not by ER. */
846 er->mbskip_table = s->mbskip_table;
847 er->mbintra_table = s->mbintra_table;
849 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
850 er->dc_val[i] = s->dc_val[i];
852 er->decode_mb = mpeg_er_decode_mb;
/* fail: release the ER-private allocations. */
857 av_freep(&er->er_temp_buffer);
858 av_freep(&er->error_status_table);
859 return AVERROR(ENOMEM);
863 * Initialize and allocates MpegEncContext fields dependent on the resolution.
865 static int init_context_frame(MpegEncContext *s)
867 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Derive macroblock geometry from the pixel dimensions; strides
 * include a one-unit margin. */
869 s->mb_width = (s->width + 15) / 16;
870 s->mb_stride = s->mb_width + 1;
871 s->b8_stride = s->mb_width * 2 + 1;
872 s->b4_stride = s->mb_width * 4 + 1;
873 mb_array_size = s->mb_height * s->mb_stride;
874 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
876 /* set default edge pos, will be overriden
877 * in decode_header if needed */
878 s->h_edge_pos = s->mb_width * 16;
879 s->v_edge_pos = s->mb_height * 16;
881 s->mb_num = s->mb_width * s->mb_height;
886 s->block_wrap[3] = s->b8_stride;
888 s->block_wrap[5] = s->mb_stride;
890 y_size = s->b8_stride * (2 * s->mb_height + 1);
891 c_size = s->mb_stride * (s->mb_height + 1);
892 yc_size = y_size + 2 * c_size;
894 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
895 for (y = 0; y < s->mb_height; y++)
896 for (x = 0; x < s->mb_width; x++)
897 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
899 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
902 /* Allocate MV tables */
903 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
904 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
905 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
906 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
907 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
908 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
/* Public MV table pointers skip the one-row + one-column margin. */
909 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
910 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
911 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
912 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
913 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
914 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
916 /* Allocate MB type table */
917 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
919 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
921 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
922 mb_array_size * sizeof(float), fail);
923 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
924 mb_array_size * sizeof(float), fail);
928 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
929 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
930 /* interlaced direct mode decoding tables */
931 for (i = 0; i < 2; i++) {
933 for (j = 0; j < 2; j++) {
934 for (k = 0; k < 2; k++) {
935 FF_ALLOCZ_OR_GOTO(s->avctx,
936 s->b_field_mv_table_base[i][j][k],
937 mv_table_size * 2 * sizeof(int16_t),
939 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
942 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
943 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
944 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
946 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
949 if (s->out_format == FMT_H263) {
951 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
952 s->coded_block = s->coded_block_base + s->b8_stride + 1;
954 /* cbp, ac_pred, pred_dir */
955 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
956 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
959 if (s->h263_pred || s->h263_plus || !s->encoding) {
961 // MN: we need these for error resilience of intra-frames
962 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
963 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
964 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
965 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor (mid-range). */
966 for (i = 0; i < yc_size; i++)
967 s->dc_val_base[i] = 1024;
970 /* which mb is a intra block */
971 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
972 memset(s->mbintra_table, 1, mb_array_size);
974 /* init macroblock skip table */
975 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
976 // Note the + 1 is for a quicker mpeg4 slice_end detection
980 return AVERROR(ENOMEM);
984 * init common structure for both encoder and decoder.
985 * this assumes that some variables like width/height are already set
987 av_cold int ff_MPV_common_init(MpegEncContext *s)
990 int nb_slices = (HAVE_THREADS &&
991 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
992 s->avctx->thread_count : 1;
994 if (s->encoding && s->avctx->slices)
995 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 needs mb_height rounded to an even number of
 * 16-line rows (32-line units). */
997 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
998 s->mb_height = (s->height + 31) / 32 * 2;
1000 s->mb_height = (s->height + 15) / 16;
1002 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1003 av_log(s->avctx, AV_LOG_ERROR,
1004 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp slice count to what the MB height and MAX_THREADS allow. */
1008 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1011 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1013 max_slices = MAX_THREADS;
1014 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1015 " reducing to %d\n", nb_slices, max_slices);
1016 nb_slices = max_slices;
1019 if ((s->width || s->height) &&
1020 av_image_check_size(s->width, s->height, 0, s->avctx))
1023 ff_dct_common_init(s);
1025 s->flags = s->avctx->flags;
1026 s->flags2 = s->avctx->flags2;
1028 /* set chroma shifts */
1029 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
1031 /* convert fourcc to upper case */
1032 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1033 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1035 s->avctx->coded_frame = &s->current_picture.f;
/* Encoder-only allocations (presumably inside an s->encoding branch in
 * the full file — the guard lines are not visible here). */
1038 if (s->msmpeg4_version) {
1039 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1040 2 * 2 * (MAX_LEVEL + 1) *
1041 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1043 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1045 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
1046 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
1047 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
1048 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1049 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1050 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1051 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1052 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1054 if (s->avctx->noise_reduction) {
1055 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
/* Picture pool shared by encoder and decoder. */
1059 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1060 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1061 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1062 avcodec_get_frame_defaults(&s->picture[i].f);
1064 memset(&s->next_picture, 0, sizeof(s->next_picture));
1065 memset(&s->last_picture, 0, sizeof(s->last_picture));
1066 memset(&s->current_picture, 0, sizeof(s->current_picture));
1067 avcodec_get_frame_defaults(&s->next_picture.f);
1068 avcodec_get_frame_defaults(&s->last_picture.f);
1069 avcodec_get_frame_defaults(&s->current_picture.f);
1071 if (init_context_frame(s))
1074 s->parse_context.state = -1;
1076 s->context_initialized = 1;
1077 s->thread_context[0] = s;
1079 // if (s->width && s->height) {
/* Slice threading: clone the context for each extra slice thread and
 * partition the MB rows across threads. */
1080 if (nb_slices > 1) {
1081 for (i = 1; i < nb_slices; i++) {
1082 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1083 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1086 for (i = 0; i < nb_slices; i++) {
1087 if (init_duplicate_context(s->thread_context[i]) < 0)
1089 s->thread_context[i]->start_mb_y =
1090 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1091 s->thread_context[i]->end_mb_y =
1092 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1095 if (init_duplicate_context(s) < 0)
1098 s->end_mb_y = s->mb_height;
1100 s->slice_context_count = nb_slices;
/* fail: tear down everything initialized so far. */
1105 ff_MPV_common_end(s);
1110 * Frees and resets MpegEncContext fields depending on the resolution.
1111 * Is used during resolution changes to avoid a full reinitialization of the
1114 static int free_context_frame(MpegEncContext *s)
/* Mirror of init_context_frame(): free every resolution-dependent
 * table and NULL the derived pointers. */
1118 av_freep(&s->mb_type);
1119 av_freep(&s->p_mv_table_base);
1120 av_freep(&s->b_forw_mv_table_base);
1121 av_freep(&s->b_back_mv_table_base);
1122 av_freep(&s->b_bidir_forw_mv_table_base);
1123 av_freep(&s->b_bidir_back_mv_table_base);
1124 av_freep(&s->b_direct_mv_table_base);
1125 s->p_mv_table = NULL;
1126 s->b_forw_mv_table = NULL;
1127 s->b_back_mv_table = NULL;
1128 s->b_bidir_forw_mv_table = NULL;
1129 s->b_bidir_back_mv_table = NULL;
1130 s->b_direct_mv_table = NULL;
1131 for (i = 0; i < 2; i++) {
1132 for (j = 0; j < 2; j++) {
1133 for (k = 0; k < 2; k++) {
1134 av_freep(&s->b_field_mv_table_base[i][j][k]);
1135 s->b_field_mv_table[i][j][k] = NULL;
1137 av_freep(&s->b_field_select_table[i][j]);
1138 av_freep(&s->p_field_mv_table_base[i][j]);
1139 s->p_field_mv_table[i][j] = NULL;
1141 av_freep(&s->p_field_select_table[i]);
1144 av_freep(&s->dc_val_base);
1145 av_freep(&s->coded_block_base);
1146 av_freep(&s->mbintra_table);
1147 av_freep(&s->cbp_table);
1148 av_freep(&s->pred_dir_table);
1150 av_freep(&s->mbskip_table);
1152 av_freep(&s->er.error_status_table);
1153 av_freep(&s->er.er_temp_buffer);
1154 av_freep(&s->mb_index2xy);
1155 av_freep(&s->lambda_table);
1157 av_freep(&s->cplx_tab);
1158 av_freep(&s->bits_tab);
/* Strides are reset so alloc_frame_buffer() re-learns them. */
1160 s->linesize = s->uvlinesize = 0;
1165 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Reinitialize the context after a mid-stream resolution change:
 * tear down the per-slice duplicate contexts and the resolution-
 * dependent tables, then rebuild them for the new width/height.
 * Returns 0 on success, a negative AVERROR code on failure. */
1169 if (s->slice_context_count > 1) {
1170 for (i = 0; i < s->slice_context_count; i++) {
1171 free_duplicate_context(s->thread_context[i]);
/* thread_context[0] is s itself; only indices >= 1 were malloc'ed. */
1173 for (i = 1; i < s->slice_context_count; i++) {
1174 av_freep(&s->thread_context[i]);
1177 free_duplicate_context(s);
1179 if ((err = free_context_frame(s)) < 0)
/* Force every cached picture to be reallocated at the new size. */
1183 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1184 s->picture[i].needs_realloc = 1;
1187 s->last_picture_ptr =
1188 s->next_picture_ptr =
1189 s->current_picture_ptr = NULL;
/* Interlaced MPEG-2 rounds mb_height up to a multiple of two
 * 16-line macroblock rows (32-pixel units). */
1192 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1193 s->mb_height = (s->height + 31) / 32 * 2;
1195 s->mb_height = (s->height + 15) / 16;
1197 if ((s->width || s->height) &&
1198 av_image_check_size(s->width, s->height, 0, s->avctx))
1199 return AVERROR_INVALIDDATA;
1201 if ((err = init_context_frame(s)))
1204 s->thread_context[0] = s;
1206 if (s->width && s->height) {
1207 int nb_slices = s->slice_context_count;
1208 if (nb_slices > 1) {
1209 for (i = 1; i < nb_slices; i++) {
/* NOTE(review): av_malloc result is used unchecked here — an OOM
 * would crash in the memcpy below. */
1210 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1211 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1214 for (i = 0; i < nb_slices; i++) {
1215 if (init_duplicate_context(s->thread_context[i]) < 0)
/* Split macroblock rows evenly across slice contexts, rounding
 * to the nearest row boundary. */
1217 s->thread_context[i]->start_mb_y =
1218 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1219 s->thread_context[i]->end_mb_y =
1220 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1223 if (init_duplicate_context(s) < 0)
1226 s->end_mb_y = s->mb_height;
1228 s->slice_context_count = nb_slices;
/* Error path: undo everything on failure. */
1233 ff_MPV_common_end(s);
1237 /* init common structure for both encoder and decoder */
1238 void ff_MPV_common_end(MpegEncContext *s)
/* Full teardown of the shared encoder/decoder context: slice thread
 * contexts, bitstream/parse buffers, quantizer matrices, the picture
 * pool, and finally the resolution-dependent tables. Safe to call on
 * a partially initialized context (all frees go through av_freep). */
1242 if (s->slice_context_count > 1) {
1243 for (i = 0; i < s->slice_context_count; i++) {
1244 free_duplicate_context(s->thread_context[i]);
/* Index 0 aliases s itself, so only free indices >= 1. */
1246 for (i = 1; i < s->slice_context_count; i++) {
1247 av_freep(&s->thread_context[i]);
1249 s->slice_context_count = 1;
1250 } else free_duplicate_context(s);
1252 av_freep(&s->parse_context.buffer);
1253 s->parse_context.buffer_size = 0;
1255 av_freep(&s->bitstream_buffer);
1256 s->allocated_bitstream_buffer_size = 0;
1258 av_freep(&s->avctx->stats_out);
1259 av_freep(&s->ac_stats);
/* The chroma matrices may alias the luma ones; only free them when
 * they are distinct allocations, then always clear the pointers. */
1261 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1262 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1263 s->q_chroma_intra_matrix= NULL;
1264 s->q_chroma_intra_matrix16= NULL;
1265 av_freep(&s->q_intra_matrix);
1266 av_freep(&s->q_inter_matrix);
1267 av_freep(&s->q_intra_matrix16);
1268 av_freep(&s->q_inter_matrix16);
1269 av_freep(&s->input_picture);
1270 av_freep(&s->reordered_input_picture);
1271 av_freep(&s->dct_offset);
/* Unreference every pooled picture before freeing the pool array. */
1274 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1275 free_picture_tables(&s->picture[i]);
1276 ff_mpeg_unref_picture(s, &s->picture[i]);
1279 av_freep(&s->picture);
1280 free_picture_tables(&s->last_picture);
1281 ff_mpeg_unref_picture(s, &s->last_picture);
1282 free_picture_tables(&s->current_picture);
1283 ff_mpeg_unref_picture(s, &s->current_picture);
1284 free_picture_tables(&s->next_picture);
1285 ff_mpeg_unref_picture(s, &s->next_picture);
1286 free_picture_tables(&s->new_picture);
1287 ff_mpeg_unref_picture(s, &s->new_picture);
1289 free_context_frame(s);
1291 s->context_initialized = 0;
1292 s->last_picture_ptr =
1293 s->next_picture_ptr =
1294 s->current_picture_ptr = NULL;
1295 s->linesize = s->uvlinesize = 0;
1298 av_cold void ff_init_rl(RLTable *rl,
1299 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
/* Build the run/level lookup tables (max_level[], max_run[],
 * index_run[]) for an RL VLC table, once for last=0 and once for
 * last=1. If static_store is non-NULL the tables are carved out of
 * that caller-provided buffer; otherwise they are av_malloc'ed. */
1301 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1302 uint8_t index_run[MAX_RUN + 1];
1303 int last, run, level, start, end, i;
1305 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1306 if (static_store && rl->max_level[0])
1309 /* compute max_level[], max_run[] and index_run[] */
1310 for (last = 0; last < 2; last++) {
/* Reset the scratch arrays for this 'last' half of the table;
 * index_run entries default to rl->n ("no code for this run"). */
1319 memset(max_level, 0, MAX_RUN + 1);
1320 memset(max_run, 0, MAX_LEVEL + 1);
1321 memset(index_run, rl->n, MAX_RUN + 1);
1322 for (i = start; i < end; i++) {
1323 run = rl->table_run[i];
1324 level = rl->table_level[i];
/* First occurrence of this run wins (table is ordered). */
1325 if (index_run[run] == rl->n)
1327 if (level > max_level[run])
1328 max_level[run] = level;
1329 if (run > max_run[level])
1330 max_run[level] = run;
/* Static case: the three tables live back-to-back in static_store;
 * dynamic case: each one gets its own allocation. */
1333 rl->max_level[last] = static_store[last];
1335 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1336 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1338 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1340 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1341 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1343 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1345 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1346 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1350 av_cold void ff_init_vlc_rl(RLTable *rl)
/* Precompute, for each of the 32 qscale values, a fast RL-VLC table
 * with the dequantized level baked in, so the decoder can skip the
 * run/level/dequant lookups in its inner loop. */
1354 for (q = 0; q < 32; q++) {
/* (q - 1) | 1 forces qadd odd — the H.263-style dequant rounding
 * term; for q == 0 this yields -1. TODO confirm q==0 handling in
 * the lines elided around here. */
1356 int qadd = (q - 1) | 1;
1362 for (i = 0; i < rl->vlc.table_size; i++) {
1363 int code = rl->vlc.table[i][0];
1364 int len = rl->vlc.table[i][1];
1367 if (len == 0) { // illegal code
1370 } else if (len < 0) { // more bits needed
1374 if (code == rl->n) { // esc
/* Regular code: look up run/level and dequantize the level. */
1378 run = rl->table_run[code] + 1;
1379 level = rl->table_level[code] * qmul + qadd;
/* run += 192 flags the "last coefficient" half of the table. */
1380 if (code >= rl->last) run += 192;
1383 rl->rl_vlc[q][i].len = len;
1384 rl->rl_vlc[q][i].level = level;
1385 rl->rl_vlc[q][i].run = run;
1390 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
/* Unreference every picture in the pool that is not a reference
 * frame; the current picture is kept unless remove_current is set. */
1394 /* release non reference frames */
1395 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1396 if (!s->picture[i].reference &&
1397 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1398 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Return whether this pool slot may be recycled: never the last
 * picture; free if it has no data, or if it is flagged for realloc
 * and not held as a delayed reference. */
1403 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1405 if (pic == s->last_picture_ptr)
1407 if (pic->f.data[0] == NULL)
1409 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1414 static int find_unused_picture(MpegEncContext *s, int shared)
/* Find a free slot in s->picture[]. First pass prefers completely
 * empty slots (no data at all); second pass accepts any slot that
 * pic_is_unused() approves. Overflow is treated as an internal
 * error — see the rationale comment below. */
1419 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1420 if (s->picture[i].f.data[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1424 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1425 if (pic_is_unused(s, &s->picture[i]))
1430 av_log(s->avctx, AV_LOG_FATAL,
1431 "Internal error, picture buffer overflow\n");
1432 /* We could return -1, but the codec would crash trying to draw into a
1433 * non-existing frame anyway. This is safer than waiting for a random crash.
1434 * Also the return of this is never useful, an encoder must only allocate
1435 * as much as allowed in the specification. This has no relationship to how
1436 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1437 * enough for such valid streams).
1438 * Plus, a decoder has to check stream validity and remove frames if too
1439 * many reference frames are around. Waiting for "OOM" is not correct at
1440 * all. Similarly, missing reference frames have to be replaced by
1441 * interpolated/MC frames, anything else is a bug in the codec ...
1447 int ff_find_unused_picture(MpegEncContext *s, int shared)
/* Public wrapper around find_unused_picture(): on success, if the
 * chosen slot was flagged needs_realloc (e.g. after a resolution
 * change), drop its old tables/buffers and reset its AVFrame so the
 * caller gets a clean slot. Returns the slot index. */
1449 int ret = find_unused_picture(s, shared);
1451 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1452 if (s->picture[ret].needs_realloc) {
1453 s->picture[ret].needs_realloc = 0;
1454 free_picture_tables(&s->picture[ret]);
1455 ff_mpeg_unref_picture(s, &s->picture[ret]);
1456 avcodec_get_frame_defaults(&s->picture[ret].f);
1462 static void update_noise_reduction(MpegEncContext *s)
/* Refresh the per-coefficient DCT noise-reduction offsets from the
 * running error statistics, separately for intra and inter blocks. */
1466 for (intra = 0; intra < 2; intra++) {
/* Halve the accumulators once the sample count passes 2^16 so the
 * statistics decay instead of growing without bound. */
1467 if (s->dct_count[intra] > (1 << 16)) {
1468 for (i = 0; i < 64; i++) {
1469 s->dct_error_sum[intra][i] >>= 1;
1471 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, with rounding;
 * the +1 in the divisor avoids division by zero. */
1474 for (i = 0; i < 64; i++) {
1475 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1476 s->dct_count[intra] +
1477 s->dct_error_sum[intra][i] / 2) /
1478 (s->dct_error_sum[intra][i] + 1);
1484 * generic function for encode/decode called after coding/decoding
1485 * the header and before a frame is coded/decoded.
1487 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* Generic per-frame setup shared by encoder and decoder, called after
 * the header is parsed and before coding/decoding begins: recycles old
 * pictures, allocates the current picture, fabricates dummy reference
 * frames when needed, fixes up field-picture pointers and selects the
 * dequantizers. Returns 0 on success, negative AVERROR on failure. */
1493 if (!ff_thread_can_start_frame(avctx)) {
1494 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1498 /* mark & release old frames */
1499 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1500 s->last_picture_ptr != s->next_picture_ptr &&
1501 s->last_picture_ptr->f.data[0]) {
1502 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1505 /* release forgotten pictures */
1506 /* if (mpeg124/h263) */
1508 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1509 if (&s->picture[i] != s->last_picture_ptr &&
1510 &s->picture[i] != s->next_picture_ptr &&
1511 s->picture[i].reference && !s->picture[i].needs_realloc) {
/* With frame threading such leftovers are expected, so only log
 * the zombie warning in the single-threaded case. */
1512 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1513 av_log(avctx, AV_LOG_ERROR,
1514 "releasing zombie picture\n");
1515 ff_mpeg_unref_picture(s, &s->picture[i]);
1520 ff_mpeg_unref_picture(s, &s->current_picture);
1523 ff_release_unused_pictures(s, 1);
1525 if (s->current_picture_ptr &&
1526 s->current_picture_ptr->f.data[0] == NULL) {
1527 // we already have a unused image
1528 // (maybe it was set before reading the header)
1529 pic = s->current_picture_ptr;
1531 i = ff_find_unused_picture(s, 0);
1533 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1536 pic = &s->picture[i];
1540 if (!s->droppable) {
1541 if (s->pict_type != AV_PICTURE_TYPE_B)
1545 pic->f.coded_picture_number = s->coded_picture_number++;
1547 if (ff_alloc_picture(s, pic, 0) < 0)
1550 s->current_picture_ptr = pic;
1551 // FIXME use only the vars from current_pic
1552 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1553 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1554 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* For MPEG-1/2 field pictures, derive top_field_first from which
 * field arrived first rather than trusting the header flag. */
1555 if (s->picture_structure != PICT_FRAME)
1556 s->current_picture_ptr->f.top_field_first =
1557 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1559 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1560 !s->progressive_sequence;
1561 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1564 s->current_picture_ptr->f.pict_type = s->pict_type;
1565 // if (s->flags && CODEC_FLAG_QSCALE)
1566 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1567 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1569 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1570 s->current_picture_ptr)) < 0)
/* Non-B frames advance the reference chain: next becomes last,
 * current becomes next. */
1573 if (s->pict_type != AV_PICTURE_TYPE_B) {
1574 s->last_picture_ptr = s->next_picture_ptr;
1576 s->next_picture_ptr = s->current_picture_ptr;
1578 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1579 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1580 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1581 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1582 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1583 s->pict_type, s->droppable);
/* Missing last reference (stream starts on a non-keyframe, or a
 * field-based keyframe): synthesize a gray dummy picture so motion
 * compensation has something to read from. */
1585 if ((s->last_picture_ptr == NULL ||
1586 s->last_picture_ptr->f.data[0] == NULL) &&
1587 (s->pict_type != AV_PICTURE_TYPE_I ||
1588 s->picture_structure != PICT_FRAME)) {
1589 int h_chroma_shift, v_chroma_shift;
1590 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1591 &h_chroma_shift, &v_chroma_shift);
1592 if (s->pict_type != AV_PICTURE_TYPE_I)
1593 av_log(avctx, AV_LOG_ERROR,
1594 "warning: first frame is no keyframe\n");
1595 else if (s->picture_structure != PICT_FRAME)
1596 av_log(avctx, AV_LOG_INFO,
1597 "allocate dummy last picture for field based first keyframe\n");
1599 /* Allocate a dummy frame */
1600 i = ff_find_unused_picture(s, 0);
1602 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1605 s->last_picture_ptr = &s->picture[i];
1606 s->last_picture_ptr->f.key_frame = 0;
1607 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1608 s->last_picture_ptr = NULL;
/* Fill with mid-gray (0x80 luma and chroma). */
1612 memset(s->last_picture_ptr->f.data[0], 0x80,
1613 avctx->height * s->last_picture_ptr->f.linesize[0]);
1614 memset(s->last_picture_ptr->f.data[1], 0x80,
1615 (avctx->height >> v_chroma_shift) *
1616 s->last_picture_ptr->f.linesize[1]);
1617 memset(s->last_picture_ptr->f.data[2], 0x80,
1618 (avctx->height >> v_chroma_shift) *
1619 s->last_picture_ptr->f.linesize[2]);
/* FLV1/H.263 use luma 16 (black) for the dummy instead of gray. */
1621 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1622 for(i=0; i<avctx->height; i++)
1623 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
/* Mark both fields of the dummy as fully decoded for threading. */
1626 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1627 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same dummy-frame trick for a missing forward (next) reference of
 * a B frame. */
1629 if ((s->next_picture_ptr == NULL ||
1630 s->next_picture_ptr->f.data[0] == NULL) &&
1631 s->pict_type == AV_PICTURE_TYPE_B) {
1632 /* Allocate a dummy frame */
1633 i = ff_find_unused_picture(s, 0);
1635 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1638 s->next_picture_ptr = &s->picture[i];
1639 s->next_picture_ptr->f.key_frame = 0;
1640 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1641 s->next_picture_ptr = NULL;
1644 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1645 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1648 #if 0 // BUFREF-FIXME
1649 memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1650 memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
/* Refresh the local last/next picture copies from their pointers. */
1652 if (s->last_picture_ptr) {
1653 ff_mpeg_unref_picture(s, &s->last_picture);
1654 if (s->last_picture_ptr->f.data[0] &&
1655 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1656 s->last_picture_ptr)) < 0)
1659 if (s->next_picture_ptr) {
1660 ff_mpeg_unref_picture(s, &s->next_picture);
1661 if (s->next_picture_ptr->f.data[0] &&
1662 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1663 s->next_picture_ptr)) < 0)
1667 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1668 s->last_picture_ptr->f.data[0]));
/* Field pictures: offset the data pointers to the selected field and
 * double the linesizes so rows step over the interleaved other field. */
1670 if (s->picture_structure!= PICT_FRAME) {
1672 for (i = 0; i < 4; i++) {
1673 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1674 s->current_picture.f.data[i] +=
1675 s->current_picture.f.linesize[i];
1677 s->current_picture.f.linesize[i] *= 2;
1678 s->last_picture.f.linesize[i] *= 2;
1679 s->next_picture.f.linesize[i] *= 2;
1683 s->err_recognition = avctx->err_recognition;
1685 /* set dequantizer, we can't do it during init as
1686 * it might change for mpeg4 and we can't do it in the header
1687 * decode as init is not called for mpeg4 there yet */
1688 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1689 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1690 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1691 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1692 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1693 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1695 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1696 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1699 if (s->dct_error_sum) {
1700 av_assert2(s->avctx->noise_reduction && s->encoding);
1701 update_noise_reduction(s);
1704 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1705 return ff_xvmc_field_start(s, avctx);
1710 /* generic function for encode/decode called after a
1711 * frame has been coded/decoded. */
1712 void ff_MPV_frame_end(MpegEncContext *s)
/* Generic per-frame teardown: draw the edge-emulation border around
 * reference frames, record statistics for rate control, sync the
 * picture pool with the current picture, and release non-reference
 * frames. */
1715 /* redraw edges for the frame if decoding didn't complete */
1716 // just to make sure that all data is rendered.
1717 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1718 ff_xvmc_field_end(s);
/* Edge extension only applies to software-decoded reference frames
 * with unrestricted MVs and no EMU_EDGE flag (and no hwaccel/VDPAU,
 * which manage their own surfaces). */
1719 } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1720 !s->avctx->hwaccel &&
1721 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1722 s->unrestricted_mv &&
1723 s->current_picture.reference &&
1725 !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1728 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1729 int hshift = desc->log2_chroma_w;
1730 int vshift = desc->log2_chroma_h;
/* Extend luma and both chroma planes by EDGE_WIDTH (scaled by the
 * chroma subsampling shifts for planes 1 and 2). */
1731 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1732 s->h_edge_pos, s->v_edge_pos,
1733 EDGE_WIDTH, EDGE_WIDTH,
1734 EDGE_TOP | EDGE_BOTTOM);
1735 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1736 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1737 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1738 EDGE_TOP | EDGE_BOTTOM);
1739 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1740 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1741 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1742 EDGE_TOP | EDGE_BOTTOM);
/* Remember per-type lambda for the rate-control code. */
1747 s->last_pict_type = s->pict_type;
1748 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1749 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1750 s->last_non_b_pict_type = s->pict_type;
1753 /* copy back current_picture variables */
1754 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1755 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1756 s->picture[i] = s->current_picture;
1760 assert(i < MAX_PICTURE_COUNT);
1764 /* release non-reference frames */
1765 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1766 if (!s->picture[i].reference)
1767 ff_mpeg_unref_picture(s, &s->picture[i]);
1770 // clear copies, to avoid confusion
1772 memset(&s->last_picture, 0, sizeof(Picture));
1773 memset(&s->next_picture, 0, sizeof(Picture));
1774 memset(&s->current_picture, 0, sizeof(Picture));
1776 s->avctx->coded_frame = &s->current_picture_ptr->f;
1778 if (s->current_picture.reference)
1779 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1783 * Draw a line from (ex, ey) -> (sx, sy).
1784 * @param w width of the image
1785 * @param h height of the image
1786 * @param stride stride/linesize of the image
1787 * @param color color of the line
1789 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1790 int w, int h, int stride, int color)
/* Anti-aliased line renderer used by the motion-vector visualizer:
 * after clipping to the image, it walks the major axis with a 16.16
 * fixed-point slope and splits 'color' between the two pixels
 * straddling the ideal line (additive blending into buf). */
1794 sx = av_clip(sx, 0, w - 1);
1795 sy = av_clip(sy, 0, h - 1);
1796 ex = av_clip(ex, 0, w - 1);
1797 ey = av_clip(ey, 0, h - 1);
1799 buf[sy * stride + sx] += color;
/* Mostly-horizontal case: ensure sx <= ex by swapping endpoints,
 * then step in x. */
1801 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1803 FFSWAP(int, sx, ex);
1804 FFSWAP(int, sy, ey);
1806 buf += sx + sy * stride;
/* f = slope in 16.16 fixed point (dy/dx). */
1808 f = ((ey - sy) << 16) / ex;
1809 for (x = 0; x <= ex; x++) {
/* fr = fractional y: weight color between row y and y+1. */
1811 fr = (x * f) & 0xFFFF;
1812 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1813 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* Mostly-vertical case: mirror of the above, stepping in y. */
1817 FFSWAP(int, sx, ex);
1818 FFSWAP(int, sy, ey);
1820 buf += sx + sy * stride;
1823 f = ((ex - sx) << 16) / ey;
1826 for(y= 0; y <= ey; y++){
1828 fr = (y*f) & 0xFFFF;
1829 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1830 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1836 * Draw an arrow from (ex, ey) -> (sx, sy).
1837 * @param w width of the image
1838 * @param h height of the image
1839 * @param stride stride/linesize of the image
1840 * @param color color of the arrow
1842 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1843 int ey, int w, int h, int stride, int color)
/* Draw an arrow for motion-vector visualization: the shaft plus, for
 * vectors longer than 3 pixels, two short head strokes at (sx, sy). */
1847 sx = av_clip(sx, -100, w + 100);
1848 sy = av_clip(sy, -100, h + 100);
1849 ex = av_clip(ex, -100, w + 100);
1850 ey = av_clip(ey, -100, h + 100);
/* Only draw a head when the vector is longer than 3 pixels. */
1855 if (dx * dx + dy * dy > 3 * 3) {
/* length in 12.4 fixed point (sqrt of a value scaled by 256). */
1858 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1860 // FIXME subpixel accuracy
/* Normalize the head strokes to ~3 pixels. */
1861 rx = ROUNDED_DIV(rx * 3 << 4, length);
1862 ry = ROUNDED_DIV(ry * 3 << 4, length);
/* Two head strokes: one along the shaft, one perpendicular. */
1864 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1865 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1867 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1871 * Print debugging info for the given picture.
/* Text and visual debug output for one decoded picture: per-MB skip /
 * QP / type dumps to the log, and optional in-frame overlays of motion
 * vectors, QP and MB type. Mutates 'pict' when visualization flags are
 * set. Not applicable to hwaccel/VDPAU pictures (no readable planes). */
2159 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
2160 int *low_delay,
2161 int mb_width, int mb_height, int mb_stride, int quarter_sample)
2163 if (avctx->hwaccel || !p || !p->mb_type
2164 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
/* ---- textual per-macroblock dump ---------------------------------- */
2168 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2171 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2172 av_get_picture_type_char(pict->pict_type));
2173 for (y = 0; y < mb_height; y++) {
2174 for (x = 0; x < mb_width; x++) {
2175 if (avctx->debug & FF_DEBUG_SKIP) {
2176 int count = mbskip_table[x + y * mb_stride];
2179 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2181 if (avctx->debug & FF_DEBUG_QP) {
2182 av_log(avctx, AV_LOG_DEBUG, "%2d",
2183 p->qscale_table[x + y * mb_stride]);
2185 if (avctx->debug & FF_DEBUG_MB_TYPE) {
2186 int mb_type = p->mb_type[x + y * mb_stride];
2187 // Type & MV direction
/* One character per MB: P=PCM, A=intra+ACpred, i=intra4x4,
 * I=intra16x16, d/D=direct, g/G=GMC, S=skip, >/< = fwd/bwd
 * only, X=bidirectional. */
2188 if (IS_PCM(mb_type))
2189 av_log(avctx, AV_LOG_DEBUG, "P");
2190 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2191 av_log(avctx, AV_LOG_DEBUG, "A");
2192 else if (IS_INTRA4x4(mb_type))
2193 av_log(avctx, AV_LOG_DEBUG, "i");
2194 else if (IS_INTRA16x16(mb_type))
2195 av_log(avctx, AV_LOG_DEBUG, "I");
2196 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2197 av_log(avctx, AV_LOG_DEBUG, "d");
2198 else if (IS_DIRECT(mb_type))
2199 av_log(avctx, AV_LOG_DEBUG, "D");
2200 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2201 av_log(avctx, AV_LOG_DEBUG, "g");
2202 else if (IS_GMC(mb_type))
2203 av_log(avctx, AV_LOG_DEBUG, "G");
2204 else if (IS_SKIP(mb_type))
2205 av_log(avctx, AV_LOG_DEBUG, "S");
2206 else if (!USES_LIST(mb_type, 1))
2207 av_log(avctx, AV_LOG_DEBUG, ">");
2208 else if (!USES_LIST(mb_type, 0))
2209 av_log(avctx, AV_LOG_DEBUG, "<");
2211 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2212 av_log(avctx, AV_LOG_DEBUG, "X");
/* Second character: partitioning (+, -, |, blank) or '?'. */
2215 if (IS_8X8(mb_type))
2216 av_log(avctx, AV_LOG_DEBUG, "+");
2217 else if (IS_16X8(mb_type))
2218 av_log(avctx, AV_LOG_DEBUG, "-");
2219 else if (IS_8X16(mb_type))
2220 av_log(avctx, AV_LOG_DEBUG, "|");
2221 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2222 av_log(avctx, AV_LOG_DEBUG, " ");
2224 av_log(avctx, AV_LOG_DEBUG, "?");
/* Third character: '=' for interlaced MBs. */
2227 if (IS_INTERLACED(mb_type))
2228 av_log(avctx, AV_LOG_DEBUG, "=");
2230 av_log(avctx, AV_LOG_DEBUG, " ");
2233 av_log(avctx, AV_LOG_DEBUG, "\n");
/* ---- in-frame visual overlays ------------------------------------ */
2237 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2238 (avctx->debug_mv)) {
2239 const int shift = 1 + quarter_sample;
2243 int h_chroma_shift, v_chroma_shift, block_height;
2244 const int width = avctx->width;
2245 const int height = avctx->height;
/* H.264/SVQ3 store MVs at 4x4 granularity; others at 8x8. */
2246 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2247 const int mv_stride = (mb_width << mv_sample_log2) +
2248 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2250 *low_delay = 0; // needed to see the vectors without trashing the buffers
2252 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2254 av_frame_make_writable(pict);
2256 pict->opaque = NULL;
2257 ptr = pict->data[0];
2258 block_height = 16 >> v_chroma_shift;
2260 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2262 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2263 const int mb_index = mb_x + mb_y * mb_stride;
/* Motion-vector arrows, one pass per requested direction type
 * (P forward, B forward, B backward). */
2264 if ((avctx->debug_mv) && p->motion_val[0]) {
2266 for (type = 0; type < 3; type++) {
2270 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2271 (pict->pict_type!= AV_PICTURE_TYPE_P))
2276 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2277 (pict->pict_type!= AV_PICTURE_TYPE_B))
2282 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2283 (pict->pict_type!= AV_PICTURE_TYPE_B))
2288 if (!USES_LIST(p->mb_type[mb_index], direction))
/* 8x8 partitions: one arrow per quadrant. */
2291 if (IS_8X8(p->mb_type[mb_index])) {
2293 for (i = 0; i < 4; i++) {
2294 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2295 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2296 int xy = (mb_x * 2 + (i & 1) +
2297 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2298 int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
2299 int my = (p->motion_val[direction][xy][1] >> shift) + sy;
2300 draw_arrow(ptr, sx, sy, mx, my, width,
2301 height, pict->linesize[0], 100);
/* 16x8 partitions: one arrow per half. */
2303 } else if (IS_16X8(p->mb_type[mb_index])) {
2305 for (i = 0; i < 2; i++) {
2306 int sx = mb_x * 16 + 8;
2307 int sy = mb_y * 16 + 4 + 8 * i;
2308 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2309 int mx = (p->motion_val[direction][xy][0] >> shift);
2310 int my = (p->motion_val[direction][xy][1] >> shift);
2312 if (IS_INTERLACED(p->mb_type[mb_index]))
2315 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2316 height, pict->linesize[0], 100);
/* 8x16 partitions. */
2318 } else if (IS_8X16(p->mb_type[mb_index])) {
2320 for (i = 0; i < 2; i++) {
2321 int sx = mb_x * 16 + 4 + 8 * i;
2322 int sy = mb_y * 16 + 8;
2323 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2324 int mx = p->motion_val[direction][xy][0] >> shift;
2325 int my = p->motion_val[direction][xy][1] >> shift;
2327 if (IS_INTERLACED(p->mb_type[mb_index]))
2330 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2331 height, pict->linesize[0], 100);
/* Default: single 16x16 arrow from the MB center. */
2334 int sx= mb_x * 16 + 8;
2335 int sy= mb_y * 16 + 8;
2336 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2337 int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2338 int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2339 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
/* QP overlay: paint both chroma planes with a gray level
 * proportional to the MB's qscale. */
2343 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2344 uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2345 0x0101010101010101ULL;
2347 for (y = 0; y < block_height; y++) {
2348 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2349 (block_height * mb_y + y) *
2350 pict->linesize[1]) = c;
2351 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2352 (block_height * mb_y + y) *
2353 pict->linesize[2]) = c;
/* MB-type overlay: one chroma hue per MB type (COLOR maps an
 * angle/radius to U/V), plus luma markers for partitioning. */
2356 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2358 int mb_type = p->mb_type[mb_index];
2361 #define COLOR(theta, r) \
2362 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2363 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2367 if (IS_PCM(mb_type)) {
2369 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2370 IS_INTRA16x16(mb_type)) {
2372 } else if (IS_INTRA4x4(mb_type)) {
2374 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2376 } else if (IS_DIRECT(mb_type)) {
2378 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2380 } else if (IS_GMC(mb_type)) {
2382 } else if (IS_SKIP(mb_type)) {
2384 } else if (!USES_LIST(mb_type, 1)) {
2386 } else if (!USES_LIST(mb_type, 0)) {
2389 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* Replicate the u/v byte across a 64-bit word to fill 8
 * chroma pixels per store. */
2393 u *= 0x0101010101010101ULL;
2394 v *= 0x0101010101010101ULL;
2395 for (y = 0; y < block_height; y++) {
2396 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2397 (block_height * mb_y + y) * pict->linesize[1]) = u;
2398 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2399 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* Luma: XOR-mark the horizontal/vertical segmentation lines. */
2403 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2404 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2405 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2406 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2407 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2409 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2410 for (y = 0; y < 16; y++)
2411 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2412 pict->linesize[0]] ^= 0x80;
/* For fine-grained MV storage, also mark 4x4 sub-splits whose
 * vectors actually differ. */
2414 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2415 int dm = 1 << (mv_sample_log2 - 2);
2416 for (i = 0; i < 4; i++) {
2417 int sx = mb_x * 16 + 8 * (i & 1);
2418 int sy = mb_y * 16 + 8 * (i >> 1);
2419 int xy = (mb_x * 2 + (i & 1) +
2420 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2422 int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2423 if (mv[0] != mv[dm] ||
2424 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2425 for (y = 0; y < 8; y++)
2426 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2427 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2428 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2429 pict->linesize[0]) ^= 0x8080808080808080ULL;
2433 if (IS_INTERLACED(mb_type) &&
2434 avctx->codec->id == AV_CODEC_ID_H264) {
/* Consume the skip count so the next frame's dump is fresh. */
2438 mbskip_table[mb_index] = 0;
/* Convenience wrapper: forward the MpegEncContext's geometry and
 * tables to the generic ff_print_debug_info2(). */
2159 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2161 ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2162 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2165 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
/* Export the per-MB qscale table to the output frame as an
 * AVBufferRef side reference. The table is stored with a one-row
 * (plus one) border, so the exported view is offset past it.
 * Returns 0 on success, AVERROR(ENOMEM) if the ref can't be made. */
2167 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
/* Skip the top padding row and the leading edge column. */
2168 int offset = 2*s->mb_stride + 1;
2170 return AVERROR(ENOMEM);
2171 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
/* Narrow the ref in place; av_frame_set_qp_table takes ownership. */
2172 ref->size -= offset;
2173 ref->data += offset;
2174 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/* Half-pel motion compensation for one block in lowres decoding mode:
 * splits the motion vector into an integer source offset plus a
 * sub-pel fraction, emulates the picture edge when the block reads
 * outside it, and applies the chroma-style MC function. */
2177 static inline int hpel_motion_lowres(MpegEncContext *s,
2178 uint8_t *dest, uint8_t *src,
2179 int field_based, int field_select,
2180 int src_x, int src_y,
2181 int width, int height, int stride,
2182 int h_edge_pos, int v_edge_pos,
2183 int w, int h, h264_chroma_mc_func *pix_op,
2184 int motion_x, int motion_y)
2186 const int lowres = s->avctx->lowres;
/* pix_op variants only exist up to index 2. */
2187 const int op_index = FFMIN(lowres, 2);
/* Mask selecting the sub-pel bits of the MV at this lowres level. */
2188 const int s_mask = (2 << lowres) - 1;
2192 if (s->quarter_sample) {
2197 sx = motion_x & s_mask;
2198 sy = motion_y & s_mask;
/* '>>' binds looser than '+': this is motion >> (lowres + 1),
 * i.e. the integer-pel part of a half-pel MV at this lowres. */
2199 src_x += motion_x >> lowres + 1;
2200 src_y += motion_y >> lowres + 1;
2202 src += src_y * stride + src_x;
/* Unsigned compare catches negative src_x/src_y as well as
 * overruns past the edge positions. */
2204 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2205 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2206 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
2207 (h + 1) << field_based, src_x,
2208 src_y << field_based,
2211 src = s->edge_emu_buffer;
/* Rescale the sub-pel fraction to the 1/8-pel range pix_op expects. */
2215 sx = (sx << 2) >> lowres;
2216 sy = (sy << 2) >> lowres;
2219 pix_op[op_index](dest, src, stride, h, sx, sy);
2223 /* apply one mpeg motion vector to the three components */
/* NOTE(review): this chunk is truncated — original line numbers are embedded
 * and several source lines (braces, some declarations/branches) are missing.
 * Code text is preserved verbatim; only comments were added. */
/* Performs lowres (reduced-resolution) motion compensation for one MV:
 * luma via pix_op[lowres - 1], chroma via pix_op[op_index], with edge
 * emulation when the source block reaches outside the picture. */
2224 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2231 uint8_t **ref_picture,
2232 h264_chroma_mc_func *pix_op,
2233 int motion_x, int motion_y,
2236 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2237 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
2239 const int lowres = s->avctx->lowres;
/* chroma mc table index: clamped to 2 (tables presumably have 3 entries —
 * TODO confirm against h264chroma). */
2240 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2);
2241 const int block_s = 8>>lowres;
/* sub-pel fraction mask at this lowres level: (2 << lowres) - 1 */
2242 const int s_mask = (2 << lowres) - 1;
2243 const int h_edge_pos = s->h_edge_pos >> lowres;
2244 const int v_edge_pos = s->v_edge_pos >> lowres;
/* field_based doubles the stride so one field is addressed */
2245 linesize = s->current_picture.f.linesize[0] << field_based;
2246 uvlinesize = s->current_picture.f.linesize[1] << field_based;
2248 // FIXME obviously not perfect but qpel will not work in lowres anyway
2249 if (s->quarter_sample) {
2255 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2258 sx = motion_x & s_mask;
2259 sy = motion_y & s_mask;
/* note: ">> lowres + 1" parses as ">> (lowres + 1)" — intentional here */
2260 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2261 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* chroma source position depends on the container format's chroma MV rules */
2263 if (s->out_format == FMT_H263) {
2264 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2265 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2266 uvsrc_x = src_x >> 1;
2267 uvsrc_y = src_y >> 1;
2268 } else if (s->out_format == FMT_H261) {
2269 // even chroma mv's are full pel in H261
2272 uvsx = (2 * mx) & s_mask;
2273 uvsy = (2 * my) & s_mask;
2274 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2275 uvsrc_y = mb_y * block_s + (my >> lowres);
2277 if(s->chroma_y_shift){
2282 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2283 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2285 if(s->chroma_x_shift){
2289 uvsy = motion_y & s_mask;
2291 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2294 uvsx = motion_x & s_mask;
2295 uvsy = motion_y & s_mask;
2302 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2303 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2304 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* out-of-picture source → copy through the edge-emulation buffer */
2306 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2307 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2308 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2309 linesize >> field_based, 17, 17 + field_based,
2310 src_x, src_y << field_based, h_edge_pos,
2312 ptr_y = s->edge_emu_buffer;
2313 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
/* NOTE(review): cb/cr staged 18 lines into edge_emu_buffer, cr offset by 16
 * bytes — assumes the buffer is large enough for 9x9 chroma; TODO confirm
 * allocation site (not visible in this chunk). */
2314 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2315 s->vdsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9,
2317 uvsrc_x, uvsrc_y << field_based,
2318 h_edge_pos >> 1, v_edge_pos >> 1);
2319 s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9,
2321 uvsrc_x, uvsrc_y << field_based,
2322 h_edge_pos >> 1, v_edge_pos >> 1);
2324 ptr_cr = uvbuf + 16;
2328 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
/* bottom field: advance dest and src by one (frame) line */
2330 dest_y += s->linesize;
2331 dest_cb += s->uvlinesize;
2332 dest_cr += s->uvlinesize;
2336 ptr_y += s->linesize;
2337 ptr_cb += s->uvlinesize;
2338 ptr_cr += s->uvlinesize;
/* rescale sub-pel fractions to the 1/8-pel range expected by the chroma mc */
2341 sx = (sx << 2) >> lowres;
2342 sy = (sy << 2) >> lowres;
2343 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2345 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2346 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2347 uvsx = (uvsx << 2) >> lowres;
2348 uvsy = (uvsy << 2) >> lowres;
2350 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2351 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2354 // FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV macroblocks in lowres mode: the four
 * luma MVs are combined (rounded) into a single chroma MV elsewhere; here the
 * averaged (mx,my) is applied to both cb and cr planes.
 * NOTE(review): chunk is truncated (missing lines, e.g. the emu handling for
 * the cr plane's condition); text preserved verbatim. */
2357 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2358 uint8_t *dest_cb, uint8_t *dest_cr,
2359 uint8_t **ref_picture,
2360 h264_chroma_mc_func * pix_op,
2363 const int lowres = s->avctx->lowres;
2364 const int op_index = FFMIN(lowres, 2);
2365 const int block_s = 8 >> lowres;
2366 const int s_mask = (2 << lowres) - 1;
/* edge positions halved for 4:2:0 chroma: ">> lowres + 1" == ">> (lowres+1)" */
2367 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2368 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2369 int emu = 0, src_x, src_y, offset, sx, sy;
2372 if (s->quarter_sample) {
2377 /* In case of 8X8, we construct a single chroma motion vector
2378 with a special rounding */
2379 mx = ff_h263_round_chroma(mx);
2380 my = ff_h263_round_chroma(my);
2384 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2385 src_y = s->mb_y * block_s + (my >> lowres + 1);
2387 offset = src_y * s->uvlinesize + src_x;
2388 ptr = ref_picture[1] + offset;
2389 if (s->flags & CODEC_FLAG_EMU_EDGE) {
2390 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2391 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2392 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
2393 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2394 ptr = s->edge_emu_buffer;
/* rescale sub-pel fraction to 1/8-pel for the h264 chroma mc functions */
2398 sx = (sx << 2) >> lowres;
2399 sy = (sy << 2) >> lowres;
2400 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* cr plane: same offset; emu path guarded by 'emu' (set above — the
 * assignment line is missing from this chunk). */
2402 ptr = ref_picture[2] + offset;
2404 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
2405 src_x, src_y, h_edge_pos, v_edge_pos);
2406 ptr = s->edge_emu_buffer;
2408 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2412 * motion compensation of a single macroblock
2414 * @param dest_y luma destination pointer
2415 * @param dest_cb chroma cb/u destination pointer
2416 * @param dest_cr chroma cr/v destination pointer
2417 * @param dir direction (0->forward, 1->backward)
2418 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2419 * @param pix_op halfpel motion compensation function (average or put normally)
2420 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* NOTE(review): chunk is truncated — the switch-case labels (MV_TYPE_16X16,
 * MV_TYPE_8X8, MV_TYPE_FIELD, MV_TYPE_16X8, MV_TYPE_DMV, per the visible
 * branch bodies) and several closing braces are missing; preserved verbatim. */
2422 static inline void MPV_motion_lowres(MpegEncContext *s,
2423 uint8_t *dest_y, uint8_t *dest_cb,
2425 int dir, uint8_t **ref_picture,
2426 h264_chroma_mc_func *pix_op)
2430 const int lowres = s->avctx->lowres;
2431 const int block_s = 8 >>lowres;
2436 switch (s->mv_type) {
/* 16x16: single MV for the whole macroblock */
2438 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2440 ref_picture, pix_op,
2441 s->mv[dir][0][0], s->mv[dir][0][1],
/* 8x8 (4MV): four luma MVs, chroma handled once with the rounded average */
2447 for (i = 0; i < 4; i++) {
2448 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2449 s->linesize) * block_s,
2450 ref_picture[0], 0, 0,
2451 (2 * mb_x + (i & 1)) * block_s,
2452 (2 * mb_y + (i >> 1)) * block_s,
2453 s->width, s->height, s->linesize,
2454 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2455 block_s, block_s, pix_op,
2456 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate MVs for the combined chroma vector */
2458 mx += s->mv[dir][i][0];
2459 my += s->mv[dir][i][1];
2462 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2463 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field prediction inside a frame picture: top field then bottom field */
2467 if (s->picture_structure == PICT_FRAME) {
2469 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2470 1, 0, s->field_select[dir][0],
2471 ref_picture, pix_op,
2472 s->mv[dir][0][0], s->mv[dir][0][1],
2475 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2476 1, 1, s->field_select[dir][1],
2477 ref_picture, pix_op,
2478 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture: the opposite-parity field of a P picture may live in the
 * current picture rather than the reference */
2481 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2482 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2483 ref_picture = s->current_picture_ptr->f.data;
2486 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2487 0, 0, s->field_select[dir][0],
2488 ref_picture, pix_op,
2490 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two MVs per field picture, second one offset by 2*block_s lines */
2494 for (i = 0; i < 2; i++) {
2495 uint8_t **ref2picture;
2497 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2498 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2499 ref2picture = ref_picture;
2501 ref2picture = s->current_picture_ptr->f.data;
2504 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2505 0, 0, s->field_select[dir][i],
2506 ref2picture, pix_op,
2507 s->mv[dir][i][0], s->mv[dir][i][1] +
2508 2 * block_s * i, block_s, mb_y >> 1);
2510 dest_y += 2 * block_s * s->linesize;
2511 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2512 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* dual-prime: put first prediction, then average the second onto it */
2516 if (s->picture_structure == PICT_FRAME) {
2517 for (i = 0; i < 2; i++) {
2519 for (j = 0; j < 2; j++) {
2520 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2522 ref_picture, pix_op,
2523 s->mv[dir][2 * i + j][0],
2524 s->mv[dir][2 * i + j][1],
2527 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2530 for (i = 0; i < 2; i++) {
2531 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2532 0, 0, s->picture_structure != i + 1,
2533 ref_picture, pix_op,
2534 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2535 2 * block_s, mb_y >> 1);
2537 // after put we make avg of the same block
2538 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2540 // opposite parity is always in the same
2541 // frame if this is second field
2542 if (!s->first_field) {
2543 ref_picture = s->current_picture_ptr->f.data;
2554 * find the lowest MB row referenced in the MVs
/* Used by frame-threading to know how far the reference frame must have been
 * decoded before this MB can be motion-compensated.
 * NOTE(review): truncated chunk — the mv_type switch cases setting 'mvs' and
 * the early-return body are missing; preserved verbatim. */
2556 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2558 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2559 int my, off, i, mvs;
/* field pictures / GMC: fall through to the conservative answer below */
2561 if (s->picture_structure != PICT_FRAME || s->mcsel)
2564 switch (s->mv_type) {
2578 for (i = 0; i < mvs; i++) {
/* normalize half-pel MVs to quarter-pel units so the >>6 below is uniform */
2579 my = s->mv[dir][i][1]<<qpel_shift;
2580 my_max = FFMAX(my_max, my);
2581 my_min = FFMIN(my_min, my);
/* convert the largest vertical displacement (qpel) to MB rows, rounding up */
2584 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2586 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* conservative fallback: the whole reference frame */
2588 return s->mb_height-1;
2591 /* put block[] to dest[] */
/* Dequantize an intra block and write (overwrite) its IDCT into dest.
 * NOTE(review): braces are missing from this truncated chunk; code verbatim. */
2592 static inline void put_dct(MpegEncContext *s,
2593 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2595 s->dct_unquantize_intra(s, block, i, qscale);
2596 s->dsp.idct_put (dest, line_size, block);
2599 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block onto dest; skipped when the
 * block has no coded coefficients (block_last_index < 0).
 * NOTE(review): braces missing in this truncated chunk; code verbatim. */
2600 static inline void add_dct(MpegEncContext *s,
2601 int16_t *block, int i, uint8_t *dest, int line_size)
2603 if (s->block_last_index[i] >= 0) {
2604 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block and add its IDCT onto dest; no-op for blocks with
 * no coded coefficients.
 * NOTE(review): braces missing in this truncated chunk; code verbatim. */
2608 static inline void add_dequant_dct(MpegEncContext *s,
2609 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2611 if (s->block_last_index[i] >= 0) {
2612 s->dct_unquantize_inter(s, block, i, qscale);
2614 s->dsp.idct_add (dest, line_size, block);
2619 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra prediction state (luma + chroma DC to 1024, AC to 0,
 * msmpeg4 coded_block flags to 0) so a later intra MB does not predict from
 * stale values, then clears the mbintra_table entry.
 * NOTE(review): truncated chunk (some lines missing); code verbatim. */
2621 void ff_clean_intra_table_entries(MpegEncContext *s)
2623 int wrap = s->b8_stride;
2624 int xy = s->block_index[0];
/* luma DC predictors: reset the 2x2 group of 8x8 blocks to the mid value */
2627 s->dc_val[0][xy + 1 ] =
2628 s->dc_val[0][xy + wrap] =
2629 s->dc_val[0][xy + 1 + wrap] = 1024;
/* luma AC predictors (two rows of two blocks each) */
2631 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2632 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2633 if (s->msmpeg4_version>=3) {
2634 s->coded_block[xy ] =
2635 s->coded_block[xy + 1 ] =
2636 s->coded_block[xy + wrap] =
2637 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one entry per MB, indexed with mb_stride */
2640 wrap = s->mb_stride;
2641 xy = s->mb_x + s->mb_y * wrap;
2643 s->dc_val[2][xy] = 1024;
2645 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2646 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2648 s->mbintra_table[xy]= 0;
2651 /* generic function called after a macroblock has been parsed by the
2652 decoder or after it has been encoded by the encoder.
2654 Important variables used:
2655 s->mb_intra : true if intra macroblock
2656 s->mv_dir : motion vector direction
2657 s->mv_type : motion vector type
2658 s->mv : motion vector
2659 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* NOTE(review): heavily truncated chunk — many lines (conditions, braces,
 * some statements) are missing between the embedded original line numbers.
 * Code text preserved verbatim; only comments added.
 * The lowres_flag/is_mpeg12 parameters are compile-time constants at each
 * call site (function is av_always_inline), so the branches on them are
 * specialized away. */
2661 static av_always_inline
2662 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2663 int lowres_flag, int is_mpeg12)
2665 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XvMC hardware path handles the whole MB itself */
2666 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2667 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2671 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2672 /* print DCT coefficients */
2674 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2676 for(j=0; j<64; j++){
2677 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2679 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2683 s->current_picture.qscale_table[mb_xy] = s->qscale;
2685 /* update DC predictors for P macroblocks */
2687 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2688 if(s->mbintra_table[mb_xy])
2689 ff_clean_intra_table_entries(s);
2693 s->last_dc[2] = 128 << s->intra_dc_precision;
2696 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2697 s->mbintra_table[mb_xy]=1;
2699 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2700 uint8_t *dest_y, *dest_cb, *dest_cr;
2701 int dct_linesize, dct_offset;
2702 op_pixels_func (*op_pix)[4];
2703 qpel_mc_func (*op_qpix)[16];
2704 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2705 const int uvlinesize = s->current_picture.f.linesize[1];
2706 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2707 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2709 /* avoid copy if macroblock skipped in last frame too */
2710 /* skip only during decoding as we might trash the buffers during encoding a bit */
2712 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2714 if (s->mb_skipped) {
2716 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2718 } else if(!s->current_picture.reference) {
2721 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: double the stride so each IDCT writes into one field */
2725 dct_linesize = linesize << s->interlaced_dct;
2726 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2730 dest_cb= s->dest[1];
2731 dest_cr= s->dest[2];
/* non-readable B-frame path renders into a scratchpad first */
2733 dest_y = s->b_scratchpad;
2734 dest_cb= s->b_scratchpad+16*linesize;
2735 dest_cr= s->b_scratchpad+32*linesize;
2739 /* motion handling */
2740 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the referenced rows of the ref frames exist */
2743 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2744 if (s->mv_dir & MV_DIR_FORWARD) {
2745 ff_thread_await_progress(&s->last_picture_ptr->tf,
2746 ff_MPV_lowest_referenced_row(s, 0),
2749 if (s->mv_dir & MV_DIR_BACKWARD) {
2750 ff_thread_await_progress(&s->next_picture_ptr->tf,
2751 ff_MPV_lowest_referenced_row(s, 1),
/* lowres MC path: h264 chroma mc tables, put then avg for bidirectional */
2757 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2759 if (s->mv_dir & MV_DIR_FORWARD) {
2760 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2761 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2763 if (s->mv_dir & MV_DIR_BACKWARD) {
2764 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* full-res MC path: hpel/qpel tables, rounding variant per pict type */
2767 op_qpix= s->me.qpel_put;
2768 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2769 op_pix = s->hdsp.put_pixels_tab;
2771 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2773 if (s->mv_dir & MV_DIR_FORWARD) {
2774 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2775 op_pix = s->hdsp.avg_pixels_tab;
2776 op_qpix= s->me.qpel_avg;
2778 if (s->mv_dir & MV_DIR_BACKWARD) {
2779 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2784 /* skip dequant / idct if we are really late ;) */
2785 if(s->avctx->skip_idct){
2786 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2787 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2788 || s->avctx->skip_idct >= AVDISCARD_ALL)
2792 /* add dct residue */
/* codecs without embedded dequant in the bitstream reader: dequant + add */
2793 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2794 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2795 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2796 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2797 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2798 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2800 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2801 if (s->chroma_y_shift){
2802 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2803 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2807 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2808 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2809 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2810 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* already-dequantized residue: just add the IDCT */
2813 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2814 add_dct(s, block[0], 0, dest_y , dct_linesize);
2815 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2816 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2817 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2819 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2820 if(s->chroma_y_shift){//Chroma420
2821 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2822 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2825 dct_linesize = uvlinesize << s->interlaced_dct;
2826 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2828 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2829 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2830 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2831 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2832 if(!s->chroma_x_shift){//Chroma444
2833 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2834 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2835 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2836 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2841 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2842 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2845 /* dct only in intra block */
/* intra MB: put (overwrite) rather than add */
2846 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2847 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2848 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2849 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2850 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2852 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2853 if(s->chroma_y_shift){
2854 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2855 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2859 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2860 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2861 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2862 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra: coefficients are already dequantized, plain idct_put */
2866 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2867 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2868 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2869 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2871 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2872 if(s->chroma_y_shift){
2873 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2874 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2877 dct_linesize = uvlinesize << s->interlaced_dct;
2878 dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
2880 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2881 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2882 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2883 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2884 if(!s->chroma_x_shift){//Chroma444
2885 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2886 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2887 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2888 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* copy the scratchpad back to the real destination (non-readable path) */
2896 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2897 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2898 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Dispatch to the always-inlined MPV_decode_mb_internal with compile-time
 * constants (lowres_flag, is_mpeg12) so each combination is specialized.
 * NOTE(review): braces/else lines missing in this truncated chunk; verbatim. */
2903 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2905 if(s->out_format == FMT_MPEG1) {
2906 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2907 else MPV_decode_mb_internal(s, block, 0, 1);
2910 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2911 else MPV_decode_mb_internal(s, block, 0, 0);
2915 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* Draws padding edges around the just-decoded band (when software decoding
 * with edges enabled) and invokes the user's draw_horiz_band callback.
 * NOTE(review): truncated chunk — several lines (vdpau check, src selection,
 * emu_edge condition, some offsets) are missing; text preserved verbatim. */
2917 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2918 Picture *last, int y, int h, int picture_structure,
2919 int first_field, int draw_edges, int low_delay,
2920 int v_edge_pos, int h_edge_pos)
2922 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2923 int hshift = desc->log2_chroma_w;
2924 int vshift = desc->log2_chroma_h;
2925 const int field_pic = picture_structure != PICT_FRAME;
/* edge drawing only makes sense for software decoding into padded buffers */
2931 if (!avctx->hwaccel &&
2932 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2935 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2936 int *linesize = cur->f.linesize;
2937 int sides = 0, edge_h;
2938 if (y==0) sides |= EDGE_TOP;
2939 if (y + h >= v_edge_pos)
2940 sides |= EDGE_BOTTOM;
2942 edge_h= FFMIN(h, v_edge_pos - y);
2944 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2945 linesize[0], h_edge_pos, edge_h,
2946 EDGE_WIDTH, EDGE_WIDTH, sides);
2947 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2948 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2949 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2950 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2951 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2952 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
/* clamp to the real picture height for the user callback */
2955 h = FFMIN(h, avctx->height - y);
2957 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2959 if (avctx->draw_horiz_band) {
2961 int offset[AV_NUM_DATA_POINTERS];
/* pick current or last picture depending on output order rules */
2964 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2965 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2972 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2973 picture_structure == PICT_FRAME &&
2974 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2975 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2978 offset[0]= y * src->linesize[0];
2980 offset[2]= (y >> vshift) * src->linesize[1];
2981 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2987 avctx->draw_horiz_band(avctx, src, offset,
2988 y, picture_structure, h);
/* Thin MpegEncContext wrapper around ff_draw_horiz_band: edges are drawn only
 * when unrestricted MVs are allowed and this is not an intra-only stream. */
2992 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2994 int draw_edges = s->unrestricted_mv && !s->intra_only;
2995 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2996 &s->last_picture, y, h, s->picture_structure,
2997 s->first_field, draw_edges, s->low_delay,
2998 s->v_edge_pos, s->h_edge_pos);
/* Computes per-MB block indices into the 8x8-block-granularity prediction
 * tables and the dest[] pointers for the current MB position.
 * NOTE(review): truncated chunk (some lines/braces missing); code verbatim. */
3001 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3002 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
3003 const int uvlinesize = s->current_picture.f.linesize[1];
/* mb_size = log2 of MB pixel size (16 >> lowres) */
3004 const int mb_size= 4 - s->avctx->lowres;
/* four luma 8x8 blocks (2x2), then the two chroma planes' entries */
3006 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3007 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3008 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3009 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3010 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3011 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3012 //block_index is not used by mpeg2, so it is not affected by chroma_format
3014 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
3015 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3016 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3018 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3020 if(s->picture_structure==PICT_FRAME){
3021 s->dest[0] += s->mb_y * linesize << mb_size;
3022 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3023 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: mb_y counts field rows, hence the >>1 */
3025 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3026 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3027 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3028 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3034 * Permute an 8x8 block.
3035 * @param block the block which will be permuted according to the given permutation vector
3036 * @param permutation the permutation vector
3037 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3038 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3039 * (inverse) permutated to scantable order!
/* NOTE(review): truncated chunk — the declaration of the temp buffer and the
 * copy-out loop's surrounding lines are missing; code verbatim. The visible
 * shape is: copy coefficients (in scan order) into a temp, then write them
 * back at their permuted positions. */
3041 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3047 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
3049 for(i=0; i<=last; i++){
3050 const int j= scantable[i];
3055 for(i=0; i<=last; i++){
3056 const int j= scantable[i];
3057 const int perm_j= permutation[j];
3058 block[perm_j]= temp[j];
/* Flush/reset decoder state on seek: unref all pictures, clear picture
 * pointers, reset MB position, parser state and the bitstream buffer.
 * NOTE(review): truncated chunk (some braces/lines missing); code verbatim. */
3062 void ff_mpeg_flush(AVCodecContext *avctx){
3064 MpegEncContext *s = avctx->priv_data;
3066 if(s==NULL || s->picture==NULL)
3069 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3070 ff_mpeg_unref_picture(s, &s->picture[i]);
3071 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3073 ff_mpeg_unref_picture(s, &s->current_picture);
3074 ff_mpeg_unref_picture(s, &s->last_picture);
3075 ff_mpeg_unref_picture(s, &s->next_picture);
3077 s->mb_x= s->mb_y= 0;
/* reset the raw-bitstream parser so stale partial frames are discarded */
3080 s->parse_context.state= -1;
3081 s->parse_context.frame_start_found= 0;
3082 s->parse_context.overread= 0;
3083 s->parse_context.overread_index= 0;
3084 s->parse_context.index= 0;
3085 s->parse_context.last_index= 0;
3086 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantizer: DC scaled by y/c_dc_scale, AC by qscale and the
 * intra matrix, with MPEG-1 oddification ((level-1)|1) for mismatch control.
 * NOTE(review): truncated chunk — the sign-branch lines are missing between
 * the two identical-looking scaling sequences; code verbatim. */
3090 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
3091 int16_t *block, int n, int qscale)
3093 int i, level, nCoeffs;
3094 const uint16_t *quant_matrix;
3096 nCoeffs= s->block_last_index[n];
/* blocks 0-3 are luma, 4+ chroma */
3098 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3099 /* XXX: only mpeg1 */
3100 quant_matrix = s->intra_matrix;
3101 for(i=1;i<=nCoeffs;i++) {
3102 int j= s->intra_scantable.permutated[i];
3107 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3108 level = (level - 1) | 1;
3111 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3112 level = (level - 1) | 1;
/* MPEG-1 inter dequantizer: level' = ((2*level+1) * qscale * matrix) >> 4,
 * then oddified ((level-1)|1); separate branches for the two signs (the
 * sign-test lines are missing from this truncated chunk). Code verbatim. */
3119 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
3120 int16_t *block, int n, int qscale)
3122 int i, level, nCoeffs;
3123 const uint16_t *quant_matrix;
3125 nCoeffs= s->block_last_index[n];
3127 quant_matrix = s->inter_matrix;
/* inter blocks have no special DC handling, so start at i=0 */
3128 for(i=0; i<=nCoeffs; i++) {
3129 int j= s->intra_scantable.permutated[i];
3134 level = (((level << 1) + 1) * qscale *
3135 ((int) (quant_matrix[j]))) >> 4;
3136 level = (level - 1) | 1;
3139 level = (((level << 1) + 1) * qscale *
3140 ((int) (quant_matrix[j]))) >> 4;
3141 level = (level - 1) | 1;
/* MPEG-2 intra dequantizer: like the MPEG-1 variant but without the
 * (level-1)|1 oddification (MPEG-2 uses a sum-parity mismatch control
 * instead — handled elsewhere/not visible here).
 * NOTE(review): truncated chunk (sign branches missing); code verbatim. */
3148 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
3149 int16_t *block, int n, int qscale)
3151 int i, level, nCoeffs;
3152 const uint16_t *quant_matrix;
/* alternate scan always codes up to coefficient 63 */
3154 if(s->alternate_scan) nCoeffs= 63;
3155 else nCoeffs= s->block_last_index[n];
3157 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3158 quant_matrix = s->intra_matrix;
3159 for(i=1;i<=nCoeffs;i++) {
3160 int j= s->intra_scantable.permutated[i];
3165 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3168 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact MPEG-2 intra dequantizer: same scaling as the _c variant; the
 * lines that differ (presumably the mismatch-control sum accumulation —
 * TODO confirm, they are missing from this truncated chunk) are not visible.
 * Code verbatim. */
3175 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
3176 int16_t *block, int n, int qscale)
3178 int i, level, nCoeffs;
3179 const uint16_t *quant_matrix;
3182 if(s->alternate_scan) nCoeffs= 63;
3183 else nCoeffs= s->block_last_index[n];
3185 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3187 quant_matrix = s->intra_matrix;
3188 for(i=1;i<=nCoeffs;i++) {
3189 int j= s->intra_scantable.permutated[i];
3194 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3197 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantizer: ((2*level+1) * qscale * matrix) >> 4, no
 * oddification; sign branches' test lines missing from this truncated chunk.
 * Code verbatim. */
3206 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
3207 int16_t *block, int n, int qscale)
3209 int i, level, nCoeffs;
3210 const uint16_t *quant_matrix;
3213 if(s->alternate_scan) nCoeffs= 63;
3214 else nCoeffs= s->block_last_index[n];
3216 quant_matrix = s->inter_matrix;
3217 for(i=0; i<=nCoeffs; i++) {
3218 int j= s->intra_scantable.permutated[i];
3223 level = (((level << 1) + 1) * qscale *
3224 ((int) (quant_matrix[j]))) >> 4;
3227 level = (((level << 1) + 1) * qscale *
3228 ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra dequantizer: level' = level*qmul ± qadd, with
 * qadd = (qscale-1)|1 (always odd); qmul assignment and some branch lines
 * are missing from this truncated chunk. Code verbatim. */
3237 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
3238 int16_t *block, int n, int qscale)
3240 int i, level, qmul, qadd;
3243 av_assert2(s->block_last_index[n]>=0);
3248 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3249 qadd = (qscale - 1) | 1;
/* raster_end maps the scan-order last index to a raster-order bound */
3256 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3258 for(i=1; i<=nCoeffs; i++) {
3262 level = level * qmul - qadd;
3264 level = level * qmul + qadd;
/* H.263-style inter dequantizer: same level*qmul ± qadd scheme as the intra
 * variant but no DC scaling and the loop starts at i=0.
 * NOTE(review): truncated chunk (qmul assignment, sign tests missing);
 * code verbatim. */
3271 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
3272 int16_t *block, int n, int qscale)
3274 int i, level, qmul, qadd;
3277 av_assert2(s->block_last_index[n]>=0);
3279 qadd = (qscale - 1) | 1;
3282 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3284 for(i=0; i<=nCoeffs; i++) {
3288 level = level * qmul - qadd;
3290 level = level * qmul + qadd;
3298 * set qscale and update qscale dependent variables.
/* Clamps qscale to the valid range (low clamp line missing from this
 * truncated chunk; high clamp to 31 visible) and refreshes the derived
 * chroma_qscale and DC scale factors from their lookup tables. */
3300 void ff_set_qscale(MpegEncContext * s, int qscale)
3304 else if (qscale > 31)
3308 s->chroma_qscale= s->chroma_qscale_table[qscale];
3310 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3311 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report the current MB row as decoded to waiting frame-threads; skipped for
 * B frames, partitioned frames, and after a decode error (rows may be
 * incomplete in those cases). */
3314 void ff_MPV_report_decode_progress(MpegEncContext *s)
3316 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3317 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3320 #if CONFIG_ERROR_RESILIENCE
/* Copies the per-frame state the error-resilience module needs out of the
 * MpegEncContext into the ERContext, then starts ER for the frame. */
3321 void ff_mpeg_er_frame_start(MpegEncContext *s)
3323 ERContext *er = &s->er;
3325 er->cur_pic = s->current_picture_ptr;
3326 er->last_pic = s->last_picture_ptr;
3327 er->next_pic = s->next_picture_ptr;
/* temporal distances used for MV scaling during concealment */
3329 er->pp_time = s->pp_time;
3330 er->pb_time = s->pb_time;
3331 er->quarter_sample = s->quarter_sample;
3332 er->partitioned_frame = s->partitioned_frame;
3334 ff_er_frame_start(er);
3336 #endif /* CONFIG_ERROR_RESILIENCE */