2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
35 #include "h264chroma.h"
38 #include "mpegvideo.h"
41 #include "xvmc_internal.h"
/* Forward declarations of the C reference dequantization routines.
 * Pointers to these are installed into MpegEncContext by
 * ff_dct_common_init() below (architecture-specific init may
 * override them afterwards). */
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
/* Bit-exact variant, selected when CODEC_FLAG_BITEXACT is set. */
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
/* Identity mapping from luma qscale to chroma qscale
 * (chroma uses the same quantizer as luma by default). */
60 static const uint8_t ff_default_chroma_qscale_table[32] = {
61 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
62 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
63 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* DC coefficient scale table indexed by qscale: constant 8 for all
 * 128 entries. Also reused as entry 0 of ff_mpeg2_dc_scale_table[]
 * below. */
66 const uint8_t ff_mpeg1_dc_scale_table[128] = {
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
68 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* DC scale table, constant 4 for every qscale; selected via
 * ff_mpeg2_dc_scale_table[1] (presumably indexed by the stream's
 * intra DC precision — confirm against the caller). */
78 static const uint8_t mpeg2_dc_scale_table1[128] = {
79 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
80 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* DC scale table, constant 2 for every qscale; entry 2 of
 * ff_mpeg2_dc_scale_table[] below. */
90 static const uint8_t mpeg2_dc_scale_table2[128] = {
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* DC scale table, constant 1 for every qscale; entry 3 of
 * ff_mpeg2_dc_scale_table[] below. */
102 static const uint8_t mpeg2_dc_scale_table3[128] = {
103 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
104 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Selector over the four DC scale tables above: index 0 gives the
 * MPEG-1 table (scale 8), indices 1..3 give scales 4, 2, 1. */
114 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
115 ff_mpeg1_dc_scale_table,
116 mpeg2_dc_scale_table1,
117 mpeg2_dc_scale_table2,
118 mpeg2_dc_scale_table3,
/* Pixel-format list exported for 4:2:0-only codecs.
 * NOTE(review): the initializer entries are not visible in this
 * chunk of the file — verify against the full source. */
121 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience macroblock decode callback (installed as
 * er->decode_mb in init_er() below): loads the supplied motion/intra
 * state into the context, computes the destination pointers into the
 * current picture, and reconstructs the MB via ff_MPV_decode_mb(). */
126 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
128 int mb_x, int mb_y, int mb_intra, int mb_skipped)
130 MpegEncContext *s = opaque;
133 s->mv_type = mv_type;
134 s->mb_intra = mb_intra;
135 s->mb_skipped = mb_skipped;
138 memcpy(s->mv, mv, sizeof(*mv));
140 ff_init_block_index(s);
141 ff_update_block_index(s);
143 s->dsp.clear_blocks(s->block[0]);
/* Luma dest: 16x16 MB grid; chroma dests are scaled by the chroma
 * subsampling shifts. */
145 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
146 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
147 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
150 ff_MPV_decode_mb(s, s->block);
153 /* init common dct for both encoder and decoder */
154 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Initialize the DSP helper contexts used by both code paths. */
156 ff_dsputil_init(&s->dsp, s->avctx);
157 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
158 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
159 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* Install the C reference dequantizers declared at the top of the
 * file; the bit-exact MPEG-2 intra variant replaces the default one
 * when CODEC_FLAG_BITEXACT is requested. */
161 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
162 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
163 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
164 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
165 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
166 if (s->flags & CODEC_FLAG_BITEXACT)
167 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
168 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Architecture-specific overrides (x86, Alpha, ARM, Blackfin, PPC);
 * presumably guarded by ARCH_* conditionals not visible here. */
171 ff_MPV_common_init_x86(s);
173 ff_MPV_common_init_axp(s);
175 ff_MPV_common_init_arm(s);
177 ff_MPV_common_init_bfin(s);
179 ff_MPV_common_init_ppc(s);
182 /* load & permutate scantables
183 * note: only wmv uses different ones
185 if (s->alternate_scan) {
186 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
187 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
189 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
190 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
192 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers: edge_emu_buffer
 * plus one shared me/rd/b/obmc scratchpad. Returns AVERROR(ENOMEM)
 * on failure (after freeing edge_emu_buffer). */
198 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
200 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
202 // edge emu needs blocksize + filter length - 1
203 // (= 17x17 for halfpel / 21x21 for h264)
204 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
205 // at uvlinesize. It supports only YUV420 so 24x24 is enough
206 // linesize * interlaced * MBsize
207 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
210 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
/* The remaining scratch pointers all alias the single me.scratchpad
 * allocation; obmc_scratchpad is offset 16 bytes into it. */
212 s->me.temp = s->me.scratchpad;
213 s->rd_scratchpad = s->me.scratchpad;
214 s->b_scratchpad = s->me.scratchpad;
215 s->obmc_scratchpad = s->me.scratchpad + 16;
219 av_freep(&s->edge_emu_buffer);
220 return AVERROR(ENOMEM);
224 * Allocate a frame buffer
226 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* WMV3IMAGE / VC1IMAGE / MSS2 use internally-sized buffers, so they
 * bypass the (possibly user-supplied) threaded get_buffer path and
 * fall back to avcodec_default_get_buffer2() below. */
231 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
232 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
233 s->codec_id != AV_CODEC_ID_MSS2)
234 r = ff_thread_get_buffer(s->avctx, &pic->tf,
235 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
237 pic->f.width = s->avctx->width;
238 pic->f.height = s->avctx->height;
239 pic->f.format = s->avctx->pix_fmt;
240 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
243 if (r < 0 || !pic->f.data[0]) {
244 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Allocate the hwaccel's per-picture private data, if it needs any. */
249 if (s->avctx->hwaccel) {
250 assert(!pic->hwaccel_picture_private);
251 if (s->avctx->hwaccel->priv_data_size) {
252 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
253 if (!pic->hwaccel_priv_buf) {
254 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
257 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* Sanity checks: strides must not change mid-stream, and the two
 * chroma planes must share a stride. */
261 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
262 s->uvlinesize != pic->f.linesize[1])) {
263 av_log(s->avctx, AV_LOG_ERROR,
264 "get_buffer() failed (stride changed)\n");
265 ff_mpeg_unref_picture(s, pic);
269 if (pic->f.linesize[1] != pic->f.linesize[2]) {
270 av_log(s->avctx, AV_LOG_ERROR,
271 "get_buffer() failed (uv stride mismatch)\n");
272 ff_mpeg_unref_picture(s, pic);
/* Lazily allocate the linesize-dependent scratch buffers now that a
 * real linesize is known. */
276 if (!s->edge_emu_buffer &&
277 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
278 av_log(s->avctx, AV_LOG_ERROR,
279 "get_buffer() failed to allocate context scratch buffers.\n");
280 ff_mpeg_unref_picture(s, pic);
/* Unref every per-picture AVBuffer-backed table and reset the
 * recorded allocation dimensions so a later ff_alloc_picture() will
 * reallocate them. */
287 static void free_picture_tables(Picture *pic)
291 pic->alloc_mb_width =
292 pic->alloc_mb_height = 0;
294 av_buffer_unref(&pic->mb_var_buf);
295 av_buffer_unref(&pic->mc_mb_var_buf);
296 av_buffer_unref(&pic->mb_mean_buf);
297 av_buffer_unref(&pic->mbskip_table_buf);
298 av_buffer_unref(&pic->qscale_table_buf);
299 av_buffer_unref(&pic->mb_type_buf);
301 for (i = 0; i < 2; i++) {
302 av_buffer_unref(&pic->motion_val_buf[i]);
303 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side tables (skip/qscale/mb_type always;
 * encoder statistics and motion tables conditionally). Records the
 * mb dimensions used, so a resolution change can be detected later.
 * Returns 0 or AVERROR(ENOMEM); partially-allocated buffers are left
 * for the caller to release via free_picture_tables(). */
307 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
309 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
310 const int mb_array_size = s->mb_stride * s->mb_height;
311 const int b8_array_size = s->b8_stride * s->mb_height * 2;
315 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
316 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
317 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
319 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
320 return AVERROR(ENOMEM);
/* Encoder-side variance/mean statistics tables (presumably only in
 * the encoding path — the guarding condition is not visible here). */
323 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
324 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
325 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
326 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
327 return AVERROR(ENOMEM);
/* Motion vector and reference index tables, needed for H.263-family
 * output, encoding, or motion-vector debugging/visualization. */
330 if (s->out_format == FMT_H263 || s->encoding ||
331 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
332 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
333 int ref_index_size = 4 * mb_array_size;
335 for (i = 0; mv_size && i < 2; i++) {
336 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
337 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
338 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
339 return AVERROR(ENOMEM);
343 pic->alloc_mb_width = s->mb_width;
344 pic->alloc_mb_height = s->mb_height;
/* Ensure every per-picture table buffer is exclusively owned (not
 * shared with another Picture) before it is written to, using
 * av_buffer_make_writable() via the MAKE_WRITABLE helper macro. */
349 static int make_tables_writable(Picture *pic)
352 #define MAKE_WRITABLE(table) \
355 (ret = av_buffer_make_writable(&pic->table)) < 0)\
359 MAKE_WRITABLE(mb_var_buf);
360 MAKE_WRITABLE(mc_mb_var_buf);
361 MAKE_WRITABLE(mb_mean_buf);
362 MAKE_WRITABLE(mbskip_table_buf);
363 MAKE_WRITABLE(qscale_table_buf);
364 MAKE_WRITABLE(mb_type_buf);
366 for (i = 0; i < 2; i++) {
367 MAKE_WRITABLE(motion_val_buf[i]);
368 MAKE_WRITABLE(ref_index_buf[i]);
375 * Allocate a Picture.
376 * The pixels are allocated/set by calling get_buffer() if shared = 0
378 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* If the mb dimensions changed since the tables were allocated,
 * drop them so they get reallocated at the new size. */
382 if (pic->qscale_table_buf)
383 if ( pic->alloc_mb_width != s->mb_width
384 || pic->alloc_mb_height != s->mb_height)
385 free_picture_tables(pic);
/* Shared pictures must already have pixels; otherwise the frame
 * buffer is acquired here and the strides are cached on the ctx. */
388 assert(pic->f.data[0]);
391 assert(!pic->f.data[0]);
393 if (alloc_frame_buffer(s, pic) < 0)
396 s->linesize = pic->f.linesize[0];
397 s->uvlinesize = pic->f.linesize[1];
400 if (!pic->qscale_table_buf)
401 ret = alloc_picture_tables(s, pic);
403 ret = make_tables_writable(pic);
/* Publish convenience pointers into the table buffers; qscale_table
 * and mb_type are offset past the first stride row plus one. */
408 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
409 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
410 pic->mb_mean = pic->mb_mean_buf->data;
413 pic->mbskip_table = pic->mbskip_table_buf->data;
414 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
415 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
417 if (pic->motion_val_buf[0]) {
418 for (i = 0; i < 2; i++) {
419 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
420 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* Error path: release everything acquired above. */
426 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
427 ff_mpeg_unref_picture(s, pic);
428 free_picture_tables(pic);
429 return AVERROR(ENOMEM);
433 * Deallocate a picture.
435 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
/* Fields up to and including mb_mean (frame, buffers, table refs)
 * are released explicitly below; everything after mb_mean in the
 * struct layout is simply zeroed at the end. */
437 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
440 /* WM Image / Screen codecs allocate internal buffers with different
441 * dimensions / colorspaces; ignore user-defined callbacks for these. */
442 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
443 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
444 s->codec_id != AV_CODEC_ID_MSS2)
445 ff_thread_release_buffer(s->avctx, &pic->tf);
447 av_frame_unref(&pic->f);
449 av_buffer_unref(&pic->hwaccel_priv_buf);
451 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Share src's table buffers into dst by AVBuffer reference (only
 * re-referencing when the underlying buffer actually differs), then
 * copy the raw convenience pointers and allocation dimensions.
 * On a failed av_buffer_ref() all of dst's tables are freed and
 * AVERROR(ENOMEM) is returned (via the UPDATE_TABLE macro). */
454 static int update_picture_tables(Picture *dst, Picture *src)
458 #define UPDATE_TABLE(table)\
461 (!dst->table || dst->table->buffer != src->table->buffer)) {\
462 av_buffer_unref(&dst->table);\
463 dst->table = av_buffer_ref(src->table);\
465 free_picture_tables(dst);\
466 return AVERROR(ENOMEM);\
471 UPDATE_TABLE(mb_var_buf);
472 UPDATE_TABLE(mc_mb_var_buf);
473 UPDATE_TABLE(mb_mean_buf);
474 UPDATE_TABLE(mbskip_table_buf);
475 UPDATE_TABLE(qscale_table_buf);
476 UPDATE_TABLE(mb_type_buf);
477 for (i = 0; i < 2; i++) {
478 UPDATE_TABLE(motion_val_buf[i]);
479 UPDATE_TABLE(ref_index_buf[i]);
/* The plain pointers can be copied directly: they point into the
 * buffers now shared between src and dst. */
482 dst->mb_var = src->mb_var;
483 dst->mc_mb_var = src->mc_mb_var;
484 dst->mb_mean = src->mb_mean;
485 dst->mbskip_table = src->mbskip_table;
486 dst->qscale_table = src->qscale_table;
487 dst->mb_type = src->mb_type;
488 for (i = 0; i < 2; i++) {
489 dst->motion_val[i] = src->motion_val[i];
490 dst->ref_index[i] = src->ref_index[i];
493 dst->alloc_mb_width = src->alloc_mb_width;
494 dst->alloc_mb_height = src->alloc_mb_height;
/* Make dst a reference to src: ref the frame (via the thread-aware
 * helper), share the side tables, ref the hwaccel private buffer,
 * and copy the scalar per-picture state. On any failure dst is
 * unref'd before returning the error. */
499 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
503 av_assert0(!dst->f.buf[0]);
504 av_assert0(src->f.buf[0]);
508 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
512 ret = update_picture_tables(dst, src);
516 if (src->hwaccel_picture_private) {
517 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
518 if (!dst->hwaccel_priv_buf)
520 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
523 dst->field_picture = src->field_picture;
524 dst->mb_var_sum = src->mb_var_sum;
525 dst->mc_mb_var_sum = src->mc_mb_var_sum;
526 dst->b_frame_score = src->b_frame_score;
527 dst->needs_realloc = src->needs_realloc;
528 dst->reference = src->reference;
529 dst->shared = src->shared;
533 ff_mpeg_unref_picture(s, dst);
/* Allocate the per-slice-thread state of an MpegEncContext: motion
 * estimation maps, optional noise-reduction error sums, the DCT
 * block array, and (for H.263-family output) the AC prediction
 * values. Returns 0 on success, -1 on allocation failure (cleanup
 * is deferred to ff_MPV_common_end(), see note at the fail label). */
537 static int init_duplicate_context(MpegEncContext *s)
539 int y_size = s->b8_stride * (2 * s->mb_height + 1);
540 int c_size = s->mb_stride * (s->mb_height + 1);
541 int yc_size = y_size + 2 * c_size;
549 s->obmc_scratchpad = NULL;
552 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
553 ME_MAP_SIZE * sizeof(uint32_t), fail)
554 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
555 ME_MAP_SIZE * sizeof(uint32_t), fail)
556 if (s->avctx->noise_reduction) {
557 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
558 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered. */
561 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
562 s->block = s->blocks[0];
564 for (i = 0; i < 12; i++) {
565 s->pblocks[i] = &s->block[i];
/* ac_val[0] is luma, ac_val[1]/[2] are the two chroma planes. */
568 if (s->out_format == FMT_H263) {
570 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
571 yc_size * sizeof(int16_t) * 16, fail);
572 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
573 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
574 s->ac_val[2] = s->ac_val[1] + c_size;
579 return -1; // free() through ff_MPV_common_end()
/* Free everything allocated by init_duplicate_context() and
 * ff_mpv_frame_size_alloc() for one slice-thread context. */
582 static void free_duplicate_context(MpegEncContext *s)
587 av_freep(&s->edge_emu_buffer);
588 av_freep(&s->me.scratchpad);
/* obmc_scratchpad aliases me.scratchpad, so it is only NULLed. */
592 s->obmc_scratchpad = NULL;
594 av_freep(&s->dct_error_sum);
595 av_freep(&s->me.map);
596 av_freep(&s->me.score_map);
597 av_freep(&s->blocks);
598 av_freep(&s->ac_val_base);
/* Save the per-thread pointer members of src into bak so they
 * survive the struct-wide memcpy in ff_update_duplicate_context().
 * NOTE(review): only a few of the COPY() lines are visible in this
 * chunk — the full list is not shown here. */
602 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
604 #define COPY(a) bak->a = src->a
605 COPY(edge_emu_buffer);
610 COPY(obmc_scratchpad);
617 COPY(me.map_generation);
/* Refresh a slice-thread context from the master: copy the whole
 * struct while preserving dst's own per-thread buffers (saved and
 * restored via backup_duplicate_context()), then fix up the block
 * pointers and lazily allocate scratch buffers if needed. */
629 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
633 // FIXME copy only needed parts
635 backup_duplicate_context(&bak, dst);
636 memcpy(dst, src, sizeof(MpegEncContext));
637 backup_duplicate_context(dst, &bak);
/* pblocks must point into dst's own block array, not src's. */
638 for (i = 0; i < 12; i++) {
639 dst->pblocks[i] = &dst->block[i];
641 if (!dst->edge_emu_buffer &&
642 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
643 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
644 "scratch buffers.\n");
647 // STOP_TIMER("update_duplicate_context")
648 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading state transfer: copy decoding state from the
 * source thread's context (s1) into the destination thread's (s),
 * including picture references, bitstream buffer, MPEG-4 and MPEG-2
 * parameter ranges, and scratch buffers. */
652 int ff_mpeg_update_thread_context(AVCodecContext *dst,
653 const AVCodecContext *src)
656 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
663 // FIXME can parameters change on I-frames?
664 // in that case dst may need a reinit
665 if (!s->context_initialized) {
/* First use of this thread context: clone s1 wholesale, then reset
 * the members that must be owned per-context. */
666 memcpy(s, s1, sizeof(MpegEncContext));
669 s->bitstream_buffer = NULL;
670 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
672 if (s1->context_initialized){
673 // s->picture_range_start += MAX_PICTURE_COUNT;
674 // s->picture_range_end += MAX_PICTURE_COUNT;
675 if((ret = ff_MPV_common_init(s)) < 0){
676 memset(s, 0, sizeof(MpegEncContext));
/* Resolution changed (or explicit reinit requested): redo the
 * size-dependent allocations. */
683 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
684 s->context_reinit = 0;
685 s->height = s1->height;
686 s->width = s1->width;
687 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
691 s->avctx->coded_height = s1->avctx->coded_height;
692 s->avctx->coded_width = s1->avctx->coded_width;
693 s->avctx->width = s1->avctx->width;
694 s->avctx->height = s1->avctx->height;
696 s->coded_picture_number = s1->coded_picture_number;
697 s->picture_number = s1->picture_number;
698 s->input_picture_number = s1->input_picture_number;
700 av_assert0(!s->picture || s->picture != s1->picture);
/* Re-reference every allocated picture from the source context. */
702 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
703 ff_mpeg_unref_picture(s, &s->picture[i]);
704 if (s1->picture[i].f.data[0] &&
705 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
709 #define UPDATE_PICTURE(pic)\
711 ff_mpeg_unref_picture(s, &s->pic);\
712 if (s1->pic.f.data[0])\
713 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
715 ret = update_picture_tables(&s->pic, &s1->pic);\
720 UPDATE_PICTURE(current_picture);
721 UPDATE_PICTURE(last_picture);
722 UPDATE_PICTURE(next_picture);
/* Picture pointers are translated from s1's picture array to s's. */
724 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
725 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
726 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
728 // Error/bug resilience
729 s->next_p_frame_damaged = s1->next_p_frame_damaged;
730 s->workaround_bugs = s1->workaround_bugs;
731 s->padding_bug_score = s1->padding_bug_score;
/* Bulk-copy the struct range [time_increment_bits, shape) —
 * relies on the MpegEncContext member layout. */
734 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
735 (char *) &s1->shape - (char *) &s1->time_increment_bits);
738 s->max_b_frames = s1->max_b_frames;
739 s->low_delay = s1->low_delay;
740 s->droppable = s1->droppable;
742 // DivX handling (doesn't work)
743 s->divx_packed = s1->divx_packed;
/* Copy over any leftover bitstream data, growing our buffer if
 * needed and zero-padding the tail for the bitstream reader. */
745 if (s1->bitstream_buffer) {
746 if (s1->bitstream_buffer_size +
747 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
748 av_fast_malloc(&s->bitstream_buffer,
749 &s->allocated_bitstream_buffer_size,
750 s1->allocated_bitstream_buffer_size);
751 s->bitstream_buffer_size = s1->bitstream_buffer_size;
752 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
753 s1->bitstream_buffer_size);
754 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
755 FF_INPUT_BUFFER_PADDING_SIZE);
758 // linesize dependend scratch buffer allocation
759 if (!s->edge_emu_buffer)
761 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
762 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
763 "scratch buffers.\n");
764 return AVERROR(ENOMEM);
767 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
768 "be allocated due to unknown size.\n");
771 // MPEG2/interlacing info
772 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
773 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
775 if (!s1->first_field) {
776 s->last_pict_type = s1->pict_type;
777 if (s1->current_picture_ptr)
778 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
780 if (s1->pict_type != AV_PICTURE_TYPE_B) {
781 s->last_non_b_pict_type = s1->pict_type;
789 * Set the given MpegEncContext to common defaults
790 * (same for encoding and decoding).
791 * The changed fields will not depend upon the
792 * prior state of the MpegEncContext.
794 void ff_MPV_common_defaults(MpegEncContext *s)
/* Default DC/qscale tables: MPEG-1 DC scale, identity chroma qscale. */
796 s->y_dc_scale_table =
797 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
798 s->chroma_qscale_table = ff_default_chroma_qscale_table;
799 s->progressive_frame = 1;
800 s->progressive_sequence = 1;
801 s->picture_structure = PICT_FRAME;
803 s->coded_picture_number = 0;
804 s->picture_number = 0;
805 s->input_picture_number = 0;
807 s->picture_in_gop_number = 0;
812 s->slice_context_count = 1;
816 * Set the given MpegEncContext to defaults for decoding.
817 * the changed fields will not depend upon
818 * the prior state of the MpegEncContext.
820 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Currently identical to the common defaults. */
822 ff_MPV_common_defaults(s);
/* Initialize the error-resilience context: mirror the macroblock
 * geometry and shared tables from the MpegEncContext, allocate the
 * ER-private buffers, and install mpeg_er_decode_mb() as the MB
 * reconstruction callback. Returns 0 or AVERROR(ENOMEM). */
825 static int init_er(MpegEncContext *s)
827 ERContext *er = &s->er;
828 int mb_array_size = s->mb_height * s->mb_stride;
831 er->avctx = s->avctx;
834 er->mb_index2xy = s->mb_index2xy;
835 er->mb_num = s->mb_num;
836 er->mb_width = s->mb_width;
837 er->mb_height = s->mb_height;
838 er->mb_stride = s->mb_stride;
839 er->b8_stride = s->b8_stride;
841 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
842 er->error_status_table = av_mallocz(mb_array_size);
843 if (!er->er_temp_buffer || !er->error_status_table)
/* These tables are owned by the MpegEncContext; ER only borrows
 * pointers to them. */
846 er->mbskip_table = s->mbskip_table;
847 er->mbintra_table = s->mbintra_table;
849 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
850 er->dc_val[i] = s->dc_val[i];
852 er->decode_mb = mpeg_er_decode_mb;
857 av_freep(&er->er_temp_buffer);
858 av_freep(&er->error_status_table);
859 return AVERROR(ENOMEM);
863 * Initialize and allocates MpegEncContext fields dependent on the resolution.
865 static int init_context_frame(MpegEncContext *s)
867 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Derive macroblock geometry from the pixel dimensions; strides
 * include one extra column for edge handling. */
869 s->mb_width = (s->width + 15) / 16;
870 s->mb_stride = s->mb_width + 1;
871 s->b8_stride = s->mb_width * 2 + 1;
872 s->b4_stride = s->mb_width * 4 + 1;
873 mb_array_size = s->mb_height * s->mb_stride;
874 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
876 /* set default edge pos, will be overriden
877 * in decode_header if needed */
878 s->h_edge_pos = s->mb_width * 16;
879 s->v_edge_pos = s->mb_height * 16;
881 s->mb_num = s->mb_width * s->mb_height;
886 s->block_wrap[3] = s->b8_stride;
888 s->block_wrap[5] = s->mb_stride;
890 y_size = s->b8_stride * (2 * s->mb_height + 1);
891 c_size = s->mb_stride * (s->mb_height + 1);
892 yc_size = y_size + 2 * c_size;
894 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
895 for (y = 0; y < s->mb_height; y++)
896 for (x = 0; x < s->mb_width; x++)
897 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
899 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
902 /* Allocate MV tables */
903 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
904 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
905 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
906 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
907 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
908 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
/* The working pointers skip the first stride row plus one entry. */
909 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
910 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
911 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
912 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
913 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
914 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
916 /* Allocate MB type table */
917 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
919 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
921 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
922 mb_array_size * sizeof(float), fail);
923 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
924 mb_array_size * sizeof(float), fail);
928 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
929 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
930 /* interlaced direct mode decoding tables */
931 for (i = 0; i < 2; i++) {
933 for (j = 0; j < 2; j++) {
934 for (k = 0; k < 2; k++) {
935 FF_ALLOCZ_OR_GOTO(s->avctx,
936 s->b_field_mv_table_base[i][j][k],
937 mv_table_size * 2 * sizeof(int16_t),
939 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
942 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
943 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
944 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
946 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
949 if (s->out_format == FMT_H263) {
951 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
952 s->coded_block = s->coded_block_base + s->b8_stride + 1;
954 /* cbp, ac_pred, pred_dir */
955 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
956 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
959 if (s->h263_pred || s->h263_plus || !s->encoding) {
961 // MN: we need these for error resilience of intra-frames
962 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
963 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
964 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
965 s->dc_val[2] = s->dc_val[1] + c_size;
/* DC predictors start at 1024 (the mid-range reset value). */
966 for (i = 0; i < yc_size; i++)
967 s->dc_val_base[i] = 1024;
970 /* which mb is a intra block */
971 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
972 memset(s->mbintra_table, 1, mb_array_size);
974 /* init macroblock skip table */
975 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
976 // Note the + 1 is for a quicker mpeg4 slice_end detection
980 return AVERROR(ENOMEM);
984 * init common structure for both encoder and decoder.
985 * this assumes that some variables like width/height are already set
987 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* One slice context per slice thread (or per requested encoder
 * slice); clamped below to MAX_THREADS and mb_height. */
990 int nb_slices = (HAVE_THREADS &&
991 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
992 s->avctx->thread_count : 1;
994 if (s->encoding && s->avctx->slices)
995 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 rounds mb_height up to a multiple of two
 * macroblock rows (32 pixels). */
997 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
998 s->mb_height = (s->height + 31) / 32 * 2;
1000 s->mb_height = (s->height + 15) / 16;
1002 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1003 av_log(s->avctx, AV_LOG_ERROR,
1004 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1008 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1011 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1013 max_slices = MAX_THREADS;
1014 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1015 " reducing to %d\n", nb_slices, max_slices);
1016 nb_slices = max_slices;
1019 if ((s->width || s->height) &&
1020 av_image_check_size(s->width, s->height, 0, s->avctx))
1023 ff_dct_common_init(s);
1025 s->flags = s->avctx->flags;
1026 s->flags2 = s->avctx->flags2;
1028 /* set chroma shifts */
1029 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
1031 /* convert fourcc to upper case */
1032 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1033 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1035 s->avctx->coded_frame = &s->current_picture.f;
/* Encoder-only allocations (presumably guarded by s->encoding in
 * the omitted lines): statistics, quant matrices, reorder queues. */
1038 if (s->msmpeg4_version) {
1039 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1040 2 * 2 * (MAX_LEVEL + 1) *
1041 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1043 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1045 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
1046 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
1047 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
1048 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1049 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1050 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1051 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1052 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1054 if (s->avctx->noise_reduction) {
1055 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
/* Picture pool and the three working Picture slots. */
1059 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1060 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1061 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1062 avcodec_get_frame_defaults(&s->picture[i].f);
1064 memset(&s->next_picture, 0, sizeof(s->next_picture));
1065 memset(&s->last_picture, 0, sizeof(s->last_picture));
1066 memset(&s->current_picture, 0, sizeof(s->current_picture));
1067 avcodec_get_frame_defaults(&s->next_picture.f);
1068 avcodec_get_frame_defaults(&s->last_picture.f);
1069 avcodec_get_frame_defaults(&s->current_picture.f);
1071 if (init_context_frame(s))
1074 s->parse_context.state = -1;
1076 s->context_initialized = 1;
1077 s->thread_context[0] = s;
1079 // if (s->width && s->height) {
/* Slice threading: clone the context for each extra slice thread
 * and assign each thread its mb row range. */
1080 if (nb_slices > 1) {
1081 for (i = 1; i < nb_slices; i++) {
1082 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1083 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1086 for (i = 0; i < nb_slices; i++) {
1087 if (init_duplicate_context(s->thread_context[i]) < 0)
1089 s->thread_context[i]->start_mb_y =
1090 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1091 s->thread_context[i]->end_mb_y =
1092 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1095 if (init_duplicate_context(s) < 0)
1098 s->end_mb_y = s->mb_height;
1100 s->slice_context_count = nb_slices;
1105 ff_MPV_common_end(s);
1110 * Frees and resets MpegEncContext fields depending on the resolution.
1111 * Is used during resolution changes to avoid a full reinitialization of the
1114 static int free_context_frame(MpegEncContext *s)
/* Free everything allocated by init_context_frame(), NULLing the
 * derived working pointers that aliased the freed base arrays. */
1118 av_freep(&s->mb_type);
1119 av_freep(&s->p_mv_table_base);
1120 av_freep(&s->b_forw_mv_table_base);
1121 av_freep(&s->b_back_mv_table_base);
1122 av_freep(&s->b_bidir_forw_mv_table_base);
1123 av_freep(&s->b_bidir_back_mv_table_base);
1124 av_freep(&s->b_direct_mv_table_base);
1125 s->p_mv_table = NULL;
1126 s->b_forw_mv_table = NULL;
1127 s->b_back_mv_table = NULL;
1128 s->b_bidir_forw_mv_table = NULL;
1129 s->b_bidir_back_mv_table = NULL;
1130 s->b_direct_mv_table = NULL;
1131 for (i = 0; i < 2; i++) {
1132 for (j = 0; j < 2; j++) {
1133 for (k = 0; k < 2; k++) {
1134 av_freep(&s->b_field_mv_table_base[i][j][k]);
1135 s->b_field_mv_table[i][j][k] = NULL;
1137 av_freep(&s->b_field_select_table[i][j]);
1138 av_freep(&s->p_field_mv_table_base[i][j]);
1139 s->p_field_mv_table[i][j] = NULL;
1141 av_freep(&s->p_field_select_table[i]);
1144 av_freep(&s->dc_val_base);
1145 av_freep(&s->coded_block_base);
1146 av_freep(&s->mbintra_table);
1147 av_freep(&s->cbp_table);
1148 av_freep(&s->pred_dir_table);
1150 av_freep(&s->mbskip_table);
1152 av_freep(&s->er.error_status_table);
1153 av_freep(&s->er.er_temp_buffer);
1154 av_freep(&s->mb_index2xy);
1155 av_freep(&s->lambda_table);
1157 av_freep(&s->cplx_tab);
1158 av_freep(&s->bits_tab);
/* Reset strides so the next frame allocation re-derives them. */
1160 s->linesize = s->uvlinesize = 0;
/**
 * Re-initialize the per-resolution state after a mid-stream frame size change,
 * without tearing down the whole MpegEncContext.
 * NOTE(review): lossy extraction — error labels, some braces and returns are
 * not visible here; code left byte-identical.
 */
1165 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* First drop all per-slice duplicate contexts (slice threading). */
1169 if (s->slice_context_count > 1) {
1170 for (i = 0; i < s->slice_context_count; i++) {
1171 free_duplicate_context(s->thread_context[i]);
/* thread_context[0] is s itself, so only indices >= 1 are heap-allocated. */
1173 for (i = 1; i < s->slice_context_count; i++) {
1174 av_freep(&s->thread_context[i]);
1177 free_duplicate_context(s);
1179 if ((err = free_context_frame(s)) < 0)
/* Force every cached picture to be reallocated at the new dimensions. */
1183 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1184 s->picture[i].needs_realloc = 1;
1187 s->last_picture_ptr =
1188 s->next_picture_ptr =
1189 s->current_picture_ptr = NULL;
/* Interlaced MPEG-2 uses pairs of 16-line field macroblock rows, hence the
 * round-up to 32 and multiply by 2; everything else uses plain 16x16 MBs. */
1192 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1193 s->mb_height = (s->height + 31) / 32 * 2;
1195 s->mb_height = (s->height + 15) / 16;
1197 if ((s->width || s->height) &&
1198 av_image_check_size(s->width, s->height, 0, s->avctx))
1199 return AVERROR_INVALIDDATA;
1201 if ((err = init_context_frame(s)))
1204 s->thread_context[0] = s;
/* Rebuild the slice thread contexts and their MB-row ranges. */
1206 if (s->width && s->height) {
1207 int nb_slices = s->slice_context_count;
1208 if (nb_slices > 1) {
1209 for (i = 1; i < nb_slices; i++) {
1210 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1211 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1214 for (i = 0; i < nb_slices; i++) {
1215 if (init_duplicate_context(s->thread_context[i]) < 0)
/* Rounded partition of mb_height into nb_slices contiguous bands. */
1217 s->thread_context[i]->start_mb_y =
1218 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1219 s->thread_context[i]->end_mb_y =
1220 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1223 if (init_duplicate_context(s) < 0)
1226 s->end_mb_y = s->mb_height;
1228 s->slice_context_count = nb_slices;
/* Error path: full teardown, presumably followed by an error return. */
1233 ff_MPV_common_end(s);
1237 /* Free the common structure shared by encoder and decoder. */
1238 void ff_MPV_common_end(MpegEncContext *s)
/* Tear down slice-threading contexts first (mirror of the init path). */
1242 if (s->slice_context_count > 1) {
1243 for (i = 0; i < s->slice_context_count; i++) {
1244 free_duplicate_context(s->thread_context[i]);
/* Index 0 is s itself and must not be freed. */
1246 for (i = 1; i < s->slice_context_count; i++) {
1247 av_freep(&s->thread_context[i]);
1249 s->slice_context_count = 1;
1250 } else free_duplicate_context(s);
1252 av_freep(&s->parse_context.buffer);
1253 s->parse_context.buffer_size = 0;
1255 av_freep(&s->bitstream_buffer);
1256 s->allocated_bitstream_buffer_size = 0;
1258 av_freep(&s->avctx->stats_out);
1259 av_freep(&s->ac_stats);
/* The chroma matrices may alias the luma ones; only free them when they are
 * distinct allocations, then always clear the pointers. */
1261 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1262 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1263 s->q_chroma_intra_matrix= NULL;
1264 s->q_chroma_intra_matrix16= NULL;
1265 av_freep(&s->q_intra_matrix);
1266 av_freep(&s->q_inter_matrix);
1267 av_freep(&s->q_intra_matrix16);
1268 av_freep(&s->q_inter_matrix16);
1269 av_freep(&s->input_picture);
1270 av_freep(&s->reordered_input_picture);
1271 av_freep(&s->dct_offset);
/* Release every picture slot plus the standalone picture copies. */
1274 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1275 free_picture_tables(&s->picture[i]);
1276 ff_mpeg_unref_picture(s, &s->picture[i]);
1279 av_freep(&s->picture);
1280 free_picture_tables(&s->last_picture);
1281 ff_mpeg_unref_picture(s, &s->last_picture);
1282 free_picture_tables(&s->current_picture);
1283 ff_mpeg_unref_picture(s, &s->current_picture);
1284 free_picture_tables(&s->next_picture);
1285 ff_mpeg_unref_picture(s, &s->next_picture);
1286 free_picture_tables(&s->new_picture);
1287 ff_mpeg_unref_picture(s, &s->new_picture);
1289 free_context_frame(s);
/* Reset state so a later ff_MPV_common_init() starts from scratch. */
1291 s->context_initialized = 0;
1292 s->last_picture_ptr =
1293 s->next_picture_ptr =
1294 s->current_picture_ptr = NULL;
1295 s->linesize = s->uvlinesize = 0;
/**
 * Initialize the derived run/level lookup tables of an RLTable.
 * @param rl           table whose max_level/max_run/index_run get filled in
 * @param static_store optional static backing storage; when NULL the three
 *                     tables are allocated with av_malloc (never freed —
 *                     intended for tables with static lifetime)
 * NOTE(review): lossy extraction — loop body lines 1311-1318 (start/end
 * selection per `last`) are not visible; code left byte-identical.
 */
1298 av_cold void ff_init_rl(RLTable *rl,
1299 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1301 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1302 uint8_t index_run[MAX_RUN + 1];
1303 int last, run, level, start, end, i;
1305 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1306 if (static_store && rl->max_level[0])
1309 /* compute max_level[], max_run[] and index_run[] */
1310 for (last = 0; last < 2; last++) {
/* rl->n is used as the "unset" sentinel in index_run. */
1319 memset(max_level, 0, MAX_RUN + 1);
1320 memset(max_run, 0, MAX_LEVEL + 1);
1321 memset(index_run, rl->n, MAX_RUN + 1);
1322 for (i = start; i < end; i++) {
1323 run = rl->table_run[i];
1324 level = rl->table_level[i];
1325 if (index_run[run] == rl->n)
1327 if (level > max_level[run])
1328 max_level[run] = level;
1329 if (run > max_run[level])
1330 max_run[level] = run;
/* Publish the three tables, either into the caller-provided static storage
 * (laid out back to back) or into fresh heap allocations. */
1333 rl->max_level[last] = static_store[last];
1335 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1336 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1338 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1340 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1341 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1343 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1345 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1346 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Build the per-qscale RL_VLC tables from an already-initialized RLTable VLC,
 * pre-applying dequantization (level * qmul + qadd) for every q in [0, 31].
 * NOTE(review): lossy extraction — qmul setup and the escape/illegal-code
 * branches are partly missing; code left byte-identical.
 */
1350 av_cold void ff_init_vlc_rl(RLTable *rl)
1354 for (q = 0; q < 32; q++) {
/* (q - 1) | 1 forces qadd odd; presumably with a special case for q==0
 * on a missing line — TODO confirm against upstream. */
1356 int qadd = (q - 1) | 1;
1362 for (i = 0; i < rl->vlc.table_size; i++) {
1363 int code = rl->vlc.table[i][0];
1364 int len = rl->vlc.table[i][1];
1367 if (len == 0) { // illegal code
1370 } else if (len < 0) { // more bits needed
1374 if (code == rl->n) { // esc
/* Regular code: convert to (run, level) with dequantization baked in;
 * +192 marks "last" coefficients for the decoder's fast path. */
1378 run = rl->table_run[code] + 1;
1379 level = rl->table_level[code] * qmul + qadd;
1380 if (code >= rl->last) run += 192;
1383 rl->rl_vlc[q][i].len = len;
1384 rl->rl_vlc[q][i].level = level;
1385 rl->rl_vlc[q][i].run = run;
/**
 * Unreference all non-reference pictures in the pool.
 * @param remove_current when non-zero, the current picture is released too;
 *                       otherwise current_picture_ptr is preserved.
 */
1390 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1394 /* release non reference frames */
1395 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1396 if (!s->picture[i].reference &&
1397 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1398 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Return whether a picture slot can be reused. The last picture is never
 * considered unused; a slot with no data, or one flagged needs_realloc that is
 * not held as a delayed reference, is reusable. (Return statements are on
 * lines not visible in this extraction.) */
1403 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1405 if (pic == s->last_picture_ptr)
1407 if (pic->f.data[0] == NULL)
1409 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/**
 * Find a free slot in s->picture[].
 * NOTE(review): the `shared` branch selection is on lines missing from this
 * extraction; the visible fallthrough scans twice — first for completely
 * empty slots, then for any slot pic_is_unused() accepts.
 */
1414 static int find_unused_picture(MpegEncContext *s, int shared)
1419 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1420 if (s->picture[i].f.data[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1424 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1425 if (pic_is_unused(s, &s->picture[i]))
/* No slot found: this indicates an internal inconsistency, see below. */
1430 av_log(s->avctx, AV_LOG_FATAL,
1431 "Internal error, picture buffer overflow\n");
1432 /* We could return -1, but the codec would crash trying to draw into a
1433 * non-existing frame anyway. This is safer than waiting for a random crash.
1434 * Also the return of this is never useful, an encoder must only allocate
1435 * as much as allowed in the specification. This has no relationship to how
1436 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1437 * enough for such valid streams).
1438 * Plus, a decoder has to check stream validity and remove frames if too
1439 * many reference frames are around. Waiting for "OOM" is not correct at
1440 * all. Similarly, missing reference frames have to be replaced by
1441 * interpolated/MC frames, anything else is a bug in the codec ...
/**
 * Public wrapper around find_unused_picture(): additionally performs the
 * pending reallocation cleanup on the chosen slot (free tables, unref, reset
 * the embedded AVFrame) when it was flagged needs_realloc.
 */
1447 int ff_find_unused_picture(MpegEncContext *s, int shared)
1449 int ret = find_unused_picture(s, shared);
1451 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1452 if (s->picture[ret].needs_realloc) {
1453 s->picture[ret].needs_realloc = 0;
1454 free_picture_tables(&s->picture[ret]);
1455 ff_mpeg_unref_picture(s, &s->picture[ret]);
1456 avcodec_get_frame_defaults(&s->picture[ret].f);
/* Refresh the encoder's noise-reduction DCT offset table from the running
 * per-coefficient error sums, separately for intra and inter blocks. */
1462 static void update_noise_reduction(MpegEncContext *s)
1466 for (intra = 0; intra < 2; intra++) {
/* Halve the accumulators once the sample count gets large, so the
 * statistics track recent frames instead of growing without bound. */
1467 if (s->dct_count[intra] > (1 << 16)) {
1468 for (i = 0; i < 64; i++) {
1469 s->dct_error_sum[intra][i] >>= 1;
1471 s->dct_count[intra] >>= 1;
/* offset = strength * count / error_sum, computed with rounding
 * (+error_sum/2) and +1 in the divisor to avoid division by zero. */
1474 for (i = 0; i < 64; i++) {
1475 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1476 s->dct_count[intra] +
1477 s->dct_error_sum[intra][i] / 2) /
1478 (s->dct_error_sum[intra][i] + 1);
1484 * generic function for encode/decode called after coding/decoding
1485 * the header and before a frame is coded/decoded.
/* NOTE(review): lossy extraction — error returns, some braces and a few
 * statements are not visible; the intricate picture-lifecycle logic below is
 * left byte-identical on purpose. */
1487 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* Frame threading: a frame may only be started from the SETUP state. */
1493 if (!ff_thread_can_start_frame(avctx)) {
1494 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1498 /* mark & release old frames */
1499 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1500 s->last_picture_ptr != s->next_picture_ptr &&
1501 s->last_picture_ptr->f.data[0]) {
1502 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1505 /* release forgotten pictures */
1506 /* if (mpeg124/h263) */
/* A referenced picture that is neither last nor next should not exist in
 * single-threaded decoding — warn ("zombie") and drop it. */
1508 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1509 if (&s->picture[i] != s->last_picture_ptr &&
1510 &s->picture[i] != s->next_picture_ptr &&
1511 s->picture[i].reference && !s->picture[i].needs_realloc) {
1512 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1513 av_log(avctx, AV_LOG_ERROR,
1514 "releasing zombie picture\n");
1515 ff_mpeg_unref_picture(s, &s->picture[i]);
1521 ff_release_unused_pictures(s, 1);
/* Pick the target picture: reuse a pre-set empty current picture, or grab a
 * fresh slot from the pool. */
1523 if (s->current_picture_ptr &&
1524 s->current_picture_ptr->f.data[0] == NULL) {
1525 // we already have a unused image
1526 // (maybe it was set before reading the header)
1527 pic = s->current_picture_ptr;
1529 i = ff_find_unused_picture(s, 0);
1531 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1534 pic = &s->picture[i];
/* Mark as reference unless droppable; B-frames are handled on a line not
 * visible here — TODO confirm against upstream. */
1538 if (!s->droppable) {
1539 if (s->pict_type != AV_PICTURE_TYPE_B)
1543 pic->f.coded_picture_number = s->coded_picture_number++;
1545 if (ff_alloc_picture(s, pic, 0) < 0)
1548 s->current_picture_ptr = pic;
1549 // FIXME use only the vars from current_pic
1550 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1551 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1552 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1553 if (s->picture_structure != PICT_FRAME)
1554 s->current_picture_ptr->f.top_field_first =
1555 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1557 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1558 !s->progressive_sequence;
1559 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1562 s->current_picture_ptr->f.pict_type = s->pict_type;
1563 // if (s->flags && CODEC_FLAG_QSCALE)
1564 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1565 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1567 ff_mpeg_unref_picture(s, &s->current_picture);
1568 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1569 s->current_picture_ptr)) < 0)
/* Advance the last/next reference chain for non-B frames. */
1572 if (s->pict_type != AV_PICTURE_TYPE_B) {
1573 s->last_picture_ptr = s->next_picture_ptr;
1575 s->next_picture_ptr = s->current_picture_ptr;
1577 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1578 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1579 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1580 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1581 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1582 s->pict_type, s->droppable);
/* Missing last reference (stream starts on a non-keyframe, or a field-coded
 * keyframe): synthesize a gray dummy picture so MC has something to read. */
1584 if ((s->last_picture_ptr == NULL ||
1585 s->last_picture_ptr->f.data[0] == NULL) &&
1586 (s->pict_type != AV_PICTURE_TYPE_I ||
1587 s->picture_structure != PICT_FRAME)) {
1588 int h_chroma_shift, v_chroma_shift;
1589 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1590 &h_chroma_shift, &v_chroma_shift);
1591 if (s->pict_type != AV_PICTURE_TYPE_I)
1592 av_log(avctx, AV_LOG_ERROR,
1593 "warning: first frame is no keyframe\n");
1594 else if (s->picture_structure != PICT_FRAME)
1595 av_log(avctx, AV_LOG_INFO,
1596 "allocate dummy last picture for field based first keyframe\n");
1598 /* Allocate a dummy frame */
1599 i = ff_find_unused_picture(s, 0);
1601 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1604 s->last_picture_ptr = &s->picture[i];
1605 s->last_picture_ptr->f.key_frame = 0;
1606 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1607 s->last_picture_ptr = NULL;
/* Fill with mid-gray (0x80 for luma and chroma). */
1611 memset(s->last_picture_ptr->f.data[0], 0x80,
1612 avctx->height * s->last_picture_ptr->f.linesize[0]);
1613 memset(s->last_picture_ptr->f.data[1], 0x80,
1614 (avctx->height >> v_chroma_shift) *
1615 s->last_picture_ptr->f.linesize[1]);
1616 memset(s->last_picture_ptr->f.data[2], 0x80,
1617 (avctx->height >> v_chroma_shift) *
1618 s->last_picture_ptr->f.linesize[2]);
/* FLV1/H263 expect black (luma 16) rather than gray. */
1620 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1621 for(i=0; i<avctx->height; i++)
1622 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
/* Dummy frame is "fully decoded" for both fields. */
1625 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1626 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same trick for a missing next reference of a B-frame. */
1628 if ((s->next_picture_ptr == NULL ||
1629 s->next_picture_ptr->f.data[0] == NULL) &&
1630 s->pict_type == AV_PICTURE_TYPE_B) {
1631 /* Allocate a dummy frame */
1632 i = ff_find_unused_picture(s, 0);
1634 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1637 s->next_picture_ptr = &s->picture[i];
1638 s->next_picture_ptr->f.key_frame = 0;
1639 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1640 s->next_picture_ptr = NULL;
1643 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1644 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1647 #if 0 // BUFREF-FIXME
1648 memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1649 memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
/* Refresh the local last/next picture copies from their pointers. */
1651 if (s->last_picture_ptr) {
1652 ff_mpeg_unref_picture(s, &s->last_picture);
1653 if (s->last_picture_ptr->f.data[0] &&
1654 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1655 s->last_picture_ptr)) < 0)
1658 if (s->next_picture_ptr) {
1659 ff_mpeg_unref_picture(s, &s->next_picture);
1660 if (s->next_picture_ptr->f.data[0] &&
1661 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1662 s->next_picture_ptr)) < 0)
1666 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1667 s->last_picture_ptr->f.data[0]));
/* Field pictures: offset data to the bottom field and double the strides so
 * the MC code sees a half-height, double-stride frame. */
1669 if (s->picture_structure!= PICT_FRAME) {
1671 for (i = 0; i < 4; i++) {
1672 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1673 s->current_picture.f.data[i] +=
1674 s->current_picture.f.linesize[i];
1676 s->current_picture.f.linesize[i] *= 2;
1677 s->last_picture.f.linesize[i] *= 2;
1678 s->next_picture.f.linesize[i] *= 2;
1682 s->err_recognition = avctx->err_recognition;
1684 /* set dequantizer, we can't do it during init as
1685 * it might change for mpeg4 and we can't do it in the header
1686 * decode as init is not called for mpeg4 there yet */
1687 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1688 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1689 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1690 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1691 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1692 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1694 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1695 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1698 if (s->dct_error_sum) {
1699 av_assert2(s->avctx->noise_reduction && s->encoding);
1700 update_noise_reduction(s);
/* XvMC hardware acceleration has its own field-start hook. */
1703 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1704 return ff_xvmc_field_start(s, avctx);
1709 /* generic function for encode/decode called after a
1710 * frame has been coded/decoded. */
1711 void ff_MPV_frame_end(MpegEncContext *s)
1714 /* redraw edges for the frame if decoding didn't complete */
1715 // just to make sure that all data is rendered.
/* Edge padding is only needed for software decoding of reference frames with
 * unrestricted MVs and no EMU_EDGE; hwaccel/VDPAU/XvMC paths skip it. */
1716 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1717 ff_xvmc_field_end(s);
1718 } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1719 !s->avctx->hwaccel &&
1720 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1721 s->unrestricted_mv &&
1722 s->current_picture.reference &&
1724 !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1727 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1728 int hshift = desc->log2_chroma_w;
1729 int vshift = desc->log2_chroma_h;
/* Replicate border pixels into the EDGE_WIDTH margin of all three planes. */
1730 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1731 s->h_edge_pos, s->v_edge_pos,
1732 EDGE_WIDTH, EDGE_WIDTH,
1733 EDGE_TOP | EDGE_BOTTOM);
1734 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1735 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1736 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1737 EDGE_TOP | EDGE_BOTTOM);
1738 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1739 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1740 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1741 EDGE_TOP | EDGE_BOTTOM);
/* Remember per-type state for the next frame's rate control / headers. */
1746 s->last_pict_type = s->pict_type;
1747 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1748 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1749 s->last_non_b_pict_type = s->pict_type;
1752 /* copy back current_picture variables */
1753 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1754 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1755 s->picture[i] = s->current_picture;
1759 assert(i < MAX_PICTURE_COUNT);
1763 /* release non-reference frames */
1764 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1765 if (!s->picture[i].reference)
1766 ff_mpeg_unref_picture(s, &s->picture[i]);
1769 // clear copies, to avoid confusion
1771 memset(&s->last_picture, 0, sizeof(Picture));
1772 memset(&s->next_picture, 0, sizeof(Picture));
1773 memset(&s->current_picture, 0, sizeof(Picture));
1775 s->avctx->coded_frame = &s->current_picture_ptr->f;
/* Mark the reference frame as fully decoded for frame threading. */
1777 if (s->current_picture.reference)
1778 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1782 * Draw a line from (ex, ey) -> (sx, sy) with 16.16 fixed-point antialiasing.
1783 * @param w width of the image
1784 * @param h height of the image
1785 * @param stride stride/linesize of the image
1786 * @param color color of the line (added to the existing pixel values)
1788 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1789 int w, int h, int stride, int color)
/* Clamp both endpoints into the image. */
1793 sx = av_clip(sx, 0, w - 1);
1794 sy = av_clip(sy, 0, h - 1);
1795 ex = av_clip(ex, 0, w - 1);
1796 ey = av_clip(ey, 0, h - 1);
1798 buf[sy * stride + sx] += color;
/* Step along the major axis; swap so stepping goes in positive direction
 * (the swap guard condition is on lines missing from this extraction). */
1800 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1802 FFSWAP(int, sx, ex);
1803 FFSWAP(int, sy, ey);
1805 buf += sx + sy * stride;
/* Slope in 16.16 fixed point; fr is the fractional coverage used to split
 * the color between the two adjacent rows. */
1807 f = ((ey - sy) << 16) / ex;
1808 for (x = 0; x <= ex; x++) {
1810 fr = (x * f) & 0xFFFF;
1811 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1812 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* Vertical-dominant case: same scheme with x and y roles exchanged. */
1816 FFSWAP(int, sx, ex);
1817 FFSWAP(int, sy, ey);
1819 buf += sx + sy * stride;
1822 f = ((ex - sx) << 16) / ey;
1825 for(y= 0; y <= ey; y++){
1827 fr = (y*f) & 0xFFFF;
1828 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1829 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1835 * Draw an arrow from (ex, ey) -> (sx, sy): a line plus two head strokes.
1836 * @param w width of the image
1837 * @param h height of the image
1838 * @param stride stride/linesize of the image
1839 * @param color color of the arrow
1841 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1842 int ey, int w, int h, int stride, int color)
/* Loose clamp: allow endpoints slightly outside; draw_line clamps exactly. */
1846 sx = av_clip(sx, -100, w + 100);
1847 sy = av_clip(sy, -100, h + 100);
1848 ex = av_clip(ex, -100, w + 100);
1849 ey = av_clip(ey, -100, h + 100);
/* Only draw a head if the arrow is longer than 3 pixels. */
1854 if (dx * dx + dy * dy > 3 * 3) {
1857 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1859 // FIXME subpixel accuracy
/* Normalize the direction to a fixed head length (in 1/16 pel units). */
1860 rx = ROUNDED_DIV(rx * 3 << 4, length);
1861 ry = ROUNDED_DIV(ry * 3 << 4, length);
/* Two head strokes: along the direction and its perpendicular. */
1863 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1864 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1866 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1870 * Print debugging info for the given picture.
/* Emits textual per-MB debug logs (skip counts, QP, MB type) and/or draws
 * visual overlays (motion vectors, QP shading, MB-type coloring) directly
 * into `pict`, depending on avctx->debug / avctx->debug_mv flags.
 * NOTE(review): lossy extraction — several braces and small statements are
 * missing from view; code left byte-identical. */
1872 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1874 int mb_width, int mb_height, int mb_stride, int quarter_sample)
/* Nothing to do for hwaccel frames or when MB metadata is unavailable. */
1876 if (avctx->hwaccel || !p || !p->mb_type
1877 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
/* ---- textual per-MB dump ---- */
1881 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1884 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1885 av_get_picture_type_char(pict->pict_type));
1886 for (y = 0; y < mb_height; y++) {
1887 for (x = 0; x < mb_width; x++) {
1888 if (avctx->debug & FF_DEBUG_SKIP) {
1889 int count = mbskip_table[x + y * mb_stride];
1892 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1894 if (avctx->debug & FF_DEBUG_QP) {
1895 av_log(avctx, AV_LOG_DEBUG, "%2d",
1896 p->qscale_table[x + y * mb_stride]);
1898 if (avctx->debug & FF_DEBUG_MB_TYPE) {
1899 int mb_type = p->mb_type[x + y * mb_stride];
1900 // Type & MV direction
1901 if (IS_PCM(mb_type))
1902 av_log(avctx, AV_LOG_DEBUG, "P");
1903 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1904 av_log(avctx, AV_LOG_DEBUG, "A");
1905 else if (IS_INTRA4x4(mb_type))
1906 av_log(avctx, AV_LOG_DEBUG, "i");
1907 else if (IS_INTRA16x16(mb_type))
1908 av_log(avctx, AV_LOG_DEBUG, "I");
1909 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1910 av_log(avctx, AV_LOG_DEBUG, "d");
1911 else if (IS_DIRECT(mb_type))
1912 av_log(avctx, AV_LOG_DEBUG, "D");
1913 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1914 av_log(avctx, AV_LOG_DEBUG, "g");
1915 else if (IS_GMC(mb_type))
1916 av_log(avctx, AV_LOG_DEBUG, "G");
1917 else if (IS_SKIP(mb_type))
1918 av_log(avctx, AV_LOG_DEBUG, "S");
1919 else if (!USES_LIST(mb_type, 1))
1920 av_log(avctx, AV_LOG_DEBUG, ">");
1921 else if (!USES_LIST(mb_type, 0))
1922 av_log(avctx, AV_LOG_DEBUG, "<");
1924 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1925 av_log(avctx, AV_LOG_DEBUG, "X");
/* Segmentation marker: +, -, | for 8x8/16x8/8x16 partitions. */
1929 if (IS_8X8(mb_type))
1930 av_log(avctx, AV_LOG_DEBUG, "+");
1931 else if (IS_16X8(mb_type))
1932 av_log(avctx, AV_LOG_DEBUG, "-");
1933 else if (IS_8X16(mb_type))
1934 av_log(avctx, AV_LOG_DEBUG, "|");
1935 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1936 av_log(avctx, AV_LOG_DEBUG, " ");
1938 av_log(avctx, AV_LOG_DEBUG, "?");
1941 if (IS_INTERLACED(mb_type))
1942 av_log(avctx, AV_LOG_DEBUG, "=");
1944 av_log(avctx, AV_LOG_DEBUG, " ");
1947 av_log(avctx, AV_LOG_DEBUG, "\n");
/* ---- visual overlays drawn into the output frame ---- */
1951 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1952 (avctx->debug_mv)) {
1953 const int shift = 1 + quarter_sample;
1957 int h_chroma_shift, v_chroma_shift, block_height;
1958 const int width = avctx->width;
1959 const int height = avctx->height;
/* H.264/SVQ3 store MVs at 4x4 granularity, others at 8x8. */
1960 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1961 const int mv_stride = (mb_width << mv_sample_log2) +
1962 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1964 *low_delay = 0; // needed to see the vectors without trashing the buffers
1966 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1968 av_frame_make_writable(pict);
1970 pict->opaque = NULL;
1971 ptr = pict->data[0];
1972 block_height = 16 >> v_chroma_shift;
1974 for (mb_y = 0; mb_y < mb_height; mb_y++) {
1976 for (mb_x = 0; mb_x < mb_width; mb_x++) {
1977 const int mb_index = mb_x + mb_y * mb_stride;
/* Motion vector arrows: type 0 = P forward, 1 = B forward, 2 = B backward. */
1978 if ((avctx->debug_mv) && p->motion_val[0]) {
1980 for (type = 0; type < 3; type++) {
1984 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1985 (pict->pict_type!= AV_PICTURE_TYPE_P))
1990 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1991 (pict->pict_type!= AV_PICTURE_TYPE_B))
1996 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1997 (pict->pict_type!= AV_PICTURE_TYPE_B))
2002 if (!USES_LIST(p->mb_type[mb_index], direction))
/* One arrow per partition, anchored at the partition center. */
2005 if (IS_8X8(p->mb_type[mb_index])) {
2007 for (i = 0; i < 4; i++) {
2008 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2009 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2010 int xy = (mb_x * 2 + (i & 1) +
2011 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2012 int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
2013 int my = (p->motion_val[direction][xy][1] >> shift) + sy;
2014 draw_arrow(ptr, sx, sy, mx, my, width,
2015 height, pict->linesize[0], 100);
2017 } else if (IS_16X8(p->mb_type[mb_index])) {
2019 for (i = 0; i < 2; i++) {
2020 int sx = mb_x * 16 + 8;
2021 int sy = mb_y * 16 + 4 + 8 * i;
2022 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2023 int mx = (p->motion_val[direction][xy][0] >> shift);
2024 int my = (p->motion_val[direction][xy][1] >> shift);
2026 if (IS_INTERLACED(p->mb_type[mb_index]))
2029 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2030 height, pict->linesize[0], 100);
2032 } else if (IS_8X16(p->mb_type[mb_index])) {
2034 for (i = 0; i < 2; i++) {
2035 int sx = mb_x * 16 + 4 + 8 * i;
2036 int sy = mb_y * 16 + 8;
2037 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2038 int mx = p->motion_val[direction][xy][0] >> shift;
2039 int my = p->motion_val[direction][xy][1] >> shift;
2041 if (IS_INTERLACED(p->mb_type[mb_index]))
2044 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2045 height, pict->linesize[0], 100);
2048 int sx= mb_x * 16 + 8;
2049 int sy= mb_y * 16 + 8;
2050 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2051 int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2052 int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2053 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
/* QP visualization: flat-shade the chroma planes by qscale. */
2057 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2058 uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2059 0x0101010101010101ULL;
2061 for (y = 0; y < block_height; y++) {
2062 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2063 (block_height * mb_y + y) *
2064 pict->linesize[1]) = c;
2065 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2066 (block_height * mb_y + y) *
2067 pict->linesize[2]) = c;
/* MB-type visualization: color-code the MB via a U/V hue per type. */
2070 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2072 int mb_type = p->mb_type[mb_index];
2075 #define COLOR(theta, r) \
2076 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2077 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2081 if (IS_PCM(mb_type)) {
2083 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2084 IS_INTRA16x16(mb_type)) {
2086 } else if (IS_INTRA4x4(mb_type)) {
2088 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2090 } else if (IS_DIRECT(mb_type)) {
2092 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2094 } else if (IS_GMC(mb_type)) {
2096 } else if (IS_SKIP(mb_type)) {
2098 } else if (!USES_LIST(mb_type, 1)) {
2100 } else if (!USES_LIST(mb_type, 0)) {
2103 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* Replicate the chosen U/V bytes across 8 pixels at once. */
2107 u *= 0x0101010101010101ULL;
2108 v *= 0x0101010101010101ULL;
2109 for (y = 0; y < block_height; y++) {
2110 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2111 (block_height * mb_y + y) * pict->linesize[1]) = u;
2112 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2113 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* Partition boundaries: XOR luma so the lines show on any background. */
2117 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2118 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2119 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2120 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2121 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2123 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2124 for (y = 0; y < 16; y++)
2125 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2126 pict->linesize[0]] ^= 0x80;
/* Sub-8x8 splits (H.264 4x4 MVs): mark internal edges when the four
 * quadrant MVs actually differ. */
2128 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2129 int dm = 1 << (mv_sample_log2 - 2);
2130 for (i = 0; i < 4; i++) {
2131 int sx = mb_x * 16 + 8 * (i & 1);
2132 int sy = mb_y * 16 + 8 * (i >> 1);
2133 int xy = (mb_x * 2 + (i & 1) +
2134 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2136 int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2137 if (mv[0] != mv[dm] ||
2138 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2139 for (y = 0; y < 8; y++)
2140 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2141 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2142 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2143 pict->linesize[0]) ^= 0x8080808080808080ULL;
2147 if (IS_INTERLACED(mb_type) &&
2148 avctx->codec->id == AV_CODEC_ID_H264) {
/* Consume the skip count so the next frame starts fresh. */
2152 mbskip_table[mb_index] = 0;
/* Convenience wrapper: forward the MpegEncContext fields to
 * ff_print_debug_info2(). */
2158 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2160 ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2161 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/**
 * Attach the picture's qscale table to an output AVFrame as side data.
 * Takes a new reference on the qscale buffer and trims the leading
 * 2*mb_stride+1 padding entries before handing it over; the buffer ref is
 * owned by the frame afterwards.
 * @return 0 on success, AVERROR(ENOMEM) or an av_frame_set_qp_table error.
 */
2164 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2166 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2167 int offset = 2*s->mb_stride + 1;
2169 return AVERROR(ENOMEM);
2170 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
/* Skip the padding rows so data[0] is the first real MB entry. */
2171 ref->size -= offset;
2172 ref->data += offset;
2173 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/**
 * Half-pel motion compensation for one block in lowres decoding.
 * Computes the source position from the motion vector at the reduced
 * resolution, handles out-of-picture reads via the edge-emulation buffer,
 * and dispatches to the appropriate h264 chroma MC function.
 * NOTE(review): lossy extraction — the quarter_sample MV rescale and the
 * return are on lines not visible here; code left byte-identical.
 */
2176 static inline int hpel_motion_lowres(MpegEncContext *s,
2177 uint8_t *dest, uint8_t *src,
2178 int field_based, int field_select,
2179 int src_x, int src_y,
2180 int width, int height, int stride,
2181 int h_edge_pos, int v_edge_pos,
2182 int w, int h, h264_chroma_mc_func *pix_op,
2183 int motion_x, int motion_y)
2185 const int lowres = s->avctx->lowres;
2186 const int op_index = FFMIN(lowres, 2);
/* Mask selecting the sub-pel fraction bits at this lowres factor. */
2187 const int s_mask = (2 << lowres) - 1;
2191 if (s->quarter_sample) {
/* Split the MV into sub-pel fraction (sx/sy) and integer pel offset. */
2196 sx = motion_x & s_mask;
2197 sy = motion_y & s_mask;
2198 src_x += motion_x >> lowres + 1;
2199 src_y += motion_y >> lowres + 1;
2201 src += src_y * stride + src_x;
/* If the read would cross the picture edge (including the extra sample
 * needed for sub-pel interpolation), go through the emulation buffer. */
2203 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2204 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2205 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
2206 (h + 1) << field_based, src_x,
2207 src_y << field_based,
2210 src = s->edge_emu_buffer;
/* Rescale the fraction to the 1/8-pel range expected by pix_op. */
2214 sx = (sx << 2) >> lowres;
2215 sy = (sy << 2) >> lowres;
2218 pix_op[op_index](dest, src, stride, h, sx, sy);
2222 /* apply one mpeg motion vector to the three components */
2223 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2230 uint8_t **ref_picture,
2231 h264_chroma_mc_func *pix_op,
2232 int motion_x, int motion_y,
/* NOTE(review): this excerpt is missing interior source lines (some
 * parameters, braces and else-branches); code below is kept byte-identical.
 * Purpose (from visible code): motion-compensate one macroblock's Y/Cb/Cr
 * planes at reduced "lowres" resolution using the h264 chroma MC functions. */
2235 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2236 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
2238 const int lowres = s->avctx->lowres;
/* op_index selects the MC function size variant; clamped to 2 (max table index) */
2239 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2);
2240 const int block_s = 8>>lowres;
/* s_mask keeps the sub-pel fraction bits of the motion vector at this lowres level */
2241 const int s_mask = (2 << lowres) - 1;
2242 const int h_edge_pos = s->h_edge_pos >> lowres;
2243 const int v_edge_pos = s->v_edge_pos >> lowres;
/* linesize comes from current_picture, not s->linesize — see the analogous
 * comment in MPV_decode_mb_internal (field pictures); doubled for field MC */
2244 linesize = s->current_picture.f.linesize[0] << field_based;
2245 uvlinesize = s->current_picture.f.linesize[1] << field_based;
2247 // FIXME obviously not perfect but qpel will not work in lowres anyway
2248 if (s->quarter_sample) {
2254 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2257 sx = motion_x & s_mask;
2258 sy = motion_y & s_mask;
/* NOTE(review): ">> lowres + 1" parses as ">> (lowres + 1)" — presumably
 * intentional (halfpel->fullpel plus lowres scaling); matches the
 * parenthesized form "(mx >> (lowres+1))" used further below — confirm. */
2259 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2260 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* Chroma source position depends on the output format's chroma MV rules */
2262 if (s->out_format == FMT_H263) {
2263 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2264 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2265 uvsrc_x = src_x >> 1;
2266 uvsrc_y = src_y >> 1;
2267 } else if (s->out_format == FMT_H261) {
2268 // even chroma mv's are full pel in H261
2271 uvsx = (2 * mx) & s_mask;
2272 uvsy = (2 * my) & s_mask;
2273 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2274 uvsrc_y = mb_y * block_s + (my >> lowres);
2276 if(s->chroma_y_shift){
2281 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2282 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2284 if(s->chroma_x_shift){
2288 uvsy = motion_y & s_mask;
2290 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2293 uvsx = motion_x & s_mask;
2294 uvsy = motion_y & s_mask;
2301 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2302 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2303 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* If the MC source would read outside the picture, replicate edges into
 * edge_emu_buffer and read from there instead */
2305 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2306 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2307 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2308 linesize >> field_based, 17, 17 + field_based,
2309 src_x, src_y << field_based, h_edge_pos,
2311 ptr_y = s->edge_emu_buffer;
2312 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2313 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2314 s->vdsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9,
2316 uvsrc_x, uvsrc_y << field_based,
2317 h_edge_pos >> 1, v_edge_pos >> 1);
2318 s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9,
2320 uvsrc_x, uvsrc_y << field_based,
2321 h_edge_pos >> 1, v_edge_pos >> 1);
2323 ptr_cr = uvbuf + 16;
2327 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
/* presumably the bottom-field branch: step dest/src down one line — TODO confirm
 * against the missing conditional */
2329 dest_y += s->linesize;
2330 dest_cb += s->uvlinesize;
2331 dest_cr += s->uvlinesize;
2335 ptr_y += s->linesize;
2336 ptr_cb += s->uvlinesize;
2337 ptr_cr += s->uvlinesize;
/* Rescale sub-pel fractions to the 1/8-pel positions the MC functions expect */
2340 sx = (sx << 2) >> lowres;
2341 sy = (sy << 2) >> lowres;
2342 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2344 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2345 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2346 uvsx = (uvsx << 2) >> lowres;
2347 uvsy = (uvsy << 2) >> lowres;
2349 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2350 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2353 // FIXME h261 lowres loop filter
/* NOTE(review): lossy excerpt — several interior lines (parameters, braces,
 * sx/sy computation) are missing; the shown code is kept byte-identical.
 * Purpose (from visible code): chroma motion compensation for 4MV (8x8)
 * macroblocks using a single averaged/rounded chroma motion vector. */
2356 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2357 uint8_t *dest_cb, uint8_t *dest_cr,
2358 uint8_t **ref_picture,
2359 h264_chroma_mc_func * pix_op,
2362 const int lowres = s->avctx->lowres;
2363 const int op_index = FFMIN(lowres, 2);
2364 const int block_s = 8 >> lowres;
2365 const int s_mask = (2 << lowres) - 1;
/* chroma edges: one extra shift vs luma (4:2:0 subsampling — presumably) */
2366 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2367 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2368 int emu = 0, src_x, src_y, offset, sx, sy;
2371 if (s->quarter_sample) {
2376 /* In case of 8X8, we construct a single chroma motion vector
2377 with a special rounding */
2378 mx = ff_h263_round_chroma(mx);
2379 my = ff_h263_round_chroma(my);
2383 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2384 src_y = s->mb_y * block_s + (my >> lowres + 1);
2386 offset = src_y * s->uvlinesize + src_x;
2387 ptr = ref_picture[1] + offset;
2388 if (s->flags & CODEC_FLAG_EMU_EDGE) {
2389 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2390 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2391 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
2392 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2393 ptr = s->edge_emu_buffer;
/* presumably emu is set here so the Cr plane below reuses edge emulation —
 * the assignment line is missing from this excerpt; confirm */
2397 sx = (sx << 2) >> lowres;
2398 sy = (sy << 2) >> lowres;
2399 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Same MC for the Cr plane at the same offset */
2401 ptr = ref_picture[2] + offset;
2403 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
2404 src_x, src_y, h_edge_pos, v_edge_pos);
2405 ptr = s->edge_emu_buffer;
2407 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2411 * motion compensation of a single macroblock
2413 * @param dest_y luma destination pointer
2414 * @param dest_cb chroma cb/u destination pointer
2415 * @param dest_cr chroma cr/v destination pointer
2416 * @param dir direction (0->forward, 1->backward)
2417 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2418 * @param pix_op halfpel motion compensation function (average or put normally)
2419 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* NOTE(review): lossy excerpt — case labels, braces and some statements are
 * missing; shown code kept byte-identical. Dispatches on s->mv_type
 * (MV_TYPE_16X16 / 8X8 / FIELD / 16X8 / DMV presumably — labels not visible)
 * and applies lowres motion compensation per the doxygen comment above. */
2421 static inline void MPV_motion_lowres(MpegEncContext *s,
2422 uint8_t *dest_y, uint8_t *dest_cb,
2424 int dir, uint8_t **ref_picture,
2425 h264_chroma_mc_func *pix_op)
2429 const int lowres = s->avctx->lowres;
2430 const int block_s = 8 >>lowres;
2435 switch (s->mv_type) {
/* single 16x16 vector for the whole MB */
2437 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2439 ref_picture, pix_op,
2440 s->mv[dir][0][0], s->mv[dir][0][1],
/* 4MV: four 8x8 luma blocks, chroma handled once below with averaged MV */
2446 for (i = 0; i < 4; i++) {
2447 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2448 s->linesize) * block_s,
2449 ref_picture[0], 0, 0,
2450 (2 * mb_x + (i & 1)) * block_s,
2451 (2 * mb_y + (i >> 1)) * block_s,
2452 s->width, s->height, s->linesize,
2453 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2454 block_s, block_s, pix_op,
2455 s->mv[dir][i][0], s->mv[dir][i][1]);
2457 mx += s->mv[dir][i][0];
2458 my += s->mv[dir][i][1];
2461 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2462 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field motion vectors */
2466 if (s->picture_structure == PICT_FRAME) {
/* frame picture: MC top and bottom fields separately */
2468 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2469 1, 0, s->field_select[dir][0],
2470 ref_picture, pix_op,
2471 s->mv[dir][0][0], s->mv[dir][0][1],
2474 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2475 1, 1, s->field_select[dir][1],
2476 ref_picture, pix_op,
2477 s->mv[dir][1][0], s->mv[dir][1][1],
2480 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2481 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
/* opposite-parity field of a P-picture lives in the current frame */
2482 ref_picture = s->current_picture_ptr->f.data;
2485 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2486 0, 0, s->field_select[dir][0],
2487 ref_picture, pix_op,
2489 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
2493 for (i = 0; i < 2; i++) {
2494 uint8_t **ref2picture;
2496 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2497 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2498 ref2picture = ref_picture;
2500 ref2picture = s->current_picture_ptr->f.data;
2503 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2504 0, 0, s->field_select[dir][i],
2505 ref2picture, pix_op,
2506 s->mv[dir][i][0], s->mv[dir][i][1] +
2507 2 * block_s * i, block_s, mb_y >> 1);
2509 dest_y += 2 * block_s * s->linesize;
2510 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2511 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2515 if (s->picture_structure == PICT_FRAME) {
2516 for (i = 0; i < 2; i++) {
2518 for (j = 0; j < 2; j++) {
2519 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2521 ref_picture, pix_op,
2522 s->mv[dir][2 * i + j][0],
2523 s->mv[dir][2 * i + j][1],
/* after the first put, average subsequent predictions into the same block */
2526 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2529 for (i = 0; i < 2; i++) {
2530 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2531 0, 0, s->picture_structure != i + 1,
2532 ref_picture, pix_op,
2533 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2534 2 * block_s, mb_y >> 1);
2536 // after put we make avg of the same block
2537 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2539 // opposite parity is always in the same
2540 // frame if this is second field
2541 if (!s->first_field) {
2542 ref_picture = s->current_picture_ptr->f.data;
2553 * find the lowest MB row referenced in the MVs
/* NOTE(review): lossy excerpt — the switch cases setting `mvs` and `off` are
 * missing; shown code kept byte-identical. Returns the lowest MB row
 * referenced by this MB's MVs (used to await frame-thread progress). */
2555 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2557 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2558 int my, off, i, mvs;
/* field pictures / GMC: bail out conservatively (fall through to the
 * mb_height-1 return below — presumably via a goto; line missing) */
2560 if (s->picture_structure != PICT_FRAME || s->mcsel)
2563 switch (s->mv_type) {
/* track extreme vertical MVs, normalized to quarter-pel via qpel_shift */
2577 for (i = 0; i < mvs; i++) {
2578 my = s->mv[dir][i][1]<<qpel_shift;
2579 my_max = FFMAX(my_max, my);
2580 my_min = FFMIN(my_min, my);
/* +63 >> 6: round the worst-case qpel displacement up to whole MB rows */
2583 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2585 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* conservative fallback: the whole reference frame may be needed */
2587 return s->mb_height-1;
2590 /* put block[] to dest[] */
/* Dequantize block i with the intra unquantizer, then IDCT and *store*
 * (overwrite) the result at dest. NOTE(review): excerpt is missing the
 * function braces; shown code kept byte-identical. */
2591 static inline void put_dct(MpegEncContext *s,
2592 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2594 s->dct_unquantize_intra(s, block, i, qscale);
2595 s->dsp.idct_put (dest, line_size, block);
2598 /* add block[] to dest[] */
/* IDCT block i and *add* it to dest, skipping blocks with no coded
 * coefficients (block_last_index < 0). NOTE(review): excerpt is missing the
 * function braces; shown code kept byte-identical. */
2599 static inline void add_dct(MpegEncContext *s,
2600 int16_t *block, int i, uint8_t *dest, int line_size)
2602 if (s->block_last_index[i] >= 0) {
2603 s->dsp.idct_add (dest, line_size, block);
/* Like add_dct(), but dequantizes with the inter unquantizer first.
 * NOTE(review): excerpt is missing the function braces; code byte-identical. */
2607 static inline void add_dequant_dct(MpegEncContext *s,
2608 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2610 if (s->block_last_index[i] >= 0) {
2611 s->dct_unquantize_inter(s, block, i, qscale);
2613 s->dsp.idct_add (dest, line_size, block);
2618 * Clean dc, ac, coded_block for the current non-intra MB.
/* Reset DC/AC prediction state and coded_block flags for the current
 * non-intra MB (see doxygen fragment above). NOTE(review): lossy excerpt —
 * some lines (braces, the luma dc_val[0][xy] store presumably) are missing;
 * shown code kept byte-identical. */
2620 void ff_clean_intra_table_entries(MpegEncContext *s)
2622 int wrap = s->b8_stride;
2623 int xy = s->block_index[0];
/* luma DC predictors reset to the neutral value 1024 (8 * 128) */
2626 s->dc_val[0][xy + 1 ] =
2627 s->dc_val[0][xy + wrap] =
2628 s->dc_val[0][xy + 1 + wrap] = 1024;
/* ac pred: clear two rows of 8x8-block AC coefficient predictors */
2630 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2631 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2632 if (s->msmpeg4_version>=3) {
2633 s->coded_block[xy ] =
2634 s->coded_block[xy + 1 ] =
2635 s->coded_block[xy + wrap] =
2636 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one entry per MB (mb_stride-indexed) */
2639 wrap = s->mb_stride;
2640 xy = s->mb_x + s->mb_y * wrap;
2642 s->dc_val[2][xy] = 1024;
2644 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2645 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2647 s->mbintra_table[xy]= 0;
2650 /* generic function called after a macroblock has been parsed by the
2651 decoder or after it has been encoded by the encoder.
2653 Important variables used:
2654 s->mb_intra : true if intra macroblock
2655 s->mv_dir : motion vector direction
2656 s->mv_type : motion vector type
2657 s->mv : motion vector
2658 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* NOTE(review): lossy excerpt — many interior lines (braces, else keywords,
 * returns, some statements) are missing from this view; all shown code is
 * kept byte-identical. Per the comment block above: generic per-MB
 * reconstruction, called after a macroblock has been parsed (decoder) or
 * encoded (encoder). lowres_flag/is_mpeg12 are compile-time-ish selectors
 * (function is av_always_inline, specialized by ff_MPV_decode_mb below). */
2660 static av_always_inline
2661 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2662 int lowres_flag, int is_mpeg12)
2664 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2665 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2666 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2670 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2671 /* print DCT coefficients */
2673 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2675 for(j=0; j<64; j++){
2676 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2678 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2682 s->current_picture.qscale_table[mb_xy] = s->qscale;
2684 /* update DC predictors for P macroblocks */
2686 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2687 if(s->mbintra_table[mb_xy])
2688 ff_clean_intra_table_entries(s);
/* reset chroma DC predictor to neutral for this intra_dc_precision */
2692 s->last_dc[2] = 128 << s->intra_dc_precision;
2695 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2696 s->mbintra_table[mb_xy]=1;
2698 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2699 uint8_t *dest_y, *dest_cb, *dest_cr;
2700 int dct_linesize, dct_offset;
2701 op_pixels_func (*op_pix)[4];
2702 qpel_mc_func (*op_qpix)[16];
2703 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2704 const int uvlinesize = s->current_picture.f.linesize[1];
2705 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2706 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2708 /* avoid copy if macroblock skipped in last frame too */
2709 /* skip only during decoding as we might trash the buffers during encoding a bit */
2711 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2713 if (s->mb_skipped) {
2715 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2717 } else if(!s->current_picture.reference) {
2720 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: rows interleave, so stride doubles and the "lower" block
 * starts one line down instead of block_size lines down */
2724 dct_linesize = linesize << s->interlaced_dct;
2725 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2729 dest_cb= s->dest[1];
2730 dest_cr= s->dest[2];
/* non-readable destination: reconstruct into scratchpad instead */
2732 dest_y = s->b_scratchpad;
2733 dest_cb= s->b_scratchpad+16*linesize;
2734 dest_cr= s->b_scratchpad+32*linesize;
2738 /* motion handling */
2739 /* decoding or more than one mb_type (MC was already done otherwise) */
2742 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
/* frame threading: wait until reference rows we need are decoded */
2743 if (s->mv_dir & MV_DIR_FORWARD) {
2744 ff_thread_await_progress(&s->last_picture_ptr->tf,
2745 ff_MPV_lowest_referenced_row(s, 0),
2748 if (s->mv_dir & MV_DIR_BACKWARD) {
2749 ff_thread_await_progress(&s->next_picture_ptr->tf,
2750 ff_MPV_lowest_referenced_row(s, 1),
/* lowres path: h264chroma put/avg tables */
2756 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2758 if (s->mv_dir & MV_DIR_FORWARD) {
2759 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2760 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2762 if (s->mv_dir & MV_DIR_BACKWARD) {
2763 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* full-res path: hpel/qpel function tables; bidir MC averages backward
 * prediction into the forward one */
2766 op_qpix= s->me.qpel_put;
2767 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2768 op_pix = s->hdsp.put_pixels_tab;
2770 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2772 if (s->mv_dir & MV_DIR_FORWARD) {
2773 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2774 op_pix = s->hdsp.avg_pixels_tab;
2775 op_qpix= s->me.qpel_avg;
2777 if (s->mv_dir & MV_DIR_BACKWARD) {
2778 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2783 /* skip dequant / idct if we are really late ;) */
2784 if(s->avctx->skip_idct){
2785 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2786 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2787 || s->avctx->skip_idct >= AVDISCARD_ALL)
2791 /* add dct residue */
/* codecs without a clean inter dequant handled here dequantize per-block */
2792 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2793 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2794 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2795 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2796 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2797 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2799 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2800 if (s->chroma_y_shift){
2801 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2802 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2806 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2807 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2808 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2809 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2812 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
/* blocks were already dequantized by the caller: plain idct_add */
2813 add_dct(s, block[0], 0, dest_y , dct_linesize);
2814 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2815 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2816 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2818 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2819 if(s->chroma_y_shift){//Chroma420
2820 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2821 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2824 dct_linesize = uvlinesize << s->interlaced_dct;
2825 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2827 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2828 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2829 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2830 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2831 if(!s->chroma_x_shift){//Chroma444
2832 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2833 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2834 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2835 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2840 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2841 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2844 /* dct only in intra block */
2845 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2846 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2847 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2848 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2849 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2851 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2852 if(s->chroma_y_shift){
2853 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2854 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2858 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2859 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2860 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2861 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* already-dequantized intra blocks: direct idct_put */
2865 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2866 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2867 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2868 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2870 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2871 if(s->chroma_y_shift){
2872 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2873 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2876 dct_linesize = uvlinesize << s->interlaced_dct;
2877 dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
2879 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2880 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2881 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2882 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2883 if(!s->chroma_x_shift){//Chroma444
2884 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2885 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2886 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2887 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* presumably the !readable case: copy scratchpad back to s->dest — the
 * guarding conditional is missing from this excerpt; confirm */
2895 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2896 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2897 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry: specialize MPV_decode_mb_internal on (lowres, is_mpeg12) so
 * the av_always_inline body is compiled with constant flags.
 * NOTE(review): excerpt is missing braces/else lines; code byte-identical. */
2902 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2904 if(s->out_format == FMT_MPEG1) {
2905 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2906 else MPV_decode_mb_internal(s, block, 0, 1);
2909 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2910 else MPV_decode_mb_internal(s, block, 0, 0);
2914 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* NOTE(review): lossy excerpt — some lines (variable decls, braces, offset[1]
 * assignment) are missing; shown code kept byte-identical. Draws padding
 * edges for a completed band of rows and invokes the user's
 * draw_horiz_band callback (h is clipped for the last row, per the doxygen
 * fragment above). */
2916 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2917 Picture *last, int y, int h, int picture_structure,
2918 int first_field, int draw_edges, int low_delay,
2919 int v_edge_pos, int h_edge_pos)
2921 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2922 int hshift = desc->log2_chroma_w;
2923 int vshift = desc->log2_chroma_h;
2924 const int field_pic = picture_structure != PICT_FRAME;
/* edge drawing is skipped for hwaccel output and EMU_EDGE mode */
2930 if (!avctx->hwaccel &&
2931 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2934 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2935 int *linesize = cur->f.linesize;
2936 int sides = 0, edge_h;
2937 if (y==0) sides |= EDGE_TOP;
2938 if (y + h >= v_edge_pos)
2939 sides |= EDGE_BOTTOM;
2941 edge_h= FFMIN(h, v_edge_pos - y);
2943 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2944 linesize[0], h_edge_pos, edge_h,
2945 EDGE_WIDTH, EDGE_WIDTH, sides);
2946 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2947 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2948 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2949 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2950 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2951 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2954 h = FFMIN(h, avctx->height - y);
2956 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2958 if (avctx->draw_horiz_band) {
2960 int offset[AV_NUM_DATA_POINTERS];
/* choose current vs last picture as callback source depending on
 * display/coded order — the `src` assignments are in missing lines */
2963 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2964 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2971 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2972 picture_structure == PICT_FRAME &&
2973 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2974 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2977 offset[0]= y * src->linesize[0];
2979 offset[2]= (y >> vshift) * src->linesize[1];
2980 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2986 avctx->draw_horiz_band(avctx, src, offset,
2987 y, picture_structure, h);
/* Thin MpegEncContext wrapper around ff_draw_horiz_band(); edges are drawn
 * only for unrestricted-MV, non-intra-only streams.
 * NOTE(review): excerpt is missing the braces; code byte-identical. */
2991 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2993 int draw_edges = s->unrestricted_mv && !s->intra_only;
2994 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2995 &s->last_picture, y, h, s->picture_structure,
2996 s->first_field, draw_edges, s->low_delay,
2997 s->v_edge_pos, s->h_edge_pos);
/* Set up block_index[] (8x8-block coordinates) and the s->dest[] plane
 * pointers for the current MB row/column. NOTE(review): lossy excerpt —
 * some lines (braces, else) missing; shown code byte-identical. */
3000 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3001 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
3002 const int uvlinesize = s->current_picture.f.linesize[1];
/* mb_size: log2 of MB width in pixels (16 >> lowres) */
3003 const int mb_size= 4 - s->avctx->lowres;
3005 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3006 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3007 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3008 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3009 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3010 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3011 //block_index is not used by mpeg2, so it is not affected by chroma_format
3013 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
3014 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3015 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3017 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3019 if(s->picture_structure==PICT_FRAME){
3020 s->dest[0] += s->mb_y * linesize << mb_size;
3021 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3022 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: advance by half the MB rows (presumably the else branch —
 * the else line itself is missing from this excerpt) */
3024 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3025 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3026 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3027 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3033 * Permute an 8x8 block.
3034 * @param block the block which will be permuted according to the given permutation vector
3035 * @param permutation the permutation vector
3036 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3037 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3038 * (inverse) permutated to scantable order!
/* Permute an 8x8 block in place per `permutation` (see doxygen fragment
 * above; only the first `last`+1 scantable positions are touched).
 * NOTE(review): lossy excerpt — the temp buffer declaration and the
 * copy/clear loop bodies are partly missing; shown code byte-identical. */
3040 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3046 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* first pass: save coefficients (into a temp array — decl in missing lines) */
3048 for(i=0; i<=last; i++){
3049 const int j= scantable[i];
/* second pass: write each saved coefficient to its permuted position */
3054 for(i=0; i<=last; i++){
3055 const int j= scantable[i];
3056 const int perm_j= permutation[j];
3057 block[perm_j]= temp[j];
/* Flush decoder state: drop all picture references and reset the parser.
 * NOTE(review): excerpt is missing some lines (braces, `int i` decl
 * presumably); shown code kept byte-identical. */
3062 void ff_mpeg_flush(AVCodecContext *avctx){
3063 MpegEncContext *s = avctx->priv_data;
3065 if(s==NULL || s->picture==NULL)
3068 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3069 ff_mpeg_unref_picture(s, &s->picture[i]);
3070 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3072 ff_mpeg_unref_picture(s, &s->current_picture);
3073 ff_mpeg_unref_picture(s, &s->last_picture);
3074 ff_mpeg_unref_picture(s, &s->next_picture);
3076 s->mb_x= s->mb_y= 0;
/* reset bitstream parse context so a seek starts clean */
3079 s->parse_context.state= -1;
3080 s->parse_context.frame_start_found= 0;
3081 s->parse_context.overread= 0;
3082 s->parse_context.overread_index= 0;
3083 s->parse_context.index= 0;
3084 s->parse_context.last_index= 0;
3085 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantization: DC scaled by y/c_dc_scale; AC coefficients
 * scaled by qscale*intra_matrix >> 3 and forced odd via (level-1)|1
 * (mismatch/oddification). NOTE(review): lossy excerpt — braces, sign
 * handling and the store back to block[j] are in missing lines; shown code
 * byte-identical. */
3089 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
3090 int16_t *block, int n, int qscale)
3092 int i, level, nCoeffs;
3093 const uint16_t *quant_matrix;
3095 nCoeffs= s->block_last_index[n];
/* n < 4 are luma blocks, others chroma — per the y/c DC scale choice */
3097 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3098 /* XXX: only mpeg1 */
3099 quant_matrix = s->intra_matrix;
3100 for(i=1;i<=nCoeffs;i++) {
3101 int j= s->intra_scantable.permutated[i];
3106 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3107 level = (level - 1) | 1;
3110 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3111 level = (level - 1) | 1;
/* MPEG-1 inter dequantization: ((2*|level|+1) * qscale * inter_matrix) >> 4,
 * forced odd via (level-1)|1; starts at i=0 (no special DC).
 * NOTE(review): lossy excerpt — sign branches and stores are in missing
 * lines; shown code byte-identical. */
3118 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
3119 int16_t *block, int n, int qscale)
3121 int i, level, nCoeffs;
3122 const uint16_t *quant_matrix;
3124 nCoeffs= s->block_last_index[n];
3126 quant_matrix = s->inter_matrix;
3127 for(i=0; i<=nCoeffs; i++) {
3128 int j= s->intra_scantable.permutated[i];
3133 level = (((level << 1) + 1) * qscale *
3134 ((int) (quant_matrix[j]))) >> 4;
3135 level = (level - 1) | 1;
3138 level = (((level << 1) + 1) * qscale *
3139 ((int) (quant_matrix[j]))) >> 4;
3140 level = (level - 1) | 1;
/* MPEG-2 intra dequantization: like the MPEG-1 variant but without the
 * oddification step; processes all 63 AC coeffs when alternate_scan is set.
 * NOTE(review): lossy excerpt — sign handling and stores are in missing
 * lines; shown code byte-identical. */
3147 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
3148 int16_t *block, int n, int qscale)
3150 int i, level, nCoeffs;
3151 const uint16_t *quant_matrix;
3153 if(s->alternate_scan) nCoeffs= 63;
3154 else nCoeffs= s->block_last_index[n];
3156 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3157 quant_matrix = s->intra_matrix;
3158 for(i=1;i<=nCoeffs;i++) {
3159 int j= s->intra_scantable.permutated[i];
3164 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3167 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of MPEG-2 intra dequantization — same visible scaling as
 * dct_unquantize_mpeg2_intra_c; presumably differs in mismatch-control
 * handling carried in the missing lines (sum parity per ISO/IEC 13818-2) —
 * TODO confirm. NOTE(review): lossy excerpt; shown code byte-identical. */
3174 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
3175 int16_t *block, int n, int qscale)
3177 int i, level, nCoeffs;
3178 const uint16_t *quant_matrix;
3181 if(s->alternate_scan) nCoeffs= 63;
3182 else nCoeffs= s->block_last_index[n];
3184 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3186 quant_matrix = s->intra_matrix;
3187 for(i=1;i<=nCoeffs;i++) {
3188 int j= s->intra_scantable.permutated[i];
3193 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3196 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantization: ((2*|level|+1) * qscale * inter_matrix) >> 4,
 * no per-coefficient oddification (unlike the MPEG-1 variant).
 * NOTE(review): lossy excerpt — sign branches, stores and the (presumable)
 * mismatch-control tail are in missing lines; shown code byte-identical. */
3205 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
3206 int16_t *block, int n, int qscale)
3208 int i, level, nCoeffs;
3209 const uint16_t *quant_matrix;
3212 if(s->alternate_scan) nCoeffs= 63;
3213 else nCoeffs= s->block_last_index[n];
3215 quant_matrix = s->inter_matrix;
3216 for(i=0; i<=nCoeffs; i++) {
3217 int j= s->intra_scantable.permutated[i];
3222 level = (((level << 1) + 1) * qscale *
3223 ((int) (quant_matrix[j]))) >> 4;
3226 level = (((level << 1) + 1) * qscale *
3227 ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra dequantization: level' = level*qmul ± qadd with
 * qadd = (qscale-1)|1 (sign-dependent), DC scaled separately.
 * NOTE(review): lossy excerpt — qmul assignment, AC-pred branch and sign
 * test lines are missing; shown code byte-identical. */
3236 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
3237 int16_t *block, int n, int qscale)
3239 int i, level, qmul, qadd;
3242 av_assert2(s->block_last_index[n]>=0);
3247 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3248 qadd = (qscale - 1) | 1;
3255 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3257 for(i=1; i<=nCoeffs; i++) {
3261 level = level * qmul - qadd;
3263 level = level * qmul + qadd;
/* H.263-style inter dequantization: same qmul/qadd scheme as the intra
 * variant but starting at coefficient 0 (no DC special case).
 * NOTE(review): lossy excerpt — qmul assignment and sign tests are in
 * missing lines; shown code byte-identical. */
3270 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
3271 int16_t *block, int n, int qscale)
3273 int i, level, qmul, qadd;
3276 av_assert2(s->block_last_index[n]>=0);
3278 qadd = (qscale - 1) | 1;
3281 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3283 for(i=0; i<=nCoeffs; i++) {
3287 level = level * qmul - qadd;
3289 level = level * qmul + qadd;
3297 * set qscale and update qscale dependent variables.
/* Set s->qscale (clamped to [?,31] — lower-bound clamp is in a missing line)
 * and refresh the qscale-derived chroma/DC scale values.
 * NOTE(review): lossy excerpt; shown code byte-identical. */
3299 void ff_set_qscale(MpegEncContext * s, int qscale)
3303 else if (qscale > 31)
3307 s->chroma_qscale= s->chroma_qscale_table[qscale];
3309 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3310 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report row s->mb_y as decoded for frame-threading consumers; skipped for
 * B-frames, partitioned frames and after errors.
 * NOTE(review): excerpt is missing the braces; code byte-identical. */
3313 void ff_MPV_report_decode_progress(MpegEncContext *s)
3315 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3316 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3319 #if CONFIG_ERROR_RESILIENCE
/* Copy the per-frame state the error-resilience module needs out of the
 * MpegEncContext, then start ER for this frame.
 * NOTE(review): excerpt is missing the braces and the closing line before
 * #endif; shown code byte-identical. */
3320 void ff_mpeg_er_frame_start(MpegEncContext *s)
3322 ERContext *er = &s->er;
3324 er->cur_pic = s->current_picture_ptr;
3325 er->last_pic = s->last_picture_ptr;
3326 er->next_pic = s->next_picture_ptr;
3328 er->pp_time = s->pp_time;
3329 er->pb_time = s->pb_time;
3330 er->quarter_sample = s->quarter_sample;
3331 er->partitioned_frame = s->partitioned_frame;
3333 ff_er_frame_start(er);
3335 #endif /* CONFIG_ERROR_RESILIENCE */