2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
35 #include "h264chroma.h"
38 #include "mpegvideo.h"
41 #include "xvmc_internal.h"
/* Forward declarations of the portable C dequantization routines.
 * One intra/inter pair per bitstream family (MPEG-1, MPEG-2, H.263);
 * the appropriate pair is installed into the context function pointers
 * by ff_dct_common_init() below. */
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
/* bit-exact variant, selected when CODEC_FLAG_BITEXACT is set (see below) */
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
/* Default chroma qscale mapping: the identity (chroma qscale == luma
 * qscale). Installed by ff_MPV_common_defaults(). */
60 static const uint8_t ff_default_chroma_qscale_table[32] = {
61 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
62 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
63 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale: constant 8 for every qscale (also entry 0 of the
 * MPEG-2 intra_dc_precision table array below). */
66 const uint8_t ff_mpeg1_dc_scale_table[128] = {
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
68 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale for intra_dc_precision == 1: constant 4. */
78 static const uint8_t mpeg2_dc_scale_table1[128] = {
79 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
80 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale for intra_dc_precision == 2: constant 2. */
90 static const uint8_t mpeg2_dc_scale_table2[128] = {
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale for intra_dc_precision == 3: constant 1 (no scaling). */
102 static const uint8_t mpeg2_dc_scale_table3[128] = {
103 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
104 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale table selector, indexed by the MPEG-2 intra_dc_precision
 * field (0..3); precision 0 reuses the MPEG-1 table (constant 8). */
114 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
115 ff_mpeg1_dc_scale_table,
116 mpeg2_dc_scale_table1,
117 mpeg2_dc_scale_table2,
118 mpeg2_dc_scale_table3,
/* Pixel-format list exported for codecs that only output 4:2:0.
 * NOTE(review): initializer elements are not visible in this view —
 * presumably AV_PIX_FMT_YUV420P terminated by AV_PIX_FMT_NONE; confirm. */
121 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback (installed in init_er()): reconstructs one
 * macroblock during concealment by loading the MB parameters into the
 * context and invoking the regular MB decode path. */
126 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
128 int mb_x, int mb_y, int mb_intra, int mb_skipped)
130 MpegEncContext *s = opaque;
133 s->mv_type = mv_type;
134 s->mb_intra = mb_intra;
135 s->mb_skipped = mb_skipped;
138 memcpy(s->mv, mv, sizeof(*mv));
140 ff_init_block_index(s);
141 ff_update_block_index(s);
143 s->dsp.clear_blocks(s->block[0]);
/* Point dest[] at the MB's pixels: luma in 16x16 units, chroma scaled
 * down by the chroma shift of the current pixel format. */
145 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
146 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
147 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
150 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
151 ff_MPV_decode_mb(s, s->block);
154 /* init common dct for both encoder and decoder */
155 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Initialize the generic DSP/chroma/halfpel/video helper contexts. */
157 ff_dsputil_init(&s->dsp, s->avctx);
158 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
159 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
160 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* Install the portable C dequantizers; arch-specific init below may
 * override these with optimized versions. */
162 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
163 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
164 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
165 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
166 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
167 if (s->flags & CODEC_FLAG_BITEXACT)
168 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
169 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture initialization (compile-time selected). */
172 ff_MPV_common_init_x86(s);
174 ff_MPV_common_init_axp(s);
176 ff_MPV_common_init_arm(s);
178 ff_MPV_common_init_bfin(s);
180 ff_MPV_common_init_ppc(s);
183 /* load & permutate scantables
184 * note: only wmv uses different ones
186 if (s->alternate_scan) {
187 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
190 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
194 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads). Returns 0 on success or
 * AVERROR(ENOMEM), freeing edge_emu_buffer on failure. */
199 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
/* 64 extra bytes cover the widest filter overread; align to 32. */
201 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
203 // edge emu needs blocksize + filter length - 1
204 // (= 17x17 for halfpel / 21x21 for h264)
205 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
206 // at uvlinesize. It supports only YUV420 so 24x24 is enough
207 // linesize * interlaced * MBsize
208 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
211 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
/* The remaining scratchpads alias the ME scratchpad (they are never
 * live at the same time); obmc gets a 16-byte offset into it. */
213 s->me.temp = s->me.scratchpad;
214 s->rd_scratchpad = s->me.scratchpad;
215 s->b_scratchpad = s->me.scratchpad;
216 s->obmc_scratchpad = s->me.scratchpad + 16;
220 av_freep(&s->edge_emu_buffer);
221 return AVERROR(ENOMEM);
225 * Allocate a frame buffer
227 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* WM image/screen codecs use internally-allocated buffers with their
 * own dimensions, so the threaded get_buffer path is bypassed for them
 * and the default allocator is used instead. */
232 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
233 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
234 s->codec_id != AV_CODEC_ID_MSS2)
235 r = ff_thread_get_buffer(s->avctx, &pic->tf,
236 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
238 pic->f.width = s->avctx->width;
239 pic->f.height = s->avctx->height;
240 pic->f.format = s->avctx->pix_fmt;
241 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
244 if (r < 0 || !pic->f.data[0]) {
245 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Hardware acceleration: allocate the per-picture private data the
 * hwaccel requested, if any. */
250 if (s->avctx->hwaccel) {
251 assert(!pic->hwaccel_picture_private);
252 if (s->avctx->hwaccel->priv_data_size) {
253 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
254 if (!pic->hwaccel_priv_buf) {
255 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
258 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* Strides must stay constant for the lifetime of the context; a frame
 * with different strides cannot be used and is released. */
262 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
263 s->uvlinesize != pic->f.linesize[1])) {
264 av_log(s->avctx, AV_LOG_ERROR,
265 "get_buffer() failed (stride changed)\n");
266 ff_mpeg_unref_picture(s, pic);
270 if (pic->f.linesize[1] != pic->f.linesize[2]) {
271 av_log(s->avctx, AV_LOG_ERROR,
272 "get_buffer() failed (uv stride mismatch)\n");
273 ff_mpeg_unref_picture(s, pic);
/* First successful allocation also sizes the context scratch buffers. */
277 if (!s->edge_emu_buffer &&
278 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
279 av_log(s->avctx, AV_LOG_ERROR,
280 "get_buffer() failed to allocate context scratch buffers.\n");
281 ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data buffers (macroblock statistics,
 * skip/qscale/type tables, motion vectors and reference indices) and
 * reset the recorded allocation dimensions. */
288 static void free_picture_tables(Picture *pic)
292 pic->alloc_mb_width =
293 pic->alloc_mb_height = 0;
295 av_buffer_unref(&pic->mb_var_buf);
296 av_buffer_unref(&pic->mc_mb_var_buf);
297 av_buffer_unref(&pic->mb_mean_buf);
298 av_buffer_unref(&pic->mbskip_table_buf);
299 av_buffer_unref(&pic->qscale_table_buf);
300 av_buffer_unref(&pic->mb_type_buf);
/* Two entries: forward and backward prediction directions. */
302 for (i = 0; i < 2; i++) {
303 av_buffer_unref(&pic->motion_val_buf[i]);
304 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data buffers sized from the current
 * macroblock geometry. Returns 0 or AVERROR(ENOMEM); partially
 * allocated buffers are left for the caller to free. */
308 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
310 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
311 const int mb_array_size = s->mb_stride * s->mb_height;
312 const int b8_array_size = s->b8_stride * s->mb_height * 2;
/* Tables needed by both encoder and decoder. */
316 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
317 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
318 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
320 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
321 return AVERROR(ENOMEM);
/* Encoder-side macroblock statistics (variance / MC variance / mean). */
324 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
325 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
326 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
327 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
328 return AVERROR(ENOMEM);
/* Motion vectors / ref indices: needed for H.263-family output, for
 * encoding, or when MV debugging/visualization is requested. */
331 if (s->out_format == FMT_H263 || s->encoding ||
332 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
333 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
334 int ref_index_size = 4 * mb_array_size;
336 for (i = 0; mv_size && i < 2; i++) {
337 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
338 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
339 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
340 return AVERROR(ENOMEM);
/* Remember the geometry these tables were sized for, so a later
 * resolution change can trigger reallocation (see ff_alloc_picture). */
344 pic->alloc_mb_width = s->mb_width;
345 pic->alloc_mb_height = s->mb_height;
/* Ensure every per-picture table buffer is writable (un-shared),
 * copying-on-write via av_buffer_make_writable() where needed. */
350 static int make_tables_writable(Picture *pic)
353 #define MAKE_WRITABLE(table) \
356 (ret = av_buffer_make_writable(&pic->table)) < 0)\
360 MAKE_WRITABLE(mb_var_buf);
361 MAKE_WRITABLE(mc_mb_var_buf);
362 MAKE_WRITABLE(mb_mean_buf);
363 MAKE_WRITABLE(mbskip_table_buf);
364 MAKE_WRITABLE(qscale_table_buf);
365 MAKE_WRITABLE(mb_type_buf);
367 for (i = 0; i < 2; i++) {
368 MAKE_WRITABLE(motion_val_buf[i]);
369 MAKE_WRITABLE(ref_index_buf[i]);
376 * Allocate a Picture.
377 * The pixels are allocated/set by calling get_buffer() if shared = 0
379 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* Drop stale tables if the macroblock geometry changed since they
 * were allocated. */
383 if (pic->qscale_table_buf)
384 if ( pic->alloc_mb_width != s->mb_width
385 || pic->alloc_mb_height != s->mb_height)
386 free_picture_tables(pic);
/* shared: pixel data must already be present; otherwise it must not
 * be, and a fresh frame buffer is obtained. */
389 av_assert0(pic->f.data[0]);
392 av_assert0(!pic->f.data[0]);
394 if (alloc_frame_buffer(s, pic) < 0)
/* First allocation fixes the context-wide strides. */
397 s->linesize = pic->f.linesize[0];
398 s->uvlinesize = pic->f.linesize[1];
401 if (!pic->qscale_table_buf)
402 ret = alloc_picture_tables(s, pic);
404 ret = make_tables_writable(pic);
/* Publish convenience pointers into the (now writable) buffers. */
409 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
410 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
411 pic->mb_mean = pic->mb_mean_buf->data;
414 pic->mbskip_table = pic->mbskip_table_buf->data;
/* +2*mb_stride+1 skips the table's top/left guard band. */
415 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
416 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
418 if (pic->motion_val_buf[0]) {
419 for (i = 0; i < 2; i++) {
420 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
421 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* Error path: release everything allocated for this picture. */
427 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
428 ff_mpeg_unref_picture(s, pic);
429 free_picture_tables(pic);
430 return AVERROR(ENOMEM);
434 * Deallocate a picture.
436 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
/* Everything past mb_mean in the Picture struct is per-use state and
 * is wiped below; fields up to and including mb_mean are preserved. */
438 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
441 /* WM Image / Screen codecs allocate internal buffers with different
442 * dimensions / colorspaces; ignore user-defined callbacks for these. */
443 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
444 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
445 s->codec_id != AV_CODEC_ID_MSS2)
446 ff_thread_release_buffer(s->avctx, &pic->tf);
448 av_frame_unref(&pic->f);
450 av_buffer_unref(&pic->hwaccel_priv_buf);
/* Tables are only dropped when flagged for reallocation; otherwise
 * they are kept for reuse by the next picture of the same geometry. */
452 if (pic->needs_realloc)
453 free_picture_tables(pic);
455 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's side-data buffers reference src's (frame-threading sync).
 * Buffers already sharing storage with src are left untouched; on any
 * reference failure dst's tables are freed and ENOMEM is returned. */
458 static int update_picture_tables(Picture *dst, Picture *src)
462 #define UPDATE_TABLE(table)\
465 (!dst->table || dst->table->buffer != src->table->buffer)) {\
466 av_buffer_unref(&dst->table);\
467 dst->table = av_buffer_ref(src->table);\
469 free_picture_tables(dst);\
470 return AVERROR(ENOMEM);\
475 UPDATE_TABLE(mb_var_buf);
476 UPDATE_TABLE(mc_mb_var_buf);
477 UPDATE_TABLE(mb_mean_buf);
478 UPDATE_TABLE(mbskip_table_buf);
479 UPDATE_TABLE(qscale_table_buf);
480 UPDATE_TABLE(mb_type_buf);
481 for (i = 0; i < 2; i++) {
482 UPDATE_TABLE(motion_val_buf[i]);
483 UPDATE_TABLE(ref_index_buf[i]);
/* Copy the raw pointers too; they point into the shared buffers. */
486 dst->mb_var = src->mb_var;
487 dst->mc_mb_var = src->mc_mb_var;
488 dst->mb_mean = src->mb_mean;
489 dst->mbskip_table = src->mbskip_table;
490 dst->qscale_table = src->qscale_table;
491 dst->mb_type = src->mb_type;
492 for (i = 0; i < 2; i++) {
493 dst->motion_val[i] = src->motion_val[i];
494 dst->ref_index[i] = src->ref_index[i];
497 dst->alloc_mb_width = src->alloc_mb_width;
498 dst->alloc_mb_height = src->alloc_mb_height;
/* Make dst a new reference to src: frame data, side-data tables,
 * hwaccel private data and the scalar bookkeeping fields. On failure
 * dst is fully unreferenced before the error is returned. */
503 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
507 av_assert0(!dst->f.buf[0]);
508 av_assert0(src->f.buf[0]);
512 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
516 ret = update_picture_tables(dst, src);
520 if (src->hwaccel_picture_private) {
521 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
522 if (!dst->hwaccel_priv_buf)
524 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
527 dst->field_picture = src->field_picture;
528 dst->mb_var_sum = src->mb_var_sum;
529 dst->mc_mb_var_sum = src->mc_mb_var_sum;
530 dst->b_frame_score = src->b_frame_score;
531 dst->needs_realloc = src->needs_realloc;
532 dst->reference = src->reference;
533 dst->shared = src->shared;
/* Error cleanup path. */
537 ff_mpeg_unref_picture(s, dst);
/* Allocate the per-thread (slice-context) buffers: ME maps, optional
 * noise-reduction error sums, DCT block storage and, for H.263-family
 * codecs, the AC prediction values. Returns 0 or -1 on OOM; partially
 * allocated data is freed later by ff_MPV_common_end(). */
541 static int init_duplicate_context(MpegEncContext *s)
543 int y_size = s->b8_stride * (2 * s->mb_height + 1);
544 int c_size = s->mb_stride * (s->mb_height + 1);
545 int yc_size = y_size + 2 * c_size;
/* Scratchpads are (re)allocated lazily by ff_mpv_frame_size_alloc(). */
553 s->obmc_scratchpad = NULL;
556 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
557 ME_MAP_SIZE * sizeof(uint32_t), fail)
558 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
559 ME_MAP_SIZE * sizeof(uint32_t), fail)
560 if (s->avctx->noise_reduction) {
561 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
562 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered. */
565 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
566 s->block = s->blocks[0];
568 for (i = 0; i < 12; i++) {
569 s->pblocks[i] = &s->block[i];
572 if (s->out_format == FMT_H263) {
574 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
575 yc_size * sizeof(int16_t) * 16, fail);
/* ac_val[0]=luma, ac_val[1]/[2]=chroma planes, each offset past the
 * top/left guard rows of their plane. */
576 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
577 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
578 s->ac_val[2] = s->ac_val[1] + c_size;
583 return -1; // free() through ff_MPV_common_end()
/* Free everything init_duplicate_context() and the lazy scratch-buffer
 * allocation created for one slice context. */
586 static void free_duplicate_context(MpegEncContext *s)
591 av_freep(&s->edge_emu_buffer);
592 av_freep(&s->me.scratchpad);
/* obmc_scratchpad aliases me.scratchpad (see ff_mpv_frame_size_alloc),
 * so it is only cleared, never freed separately. */
596 s->obmc_scratchpad = NULL;
598 av_freep(&s->dct_error_sum);
599 av_freep(&s->me.map);
600 av_freep(&s->me.score_map);
601 av_freep(&s->blocks);
602 av_freep(&s->ac_val_base);
/* Copy the per-thread fields from src into bak; used by
 * ff_update_duplicate_context() to preserve them across the wholesale
 * struct memcpy. */
606 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
608 #define COPY(a) bak->a = src->a
609 COPY(edge_emu_buffer);
614 COPY(obmc_scratchpad);
621 COPY(me.map_generation);
/* Refresh a slice context from the master context: copy the whole
 * struct, then restore dst's own per-thread buffers and re-point the
 * pblocks array into dst's block storage. */
633 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
637 // FIXME copy only needed parts
639 backup_duplicate_context(&bak, dst);
640 memcpy(dst, src, sizeof(MpegEncContext));
641 backup_duplicate_context(dst, &bak);
642 for (i = 0; i < 12; i++) {
643 dst->pblocks[i] = &dst->block[i];
/* Scratch buffers may not exist yet if this context was never used. */
645 if (!dst->edge_emu_buffer &&
646 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
647 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
648 "scratch buffers.\n");
651 // STOP_TIMER("update_duplicate_context")
652 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading context update: synchronize decoder state from the
 * source thread's context (s1) into this thread's context (s) —
 * pictures, bitstream buffer, bug workarounds and interlacing state. */
656 int ff_mpeg_update_thread_context(AVCodecContext *dst,
657 const AVCodecContext *src)
660 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
667 // FIXME can parameters change on I-frames?
668 // in that case dst may need a reinit
669 if (!s->context_initialized) {
/* First update: clone the whole context, then re-create everything
 * that must be private to this thread. */
670 memcpy(s, s1, sizeof(MpegEncContext));
673 s->bitstream_buffer = NULL;
674 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
676 if (s1->context_initialized){
677 // s->picture_range_start += MAX_PICTURE_COUNT;
678 // s->picture_range_end += MAX_PICTURE_COUNT;
679 if((ret = ff_MPV_common_init(s)) < 0){
680 memset(s, 0, sizeof(MpegEncContext));
/* Mid-stream resolution change: resize frame-dependent state. */
687 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
688 s->context_reinit = 0;
689 s->height = s1->height;
690 s->width = s1->width;
691 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
695 s->avctx->coded_height = s1->avctx->coded_height;
696 s->avctx->coded_width = s1->avctx->coded_width;
697 s->avctx->width = s1->avctx->width;
698 s->avctx->height = s1->avctx->height;
700 s->coded_picture_number = s1->coded_picture_number;
701 s->picture_number = s1->picture_number;
702 s->input_picture_number = s1->input_picture_number;
704 av_assert0(!s->picture || s->picture != s1->picture);
/* Re-reference every picture from the source thread. */
706 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
707 ff_mpeg_unref_picture(s, &s->picture[i]);
708 if (s1->picture[i].f.data[0] &&
709 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
713 #define UPDATE_PICTURE(pic)\
715 ff_mpeg_unref_picture(s, &s->pic);\
716 if (s1->pic.f.data[0])\
717 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
719 ret = update_picture_tables(&s->pic, &s1->pic);\
724 UPDATE_PICTURE(current_picture);
725 UPDATE_PICTURE(last_picture);
726 UPDATE_PICTURE(next_picture);
/* Translate s1's picture pointers into the equivalent slots in s. */
728 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
729 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
730 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
732 // Error/bug resilience
733 s->next_p_frame_damaged = s1->next_p_frame_damaged;
734 s->workaround_bugs = s1->workaround_bugs;
735 s->padding_bug_score = s1->padding_bug_score;
/* Bulk-copy the MPEG-4 field range [time_increment_bits, shape). */
738 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
739 (char *) &s1->shape - (char *) &s1->time_increment_bits);
742 s->max_b_frames = s1->max_b_frames;
743 s->low_delay = s1->low_delay;
744 s->droppable = s1->droppable;
746 // DivX handling (doesn't work)
747 s->divx_packed = s1->divx_packed;
/* Duplicate any leftover bitstream data (padded for the bit reader). */
749 if (s1->bitstream_buffer) {
750 if (s1->bitstream_buffer_size +
751 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
752 av_fast_malloc(&s->bitstream_buffer,
753 &s->allocated_bitstream_buffer_size,
754 s1->allocated_bitstream_buffer_size);
755 s->bitstream_buffer_size = s1->bitstream_buffer_size;
756 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
757 s1->bitstream_buffer_size);
758 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
759 FF_INPUT_BUFFER_PADDING_SIZE);
762 // linesize dependend scratch buffer allocation
763 if (!s->edge_emu_buffer)
765 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
766 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
767 "scratch buffers.\n");
768 return AVERROR(ENOMEM);
771 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
772 "be allocated due to unknown size.\n");
775 // MPEG2/interlacing info
776 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
777 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
779 if (!s1->first_field) {
780 s->last_pict_type = s1->pict_type;
781 if (s1->current_picture_ptr)
782 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
784 if (s1->pict_type != AV_PICTURE_TYPE_B) {
785 s->last_non_b_pict_type = s1->pict_type;
793 * Set the given MpegEncContext to common defaults
794 * (same for encoding and decoding).
795 * The changed fields will not depend upon the
796 * prior state of the MpegEncContext.
798 void ff_MPV_common_defaults(MpegEncContext *s)
/* Default tables: MPEG-1 DC scaling, identity chroma qscale. */
800 s->y_dc_scale_table =
801 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
802 s->chroma_qscale_table = ff_default_chroma_qscale_table;
/* Assume progressive full-frame coding until headers say otherwise. */
803 s->progressive_frame = 1;
804 s->progressive_sequence = 1;
805 s->picture_structure = PICT_FRAME;
807 s->coded_picture_number = 0;
808 s->picture_number = 0;
809 s->input_picture_number = 0;
811 s->picture_in_gop_number = 0;
816 s->slice_context_count = 1;
820 * Set the given MpegEncContext to defaults for decoding.
821 * the changed fields will not depend upon
822 * the prior state of the MpegEncContext.
824 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Currently identical to the common defaults. */
826 ff_MPV_common_defaults(s);
/* Initialize the error-resilience context: mirror the macroblock
 * geometry and shared tables from the codec context, allocate the ER
 * work buffers, and install the concealment MB-decode callback. */
829 static int init_er(MpegEncContext *s)
831 ERContext *er = &s->er;
832 int mb_array_size = s->mb_height * s->mb_stride;
835 er->avctx = s->avctx;
838 er->mb_index2xy = s->mb_index2xy;
839 er->mb_num = s->mb_num;
840 er->mb_width = s->mb_width;
841 er->mb_height = s->mb_height;
842 er->mb_stride = s->mb_stride;
843 er->b8_stride = s->b8_stride;
845 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
846 er->error_status_table = av_mallocz(mb_array_size);
847 if (!er->er_temp_buffer || !er->error_status_table)
/* Skip/intra tables are shared with (owned by) the codec context. */
850 er->mbskip_table = s->mbskip_table;
851 er->mbintra_table = s->mbintra_table;
853 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
854 er->dc_val[i] = s->dc_val[i];
856 er->decode_mb = mpeg_er_decode_mb;
/* Error path: release the ER-owned buffers. */
861 av_freep(&er->er_temp_buffer);
862 av_freep(&er->error_status_table);
863 return AVERROR(ENOMEM);
867 * Initialize and allocates MpegEncContext fields dependent on the resolution.
869 static int init_context_frame(MpegEncContext *s)
871 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
873 s->mb_width = (s->width + 15) / 16;
874 s->mb_stride = s->mb_width + 1;
875 s->b8_stride = s->mb_width * 2 + 1;
876 s->b4_stride = s->mb_width * 4 + 1;
877 mb_array_size = s->mb_height * s->mb_stride;
878 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
880 /* set default edge pos, will be overriden
881 * in decode_header if needed */
882 s->h_edge_pos = s->mb_width * 16;
883 s->v_edge_pos = s->mb_height * 16;
885 s->mb_num = s->mb_width * s->mb_height;
890 s->block_wrap[3] = s->b8_stride;
892 s->block_wrap[5] = s->mb_stride;
894 y_size = s->b8_stride * (2 * s->mb_height + 1);
895 c_size = s->mb_stride * (s->mb_height + 1);
896 yc_size = y_size + 2 * c_size;
898 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
899 for (y = 0; y < s->mb_height; y++)
900 for (x = 0; x < s->mb_width; x++)
901 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
903 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
906 /* Allocate MV tables */
907 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
908 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
909 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
910 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
911 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
912 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
913 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
914 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
915 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
916 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
917 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
918 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
920 /* Allocate MB type table */
921 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
923 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
925 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
926 mb_array_size * sizeof(float), fail);
927 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
928 mb_array_size * sizeof(float), fail);
932 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
933 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
934 /* interlaced direct mode decoding tables */
935 for (i = 0; i < 2; i++) {
937 for (j = 0; j < 2; j++) {
938 for (k = 0; k < 2; k++) {
939 FF_ALLOCZ_OR_GOTO(s->avctx,
940 s->b_field_mv_table_base[i][j][k],
941 mv_table_size * 2 * sizeof(int16_t),
943 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
946 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
947 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
948 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
950 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
953 if (s->out_format == FMT_H263) {
955 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
956 s->coded_block = s->coded_block_base + s->b8_stride + 1;
958 /* cbp, ac_pred, pred_dir */
959 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
960 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
963 if (s->h263_pred || s->h263_plus || !s->encoding) {
965 // MN: we need these for error resilience of intra-frames
966 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
967 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
968 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
969 s->dc_val[2] = s->dc_val[1] + c_size;
970 for (i = 0; i < yc_size; i++)
971 s->dc_val_base[i] = 1024;
974 /* which mb is a intra block */
975 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
976 memset(s->mbintra_table, 1, mb_array_size);
978 /* init macroblock skip table */
979 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
980 // Note the + 1 is for a quicker mpeg4 slice_end detection
984 return AVERROR(ENOMEM);
988 * init common structure for both encoder and decoder.
989 * this assumes that some variables like width/height are already set
991 av_cold int ff_MPV_common_init(MpegEncContext *s)
994 int nb_slices = (HAVE_THREADS &&
995 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
996 s->avctx->thread_count : 1;
998 if (s->encoding && s->avctx->slices)
999 nb_slices = s->avctx->slices;
1001 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1002 s->mb_height = (s->height + 31) / 32 * 2;
1004 s->mb_height = (s->height + 15) / 16;
1006 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1007 av_log(s->avctx, AV_LOG_ERROR,
1008 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1012 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1015 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1017 max_slices = MAX_THREADS;
1018 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1019 " reducing to %d\n", nb_slices, max_slices);
1020 nb_slices = max_slices;
1023 if ((s->width || s->height) &&
1024 av_image_check_size(s->width, s->height, 0, s->avctx))
1027 ff_dct_common_init(s);
1029 s->flags = s->avctx->flags;
1030 s->flags2 = s->avctx->flags2;
1032 /* set chroma shifts */
1033 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
1035 /* convert fourcc to upper case */
1036 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1037 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1039 s->avctx->coded_frame = &s->current_picture.f;
1042 if (s->msmpeg4_version) {
1043 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1044 2 * 2 * (MAX_LEVEL + 1) *
1045 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1047 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1049 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
1050 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
1051 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
1052 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1053 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1054 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1055 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1056 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1058 if (s->avctx->noise_reduction) {
1059 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
1063 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1064 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1065 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1066 avcodec_get_frame_defaults(&s->picture[i].f);
1068 memset(&s->next_picture, 0, sizeof(s->next_picture));
1069 memset(&s->last_picture, 0, sizeof(s->last_picture));
1070 memset(&s->current_picture, 0, sizeof(s->current_picture));
1071 avcodec_get_frame_defaults(&s->next_picture.f);
1072 avcodec_get_frame_defaults(&s->last_picture.f);
1073 avcodec_get_frame_defaults(&s->current_picture.f);
1075 if (init_context_frame(s))
1078 s->parse_context.state = -1;
1080 s->context_initialized = 1;
1081 s->thread_context[0] = s;
1083 // if (s->width && s->height) {
1084 if (nb_slices > 1) {
1085 for (i = 1; i < nb_slices; i++) {
1086 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1087 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1090 for (i = 0; i < nb_slices; i++) {
1091 if (init_duplicate_context(s->thread_context[i]) < 0)
1093 s->thread_context[i]->start_mb_y =
1094 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1095 s->thread_context[i]->end_mb_y =
1096 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1099 if (init_duplicate_context(s) < 0)
1102 s->end_mb_y = s->mb_height;
1104 s->slice_context_count = nb_slices;
1109 ff_MPV_common_end(s);
1114 * Frees and resets MpegEncContext fields depending on the resolution.
1115 * Is used during resolution changes to avoid a full reinitialization of the
1118 static int free_context_frame(MpegEncContext *s)
1122 av_freep(&s->mb_type);
1123 av_freep(&s->p_mv_table_base);
1124 av_freep(&s->b_forw_mv_table_base);
1125 av_freep(&s->b_back_mv_table_base);
1126 av_freep(&s->b_bidir_forw_mv_table_base);
1127 av_freep(&s->b_bidir_back_mv_table_base);
1128 av_freep(&s->b_direct_mv_table_base);
1129 s->p_mv_table = NULL;
1130 s->b_forw_mv_table = NULL;
1131 s->b_back_mv_table = NULL;
1132 s->b_bidir_forw_mv_table = NULL;
1133 s->b_bidir_back_mv_table = NULL;
1134 s->b_direct_mv_table = NULL;
1135 for (i = 0; i < 2; i++) {
1136 for (j = 0; j < 2; j++) {
1137 for (k = 0; k < 2; k++) {
1138 av_freep(&s->b_field_mv_table_base[i][j][k]);
1139 s->b_field_mv_table[i][j][k] = NULL;
1141 av_freep(&s->b_field_select_table[i][j]);
1142 av_freep(&s->p_field_mv_table_base[i][j]);
1143 s->p_field_mv_table[i][j] = NULL;
1145 av_freep(&s->p_field_select_table[i]);
1148 av_freep(&s->dc_val_base);
1149 av_freep(&s->coded_block_base);
1150 av_freep(&s->mbintra_table);
1151 av_freep(&s->cbp_table);
1152 av_freep(&s->pred_dir_table);
1154 av_freep(&s->mbskip_table);
1156 av_freep(&s->er.error_status_table);
1157 av_freep(&s->er.er_temp_buffer);
1158 av_freep(&s->mb_index2xy);
1159 av_freep(&s->lambda_table);
1161 av_freep(&s->cplx_tab);
1162 av_freep(&s->bits_tab);
1164 s->linesize = s->uvlinesize = 0;
/* Re-initialize only the resolution-dependent parts of the context after a
 * mid-stream frame-size change: tear down slice/thread contexts and the
 * per-resolution tables, recompute mb_height, then rebuild everything.
 * NOTE(review): margin numbering is discontinuous — some error-handling and
 * brace lines of this function are not visible here; comments only added. */
1169 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Free per-slice duplicate contexts; thread_context[0] is s itself, so the
 * av_freep loop deliberately starts at index 1. */
1173 if (s->slice_context_count > 1) {
1174 for (i = 0; i < s->slice_context_count; i++) {
1175 free_duplicate_context(s->thread_context[i]);
1177 for (i = 1; i < s->slice_context_count; i++) {
1178 av_freep(&s->thread_context[i]);
1181 free_duplicate_context(s);
1183 if ((err = free_context_frame(s)) < 0)
/* Mark every picture for reallocation at the new size instead of freeing
 * immediately; referenced frames may still be displayed. */
1187 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1188 s->picture[i].needs_realloc = 1;
1191 s->last_picture_ptr =
1192 s->next_picture_ptr =
1193 s->current_picture_ptr = NULL;
/* Interlaced MPEG-2 rounds mb_height up to a multiple of two 16-line field
 * macroblock rows (32-pixel granularity); everything else uses 16. */
1196 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1197 s->mb_height = (s->height + 31) / 32 * 2;
1199 s->mb_height = (s->height + 15) / 16;
1201 if ((s->width || s->height) &&
1202 av_image_check_size(s->width, s->height, 0, s->avctx))
1203 return AVERROR_INVALIDDATA;
1205 if ((err = init_context_frame(s)))
1208 s->thread_context[0] = s;
1210 if (s->width && s->height) {
1211 int nb_slices = s->slice_context_count;
1212 if (nb_slices > 1) {
/* Slice contexts are shallow memcpy clones of the master context, then
 * re-specialized by init_duplicate_context below. */
1213 for (i = 1; i < nb_slices; i++) {
1214 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1215 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1218 for (i = 0; i < nb_slices; i++) {
1219 if (init_duplicate_context(s->thread_context[i]) < 0)
/* Split macroblock rows evenly across slices with rounding. */
1221 s->thread_context[i]->start_mb_y =
1222 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1223 s->thread_context[i]->end_mb_y =
1224 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1227 err = init_duplicate_context(s);
1231 s->end_mb_y = s->mb_height;
1233 s->slice_context_count = nb_slices;
/* Failure path (label presumably just above — not visible in this view):
 * tear down the whole context rather than leave it half-built. */
1238 ff_MPV_common_end(s);
1242 /* init common structure for both encoder and decoder */
/* Full teardown counterpart of ff_MPV_common_init(): frees slice contexts,
 * bitstream/parse buffers, quant matrices, all pictures and per-resolution
 * tables, and resets the context to the "not initialized" state.
 * NOTE(review): margin numbering is discontinuous — a few brace/blank lines
 * are not visible in this chunk; comments only added. */
1243 void ff_MPV_common_end(MpegEncContext *s)
/* thread_context[0] aliases s, so only indices >= 1 are heap-freed. */
1247 if (s->slice_context_count > 1) {
1248 for (i = 0; i < s->slice_context_count; i++) {
1249 free_duplicate_context(s->thread_context[i]);
1251 for (i = 1; i < s->slice_context_count; i++) {
1252 av_freep(&s->thread_context[i]);
1254 s->slice_context_count = 1;
1255 } else free_duplicate_context(s);
1257 av_freep(&s->parse_context.buffer);
1258 s->parse_context.buffer_size = 0;
1260 av_freep(&s->bitstream_buffer);
1261 s->allocated_bitstream_buffer_size = 0;
1263 av_freep(&s->avctx->stats_out);
1264 av_freep(&s->ac_stats);
/* The chroma intra matrices may alias the luma ones (shared allocation);
 * only free them when they are genuinely separate buffers. */
1266 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1267 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1268 s->q_chroma_intra_matrix= NULL;
1269 s->q_chroma_intra_matrix16= NULL;
1270 av_freep(&s->q_intra_matrix);
1271 av_freep(&s->q_inter_matrix);
1272 av_freep(&s->q_intra_matrix16);
1273 av_freep(&s->q_inter_matrix16);
1274 av_freep(&s->input_picture);
1275 av_freep(&s->reordered_input_picture);
1276 av_freep(&s->dct_offset);
/* Release every picture's side tables and frame buffers before freeing the
 * picture array itself. */
1279 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1280 free_picture_tables(&s->picture[i]);
1281 ff_mpeg_unref_picture(s, &s->picture[i]);
1284 av_freep(&s->picture);
1285 free_picture_tables(&s->last_picture);
1286 ff_mpeg_unref_picture(s, &s->last_picture);
1287 free_picture_tables(&s->current_picture);
1288 ff_mpeg_unref_picture(s, &s->current_picture);
1289 free_picture_tables(&s->next_picture);
1290 ff_mpeg_unref_picture(s, &s->next_picture);
1291 free_picture_tables(&s->new_picture);
1292 ff_mpeg_unref_picture(s, &s->new_picture);
1294 free_context_frame(s);
1296 s->context_initialized = 0;
1297 s->last_picture_ptr =
1298 s->next_picture_ptr =
1299 s->current_picture_ptr = NULL;
1300 s->linesize = s->uvlinesize = 0;
/* Initialize an RLTable's derived lookup arrays (max_level[], max_run[],
 * index_run[]) from its raw run/level tables, once for "not last" (last=0)
 * and once for "last" (last=1) coefficients. When static_store is non-NULL
 * the three arrays are carved out of that caller-provided buffer; otherwise
 * they are av_malloc'ed (NOTE(review): mallocs here are unchecked — presumably
 * accepted for av_cold one-time init; the else/brace lines between the
 * static and dynamic branches are not visible in this chunk). */
1303 av_cold void ff_init_rl(RLTable *rl,
1304 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1306 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1307 uint8_t index_run[MAX_RUN + 1];
1308 int last, run, level, start, end, i;
1310 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1311 if (static_store && rl->max_level[0])
1314 /* compute max_level[], max_run[] and index_run[] */
1315 for (last = 0; last < 2; last++) {
/* rl->n doubles as the "unset" sentinel for index_run entries. */
1324 memset(max_level, 0, MAX_RUN + 1)
1325 memset(max_run, 0, MAX_LEVEL + 1);
1326 memset(index_run, rl->n, MAX_RUN + 1);
1327 for (i = start; i < end; i++) {
1328 run = rl->table_run[i];
1329 level = rl->table_level[i];
1330 if (index_run[run] == rl->n)
1332 if (level > max_level[run])
1333 max_level[run] = level;
1334 if (run > max_run[level])
1335 max_run[level] = run;
/* Static layout within static_store[last]:
 *   [0 .. MAX_RUN]                         max_level
 *   [MAX_RUN+1 .. MAX_RUN+MAX_LEVEL+1]     max_run
 *   [MAX_RUN+MAX_LEVEL+2 .. end]           index_run */
1338 rl->max_level[last] = static_store[last];
1340 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1341 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1343 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1345 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1346 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1348 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1350 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1351 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Pre-expand the RL VLC into per-qscale tables rl->rl_vlc[q] so the decoder
 * can read run/level/len with a single lookup, with dequantization already
 * folded in via qmul/qadd.
 * NOTE(review): margin numbering is discontinuous — the qmul assignment and
 * several branch bodies are not visible in this chunk; comments only added. */
1355 av_cold void ff_init_vlc_rl(RLTable *rl)
1359 for (q = 0; q < 32; q++) {
/* qadd = (q - 1) | 1 forces an odd rounding term; presumably q == 0 is
 * special-cased in the missing lines — TODO confirm. */
1361 int qadd = (q - 1) | 1;
1367 for (i = 0; i < rl->vlc.table_size; i++) {
1368 int code = rl->vlc.table[i][0];
1369 int len = rl->vlc.table[i][1];
1372 if (len == 0) { // illegal code
1375 } else if (len < 0) { // more bits needed
1379 if (code == rl->n) { // esc
1383 run = rl->table_run[code] + 1;
1384 level = rl->table_level[code] * qmul + qadd;
/* Codes at/after rl->last carry the "last coefficient" flag via run += 192. */
1385 if (code >= rl->last) run += 192;
1388 rl->rl_vlc[q][i].len = len;
1389 rl->rl_vlc[q][i].level = level;
1390 rl->rl_vlc[q][i].run = run;
/* Unreference every non-reference picture in the pool; the current picture
 * is spared unless remove_current is set. */
1395 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1399 /* release non reference frames */
1400 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1401 if (!s->picture[i].reference &&
1402 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1403 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Predicate: can this picture slot be recycled? The last picture is never
 * reusable; an unallocated slot (no data) is; a slot flagged for realloc is
 * reusable unless it is a delayed reference.
 * NOTE(review): the return statements fall in lines missing from this view;
 * the semantics above are inferred from the visible conditions — confirm
 * against the full file. */
1408 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1410 if (pic == s->last_picture_ptr)
1412 if (pic->f.data[0] == NULL)
1414 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Find a free slot in s->picture[]. First pass prefers completely empty
 * slots (no data allocated); second pass accepts anything pic_is_unused()
 * approves. Running out of slots is treated as an internal error (see the
 * original rationale comment below).
 * NOTE(review): the returns of each pass and the `shared` handling are in
 * lines missing from this view; comments only added. */
1419 static int find_unused_picture(MpegEncContext *s, int shared)
1424 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1425 if (s->picture[i].f.data[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1429 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1430 if (pic_is_unused(s, &s->picture[i]))
1435 av_log(s->avctx, AV_LOG_FATAL,
1436 "Internal error, picture buffer overflow\n");
1437 /* We could return -1, but the codec would crash trying to draw into a
1438 * non-existing frame anyway. This is safer than waiting for a random crash.
1439 * Also the return of this is never useful, an encoder must only allocate
1440 * as much as allowed in the specification. This has no relationship to how
1441 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1442 * enough for such valid streams).
1443 * Plus, a decoder has to check stream validity and remove frames if too
1444 * many reference frames are around. Waiting for "OOM" is not correct at
1445 * all. Similarly, missing reference frames have to be replaced by
1446 * interpolated/MC frames, anything else is a bug in the codec ...
/* Public wrapper around find_unused_picture(): if the chosen slot was merely
 * flagged needs_realloc, fully reset it (free side tables, unref buffers,
 * re-default the AVFrame) so the caller receives a clean slot. */
1452 int ff_find_unused_picture(MpegEncContext *s, int shared)
1454 int ret = find_unused_picture(s, shared);
1456 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1457 if (s->picture[ret].needs_realloc) {
1458 s->picture[ret].needs_realloc = 0;
1459 free_picture_tables(&s->picture[ret]);
1460 ff_mpeg_unref_picture(s, &s->picture[ret]);
1461 avcodec_get_frame_defaults(&s->picture[ret].f);
/* Recompute the encoder's noise-reduction DCT offsets from accumulated
 * per-coefficient error statistics, separately for intra and inter blocks.
 * Counts are halved once they exceed 2^16 so the statistics stay a rolling
 * average instead of saturating. */
1467 static void update_noise_reduction(MpegEncContext *s)
1471 for (intra = 0; intra < 2; intra++) {
1472 if (s->dct_count[intra] > (1 << 16)) {
1473 for (i = 0; i < 64; i++) {
1474 s->dct_error_sum[intra][i] >>= 1;
1476 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, with +error_sum/2 for
 * rounding and +1 to avoid division by zero. */
1479 for (i = 0; i < 64; i++) {
1480 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1481 s->dct_count[intra] +
1482 s->dct_error_sum[intra][i] / 2) /
1483 (s->dct_error_sum[intra][i] + 1);
1489  * generic function for encode/decode called after coding/decoding
1490  * the header and before a frame is coded/decoded.
/* NOTE(review): margin numbering is discontinuous throughout this function —
 * error returns, some braces and a few statements are not visible in this
 * chunk. Only comments were added; code is untouched. */
1492 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1498 if (!ff_thread_can_start_frame(avctx)) {
1499 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1503 /* mark & release old frames */
/* For non-B frames the previous "last" reference is retired (unless it is
 * also the "next" reference). */
1504 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1505 s->last_picture_ptr != s->next_picture_ptr &&
1506 s->last_picture_ptr->f.data[0]) {
1507 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1510 /* release forgotten pictures */
1511 /* if (mpeg124/h263) */
/* Any reference picture that is neither last nor next should not exist at
 * this point — release it ("zombie"); expected during frame threading, an
 * error otherwise. */
1513 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1514 if (&s->picture[i] != s->last_picture_ptr &&
1515 &s->picture[i] != s->next_picture_ptr &&
1516 s->picture[i].reference && !s->picture[i].needs_realloc) {
1517 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1518 av_log(avctx, AV_LOG_ERROR,
1519 "releasing zombie picture\n");
1520 ff_mpeg_unref_picture(s, &s->picture[i]);
1525 ff_mpeg_unref_picture(s, &s->current_picture);
1528 ff_release_unused_pictures(s, 1);
/* Pick the slot for the frame being started: reuse a pre-set (empty)
 * current_picture_ptr if one exists, otherwise grab a free slot. */
1530 if (s->current_picture_ptr &&
1531 s->current_picture_ptr->f.data[0] == NULL) {
1532 // we already have a unused image
1533 // (maybe it was set before reading the header)
1534 pic = s->current_picture_ptr;
1536 i = ff_find_unused_picture(s, 0);
1538 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1541 pic = &s->picture[i];
1545 if (!s->droppable) {
1546 if (s->pict_type != AV_PICTURE_TYPE_B)
1550 pic->f.coded_picture_number = s->coded_picture_number++;
1552 if (ff_alloc_picture(s, pic, 0) < 0)
1555 s->current_picture_ptr = pic;
1556 // FIXME use only the vars from current_pic
1557 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1558 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1559 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* For field pictures, top_field_first is derived from which field comes
 * first in coded order. */
1560 if (s->picture_structure != PICT_FRAME)
1561 s->current_picture_ptr->f.top_field_first =
1562 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1564 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1565 !s->progressive_sequence;
1566 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1569 s->current_picture_ptr->f.pict_type = s->pict_type;
1570 // if (s->flags && CODEC_FLAG_QSCALE)
1571 //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1572 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1574 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1575 s->current_picture_ptr)) < 0)
/* Rotate the reference pointers: for P/I frames the old "next" becomes
 * "last" and the new frame becomes "next". */
1578 if (s->pict_type != AV_PICTURE_TYPE_B) {
1579 s->last_picture_ptr = s->next_picture_ptr;
1581 s->next_picture_ptr = s->current_picture_ptr;
1583 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1584 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1585 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1586 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1587 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1588 s->pict_type, s->droppable);
/* If decoding starts without a usable last reference (first frame is not an
 * I frame, or is a field picture), synthesize a gray dummy reference so
 * motion compensation has something to read from. */
1590 if ((s->last_picture_ptr == NULL ||
1591 s->last_picture_ptr->f.data[0] == NULL) &&
1592 (s->pict_type != AV_PICTURE_TYPE_I ||
1593 s->picture_structure != PICT_FRAME)) {
1594 int h_chroma_shift, v_chroma_shift;
1595 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1596 &h_chroma_shift, &v_chroma_shift);
1597 if (s->pict_type != AV_PICTURE_TYPE_I)
1598 av_log(avctx, AV_LOG_ERROR,
1599 "warning: first frame is no keyframe\n");
1600 else if (s->picture_structure != PICT_FRAME)
1601 av_log(avctx, AV_LOG_INFO,
1602 "allocate dummy last picture for field based first keyframe\n");
1604 /* Allocate a dummy frame */
1605 i = ff_find_unused_picture(s, 0);
1607 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1610 s->last_picture_ptr = &s->picture[i];
1611 s->last_picture_ptr->f.key_frame = 0;
1612 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1613 s->last_picture_ptr = NULL;
/* Fill the dummy with mid-gray (0x80 in all planes). */
1617 memset(s->last_picture_ptr->f.data[0], 0x80,
1618 avctx->height * s->last_picture_ptr->f.linesize[0]);
1619 memset(s->last_picture_ptr->f.data[1], 0x80,
1620 (avctx->height >> v_chroma_shift) *
1621 s->last_picture_ptr->f.linesize[1]);
1622 memset(s->last_picture_ptr->f.data[2], 0x80,
1623 (avctx->height >> v_chroma_shift) *
1624 s->last_picture_ptr->f.linesize[2]);
/* FLV1/H.263 use luma 16 for the dummy instead of 0x80. */
1626 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1627 for(i=0; i<avctx->height; i++)
1628 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
/* Mark both fields of the dummy as fully decoded for frame threading. */
1631 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1632 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same trick for a missing "next" reference before a B frame. */
1634 if ((s->next_picture_ptr == NULL ||
1635 s->next_picture_ptr->f.data[0] == NULL) &&
1636 s->pict_type == AV_PICTURE_TYPE_B) {
1637 /* Allocate a dummy frame */
1638 i = ff_find_unused_picture(s, 0);
1640 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1643 s->next_picture_ptr = &s->picture[i];
1644 s->next_picture_ptr->f.key_frame = 0;
1645 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1646 s->next_picture_ptr = NULL;
1649 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1650 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1653 #if 0 // BUFREF-FIXME
1654 memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1655 memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
/* Refresh the working copies last_picture/next_picture from the pointers. */
1657 if (s->last_picture_ptr) {
1658 ff_mpeg_unref_picture(s, &s->last_picture);
1659 if (s->last_picture_ptr->f.data[0] &&
1660 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1661 s->last_picture_ptr)) < 0)
1664 if (s->next_picture_ptr) {
1665 ff_mpeg_unref_picture(s, &s->next_picture);
1666 if (s->next_picture_ptr->f.data[0] &&
1667 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1668 s->next_picture_ptr)) < 0)
1672 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1673 s->last_picture_ptr->f.data[0]));
/* Field pictures: point data at the selected field and double the strides
 * so field lines are addressed contiguously. */
1675 if (s->picture_structure!= PICT_FRAME) {
1677 for (i = 0; i < 4; i++) {
1678 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1679 s->current_picture.f.data[i] +=
1680 s->current_picture.f.linesize[i];
1682 s->current_picture.f.linesize[i] *= 2;
1683 s->last_picture.f.linesize[i] *= 2;
1684 s->next_picture.f.linesize[i] *= 2;
1688 s->err_recognition = avctx->err_recognition;
1690 /* set dequantizer, we can't do it during init as
1691 * it might change for mpeg4 and we can't do it in the header
1692 * decode as init is not called for mpeg4 there yet */
1693 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1694 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1695 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1696 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1697 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1698 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1700 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1701 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1704 if (s->dct_error_sum) {
1705 av_assert2(s->avctx->noise_reduction && s->encoding);
1706 update_noise_reduction(s);
1709 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1710 return ff_xvmc_field_start(s, avctx);
1715 /* generic function for encode/decode called after a
1716 * frame has been coded/decoded. */
/* NOTE(review): margin numbering is discontinuous — a couple of condition
 * lines and closing braces are missing from this view; comments only added. */
1717 void ff_MPV_frame_end(MpegEncContext *s)
1719 /* redraw edges for the frame if decoding didn't complete */
1720 // just to make sure that all data is rendered.
1721 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1722 ff_xvmc_field_end(s);
/* Pad the picture edges (needed by unrestricted MV prediction) unless a
 * hardware accelerator owns the surfaces or edge emulation is requested. */
1723 } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1724 !s->avctx->hwaccel &&
1725 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1726 s->unrestricted_mv &&
1727 s->current_picture.reference &&
1729 !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1732 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1733 int hshift = desc->log2_chroma_w;
1734 int vshift = desc->log2_chroma_h;
1735 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1736 s->h_edge_pos, s->v_edge_pos,
1737 EDGE_WIDTH, EDGE_WIDTH,
1738 EDGE_TOP | EDGE_BOTTOM);
1739 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1740 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1741 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1742 EDGE_TOP | EDGE_BOTTOM);
1743 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1744 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1745 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1746 EDGE_TOP | EDGE_BOTTOM);
/* Remember state needed by the next frame's rate control / type decisions. */
1751 s->last_pict_type = s->pict_type;
1752 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1753 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1754 s->last_non_b_pict_type = s->pict_type;
1757 /* copy back current_picture variables */
1758 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1759 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1760 s->picture[i] = s->current_picture;
1764 av_assert0(i < MAX_PICTURE_COUNT);
1767 // clear copies, to avoid confusion
1769 memset(&s->last_picture, 0, sizeof(Picture));
1770 memset(&s->next_picture, 0, sizeof(Picture));
1771 memset(&s->current_picture, 0, sizeof(Picture));
1773 s->avctx->coded_frame = &s->current_picture_ptr->f;
/* Signal frame-threading consumers that field 0 of this frame is complete. */
1775 if (s->current_picture.reference)
1776 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1780  * Draw a line from (ex, ey) -> (sx, sy).
1781  * @param w width of the image
1782  * @param h height of the image
1783  * @param stride stride/linesize of the image
1784  * @param color color of the arrow
/* Anti-aliased line drawing (debug motion-vector overlay): steps along the
 * major axis and splits `color` between the two pixels straddling the exact
 * position using 16.16 fixed point.
 * NOTE(review): margin numbering is discontinuous — local declarations, the
 * branch that ensures sx <= ex, and the y-update lines are not visible in
 * this chunk; comments only added. */
1786 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1787 int w, int h, int stride, int color)
1791 sx = av_clip(sx, 0, w - 1);
1792 sy = av_clip(sy, 0, h - 1);
1793 ex = av_clip(ex, 0, w - 1);
1794 ey = av_clip(ey, 0, h - 1);
1796 buf[sy * stride + sx] += color;
/* X-major: iterate x, distribute color vertically. */
1798 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1800 FFSWAP(int, sx, ex);
1801 FFSWAP(int, sy, ey);
1803 buf += sx + sy * stride;
/* f = slope in 16.16 fixed point (ex is the span after rebasing). */
1805 f = ((ey - sy) << 16) / ex;
1806 for (x = 0; x <= ex; x++) {
1808 fr = (x * f) & 0xFFFF;
1809 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1810 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* Y-major: same scheme with axes swapped. */
1814 FFSWAP(int, sx, ex);
1815 FFSWAP(int, sy, ey);
1817 buf += sx + sy * stride;
1820 f = ((ex - sx) << 16) / ey;
1823 for(y= 0; y <= ey; y++){
1825 fr = (y*f) & 0xFFFF;
1826 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1827 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1833  * Draw an arrow from (ex, ey) -> (sx, sy).
1834  * @param w width of the image
1835  * @param h height of the image
1836  * @param stride stride/linesize of the image
1837  * @param color color of the arrow
/* Draws the shaft via draw_line() plus two short head strokes at the start
 * point when the vector is long enough to warrant a head.
 * NOTE(review): dx/dy/rx/ry assignments fall in lines missing from this
 * view; comments only added. */
1839 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1840 int ey, int w, int h, int stride, int color)
/* Clip loosely (±100 px past the frame) — draw_line clips exactly. */
1844 sx = av_clip(sx, -100, w + 100);
1845 sy = av_clip(sy, -100, h + 100);
1846 ex = av_clip(ex, -100, w + 100);
1847 ey = av_clip(ey, -100, h + 100);
/* Only draw a head for vectors longer than 3 pixels. */
1852 if (dx * dx + dy * dy > 3 * 3) {
1855 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1857 // FIXME subpixel accuracy
/* Scale the head strokes to a fixed length relative to the vector. */
1858 rx = ROUNDED_DIV(rx * 3 << 4, length);
1859 ry = ROUNDED_DIV(ry * 3 << 4, length);
/* Two strokes rotated ±90° around the tip at (sx, sy). */
1861 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1862 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1864 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1868  * Print debugging info for the given picture.
/* Two independent debug outputs: (1) a textual per-macroblock dump (skip
 * count, QP, MB type) to the log; (2) a visual overlay drawn directly into
 * `pict` (motion vectors as arrows, QP / MB type as chroma tints, partition
 * boundaries as XOR patterns).
 * NOTE(review): margin numbering is discontinuous — many closing braces and
 * some statements of this function are not visible in this chunk; comments
 * only added, code untouched. */
1870 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1872 int mb_width, int mb_height, int mb_stride, int quarter_sample)
/* Nothing to print for hwaccel surfaces or when no MB metadata exists. */
1874 if (avctx->hwaccel || !p || !p->mb_type
1875 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
1879 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1882 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1883 av_get_picture_type_char(pict->pict_type));
1884 for (y = 0; y < mb_height; y++) {
1885 for (x = 0; x < mb_width; x++) {
1886 if (avctx->debug & FF_DEBUG_SKIP) {
1887 int count = mbskip_table[x + y * mb_stride];
1890 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1892 if (avctx->debug & FF_DEBUG_QP) {
1893 av_log(avctx, AV_LOG_DEBUG, "%2d",
1894 p->qscale_table[x + y * mb_stride]);
1896 if (avctx->debug & FF_DEBUG_MB_TYPE) {
1897 int mb_type = p->mb_type[x + y * mb_stride];
1898 // Type & MV direction
1899 if (IS_PCM(mb_type))
1900 av_log(avctx, AV_LOG_DEBUG, "P");
1901 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1902 av_log(avctx, AV_LOG_DEBUG, "A");
1903 else if (IS_INTRA4x4(mb_type))
1904 av_log(avctx, AV_LOG_DEBUG, "i");
1905 else if (IS_INTRA16x16(mb_type))
1906 av_log(avctx, AV_LOG_DEBUG, "I");
1907 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1908 av_log(avctx, AV_LOG_DEBUG, "d");
1909 else if (IS_DIRECT(mb_type))
1910 av_log(avctx, AV_LOG_DEBUG, "D");
1911 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1912 av_log(avctx, AV_LOG_DEBUG, "g");
1913 else if (IS_GMC(mb_type))
1914 av_log(avctx, AV_LOG_DEBUG, "G");
1915 else if (IS_SKIP(mb_type))
1916 av_log(avctx, AV_LOG_DEBUG, "S");
1917 else if (!USES_LIST(mb_type, 1))
1918 av_log(avctx, AV_LOG_DEBUG, ">");
1919 else if (!USES_LIST(mb_type, 0))
1920 av_log(avctx, AV_LOG_DEBUG, "<");
1922 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1923 av_log(avctx, AV_LOG_DEBUG, "X");
/* Second character: partition shape. */
1927 if (IS_8X8(mb_type))
1928 av_log(avctx, AV_LOG_DEBUG, "+");
1929 else if (IS_16X8(mb_type))
1930 av_log(avctx, AV_LOG_DEBUG, "-");
1931 else if (IS_8X16(mb_type))
1932 av_log(avctx, AV_LOG_DEBUG, "|");
1933 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1934 av_log(avctx, AV_LOG_DEBUG, " ");
1936 av_log(avctx, AV_LOG_DEBUG, "?");
/* Third character: interlacing marker. */
1939 if (IS_INTERLACED(mb_type))
1940 av_log(avctx, AV_LOG_DEBUG, "=");
1942 av_log(avctx, AV_LOG_DEBUG, " ");
1945 av_log(avctx, AV_LOG_DEBUG, "\n");
/* --- Visual overlay path --- */
1949 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1950 (avctx->debug_mv)) {
1951 const int shift = 1 + quarter_sample;
1955 int h_chroma_shift, v_chroma_shift, block_height;
1956 const int width = avctx->width;
1957 const int height = avctx->height;
/* H.264/SVQ3 store motion vectors at 4x4 granularity, others at 8x8. */
1958 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1959 const int mv_stride = (mb_width << mv_sample_log2) +
1960 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1962 *low_delay = 0; // needed to see the vectors without trashing the buffers
1964 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1966 av_frame_make_writable(pict);
1968 pict->opaque = NULL;
1969 ptr = pict->data[0];
1970 block_height = 16 >> v_chroma_shift;
1972 for (mb_y = 0; mb_y < mb_height; mb_y++) {
1974 for (mb_x = 0; mb_x < mb_width; mb_x++) {
1975 const int mb_index = mb_x + mb_y * mb_stride;
/* Motion vector arrows, one pass per requested direction/type. */
1976 if ((avctx->debug_mv) && p->motion_val[0]) {
1978 for (type = 0; type < 3; type++) {
1982 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1983 (pict->pict_type!= AV_PICTURE_TYPE_P))
1988 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1989 (pict->pict_type!= AV_PICTURE_TYPE_B))
1994 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1995 (pict->pict_type!= AV_PICTURE_TYPE_B))
2000 if (!USES_LIST(p->mb_type[mb_index], direction))
/* One arrow per partition; start at the partition center. */
2003 if (IS_8X8(p->mb_type[mb_index])) {
2005 for (i = 0; i < 4; i++) {
2006 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2007 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2008 int xy = (mb_x * 2 + (i & 1) +
2009 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2010 int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
2011 int my = (p->motion_val[direction][xy][1] >> shift) + sy;
2012 draw_arrow(ptr, sx, sy, mx, my, width,
2013 height, pict->linesize[0], 100);
2015 } else if (IS_16X8(p->mb_type[mb_index])) {
2017 for (i = 0; i < 2; i++) {
2018 int sx = mb_x * 16 + 8;
2019 int sy = mb_y * 16 + 4 + 8 * i;
2020 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2021 int mx = (p->motion_val[direction][xy][0] >> shift);
2022 int my = (p->motion_val[direction][xy][1] >> shift);
2024 if (IS_INTERLACED(p->mb_type[mb_index]))
2027 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2028 height, pict->linesize[0], 100);
2030 } else if (IS_8X16(p->mb_type[mb_index])) {
2032 for (i = 0; i < 2; i++) {
2033 int sx = mb_x * 16 + 4 + 8 * i;
2034 int sy = mb_y * 16 + 8;
2035 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2036 int mx = p->motion_val[direction][xy][0] >> shift;
2037 int my = p->motion_val[direction][xy][1] >> shift;
2039 if (IS_INTERLACED(p->mb_type[mb_index]))
2042 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2043 height, pict->linesize[0], 100);
/* Default 16x16 partition: single arrow from the MB center. */
2046 int sx= mb_x * 16 + 8;
2047 int sy= mb_y * 16 + 8;
2048 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2049 int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2050 int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2051 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
/* QP visualization: paint both chroma planes with a brightness derived
 * from the macroblock's qscale. */
2055 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2056 uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2057 0x0101010101010101ULL;
2059 for (y = 0; y < block_height; y++) {
2060 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2061 (block_height * mb_y + y) *
2062 pict->linesize[1]) = c;
2063 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2064 (block_height * mb_y + y) *
2065 pict->linesize[2]) = c;
/* MB-type visualization: map each type class to a U/V color. */
2068 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2070 int mb_type = p->mb_type[mb_index];
2073 #define COLOR(theta, r) \
2074 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2075 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2079 if (IS_PCM(mb_type)) {
2081 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2082 IS_INTRA16x16(mb_type)) {
2084 } else if (IS_INTRA4x4(mb_type)) {
2086 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2088 } else if (IS_DIRECT(mb_type)) {
2090 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2092 } else if (IS_GMC(mb_type)) {
2094 } else if (IS_SKIP(mb_type)) {
2096 } else if (!USES_LIST(mb_type, 1)) {
2098 } else if (!USES_LIST(mb_type, 0)) {
2101 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* Broadcast the chosen U/V bytes across a 64-bit word for fast fills. */
2105 u *= 0x0101010101010101ULL;
2106 v *= 0x0101010101010101ULL;
2107 for (y = 0; y < block_height; y++) {
2108 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2109 (block_height * mb_y + y) * pict->linesize[1]) = u;
2110 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2111 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* Partition boundary markers drawn into luma by XORing 0x80. */
2115 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2116 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2117 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2118 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2119 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2121 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2122 for (y = 0; y < 16; y++)
2123 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2124 pict->linesize[0]] ^= 0x80;
/* For 4x4 MV granularity: mark sub-blocks whose MVs actually differ. */
2126 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2127 int dm = 1 << (mv_sample_log2 - 2);
2128 for (i = 0; i < 4; i++) {
2129 int sx = mb_x * 16 + 8 * (i & 1);
2130 int sy = mb_y * 16 + 8 * (i >> 1);
2131 int xy = (mb_x * 2 + (i & 1) +
2132 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2134 int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2135 if (mv[0] != mv[dm] ||
2136 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2137 for (y = 0; y < 8; y++)
2138 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2139 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2140 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2141 pict->linesize[0]) ^= 0x8080808080808080ULL;
2145 if (IS_INTERLACED(mb_type) &&
2146 avctx->codec->id == AV_CODEC_ID_H264) {
/* Clear the skip count so the overlay is regenerated next frame. */
2150 mbskip_table[mb_index] = 0;
/* Convenience wrapper: forwards the MpegEncContext geometry and tables to
 * the generic ff_print_debug_info2(). */
2156 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2158 ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2159 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/* Export the picture's qscale table as frame side data. A new reference to
 * the existing buffer is taken (no copy) and its view is advanced past the
 * edge rows/columns (offset = 2*mb_stride + 1) so consumers see only the
 * visible macroblocks. Returns 0 on success or a negative AVERROR.
 * NOTE(review): the NULL check preceding the ENOMEM return is in a line
 * missing from this view. */
2162 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2164 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2165 int offset = 2*s->mb_stride + 1;
2167 return AVERROR(ENOMEM);
2168 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2169 ref->size -= offset;
2170 ref->data += offset;
2171 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/* Half-pel motion compensation for one block in lowres decoding mode: split
 * the motion vector into an integer source offset plus a sub-pel fraction
 * (sx/sy), fall back to emulated_edge_mc when the source block crosses the
 * picture edge, then apply the chroma-style interpolation function.
 * NOTE(review): margin numbering is discontinuous — local declarations, the
 * quarter_sample vector halving and the return are not visible in this
 * chunk; comments only added. */
2174 static inline int hpel_motion_lowres(MpegEncContext *s,
2175 uint8_t *dest, uint8_t *src,
2176 int field_based, int field_select,
2177 int src_x, int src_y,
2178 int width, int height, int stride,
2179 int h_edge_pos, int v_edge_pos,
2180 int w, int h, h264_chroma_mc_func *pix_op,
2181 int motion_x, int motion_y)
2183 const int lowres = s->avctx->lowres;
2184 const int op_index = FFMIN(lowres, 3);
/* s_mask keeps the sub-pel bits of a vector at this lowres level. */
2185 const int s_mask = (2 << lowres) - 1;
2189 if (s->quarter_sample) {
2194 sx = motion_x & s_mask;
2195 sy = motion_y & s_mask;
/* Note precedence: these parse as motion >> (lowres + 1) — the integer part
 * of the vector at the reduced resolution. */
2196 src_x += motion_x >> lowres + 1;
2197 src_y += motion_y >> lowres + 1;
2199 src += src_y * stride + src_x;
/* Edge check: the unsigned compare also catches negative src_x/src_y. */
2201 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2202 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2203 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
2204 (h + 1) << field_based, src_x,
2205 src_y << field_based,
2208 src = s->edge_emu_buffer;
/* Rescale the sub-pel fraction to the 1/8-pel range pix_op expects. */
2212 sx = (sx << 2) >> lowres;
2213 sy = (sy << 2) >> lowres;
2216 pix_op[op_index](dest, src, stride, h, sx, sy);
2220 /* apply one mpeg motion vector to the three components */
/* NOTE(review): this extract omits some original lines (gaps in the retained
 * line-number prefixes); the code below is kept verbatim.
 * Motion-compensates one luma block plus both chroma blocks at reduced
 * resolution (s->avctx->lowres), using h264 chroma MC functions for the
 * fractional interpolation. */
2221 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2228 uint8_t **ref_picture,
2229 h264_chroma_mc_func *pix_op,
2230 int motion_x, int motion_y,
2233 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2234 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
2236 const int lowres = s->avctx->lowres;
/* op_index selects the chroma MC function size; clamped to 3 (the table's last entry) */
2237 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2238 const int block_s = 8>>lowres;
/* s_mask keeps the sub-pel fraction bits of a motion vector at this lowres level */
2239 const int s_mask = (2 << lowres) - 1;
2240 const int h_edge_pos = s->h_edge_pos >> lowres;
2241 const int v_edge_pos = s->v_edge_pos >> lowres;
2242 linesize = s->current_picture.f.linesize[0] << field_based;
2243 uvlinesize = s->current_picture.f.linesize[1] << field_based;
2245 // FIXME obviously not perfect but qpel will not work in lowres anyway
2246 if (s->quarter_sample) {
2252 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
/* split the MV into sub-pel fraction (sx/sy) and integer source position */
2255 sx = motion_x & s_mask;
2256 sy = motion_y & s_mask;
2257 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2258 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* chroma MV derivation depends on the container format's chroma MV rules */
2260 if (s->out_format == FMT_H263) {
2261 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2262 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2263 uvsrc_x = src_x >> 1;
2264 uvsrc_y = src_y >> 1;
2265 } else if (s->out_format == FMT_H261) {
2266 // even chroma mv's are full pel in H261
2269 uvsx = (2 * mx) & s_mask;
2270 uvsy = (2 * my) & s_mask;
2271 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2272 uvsrc_y = mb_y * block_s + (my >> lowres);
2274 if(s->chroma_y_shift){
2279 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2280 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2282 if(s->chroma_x_shift){
2286 uvsy = motion_y & s_mask;
2288 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2291 uvsx = motion_x & s_mask;
2292 uvsy = motion_y & s_mask;
2299 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2300 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2301 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* if the read would cross the picture edge, interpolate from a padded copy
 * in edge_emu_buffer instead of reading out of bounds */
2303 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2304 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2305 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2306 linesize >> field_based, 17, 17 + field_based,
2307 src_x, src_y << field_based, h_edge_pos,
2309 ptr_y = s->edge_emu_buffer;
2310 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2311 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2312 s->vdsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9,
2314 uvsrc_x, uvsrc_y << field_based,
2315 h_edge_pos >> 1, v_edge_pos >> 1);
2316 s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9,
2318 uvsrc_x, uvsrc_y << field_based,
2319 h_edge_pos >> 1, v_edge_pos >> 1);
2321 ptr_cr = uvbuf + 16;
2325 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
/* bottom field: step destination and source down one line */
2327 dest_y += s->linesize;
2328 dest_cb += s->uvlinesize;
2329 dest_cr += s->uvlinesize;
2333 ptr_y += s->linesize;
2334 ptr_cb += s->uvlinesize;
2335 ptr_cr += s->uvlinesize;
/* rescale sub-pel fractions to the 1/8-pel range the h264 chroma MC expects */
2338 sx = (sx << 2) >> lowres;
2339 sy = (sy << 2) >> lowres;
2340 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2342 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2343 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2344 uvsx = (uvsx << 2) >> lowres;
2345 uvsy = (uvsy << 2) >> lowres;
2347 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2348 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2351 // FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV (four luma vectors) macroblocks in
 * lowres mode: the four luma MVs are averaged into a single chroma MV
 * (mx/my computed in elided lines), then one block_s x block_s block is
 * interpolated for each of Cb and Cr.
 * NOTE(review): this extract omits some original lines (see line-number gaps). */
2354 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2355 uint8_t *dest_cb, uint8_t *dest_cr,
2356 uint8_t **ref_picture,
2357 h264_chroma_mc_func * pix_op,
2360 const int lowres = s->avctx->lowres;
2361 const int op_index = FFMIN(lowres, 3);
2362 const int block_s = 8 >> lowres;
2363 const int s_mask = (2 << lowres) - 1;
/* chroma planes are half size, hence the extra >> 1 on the edge positions */
2364 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2365 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2366 int emu = 0, src_x, src_y, offset, sx, sy;
2369 if (s->quarter_sample) {
2374 /* In case of 8X8, we construct a single chroma motion vector
2375 with a special rounding */
2376 mx = ff_h263_round_chroma(mx);
2377 my = ff_h263_round_chroma(my);
2381 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2382 src_y = s->mb_y * block_s + (my >> lowres + 1);
2384 offset = src_y * s->uvlinesize + src_x;
2385 ptr = ref_picture[1] + offset;
2386 if (s->flags & CODEC_FLAG_EMU_EDGE) {
/* fall back to the padded edge buffer when the read would leave the picture */
2387 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2388 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2389 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
2390 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2391 ptr = s->edge_emu_buffer;
2395 sx = (sx << 2) >> lowres;
2396 sy = (sy << 2) >> lowres;
2397 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr uses the same offset and sub-pel fraction as Cb */
2399 ptr = ref_picture[2] + offset;
2401 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
2402 src_x, src_y, h_edge_pos, v_edge_pos);
2403 ptr = s->edge_emu_buffer;
2405 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2409 * motion compensation of a single macroblock
2411 * @param dest_y luma destination pointer
2412 * @param dest_cb chroma cb/u destination pointer
2413 * @param dest_cr chroma cr/v destination pointer
2414 * @param dir direction (0->forward, 1->backward)
2415 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2416 * @param pix_op halfpel motion compensation function (average or put normally)
2417 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* Lowres counterpart of ff_MPV_motion: dispatches on s->mv_type
 * (16x16 / 4MV / field / 16x8 / dual-prime style cases visible below) and
 * calls mpeg_motion_lowres / hpel_motion_lowres / chroma_4mv_motion_lowres.
 * NOTE(review): several original lines (case labels, closing braces) were
 * elided from this extract; retained code is verbatim. */
2419 static inline void MPV_motion_lowres(MpegEncContext *s,
2420 uint8_t *dest_y, uint8_t *dest_cb,
2422 int dir, uint8_t **ref_picture,
2423 h264_chroma_mc_func *pix_op)
2427 const int lowres = s->avctx->lowres;
2428 const int block_s = 8 >>lowres;
2433 switch (s->mv_type) {
2435 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2437 ref_picture, pix_op,
2438 s->mv[dir][0][0], s->mv[dir][0][1],
/* 4MV: four independent luma vectors, one per 8x8 (scaled) sub-block */
2444 for (i = 0; i < 4; i++) {
2445 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2446 s->linesize) * block_s,
2447 ref_picture[0], 0, 0,
2448 (2 * mb_x + (i & 1)) * block_s,
2449 (2 * mb_y + (i >> 1)) * block_s,
2450 s->width, s->height, s->linesize,
2451 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2452 block_s, block_s, pix_op,
2453 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate the four luma MVs; chroma_4mv_motion_lowres derives chroma from them */
2455 mx += s->mv[dir][i][0];
2456 my += s->mv[dir][i][1];
2459 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2460 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field MVs: one vector per field within a frame picture */
2464 if (s->picture_structure == PICT_FRAME) {
2466 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2467 1, 0, s->field_select[dir][0],
2468 ref_picture, pix_op,
2469 s->mv[dir][0][0], s->mv[dir][0][1],
2472 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2473 1, 1, s->field_select[dir][1],
2474 ref_picture, pix_op,
2475 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture referencing the opposite parity of the current frame */
2478 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2479 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2480 ref_picture = s->current_picture_ptr->f.data;
2483 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2484 0, 0, s->field_select[dir][0],
2485 ref_picture, pix_op,
2487 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two vectors, each covering half the macroblock height */
2491 for (i = 0; i < 2; i++) {
2492 uint8_t **ref2picture;
2494 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2495 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2496 ref2picture = ref_picture;
2498 ref2picture = s->current_picture_ptr->f.data;
2501 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2502 0, 0, s->field_select[dir][i],
2503 ref2picture, pix_op,
2504 s->mv[dir][i][0], s->mv[dir][i][1] +
2505 2 * block_s * i, block_s, mb_y >> 1);
2507 dest_y += 2 * block_s * s->linesize;
2508 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2509 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2513 if (s->picture_structure == PICT_FRAME) {
2514 for (i = 0; i < 2; i++) {
2516 for (j = 0; j < 2; j++) {
2517 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2519 ref_picture, pix_op,
2520 s->mv[dir][2 * i + j][0],
2521 s->mv[dir][2 * i + j][1],
/* after the first put, subsequent predictions are averaged in */
2524 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2527 for (i = 0; i < 2; i++) {
2528 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2529 0, 0, s->picture_structure != i + 1,
2530 ref_picture, pix_op,
2531 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2532 2 * block_s, mb_y >> 1);
2534 // after put we make avg of the same block
2535 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2537 // opposite parity is always in the same
2538 // frame if this is second field
2539 if (!s->first_field) {
2540 ref_picture = s->current_picture_ptr->f.data;
2551 * find the lowest MB row referenced in the MVs
/* Used by frame-threading (ff_thread_await_progress callers below) to know
 * how far the reference frame must be decoded before this MB can be
 * motion-compensated.  Returns a clamped MB row index, or the last row as a
 * conservative fallback.
 * NOTE(review): the switch cases assigning `mvs` were elided from this
 * extract; retained code is verbatim. */
2553 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2555 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2556 int my, off, i, mvs;
2558 if (s->picture_structure != PICT_FRAME || s->mcsel)
2561 switch (s->mv_type) {
2575 for (i = 0; i < mvs; i++) {
/* normalize half-pel MVs to quarter-pel units so the >> 6 below is uniform */
2576 my = s->mv[dir][i][1]<<qpel_shift;
2577 my_max = FFMAX(my_max, my);
2578 my_min = FFMIN(my_min, my);
/* +63 rounds up: any partial MB row touched counts as referenced */
2581 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2583 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2585 return s->mb_height-1;
2588 /* put block[] to dest[] */
/* Dequantize an intra block, then write (not add) its IDCT to dest. */
2589 static inline void put_dct(MpegEncContext *s,
2590 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2592 s->dct_unquantize_intra(s, block, i, qscale);
2593 s->dsp.idct_put (dest, line_size, block);
2596 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block to dest; skipped when the
 * block has no coded coefficients (block_last_index < 0). */
2597 static inline void add_dct(MpegEncContext *s,
2598 int16_t *block, int i, uint8_t *dest, int line_size)
2600 if (s->block_last_index[i] >= 0) {
2601 s->dsp.idct_add (dest, line_size, block);
/* Inter variant of add_dct: dequantize first, then add the IDCT to dest;
 * no-op when the block has no coded coefficients. */
2605 static inline void add_dequant_dct(MpegEncContext *s,
2606 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2608 if (s->block_last_index[i] >= 0) {
2609 s->dct_unquantize_inter(s, block, i, qscale);
2611 s->dsp.idct_add (dest, line_size, block);
2616 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra prediction state (DC predictors to 1024, AC predictors
 * to 0, coded_block flags) for the current macroblock so that a following
 * intra MB does not predict from stale values.  Luma uses b8_stride
 * addressing (4 8x8 blocks), chroma uses mb_stride (one entry per MB). */
2618 void ff_clean_intra_table_entries(MpegEncContext *s)
2620 int wrap = s->b8_stride;
2621 int xy = s->block_index[0];
/* 1024 is the reset value for DC prediction (visible here; precision
 * handling for the elided luma entry is in lines not retained) */
2624 s->dc_val[0][xy + 1 ] =
2625 s->dc_val[0][xy + wrap] =
2626 s->dc_val[0][xy + 1 + wrap] = 1024;
2628 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2629 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2630 if (s->msmpeg4_version>=3) {
2631 s->coded_block[xy ] =
2632 s->coded_block[xy + 1 ] =
2633 s->coded_block[xy + wrap] =
2634 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one DC/AC entry per macroblock */
2637 wrap = s->mb_stride;
2638 xy = s->mb_x + s->mb_y * wrap;
2640 s->dc_val[2][xy] = 1024;
2642 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2643 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2645 s->mbintra_table[xy]= 0;
2648 /* generic function called after a macroblock has been parsed by the
2649 decoder or after it has been encoded by the encoder.
2651 Important variables used:
2652 s->mb_intra : true if intra macroblock
2653 s->mv_dir : motion vector direction
2654 s->mv_type : motion vector type
2655 s->mv : motion vector
2656 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstructs one macroblock: optional XvMC/debug handling, intra-table
 * maintenance, motion compensation (lowres or full-res path selected by
 * lowres_flag), then dequant+IDCT of the residual blocks.  is_mpeg12 lets
 * the compiler dead-strip non-MPEG-1/2 branches in the specialized
 * instantiations (see ff_MPV_decode_mb below).
 * NOTE(review): many original lines (branch headers, closing braces) were
 * elided from this extract; retained code is verbatim. */
2658 static av_always_inline
2659 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2660 int lowres_flag, int is_mpeg12)
2662 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2663 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2664 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2668 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2669 /* print DCT coefficients */
2671 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2673 for(j=0; j<64; j++){
2674 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2676 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2680 s->current_picture.qscale_table[mb_xy] = s->qscale;
2682 /* update DC predictors for P macroblocks */
2684 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2685 if(s->mbintra_table[mb_xy])
2686 ff_clean_intra_table_entries(s);
2690 s->last_dc[2] = 128 << s->intra_dc_precision;
2693 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2694 s->mbintra_table[mb_xy]=1;
2696 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2697 uint8_t *dest_y, *dest_cb, *dest_cr;
2698 int dct_linesize, dct_offset;
2699 op_pixels_func (*op_pix)[4];
2700 qpel_mc_func (*op_qpix)[16];
2701 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2702 const int uvlinesize = s->current_picture.f.linesize[1];
2703 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2704 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2706 /* avoid copy if macroblock skipped in last frame too */
2707 /* skip only during decoding as we might trash the buffers during encoding a bit */
2709 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2711 if (s->mb_skipped) {
2713 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2715 } else if(!s->current_picture.reference) {
2718 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: blocks interleave by field, so double the stride */
2722 dct_linesize = linesize << s->interlaced_dct;
2723 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2727 dest_cb= s->dest[1];
2728 dest_cr= s->dest[2];
/* non-readable destination: reconstruct into a scratchpad instead */
2730 dest_y = s->b_scratchpad;
2731 dest_cb= s->b_scratchpad+16*linesize;
2732 dest_cr= s->b_scratchpad+32*linesize;
2736 /* motion handling */
2737 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the reference rows we need are decoded */
2740 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2741 if (s->mv_dir & MV_DIR_FORWARD) {
2742 ff_thread_await_progress(&s->last_picture_ptr->tf,
2743 ff_MPV_lowest_referenced_row(s, 0),
2746 if (s->mv_dir & MV_DIR_BACKWARD) {
2747 ff_thread_await_progress(&s->next_picture_ptr->tf,
2748 ff_MPV_lowest_referenced_row(s, 1),
/* lowres MC path: forward prediction is "put", backward then averages */
2754 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2756 if (s->mv_dir & MV_DIR_FORWARD) {
2757 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2758 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2760 if (s->mv_dir & MV_DIR_BACKWARD) {
2761 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* full-resolution MC path, with rounding-mode dependent put functions */
2764 op_qpix= s->me.qpel_put;
2765 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2766 op_pix = s->hdsp.put_pixels_tab;
2768 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2770 if (s->mv_dir & MV_DIR_FORWARD) {
2771 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2772 op_pix = s->hdsp.avg_pixels_tab;
2773 op_qpix= s->me.qpel_avg;
2775 if (s->mv_dir & MV_DIR_BACKWARD) {
2776 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2781 /* skip dequant / idct if we are really late ;) */
2782 if(s->avctx->skip_idct){
2783 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2784 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2785 || s->avctx->skip_idct >= AVDISCARD_ALL)
2789 /* add dct residue */
/* codecs without buffered dequant get dequantize+add in one step */
2790 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2791 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2792 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2793 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2794 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2795 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2797 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2798 if (s->chroma_y_shift){
2799 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2800 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2804 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2805 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2806 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2807 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2810 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2811 add_dct(s, block[0], 0, dest_y , dct_linesize);
2812 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2813 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2814 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2816 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2817 if(s->chroma_y_shift){//Chroma420
2818 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2819 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2822 dct_linesize = uvlinesize << s->interlaced_dct;
2823 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2825 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2826 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2827 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2828 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2829 if(!s->chroma_x_shift){//Chroma444
2830 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2831 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2832 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2833 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2838 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2839 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2842 /* dct only in intra block */
2843 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2844 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2845 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2846 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2847 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2849 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2850 if(s->chroma_y_shift){
2851 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2852 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2856 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2857 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2858 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2859 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* intra blocks already dequantized: plain idct_put */
2863 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2864 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2865 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2866 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2868 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2869 if(s->chroma_y_shift){
2870 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2871 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2874 dct_linesize = uvlinesize << s->interlaced_dct;
2875 dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
2877 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2878 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2879 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2880 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2881 if(!s->chroma_x_shift){//Chroma444
2882 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2883 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2884 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2885 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* copy the scratchpad reconstruction back to the real picture */
2893 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2894 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2895 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Dispatch to one of four compile-time specializations of
 * MPV_decode_mb_internal (lowres x is_mpeg12) so each instantiation can be
 * optimized with its flags as constants. */
2900 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2902 if(s->out_format == FMT_MPEG1) {
2903 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2904 else MPV_decode_mb_internal(s, block, 0, 1);
2907 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2908 else MPV_decode_mb_internal(s, block, 0, 0);
2912 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* Draws picture edges for the finished band (unless an hwaccel/VDPAU/edge
 * emulation path makes that unnecessary), then invokes the user's
 * draw_horiz_band callback with per-plane offsets into the chosen source
 * picture (current for B/low-delay, otherwise last).
 * NOTE(review): some original lines were elided from this extract. */
2914 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2915 Picture *last, int y, int h, int picture_structure,
2916 int first_field, int draw_edges, int low_delay,
2917 int v_edge_pos, int h_edge_pos)
2919 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2920 int hshift = desc->log2_chroma_w;
2921 int vshift = desc->log2_chroma_h;
2922 const int field_pic = picture_structure != PICT_FRAME;
2928 if (!avctx->hwaccel &&
2929 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2932 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2933 int *linesize = cur->f.linesize;
2934 int sides = 0, edge_h;
2935 if (y==0) sides |= EDGE_TOP;
2936 if (y + h >= v_edge_pos)
2937 sides |= EDGE_BOTTOM;
2939 edge_h= FFMIN(h, v_edge_pos - y);
/* pad each plane's border; chroma planes use the subsampled geometry */
2941 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2942 linesize[0], h_edge_pos, edge_h,
2943 EDGE_WIDTH, EDGE_WIDTH, sides);
2944 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2945 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2946 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2947 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2948 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2949 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2952 h = FFMIN(h, avctx->height - y);
2954 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2956 if (avctx->draw_horiz_band) {
2958 int offset[AV_NUM_DATA_POINTERS];
/* pick the picture to hand out: current for B frames / low delay / coded order */
2961 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2962 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2969 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2970 picture_structure == PICT_FRAME &&
2971 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2972 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2975 offset[0]= y * src->linesize[0];
2977 offset[2]= (y >> vshift) * src->linesize[1];
2978 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2984 avctx->draw_horiz_band(avctx, src, offset,
2985 y, picture_structure, h);
/* Thin MpegEncContext wrapper around ff_draw_horiz_band: pulls all the
 * parameters out of s; edges are only drawn for unrestricted-MV,
 * non-intra-only streams. */
2989 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2991 int draw_edges = s->unrestricted_mv && !s->intra_only;
2992 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2993 &s->last_picture, y, h, s->picture_structure,
2994 s->first_field, draw_edges, s->low_delay,
2995 s->v_edge_pos, s->h_edge_pos);
/* Sets up s->block_index[] (per-8x8-block indices into the dc/ac/mv arrays)
 * and s->dest[] (write pointers into the current picture planes) for the
 * macroblock at (s->mb_x, s->mb_y).  mb_size is in log2 pixels and shrinks
 * with lowres. */
2998 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2999 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
3000 const int uvlinesize = s->current_picture.f.linesize[1];
3001 const int mb_size= 4 - s->avctx->lowres;
/* four luma 8x8 blocks (b8_stride addressing), then the two chroma blocks
 * stored past the luma area (mb_stride addressing) */
3003 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3004 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3005 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3006 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3007 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3008 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3009 //block_index is not used by mpeg2, so it is not affected by chroma_format
3011 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
3012 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3013 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3015 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3017 if(s->picture_structure==PICT_FRAME){
3018 s->dest[0] += s->mb_y * linesize << mb_size;
3019 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3020 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: each field advances by half the MB rows */
3022 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3023 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3024 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3025 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3031 * Permute an 8x8 block.
3032 * @param block the block which will be permuted according to the given permutation vector
3033 * @param permutation the permutation vector
3034 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3035 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3036 * (inverse) permutated to scantable order!
/* Two-pass in-place permutation: first copy the (up to last+1) touched
 * coefficients into a temp array (copy loop partly elided in this extract),
 * then scatter them back through permutation[]. */
3038 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3044 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
3046 for(i=0; i<=last; i++){
3047 const int j= scantable[i];
3052 for(i=0; i<=last; i++){
3053 const int j= scantable[i];
3054 const int perm_j= permutation[j];
3055 block[perm_j]= temp[j];
/* Flush callback: drops every picture reference and resets the parser and
 * bitstream-buffer state so decoding can restart cleanly (e.g. after a
 * seek).  Safe to call before init (bails out if s or s->picture is NULL). */
3059 void ff_mpeg_flush(AVCodecContext *avctx){
3061 MpegEncContext *s = avctx->priv_data;
3063 if(s==NULL || s->picture==NULL)
3066 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3067 ff_mpeg_unref_picture(s, &s->picture[i]);
3068 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3070 ff_mpeg_unref_picture(s, &s->current_picture);
3071 ff_mpeg_unref_picture(s, &s->last_picture);
3072 ff_mpeg_unref_picture(s, &s->next_picture);
3074 s->mb_x= s->mb_y= 0;
/* -1 = no start code found yet */
3077 s->parse_context.state= -1;
3078 s->parse_context.frame_start_found= 0;
3079 s->parse_context.overread= 0;
3080 s->parse_context.overread_index= 0;
3081 s->parse_context.index= 0;
3082 s->parse_context.last_index= 0;
3083 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantization: DC is scaled by the y/c DC scale, AC
 * coefficients by qscale * intra_matrix with mismatch control
 * ((level - 1) | 1 forces odd values, per MPEG-1 oddification).
 * NOTE(review): sign handling branches are partly elided in this extract. */
3087 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
3088 int16_t *block, int n, int qscale)
3090 int i, level, nCoeffs;
3091 const uint16_t *quant_matrix;
3093 nCoeffs= s->block_last_index[n];
/* n < 4 are luma blocks, others chroma (matches block layout used above) */
3095 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3096 /* XXX: only mpeg1 */
3097 quant_matrix = s->intra_matrix;
3098 for(i=1;i<=nCoeffs;i++) {
3099 int j= s->intra_scantable.permutated[i];
3104 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3105 level = (level - 1) | 1;
3108 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3109 level = (level - 1) | 1;
/* MPEG-1 inter dequantization: level' = ((2*level + 1) * qscale * matrix) >> 4
 * with oddification ((level - 1) | 1); no special DC handling for inter. */
3116 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
3117 int16_t *block, int n, int qscale)
3119 int i, level, nCoeffs;
3120 const uint16_t *quant_matrix;
3122 nCoeffs= s->block_last_index[n];
3124 quant_matrix = s->inter_matrix;
3125 for(i=0; i<=nCoeffs; i++) {
3126 int j= s->intra_scantable.permutated[i];
3131 level = (((level << 1) + 1) * qscale *
3132 ((int) (quant_matrix[j]))) >> 4;
3133 level = (level - 1) | 1;
3136 level = (((level << 1) + 1) * qscale *
3137 ((int) (quant_matrix[j]))) >> 4;
3138 level = (level - 1) | 1;
/* MPEG-2 intra dequantization: like the MPEG-1 version but without
 * oddification; with alternate_scan all 63 AC coefficients are processed
 * regardless of block_last_index. */
3145 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
3146 int16_t *block, int n, int qscale)
3148 int i, level, nCoeffs;
3149 const uint16_t *quant_matrix;
3151 if(s->alternate_scan) nCoeffs= 63;
3152 else nCoeffs= s->block_last_index[n];
3154 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3155 quant_matrix = s->intra_matrix;
3156 for(i=1;i<=nCoeffs;i++) {
3157 int j= s->intra_scantable.permutated[i];
3162 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3165 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of MPEG-2 intra dequantization; same per-coefficient
 * formula as dct_unquantize_mpeg2_intra_c (the differing mismatch-control
 * bookkeeping lives in lines elided from this extract). */
3172 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
3173 int16_t *block, int n, int qscale)
3175 int i, level, nCoeffs;
3176 const uint16_t *quant_matrix;
3179 if(s->alternate_scan) nCoeffs= 63;
3180 else nCoeffs= s->block_last_index[n];
3182 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3184 quant_matrix = s->intra_matrix;
3185 for(i=1;i<=nCoeffs;i++) {
3186 int j= s->intra_scantable.permutated[i];
3191 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3194 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantization: ((2*level + 1) * qscale * inter_matrix) >> 4,
 * no oddification (unlike MPEG-1); honors alternate_scan like the intra
 * MPEG-2 variants. */
3203 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
3204 int16_t *block, int n, int qscale)
3206 int i, level, nCoeffs;
3207 const uint16_t *quant_matrix;
3210 if(s->alternate_scan) nCoeffs= 63;
3211 else nCoeffs= s->block_last_index[n];
3213 quant_matrix = s->inter_matrix;
3214 for(i=0; i<=nCoeffs; i++) {
3215 int j= s->intra_scantable.permutated[i];
3220 level = (((level << 1) + 1) * qscale *
3221 ((int) (quant_matrix[j]))) >> 4;
3224 level = (((level << 1) + 1) * qscale *
3225 ((int) (quant_matrix[j]))) >> 4;
/* H.263-family intra dequantization: uniform quantizer, AC coefficients
 * reconstructed as level*qmul +/- qadd (sign-dependent, branches partly
 * elided here); DC scaled by y/c DC scale.  qadd = (qscale-1)|1 keeps the
 * offset odd. */
3234 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
3235 int16_t *block, int n, int qscale)
3237 int i, level, qmul, qadd;
3240 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
3245 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3246 qadd = (qscale - 1) | 1;
3253 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3255 for(i=1; i<=nCoeffs; i++) {
3259 level = level * qmul - qadd;
3261 level = level * qmul + qadd;
/* H.263-family inter dequantization: same level*qmul +/- qadd formula as the
 * intra variant but the loop starts at coefficient 0 (no separate DC
 * scaling for inter blocks). */
3268 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
3269 int16_t *block, int n, int qscale)
3271 int i, level, qmul, qadd;
3274 av_assert2(s->block_last_index[n]>=0);
3276 qadd = (qscale - 1) | 1;
3279 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3281 for(i=0; i<=nCoeffs; i++) {
3285 level = level * qmul - qadd;
3287 level = level * qmul + qadd;
3295 * set qscale and update qscale dependent variables.
/* Clamps qscale to the valid range (low clamp elided in this extract, high
 * clamp at 31 visible) and refreshes the derived chroma qscale and DC scale
 * factors from their lookup tables. */
3297 void ff_set_qscale(MpegEncContext * s, int qscale)
3301 else if (qscale > 31)
3305 s->chroma_qscale= s->chroma_qscale_table[qscale];
3307 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3308 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report to frame-threading that all MB rows up to s->mb_y of the current
 * picture are decoded.  Suppressed for B frames, partitioned frames, and
 * after an error (consumers must not trust progress then). */
3311 void ff_MPV_report_decode_progress(MpegEncContext *s)
3313 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3314 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3317 #if CONFIG_ERROR_RESILIENCE
/* Copies the per-frame state the error-resilience module needs out of the
 * MpegEncContext into its ERContext, then starts ER for the frame. */
3318 void ff_mpeg_er_frame_start(MpegEncContext *s)
3320 ERContext *er = &s->er;
3322 er->cur_pic = s->current_picture_ptr;
3323 er->last_pic = s->last_picture_ptr;
3324 er->next_pic = s->next_picture_ptr;
3326 er->pp_time = s->pp_time;
3327 er->pb_time = s->pb_time;
3328 er->quarter_sample = s->quarter_sample;
3329 er->partitioned_frame = s->partitioned_frame;
3331 ff_er_frame_start(er);
3333 #endif /* CONFIG_ERROR_RESILIENCE */