2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
35 #include "h264chroma.h"
38 #include "mpegvideo.h"
41 #include "xvmc_internal.h"
/* Forward declarations: C reference implementations of the per-standard
 * inverse-quantization routines (one intra/inter pair per syntax family:
 * MPEG-1, MPEG-2, H.263).  Pointers to these are installed on the context
 * in ff_dct_common_init() and may later be replaced by platform-optimized
 * versions. */
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
/* bitexact variant chosen when CODEC_FLAG_BITEXACT is set (see
 * ff_dct_common_init()) */
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
/* Default luma-qscale -> chroma-qscale mapping: the identity, i.e. chroma
 * uses the same quantizer as luma.  Installed in ff_MPV_common_defaults(). */
60 static const uint8_t ff_default_chroma_qscale_table[32] = {
61 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
62 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
63 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 intra DC scale, indexed by qscale: constant 8 for every quantizer
 * value (MPEG-1 always codes the DC coefficient with a fixed divisor). */
66 const uint8_t ff_mpeg1_dc_scale_table[128] = {
67 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
68 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 intra DC scale for intra_dc_precision == 1: constant 4
 * (see ff_mpeg2_dc_scale_table[] below for the precision -> table map). */
78 static const uint8_t mpeg2_dc_scale_table1[128] = {
79 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
80 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 intra DC scale for intra_dc_precision == 2: constant 2. */
90 static const uint8_t mpeg2_dc_scale_table2[128] = {
91 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 intra DC scale for intra_dc_precision == 3: constant 1
 * (full 11-bit DC precision, no additional scaling). */
102 static const uint8_t mpeg2_dc_scale_table3[128] = {
103 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
104 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Per-precision DC scale tables, indexed by intra_dc_precision (0..3):
 * scale factors 8, 4, 2, 1 respectively.  Entry 0 reuses the MPEG-1 table
 * since the scale is identical. */
114 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
115 ff_mpeg1_dc_scale_table,
116 mpeg2_dc_scale_table1,
117 mpeg2_dc_scale_table2,
118 mpeg2_dc_scale_table3,
/* Pixel-format list exported for 4:2:0-only codecs.  NOTE(review): the
 * initializer body is not visible in this view — presumably
 * AV_PIX_FMT_YUV420P terminated by AV_PIX_FMT_NONE; confirm against the
 * full file. */
121 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback: reconstruct a single macroblock during error
 * concealment.  Copies the MB parameters chosen by the ER code into the
 * context, recomputes the destination pointers for the current MB position
 * and decodes the block via ff_MPV_decode_mb(). */
126 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
128 int mb_x, int mb_y, int mb_intra, int mb_skipped)
130 MpegEncContext *s = opaque;
133 s->mv_type = mv_type;
134 s->mb_intra = mb_intra;
135 s->mb_skipped = mb_skipped;
138 memcpy(s->mv, mv, sizeof(*mv));
140 ff_init_block_index(s);
141 ff_update_block_index(s);
143 s->dsp.clear_blocks(s->block[0]);
/* dest[] recomputed from mb_x/mb_y: luma is 16x16, chroma scaled by the
 * chroma shifts of the active pixel format */
145 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
146 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
147 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
150 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
151 ff_MPV_decode_mb(s, s->block);
154 /* init common dct for both encoder and decoder */
/* Initializes the DSP helper contexts, selects the C inverse-quantization
 * routines (possibly overridden per-architecture below), and builds the
 * permutated scan tables matching the IDCT's coefficient permutation. */
155 av_cold int ff_dct_common_init(MpegEncContext *s)
157 ff_dsputil_init(&s->dsp, s->avctx);
158 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
159 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
160 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
162 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
163 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
164 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
165 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
166 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* bitexact mode swaps in the exact-spec MPEG-2 intra dequantizer */
167 if (s->flags & CODEC_FLAG_BITEXACT)
168 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
169 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* per-architecture overrides (each guarded by ARCH_* in the full file) */
172 ff_MPV_common_init_axp(s);
174 ff_MPV_common_init_arm(s);
176 ff_MPV_common_init_bfin(s);
178 ff_MPV_common_init_ppc(s);
180 ff_MPV_common_init_x86(s);
182 /* load & permutate scantables
183 * note: only wmv uses different ones
185 if (s->alternate_scan) {
186 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
187 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
189 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
190 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
192 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocates the linesize-dependent scratch buffers (edge emulation + motion
 * estimation scratchpads).  On failure frees what was allocated and returns
 * AVERROR(ENOMEM). */
198 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
200 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
202 // edge emu needs blocksize + filter length - 1
203 // (= 17x17 for halfpel / 21x21 for h264)
204 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
205 // at uvlinesize. It supports only YUV420 so 24x24 is enough
206 // linesize * interlaced * MBsize
207 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
210 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
/* the remaining scratch pointers alias the same allocation (they are never
 * used simultaneously); obmc gets a 16-byte offset into it */
212 s->me.temp = s->me.scratchpad;
213 s->rd_scratchpad = s->me.scratchpad;
214 s->b_scratchpad = s->me.scratchpad;
215 s->obmc_scratchpad = s->me.scratchpad + 16;
219 av_freep(&s->edge_emu_buffer);
220 return AVERROR(ENOMEM);
224 * Allocate a frame buffer
/* Obtains pixel buffers for @pic — via the thread-aware get_buffer path for
 * normal codecs, or via the default allocator for the WM image/screen codecs
 * whose internal buffers differ from user-visible dimensions.  Also
 * allocates hwaccel private data and validates that strides are consistent
 * with the context. */
226 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
231 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
232 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
233 s->codec_id != AV_CODEC_ID_MSS2)
234 r = ff_thread_get_buffer(s->avctx, &pic->tf,
235 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* WM image codecs: bypass user get_buffer(), allocate internally */
237 pic->f.width = s->avctx->width;
238 pic->f.height = s->avctx->height;
239 pic->f.format = s->avctx->pix_fmt;
240 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
243 if (r < 0 || !pic->f.data[0]) {
244 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
249 if (s->avctx->hwaccel) {
250 assert(!pic->hwaccel_picture_private);
251 if (s->avctx->hwaccel->priv_data_size) {
252 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
253 if (!pic->hwaccel_priv_buf) {
254 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
257 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* a stride change between frames would invalidate all precomputed offsets */
261 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
262 s->uvlinesize != pic->f.linesize[1])) {
263 av_log(s->avctx, AV_LOG_ERROR,
264 "get_buffer() failed (stride changed)\n");
265 ff_mpeg_unref_picture(s, pic);
/* U and V planes must share a stride; the code indexes both with uvlinesize */
269 if (pic->f.linesize[1] != pic->f.linesize[2]) {
270 av_log(s->avctx, AV_LOG_ERROR,
271 "get_buffer() failed (uv stride mismatch)\n");
272 ff_mpeg_unref_picture(s, pic);
276 if (!s->edge_emu_buffer &&
277 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
278 av_log(s->avctx, AV_LOG_ERROR,
279 "get_buffer() failed to allocate context scratch buffers.\n");
280 ff_mpeg_unref_picture(s, pic);
/* Releases all per-picture side-data buffers (qscale/mb_type/motion tables
 * etc.) and resets the recorded allocation dimensions so a later
 * ff_alloc_picture() will reallocate them. */
287 static void free_picture_tables(Picture *pic)
291 pic->alloc_mb_width =
292 pic->alloc_mb_height = 0;
294 av_buffer_unref(&pic->mb_var_buf);
295 av_buffer_unref(&pic->mc_mb_var_buf);
296 av_buffer_unref(&pic->mb_mean_buf);
297 av_buffer_unref(&pic->mbskip_table_buf);
298 av_buffer_unref(&pic->qscale_table_buf);
299 av_buffer_unref(&pic->mb_type_buf);
301 for (i = 0; i < 2; i++) {
302 av_buffer_unref(&pic->motion_val_buf[i]);
303 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocates the per-picture side-data buffers sized from the current
 * mb/b8 strides.  Encoder-only tables (mb_var etc.) and motion tables are
 * allocated conditionally.  Returns AVERROR(ENOMEM) on any failure; partial
 * allocations are cleaned up by the caller via free_picture_tables(). */
307 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
309 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
310 const int mb_array_size = s->mb_stride * s->mb_height;
311 const int b8_array_size = s->b8_stride * s->mb_height * 2;
315 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
316 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
317 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
319 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
320 return AVERROR(ENOMEM);
/* encoding-only statistics tables */
323 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
324 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
325 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
326 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
327 return AVERROR(ENOMEM);
/* motion vector / reference index tables, one set per direction */
330 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
331 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
332 int ref_index_size = 4 * mb_array_size;
334 for (i = 0; mv_size && i < 2; i++) {
335 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
336 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
337 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
338 return AVERROR(ENOMEM);
/* remember the geometry these tables were sized for */
342 pic->alloc_mb_width = s->mb_width;
343 pic->alloc_mb_height = s->mb_height;
/* Ensures every refcounted per-picture table is writable (un-shares buffers
 * that are still referenced by another picture/thread).  The MAKE_WRITABLE
 * macro skips NULL buffers and returns AVERROR(ENOMEM) on failure. */
348 static int make_tables_writable(Picture *pic)
351 #define MAKE_WRITABLE(table) \
354 (ret = av_buffer_make_writable(&pic->table)) < 0)\
358 MAKE_WRITABLE(mb_var_buf);
359 MAKE_WRITABLE(mc_mb_var_buf);
360 MAKE_WRITABLE(mb_mean_buf);
361 MAKE_WRITABLE(mbskip_table_buf);
362 MAKE_WRITABLE(qscale_table_buf);
363 MAKE_WRITABLE(mb_type_buf);
365 for (i = 0; i < 2; i++) {
366 MAKE_WRITABLE(motion_val_buf[i]);
367 MAKE_WRITABLE(ref_index_buf[i]);
374 * Allocate a Picture.
375 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Also (re)allocates the per-picture side tables when missing or sized for
 * different dimensions, then wires the convenience pointers (qscale_table,
 * mb_type, motion_val, ...) into the refcounted buffers.  The +2*mb_stride+1
 * offsets skip the guard row/column used by prediction. */
377 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* drop stale tables if the frame geometry changed since they were made */
381 if (pic->qscale_table_buf)
382 if ( pic->alloc_mb_width != s->mb_width
383 || pic->alloc_mb_height != s->mb_height)
384 free_picture_tables(pic);
387 av_assert0(pic->f.data[0]);
390 av_assert0(!pic->f.data[0]);
392 if (alloc_frame_buffer(s, pic) < 0)
395 s->linesize = pic->f.linesize[0];
396 s->uvlinesize = pic->f.linesize[1];
399 if (!pic->qscale_table_buf)
400 ret = alloc_picture_tables(s, pic);
402 ret = make_tables_writable(pic);
407 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
408 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
409 pic->mb_mean = pic->mb_mean_buf->data;
412 pic->mbskip_table = pic->mbskip_table_buf->data;
413 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
414 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
416 if (pic->motion_val_buf[0]) {
417 for (i = 0; i < 2; i++) {
418 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
419 pic->ref_index[i] = pic->ref_index_buf[i]->data;
425 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
426 ff_mpeg_unref_picture(s, pic);
427 free_picture_tables(pic);
428 return AVERROR(ENOMEM);
432 * Deallocate a picture.
/* Releases the frame buffer and hwaccel data, then zeroes every Picture
 * field located after mb_mean (offsetof trick) so refcounted table pointers
 * before that offset survive unless needs_realloc forces a table free. */
434 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
436 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
439 /* WM Image / Screen codecs allocate internal buffers with different
440 * dimensions / colorspaces; ignore user-defined callbacks for these. */
441 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
442 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
443 s->codec_id != AV_CODEC_ID_MSS2)
444 ff_thread_release_buffer(s->avctx, &pic->tf);
446 av_frame_unref(&pic->f);
448 av_buffer_unref(&pic->hwaccel_priv_buf);
450 if (pic->needs_realloc)
451 free_picture_tables(pic);
/* wipe everything past the last refcounted-table pointer */
453 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Re-references every side-data buffer of @src into @dst (frame threading):
 * existing dst references are dropped only when they point at a different
 * underlying buffer; on a failed ref all dst tables are freed and
 * AVERROR(ENOMEM) is returned.  Plain data pointers are copied afterwards. */
456 static int update_picture_tables(Picture *dst, Picture *src)
460 #define UPDATE_TABLE(table)\
463 (!dst->table || dst->table->buffer != src->table->buffer)) {\
464 av_buffer_unref(&dst->table);\
465 dst->table = av_buffer_ref(src->table);\
467 free_picture_tables(dst);\
468 return AVERROR(ENOMEM);\
473 UPDATE_TABLE(mb_var_buf);
474 UPDATE_TABLE(mc_mb_var_buf);
475 UPDATE_TABLE(mb_mean_buf);
476 UPDATE_TABLE(mbskip_table_buf);
477 UPDATE_TABLE(qscale_table_buf);
478 UPDATE_TABLE(mb_type_buf);
479 for (i = 0; i < 2; i++) {
480 UPDATE_TABLE(motion_val_buf[i]);
481 UPDATE_TABLE(ref_index_buf[i]);
/* mirror the raw pointers that alias into the buffers refed above */
484 dst->mb_var = src->mb_var;
485 dst->mc_mb_var = src->mc_mb_var;
486 dst->mb_mean = src->mb_mean;
487 dst->mbskip_table = src->mbskip_table;
488 dst->qscale_table = src->qscale_table;
489 dst->mb_type = src->mb_type;
490 for (i = 0; i < 2; i++) {
491 dst->motion_val[i] = src->motion_val[i];
492 dst->ref_index[i] = src->ref_index[i];
495 dst->alloc_mb_width = src->alloc_mb_width;
496 dst->alloc_mb_height = src->alloc_mb_height;
/* Creates a new reference of @src in @dst: frame data (via
 * ff_thread_ref_frame), side tables, hwaccel private data and the scalar
 * bookkeeping fields.  @dst must be empty on entry; on any failure it is
 * fully unreferenced again (see the fail label at the bottom). */
501 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
505 av_assert0(!dst->f.buf[0]);
506 av_assert0(src->f.buf[0]);
510 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
514 ret = update_picture_tables(dst, src);
518 if (src->hwaccel_picture_private) {
519 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
520 if (!dst->hwaccel_priv_buf)
522 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
525 dst->field_picture = src->field_picture;
526 dst->mb_var_sum = src->mb_var_sum;
527 dst->mc_mb_var_sum = src->mc_mb_var_sum;
528 dst->b_frame_score = src->b_frame_score;
529 dst->needs_realloc = src->needs_realloc;
530 dst->reference = src->reference;
531 dst->shared = src->shared;
535 ff_mpeg_unref_picture(s, dst);
/* Allocates the per-slice-thread state: motion-estimation maps, optional
 * noise-reduction error accumulator, DCT block storage and (for H.263-family
 * codecs) the AC prediction values.  Returns -1 on allocation failure;
 * partially allocated members are released later by ff_MPV_common_end(). */
539 static int init_duplicate_context(MpegEncContext *s)
541 int y_size = s->b8_stride * (2 * s->mb_height + 1);
542 int c_size = s->mb_stride * (s->mb_height + 1);
543 int yc_size = y_size + 2 * c_size;
551 s->obmc_scratchpad = NULL;
554 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
555 ME_MAP_SIZE * sizeof(uint32_t), fail)
556 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
557 ME_MAP_SIZE * sizeof(uint32_t), fail)
558 if (s->avctx->noise_reduction) {
559 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
560 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coeffs, double-buffered */
563 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
564 s->block = s->blocks[0];
566 for (i = 0; i < 12; i++) {
567 s->pblocks[i] = &s->block[i];
570 if (s->out_format == FMT_H263) {
/* AC prediction values: Y plane followed by the two chroma planes; the
 * +stride+1 offsets skip the guard row/column */
572 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
573 yc_size * sizeof(int16_t) * 16, fail);
574 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
575 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
576 s->ac_val[2] = s->ac_val[1] + c_size;
581 return -1; // free() through ff_MPV_common_end()
/* Frees the per-slice-thread state allocated by init_duplicate_context()
 * and ff_mpv_frame_size_alloc().  obmc_scratchpad aliases me.scratchpad,
 * so it is only NULLed, not freed separately. */
584 static void free_duplicate_context(MpegEncContext *s)
589 av_freep(&s->edge_emu_buffer);
590 av_freep(&s->me.scratchpad);
594 s->obmc_scratchpad = NULL;
596 av_freep(&s->dct_error_sum);
597 av_freep(&s->me.map);
598 av_freep(&s->me.score_map);
599 av_freep(&s->blocks);
600 av_freep(&s->ac_val_base);
/* Copies the thread-local pointer/state fields from @src to @bak; used by
 * ff_update_duplicate_context() to preserve them across the wholesale
 * memcpy of the context.  (Additional COPY() lines exist in the full file.) */
604 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
606 #define COPY(a) bak->a = src->a
607 COPY(edge_emu_buffer);
612 COPY(obmc_scratchpad);
619 COPY(me.map_generation);
/* Synchronizes a slice-thread context with the master: copies the whole
 * struct, then restores dst's thread-local members (saved via
 * backup_duplicate_context) and refixes the pblocks pointers to dst's own
 * block storage. */
631 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
635 // FIXME copy only needed parts
637 backup_duplicate_context(&bak, dst);
638 memcpy(dst, src, sizeof(MpegEncContext));
639 backup_duplicate_context(dst, &bak);
640 for (i = 0; i < 12; i++) {
641 dst->pblocks[i] = &dst->block[i];
643 if (!dst->edge_emu_buffer &&
644 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
645 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
646 "scratch buffers.\n");
649 // STOP_TIMER("update_duplicate_context")
650 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading state transfer: copies decoding state from the source
 * thread's context (s1) into this thread's context (s).  Handles first-time
 * initialization, resolution changes, picture references, the bitstream
 * buffer and assorted inter-frame bookkeeping. */
654 int ff_mpeg_update_thread_context(AVCodecContext *dst,
655 const AVCodecContext *src)
658 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
665 // FIXME can parameters change on I-frames?
666 // in that case dst may need a reinit
667 if (!s->context_initialized) {
/* first call: clone the whole context, then re-own thread-local members */
668 memcpy(s, s1, sizeof(MpegEncContext));
671 s->bitstream_buffer = NULL;
672 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
674 if (s1->context_initialized){
675 // s->picture_range_start += MAX_PICTURE_COUNT;
676 // s->picture_range_end += MAX_PICTURE_COUNT;
677 if((ret = ff_MPV_common_init(s)) < 0){
678 memset(s, 0, sizeof(MpegEncContext));
685 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
686 s->context_reinit = 0;
687 s->height = s1->height;
688 s->width = s1->width;
689 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
693 s->avctx->coded_height = s1->avctx->coded_height;
694 s->avctx->coded_width = s1->avctx->coded_width;
695 s->avctx->width = s1->avctx->width;
696 s->avctx->height = s1->avctx->height;
698 s->coded_picture_number = s1->coded_picture_number;
699 s->picture_number = s1->picture_number;
700 s->input_picture_number = s1->input_picture_number;
702 av_assert0(!s->picture || s->picture != s1->picture);
/* re-reference the whole picture pool from the source thread */
704 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
705 ff_mpeg_unref_picture(s, &s->picture[i]);
706 if (s1->picture[i].f.data[0] &&
707 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
711 #define UPDATE_PICTURE(pic)\
713 ff_mpeg_unref_picture(s, &s->pic);\
714 if (s1->pic.f.data[0])\
715 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
717 ret = update_picture_tables(&s->pic, &s1->pic);\
722 UPDATE_PICTURE(current_picture);
723 UPDATE_PICTURE(last_picture);
724 UPDATE_PICTURE(next_picture);
/* translate s1's picture pointers into s's own picture array */
726 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
727 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
728 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
730 // Error/bug resilience
731 s->next_p_frame_damaged = s1->next_p_frame_damaged;
732 s->workaround_bugs = s1->workaround_bugs;
733 s->padding_bug_score = s1->padding_bug_score;
/* bulk-copy the MPEG-4 field range [time_increment_bits, shape) */
736 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
737 (char *) &s1->shape - (char *) &s1->time_increment_bits);
740 s->max_b_frames = s1->max_b_frames;
741 s->low_delay = s1->low_delay;
742 s->droppable = s1->droppable;
744 // DivX handling (doesn't work)
745 s->divx_packed = s1->divx_packed;
747 if (s1->bitstream_buffer) {
748 if (s1->bitstream_buffer_size +
749 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
/* NOTE(review): av_fast_malloc() can leave the buffer NULL on OOM; the
 * memcpy below would then dereference NULL — confirm the full file checks
 * this, or add a check upstream. */
750 av_fast_malloc(&s->bitstream_buffer,
751 &s->allocated_bitstream_buffer_size,
752 s1->allocated_bitstream_buffer_size);
753 s->bitstream_buffer_size = s1->bitstream_buffer_size;
754 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
755 s1->bitstream_buffer_size);
756 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
757 FF_INPUT_BUFFER_PADDING_SIZE);
760 // linesize dependend scratch buffer allocation
761 if (!s->edge_emu_buffer)
763 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
764 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
765 "scratch buffers.\n");
766 return AVERROR(ENOMEM);
769 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
770 "be allocated due to unknown size.\n");
773 // MPEG2/interlacing info
774 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
775 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
777 if (!s1->first_field) {
778 s->last_pict_type = s1->pict_type;
779 if (s1->current_picture_ptr)
780 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
782 if (s1->pict_type != AV_PICTURE_TYPE_B) {
783 s->last_non_b_pict_type = s1->pict_type;
791 * Set the given MpegEncContext to common defaults
792 * (same for encoding and decoding).
793 * The changed fields will not depend upon the
794 * prior state of the MpegEncContext.
796 void ff_MPV_common_defaults(MpegEncContext *s)
/* MPEG-1 DC scaling and identity chroma qscale are the baseline; codecs
 * override these later as needed */
798 s->y_dc_scale_table =
799 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
800 s->chroma_qscale_table = ff_default_chroma_qscale_table;
801 s->progressive_frame = 1;
802 s->progressive_sequence = 1;
803 s->picture_structure = PICT_FRAME;
805 s->coded_picture_number = 0;
806 s->picture_number = 0;
807 s->input_picture_number = 0;
809 s->picture_in_gop_number = 0;
814 s->slice_context_count = 1;
818 * Set the given MpegEncContext to defaults for decoding.
819 * the changed fields will not depend upon
820 * the prior state of the MpegEncContext.
/* Currently just the common defaults; kept as a separate entry point so
 * decoder-only defaults can be added without touching encoders. */
822 void ff_MPV_decode_defaults(MpegEncContext *s)
824 ff_MPV_common_defaults(s);
/* Populates the error-resilience context from the MpegEncContext geometry,
 * allocates its status/temp tables and registers mpeg_er_decode_mb() as the
 * concealment callback.  Returns AVERROR(ENOMEM) on allocation failure. */
827 static int init_er(MpegEncContext *s)
829 ERContext *er = &s->er;
830 int mb_array_size = s->mb_height * s->mb_stride;
833 er->avctx = s->avctx;
/* share geometry and lookup tables with the main context */
836 er->mb_index2xy = s->mb_index2xy;
837 er->mb_num = s->mb_num;
838 er->mb_width = s->mb_width;
839 er->mb_height = s->mb_height;
840 er->mb_stride = s->mb_stride;
841 er->b8_stride = s->b8_stride;
843 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
844 er->error_status_table = av_mallocz(mb_array_size);
845 if (!er->er_temp_buffer || !er->error_status_table)
848 er->mbskip_table = s->mbskip_table;
849 er->mbintra_table = s->mbintra_table;
851 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
852 er->dc_val[i] = s->dc_val[i];
854 er->decode_mb = mpeg_er_decode_mb;
859 av_freep(&er->er_temp_buffer);
860 av_freep(&er->error_status_table);
861 return AVERROR(ENOMEM);
865 * Initialize and allocates MpegEncContext fields dependent on the resolution.
867 static int init_context_frame(MpegEncContext *s)
869 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* derive macroblock / 8x8-block / 4x4-block strides from the width;
 * +1 columns provide guard space for prediction at the right edge */
871 s->mb_width = (s->width + 15) / 16;
872 s->mb_stride = s->mb_width + 1;
873 s->b8_stride = s->mb_width * 2 + 1;
874 s->b4_stride = s->mb_width * 4 + 1;
875 mb_array_size = s->mb_height * s->mb_stride;
876 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
878 /* set default edge pos, will be overriden
879 * in decode_header if needed */
880 s->h_edge_pos = s->mb_width * 16;
881 s->v_edge_pos = s->mb_height * 16;
883 s->mb_num = s->mb_width * s->mb_height;
888 s->block_wrap[3] = s->b8_stride;
890 s->block_wrap[5] = s->mb_stride;
892 y_size = s->b8_stride * (2 * s->mb_height + 1);
893 c_size = s->mb_stride * (s->mb_height + 1);
894 yc_size = y_size + 2 * c_size;
896 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
897 for (y = 0; y < s->mb_height; y++)
898 for (x = 0; x < s->mb_width; x++)
899 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
901 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
904 /* Allocate MV tables */
905 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
906 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
907 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
908 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
909 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
910 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
/* public table pointers skip the first guard row/column of the base */
911 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
912 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
913 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
914 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
915 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
916 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
918 /* Allocate MB type table */
919 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
921 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
923 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
924 mb_array_size * sizeof(float), fail);
925 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
926 mb_array_size * sizeof(float), fail);
930 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
931 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
932 /* interlaced direct mode decoding tables */
933 for (i = 0; i < 2; i++) {
935 for (j = 0; j < 2; j++) {
936 for (k = 0; k < 2; k++) {
937 FF_ALLOCZ_OR_GOTO(s->avctx,
938 s->b_field_mv_table_base[i][j][k],
939 mv_table_size * 2 * sizeof(int16_t),
941 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
944 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
945 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
946 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
948 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
951 if (s->out_format == FMT_H263) {
/* coded-block pattern prediction state (CBPY etc.) */
953 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
954 s->coded_block = s->coded_block_base + s->b8_stride + 1;
956 /* cbp, ac_pred, pred_dir */
957 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
958 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
961 if (s->h263_pred || s->h263_plus || !s->encoding) {
963 // MN: we need these for error resilience of intra-frames
964 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
965 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
966 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
967 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 = neutral DC predictor (128 << 3) */
968 for (i = 0; i < yc_size; i++)
969 s->dc_val_base[i] = 1024;
972 /* which mb is a intra block */
973 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
974 memset(s->mbintra_table, 1, mb_array_size);
976 /* init macroblock skip table */
977 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
978 // Note the + 1 is for a quicker mpeg4 slice_end detection
982 return AVERROR(ENOMEM);
986 * init common structure for both encoder and decoder.
987 * this assumes that some variables like width/height are already set
989 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* slice threading: one context per slice thread, else a single context */
992 int nb_slices = (HAVE_THREADS &&
993 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
994 s->avctx->thread_count : 1;
996 if (s->encoding && s->avctx->slices)
997 nb_slices = s->avctx->slices;
/* interlaced MPEG-2 needs mb_height rounded to field-pair granularity */
999 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1000 s->mb_height = (s->height + 31) / 32 * 2;
1002 s->mb_height = (s->height + 15) / 16;
1004 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1005 av_log(s->avctx, AV_LOG_ERROR,
1006 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* clamp thread/slice count to what the frame geometry allows */
1010 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1013 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1015 max_slices = MAX_THREADS;
1016 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1017 " reducing to %d\n", nb_slices, max_slices);
1018 nb_slices = max_slices;
1021 if ((s->width || s->height) &&
1022 av_image_check_size(s->width, s->height, 0, s->avctx))
1025 ff_dct_common_init(s);
1027 s->flags = s->avctx->flags;
1028 s->flags2 = s->avctx->flags2;
1030 /* set chroma shifts */
1031 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1033 &s->chroma_y_shift);
1035 /* convert fourcc to upper case */
1036 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1038 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1040 s->avctx->coded_frame = &s->current_picture.f;
/* encoder-only allocations (stats, quant matrices, reorder queues) */
1043 if (s->msmpeg4_version) {
1044 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1045 2 * 2 * (MAX_LEVEL + 1) *
1046 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1048 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1050 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
1051 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
1052 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
1053 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1054 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1055 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1056 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1057 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1059 if (s->avctx->noise_reduction) {
1060 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
/* picture pool shared by encoder and decoder */
1064 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1065 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1066 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1067 avcodec_get_frame_defaults(&s->picture[i].f);
1069 memset(&s->next_picture, 0, sizeof(s->next_picture));
1070 memset(&s->last_picture, 0, sizeof(s->last_picture));
1071 memset(&s->current_picture, 0, sizeof(s->current_picture));
1072 avcodec_get_frame_defaults(&s->next_picture.f);
1073 avcodec_get_frame_defaults(&s->last_picture.f);
1074 avcodec_get_frame_defaults(&s->current_picture.f);
1076 if (init_context_frame(s))
1079 s->parse_context.state = -1;
1081 s->context_initialized = 1;
1082 s->thread_context[0] = s;
1084 // if (s->width && s->height) {
/* slice threads get a clone of this context plus their own duplicate state
 * and a disjoint [start_mb_y, end_mb_y) row range */
1085 if (nb_slices > 1) {
1086 for (i = 1; i < nb_slices; i++) {
1087 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1088 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1091 for (i = 0; i < nb_slices; i++) {
1092 if (init_duplicate_context(s->thread_context[i]) < 0)
1094 s->thread_context[i]->start_mb_y =
1095 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1096 s->thread_context[i]->end_mb_y =
1097 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1100 if (init_duplicate_context(s) < 0)
1103 s->end_mb_y = s->mb_height;
1105 s->slice_context_count = nb_slices;
1110 ff_MPV_common_end(s);
1115 * Frees and resets MpegEncContext fields depending on the resolution.
1116 * Is used during resolution changes to avoid a full reinitialization of the
/* Inverse of init_context_frame(): frees every resolution-dependent table
 * and NULLs the derived pointers so a later reinit starts clean. */
1119 static int free_context_frame(MpegEncContext *s)
1123 av_freep(&s->mb_type);
1124 av_freep(&s->p_mv_table_base);
1125 av_freep(&s->b_forw_mv_table_base);
1126 av_freep(&s->b_back_mv_table_base);
1127 av_freep(&s->b_bidir_forw_mv_table_base);
1128 av_freep(&s->b_bidir_back_mv_table_base);
1129 av_freep(&s->b_direct_mv_table_base);
/* the non-base pointers were offsets into the freed allocations */
1130 s->p_mv_table = NULL;
1131 s->b_forw_mv_table = NULL;
1132 s->b_back_mv_table = NULL;
1133 s->b_bidir_forw_mv_table = NULL;
1134 s->b_bidir_back_mv_table = NULL;
1135 s->b_direct_mv_table = NULL;
1136 for (i = 0; i < 2; i++) {
1137 for (j = 0; j < 2; j++) {
1138 for (k = 0; k < 2; k++) {
1139 av_freep(&s->b_field_mv_table_base[i][j][k]);
1140 s->b_field_mv_table[i][j][k] = NULL;
1142 av_freep(&s->b_field_select_table[i][j]);
1143 av_freep(&s->p_field_mv_table_base[i][j]);
1144 s->p_field_mv_table[i][j] = NULL;
1146 av_freep(&s->p_field_select_table[i]);
1149 av_freep(&s->dc_val_base);
1150 av_freep(&s->coded_block_base);
1151 av_freep(&s->mbintra_table);
1152 av_freep(&s->cbp_table);
1153 av_freep(&s->pred_dir_table);
1155 av_freep(&s->mbskip_table);
1157 av_freep(&s->er.error_status_table);
1158 av_freep(&s->er.er_temp_buffer);
1159 av_freep(&s->mb_index2xy);
1160 av_freep(&s->lambda_table);
1162 av_freep(&s->cplx_tab);
1163 av_freep(&s->bits_tab);
1165 s->linesize = s->uvlinesize = 0;
/* Tear down and rebuild the resolution-dependent parts of the context.
 * NOTE(review): the listing elides original lines here (numbering gaps);
 * error-handling jumps and closing braces are not visible. */
1170 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* First free per-slice duplicate contexts (index 0 is the main context
 * and is not av_freep'd, only its duplicate state is released). */
1174 if (s->slice_context_count > 1) {
1175 for (i = 0; i < s->slice_context_count; i++) {
1176 free_duplicate_context(s->thread_context[i]);
1178 for (i = 1; i < s->slice_context_count; i++) {
1179 av_freep(&s->thread_context[i]);
1182 free_duplicate_context(s);
1184 if ((err = free_context_frame(s)) < 0)
/* Force every picture to be reallocated at the new size lazily. */
1188 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1189 s->picture[i].needs_realloc = 1;
1192 s->last_picture_ptr =
1193 s->next_picture_ptr =
1194 s->current_picture_ptr = NULL;
/* Interlaced MPEG-2 rounds mb_height to field-pair granularity (32). */
1197 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1198 s->mb_height = (s->height + 31) / 32 * 2;
1200 s->mb_height = (s->height + 15) / 16;
1202 if ((s->width || s->height) &&
1203 av_image_check_size(s->width, s->height, 0, s->avctx))
1204 return AVERROR_INVALIDDATA;
1206 if ((err = init_context_frame(s)))
1209 s->thread_context[0] = s;
/* Recreate the per-slice contexts and split the MB rows between them,
 * rounding so each slice gets roughly mb_height / nb_slices rows. */
1211 if (s->width && s->height) {
1212 int nb_slices = s->slice_context_count;
1213 if (nb_slices > 1) {
1214 for (i = 1; i < nb_slices; i++) {
1215 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1216 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1219 for (i = 0; i < nb_slices; i++) {
1220 if (init_duplicate_context(s->thread_context[i]) < 0)
1222 s->thread_context[i]->start_mb_y =
1223 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1224 s->thread_context[i]->end_mb_y =
1225 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1228 err = init_duplicate_context(s);
1232 s->end_mb_y = s->mb_height;
1234 s->slice_context_count = nb_slices;
/* NOTE(review): presumably reached via a fail label elided from this
 * listing — full cleanup on error. */
1239 ff_MPV_common_end(s);
1243 /* init common structure for both encoder and decoder */
1244 void ff_MPV_common_end(MpegEncContext *s)
/* NOTE(review): lines elided in this listing (numbering gaps): opening
 * brace, loop variable declaration and some closing braces missing. */
1248 if (s->slice_context_count > 1) {
1249 for (i = 0; i < s->slice_context_count; i++) {
1250 free_duplicate_context(s->thread_context[i]);
1252 for (i = 1; i < s->slice_context_count; i++) {
1253 av_freep(&s->thread_context[i]);
1255 s->slice_context_count = 1;
1256 } else free_duplicate_context(s);
1258 av_freep(&s->parse_context.buffer);
1259 s->parse_context.buffer_size = 0;
1261 av_freep(&s->bitstream_buffer);
1262 s->allocated_bitstream_buffer_size = 0;
1264 av_freep(&s->avctx->stats_out);
1265 av_freep(&s->ac_stats);
/* The chroma intra matrices may alias the luma ones; only free them
 * when they are separate allocations, then clear both pointers. */
1267 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1268 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1269 s->q_chroma_intra_matrix= NULL;
1270 s->q_chroma_intra_matrix16= NULL;
1271 av_freep(&s->q_intra_matrix);
1272 av_freep(&s->q_inter_matrix);
1273 av_freep(&s->q_intra_matrix16);
1274 av_freep(&s->q_inter_matrix16);
1275 av_freep(&s->input_picture);
1276 av_freep(&s->reordered_input_picture);
1277 av_freep(&s->dct_offset);
/* Release every picture slot plus the standalone picture copies. */
1280 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1281 free_picture_tables(&s->picture[i]);
1282 ff_mpeg_unref_picture(s, &s->picture[i]);
1285 av_freep(&s->picture);
1286 free_picture_tables(&s->last_picture);
1287 ff_mpeg_unref_picture(s, &s->last_picture);
1288 free_picture_tables(&s->current_picture);
1289 ff_mpeg_unref_picture(s, &s->current_picture);
1290 free_picture_tables(&s->next_picture);
1291 ff_mpeg_unref_picture(s, &s->next_picture);
1292 free_picture_tables(&s->new_picture);
1293 ff_mpeg_unref_picture(s, &s->new_picture);
1295 free_context_frame(s);
1297 s->context_initialized = 0;
1298 s->last_picture_ptr =
1299 s->next_picture_ptr =
1300 s->current_picture_ptr = NULL;
1301 s->linesize = s->uvlinesize = 0;
/* Build the max_level[], max_run[] and index_run[] lookup tables for an
 * RL (run/level) table, either into caller-provided static storage or
 * into freshly av_malloc'ed buffers.
 * NOTE(review): this listing elides lines (numbering gaps): the start/end
 * assignments for each half of the table and several braces/else branches
 * are not visible here. */
1304 av_cold void ff_init_rl(RLTable *rl,
1305 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1307 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1308 uint8_t index_run[MAX_RUN + 1];
1309 int last, run, level, start, end, i;
1311 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1312 if (static_store && rl->max_level[0])
1315 /* compute max_level[], max_run[] and index_run[] */
1316 for (last = 0; last < 2; last++) {
/* rl->n acts as the "unused" sentinel for index_run. */
1325 memset(max_level, 0, MAX_RUN + 1);
1326 memset(max_run, 0, MAX_LEVEL + 1);
1327 memset(index_run, rl->n, MAX_RUN + 1);
1328 for (i = start; i < end; i++) {
1329 run = rl->table_run[i];
1330 level = rl->table_level[i];
1331 if (index_run[run] == rl->n)
1333 if (level > max_level[run])
1334 max_level[run] = level;
1335 if (run > max_run[level])
1336 max_run[level] = run;
/* Publish the tables: static_store is carved into three consecutive
 * regions (max_level, max_run, index_run); otherwise heap-allocate. */
1339 rl->max_level[last] = static_store[last];
1341 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1342 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1344 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1346 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1347 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1349 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1351 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1352 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Pre-expand the RL VLC table into per-qscale (q = 0..31) run/level/len
 * entries so decoding can skip the dequantization multiply.
 * NOTE(review): lines elided in this listing (numbering gaps): qmul setup,
 * the q==0 special case and the esc/illegal-code branches' bodies are
 * not visible. */
1356 av_cold void ff_init_vlc_rl(RLTable *rl)
1360 for (q = 0; q < 32; q++) {
/* (q - 1) | 1 forces qadd odd; standard H.263-style dequant rounding. */
1362 int qadd = (q - 1) | 1;
1368 for (i = 0; i < rl->vlc.table_size; i++) {
1369 int code = rl->vlc.table[i][0];
1370 int len = rl->vlc.table[i][1];
1373 if (len == 0) { // illegal code
1376 } else if (len < 0) { // more bits needed
1380 if (code == rl->n) { // esc
1384 run = rl->table_run[code] + 1;
1385 level = rl->table_level[code] * qmul + qadd;
1386 if (code >= rl->last) run += 192;
1389 rl->rl_vlc[q][i].len = len;
1390 rl->rl_vlc[q][i].level = level;
1391 rl->rl_vlc[q][i].run = run;
/* Unref all non-reference pictures; the current picture is kept unless
 * remove_current is set. NOTE(review): braces elided in this listing. */
1396 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1400 /* release non reference frames */
1401 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1402 if (!s->picture[i].reference &&
1403 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1404 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Whether a picture slot can be recycled. NOTE(review): the listing elides
 * lines here (numbering gaps) — the return statements after each condition
 * and the final return are not visible. */
1409 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1411 if (pic == s->last_picture_ptr)
1413 if (pic->f.data[0] == NULL)
1415 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Find a reusable slot in s->picture[]. First pass prefers completely
 * empty slots (no data, not last_picture); second pass accepts anything
 * pic_is_unused() allows. NOTE(review): the "shared" branch, return
 * statements and braces are elided in this listing (numbering gaps). */
1420 static int find_unused_picture(MpegEncContext *s, int shared)
1425 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1426 if (s->picture[i].f.data[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1430 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1431 if (pic_is_unused(s, &s->picture[i]))
1436 av_log(s->avctx, AV_LOG_FATAL,
1437 "Internal error, picture buffer overflow\n");
1438 /* We could return -1, but the codec would crash trying to draw into a
1439 * non-existing frame anyway. This is safer than waiting for a random crash.
1440 * Also the return of this is never useful, an encoder must only allocate
1441 * as much as allowed in the specification. This has no relationship to how
1442 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1443 * enough for such valid streams).
1444 * Plus, a decoder has to check stream validity and remove frames if too
1445 * many reference frames are around. Waiting for "OOM" is not correct at
1446 * all. Similarly, missing reference frames have to be replaced by
1447 * interpolated/MC frames, anything else is a bug in the codec ...
/* Public wrapper around find_unused_picture(): if the chosen slot was
 * flagged needs_realloc, drop its old tables/buffers and reset the frame
 * to defaults before handing it out.
 * NOTE(review): braces and the final return are elided in this listing. */
1453 int ff_find_unused_picture(MpegEncContext *s, int shared)
1455 int ret = find_unused_picture(s, shared);
1457 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1458 if (s->picture[ret].needs_realloc) {
1459 s->picture[ret].needs_realloc = 0;
1460 free_picture_tables(&s->picture[ret]);
1461 ff_mpeg_unref_picture(s, &s->picture[ret]);
1462 avcodec_get_frame_defaults(&s->picture[ret].f);
/* Recompute the per-coefficient DCT noise-reduction offsets from the
 * accumulated error statistics, separately for intra and inter blocks.
 * NOTE(review): braces and variable declarations elided in this listing. */
1468 static void update_noise_reduction(MpegEncContext *s)
1472 for (intra = 0; intra < 2; intra++) {
/* Halve the accumulators once the sample count passes 2^16 so the
 * statistics keep adapting instead of saturating. */
1473 if (s->dct_count[intra] > (1 << 16)) {
1474 for (i = 0; i < 64; i++) {
1475 s->dct_error_sum[intra][i] >>= 1;
1477 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, with +error_sum/2 for
 * rounding and +1 to avoid division by zero. */
1480 for (i = 0; i < 64; i++) {
1481 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1482 s->dct_count[intra] +
1483 s->dct_error_sum[intra][i] / 2) /
1484 (s->dct_error_sum[intra][i] + 1);
1490 * generic function for encode/decode called after coding/decoding
1491 * the header and before a frame is coded/decoded.
/* Prepare the picture state for coding/decoding one frame: release stale
 * pictures, pick/allocate the current picture, shuffle last/next pointers,
 * allocate dummy references when missing, and select the dequantizers.
 * NOTE(review): this listing elides many original lines (numbering gaps):
 * returns, gotos, else branches and closing braces are not all visible. */
1493 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1499 if (!ff_thread_can_start_frame(avctx)) {
1500 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1504 /* mark & release old frames */
1505 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1506 s->last_picture_ptr != s->next_picture_ptr &&
1507 s->last_picture_ptr->f.data[0]) {
1508 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1511 /* release forgotten pictures */
1512 /* if (mpeg124/h263) */
1514 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1515 if (&s->picture[i] != s->last_picture_ptr &&
1516 &s->picture[i] != s->next_picture_ptr &&
1517 s->picture[i].reference && !s->picture[i].needs_realloc) {
1518 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1519 av_log(avctx, AV_LOG_ERROR,
1520 "releasing zombie picture\n");
1521 ff_mpeg_unref_picture(s, &s->picture[i]);
1526 ff_mpeg_unref_picture(s, &s->current_picture);
1529 ff_release_unused_pictures(s, 1);
1531 if (s->current_picture_ptr &&
1532 s->current_picture_ptr->f.data[0] == NULL) {
1533 // we already have an unused image
1534 // (maybe it was set before reading the header)
1535 pic = s->current_picture_ptr;
1537 i = ff_find_unused_picture(s, 0);
1539 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1542 pic = &s->picture[i];
1546 if (!s->droppable) {
1547 if (s->pict_type != AV_PICTURE_TYPE_B)
1551 pic->f.coded_picture_number = s->coded_picture_number++;
1553 if (ff_alloc_picture(s, pic, 0) < 0)
1556 s->current_picture_ptr = pic;
1557 // FIXME use only the vars from current_pic
1558 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1559 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1560 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1561 if (s->picture_structure != PICT_FRAME)
1562 s->current_picture_ptr->f.top_field_first =
1563 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1565 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1566 !s->progressive_sequence;
1567 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1570 s->current_picture_ptr->f.pict_type = s->pict_type;
1571 // if (s->flags && CODEC_FLAG_QSCALE)
1572 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1573 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1575 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1576 s->current_picture_ptr)) < 0)
/* Reference shuffle: for non-B frames the previous "next" becomes
 * "last" and the current picture becomes the new "next". */
1579 if (s->pict_type != AV_PICTURE_TYPE_B) {
1580 s->last_picture_ptr = s->next_picture_ptr;
1582 s->next_picture_ptr = s->current_picture_ptr;
1584 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1585 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1586 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1587 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1588 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1589 s->pict_type, s->droppable);
/* No usable last picture: allocate a gray dummy so prediction has
 * something to read from (broken/truncated streams, field starts). */
1591 if ((s->last_picture_ptr == NULL ||
1592 s->last_picture_ptr->f.data[0] == NULL) &&
1593 (s->pict_type != AV_PICTURE_TYPE_I ||
1594 s->picture_structure != PICT_FRAME)) {
1595 int h_chroma_shift, v_chroma_shift;
1596 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1597 &h_chroma_shift, &v_chroma_shift);
1598 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f.data[0])
1599 av_log(avctx, AV_LOG_DEBUG,
1600 "allocating dummy last picture for B frame\n");
1601 else if (s->pict_type != AV_PICTURE_TYPE_I)
1602 av_log(avctx, AV_LOG_ERROR,
1603 "warning: first frame is no keyframe\n");
1604 else if (s->picture_structure != PICT_FRAME)
1605 av_log(avctx, AV_LOG_DEBUG,
1606 "allocate dummy last picture for field based first keyframe\n");
1608 /* Allocate a dummy frame */
1609 i = ff_find_unused_picture(s, 0);
1611 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1614 s->last_picture_ptr = &s->picture[i];
1615 s->last_picture_ptr->f.key_frame = 0;
1616 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1617 s->last_picture_ptr = NULL;
/* 0x80 = mid-gray for luma and neutral chroma in 8-bit YUV. */
1621 memset(s->last_picture_ptr->f.data[0], 0x80,
1622 avctx->height * s->last_picture_ptr->f.linesize[0]);
1623 memset(s->last_picture_ptr->f.data[1], 0x80,
1624 (avctx->height >> v_chroma_shift) *
1625 s->last_picture_ptr->f.linesize[1]);
1626 memset(s->last_picture_ptr->f.data[2], 0x80,
1627 (avctx->height >> v_chroma_shift) *
1628 s->last_picture_ptr->f.linesize[2]);
1630 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1631 for(i=0; i<avctx->height; i++)
1632 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
1635 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1636 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same dummy-allocation dance for a missing "next" reference of a
 * B frame. */
1638 if ((s->next_picture_ptr == NULL ||
1639 s->next_picture_ptr->f.data[0] == NULL) &&
1640 s->pict_type == AV_PICTURE_TYPE_B) {
1641 /* Allocate a dummy frame */
1642 i = ff_find_unused_picture(s, 0);
1644 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1647 s->next_picture_ptr = &s->picture[i];
1648 s->next_picture_ptr->f.key_frame = 0;
1649 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1650 s->next_picture_ptr = NULL;
1653 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1654 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1657 #if 0 // BUFREF-FIXME
1658 memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1659 memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
/* Refresh the local last/next picture copies from the pointers. */
1661 if (s->last_picture_ptr) {
1662 ff_mpeg_unref_picture(s, &s->last_picture);
1663 if (s->last_picture_ptr->f.data[0] &&
1664 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1665 s->last_picture_ptr)) < 0)
1668 if (s->next_picture_ptr) {
1669 ff_mpeg_unref_picture(s, &s->next_picture);
1670 if (s->next_picture_ptr->f.data[0] &&
1671 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1672 s->next_picture_ptr)) < 0)
1676 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1677 s->last_picture_ptr->f.data[0]));
/* Field pictures: double the line sizes so each context addresses one
 * field; bottom field additionally starts one line down. */
1679 if (s->picture_structure!= PICT_FRAME) {
1681 for (i = 0; i < 4; i++) {
1682 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1683 s->current_picture.f.data[i] +=
1684 s->current_picture.f.linesize[i];
1686 s->current_picture.f.linesize[i] *= 2;
1687 s->last_picture.f.linesize[i] *= 2;
1688 s->next_picture.f.linesize[i] *= 2;
1692 s->err_recognition = avctx->err_recognition;
1694 /* set dequantizer, we can't do it during init as
1695 * it might change for mpeg4 and we can't do it in the header
1696 * decode as init is not called for mpeg4 there yet */
1697 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1698 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1699 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1700 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1701 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1702 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1704 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1705 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1708 if (s->dct_error_sum) {
1709 av_assert2(s->avctx->noise_reduction && s->encoding);
1710 update_noise_reduction(s);
1713 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1714 return ff_xvmc_field_start(s, avctx);
1719 /* generic function for encode/decode called after a
1720 * frame has been coded/decoded. */
1721 void ff_MPV_frame_end(MpegEncContext *s)
/* NOTE(review): lines elided in this listing (numbering gaps): opening
 * brace, parts of the edge-drawing condition and some closing braces. */
1723 /* redraw edges for the frame if decoding didn't complete */
1724 // just to make sure that all data is rendered.
1725 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1726 ff_xvmc_field_end(s);
1727 } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1728 !s->avctx->hwaccel &&
1729 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1730 s->unrestricted_mv &&
1731 s->current_picture.reference &&
1733 !(s->flags & CODEC_FLAG_EMU_EDGE) &&
/* Pad the picture borders (luma full width, chroma scaled by the
 * pixel format's subsampling shifts). */
1736 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1737 int hshift = desc->log2_chroma_w;
1738 int vshift = desc->log2_chroma_h;
1739 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1740 s->h_edge_pos, s->v_edge_pos,
1741 EDGE_WIDTH, EDGE_WIDTH,
1742 EDGE_TOP | EDGE_BOTTOM);
1743 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1744 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1745 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1746 EDGE_TOP | EDGE_BOTTOM);
1747 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1748 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1749 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1750 EDGE_TOP | EDGE_BOTTOM);
1755 s->last_pict_type = s->pict_type;
1756 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1757 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1758 s->last_non_b_pict_type = s->pict_type;
1761 /* copy back current_picture variables */
1762 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1763 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1764 s->picture[i] = s->current_picture;
1768 av_assert0(i < MAX_PICTURE_COUNT);
1771 // clear copies, to avoid confusion
1773 memset(&s->last_picture, 0, sizeof(Picture));
1774 memset(&s->next_picture, 0, sizeof(Picture));
1775 memset(&s->current_picture, 0, sizeof(Picture));
1777 s->avctx->coded_frame = &s->current_picture_ptr->f;
1779 if (s->current_picture.reference)
1780 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1784 * Draw a line from (ex, ey) -> (sx, sy).
1785 * @param w width of the image
1786 * @param h height of the image
1787 * @param stride stride/linesize of the image
1788 * @param color color of the arrow
1790 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1791 int w, int h, int stride, int color)
/* NOTE(review): this listing elides lines (numbering gaps): declarations
 * of t/f/x/y/fr, the endpoint-ordering swaps' conditions, the y/x
 * computation inside the loops and the else branch boundary are missing.
 * Visible logic: clip both endpoints, then additively blend an
 * anti-aliased line using 16.16 fixed-point slope interpolation,
 * iterating along the dominant axis. */
1795 sx = av_clip(sx, 0, w - 1);
1796 sy = av_clip(sy, 0, h - 1);
1797 ex = av_clip(ex, 0, w - 1);
1798 ey = av_clip(ey, 0, h - 1);
1800 buf[sy * stride + sx] += color;
/* Mostly-horizontal case: step in x, distribute color between the two
 * vertically adjacent pixels by the fractional part fr. */
1802 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1804 FFSWAP(int, sx, ex);
1805 FFSWAP(int, sy, ey);
1807 buf += sx + sy * stride;
1809 f = ((ey - sy) << 16) / ex;
1810 for (x = 0; x <= ex; x++) {
1812 fr = (x * f) & 0xFFFF;
1813 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1814 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* Mostly-vertical case: step in y, blend horizontally. */
1818 FFSWAP(int, sx, ex);
1819 FFSWAP(int, sy, ey);
1821 buf += sx + sy * stride;
1824 f = ((ex - sx) << 16) / ey;
1827 for(y= 0; y <= ey; y++){
1829 fr = (y*f) & 0xFFFF;
1830 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1831 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1837 * Draw an arrow from (ex, ey) -> (sx, sy).
1838 * @param w width of the image
1839 * @param h height of the image
1840 * @param stride stride/linesize of the image
1841 * @param color color of the arrow
1843 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1844 int ey, int w, int h, int stride, int color)
/* NOTE(review): dx/dy/rx/ry setup lines are elided in this listing.
 * Draws the shaft plus, for vectors longer than 3 pixels, two short
 * head strokes built from the (rotated) direction vector. */
1848 sx = av_clip(sx, -100, w + 100);
1849 sy = av_clip(sy, -100, h + 100);
1850 ex = av_clip(ex, -100, w + 100);
1851 ey = av_clip(ey, -100, h + 100);
1856 if (dx * dx + dy * dy > 3 * 3) {
/* length in 4 fractional bits: ff_sqrt of the squared length << 8. */
1859 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1861 // FIXME subpixel accuracy
1862 rx = ROUNDED_DIV(rx * 3 << 4, length);
1863 ry = ROUNDED_DIV(ry * 3 << 4, length);
1865 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1866 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1868 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1872 * Print debugging info for the given picture.
/* Dump per-macroblock debug info (skip counts, QP, MB type as an ASCII
 * map) to the log and optionally paint motion vectors / QP / MB-type
 * overlays directly into the output frame.
 * NOTE(review): this listing elides many original lines (numbering gaps):
 * declarations, several braces, `continue`s and `direction` assignments
 * inside the debug_mv type switch are not visible. */
1874 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1876 int mb_width, int mb_height, int mb_stride, int quarter_sample)
1878 if (avctx->hwaccel || !p || !p->mb_type
1879 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
/* --- Textual per-MB dump: one character per macroblock per column. --- */
1883 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1886 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1887 av_get_picture_type_char(pict->pict_type));
1888 for (y = 0; y < mb_height; y++) {
1889 for (x = 0; x < mb_width; x++) {
1890 if (avctx->debug & FF_DEBUG_SKIP) {
1891 int count = mbskip_table[x + y * mb_stride];
1894 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1896 if (avctx->debug & FF_DEBUG_QP) {
1897 av_log(avctx, AV_LOG_DEBUG, "%2d",
1898 p->qscale_table[x + y * mb_stride]);
1900 if (avctx->debug & FF_DEBUG_MB_TYPE) {
1901 int mb_type = p->mb_type[x + y * mb_stride];
1902 // Type & MV direction
1903 if (IS_PCM(mb_type))
1904 av_log(avctx, AV_LOG_DEBUG, "P");
1905 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1906 av_log(avctx, AV_LOG_DEBUG, "A");
1907 else if (IS_INTRA4x4(mb_type))
1908 av_log(avctx, AV_LOG_DEBUG, "i");
1909 else if (IS_INTRA16x16(mb_type))
1910 av_log(avctx, AV_LOG_DEBUG, "I");
1911 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1912 av_log(avctx, AV_LOG_DEBUG, "d");
1913 else if (IS_DIRECT(mb_type))
1914 av_log(avctx, AV_LOG_DEBUG, "D");
1915 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1916 av_log(avctx, AV_LOG_DEBUG, "g");
1917 else if (IS_GMC(mb_type))
1918 av_log(avctx, AV_LOG_DEBUG, "G");
1919 else if (IS_SKIP(mb_type))
1920 av_log(avctx, AV_LOG_DEBUG, "S");
1921 else if (!USES_LIST(mb_type, 1))
1922 av_log(avctx, AV_LOG_DEBUG, ">");
1923 else if (!USES_LIST(mb_type, 0))
1924 av_log(avctx, AV_LOG_DEBUG, "<");
1926 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1927 av_log(avctx, AV_LOG_DEBUG, "X");
/* Second character: partitioning (+/-/| for 8x8/16x8/8x16). */
1931 if (IS_8X8(mb_type))
1932 av_log(avctx, AV_LOG_DEBUG, "+");
1933 else if (IS_16X8(mb_type))
1934 av_log(avctx, AV_LOG_DEBUG, "-");
1935 else if (IS_8X16(mb_type))
1936 av_log(avctx, AV_LOG_DEBUG, "|");
1937 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1938 av_log(avctx, AV_LOG_DEBUG, " ");
1940 av_log(avctx, AV_LOG_DEBUG, "?");
/* Third character: '=' marks interlaced MBs. */
1943 if (IS_INTERLACED(mb_type))
1944 av_log(avctx, AV_LOG_DEBUG, "=");
1946 av_log(avctx, AV_LOG_DEBUG, " ");
1949 av_log(avctx, AV_LOG_DEBUG, "\n");
/* --- Visual overlays painted into the frame itself. --- */
1953 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1954 (avctx->debug_mv)) {
1955 const int shift = 1 + quarter_sample;
1959 int h_chroma_shift, v_chroma_shift, block_height;
1960 const int width = avctx->width;
1961 const int height = avctx->height;
/* H.264/SVQ3 store MVs on a 4x4 grid (log2 = 2), others 8x8. */
1962 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1963 const int mv_stride = (mb_width << mv_sample_log2) +
1964 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1966 *low_delay = 0; // needed to see the vectors without trashing the buffers
1968 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1970 av_frame_make_writable(pict);
1972 pict->opaque = NULL;
1973 ptr = pict->data[0];
1974 block_height = 16 >> v_chroma_shift;
1976 for (mb_y = 0; mb_y < mb_height; mb_y++) {
1978 for (mb_x = 0; mb_x < mb_width; mb_x++) {
1979 const int mb_index = mb_x + mb_y * mb_stride;
/* Motion-vector arrows, one pass per requested direction/type. */
1980 if ((avctx->debug_mv) && p->motion_val[0]) {
1982 for (type = 0; type < 3; type++) {
1986 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1987 (pict->pict_type!= AV_PICTURE_TYPE_P))
1992 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1993 (pict->pict_type!= AV_PICTURE_TYPE_B))
1998 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1999 (pict->pict_type!= AV_PICTURE_TYPE_B))
2004 if (!USES_LIST(p->mb_type[mb_index], direction))
2007 if (IS_8X8(p->mb_type[mb_index])) {
2009 for (i = 0; i < 4; i++) {
2010 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2011 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2012 int xy = (mb_x * 2 + (i & 1) +
2013 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2014 int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
2015 int my = (p->motion_val[direction][xy][1] >> shift) + sy;
2016 draw_arrow(ptr, sx, sy, mx, my, width,
2017 height, pict->linesize[0], 100);
2019 } else if (IS_16X8(p->mb_type[mb_index])) {
2021 for (i = 0; i < 2; i++) {
2022 int sx = mb_x * 16 + 8;
2023 int sy = mb_y * 16 + 4 + 8 * i;
2024 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2025 int mx = (p->motion_val[direction][xy][0] >> shift);
2026 int my = (p->motion_val[direction][xy][1] >> shift);
2028 if (IS_INTERLACED(p->mb_type[mb_index]))
2031 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2032 height, pict->linesize[0], 100);
2034 } else if (IS_8X16(p->mb_type[mb_index])) {
2036 for (i = 0; i < 2; i++) {
2037 int sx = mb_x * 16 + 4 + 8 * i;
2038 int sy = mb_y * 16 + 8;
2039 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2040 int mx = p->motion_val[direction][xy][0] >> shift;
2041 int my = p->motion_val[direction][xy][1] >> shift;
2043 if (IS_INTERLACED(p->mb_type[mb_index]))
2046 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2047 height, pict->linesize[0], 100);
/* 16x16 (single MV) case — elided else branch header above. */
2050 int sx= mb_x * 16 + 8;
2051 int sy= mb_y * 16 + 8;
2052 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2053 int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2054 int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2055 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
/* QP overlay: fill the MB's chroma with a gray level ~ qscale. */
2059 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2060 uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2061 0x0101010101010101ULL;
2063 for (y = 0; y < block_height; y++) {
2064 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2065 (block_height * mb_y + y) *
2066 pict->linesize[1]) = c;
2067 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2068 (block_height * mb_y + y) *
2069 pict->linesize[2]) = c;
/* MB-type overlay: pick a U/V color per MB class (COLOR macro maps a
 * hue angle/radius to chroma values). */
2072 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2074 int mb_type = p->mb_type[mb_index];
2077 #define COLOR(theta, r) \
2078 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2079 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2083 if (IS_PCM(mb_type)) {
2085 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2086 IS_INTRA16x16(mb_type)) {
2088 } else if (IS_INTRA4x4(mb_type)) {
2090 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2092 } else if (IS_DIRECT(mb_type)) {
2094 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2096 } else if (IS_GMC(mb_type)) {
2098 } else if (IS_SKIP(mb_type)) {
2100 } else if (!USES_LIST(mb_type, 1)) {
2102 } else if (!USES_LIST(mb_type, 0)) {
2105 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2109 u *= 0x0101010101010101ULL;
2110 v *= 0x0101010101010101ULL;
2111 for (y = 0; y < block_height; y++) {
2112 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2113 (block_height * mb_y + y) * pict->linesize[1]) = u;
2114 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2115 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* Partition boundaries: XOR the luma along the split lines. */
2119 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2120 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2121 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2122 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2123 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2125 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2126 for (y = 0; y < 16; y++)
2127 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2128 pict->linesize[0]] ^= 0x80;
2130 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2131 int dm = 1 << (mv_sample_log2 - 2);
2132 for (i = 0; i < 4; i++) {
2133 int sx = mb_x * 16 + 8 * (i & 1);
2134 int sy = mb_y * 16 + 8 * (i >> 1);
2135 int xy = (mb_x * 2 + (i & 1) +
2136 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2138 int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2139 if (mv[0] != mv[dm] ||
2140 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2141 for (y = 0; y < 8; y++)
2142 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2143 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2144 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2145 pict->linesize[0]) ^= 0x8080808080808080ULL;
2149 if (IS_INTERLACED(mb_type) &&
2150 avctx->codec->id == AV_CODEC_ID_H264) {
2154 mbskip_table[mb_index] = 0;
/* Convenience wrapper: forward the MpegEncContext's per-frame tables and
 * geometry to ff_print_debug_info2(). */
2160 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2162 ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2163 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
/* Attach the picture's qscale table to the output frame as a qp table,
 * sharing the underlying buffer via av_buffer_ref().
 * NOTE(review): the NULL check before `return AVERROR(ENOMEM);` is elided
 * in this listing (numbering gap at 2170/2171). */
2166 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2168 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
/* Skip the first row + 1 entry of edge padding in the qscale table. */
2169 int offset = 2*s->mb_stride + 1;
2171 return AVERROR(ENOMEM);
2172 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2173 ref->size -= offset;
2174 ref->data += offset;
2175 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
/* Half-pel motion compensation of one block in lowres decoding mode:
 * split the motion vector into integer position and sub-pel fraction,
 * fall back to emulated_edge_mc when the read would cross the padded
 * edge, then apply the chroma-MC style pixel op.
 * NOTE(review): lines elided in this listing (numbering gaps): sx/sy
 * declarations, the quarter_sample halving, the emulate flag and the
 * return statement are not visible. */
2178 static inline int hpel_motion_lowres(MpegEncContext *s,
2179 uint8_t *dest, uint8_t *src,
2180 int field_based, int field_select,
2181 int src_x, int src_y,
2182 int width, int height, ptrdiff_t stride,
2183 int h_edge_pos, int v_edge_pos,
2184 int w, int h, h264_chroma_mc_func *pix_op,
2185 int motion_x, int motion_y)
2187 const int lowres = s->avctx->lowres;
2188 const int op_index = FFMIN(lowres, 3);
/* s_mask selects the sub-pel bits that survive the lowres shift. */
2189 const int s_mask = (2 << lowres) - 1;
2193 if (s->quarter_sample) {
2198 sx = motion_x & s_mask;
2199 sy = motion_y & s_mask;
/* Careful: '+' binds tighter than '>>', so this is
 * motion_x >> (lowres + 1) — intentional per C precedence. */
2200 src_x += motion_x >> lowres + 1;
2201 src_y += motion_y >> lowres + 1;
2203 src += src_y * stride + src_x;
2205 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2206 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2207 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->linesize,
2208 src, s->linesize, w + 1,
2209 (h + 1) << field_based, src_x,
2210 src_y << field_based,
2213 src = s->edge_emu_buffer;
/* Rescale the sub-pel fraction to the 1/8-pel units pix_op expects. */
2217 sx = (sx << 2) >> lowres;
2218 sy = (sy << 2) >> lowres;
2221 pix_op[op_index](dest, src, stride, h, sx, sy);
2225 /* apply one mpeg motion vector to the three components */
/* Lowres MPEG motion compensation: applies one MV to luma and both chroma
 * planes, handling field-based pictures and the different chroma MV derivation
 * rules of H.263 / H.261 / MPEG-1/2 style formats. */
2226 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2233 uint8_t **ref_picture,
2234 h264_chroma_mc_func *pix_op,
2235 int motion_x, int motion_y,
2238 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2239 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2240 ptrdiff_t uvlinesize, linesize;
2241 const int lowres = s->avctx->lowres;
/* chroma MC table index depends on both lowres level and chroma subsampling */
2242 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2243 const int block_s = 8>>lowres;
2244 const int s_mask = (2 << lowres) - 1;
2245 const int h_edge_pos = s->h_edge_pos >> lowres;
2246 const int v_edge_pos = s->v_edge_pos >> lowres;
2247 linesize = s->current_picture.f.linesize[0] << field_based;
2248 uvlinesize = s->current_picture.f.linesize[1] << field_based;
2250 // FIXME obviously not perfect but qpel will not work in lowres anyway
2251 if (s->quarter_sample) {
/* compensate the vertical MV for the field offset at this lowres level */
2257 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2260 sx = motion_x & s_mask;
2261 sy = motion_y & s_mask;
2262 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2263 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* chroma source position / sub-pel fraction: per-format MV derivation */
2265 if (s->out_format == FMT_H263) {
2266 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2267 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2268 uvsrc_x = src_x >> 1;
2269 uvsrc_y = src_y >> 1;
2270 } else if (s->out_format == FMT_H261) {
2271 // even chroma mv's are full pel in H261
2274 uvsx = (2 * mx) & s_mask;
2275 uvsy = (2 * my) & s_mask;
2276 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2277 uvsrc_y = mb_y * block_s + (my >> lowres);
2279 if(s->chroma_y_shift){
2284 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2285 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2287 if(s->chroma_x_shift){
2291 uvsy = motion_y & s_mask;
2293 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2296 uvsx = motion_x & s_mask;
2297 uvsy = motion_y & s_mask;
2304 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2305 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2306 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* out-of-picture reads go through the edge emulation buffer */
2308 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2309 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2310 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, linesize >> field_based, ptr_y,
2311 linesize >> field_based, 17, 17 + field_based,
2312 src_x, src_y << field_based, h_edge_pos,
2314 ptr_y = s->edge_emu_buffer;
2315 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2316 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2317 s->vdsp.emulated_edge_mc(uvbuf, uvlinesize >> field_based,
2318 ptr_cb, uvlinesize >> field_based, 9,
2320 uvsrc_x, uvsrc_y << field_based,
2321 h_edge_pos >> 1, v_edge_pos >> 1);
2322 s->vdsp.emulated_edge_mc(uvbuf + 16, uvlinesize >> field_based,
2323 ptr_cr, uvlinesize >> field_based, 9,
2325 uvsrc_x, uvsrc_y << field_based,
2326 h_edge_pos >> 1, v_edge_pos >> 1);
2328 ptr_cr = uvbuf + 16;
2332 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
/* bottom field: start one line down in destination and source */
2334 dest_y += s->linesize;
2335 dest_cb += s->uvlinesize;
2336 dest_cr += s->uvlinesize;
2340 ptr_y += s->linesize;
2341 ptr_cb += s->uvlinesize;
2342 ptr_cr += s->uvlinesize;
2345 sx = (sx << 2) >> lowres;
2346 sy = (sy << 2) >> lowres;
2347 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2349 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2350 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2351 uvsx = (uvsx << 2) >> lowres;
2352 uvsy = (uvsy << 2) >> lowres;
2354 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2355 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2358 // FIXME h261 lowres loop filter
/* Chroma MC for 4MV macroblocks at lowres: the four luma MVs are combined
 * (with special rounding) into a single chroma MV, then Cb and Cr are
 * motion-compensated with it. */
2361 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2362 uint8_t *dest_cb, uint8_t *dest_cr,
2363 uint8_t **ref_picture,
2364 h264_chroma_mc_func * pix_op,
2367 const int lowres = s->avctx->lowres;
2368 const int op_index = FFMIN(lowres, 3);
2369 const int block_s = 8 >> lowres;
2370 const int s_mask = (2 << lowres) - 1;
/* chroma plane edges: one extra shift for 4:2:0 subsampling */
2371 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2372 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2373 int emu = 0, src_x, src_y, sx, sy;
2377 if (s->quarter_sample) {
2382 /* In case of 8X8, we construct a single chroma motion vector
2383 with a special rounding */
2384 mx = ff_h263_round_chroma(mx);
2385 my = ff_h263_round_chroma(my);
2389 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2390 src_y = s->mb_y * block_s + (my >> lowres + 1);
2392 offset = src_y * s->uvlinesize + src_x;
2393 ptr = ref_picture[1] + offset;
2394 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2395 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2396 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->uvlinesize, ptr, s->uvlinesize,
2397 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2398 ptr = s->edge_emu_buffer;
2401 sx = (sx << 2) >> lowres;
2402 sy = (sy << 2) >> lowres;
2403 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr uses the same offset and (when needed) the same edge emulation */
2405 ptr = ref_picture[2] + offset;
2407 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->uvlinesize,
2408 ptr, s->uvlinesize, 9, 9,
2409 src_x, src_y, h_edge_pos, v_edge_pos);
2410 ptr = s->edge_emu_buffer;
2412 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2416 * motion compensation of a single macroblock
2418 * @param dest_y luma destination pointer
2419 * @param dest_cb chroma cb/u destination pointer
2420 * @param dest_cr chroma cr/v destination pointer
2421 * @param dir direction (0->forward, 1->backward)
2422 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2423 * @param pix_op halfpel motion compensation function (average or put normally)
2424 * the motion vectors are taken from s->mv and the MV type from s->mv_type
2426 static inline void MPV_motion_lowres(MpegEncContext *s,
2427 uint8_t *dest_y, uint8_t *dest_cb,
2429 int dir, uint8_t **ref_picture,
2430 h264_chroma_mc_func *pix_op)
2434 const int lowres = s->avctx->lowres;
2435 const int block_s = 8 >>lowres;
/* dispatch on the macroblock's MV type (16x16, 8x8/4MV, field, 16x8, ...) */
2440 switch (s->mv_type) {
2442 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2444 ref_picture, pix_op,
2445 s->mv[dir][0][0], s->mv[dir][0][1],
/* 4MV: one half-pel luma MC per 8x8 sub-block, chroma handled once below */
2451 for (i = 0; i < 4; i++) {
2452 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2453 s->linesize) * block_s,
2454 ref_picture[0], 0, 0,
2455 (2 * mb_x + (i & 1)) * block_s,
2456 (2 * mb_y + (i >> 1)) * block_s,
2457 s->width, s->height, s->linesize,
2458 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2459 block_s, block_s, pix_op,
2460 s->mv[dir][i][0], s->mv[dir][i][1]);
2462 mx += s->mv[dir][i][0];
2463 my += s->mv[dir][i][1];
2466 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2467 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field-MV in a frame picture: compensate top and bottom field separately */
2471 if (s->picture_structure == PICT_FRAME) {
2473 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2474 1, 0, s->field_select[dir][0],
2475 ref_picture, pix_op,
2476 s->mv[dir][0][0], s->mv[dir][0][1],
2479 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2480 1, 1, s->field_select[dir][1],
2481 ref_picture, pix_op,
2482 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture referencing the opposite parity of the current frame */
2485 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2486 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2487 ref_picture = s->current_picture_ptr->f.data;
2490 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2491 0, 0, s->field_select[dir][0],
2492 ref_picture, pix_op,
2494 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two MVs per direction, each covering half the macroblock height */
2498 for (i = 0; i < 2; i++) {
2499 uint8_t **ref2picture;
2501 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2502 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2503 ref2picture = ref_picture;
2505 ref2picture = s->current_picture_ptr->f.data;
2508 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2509 0, 0, s->field_select[dir][i],
2510 ref2picture, pix_op,
2511 s->mv[dir][i][0], s->mv[dir][i][1] +
2512 2 * block_s * i, block_s, mb_y >> 1);
2514 dest_y += 2 * block_s * s->linesize;
2515 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2516 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* dual-prime: put first prediction, then average in the second */
2520 if (s->picture_structure == PICT_FRAME) {
2521 for (i = 0; i < 2; i++) {
2523 for (j = 0; j < 2; j++) {
2524 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2526 ref_picture, pix_op,
2527 s->mv[dir][2 * i + j][0],
2528 s->mv[dir][2 * i + j][1],
2531 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2534 for (i = 0; i < 2; i++) {
2535 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2536 0, 0, s->picture_structure != i + 1,
2537 ref_picture, pix_op,
2538 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2539 2 * block_s, mb_y >> 1);
2541 // after put we make avg of the same block
2542 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2544 // opposite parity is always in the same
2545 // frame if this is second field
2546 if (!s->first_field) {
2547 ref_picture = s->current_picture_ptr->f.data;
2558 * find the lowest MB row referenced in the MVs
/* Returns the lowest macroblock row of the reference frame that this MB's
 * motion vectors can touch, clamped to [0, mb_height-1]; used by
 * frame-threading to wait only for the needed rows. */
2560 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2562 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2563 int my, off, i, mvs;
/* field pictures / GMC: fall through to the conservative answer below */
2565 if (s->picture_structure != PICT_FRAME || s->mcsel)
2568 switch (s->mv_type) {
2582 for (i = 0; i < mvs; i++) {
/* normalize half-pel MVs to quarter-pel units before taking the extremes */
2583 my = s->mv[dir][i][1]<<qpel_shift;
2584 my_max = FFMAX(my_max, my);
2585 my_min = FFMIN(my_min, my);
/* convert the worst-case quarter-pel displacement to whole MB rows */
2588 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2590 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2592 return s->mb_height-1;
2595 /* put block[] to dest[] */
/* Dequantize an intra block and write (overwrite) its IDCT into dest. */
2596 static inline void put_dct(MpegEncContext *s,
2597 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2599 s->dct_unquantize_intra(s, block, i, qscale);
2600 s->dsp.idct_put (dest, line_size, block);
2603 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block to dest; a negative
 * block_last_index means the block has no coded coefficients. */
2604 static inline void add_dct(MpegEncContext *s,
2605 int16_t *block, int i, uint8_t *dest, int line_size)
2607 if (s->block_last_index[i] >= 0) {
2608 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block and add its IDCT to dest (skips empty blocks). */
2612 static inline void add_dequant_dct(MpegEncContext *s,
2613 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2615 if (s->block_last_index[i] >= 0) {
2616 s->dct_unquantize_inter(s, block, i, qscale);
2618 s->dsp.idct_add (dest, line_size, block);
2623 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra prediction state (DC/AC predictors, coded_block flags,
 * mbintra_table entry) around the current macroblock so following intra MBs
 * do not predict from stale data. */
2625 void ff_clean_intra_table_entries(MpegEncContext *s)
2627 int wrap = s->b8_stride;
2628 int xy = s->block_index[0];
/* luma DC predictors reset to the mid-range default 1024 */
2631 s->dc_val[0][xy + 1 ] =
2632 s->dc_val[0][xy + wrap] =
2633 s->dc_val[0][xy + 1 + wrap] = 1024;
2635 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2636 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2637 if (s->msmpeg4_version>=3) {
2638 s->coded_block[xy ] =
2639 s->coded_block[xy + 1 ] =
2640 s->coded_block[xy + wrap] =
2641 s->coded_block[xy + 1 + wrap] = 0;
/* switch to chroma (MB-granularity) indexing */
2644 wrap = s->mb_stride;
2645 xy = s->mb_x + s->mb_y * wrap;
2647 s->dc_val[2][xy] = 1024;
2649 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2650 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2652 s->mbintra_table[xy]= 0;
2655 /* generic function called after a macroblock has been parsed by the
2656 decoder or after it has been encoded by the encoder.
2658 Important variables used:
2659 s->mb_intra : true if intra macroblock
2660 s->mv_dir : motion vector direction
2661 s->mv_type : motion vector type
2662 s->mv : motion vector
2663 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Per-macroblock reconstruction: motion compensation (full or lowres path)
 * followed by dequant+IDCT of the residual / intra blocks. lowres_flag and
 * is_mpeg12 are compile-time constants thanks to av_always_inline. */
2665 static av_always_inline
2666 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2667 int lowres_flag, int is_mpeg12)
2669 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XvMC does its own block handling; hand off early */
2670 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2671 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2675 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2676 /* print DCT coefficients */
2678 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2680 for(j=0; j<64; j++){
2681 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2683 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2687 s->current_picture.qscale_table[mb_xy] = s->qscale;
2689 /* update DC predictors for P macroblocks */
2691 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2692 if(s->mbintra_table[mb_xy])
2693 ff_clean_intra_table_entries(s);
2697 s->last_dc[2] = 128 << s->intra_dc_precision;
2700 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2701 s->mbintra_table[mb_xy]=1;
2703 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2704 uint8_t *dest_y, *dest_cb, *dest_cr;
2705 int dct_linesize, dct_offset;
2706 op_pixels_func (*op_pix)[4];
2707 qpel_mc_func (*op_qpix)[16];
2708 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2709 const int uvlinesize = s->current_picture.f.linesize[1];
2710 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2711 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2713 /* avoid copy if macroblock skipped in last frame too */
2714 /* skip only during decoding as we might trash the buffers during encoding a bit */
2716 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2718 if (s->mb_skipped) {
2720 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2722 } else if(!s->current_picture.reference) {
2725 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT interleaves the two fields: double stride, offset one line */
2729 dct_linesize = linesize << s->interlaced_dct;
2730 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2734 dest_cb= s->dest[1];
2735 dest_cr= s->dest[2];
/* B-frame destination is not readable in-place: reconstruct into scratchpad */
2737 dest_y = s->b_scratchpad;
2738 dest_cb= s->b_scratchpad+16*linesize;
2739 dest_cr= s->b_scratchpad+32*linesize;
2743 /* motion handling */
2744 /* decoding or more than one mb_type (MC was already done otherwise) */
/* with frame threading, wait until the referenced rows are decoded */
2747 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2748 if (s->mv_dir & MV_DIR_FORWARD) {
2749 ff_thread_await_progress(&s->last_picture_ptr->tf,
2750 ff_MPV_lowest_referenced_row(s, 0),
2753 if (s->mv_dir & MV_DIR_BACKWARD) {
2754 ff_thread_await_progress(&s->next_picture_ptr->tf,
2755 ff_MPV_lowest_referenced_row(s, 1),
/* lowres MC path: put forward prediction, average in backward */
2761 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2763 if (s->mv_dir & MV_DIR_FORWARD) {
2764 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2765 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2767 if (s->mv_dir & MV_DIR_BACKWARD) {
2768 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* full-resolution MC path (halfpel/qpel) */
2771 op_qpix = s->me.qpel_put;
2772 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2773 op_pix = s->hdsp.put_pixels_tab;
2775 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2777 if (s->mv_dir & MV_DIR_FORWARD) {
2778 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2779 op_pix = s->hdsp.avg_pixels_tab;
2780 op_qpix= s->me.qpel_avg;
2782 if (s->mv_dir & MV_DIR_BACKWARD) {
2783 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2788 /* skip dequant / idct if we are really late ;) */
2789 if(s->avctx->skip_idct){
2790 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2791 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2792 || s->avctx->skip_idct >= AVDISCARD_ALL)
2796 /* add dct residue */
/* codecs that keep coefficients quantized until now: dequantize + add */
2797 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2798 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2799 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2800 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2801 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2802 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2804 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2805 if (s->chroma_y_shift){
2806 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2807 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2811 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2812 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2813 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2814 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* codecs whose blocks are already dequantized: plain idct_add */
2817 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2818 add_dct(s, block[0], 0, dest_y , dct_linesize);
2819 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2820 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2821 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2823 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2824 if(s->chroma_y_shift){//Chroma420
2825 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2826 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2829 dct_linesize = uvlinesize << s->interlaced_dct;
2830 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2832 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2833 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2834 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2835 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2836 if(!s->chroma_x_shift){//Chroma444
2837 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2838 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2839 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2840 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2845 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2846 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2849 /* dct only in intra block */
2850 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2851 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2852 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2853 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2854 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2856 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2857 if(s->chroma_y_shift){
2858 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2859 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2863 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2864 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2865 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2866 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra: blocks already dequantized, write IDCT directly */
2870 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2871 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2872 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2873 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2875 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2876 if(s->chroma_y_shift){
2877 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2878 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2881 dct_linesize = uvlinesize << s->interlaced_dct;
2882 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2884 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2885 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2886 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2887 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2888 if(!s->chroma_x_shift){//Chroma444
2889 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2890 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2891 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2892 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* non-readable destination (B-frame scratchpad): copy result into place */
2900 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2901 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2902 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatches to MPV_decode_mb_internal with compile-time
 * (lowres, is_mpeg12) flags so each combination gets a specialized inline copy. */
2907 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2909 if(s->out_format == FMT_MPEG1) {
2910 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2911 else MPV_decode_mb_internal(s, block, 0, 1);
2914 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2915 else MPV_decode_mb_internal(s, block, 0, 0);
2919 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* Draws picture edges for the just-decoded band and invokes the user's
 * draw_horiz_band callback (in coded or display order depending on flags). */
2921 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2922 Picture *last, int y, int h, int picture_structure,
2923 int first_field, int draw_edges, int low_delay,
2924 int v_edge_pos, int h_edge_pos)
2926 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2927 int hshift = desc->log2_chroma_w;
2928 int vshift = desc->log2_chroma_h;
2929 const int field_pic = picture_structure != PICT_FRAME;
/* edge drawing is skipped for hwaccel/VDPAU and when the caller allocated
 * edge-less buffers (CODEC_FLAG_EMU_EDGE) */
2935 if (!avctx->hwaccel &&
2936 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2939 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2940 int *linesize = cur->f.linesize;
2941 int sides = 0, edge_h;
2942 if (y==0) sides |= EDGE_TOP;
2943 if (y + h >= v_edge_pos)
2944 sides |= EDGE_BOTTOM;
2946 edge_h= FFMIN(h, v_edge_pos - y);
2948 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2949 linesize[0], h_edge_pos, edge_h,
2950 EDGE_WIDTH, EDGE_WIDTH, sides);
2951 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2952 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2953 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2954 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2955 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2956 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
/* clip the band to the visible picture height */
2959 h = FFMIN(h, avctx->height - y);
2961 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2963 if (avctx->draw_horiz_band) {
2965 int offset[AV_NUM_DATA_POINTERS];
/* B-frames and low_delay can be shown immediately (display == coded order) */
2968 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2969 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2976 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2977 picture_structure == PICT_FRAME &&
2978 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2979 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2982 offset[0]= y * src->linesize[0];
2984 offset[2]= (y >> vshift) * src->linesize[1];
2985 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2991 avctx->draw_horiz_band(avctx, src, offset,
2992 y, picture_structure, h);
/* Thin wrapper: forwards the current MpegEncContext state to
 * ff_draw_horiz_band(); edges are drawn only for unrestricted-MV, non-intra-only streams. */
2996 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2998 int draw_edges = s->unrestricted_mv && !s->intra_only;
2999 ff_draw_horiz_band(s->avctx, &s->dsp, s->current_picture_ptr,
3000 s->last_picture_ptr, y, h, s->picture_structure,
3001 s->first_field, draw_edges, s->low_delay,
3002 s->v_edge_pos, s->h_edge_pos);
/* Computes the per-MB block indices and the dest[] plane pointers for the
 * current macroblock position (handles frame and field picture layouts). */
3005 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3006 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
3007 const int uvlinesize = s->current_picture.f.linesize[1];
/* mb_size is the log2 of the (lowres-scaled) macroblock width: 16 >> lowres */
3008 const int mb_size= 4 - s->avctx->lowres;
3010 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3011 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3012 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3013 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3014 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3015 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3016 //block_index is not used by mpeg2, so it is not affected by chroma_format
3018 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
3019 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3020 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3022 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3024 if(s->picture_structure==PICT_FRAME){
3025 s->dest[0] += s->mb_y * linesize << mb_size;
3026 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3027 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: only every second MB row belongs to this field */
3029 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3030 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3031 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3032 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3038 * Permute an 8x8 block.
3039 * @param block the block which will be permuted according to the given permutation vector
3040 * @param permutation the permutation vector
3041 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3042 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3043 * (inverse) permutated to scantable order!
3045 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3051 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* first pass: copy the coefficients touched by the scan into a temp buffer */
3053 for(i=0; i<=last; i++){
3054 const int j= scantable[i];
/* second pass: write them back at their permuted positions */
3059 for(i=0; i<=last; i++){
3060 const int j= scantable[i];
3061 const int perm_j= permutation[j];
3062 block[perm_j]= temp[j];
/* Flush callback: drops all reference pictures and resets parser/bitstream
 * state so decoding can restart cleanly (e.g. after a seek). */
3066 void ff_mpeg_flush(AVCodecContext *avctx){
3068 MpegEncContext *s = avctx->priv_data;
3070 if(s==NULL || s->picture==NULL)
3073 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3074 ff_mpeg_unref_picture(s, &s->picture[i]);
3075 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3077 ff_mpeg_unref_picture(s, &s->current_picture);
3078 ff_mpeg_unref_picture(s, &s->last_picture);
3079 ff_mpeg_unref_picture(s, &s->next_picture);
3081 s->mb_x= s->mb_y= 0;
/* reset the start-code parser so stale partial frames are discarded */
3084 s->parse_context.state= -1;
3085 s->parse_context.frame_start_found= 0;
3086 s->parse_context.overread= 0;
3087 s->parse_context.overread_index= 0;
3088 s->parse_context.index= 0;
3089 s->parse_context.last_index= 0;
3090 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantization (C reference): DC scaled by the DC scale
 * factor, AC by qscale * intra matrix, with MPEG-1's odd-value rounding. */
3094 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
3095 int16_t *block, int n, int qscale)
3097 int i, level, nCoeffs;
3098 const uint16_t *quant_matrix;
3100 nCoeffs= s->block_last_index[n];
/* n < 4 are luma blocks, the rest chroma */
3102 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3103 /* XXX: only mpeg1 */
3104 quant_matrix = s->intra_matrix;
3105 for(i=1;i<=nCoeffs;i++) {
3106 int j= s->intra_scantable.permutated[i];
3111 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* force the result odd (MPEG-1 mismatch control) */
3112 level = (level - 1) | 1;
3115 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3116 level = (level - 1) | 1;
/* MPEG-1 inter dequantization: (2*level + 1) * qscale * matrix / 16,
 * result forced odd for mismatch control. */
3123 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
3124 int16_t *block, int n, int qscale)
3126 int i, level, nCoeffs;
3127 const uint16_t *quant_matrix;
3129 nCoeffs= s->block_last_index[n];
3131 quant_matrix = s->inter_matrix;
3132 for(i=0; i<=nCoeffs; i++) {
3133 int j= s->intra_scantable.permutated[i];
3138 level = (((level << 1) + 1) * qscale *
3139 ((int) (quant_matrix[j]))) >> 4;
3140 level = (level - 1) | 1;
3143 level = (((level << 1) + 1) * qscale *
3144 ((int) (quant_matrix[j]))) >> 4;
3145 level = (level - 1) | 1;
/* MPEG-2 intra dequantization: like MPEG-1 but without the odd-value
 * rounding; alternate scan always covers all 64 coefficients. */
3152 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
3153 int16_t *block, int n, int qscale)
3155 int i, level, nCoeffs;
3156 const uint16_t *quant_matrix;
3158 if(s->alternate_scan) nCoeffs= 63;
3159 else nCoeffs= s->block_last_index[n];
3161 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3162 quant_matrix = s->intra_matrix;
3163 for(i=1;i<=nCoeffs;i++) {
3164 int j= s->intra_scantable.permutated[i];
3169 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3172 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of MPEG-2 intra dequantization; used when exact
 * conformance with the reference decoder is required. */
3179 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
3180 int16_t *block, int n, int qscale)
3182 int i, level, nCoeffs;
3183 const uint16_t *quant_matrix;
3186 if(s->alternate_scan) nCoeffs= 63;
3187 else nCoeffs= s->block_last_index[n];
3189 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3191 quant_matrix = s->intra_matrix;
3192 for(i=1;i<=nCoeffs;i++) {
3193 int j= s->intra_scantable.permutated[i];
3198 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3201 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantization: (2*level + 1) * qscale * matrix / 16,
 * no odd-value forcing (mismatch control handled elsewhere in MPEG-2). */
3210 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
3211 int16_t *block, int n, int qscale)
3213 int i, level, nCoeffs;
3214 const uint16_t *quant_matrix;
3217 if(s->alternate_scan) nCoeffs= 63;
3218 else nCoeffs= s->block_last_index[n];
3220 quant_matrix = s->inter_matrix;
3221 for(i=0; i<=nCoeffs; i++) {
3222 int j= s->intra_scantable.permutated[i];
3227 level = (((level << 1) + 1) * qscale *
3228 ((int) (quant_matrix[j]))) >> 4;
3231 level = (((level << 1) + 1) * qscale *
3232 ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra dequantization: level' = level*qmul +/- qadd with
 * qadd = (qscale-1)|1; DC handled separately via the DC scale tables. */
3241 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
3242 int16_t *block, int n, int qscale)
3244 int i, level, qmul, qadd;
3247 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
3252 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3253 qadd = (qscale - 1) | 1;
3260 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3262 for(i=1; i<=nCoeffs; i++) {
/* sign-dependent offset: subtract qadd for negative, add for positive levels */
3266 level = level * qmul - qadd;
3268 level = level * qmul + qadd;
/* H.263-style inter dequantization: same qmul/qadd scheme as the intra
 * variant, but including coefficient 0 (no special DC handling). */
3275 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
3276 int16_t *block, int n, int qscale)
3278 int i, level, qmul, qadd;
3281 av_assert2(s->block_last_index[n]>=0);
3283 qadd = (qscale - 1) | 1;
3286 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3288 for(i=0; i<=nCoeffs; i++) {
3292 level = level * qmul - qadd;
3294 level = level * qmul + qadd;
3302 * set qscale and update qscale dependent variables.
/* Clamps qscale to the valid range and refreshes the derived chroma qscale
 * and DC scale factors from their lookup tables. */
3304 void ff_set_qscale(MpegEncContext * s, int qscale)
3308 else if (qscale > 31)
3312 s->chroma_qscale= s->chroma_qscale_table[qscale];
3314 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3315 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Reports decode progress (current MB row) to frame-threading consumers;
 * suppressed for B-frames, partitioned frames, and after decode errors. */
3318 void ff_MPV_report_decode_progress(MpegEncContext *s)
3320 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3321 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3324 #if CONFIG_ERROR_RESILIENCE
/* Copies the state the error-resilience module needs from the codec context
 * into its ERContext and starts error tracking for the new frame. */
3325 void ff_mpeg_er_frame_start(MpegEncContext *s)
3327 ERContext *er = &s->er;
3329 er->cur_pic = s->current_picture_ptr;
3330 er->last_pic = s->last_picture_ptr;
3331 er->next_pic = s->next_picture_ptr;
3333 er->pp_time = s->pp_time;
3334 er->pb_time = s->pb_time;
3335 er->quarter_sample = s->quarter_sample;
3336 er->partitioned_frame = s->partitioned_frame;
3338 ff_er_frame_start(er);
3340 #endif /* CONFIG_ERROR_RESILIENCE */