2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
35 #include "h264chroma.h"
38 #include "mpegvideo.h"
41 #include "xvmc_internal.h"
/*
 * Forward declarations of the per-standard inverse-quantisation routines;
 * ff_dct_common_init() below installs these into the MpegEncContext
 * dct_unquantize_* function pointers.
 *
 * NOTE(review): the integer at the start of every line in this file is a
 * leftover original-file line number from extraction, and the gaps in
 * that numbering show that physical lines (braces, statements) have been
 * elided. Treat this chunk as a partial view of the file; compare with
 * the complete upstream source before building.
 */
48 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
49 int16_t *block, int n, int qscale);
50 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
51 int16_t *block, int n, int qscale);
52 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
53 int16_t *block, int n, int qscale);
54 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
55 int16_t *block, int n, int qscale);
56 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
57 int16_t *block, int n, int qscale);
58 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
59 int16_t *block, int n, int qscale);
60 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
61 int16_t *block, int n, int qscale);
/* Identity mapping: chroma qscale == luma qscale, indexed by qscale 0..31.
 * Used as the default via ff_MPV_common_defaults() below. */
static const uint8_t ff_default_chroma_qscale_table[32] = {
68 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
69 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
70 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* 128-entry constant-8 DC scale table (MPEG-1 style; presumably the
 * intra_dc_precision == 0 case — confirm against the MPEG-2 spec).
 * NOTE(review): the closing "};" of each table below is not visible in
 * this chunk — apparently elided by the extraction. */
73 const uint8_t ff_mpeg1_dc_scale_table[128] = {
74 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* Constant-4 DC scale table (presumably intra_dc_precision == 1). */
85 static const uint8_t mpeg2_dc_scale_table1[128] = {
86 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* Constant-2 DC scale table (presumably intra_dc_precision == 2). */
97 static const uint8_t mpeg2_dc_scale_table2[128] = {
98 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* Constant-1 DC scale table (presumably intra_dc_precision == 3). */
109 static const uint8_t mpeg2_dc_scale_table3[128] = {
110 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Lookup from a 0..3 index (presumably intra_dc_precision) to one of the
 * four DC scale tables above. */
121 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
122 ff_mpeg1_dc_scale_table,
123 mpeg2_dc_scale_table1,
124 mpeg2_dc_scale_table2,
125 mpeg2_dc_scale_table3,
/* Pixel-format list for 4:2:0 decoders; NOTE(review): the initializer
 * entries and terminator are elided in this chunk. */
128 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/*
 * Error-resilience callback: reconstruct one macroblock from the motion
 * parameters chosen by the concealment code (registered as er->decode_mb
 * in init_er() below).
 * NOTE(review): the opening brace, some assignments and the closing brace
 * are elided in this chunk.
 */
133 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
135 int mb_x, int mb_y, int mb_intra, int mb_skipped)
137 MpegEncContext *s = opaque;
/* Copy the concealment decision into the context fields that
 * ff_MPV_decode_mb() reads. */
140 s->mv_type = mv_type;
141 s->mb_intra = mb_intra;
142 s->mb_skipped = mb_skipped;
145 memcpy(s->mv, mv, sizeof(*mv));
147 ff_init_block_index(s);
148 ff_update_block_index(s);
150 s->dsp.clear_blocks(s->block[0]);
/* Point dest[] at this MB inside the current picture; chroma planes are
 * addressed through the chroma subsampling shifts. */
152 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
153 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
154 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
157 ff_MPV_decode_mb(s, s->block);
160 /* init common dct for both encoder and decoder */
/* Initialises the DSP sub-contexts, installs the C dequantisation
 * routines declared above (arch-specific init may override them), and
 * builds the permuted scantables. */
161 av_cold int ff_dct_common_init(MpegEncContext *s)
163 ff_dsputil_init(&s->dsp, s->avctx);
164 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
165 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
166 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* Default C implementations; the bitexact MPEG-2 intra variant is used
 * when CODEC_FLAG_BITEXACT is set. */
168 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
169 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
170 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
171 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
172 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
173 if (s->flags & CODEC_FLAG_BITEXACT)
174 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
175 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture initialisation; NOTE(review): the ARCH_* / HAVE_*
 * preprocessor guards around these calls are elided in this chunk. */
178 ff_MPV_common_init_x86(s);
180 ff_MPV_common_init_axp(s);
182 ff_MPV_common_init_arm(s);
184 ff_MPV_common_init_bfin(s);
186 ff_MPV_common_init_ppc(s);
189 /* load & permutate scantables
190 * note: only wmv uses different ones
/* Inter/intra scantables follow alternate_scan; the h/v intra tables are
 * always the alternate horizontal/vertical scans. */
192 if (s->alternate_scan) {
193 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
194 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
196 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
197 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
199 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
200 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/*
 * Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads). Returns 0 on success,
 * AVERROR(ENOMEM) on allocation failure.
 */
205 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
207 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
209 // edge emu needs blocksize + filter length - 1
210 // (= 17x17 for halfpel / 21x21 for h264)
211 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
212 // at uvlinesize. It supports only YUV420 so 24x24 is enough
213 // linesize * interlaced * MBsize
214 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
217 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
/* The four scratchpad pointers below all alias the single me.scratchpad
 * allocation (obmc at a +16 offset) rather than owning memory. */
219 s->me.temp = s->me.scratchpad;
220 s->rd_scratchpad = s->me.scratchpad;
221 s->b_scratchpad = s->me.scratchpad;
222 s->obmc_scratchpad = s->me.scratchpad + 16;
/* fail path: undo the edge_emu_buffer allocation. */
226 av_freep(&s->edge_emu_buffer);
227 return AVERROR(ENOMEM);
231 * Allocate a frame buffer
/* Obtains pixel data for pic: via the (possibly threaded) get_buffer
 * path for normal codecs, or a private default buffer for the WM
 * image/screen codecs, then validates strides and allocates the
 * context scratch buffers. NOTE(review): error-return statements and
 * braces between the checks are elided in this chunk. */
233 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* WMV3IMAGE/VC1IMAGE/MSS2 use internal buffers with their own
 * dimensions, so user get_buffer callbacks are bypassed for them. */
238 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
239 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
240 s->codec_id != AV_CODEC_ID_MSS2)
241 r = ff_thread_get_buffer(s->avctx, &pic->tf,
242 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
244 pic->f.width = s->avctx->width;
245 pic->f.height = s->avctx->height;
246 pic->f.format = s->avctx->pix_fmt;
247 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
250 if (r < 0 || !pic->f.data[0]) {
251 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Hardware acceleration: allocate per-picture hwaccel private data if
 * the hwaccel declares any. */
256 if (s->avctx->hwaccel) {
257 assert(!pic->hwaccel_picture_private);
258 if (s->avctx->hwaccel->priv_data_size) {
259 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
260 if (!pic->hwaccel_priv_buf) {
261 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
264 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* The context caches linesize/uvlinesize; a buffer with different
 * strides cannot be used once they are set. */
268 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
269 s->uvlinesize != pic->f.linesize[1])) {
270 av_log(s->avctx, AV_LOG_ERROR,
271 "get_buffer() failed (stride changed)\n");
272 ff_mpeg_unref_picture(s, pic);
276 if (pic->f.linesize[1] != pic->f.linesize[2]) {
277 av_log(s->avctx, AV_LOG_ERROR,
278 "get_buffer() failed (uv stride mismatch)\n");
279 ff_mpeg_unref_picture(s, pic);
/* First successful allocation also sizes the shared scratch buffers. */
283 if (!s->edge_emu_buffer &&
284 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
285 av_log(s->avctx, AV_LOG_ERROR,
286 "get_buffer() failed to allocate context scratch buffers.\n");
287 ff_mpeg_unref_picture(s, pic);
/*
 * Release all per-picture side-data buffers (AVBuffer refs) and reset
 * the recorded allocation dimensions so the tables are re-allocated on
 * next use.
 */
294 static void free_picture_tables(Picture *pic)
298 pic->alloc_mb_width =
299 pic->alloc_mb_height = 0;
301 av_buffer_unref(&pic->mb_var_buf);
302 av_buffer_unref(&pic->mc_mb_var_buf);
303 av_buffer_unref(&pic->mb_mean_buf);
304 av_buffer_unref(&pic->mbskip_table_buf);
305 av_buffer_unref(&pic->qscale_table_buf);
306 av_buffer_unref(&pic->mb_type_buf);
/* Per-direction (forward/backward) motion vector and ref index buffers. */
308 for (i = 0; i < 2; i++) {
309 av_buffer_unref(&pic->motion_val_buf[i]);
310 av_buffer_unref(&pic->ref_index_buf[i]);
/*
 * Allocate the per-picture side-data tables (skip/qscale/mb_type always;
 * variance/mean when encoding; motion vectors and ref indices for H.263
 * family, encoding, or MV debugging). Returns 0 on success or
 * AVERROR(ENOMEM). NOTE(review): the "if (s->encoding)" style guards
 * around some groups appear to be elided in this chunk.
 */
314 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
316 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
317 const int mb_array_size = s->mb_stride * s->mb_height;
318 const int b8_array_size = s->b8_stride * s->mb_height * 2;
322 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
323 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
324 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
326 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
327 return AVERROR(ENOMEM);
/* Encoder statistics tables (SAD/variance per MB). */
330 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
331 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
332 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
333 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
334 return AVERROR(ENOMEM);
337 if (s->out_format == FMT_H263 || s->encoding ||
338 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
339 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
340 int ref_index_size = 4 * mb_array_size;
342 for (i = 0; mv_size && i < 2; i++) {
343 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
344 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
345 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
346 return AVERROR(ENOMEM);
/* Remember the dimensions these tables were sized for, so
 * ff_alloc_picture() can detect a resolution change. */
350 pic->alloc_mb_width = s->mb_width;
351 pic->alloc_mb_height = s->mb_height;
/*
 * Ensure each per-picture table buffer is writable (copy-on-write via
 * av_buffer_make_writable). The MAKE_WRITABLE macro bails out of the
 * function on failure; NOTE(review): part of the macro's condition and
 * the failure branch are elided in this chunk.
 */
356 static int make_tables_writable(Picture *pic)
359 #define MAKE_WRITABLE(table) \
362 (ret = av_buffer_make_writable(&pic->table)) < 0)\
366 MAKE_WRITABLE(mb_var_buf);
367 MAKE_WRITABLE(mc_mb_var_buf);
368 MAKE_WRITABLE(mb_mean_buf);
369 MAKE_WRITABLE(mbskip_table_buf);
370 MAKE_WRITABLE(qscale_table_buf);
371 MAKE_WRITABLE(mb_type_buf);
373 for (i = 0; i < 2; i++) {
374 MAKE_WRITABLE(motion_val_buf[i]);
375 MAKE_WRITABLE(ref_index_buf[i]);
382 * Allocate a Picture.
383 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Top-level picture allocator: (re)allocates the frame buffer and the
 * side-data tables, then derives the convenience pointers from the
 * AVBuffer data. NOTE(review): the shared/non-shared branch structure
 * and some error-path lines are elided in this chunk. */
385 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* Drop stale tables if the MB dimensions changed since they were made. */
389 if (pic->qscale_table_buf)
390 if ( pic->alloc_mb_width != s->mb_width
391 || pic->alloc_mb_height != s->mb_height)
392 free_picture_tables(pic);
395 assert(pic->f.data[0]);
398 assert(!pic->f.data[0]);
400 if (alloc_frame_buffer(s, pic) < 0)
/* Cache the strides chosen by the allocator for the whole context. */
403 s->linesize = pic->f.linesize[0];
404 s->uvlinesize = pic->f.linesize[1];
407 if (!pic->qscale_table_buf)
408 ret = alloc_picture_tables(s, pic);
410 ret = make_tables_writable(pic);
415 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
416 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
417 pic->mb_mean = pic->mb_mean_buf->data;
/* qscale_table/mb_type skip the first 2*mb_stride+1 entries so that
 * negative-neighbour accesses at the picture border stay in bounds. */
420 pic->mbskip_table = pic->mbskip_table_buf->data;
421 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
422 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
424 if (pic->motion_val_buf[0]) {
425 for (i = 0; i < 2; i++) {
426 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
427 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* fail path: release everything attached to this picture. */
433 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
434 ff_mpeg_unref_picture(s, pic);
435 free_picture_tables(pic);
436 return AVERROR(ENOMEM);
440 * Deallocate a picture.
/* Releases the frame/hwaccel buffers and clears the transient part of
 * the Picture. The offsetof trick zeroes every field located after
 * mb_mean, leaving the table buffers/pointers up to and including
 * mb_mean intact (they are managed by free_picture_tables()). */
442 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
444 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
447 /* WM Image / Screen codecs allocate internal buffers with different
448 * dimensions / colorspaces; ignore user-defined callbacks for these. */
449 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
450 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
451 s->codec_id != AV_CODEC_ID_MSS2)
452 ff_thread_release_buffer(s->avctx, &pic->tf);
454 av_frame_unref(&pic->f);
456 av_buffer_unref(&pic->hwaccel_priv_buf);
458 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/*
 * Make dst's side-data tables reference the same AVBuffers as src's
 * (refcounted sharing, no copy). On any ref failure all of dst's tables
 * are freed and AVERROR(ENOMEM) is returned; otherwise the raw pointers
 * and dimensions are mirrored from src. NOTE(review): part of the
 * UPDATE_TABLE macro body (the null check after av_buffer_ref) is
 * elided in this chunk.
 */
461 static int update_picture_tables(Picture *dst, Picture *src)
465 #define UPDATE_TABLE(table)\
468 (!dst->table || dst->table->buffer != src->table->buffer)) {\
469 av_buffer_unref(&dst->table);\
470 dst->table = av_buffer_ref(src->table);\
472 free_picture_tables(dst);\
473 return AVERROR(ENOMEM);\
478 UPDATE_TABLE(mb_var_buf);
479 UPDATE_TABLE(mc_mb_var_buf);
480 UPDATE_TABLE(mb_mean_buf);
481 UPDATE_TABLE(mbskip_table_buf);
482 UPDATE_TABLE(qscale_table_buf);
483 UPDATE_TABLE(mb_type_buf);
484 for (i = 0; i < 2; i++) {
485 UPDATE_TABLE(motion_val_buf[i]);
486 UPDATE_TABLE(ref_index_buf[i]);
/* Mirror the derived pointers; they point into the shared buffers. */
489 dst->mb_var = src->mb_var;
490 dst->mc_mb_var = src->mc_mb_var;
491 dst->mb_mean = src->mb_mean;
492 dst->mbskip_table = src->mbskip_table;
493 dst->qscale_table = src->qscale_table;
494 dst->mb_type = src->mb_type;
495 for (i = 0; i < 2; i++) {
496 dst->motion_val[i] = src->motion_val[i];
497 dst->ref_index[i] = src->ref_index[i];
500 dst->alloc_mb_width = src->alloc_mb_width;
501 dst->alloc_mb_height = src->alloc_mb_height;
/*
 * Create a new reference of src in dst: frame buffer (via
 * ff_thread_ref_frame), side-data tables, hwaccel private data, and the
 * scalar metadata fields. dst must be empty on entry. On failure dst is
 * unreferenced again (fail label). NOTE(review): error-check lines after
 * the ref calls are elided in this chunk.
 */
506 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
510 av_assert0(!dst->f.buf[0]);
511 av_assert0(src->f.buf[0]);
515 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
519 ret = update_picture_tables(dst, src);
523 if (src->hwaccel_picture_private) {
524 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
525 if (!dst->hwaccel_priv_buf)
527 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
/* Plain-value metadata copied by assignment. */
530 dst->field_picture = src->field_picture;
531 dst->mb_var_sum = src->mb_var_sum;
532 dst->mc_mb_var_sum = src->mc_mb_var_sum;
533 dst->b_frame_score = src->b_frame_score;
534 dst->needs_realloc = src->needs_realloc;
535 dst->reference = src->reference;
536 dst->shared = src->shared;
/* fail path: roll back the partially-built reference. */
540 ff_mpeg_unref_picture(s, dst);
/*
 * Allocate the per-slice-thread members of an MpegEncContext: ME maps,
 * optional noise-reduction error sums, DCT block storage, and (for the
 * H.263 family) AC prediction values. Scratch buffers themselves are
 * allocated later, once the linesize is known (ff_mpv_frame_size_alloc).
 */
544 static int init_duplicate_context(MpegEncContext *s)
546 int y_size = s->b8_stride * (2 * s->mb_height + 1);
547 int c_size = s->mb_stride * (s->mb_height + 1);
548 int yc_size = y_size + 2 * c_size;
556 s->obmc_scratchpad = NULL;
559 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
560 ME_MAP_SIZE * sizeof(uint32_t), fail)
561 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
562 ME_MAP_SIZE * sizeof(uint32_t), fail)
563 if (s->avctx->noise_reduction) {
564 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
565 2 * 64 * sizeof(int), fail)
/* Room for 12 blocks of 64 coefficients, double-buffered. */
568 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
569 s->block = s->blocks[0];
571 for (i = 0; i < 12; i++) {
572 s->pblocks[i] = &s->block[i];
575 if (s->out_format == FMT_H263) {
577 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
578 yc_size * sizeof(int16_t) * 16, fail);
/* ac_val[0] = luma plane, ac_val[1]/[2] = chroma planes; each offset by
 * one row+column so border neighbours are addressable. */
579 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
580 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
581 s->ac_val[2] = s->ac_val[1] + c_size;
586 return -1; // free() through ff_MPV_common_end()
/*
 * Free the per-slice-thread allocations made by init_duplicate_context()
 * and ff_mpv_frame_size_alloc(). Only me.scratchpad is freed for the
 * scratch group — rd/b/obmc scratchpads alias it (see
 * ff_mpv_frame_size_alloc) and must only be nulled, not freed.
 */
589 static void free_duplicate_context(MpegEncContext *s)
594 av_freep(&s->edge_emu_buffer);
595 av_freep(&s->me.scratchpad);
599 s->obmc_scratchpad = NULL;
601 av_freep(&s->dct_error_sum);
602 av_freep(&s->me.map);
603 av_freep(&s->me.score_map);
604 av_freep(&s->blocks);
605 av_freep(&s->ac_val_base);
/*
 * Save the per-thread pointer members of src into bak so that
 * ff_update_duplicate_context() can memcpy the whole struct and then
 * restore them. NOTE(review): most COPY(...) lines are elided in this
 * chunk; only a subset is visible.
 */
609 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
611 #define COPY(a) bak->a = src->a
612 COPY(edge_emu_buffer);
617 COPY(obmc_scratchpad);
624 COPY(me.map_generation);
/*
 * Refresh a slice-thread context from the master: copy the whole struct
 * while preserving dst's own per-thread buffers (via
 * backup_duplicate_context), re-point pblocks into dst's block storage,
 * and lazily allocate scratch buffers if dst has none yet.
 */
636 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
640 // FIXME copy only needed parts
642 backup_duplicate_context(&bak, dst);
643 memcpy(dst, src, sizeof(MpegEncContext));
644 backup_duplicate_context(dst, &bak);
/* pblocks must point into dst's own block array, not src's. */
645 for (i = 0; i < 12; i++) {
646 dst->pblocks[i] = &dst->block[i];
648 if (!dst->edge_emu_buffer &&
649 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
650 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
651 "scratch buffers.\n");
654 // STOP_TIMER("update_duplicate_context")
655 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/*
 * Frame-threading state transfer: synchronise decoder context s (dst)
 * with s1 (src) between frames — first-call full init, resolution
 * change handling, picture references, MPEG-4/MPEG-2 field blocks,
 * bitstream buffer copy, and scratch-buffer allocation.
 * NOTE(review): many guard lines, braces and early returns are elided
 * in this chunk; the visible statements show the overall flow only.
 */
659 int ff_mpeg_update_thread_context(AVCodecContext *dst,
660 const AVCodecContext *src)
663 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
670 // FIXME can parameters change on I-frames?
671 // in that case dst may need a reinit
672 if (!s->context_initialized) {
/* First call: clone the whole context, then reset the members that must
 * be owned per-thread before re-running common init. */
673 memcpy(s, s1, sizeof(MpegEncContext));
676 s->bitstream_buffer = NULL;
677 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
679 if (s1->context_initialized){
680 // s->picture_range_start += MAX_PICTURE_COUNT;
681 // s->picture_range_end += MAX_PICTURE_COUNT;
682 if((ret = ff_MPV_common_init(s)) < 0){
683 memset(s, 0, sizeof(MpegEncContext));
/* Resolution change: rebuild the size-dependent state. */
690 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
691 s->context_reinit = 0;
692 s->height = s1->height;
693 s->width = s1->width;
694 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
698 s->avctx->coded_height = s1->avctx->coded_height;
699 s->avctx->coded_width = s1->avctx->coded_width;
700 s->avctx->width = s1->avctx->width;
701 s->avctx->height = s1->avctx->height;
703 s->coded_picture_number = s1->coded_picture_number;
704 s->picture_number = s1->picture_number;
705 s->input_picture_number = s1->input_picture_number;
707 av_assert0(!s->picture || s->picture != s1->picture);
/* Re-reference every picture from the source thread's pool. */
709 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
710 ff_mpeg_unref_picture(s, &s->picture[i]);
711 if (s1->picture[i].f.data[0] &&
712 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
716 #define UPDATE_PICTURE(pic)\
718 ff_mpeg_unref_picture(s, &s->pic);\
719 if (s1->pic.f.data[0])\
720 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
722 ret = update_picture_tables(&s->pic, &s1->pic);\
727 UPDATE_PICTURE(current_picture);
728 UPDATE_PICTURE(last_picture);
729 UPDATE_PICTURE(next_picture);
/* The *_ptr members point into s1's picture array; rebase them into
 * s's array (REBASE_PICTURE presumably does pointer arithmetic between
 * the two pools — its definition is not visible here). */
731 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
732 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
733 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
735 // Error/bug resilience
736 s->next_p_frame_damaged = s1->next_p_frame_damaged;
737 s->workaround_bugs = s1->workaround_bugs;
738 s->padding_bug_score = s1->padding_bug_score;
/* Bulk-copy the MPEG-4 field range [time_increment_bits, shape). */
741 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
742 (char *) &s1->shape - (char *) &s1->time_increment_bits);
745 s->max_b_frames = s1->max_b_frames;
746 s->low_delay = s1->low_delay;
747 s->droppable = s1->droppable;
749 // DivX handling (doesn't work)
750 s->divx_packed = s1->divx_packed;
752 if (s1->bitstream_buffer) {
753 if (s1->bitstream_buffer_size +
754 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
/* NOTE(review): av_fast_malloc's result is not checked here; on OOM
 * s->bitstream_buffer is NULL and the memcpy below would crash — worth
 * verifying against the complete file / fixing upstream. */
755 av_fast_malloc(&s->bitstream_buffer,
756 &s->allocated_bitstream_buffer_size,
757 s1->allocated_bitstream_buffer_size);
758 s->bitstream_buffer_size = s1->bitstream_buffer_size;
759 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
760 s1->bitstream_buffer_size);
761 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
762 FF_INPUT_BUFFER_PADDING_SIZE);
765 // linesize dependend scratch buffer allocation
766 if (!s->edge_emu_buffer)
768 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
769 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
770 "scratch buffers.\n");
771 return AVERROR(ENOMEM);
774 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
775 "be allocated due to unknown size.\n");
778 // MPEG2/interlacing info
/* Bulk-copy the field range [progressive_sequence, rtp_mode). */
779 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
780 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
782 if (!s1->first_field) {
783 s->last_pict_type = s1->pict_type;
784 if (s1->current_picture_ptr)
785 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
787 if (s1->pict_type != AV_PICTURE_TYPE_B) {
788 s->last_non_b_pict_type = s1->pict_type;
796 * Set the given MpegEncContext to common defaults
797 * (same for encoding and decoding).
798 * The changed fields will not depend upon the
799 * prior state of the MpegEncContext.
801 void ff_MPV_common_defaults(MpegEncContext *s)
/* Default scale tables: MPEG-1 DC scale for both luma and chroma,
 * identity chroma qscale mapping (defined near the top of this file). */
803 s->y_dc_scale_table =
804 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
805 s->chroma_qscale_table = ff_default_chroma_qscale_table;
806 s->progressive_frame = 1;
807 s->progressive_sequence = 1;
808 s->picture_structure = PICT_FRAME;
810 s->coded_picture_number = 0;
811 s->picture_number = 0;
812 s->input_picture_number = 0;
814 s->picture_in_gop_number = 0;
819 s->slice_context_count = 1;
823 * Set the given MpegEncContext to defaults for decoding.
824 * the changed fields will not depend upon
825 * the prior state of the MpegEncContext.
/* Currently just delegates to the common defaults; kept as a separate
 * entry point for decoder callers. */
827 void ff_MPV_decode_defaults(MpegEncContext *s)
829 ff_MPV_common_defaults(s);
/*
 * Initialise the error-resilience sub-context: mirror the MB geometry
 * from the MpegEncContext, allocate the status/temp tables, and register
 * mpeg_er_decode_mb (defined above) as the concealment callback.
 * Returns 0 on success or AVERROR(ENOMEM) via the fail path.
 */
832 static int init_er(MpegEncContext *s)
834 ERContext *er = &s->er;
835 int mb_array_size = s->mb_height * s->mb_stride;
838 er->avctx = s->avctx;
841 er->mb_index2xy = s->mb_index2xy;
842 er->mb_num = s->mb_num;
843 er->mb_width = s->mb_width;
844 er->mb_height = s->mb_height;
845 er->mb_stride = s->mb_stride;
846 er->b8_stride = s->b8_stride;
848 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
849 er->error_status_table = av_mallocz(mb_array_size);
850 if (!er->er_temp_buffer || !er->error_status_table)
/* These tables are shared with (owned by) the MpegEncContext. */
853 er->mbskip_table = s->mbskip_table;
854 er->mbintra_table = s->mbintra_table;
856 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
857 er->dc_val[i] = s->dc_val[i];
859 er->decode_mb = mpeg_er_decode_mb;
/* fail path */
864 av_freep(&er->er_temp_buffer);
865 av_freep(&er->error_status_table);
866 return AVERROR(ENOMEM);
870 * Initialize and allocates MpegEncContext fields dependent on the resolution.
872 static int init_context_frame(MpegEncContext *s)
874 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Derive MB/block geometry from the frame width; strides include one
 * extra column so neighbour accesses at the right edge stay in bounds. */
876 s->mb_width = (s->width + 15) / 16;
877 s->mb_stride = s->mb_width + 1;
878 s->b8_stride = s->mb_width * 2 + 1;
879 s->b4_stride = s->mb_width * 4 + 1;
880 mb_array_size = s->mb_height * s->mb_stride;
881 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
883 /* set default edge pos, will be overriden
884 * in decode_header if needed */
885 s->h_edge_pos = s->mb_width * 16;
886 s->v_edge_pos = s->mb_height * 16;
888 s->mb_num = s->mb_width * s->mb_height;
893 s->block_wrap[3] = s->b8_stride;
895 s->block_wrap[5] = s->mb_stride;
897 y_size = s->b8_stride * (2 * s->mb_height + 1);
898 c_size = s->mb_stride * (s->mb_height + 1);
899 yc_size = y_size + 2 * c_size;
901 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
902 for (y = 0; y < s->mb_height; y++)
903 for (x = 0; x < s->mb_width; x++)
904 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
906 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
909 /* Allocate MV tables */
/* Each *_mv_table points mb_stride+1 entries into its *_base so that
 * row/column -1 neighbours are addressable. NOTE(review): the encoder
 * guard around this group appears to be elided in this chunk. */
910 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
911 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
912 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
913 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
914 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
915 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
916 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
917 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
918 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
919 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
920 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
921 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
923 /* Allocate MB type table */
924 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
926 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
928 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
929 mb_array_size * sizeof(float), fail);
930 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
931 mb_array_size * sizeof(float), fail);
935 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
936 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
937 /* interlaced direct mode decoding tables */
938 for (i = 0; i < 2; i++) {
940 for (j = 0; j < 2; j++) {
941 for (k = 0; k < 2; k++) {
942 FF_ALLOCZ_OR_GOTO(s->avctx,
943 s->b_field_mv_table_base[i][j][k],
944 mv_table_size * 2 * sizeof(int16_t),
946 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
949 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
950 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
951 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
953 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
956 if (s->out_format == FMT_H263) {
958 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
959 s->coded_block = s->coded_block_base + s->b8_stride + 1;
961 /* cbp, ac_pred, pred_dir */
962 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
963 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
966 if (s->h263_pred || s->h263_plus || !s->encoding) {
968 // MN: we need these for error resilience of intra-frames
/* DC prediction values, initialised to the spec reset value 1024. */
969 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
970 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
971 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
972 s->dc_val[2] = s->dc_val[1] + c_size;
973 for (i = 0; i < yc_size; i++)
974 s->dc_val_base[i] = 1024;
977 /* which mb is a intra block */
978 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
979 memset(s->mbintra_table, 1, mb_array_size);
981 /* init macroblock skip table */
982 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
983 // Note the + 1 is for a quicker mpeg4 slice_end detection
/* fail path */
987 return AVERROR(ENOMEM);
991 * init common structure for both encoder and decoder.
992 * this assumes that some variables like width/height are already set
994 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* Slice threading: one context per slice thread, else a single one. */
997 int nb_slices = (HAVE_THREADS &&
998 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
999 s->avctx->thread_count : 1;
1001 if (s->encoding && s->avctx->slices)
1002 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 rounds mb_height to a multiple of two MB rows. */
1004 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1005 s->mb_height = (s->height + 31) / 32 * 2;
1007 s->mb_height = (s->height + 15) / 16;
1009 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1010 av_log(s->avctx, AV_LOG_ERROR,
1011 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp the slice count to MAX_THREADS and to one slice per MB row. */
1015 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1018 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1020 max_slices = MAX_THREADS;
1021 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1022 " reducing to %d\n", nb_slices, max_slices);
1023 nb_slices = max_slices;
1026 if ((s->width || s->height) &&
1027 av_image_check_size(s->width, s->height, 0, s->avctx))
1030 ff_dct_common_init(s);
1032 s->flags = s->avctx->flags;
1033 s->flags2 = s->avctx->flags2;
1035 /* set chroma shifts */
1036 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
1038 /* convert fourcc to upper case */
1039 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1040 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1042 s->avctx->coded_frame = &s->current_picture.f;
/* Encoder-only tables; NOTE(review): the "if (s->encoding)" guard around
 * this group appears to be elided in this chunk. */
1045 if (s->msmpeg4_version) {
1046 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1047 2 * 2 * (MAX_LEVEL + 1) *
1048 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1050 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1052 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
1053 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
1054 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
1055 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1056 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1057 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1058 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1059 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1061 if (s->avctx->noise_reduction) {
1062 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
/* Picture pool + the three working Picture structs. */
1066 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1067 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1068 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1069 avcodec_get_frame_defaults(&s->picture[i].f);
1071 memset(&s->next_picture, 0, sizeof(s->next_picture));
1072 memset(&s->last_picture, 0, sizeof(s->last_picture));
1073 memset(&s->current_picture, 0, sizeof(s->current_picture));
1074 avcodec_get_frame_defaults(&s->next_picture.f);
1075 avcodec_get_frame_defaults(&s->last_picture.f);
1076 avcodec_get_frame_defaults(&s->current_picture.f);
1078 if (init_context_frame(s))
1081 s->parse_context.state = -1;
1083 s->context_initialized = 1;
1084 s->thread_context[0] = s;
1086 //     if (s->width && s->height) {
/* Slice threads 1..n-1 get cloned contexts; each thread context is then
 * given its own duplicate buffers and an MB-row range to decode. */
1087 if (nb_slices > 1) {
1088 for (i = 1; i < nb_slices; i++) {
1089 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1090 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1093 for (i = 0; i < nb_slices; i++) {
1094 if (init_duplicate_context(s->thread_context[i]) < 0)
1096 s->thread_context[i]->start_mb_y =
1097 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1098 s->thread_context[i]->end_mb_y =
1099 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1102 if (init_duplicate_context(s) < 0)
1105 s->end_mb_y = s->mb_height;
1107 s->slice_context_count = nb_slices;
/* fail path: tear down everything allocated so far. */
1112 ff_MPV_common_end(s);
1117 * Frees and resets MpegEncContext fields depending on the resolution.
1118 * Is used during resolution changes to avoid a full reinitialization of the
/* Inverse of init_context_frame(): frees every resolution-dependent
 * table and nulls the derived (non-owning) pointers into the *_base
 * allocations. */
1121 static int free_context_frame(MpegEncContext *s)
1125 av_freep(&s->mb_type);
1126 av_freep(&s->p_mv_table_base);
1127 av_freep(&s->b_forw_mv_table_base);
1128 av_freep(&s->b_back_mv_table_base);
1129 av_freep(&s->b_bidir_forw_mv_table_base);
1130 av_freep(&s->b_bidir_back_mv_table_base);
1131 av_freep(&s->b_direct_mv_table_base);
1132 s->p_mv_table = NULL;
1133 s->b_forw_mv_table = NULL;
1134 s->b_back_mv_table = NULL;
1135 s->b_bidir_forw_mv_table = NULL;
1136 s->b_bidir_back_mv_table = NULL;
1137 s->b_direct_mv_table = NULL;
1138 for (i = 0; i < 2; i++) {
1139 for (j = 0; j < 2; j++) {
1140 for (k = 0; k < 2; k++) {
1141 av_freep(&s->b_field_mv_table_base[i][j][k]);
1142 s->b_field_mv_table[i][j][k] = NULL;
1144 av_freep(&s->b_field_select_table[i][j]);
1145 av_freep(&s->p_field_mv_table_base[i][j]);
1146 s->p_field_mv_table[i][j] = NULL;
1148 av_freep(&s->p_field_select_table[i]);
1151 av_freep(&s->dc_val_base);
1152 av_freep(&s->coded_block_base);
1153 av_freep(&s->mbintra_table);
1154 av_freep(&s->cbp_table);
1155 av_freep(&s->pred_dir_table);
1157 av_freep(&s->mbskip_table);
1159 av_freep(&s->er.error_status_table);
1160 av_freep(&s->er.er_temp_buffer);
1161 av_freep(&s->mb_index2xy);
1162 av_freep(&s->lambda_table);
1164 av_freep(&s->cplx_tab);
1165 av_freep(&s->bits_tab);
/* Strides are reset so the next frame allocation re-derives them. */
1167 s->linesize = s->uvlinesize = 0;
1172 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Re-initialize the context for a new frame size without a full
 * ff_MPV_common_end()/init cycle: tear down per-slice duplicate contexts
 * and resolution-dependent buffers, recompute mb_height, then rebuild.
 * Returns 0 on success, a negative AVERROR on failure (error paths are
 * partially elided in this view). */
/* Free slice thread contexts; context 0 is the main context and is only
 * free_duplicate_context()'d, never av_freep()'d. */
1176     if (s->slice_context_count > 1) {
1177         for (i = 0; i < s->slice_context_count; i++) {
1178             free_duplicate_context(s->thread_context[i]);
1180         for (i = 1; i < s->slice_context_count; i++) {
1181             av_freep(&s->thread_context[i]);
1184         free_duplicate_context(s);
1186     if ((err = free_context_frame(s)) < 0)
/* Existing pictures keep their data but are flagged so the next
 * ff_alloc_picture() reallocates them at the new size. */
1190     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1191         s->picture[i].needs_realloc = 1;
1194     s->last_picture_ptr         =
1195     s->next_picture_ptr         =
1196     s->current_picture_ptr      = NULL;
/* Interlaced MPEG-2 needs an even mb_height (two field MB rows per 32
 * luma lines); everything else rounds height up to whole 16-pel MB rows. */
1199     if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1200         s->mb_height = (s->height + 31) / 32 * 2;
1202         s->mb_height = (s->height + 15) / 16;
1204     if ((s->width || s->height) &&
1205         av_image_check_size(s->width, s->height, 0, s->avctx))
1206         return AVERROR_INVALIDDATA;
1208     if ((err = init_context_frame(s)))
1211     s->thread_context[0] = s;
1213     if (s->width && s->height) {
1214         int nb_slices = s->slice_context_count;
/* Recreate one duplicate context per slice thread and split the MB rows
 * evenly (with rounding) among them. */
1215         if (nb_slices > 1) {
1216             for (i = 1; i < nb_slices; i++) {
1217                 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1218                 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1221             for (i = 0; i < nb_slices; i++) {
1222                 if (init_duplicate_context(s->thread_context[i]) < 0)
1224                 s->thread_context[i]->start_mb_y =
1225                     (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1226                 s->thread_context[i]->end_mb_y   =
1227                     (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1230             if (init_duplicate_context(s) < 0)
1233             s->end_mb_y   = s->mb_height;
1235         s->slice_context_count = nb_slices;
/* NOTE(review): reached via an elided "fail:" label — full teardown on error. */
1240     ff_MPV_common_end(s);
1244 /* free everything allocated by the common structure for both encoder and decoder */
1245 void ff_MPV_common_end(MpegEncContext *s)
/* Free everything allocated for the common encoder/decoder context:
 * slice thread contexts, parser/bitstream buffers, quantizer matrices,
 * all Picture entries and the resolution-dependent tables.
 * Leaves the context in a state where init can be called again. */
1249     if (s->slice_context_count > 1) {
1250         for (i = 0; i < s->slice_context_count; i++) {
1251             free_duplicate_context(s->thread_context[i]);
1253         for (i = 1; i < s->slice_context_count; i++) {
1254             av_freep(&s->thread_context[i]);
1256         s->slice_context_count = 1;
1257     } else free_duplicate_context(s);
1259     av_freep(&s->parse_context.buffer);
1260     s->parse_context.buffer_size = 0;
1262     av_freep(&s->bitstream_buffer);
1263     s->allocated_bitstream_buffer_size = 0;
1265     av_freep(&s->avctx->stats_out);
1266     av_freep(&s->ac_stats);
/* The chroma matrices may alias the luma ones; only free them when they
 * are separate allocations, then clear the (possibly aliasing) pointers. */
1268     if(s->q_chroma_intra_matrix   != s->q_intra_matrix  ) av_freep(&s->q_chroma_intra_matrix);
1269     if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1270     s->q_chroma_intra_matrix=   NULL;
1271     s->q_chroma_intra_matrix16= NULL;
1272     av_freep(&s->q_intra_matrix);
1273     av_freep(&s->q_inter_matrix);
1274     av_freep(&s->q_intra_matrix16);
1275     av_freep(&s->q_inter_matrix16);
1276     av_freep(&s->input_picture);
1277     av_freep(&s->reordered_input_picture);
1278     av_freep(&s->dct_offset);
/* Unreference each picture before freeing the array, then the four
 * standalone Picture copies held directly in the context. */
1281     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1282         free_picture_tables(&s->picture[i]);
1283         ff_mpeg_unref_picture(s, &s->picture[i]);
1286     av_freep(&s->picture);
1287     free_picture_tables(&s->last_picture);
1288     ff_mpeg_unref_picture(s, &s->last_picture);
1289     free_picture_tables(&s->current_picture);
1290     ff_mpeg_unref_picture(s, &s->current_picture);
1291     free_picture_tables(&s->next_picture);
1292     ff_mpeg_unref_picture(s, &s->next_picture);
1293     free_picture_tables(&s->new_picture);
1294     ff_mpeg_unref_picture(s, &s->new_picture);
1296     free_context_frame(s);
1298     s->context_initialized      = 0;
1299     s->last_picture_ptr         =
1300     s->next_picture_ptr         =
1301     s->current_picture_ptr      = NULL;
1302     s->linesize = s->uvlinesize = 0;
1305 av_cold void ff_init_rl(RLTable *rl,
1306                         uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
/* Build the max_level[], max_run[] and index_run[] lookup tables for an
 * RL (run/level) VLC table, once per "last" flag (0 = not last coef,
 * 1 = last coef).  If static_store is non-NULL the tables live in the
 * caller-provided static storage (and the call is a no-op when already
 * built); otherwise they are av_malloc()'d. */
1308     int8_t  max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1309     uint8_t index_run[MAX_RUN + 1];
1310     int last, run, level, start, end, i;
1312     /* If table is static, we can quit if rl->max_level[0] is not NULL */
1313     if (static_store && rl->max_level[0])
1316     /* compute max_level[], max_run[] and index_run[] */
1317     for (last = 0; last < 2; last++) {
/* NOTE(review): the lines choosing [start, end) for this "last" value are
 * elided here; the scan below walks that sub-range of the RL table. */
1326         memset(max_level, 0, MAX_RUN + 1);
1327         memset(max_run, 0, MAX_LEVEL + 1);
/* rl->n marks an unused index_run slot (it is one past the last code). */
1328         memset(index_run, rl->n, MAX_RUN + 1);
1329         for (i = start; i < end; i++) {
1330             run   = rl->table_run[i];
1331             level = rl->table_level[i];
1332             if (index_run[run] == rl->n)
1334             if (level > max_level[run])
1335                 max_level[run] = level;
1336             if (run > max_run[level])
1337                 max_run[level] = run;
/* Copy the computed tables either into the static store (packed one after
 * another) or into fresh heap buffers. */
1340             rl->max_level[last] = static_store[last];
1342             rl->max_level[last] = av_malloc(MAX_RUN + 1);
1343         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1345             rl->max_run[last]   = static_store[last] + MAX_RUN + 1;
1347             rl->max_run[last]   = av_malloc(MAX_LEVEL + 1);
1348         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1350             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1352             rl->index_run[last] = av_malloc(MAX_RUN + 1);
1353         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1357 av_cold void ff_init_vlc_rl(RLTable *rl)
/* Pre-expand the RL VLC into per-qscale RL_VLC_ELEM tables so the decoder
 * can fetch (len, level, run) with a single lookup: for each of the 32
 * qscale values, every VLC table entry is translated to its dequantized
 * level and its run (with +192 marking "last coefficient" codes). */
1361     for (q = 0; q < 32; q++) {
/* qadd = (q - 1) | 1 is the usual odd rounding offset for H.263-style
 * dequantization; qmul setup is elided in this view. */
1363         int qadd = (q - 1) | 1;
1369         for (i = 0; i < rl->vlc.table_size; i++) {
1370             int code = rl->vlc.table[i][0];
1371             int len  = rl->vlc.table[i][1];
1374             if (len == 0) { // illegal code
1377             } else if (len < 0) { // more bits needed
1381                 if (code == rl->n) { // esc
/* Regular code: translate table index to run/level; "last" codes get
 * run += 192 so the decoder can detect end-of-block cheaply. */
1385                     run   = rl->table_run[code] + 1;
1386                     level = rl->table_level[code] * qmul + qadd;
1387                     if (code >= rl->last) run += 192;
1390             rl->rl_vlc[q][i].len   = len;
1391             rl->rl_vlc[q][i].level = level;
1392             rl->rl_vlc[q][i].run   = run;
1397 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
/* Unreference every picture that is not a reference frame; the picture
 * currently pointed to by current_picture_ptr is kept unless
 * remove_current is set. */
1401     /* release non reference frames */
1402     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1403         if (!s->picture[i].reference &&
1404             (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1405             ff_mpeg_unref_picture(s, &s->picture[i]);
1410 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
/* A picture slot is reusable when it has no data yet, or when it is
 * flagged for reallocation and not held as a delayed reference.
 * The last_picture_ptr slot is never considered unused.
 * (Return statements are elided in this view.) */
1412     if (pic == s->last_picture_ptr)
1414     if (pic->f.data[0] == NULL)
1416     if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1421 static int find_unused_picture(MpegEncContext *s, int shared)
/* Pick a free slot in s->picture[].  First pass prefers completely empty
 * slots (no data allocated); second pass accepts any slot that
 * pic_is_unused() approves.  Overflow is treated as an internal error —
 * see the rationale comment below. */
1426     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1427         if (s->picture[i].f.data[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1431     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1432         if (pic_is_unused(s, &s->picture[i]))
1437     av_log(s->avctx, AV_LOG_FATAL,
1438            "Internal error, picture buffer overflow\n");
1439     /* We could return -1, but the codec would crash trying to draw into a
1440      * non-existing frame anyway. This is safer than waiting for a random crash.
1441      * Also the return of this is never useful, an encoder must only allocate
1442      * as much as allowed in the specification. This has no relationship to how
1443      * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1444      * enough for such valid streams).
1445      * Plus, a decoder has to check stream validity and remove frames if too
1446      * many reference frames are around. Waiting for "OOM" is not correct at
1447      * all. Similarly, missing reference frames have to be replaced by
1448      * interpolated/MC frames, anything else is a bug in the codec ...
1454 int ff_find_unused_picture(MpegEncContext *s, int shared)
/* Public wrapper around find_unused_picture(): if the chosen slot was
 * flagged needs_realloc, fully reset it (tables, references, frame
 * defaults) before handing the index back to the caller. */
1456     int ret = find_unused_picture(s, shared);
1458     if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1459         if (s->picture[ret].needs_realloc) {
1460             s->picture[ret].needs_realloc = 0;
1461             free_picture_tables(&s->picture[ret]);
1462             ff_mpeg_unref_picture(s, &s->picture[ret]);
1463             avcodec_get_frame_defaults(&s->picture[ret].f);
1469 static void update_noise_reduction(MpegEncContext *s)
/* Refresh the per-coefficient DCT offsets used for encoder-side noise
 * reduction from the accumulated error statistics, separately for intra
 * and inter blocks.  Counters are halved once they exceed 2^16 so the
 * statistics form a decaying average rather than growing unbounded. */
1473     for (intra = 0; intra < 2; intra++) {
1474         if (s->dct_count[intra] > (1 << 16)) {
1475             for (i = 0; i < 64; i++) {
1476                 s->dct_error_sum[intra][i] >>= 1;
1478             s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, computed with rounding
 * (+ error_sum/2) and a +1 in the divisor to avoid division by zero. */
1481         for (i = 0; i < 64; i++) {
1482             s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1483                                        s->dct_count[intra] +
1484                                        s->dct_error_sum[intra][i] / 2) /
1485                                       (s->dct_error_sum[intra][i] + 1);
1491 * generic function for encode/decode called after coding/decoding
1492 * the header and before a frame is coded/decoded.
1494 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* Per-frame setup run after the header is parsed and before any MB is
 * coded/decoded: rotates last/next/current picture pointers, allocates
 * the current picture (and dummy reference frames when references are
 * missing), fixes field-picture data pointers/linesizes, selects the
 * dequantizers, and updates encoder noise-reduction state.
 * Returns 0 on success, negative on error (some error paths elided). */
1500     if (!ff_thread_can_start_frame(avctx)) {
1501         av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1505     /* mark & release old frames */
1506     if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1507         s->last_picture_ptr != s->next_picture_ptr &&
1508         s->last_picture_ptr->f.data[0]) {
1509         ff_mpeg_unref_picture(s, s->last_picture_ptr);
1512     /* release forgotten pictures */
1513     /* if (mpeg124/h263) */
/* A referenced picture that is neither last nor next should not exist;
 * with frame threading this can happen legitimately, otherwise warn. */
1515     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1516         if (&s->picture[i] != s->last_picture_ptr &&
1517             &s->picture[i] != s->next_picture_ptr &&
1518             s->picture[i].reference && !s->picture[i].needs_realloc) {
1519             if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1520                 av_log(avctx, AV_LOG_ERROR,
1521                        "releasing zombie picture\n");
1522             ff_mpeg_unref_picture(s, &s->picture[i]);
1528     ff_release_unused_pictures(s, 1);
1530     if (s->current_picture_ptr &&
1531         s->current_picture_ptr->f.data[0] == NULL) {
1532         // we already have a unused image
1533         // (maybe it was set before reading the header)
1534         pic = s->current_picture_ptr;
1536         i = ff_find_unused_picture(s, 0);
1538             av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1541         pic = &s->picture[i];
/* Reference flags for the new picture (details partially elided): only
 * non-droppable, non-B frames become references. */
1545     if (!s->droppable) {
1546         if (s->pict_type != AV_PICTURE_TYPE_B)
1550     pic->f.coded_picture_number = s->coded_picture_number++;
1552     if (ff_alloc_picture(s, pic, 0) < 0)
1555     s->current_picture_ptr = pic;
1556     // FIXME use only the vars from current_pic
1557     s->current_picture_ptr->f.top_field_first = s->top_field_first;
1558     if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1559         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* For MPEG-1/2 field pictures, top_field_first is derived from which
 * field comes first in coding order. */
1560         if (s->picture_structure != PICT_FRAME)
1561             s->current_picture_ptr->f.top_field_first =
1562                 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1564     s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1565                                                  !s->progressive_sequence;
1566     s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;
1569     s->current_picture_ptr->f.pict_type = s->pict_type;
1570     // if (s->flags && CODEC_FLAG_QSCALE)
1571     //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1572     s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1574     ff_mpeg_unref_picture(s, &s->current_picture);
1575     if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1576                                    s->current_picture_ptr)) < 0)
/* Rotate reference pointers: for non-B frames the previous "next" frame
 * becomes "last" and the new frame becomes "next". */
1579     if (s->pict_type != AV_PICTURE_TYPE_B) {
1580         s->last_picture_ptr = s->next_picture_ptr;
1582             s->next_picture_ptr = s->current_picture_ptr;
1584     av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1585             s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1586             s->last_picture_ptr    ? s->last_picture_ptr->f.data[0]    : NULL,
1587             s->next_picture_ptr    ? s->next_picture_ptr->f.data[0]    : NULL,
1588             s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1589             s->pict_type, s->droppable);
/* Missing last reference (stream starts on a non-keyframe, or a field
 * based first keyframe): synthesize a gray dummy picture so motion
 * compensation has something valid to read from. */
1591     if ((s->last_picture_ptr == NULL ||
1592          s->last_picture_ptr->f.data[0] == NULL) &&
1593         (s->pict_type != AV_PICTURE_TYPE_I ||
1594          s->picture_structure != PICT_FRAME)) {
1595         int h_chroma_shift, v_chroma_shift;
1596         av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1597                                          &h_chroma_shift, &v_chroma_shift);
1598         if (s->pict_type != AV_PICTURE_TYPE_I)
1599             av_log(avctx, AV_LOG_ERROR,
1600                    "warning: first frame is no keyframe\n");
1601         else if (s->picture_structure != PICT_FRAME)
1602             av_log(avctx, AV_LOG_INFO,
1603                    "allocate dummy last picture for field based first keyframe\n");
1605         /* Allocate a dummy frame */
1606         i = ff_find_unused_picture(s, 0);
1608             av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1611         s->last_picture_ptr = &s->picture[i];
1612         s->last_picture_ptr->f.key_frame = 0;
1613         if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1614             s->last_picture_ptr = NULL;
/* Fill with mid-gray (0x80 in all planes, i.e. black luma is 0x80 here
 * too — intentionally neutral for YUV). */
1618         memset(s->last_picture_ptr->f.data[0], 0x80,
1619                avctx->height * s->last_picture_ptr->f.linesize[0]);
1620         memset(s->last_picture_ptr->f.data[1], 0x80,
1621                (avctx->height >> v_chroma_shift) *
1622                s->last_picture_ptr->f.linesize[1]);
1623         memset(s->last_picture_ptr->f.data[2], 0x80,
1624                (avctx->height >> v_chroma_shift) *
1625                s->last_picture_ptr->f.linesize[2]);
/* FLV1/H.263 use luma 16 (video black) for the dummy picture instead. */
1627         if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1628             for(i=0; i<avctx->height; i++)
1629             memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
/* Mark both fields of the dummy as fully decoded for frame threading. */
1632         ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1633         ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/* Same trick for a missing "next" reference of a B frame. */
1635     if ((s->next_picture_ptr == NULL ||
1636          s->next_picture_ptr->f.data[0] == NULL) &&
1637         s->pict_type == AV_PICTURE_TYPE_B) {
1638         /* Allocate a dummy frame */
1639         i = ff_find_unused_picture(s, 0);
1641             av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1644         s->next_picture_ptr = &s->picture[i];
1645         s->next_picture_ptr->f.key_frame = 0;
1646         if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1647             s->next_picture_ptr = NULL;
1650         ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1651         ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1654 #if 0 // BUFREF-FIXME
1655     memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1656     memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
/* Take fresh references on the last/next pictures into the context-local
 * Picture copies used during decoding. */
1658     if (s->last_picture_ptr) {
1659         ff_mpeg_unref_picture(s, &s->last_picture);
1660         if (s->last_picture_ptr->f.data[0] &&
1661             (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1662                                        s->last_picture_ptr)) < 0)
1665     if (s->next_picture_ptr) {
1666         ff_mpeg_unref_picture(s, &s->next_picture);
1667         if (s->next_picture_ptr->f.data[0] &&
1668             (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1669                                        s->next_picture_ptr)) < 0)
1673     assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1674                                                  s->last_picture_ptr->f.data[0]));
/* Field pictures: point the bottom field one line in and double all
 * linesizes so a field is addressed like a half-height frame. */
1676     if (s->picture_structure!= PICT_FRAME) {
1678         for (i = 0; i < 4; i++) {
1679             if (s->picture_structure == PICT_BOTTOM_FIELD) {
1680                 s->current_picture.f.data[i] +=
1681                     s->current_picture.f.linesize[i];
1683             s->current_picture.f.linesize[i] *= 2;
1684             s->last_picture.f.linesize[i]    *= 2;
1685             s->next_picture.f.linesize[i]    *= 2;
1689     s->err_recognition = avctx->err_recognition;
1691     /* set dequantizer, we can't do it during init as
1692      * it might change for mpeg4 and we can't do it in the header
1693      * decode as init is not called for mpeg4 there yet */
1694     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1695         s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1696         s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1697     } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1698         s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1699         s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1701         s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1702         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1705     if (s->dct_error_sum) {
1706         av_assert2(s->avctx->noise_reduction && s->encoding);
1707         update_noise_reduction(s);
1710     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1711         return ff_xvmc_field_start(s, avctx);
1716 /* generic function for encode/decode called after a
1717 * frame has been coded/decoded. */
1718 void ff_MPV_frame_end(MpegEncContext *s)
/* Per-frame teardown after coding/decoding: draw edge padding for
 * unrestricted MV, remember last picture type/lambda, copy the current
 * picture back into the picture array, release non-reference frames and
 * report decode completion to frame threads. */
1721     /* redraw edges for the frame if decoding didn't complete */
1722     // just to make sure that all data is rendered.
1723     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1724         ff_xvmc_field_end(s);
/* Edge padding is only needed for software decoding with unrestricted
 * motion vectors and no CODEC_FLAG_EMU_EDGE; hwaccel/VDPAU surfaces are
 * not writable here.  (One condition line is elided in this view.) */
1725     } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1726                !s->avctx->hwaccel &&
1727                !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1728                s->unrestricted_mv &&
1729                s->current_picture.reference &&
1731                !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1734         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1735         int hshift = desc->log2_chroma_w;
1736         int vshift = desc->log2_chroma_h;
1737         s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1738                           s->h_edge_pos, s->v_edge_pos,
1739                           EDGE_WIDTH, EDGE_WIDTH,
1740                           EDGE_TOP | EDGE_BOTTOM);
1741         s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1742                           s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1743                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1744                           EDGE_TOP | EDGE_BOTTOM);
1745         s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1746                           s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1747                           EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1748                           EDGE_TOP | EDGE_BOTTOM);
1753     s->last_pict_type                 = s->pict_type;
1754     s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1755     if (s->pict_type!= AV_PICTURE_TYPE_B) {
1756         s->last_non_b_pict_type = s->pict_type;
1759     /* copy back current_picture variables */
1760     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1761         if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1762             s->picture[i] = s->current_picture;
1766     assert(i < MAX_PICTURE_COUNT);
1770     /* release non-reference frames */
1771     for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1772         if (!s->picture[i].reference)
1773             ff_mpeg_unref_picture(s, &s->picture[i]);
1776     // clear copies, to avoid confusion
1778     memset(&s->last_picture,    0, sizeof(Picture));
1779     memset(&s->next_picture,    0, sizeof(Picture));
1780     memset(&s->current_picture, 0, sizeof(Picture));
1782     s->avctx->coded_frame = &s->current_picture_ptr->f;
1784     if (s->current_picture.reference)
1785         ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1789 * Draw a line from (ex, ey) -> (sx, sy).
1790 * @param w width of the image
1791 * @param h height of the image
1792 * @param stride stride/linesize of the image
1793  * @param color color of the line
1795 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1796                       int w, int h, int stride, int color)
/* Additively draw an anti-aliased line between (sx,sy) and (ex,ey) into a
 * single 8-bit plane, clipping both endpoints to the image.  Uses 16.16
 * fixed-point interpolation along the major axis, splitting the color
 * between the two nearest pixels on the minor axis. */
1800     sx = av_clip(sx, 0, w - 1);
1801     sy = av_clip(sy, 0, h - 1);
1802     ex = av_clip(ex, 0, w - 1);
1803     ey = av_clip(ey, 0, h - 1);
1805     buf[sy * stride + sx] += color;
/* Mostly-horizontal case: step in x, interpolate y. */
1807     if (FFABS(ex - sx) > FFABS(ey - sy)) {
/* Swap endpoints so we always iterate left-to-right (the elided condition
 * presumably checks sx > ex — TODO confirm against full source). */
1809             FFSWAP(int, sx, ex);
1810             FFSWAP(int, sy, ey);
1812         buf += sx + sy * stride;
1814         f    = ((ey - sy) << 16) / ex;
1815         for (x = 0; x <= ex; x++) {
1817             fr = (x * f) & 0xFFFF;
1818             buf[y * stride + x]       += (color * (0x10000 - fr)) >> 16;
1819             if(fr) buf[(y + 1) * stride + x] += (color *            fr ) >> 16;
/* Mostly-vertical case: step in y, interpolate x. */
1823             FFSWAP(int, sx, ex);
1824             FFSWAP(int, sy, ey);
1826         buf += sx + sy * stride;
1829             f = ((ex - sx) << 16) / ey;
1832         for(y= 0; y <= ey; y++){
1834             fr = (y*f) & 0xFFFF;
1835             buf[y * stride + x]     += (color * (0x10000 - fr)) >> 16;
1836             if(fr) buf[y * stride + x + 1] += (color *            fr ) >> 16;
1842 * Draw an arrow from (ex, ey) -> (sx, sy).
1843 * @param w width of the image
1844 * @param h height of the image
1845 * @param stride stride/linesize of the image
1846 * @param color color of the arrow
1848 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1849                        int ey, int w, int h, int stride, int color)
/* Draw a motion-vector arrow: the shaft from (sx,sy) to (ex,ey) plus two
 * short head strokes at the start point when the vector is longer than
 * 3 pixels.  Endpoints are clipped loosely (±100) so off-screen vectors
 * still render their visible portion. */
1853     sx = av_clip(sx, -100, w + 100);
1854     sy = av_clip(sy, -100, h + 100);
1855     ex = av_clip(ex, -100, w + 100);
1856     ey = av_clip(ey, -100, h + 100);
1861     if (dx * dx + dy * dy > 3 * 3) {
/* Normalize (rx,ry) to a fixed head length using integer sqrt in 8.8. */
1864         int length = ff_sqrt((rx * rx + ry * ry) << 8);
1866         // FIXME subpixel accuracy
1867         rx = ROUNDED_DIV(rx * 3 << 4, length);
1868         ry = ROUNDED_DIV(ry * 3 << 4, length);
/* Two head strokes, rotated ±90 degrees relative to each other. */
1870         draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1871         draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1873     draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1877 * Print debugging info for the given picture.
1879 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1881                          int mb_width, int mb_height, int mb_stride, int quarter_sample)
/* Emit per-macroblock debug output for a decoded picture, controlled by
 * avctx->debug / avctx->debug_mv: a textual MB map (skip count, QP, MB
 * type) via av_log, and/or visual overlays drawn directly into the frame
 * (motion-vector arrows, QP shading, MB-type coloring, partition and
 * interlacing markers).  No-op for hwaccel/VDPAU or when MB info is
 * missing. */
1883     if (avctx->hwaccel || !p || !p->mb_type
1884         || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
/* ---- textual MB map ---- */
1888     if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1891         av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1892                av_get_picture_type_char(pict->pict_type));
1893         for (y = 0; y < mb_height; y++) {
1894             for (x = 0; x < mb_width; x++) {
1895                 if (avctx->debug & FF_DEBUG_SKIP) {
1896                     int count = mbskip_table[x + y * mb_stride];
1899                     av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1901                 if (avctx->debug & FF_DEBUG_QP) {
1902                     av_log(avctx, AV_LOG_DEBUG, "%2d",
1903                            p->qscale_table[x + y * mb_stride]);
1905                 if (avctx->debug & FF_DEBUG_MB_TYPE) {
1906                     int mb_type = p->mb_type[x + y * mb_stride];
1907                     // Type & MV direction
1908                     if (IS_PCM(mb_type))
1909                         av_log(avctx, AV_LOG_DEBUG, "P");
1910                     else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1911                         av_log(avctx, AV_LOG_DEBUG, "A");
1912                     else if (IS_INTRA4x4(mb_type))
1913                         av_log(avctx, AV_LOG_DEBUG, "i");
1914                     else if (IS_INTRA16x16(mb_type))
1915                         av_log(avctx, AV_LOG_DEBUG, "I");
1916                     else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1917                         av_log(avctx, AV_LOG_DEBUG, "d");
1918                     else if (IS_DIRECT(mb_type))
1919                         av_log(avctx, AV_LOG_DEBUG, "D");
1920                     else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1921                         av_log(avctx, AV_LOG_DEBUG, "g");
1922                     else if (IS_GMC(mb_type))
1923                         av_log(avctx, AV_LOG_DEBUG, "G");
1924                     else if (IS_SKIP(mb_type))
1925                         av_log(avctx, AV_LOG_DEBUG, "S");
1926                     else if (!USES_LIST(mb_type, 1))
1927                         av_log(avctx, AV_LOG_DEBUG, ">");
1928                     else if (!USES_LIST(mb_type, 0))
1929                         av_log(avctx, AV_LOG_DEBUG, "<");
1931                         av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1932                         av_log(avctx, AV_LOG_DEBUG, "X");
/* Second character: partition shape. */
1936                     if (IS_8X8(mb_type))
1937                         av_log(avctx, AV_LOG_DEBUG, "+");
1938                     else if (IS_16X8(mb_type))
1939                         av_log(avctx, AV_LOG_DEBUG, "-");
1940                     else if (IS_8X16(mb_type))
1941                         av_log(avctx, AV_LOG_DEBUG, "|");
1942                     else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1943                         av_log(avctx, AV_LOG_DEBUG, " ");
1945                         av_log(avctx, AV_LOG_DEBUG, "?");
/* Third character: interlaced flag. */
1948                     if (IS_INTERLACED(mb_type))
1949                         av_log(avctx, AV_LOG_DEBUG, "=");
1951                         av_log(avctx, AV_LOG_DEBUG, " ");
1954             av_log(avctx, AV_LOG_DEBUG, "\n");
/* ---- visual overlays drawn into the frame itself ---- */
1958     if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1959         (avctx->debug_mv)) {
1960         const int shift = 1 + quarter_sample;
1964         int h_chroma_shift, v_chroma_shift, block_height;
1965         const int width          = avctx->width;
1966         const int height         = avctx->height;
/* H.264/SVQ3 store MVs at 4x4 granularity (log2 = 2), others at 8x8. */
1967         const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1968         const int mv_stride      = (mb_width << mv_sample_log2) +
1969                                    (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1971         *low_delay = 0; // needed to see the vectors without trashing the buffers
1973         avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1975         av_frame_make_writable(pict);
1977         pict->opaque = NULL;
1978         ptr          = pict->data[0];
1979         block_height = 16 >> v_chroma_shift;
1981         for (mb_y = 0; mb_y < mb_height; mb_y++) {
1983             for (mb_x = 0; mb_x < mb_width; mb_x++) {
1984                 const int mb_index = mb_x + mb_y * mb_stride;
/* Motion vector arrows: type 0/1/2 select P-forward, B-forward and
 * B-backward respectively (filters below skip non-matching frames). */
1985                 if ((avctx->debug_mv) && p->motion_val[0]) {
1987                     for (type = 0; type < 3; type++) {
1991                             if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1992                                 (pict->pict_type!= AV_PICTURE_TYPE_P))
1997                             if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1998                                 (pict->pict_type!= AV_PICTURE_TYPE_B))
2003                             if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2004                                 (pict->pict_type!= AV_PICTURE_TYPE_B))
2009                         if (!USES_LIST(p->mb_type[mb_index], direction))
/* One arrow per partition: 4 for 8x8, 2 for 16x8 / 8x16, 1 for 16x16. */
2012                         if (IS_8X8(p->mb_type[mb_index])) {
2014                             for (i = 0; i < 4; i++) {
2015                                 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2016                                 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2017                                 int xy = (mb_x * 2 + (i & 1) +
2018                                          (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2019                                 int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
2020                                 int my = (p->motion_val[direction][xy][1] >> shift) + sy;
2021                                 draw_arrow(ptr, sx, sy, mx, my, width,
2022                                            height, pict->linesize[0], 100);
2024                         } else if (IS_16X8(p->mb_type[mb_index])) {
2026                             for (i = 0; i < 2; i++) {
2027                                 int sx = mb_x * 16 + 8;
2028                                 int sy = mb_y * 16 + 4 + 8 * i;
2029                                 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2030                                 int mx = (p->motion_val[direction][xy][0] >> shift);
2031                                 int my = (p->motion_val[direction][xy][1] >> shift);
2033                                 if (IS_INTERLACED(p->mb_type[mb_index]))
2036                                 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2037                                            height, pict->linesize[0], 100);
2039                         } else if (IS_8X16(p->mb_type[mb_index])) {
2041                             for (i = 0; i < 2; i++) {
2042                                 int sx = mb_x * 16 + 4 + 8 * i;
2043                                 int sy = mb_y * 16 + 8;
2044                                 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2045                                 int mx = p->motion_val[direction][xy][0] >> shift;
2046                                 int my = p->motion_val[direction][xy][1] >> shift;
2048                                 if (IS_INTERLACED(p->mb_type[mb_index]))
2051                                 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2052                                            height, pict->linesize[0], 100);
2055                             int sx= mb_x * 16 + 8;
2056                             int sy= mb_y * 16 + 8;
2057                             int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2058                             int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2059                             int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2060                             draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
/* QP visualization: shade the MB's chroma with a gray level proportional
 * to its qscale (qscale range presumably 1..31 — MPEG-style). */
2064                 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2065                     uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2066                                  0x0101010101010101ULL;
2068                     for (y = 0; y < block_height; y++) {
2069                         *(uint64_t *)(pict->data[1] + 8 * mb_x +
2070                                       (block_height * mb_y + y) *
2071                                       pict->linesize[1]) = c;
2072                         *(uint64_t *)(pict->data[2] + 8 * mb_x +
2073                                       (block_height * mb_y + y) *
2074                                       pict->linesize[2]) = c;
/* MB-type visualization: pick a U/V chroma color per MB category. */
2077                 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2079                     int mb_type = p->mb_type[mb_index];
2082 #define COLOR(theta, r) \
2083     u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2084     v = (int)(128 + r * sin(theta * 3.141592 / 180));
2088                     if (IS_PCM(mb_type)) {
2090                     } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2091                                IS_INTRA16x16(mb_type)) {
2093                     } else if (IS_INTRA4x4(mb_type)) {
2095                     } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2097                     } else if (IS_DIRECT(mb_type)) {
2099                     } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2101                     } else if (IS_GMC(mb_type)) {
2103                     } else if (IS_SKIP(mb_type)) {
2105                     } else if (!USES_LIST(mb_type, 1)) {
2107                     } else if (!USES_LIST(mb_type, 0)) {
2110                         av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* Broadcast the 8-bit u/v values across a 64-bit word to fill 8 chroma
 * pixels per store. */
2114                     u *= 0x0101010101010101ULL;
2115                     v *= 0x0101010101010101ULL;
2116                     for (y = 0; y < block_height; y++) {
2117                         *(uint64_t *)(pict->data[1] + 8 * mb_x +
2118                                       (block_height * mb_y + y) * pict->linesize[1]) = u;
2119                         *(uint64_t *)(pict->data[2] + 8 * mb_x +
2120                                       (block_height * mb_y + y) * pict->linesize[2]) = v;
/* Partition boundary markers: XOR luma so lines survive any background. */
2124                     if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2125                         *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2126                                       (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2127                         *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2128                                       (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2130                     if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2131                         for (y = 0; y < 16; y++)
2132                             pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2133                                           pict->linesize[0]] ^= 0x80;
/* Sub-partition markers within an 8x8 split, only when MV granularity
 * is fine enough to detect differing sub-block vectors. */
2135                     if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2136                         int dm = 1 << (mv_sample_log2 - 2);
2137                         for (i = 0; i < 4; i++) {
2138                             int sx = mb_x * 16 + 8 * (i & 1);
2139                             int sy = mb_y * 16 + 8 * (i >> 1);
2140                             int xy = (mb_x * 2 + (i & 1) +
2141                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2143                             int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2144                             if (mv[0] != mv[dm] ||
2145                                 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2146                                 for (y = 0; y < 8; y++)
2147                                     pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2148                             if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2149                                 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2150                                               pict->linesize[0]) ^= 0x8080808080808080ULL;
2154                     if (IS_INTERLACED(mb_type) &&
2155                         avctx->codec->id == AV_CODEC_ID_H264) {
/* Mark this MB as rendered so it is not drawn again. */
2159                 mbskip_table[mb_index] = 0;
2165 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
/* Convenience wrapper: forward the MpegEncContext geometry and tables to
 * the generic ff_print_debug_info2(). */
2167     ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2168                          s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2171 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
/* Attach the picture's qscale table to the output AVFrame as side data.
 * A new reference to the table buffer is taken and offset past the
 * first 2 MB rows + 1 of padding (the table includes edge MBs), so the
 * exported data starts at the first real macroblock.  Returns 0 or a
 * negative AVERROR. */
2173     AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2174     int offset = 2*s->mb_stride + 1;
2176         return AVERROR(ENOMEM);
2177     av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2178     ref->size -= offset;
2179     ref->data += offset;
2180     return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
2183 static inline int hpel_motion_lowres(MpegEncContext *s,
2184                                      uint8_t *dest, uint8_t *src,
2185                                      int field_based, int field_select,
2186                                      int src_x, int src_y,
2187                                      int width, int height, int stride,
2188                                      int h_edge_pos, int v_edge_pos,
2189                                      int w, int h, h264_chroma_mc_func *pix_op,
2190                                      int motion_x, int motion_y)
/* Half-pel motion compensation for lowres decoding: split the motion
 * vector into an integer source offset plus a sub-pel fraction (s_mask
 * bits), fall back to emulated_edge_mc when the block reads outside the
 * padded picture, and apply the h264 chroma MC as a generic bilinear
 * filter.  Return value lines are elided in this view. */
2192     const int lowres   = s->avctx->lowres;
2193     const int op_index = FFMIN(lowres, 2);
/* s_mask keeps (lowres+1) fractional bits of the MV. */
2194     const int s_mask   = (2 << lowres) - 1;
/* Quarter-sample MVs are first reduced to half-sample (details elided). */
2198     if (s->quarter_sample) {
2203     sx = motion_x & s_mask;
2204     sy = motion_y & s_mask;
/* NB: ">> lowres + 1" parses as ">> (lowres + 1)" — integer part of MV. */
2205     src_x += motion_x >> lowres + 1;
2206     src_y += motion_y >> lowres + 1;
2208     src += src_y * stride + src_x;
2210     if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
2211         (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2212         s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
2213                                  (h + 1) << field_based, src_x,
2214                                  src_y   << field_based,
2217         src = s->edge_emu_buffer;
/* Scale the sub-pel fraction to the 0..7 range the MC function expects. */
2221     sx = (sx << 2) >> lowres;
2222     sy = (sy << 2) >> lowres;
2225     pix_op[op_index](dest, src, stride, h, sx, sy);
2229 /* apply one mpeg motion vector to the three components */
/*
 * Lowres (reduced-resolution) motion compensation: applies a single MPEG
 * motion vector to the luma plane (dest_y) and both chroma planes
 * (dest_cb/dest_cr), selecting the h264_chroma_mc_func from pix_op by the
 * computed sub-pel fractions.
 * NOTE(review): the embedded line numbers jump (2230 -> 2237, 2239 -> 2242, ...),
 * so some parameters, locals and body lines are missing from this listing;
 * comments below describe only what is visible here.
 */
2230 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2237 uint8_t **ref_picture,
2238 h264_chroma_mc_func *pix_op,
2239 int motion_x, int motion_y,
2242 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2243 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
2245 const int lowres = s->avctx->lowres;
// op_index picks the chroma MC function; clamped to 2 (largest table entry used here).
2246 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2);
2247 const int block_s = 8>>lowres;
// s_mask keeps the sub-pel fraction bits: one extra bit per lowres level.
2248 const int s_mask = (2 << lowres) - 1;
2249 const int h_edge_pos = s->h_edge_pos >> lowres;
2250 const int v_edge_pos = s->v_edge_pos >> lowres;
2251 linesize = s->current_picture.f.linesize[0] << field_based;
2252 uvlinesize = s->current_picture.f.linesize[1] << field_based;
2254 // FIXME obviously not perfect but qpel will not work in lowres anyway
2255 if (s->quarter_sample) {
2261 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2264 sx = motion_x & s_mask;
2265 sy = motion_y & s_mask;
// precedence: ">> lowres + 1" parses as ">> (lowres + 1)" — presumably
// intentional; the same unparenthesized form is used at the other call sites.
2266 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2267 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2269 if (s->out_format == FMT_H263) {
2270 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2271 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2272 uvsrc_x = src_x >> 1;
2273 uvsrc_y = src_y >> 1;
2274 } else if (s->out_format == FMT_H261) {
2275 // even chroma mv's are full pel in H261
2278 uvsx = (2 * mx) & s_mask;
2279 uvsy = (2 * my) & s_mask;
2280 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2281 uvsrc_y = mb_y * block_s + (my >> lowres);
2283 if(s->chroma_y_shift){
2288 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2289 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2291 if(s->chroma_x_shift){
2295 uvsy = motion_y & s_mask;
2297 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2300 uvsx = motion_x & s_mask;
2301 uvsy = motion_y & s_mask;
2308 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2309 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2310 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
// If the reference block reaches outside the picture, copy it into
// edge_emu_buffer with replicated edges and read from there instead.
2312 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2313 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2314 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2315 linesize >> field_based, 17, 17 + field_based,
2316 src_x, src_y << field_based, h_edge_pos,
2318 ptr_y = s->edge_emu_buffer;
2319 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2320 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2321 s->vdsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9,
2323 uvsrc_x, uvsrc_y << field_based,
2324 h_edge_pos >> 1, v_edge_pos >> 1);
2325 s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9,
2327 uvsrc_x, uvsrc_y << field_based,
2328 h_edge_pos >> 1, v_edge_pos >> 1);
2330 ptr_cr = uvbuf + 16;
2334 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
2336 dest_y += s->linesize;
2337 dest_cb += s->uvlinesize;
2338 dest_cr += s->uvlinesize;
2342 ptr_y += s->linesize;
2343 ptr_cb += s->uvlinesize;
2344 ptr_cr += s->uvlinesize;
// Rescale sub-pel fractions to the 1/8-pel domain the chroma MC table expects.
2347 sx = (sx << 2) >> lowres;
2348 sy = (sy << 2) >> lowres;
2349 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2351 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2352 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2353 uvsx = (uvsx << 2) >> lowres;
2354 uvsy = (uvsy << 2) >> lowres;
2356 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2357 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2360 // FIXME h261 lowres loop filter
/*
 * Lowres chroma motion compensation for 4MV (8x8) macroblocks: the four luma
 * vectors are merged into one chroma vector (special rounding, see below) and
 * applied to both chroma planes.
 * NOTE(review): interior lines are missing from this listing (numbering jumps
 * 2366 -> 2369, 2386 -> 2390, ...); mx/my setup is partly out of view.
 */
2363 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2364 uint8_t *dest_cb, uint8_t *dest_cr,
2365 uint8_t **ref_picture,
2366 h264_chroma_mc_func * pix_op,
2369 const int lowres = s->avctx->lowres;
2370 const int op_index = FFMIN(lowres, 2);
2371 const int block_s = 8 >> lowres;
2372 const int s_mask = (2 << lowres) - 1;
// precedence: ">> lowres + 1" is ">> (lowres + 1)" — chroma is at half the
// luma resolution here, hence the extra shift.
2373 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2374 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2375 int emu = 0, src_x, src_y, offset, sx, sy;
2378 if (s->quarter_sample) {
2383 /* In case of 8X8, we construct a single chroma motion vector
2384 with a special rounding */
2385 mx = ff_h263_round_chroma(mx);
2386 my = ff_h263_round_chroma(my);
2390 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2391 src_y = s->mb_y * block_s + (my >> lowres + 1);
2393 offset = src_y * s->uvlinesize + src_x;
2394 ptr = ref_picture[1] + offset;
2395 if (s->flags & CODEC_FLAG_EMU_EDGE) {
2396 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2397 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2398 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
2399 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2400 ptr = s->edge_emu_buffer;
2404 sx = (sx << 2) >> lowres;
2405 sy = (sy << 2) >> lowres;
2406 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
// Cr uses the same offset/fractions computed for Cb above.
2408 ptr = ref_picture[2] + offset;
2410 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
2411 src_x, src_y, h_edge_pos, v_edge_pos);
2412 ptr = s->edge_emu_buffer;
2414 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2418 * motion compensation of a single macroblock
2420 * @param dest_y luma destination pointer
2421 * @param dest_cb chroma cb/u destination pointer
2422 * @param dest_cr chroma cr/v destination pointer
2423 * @param dir direction (0->forward, 1->backward)
2424 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2425 * @param pix_op halfpel motion compensation function (average or put normally)
2426 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/*
 * Dispatches lowres MC on s->mv_type: MV_TYPE_16X16-style single vector,
 * 8x8 (4MV), field, 16x8 and DMV cases are all visible below.
 * NOTE(review): numbering jumps (2447 -> 2453, 2462 -> 2464, ...) show that
 * case labels, braces and some statements are missing from this listing.
 */
2428 static inline void MPV_motion_lowres(MpegEncContext *s,
2429 uint8_t *dest_y, uint8_t *dest_cb,
2431 int dir, uint8_t **ref_picture,
2432 h264_chroma_mc_func *pix_op)
2436 const int lowres = s->avctx->lowres;
2437 const int block_s = 8 >>lowres;
2442 switch (s->mv_type) {
2444 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2446 ref_picture, pix_op,
2447 s->mv[dir][0][0], s->mv[dir][0][1],
// 4MV: four independent luma vectors, one block per 8x8 quadrant.
2453 for (i = 0; i < 4; i++) {
2454 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2455 s->linesize) * block_s,
2456 ref_picture[0], 0, 0,
2457 (2 * mb_x + (i & 1)) * block_s,
2458 (2 * mb_y + (i >> 1)) * block_s,
2459 s->width, s->height, s->linesize,
2460 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2461 block_s, block_s, pix_op,
2462 s->mv[dir][i][0], s->mv[dir][i][1]);
// Accumulate the four luma vectors for the combined chroma vector below.
2464 mx += s->mv[dir][i][0];
2465 my += s->mv[dir][i][1];
2468 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2469 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
// Field MVs inside a frame picture: one call per field.
2473 if (s->picture_structure == PICT_FRAME) {
2475 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2476 1, 0, s->field_select[dir][0],
2477 ref_picture, pix_op,
2478 s->mv[dir][0][0], s->mv[dir][0][1],
2481 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2482 1, 1, s->field_select[dir][1],
2483 ref_picture, pix_op,
2484 s->mv[dir][1][0], s->mv[dir][1][1],
// Field picture referencing the opposite parity of the current frame:
// take the reference from the current picture instead.
2487 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2488 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2489 ref_picture = s->current_picture_ptr->f.data;
2492 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2493 0, 0, s->field_select[dir][0],
2494 ref_picture, pix_op,
2496 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
// 16x8: two vectors per direction, each covering half the macroblock height.
2500 for (i = 0; i < 2; i++) {
2501 uint8_t **ref2picture;
2503 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2504 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2505 ref2picture = ref_picture;
2507 ref2picture = s->current_picture_ptr->f.data;
2510 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2511 0, 0, s->field_select[dir][i],
2512 ref2picture, pix_op,
2513 s->mv[dir][i][0], s->mv[dir][i][1] +
2514 2 * block_s * i, block_s, mb_y >> 1);
2516 dest_y += 2 * block_s * s->linesize;
2517 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2518 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
// DMV (dual-prime style): put first prediction, then average the second on top.
2522 if (s->picture_structure == PICT_FRAME) {
2523 for (i = 0; i < 2; i++) {
2525 for (j = 0; j < 2; j++) {
2526 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2528 ref_picture, pix_op,
2529 s->mv[dir][2 * i + j][0],
2530 s->mv[dir][2 * i + j][1],
2533 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2536 for (i = 0; i < 2; i++) {
2537 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2538 0, 0, s->picture_structure != i + 1,
2539 ref_picture, pix_op,
2540 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2541 2 * block_s, mb_y >> 1);
2543 // after put we make avg of the same block
2544 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2546 // opposite parity is always in the same
2547 // frame if this is second field
2548 if (!s->first_field) {
2549 ref_picture = s->current_picture_ptr->f.data;
2560 * find the lowest MB row referenced in the MVs
/*
 * Used for frame-threading: a decoding thread must wait until the reference
 * frame has progressed past the lowest row any of this MB's vectors touches.
 * Returns an MB-row index clamped to [0, mb_height-1].
 * NOTE(review): the switch body setting `mvs`/`off` is partly missing from
 * this listing (numbering jumps 2570 -> 2584).
 */
2562 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2564 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2565 int my, off, i, mvs;
// Field pictures / global MC: bail out (fall through to the conservative
// "last row" return below).
2567 if (s->picture_structure != PICT_FRAME || s->mcsel)
2570 switch (s->mv_type) {
2584 for (i = 0; i < mvs; i++) {
// Normalize half-pel vectors to quarter-pel so the >>6 below is uniform.
2585 my = s->mv[dir][i][1]<<qpel_shift;
2586 my_max = FFMAX(my_max, my);
2587 my_min = FFMIN(my_min, my);
2590 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2592 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2594 return s->mb_height-1;
2597 /* put block[] to dest[] */
// Dequantize an intra block, then IDCT and *store* the result into dest.
2598 static inline void put_dct(MpegEncContext *s,
2599 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2601 s->dct_unquantize_intra(s, block, i, qscale);
2602 s->dsp.idct_put (dest, line_size, block);
2605 /* add block[] to dest[] */
// IDCT the block and *add* it to dest; skipped entirely when the block has
// no coded coefficients (block_last_index < 0).
2606 static inline void add_dct(MpegEncContext *s,
2607 int16_t *block, int i, uint8_t *dest, int line_size)
2609 if (s->block_last_index[i] >= 0) {
2610 s->dsp.idct_add (dest, line_size, block);
// Dequantize an inter block, then IDCT and add it to dest; no-op for
// uncoded blocks (block_last_index < 0).
2614 static inline void add_dequant_dct(MpegEncContext *s,
2615 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2617 if (s->block_last_index[i] >= 0) {
2618 s->dct_unquantize_inter(s, block, i, qscale);
2620 s->dsp.idct_add (dest, line_size, block);
2625 * Clean dc, ac, coded_block for the current non-intra MB.
/*
 * Resets the intra prediction state (DC predictors to 1024, AC values to 0,
 * coded_block flags) for the current macroblock so a following intra MB does
 * not predict from stale non-intra data.
 * NOTE(review): some lines are missing from this listing (e.g. the luma
 * dc_val[0][xy] store before line 2633, numbering 2630 -> 2633).
 */
2627 void ff_clean_intra_table_entries(MpegEncContext *s)
2629 int wrap = s->b8_stride;
2630 int xy = s->block_index[0];
// 1024 is the reset value for DC predictors (visible for luma and chroma below).
2633 s->dc_val[0][xy + 1 ] =
2634 s->dc_val[0][xy + wrap] =
2635 s->dc_val[0][xy + 1 + wrap] = 1024;
2637 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2638 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
// coded_block is only maintained for msmpeg4 v3+.
2639 if (s->msmpeg4_version>=3) {
2640 s->coded_block[xy ] =
2641 s->coded_block[xy + 1 ] =
2642 s->coded_block[xy + wrap] =
2643 s->coded_block[xy + 1 + wrap] = 0;
// Chroma uses mb_stride-based indexing (one entry per MB, not per 8x8 block).
2646 wrap = s->mb_stride;
2647 xy = s->mb_x + s->mb_y * wrap;
2649 s->dc_val[2][xy] = 1024;
2651 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2652 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2654 s->mbintra_table[xy]= 0;
2657 /* generic function called after a macroblock has been parsed by the
2658 decoder or after it has been encoded by the encoder.
2660 Important variables used:
2661 s->mb_intra : true if intra macroblock
2662 s->mv_dir : motion vector direction
2663 s->mv_type : motion vector type
2664 s->mv : motion vector
2665 s->interlaced_dct : true if interlaced dct used (mpeg2)
/*
 * Per-macroblock reconstruction: motion compensation (normal or lowres path)
 * followed by dequant/IDCT of the residual, for both decoding and encoder
 * reconstruction. Compile-time specialized via lowres_flag / is_mpeg12
 * (av_always_inline + constant args).
 * NOTE(review): this listing has many interior gaps (e.g. 2673 -> 2677,
 * 2694 -> 2699, 2718 -> 2720) — loop headers, braces and several statements
 * are not visible; comments only cover the visible lines.
 */
2667 static av_always_inline
2668 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2669 int lowres_flag, int is_mpeg12)
2671 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2672 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2673 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2677 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2678 /* print DCT coefficients */
2680 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2682 for(j=0; j<64; j++){
2683 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2685 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2689 s->current_picture.qscale_table[mb_xy] = s->qscale;
2691 /* update DC predictors for P macroblocks */
2693 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2694 if(s->mbintra_table[mb_xy])
2695 ff_clean_intra_table_entries(s);
2699 s->last_dc[2] = 128 << s->intra_dc_precision;
2702 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2703 s->mbintra_table[mb_xy]=1;
2705 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2706 uint8_t *dest_y, *dest_cb, *dest_cr;
2707 int dct_linesize, dct_offset;
2708 op_pixels_func (*op_pix)[4];
2709 qpel_mc_func (*op_qpix)[16];
2710 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2711 const int uvlinesize = s->current_picture.f.linesize[1];
2712 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2713 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2715 /* avoid copy if macroblock skipped in last frame too */
2716 /* skip only during decoding as we might trash the buffers during encoding a bit */
2718 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2720 if (s->mb_skipped) {
2722 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2724 } else if(!s->current_picture.reference) {
2727 *mbskip_ptr = 0; /* not skipped */
// Interlaced DCT: rows alternate between fields, so the per-row stride doubles
// and the second block starts one line down instead of block_size lines down.
2731 dct_linesize = linesize << s->interlaced_dct;
2732 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2736 dest_cb= s->dest[1];
2737 dest_cr= s->dest[2];
// Non-readable destination (e.g. B frame without draw_horiz_band): render into
// a scratchpad and copy to the real destination at the end (lines 2902-2904).
2739 dest_y = s->b_scratchpad;
2740 dest_cb= s->b_scratchpad+16*linesize;
2741 dest_cr= s->b_scratchpad+32*linesize;
2745 /* motion handling */
2746 /* decoding or more than one mb_type (MC was already done otherwise) */
// With frame-threading, wait until the referenced rows of the reference
// frames have been decoded before doing MC from them.
2749 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2750 if (s->mv_dir & MV_DIR_FORWARD) {
2751 ff_thread_await_progress(&s->last_picture_ptr->tf,
2752 ff_MPV_lowest_referenced_row(s, 0),
2755 if (s->mv_dir & MV_DIR_BACKWARD) {
2756 ff_thread_await_progress(&s->next_picture_ptr->tf,
2757 ff_MPV_lowest_referenced_row(s, 1),
// Lowres path: chroma-style MC functions, avg for the second (bidir) pass.
2763 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2765 if (s->mv_dir & MV_DIR_FORWARD) {
2766 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2767 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2769 if (s->mv_dir & MV_DIR_BACKWARD) {
2770 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
// Full-resolution path: halfpel/qpel MC, again put then avg for bidir.
2773 op_qpix= s->me.qpel_put;
2774 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2775 op_pix = s->hdsp.put_pixels_tab;
2777 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2779 if (s->mv_dir & MV_DIR_FORWARD) {
2780 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2781 op_pix = s->hdsp.avg_pixels_tab;
2782 op_qpix= s->me.qpel_avg;
2784 if (s->mv_dir & MV_DIR_BACKWARD) {
2785 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2790 /* skip dequant / idct if we are really late ;) */
2791 if(s->avctx->skip_idct){
2792 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2793 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2794 || s->avctx->skip_idct >= AVDISCARD_ALL)
2798 /* add dct residue */
// Codecs whose unquantize was not already applied during parsing go through
// add_dequant_dct; others use add_dct directly.
2799 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2800 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2801 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2802 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2803 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2804 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2806 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2807 if (s->chroma_y_shift){
2808 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2809 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2813 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2814 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2815 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2816 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2819 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2820 add_dct(s, block[0], 0, dest_y , dct_linesize);
2821 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2822 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2823 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2825 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2826 if(s->chroma_y_shift){//Chroma420
2827 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2828 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2831 dct_linesize = uvlinesize << s->interlaced_dct;
2832 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2834 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2835 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2836 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2837 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2838 if(!s->chroma_x_shift){//Chroma444
2839 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2840 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2841 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2842 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2847 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2848 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2851 /* dct only in intra block */
2852 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2853 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2854 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2855 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2856 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2858 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2859 if(s->chroma_y_shift){
2860 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2861 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2865 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2866 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2867 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2868 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
// Already-dequantized intra blocks (MPEG-1/2 decoder path): plain idct_put.
2872 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2873 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2874 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2875 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2877 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2878 if(s->chroma_y_shift){
2879 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2880 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2883 dct_linesize = uvlinesize << s->interlaced_dct;
2884 dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
2886 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2887 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2888 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2889 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2890 if(!s->chroma_x_shift){//Chroma444
2891 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2892 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2893 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2894 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
// Copy the scratchpad-rendered MB to the real destination (see lines 2739-2741).
2902 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2903 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2904 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/*
 * Public entry point: dispatches to MPV_decode_mb_internal with compile-time
 * constant (lowres_flag, is_mpeg12) arguments so each combination is
 * specialized by the always-inline helper.
 */
2909 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2911 if(s->out_format == FMT_MPEG1) {
2912 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2913 else MPV_decode_mb_internal(s, block, 0, 1);
2916 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2917 else MPV_decode_mb_internal(s, block, 0, 0);
2921 * @param h is the normal height, this will be reduced automatically if needed for the last row
/*
 * Draws the picture edges for the band just finished (when the codec needs
 * replicated borders for unrestricted MVs) and invokes the user's
 * draw_horiz_band callback if one is set.
 * NOTE(review): interior lines are missing from this listing (numbering jumps
 * 2931 -> 2937, 2984 -> 2986, ...); the hwaccel condition and the offset[1]
 * computation are partly out of view.
 */
2923 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2924 Picture *last, int y, int h, int picture_structure,
2925 int first_field, int draw_edges, int low_delay,
2926 int v_edge_pos, int h_edge_pos)
2928 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2929 int hshift = desc->log2_chroma_w;
2930 int vshift = desc->log2_chroma_h;
2931 const int field_pic = picture_structure != PICT_FRAME;
// Edge drawing is skipped for hardware-accelerated decoding and when the
// caller asked for emulated edges.
2937 if (!avctx->hwaccel &&
2938 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2941 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2942 int *linesize = cur->f.linesize;
2943 int sides = 0, edge_h;
2944 if (y==0) sides |= EDGE_TOP;
2945 if (y + h >= v_edge_pos)
2946 sides |= EDGE_BOTTOM;
2948 edge_h= FFMIN(h, v_edge_pos - y);
2950 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2951 linesize[0], h_edge_pos, edge_h,
2952 EDGE_WIDTH, EDGE_WIDTH, sides);
2953 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2954 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2955 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2956 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2957 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2958 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
// Clamp the band to the real picture height (last row may be shorter).
2961 h = FFMIN(h, avctx->height - y);
2963 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2965 if (avctx->draw_horiz_band) {
2967 int offset[AV_NUM_DATA_POINTERS];
// B frames (and low_delay / coded-order slices) can be handed out directly;
// otherwise the previously displayed picture is used.
2970 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2971 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2978 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2979 picture_structure == PICT_FRAME &&
2980 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2981 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2984 offset[0]= y * src->linesize[0];
2986 offset[2]= (y >> vshift) * src->linesize[1];
2987 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2993 avctx->draw_horiz_band(avctx, src, offset,
2994 y, picture_structure, h);
/*
 * MpegEncContext convenience wrapper around ff_draw_horiz_band(): edges are
 * drawn only for unrestricted-MV codecs when actually decoding (!intra_only).
 */
2998 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
3000 int draw_edges = s->unrestricted_mv && !s->intra_only;
3001 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
3002 &s->last_picture, y, h, s->picture_structure,
3003 s->first_field, draw_edges, s->low_delay,
3004 s->v_edge_pos, s->h_edge_pos);
/*
 * Computes the per-MB block indices (into the b8/mb-stride tables) and the
 * destination plane pointers s->dest[0..2] for the current (mb_x, mb_y),
 * accounting for lowres (mb_size = 4 - lowres => 16>>lowres pixels per MB)
 * and field pictures (mb_y>>1 row addressing in the else branch).
 */
3007 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3008 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
3009 const int uvlinesize = s->current_picture.f.linesize[1];
3010 const int mb_size= 4 - s->avctx->lowres;
// Indices 0-3: the four luma 8x8 blocks; 4-5: the chroma blocks, stored after
// the luma area (offset by b8_stride*mb_height*2).
3012 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3013 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3014 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3015 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3016 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3017 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3018 //block_index is not used by mpeg2, so it is not affected by chroma_format
3020 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
3021 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3022 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3024 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3026 if(s->picture_structure==PICT_FRAME){
3027 s->dest[0] += s->mb_y * linesize << mb_size;
3028 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3029 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3031 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3032 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3033 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3034 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3040 * Permute an 8x8 block.
3041 * @param block the block which will be permuted according to the given permutation vector
3042 * @param permutation the permutation vector
3043 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3044 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3045 * (inverse) permutated to scantable order!
/*
 * Two passes over only the first `last`+1 scantable positions: first copy the
 * referenced coefficients into a temp (that pass is partly missing from this
 * listing, numbering jumps 3056 -> 3061), then write them back through the
 * IDCT permutation.
 */
3047 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3053 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
3055 for(i=0; i<=last; i++){
3056 const int j= scantable[i];
3061 for(i=0; i<=last; i++){
3062 const int j= scantable[i];
3063 const int perm_j= permutation[j];
3064 block[perm_j]= temp[j];
/*
 * Flush callback: drops all picture references and resets the bitstream
 * parser state so decoding can restart cleanly (e.g. after a seek).
 */
3068 void ff_mpeg_flush(AVCodecContext *avctx){
3070 MpegEncContext *s = avctx->priv_data;
// Can be called before the context/pictures are allocated.
3072 if(s==NULL || s->picture==NULL)
3075 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3076 ff_mpeg_unref_picture(s, &s->picture[i]);
3077 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3079 ff_mpeg_unref_picture(s, &s->current_picture);
3080 ff_mpeg_unref_picture(s, &s->last_picture);
3081 ff_mpeg_unref_picture(s, &s->next_picture);
3083 s->mb_x= s->mb_y= 0;
3086 s->parse_context.state= -1;
3087 s->parse_context.frame_start_found= 0;
3088 s->parse_context.overread= 0;
3089 s->parse_context.overread_index= 0;
3090 s->parse_context.index= 0;
3091 s->parse_context.last_index= 0;
3092 s->bitstream_buffer_size=0;
/*
 * MPEG-1 intra dequantization: DC is scaled by the DC scaler, AC coefficients
 * by qscale * intra_matrix with the MPEG-1 "oddification" ((level-1)|1)
 * mismatch control applied per coefficient.
 * NOTE(review): lines are missing from this listing (3108 -> 3113, and the
 * loop/if closers after 3118), including the level sign handling.
 */
3096 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
3097 int16_t *block, int n, int qscale)
3099 int i, level, nCoeffs;
3100 const uint16_t *quant_matrix;
3102 nCoeffs= s->block_last_index[n];
// n < 4 are luma blocks, others chroma — each gets its own DC scaler.
3104 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3105 /* XXX: only mpeg1 */
3106 quant_matrix = s->intra_matrix;
3107 for(i=1;i<=nCoeffs;i++) {
3108 int j= s->intra_scantable.permutated[i];
3113 level = (int)(level * qscale * quant_matrix[j]) >> 3;
// Force the result odd (MPEG-1 mismatch control).
3114 level = (level - 1) | 1;
3117 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3118 level = (level - 1) | 1;
/*
 * MPEG-1 inter dequantization: ((2*level + 1) * qscale * inter_matrix) >> 4
 * with the odd-forcing mismatch control; starts at i=0 (inter blocks have no
 * separately coded DC).
 * NOTE(review): lines are missing from this listing (3135 -> 3140 and the
 * loop closers), including the sign branches.
 */
3125 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
3126 int16_t *block, int n, int qscale)
3128 int i, level, nCoeffs;
3129 const uint16_t *quant_matrix;
3131 nCoeffs= s->block_last_index[n];
3133 quant_matrix = s->inter_matrix;
3134 for(i=0; i<=nCoeffs; i++) {
3135 int j= s->intra_scantable.permutated[i];
3140 level = (((level << 1) + 1) * qscale *
3141 ((int) (quant_matrix[j]))) >> 4;
3142 level = (level - 1) | 1;
3145 level = (((level << 1) + 1) * qscale *
3146 ((int) (quant_matrix[j]))) >> 4;
3147 level = (level - 1) | 1;
/*
 * MPEG-2 intra dequantization: like the MPEG-1 variant but without the
 * per-coefficient odd-forcing, and with nCoeffs fixed at 63 when
 * alternate_scan is used (block may be sparsely indexed in scan order).
 * NOTE(review): lines are missing from this listing (3166 -> 3171, closers).
 */
3154 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
3155 int16_t *block, int n, int qscale)
3157 int i, level, nCoeffs;
3158 const uint16_t *quant_matrix;
3160 if(s->alternate_scan) nCoeffs= 63;
3161 else nCoeffs= s->block_last_index[n];
3163 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3164 quant_matrix = s->intra_matrix;
3165 for(i=1;i<=nCoeffs;i++) {
3166 int j= s->intra_scantable.permutated[i];
3171 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3174 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * Bitexact variant of MPEG-2 intra dequantization; the visible body matches
 * dct_unquantize_mpeg2_intra_c, with the distinguishing code (presumably the
 * mismatch/sum handling — TODO confirm) in lines missing from this listing
 * (numbering jumps 3185 -> 3188, 3195 -> 3200, and after 3203).
 */
3181 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
3182 int16_t *block, int n, int qscale)
3184 int i, level, nCoeffs;
3185 const uint16_t *quant_matrix;
3188 if(s->alternate_scan) nCoeffs= 63;
3189 else nCoeffs= s->block_last_index[n];
3191 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3193 quant_matrix = s->intra_matrix;
3194 for(i=1;i<=nCoeffs;i++) {
3195 int j= s->intra_scantable.permutated[i];
3200 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3203 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * MPEG-2 inter dequantization: ((2*level + 1) * qscale * inter_matrix) >> 4,
 * no odd-forcing (MPEG-2 mismatch control differs from MPEG-1; the final
 * handling is in lines missing from this listing after 3234).
 */
3212 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
3213 int16_t *block, int n, int qscale)
3215 int i, level, nCoeffs;
3216 const uint16_t *quant_matrix;
3219 if(s->alternate_scan) nCoeffs= 63;
3220 else nCoeffs= s->block_last_index[n];
3222 quant_matrix = s->inter_matrix;
3223 for(i=0; i<=nCoeffs; i++) {
3224 int j= s->intra_scantable.permutated[i];
3229 level = (((level << 1) + 1) * qscale *
3230 ((int) (quant_matrix[j]))) >> 4;
3233 level = (((level << 1) + 1) * qscale *
3234 ((int) (quant_matrix[j]))) >> 4;
/*
 * H.263-style intra dequantization: uniform quantizer, level*qmul +/- qadd
 * by sign, no quant matrix. qmul assignment and the nCoeffs selection for
 * the AC-pred case are in lines missing from this listing (3249 -> 3254,
 * 3255 -> 3262).
 */
3243 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
3244 int16_t *block, int n, int qscale)
3246 int i, level, qmul, qadd;
3249 av_assert2(s->block_last_index[n]>=0);
3254 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
// "| 1" keeps qadd odd, also for even qscale.
3255 qadd = (qscale - 1) | 1;
3262 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3264 for(i=1; i<=nCoeffs; i++) {
3268 level = level * qmul - qadd;
3270 level = level * qmul + qadd;
/*
 * H.263-style inter dequantization: same uniform level*qmul +/- qadd scheme
 * as the intra variant but without DC scaling, and starting at i=0.
 * NOTE(review): qmul assignment and loop body details are in lines missing
 * from this listing (3285 -> 3288, 3290 -> 3294).
 */
3277 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
3278 int16_t *block, int n, int qscale)
3280 int i, level, qmul, qadd;
3283 av_assert2(s->block_last_index[n]>=0);
3285 qadd = (qscale - 1) | 1;
3288 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3290 for(i=0; i<=nCoeffs; i++) {
3294 level = level * qmul - qadd;
3296 level = level * qmul + qadd;
3304 * set qscale and update qscale dependent variables.
/*
 * Clamps qscale to the valid [1?, 31] range (the low clamp is in a line
 * missing from this listing before 3310) and refreshes the derived chroma
 * qscale and DC scalers from their lookup tables.
 */
3306 void ff_set_qscale(MpegEncContext * s, int qscale)
3310 else if (qscale > 31)
3314 s->chroma_qscale= s->chroma_qscale_table[qscale];
3316 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3317 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/*
 * Frame-threading: report that decoding of the current picture has reached
 * MB row s->mb_y, except for cases where rows can still change later
 * (B frames with partitioned frames or after errors).
 */
3320 void ff_MPV_report_decode_progress(MpegEncContext *s)
3322 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3323 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3326 #if CONFIG_ERROR_RESILIENCE
/*
 * Copies the per-frame state the error-resilience module needs out of the
 * MpegEncContext into its ERContext, then starts ER for this frame.
 */
3327 void ff_mpeg_er_frame_start(MpegEncContext *s)
3329 ERContext *er = &s->er;
3331 er->cur_pic = s->current_picture_ptr;
3332 er->last_pic = s->last_picture_ptr;
3333 er->next_pic = s->next_picture_ptr;
3335 er->pp_time = s->pp_time;
3336 er->pb_time = s->pb_time;
3337 er->quarter_sample = s->quarter_sample;
3338 er->partitioned_frame = s->partitioned_frame;
3340 ff_er_frame_start(er);
3342 #endif /* CONFIG_ERROR_RESILIENCE */