2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
38 #include "mpegvideo.h"
41 #include "xvmc_internal.h"
/* Forward declarations of the C reference dequantization routines.
 * ff_dct_common_init() below installs these as the default function
 * pointers on MpegEncContext; arch-specific init may override them. */
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
/* bitexact variant: selected when CODEC_FLAG_BITEXACT is set (see init) */
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
/* Identity mapping from luma qscale to chroma qscale (chroma_qscale_table
 * default set in ff_MPV_common_defaults()).
 * NOTE(review): the closing brace of this initializer is elided in this view. */
60 static const uint8_t ff_default_chroma_qscale_table[32] = {
61 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
62 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
63 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale: constant 8 for all 128 possible qscale values.
 * Also entry [0] of ff_mpeg2_dc_scale_table below. */
66 const uint8_t ff_mpeg1_dc_scale_table[128] = {
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
68 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale for intra_dc_precision == 1: constant 4. */
78 static const uint8_t mpeg2_dc_scale_table1[128] = {
79 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
80 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale for intra_dc_precision == 2: constant 2. */
90 static const uint8_t mpeg2_dc_scale_table2[128] = {
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale for intra_dc_precision == 3: constant 1. */
102 static const uint8_t mpeg2_dc_scale_table3[128] = {
103 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
104 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Indexed by intra_dc_precision (0..3); selects the per-qscale
 * DC scale table defined above. */
114 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
115 ff_mpeg1_dc_scale_table,
116 mpeg2_dc_scale_table1,
117 mpeg2_dc_scale_table2,
118 mpeg2_dc_scale_table3,
/* Pixel format list advertised by codecs that only support YUV 4:2:0.
 * NOTE(review): initializer body is elided in this view — presumably
 * AV_PIX_FMT_YUV420P terminated by AV_PIX_FMT_NONE; confirm upstream. */
121 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback (installed in init_er()): reconstructs one
 * macroblock from the concealment parameters chosen by the ER code.
 * opaque is the MpegEncContext. */
126 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
128 int mb_x, int mb_y, int mb_intra, int mb_skipped)
130 MpegEncContext *s = opaque;
133 s->mv_type = mv_type;
134 s->mb_intra = mb_intra;
135 s->mb_skipped = mb_skipped;
138 memcpy(s->mv, mv, sizeof(*mv));
140 ff_init_block_index(s);
141 ff_update_block_index(s);
143 s->dsp.clear_blocks(s->block[0]);
/* Point dest[] at the current MB inside the current picture; chroma
 * planes are offset by the chroma subsampling shifts. */
145 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
146 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
147 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
150 ff_MPV_decode_mb(s, s->block);
153 /* init common dct for both encoder and decoder */
154 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Initialize DSP helper contexts. */
156 ff_dsputil_init(&s->dsp, s->avctx);
157 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
158 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* Install the C reference dequantizers; arch init below may replace them. */
160 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
161 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
162 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
163 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
164 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* Bit-exact output requested: use the mismatch-free intra variant. */
165 if (s->flags & CODEC_FLAG_BITEXACT)
166 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
167 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture overrides (guarded by ARCH_* conditionals upstream). */
170 ff_MPV_common_init_arm(s);
172 ff_MPV_common_init_bfin(s);
174 ff_MPV_common_init_ppc(s);
176 ff_MPV_common_init_x86(s);
178 /* load & permutate scantables
179 * note: only wmv uses different ones
181 if (s->alternate_scan) {
182 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
183 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
185 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
186 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
189 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads). Returns 0 on success, AVERROR(ENOMEM)
 * on allocation failure. */
194 static int frame_size_alloc(MpegEncContext *s, int linesize)
/* Align to 32 bytes; FFABS because linesize may be negative (flipped). */
196 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
198 // edge emu needs blocksize + filter length - 1
199 // (= 17x17 for halfpel / 21x21 for h264)
200 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
201 // at uvlinesize. It supports only YUV420 so 24x24 is enough
202 // linesize * interlaced * MBsize
203 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
206 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
/* The remaining scratchpads alias different regions of me.scratchpad. */
208 s->me.temp = s->me.scratchpad;
209 s->rd_scratchpad = s->me.scratchpad;
210 s->b_scratchpad = s->me.scratchpad;
211 s->obmc_scratchpad = s->me.scratchpad + 16;
/* fail label (elided in this view): free what was allocated and report OOM. */
215 av_freep(&s->edge_emu_buffer);
216 return AVERROR(ENOMEM);
220 * Allocate a frame buffer
222 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* WMV3IMAGE/VC1IMAGE/MSS2 allocate their own buffers below instead of
 * going through the (possibly user-provided) thread-aware get_buffer. */
227 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
228 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
229 s->codec_id != AV_CODEC_ID_MSS2)
230 r = ff_thread_get_buffer(s->avctx, &pic->tf,
231 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
/* else branch (elided): fill frame parameters and use the default
 * allocator, bypassing user callbacks. */
233 pic->f.width = s->avctx->width;
234 pic->f.height = s->avctx->height;
235 pic->f.format = s->avctx->pix_fmt;
236 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
239 if (r < 0 || !pic->f.buf[0]) {
240 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
/* Hardware acceleration: allocate per-picture hwaccel private data. */
245 if (s->avctx->hwaccel) {
246 assert(!pic->hwaccel_picture_private);
247 if (s->avctx->hwaccel->priv_data_size) {
248 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
249 if (!pic->hwaccel_priv_buf) {
250 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
253 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
/* Strides must stay constant across the sequence once established. */
257 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
258 s->uvlinesize != pic->f.linesize[1])) {
259 av_log(s->avctx, AV_LOG_ERROR,
260 "get_buffer() failed (stride changed)\n");
261 ff_mpeg_unref_picture(s, pic);
/* The code here assumes U and V strides match. */
265 if (pic->f.linesize[1] != pic->f.linesize[2]) {
266 av_log(s->avctx, AV_LOG_ERROR,
267 "get_buffer() failed (uv stride mismatch)\n");
268 ff_mpeg_unref_picture(s, pic);
/* Lazily allocate the linesize-dependent scratch buffers. */
272 if (!s->edge_emu_buffer &&
273 (ret = frame_size_alloc(s, pic->f.linesize[0])) < 0) {
274 av_log(s->avctx, AV_LOG_ERROR,
275 "get_buffer() failed to allocate context scratch buffers.\n");
276 ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data tables (refcounted buffers).
 * Safe on a picture whose tables were never allocated: av_buffer_unref()
 * is a no-op on NULL. */
283 void ff_free_picture_tables(Picture *pic)
287 av_buffer_unref(&pic->mb_var_buf);
288 av_buffer_unref(&pic->mc_mb_var_buf);
289 av_buffer_unref(&pic->mb_mean_buf);
290 av_buffer_unref(&pic->mbskip_table_buf);
291 av_buffer_unref(&pic->qscale_table_buf);
292 av_buffer_unref(&pic->mb_type_buf);
/* Two sets of motion vectors / reference indices (forward, backward). */
294 for (i = 0; i < 2; i++) {
295 av_buffer_unref(&pic->motion_val_buf[i]);
296 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data tables sized from the current
 * macroblock geometry. Returns 0 or AVERROR(ENOMEM); partially allocated
 * tables are left for the caller to free via ff_free_picture_tables(). */
300 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
302 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
303 const int mb_array_size = s->mb_stride * s->mb_height;
304 const int b8_array_size = s->b8_stride * s->mb_height * 2;
/* Tables needed by both encoder and decoder. */
308 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
309 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
310 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
312 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
313 return AVERROR(ENOMEM);
/* Encoder-only statistics tables (guard elided in this view — presumably
 * if (s->encoding); confirm upstream). */
316 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
317 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
318 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
319 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
320 return AVERROR(ENOMEM);
/* Motion vector / reference index tables for H.263-family or encoding. */
323 if (s->out_format == FMT_H263 || s->encoding) {
324 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
325 int ref_index_size = 4 * mb_array_size;
327 for (i = 0; mv_size && i < 2; i++) {
328 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
329 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
330 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
331 return AVERROR(ENOMEM);
/* Ensure every per-picture table buffer is writable (un-shared),
 * copying any buffer that is currently referenced elsewhere.
 * Returns 0 or a negative AVERROR from av_buffer_make_writable(). */
338 static int make_tables_writable(Picture *pic)
341 #define MAKE_WRITABLE(table) \
344 (ret = av_buffer_make_writable(&pic->table)) < 0)\
348 MAKE_WRITABLE(mb_var_buf);
349 MAKE_WRITABLE(mc_mb_var_buf);
350 MAKE_WRITABLE(mb_mean_buf);
351 MAKE_WRITABLE(mbskip_table_buf);
352 MAKE_WRITABLE(qscale_table_buf);
353 MAKE_WRITABLE(mb_type_buf);
355 for (i = 0; i < 2; i++) {
356 MAKE_WRITABLE(motion_val_buf[i]);
357 MAKE_WRITABLE(ref_index_buf[i]);
364 * Allocate a Picture.
365 * The pixels are allocated/set by calling get_buffer() if shared = 0
367 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* shared path: caller supplied the pixel data already. */
372 assert(pic->f.data[0]);
/* non-shared path: picture must not already own a buffer. */
375 assert(!pic->f.buf[0]);
377 if (alloc_frame_buffer(s, pic) < 0)
/* First allocation establishes the context-wide strides. */
380 s->linesize = pic->f.linesize[0];
381 s->uvlinesize = pic->f.linesize[1];
/* Allocate the side tables once, then make them writable on reuse. */
384 if (!pic->qscale_table_buf)
385 ret = alloc_picture_tables(s, pic);
387 ret = make_tables_writable(pic);
/* Publish convenience pointers into the refcounted buffers. */
392 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
393 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
394 pic->mb_mean = pic->mb_mean_buf->data;
397 pic->mbskip_table = pic->mbskip_table_buf->data;
/* +2*mb_stride+1 skips the guard row/column used by prediction. */
398 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
399 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
401 if (pic->motion_val_buf[0]) {
402 for (i = 0; i < 2; i++) {
403 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
404 pic->ref_index[i] = pic->ref_index_buf[i]->data;
/* fail label (elided): undo everything and report OOM. */
410 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
411 ff_mpeg_unref_picture(s, pic);
412 ff_free_picture_tables(pic);
413 return AVERROR(ENOMEM);
417 * Deallocate a picture.
419 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
/* Everything after mb_mean in Picture is cleared below; fields up to and
 * including mb_mean (the table pointers/buffers) are preserved. */
421 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
424 /* WM Image / Screen codecs allocate internal buffers with different
425 * dimensions / colorspaces; ignore user-defined callbacks for these. */
426 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
427 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
428 s->codec_id != AV_CODEC_ID_MSS2)
429 ff_thread_release_buffer(s->avctx, &pic->tf);
431 av_frame_unref(&pic->f);
433 av_buffer_unref(&pic->hwaccel_priv_buf);
/* Tables are dropped only when a reallocation was requested. */
435 if (pic->needs_realloc)
436 ff_free_picture_tables(pic);
438 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst share src's side-data tables via buffer references.
 * On any reference failure, all of dst's tables are freed and
 * AVERROR(ENOMEM) is returned. */
441 static int update_picture_tables(Picture *dst, Picture *src)
/* Re-reference only when dst does not already share src's buffer. */
445 #define UPDATE_TABLE(table)\
448 (!dst->table || dst->table->buffer != src->table->buffer)) {\
449 av_buffer_unref(&dst->table);\
450 dst->table = av_buffer_ref(src->table);\
452 ff_free_picture_tables(dst);\
453 return AVERROR(ENOMEM);\
458 UPDATE_TABLE(mb_var_buf);
459 UPDATE_TABLE(mc_mb_var_buf);
460 UPDATE_TABLE(mb_mean_buf);
461 UPDATE_TABLE(mbskip_table_buf);
462 UPDATE_TABLE(qscale_table_buf);
463 UPDATE_TABLE(mb_type_buf);
464 for (i = 0; i < 2; i++) {
465 UPDATE_TABLE(motion_val_buf[i]);
466 UPDATE_TABLE(ref_index_buf[i]);
/* Copy the raw pointers that point into the (now shared) buffers. */
469 dst->mb_var = src->mb_var;
470 dst->mc_mb_var = src->mc_mb_var;
471 dst->mb_mean = src->mb_mean;
472 dst->mbskip_table = src->mbskip_table;
473 dst->qscale_table = src->qscale_table;
474 dst->mb_type = src->mb_type;
475 for (i = 0; i < 2; i++) {
476 dst->motion_val[i] = src->motion_val[i];
477 dst->ref_index[i] = src->ref_index[i];
/* Reference src into dst: frame data, side tables, hwaccel private data
 * and the scalar metadata. dst must be empty on entry. On failure dst is
 * unreferenced again (see the fail label at the end). */
483 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
487 av_assert0(!dst->f.buf[0]);
488 av_assert0(src->f.buf[0]);
492 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
496 ret = update_picture_tables(dst, src);
500 if (src->hwaccel_picture_private) {
501 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
502 if (!dst->hwaccel_priv_buf)
504 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
/* Plain scalar state copied by value. */
507 dst->field_picture = src->field_picture;
508 dst->mb_var_sum = src->mb_var_sum;
509 dst->mc_mb_var_sum = src->mc_mb_var_sum;
510 dst->b_frame_score = src->b_frame_score;
511 dst->needs_realloc = src->needs_realloc;
512 dst->reference = src->reference;
513 dst->shared = src->shared;
/* fail label (elided): roll back the partial reference. */
517 ff_mpeg_unref_picture(s, dst);
/* Swap the U and V block pointers (used for the "VCR2" fourcc, which
 * stores chroma planes in the opposite order).
 * NOTE(review): only part of the swap is visible here — presumably a
 * temporary holds pblocks[4] before this assignment; confirm upstream. */
521 static void exchange_uv(MpegEncContext *s)
526 s->pblocks[4] = s->pblocks[5];
/* Allocate the per-thread (slice context) buffers: ME maps, DCT blocks
 * and, for H.263-family, the AC prediction values. Returns 0 on success,
 * -1 on failure (cleanup happens later in ff_MPV_common_end()). */
530 static int init_duplicate_context(MpegEncContext *s)
532 int y_size = s->b8_stride * (2 * s->mb_height + 1);
533 int c_size = s->mb_stride * (s->mb_height + 1);
534 int yc_size = y_size + 2 * c_size;
542 s->obmc_scratchpad = NULL;
/* Motion-estimation hash map and its score map. */
545 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
546 ME_MAP_SIZE * sizeof(uint32_t), fail)
547 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
548 ME_MAP_SIZE * sizeof(uint32_t), fail)
549 if (s->avctx->noise_reduction) {
550 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
551 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered. */
554 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
555 s->block = s->blocks[0];
557 for (i = 0; i < 12; i++) {
558 s->pblocks[i] = &s->block[i];
/* VCR2 stores chroma in reverse order — swap the U/V block pointers. */
560 if (s->avctx->codec_tag == AV_RL32("VCR2"))
563 if (s->out_format == FMT_H263) {
/* AC prediction values: 16 int16_t per block position. */
565 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
566 yc_size * sizeof(int16_t) * 16, fail);
567 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
568 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
569 s->ac_val[2] = s->ac_val[1] + c_size;
574 return -1; // free() through ff_MPV_common_end()
/* Free everything init_duplicate_context()/frame_size_alloc() allocated
 * for one slice context. av_freep() tolerates NULL members. */
577 static void free_duplicate_context(MpegEncContext *s)
582 av_freep(&s->edge_emu_buffer);
583 av_freep(&s->me.scratchpad);
/* obmc_scratchpad aliases me.scratchpad, so it is only reset, not freed. */
587 s->obmc_scratchpad = NULL;
589 av_freep(&s->dct_error_sum);
590 av_freep(&s->me.map);
591 av_freep(&s->me.score_map);
592 av_freep(&s->blocks);
593 av_freep(&s->ac_val_base);
/* Copy the per-thread pointer members from src into bak so they survive
 * the wholesale memcpy in ff_update_duplicate_context() below.
 * NOTE(review): several COPY() lines are elided in this view. */
597 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
599 #define COPY(a) bak->a = src->a
600 COPY(edge_emu_buffer);
605 COPY(obmc_scratchpad);
612 COPY(me.map_generation);
/* Refresh a slice-thread context from the master context: copy src over
 * dst wholesale, then restore dst's own per-thread pointers from the
 * backup and re-derive the block pointers. */
624 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
628 // FIXME copy only needed parts
630 backup_duplicate_context(&bak, dst);
631 memcpy(dst, src, sizeof(MpegEncContext));
632 backup_duplicate_context(dst, &bak);
/* pblocks must point into dst's own block array, not src's. */
633 for (i = 0; i < 12; i++) {
634 dst->pblocks[i] = &dst->block[i];
636 if (dst->avctx->codec_tag == AV_RL32("VCR2"))
/* Scratch buffers are per-thread; allocate if this context lacks them. */
638 if (!dst->edge_emu_buffer &&
639 (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
640 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
641 "scratch buffers.\n");
644 // STOP_TIMER("update_duplicate_context")
645 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: synchronize the destination decoder context with the
 * source context after the source finished setup/decoding of a frame.
 * Copies picture references, bitstream buffer and assorted state. */
649 int ff_mpeg_update_thread_context(AVCodecContext *dst,
650 const AVCodecContext *src)
653 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
655 if (dst == src || !s1->context_initialized)
658 // FIXME can parameters change on I-frames?
659 // in that case dst may need a reinit
660 if (!s->context_initialized) {
/* First call: clone the whole context, then detach buffers the copy
 * must not share with the source. */
661 memcpy(s, s1, sizeof(MpegEncContext));
664 s->bitstream_buffer = NULL;
665 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
667 ff_MPV_common_init(s);
/* Geometry change: rebuild the resolution-dependent state. */
670 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
672 s->context_reinit = 0;
673 s->height = s1->height;
674 s->width = s1->width;
675 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
679 s->avctx->coded_height = s1->avctx->coded_height;
680 s->avctx->coded_width = s1->avctx->coded_width;
681 s->avctx->width = s1->avctx->width;
682 s->avctx->height = s1->avctx->height;
684 s->coded_picture_number = s1->coded_picture_number;
685 s->picture_number = s1->picture_number;
/* Re-reference the whole picture pool from the source context. */
687 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
688 ff_mpeg_unref_picture(s, &s->picture[i]);
689 if (s1->picture[i].f.buf[0] &&
690 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
694 #define UPDATE_PICTURE(pic)\
696 ff_mpeg_unref_picture(s, &s->pic);\
697 if (s1->pic.f.buf[0])\
698 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
700 ret = update_picture_tables(&s->pic, &s1->pic);\
705 UPDATE_PICTURE(current_picture);
706 UPDATE_PICTURE(last_picture);
707 UPDATE_PICTURE(next_picture);
/* Pointers into s1's picture array are rebased into s's array. */
709 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
710 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
711 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
713 // Error/bug resilience
714 s->next_p_frame_damaged = s1->next_p_frame_damaged;
715 s->workaround_bugs = s1->workaround_bugs;
/* Bulk-copy the contiguous range of members last_time_base..pb_field_time
 * (relies on MpegEncContext member layout). */
718 memcpy(&s->last_time_base, &s1->last_time_base,
719 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
720 (char *) &s1->last_time_base);
723 s->max_b_frames = s1->max_b_frames;
724 s->low_delay = s1->low_delay;
725 s->droppable = s1->droppable;
727 // DivX handling (doesn't work)
728 s->divx_packed = s1->divx_packed;
730 if (s1->bitstream_buffer) {
731 if (s1->bitstream_buffer_size +
732 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
733 av_fast_malloc(&s->bitstream_buffer,
734 &s->allocated_bitstream_buffer_size,
735 s1->allocated_bitstream_buffer_size);
736 s->bitstream_buffer_size = s1->bitstream_buffer_size;
737 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
738 s1->bitstream_buffer_size);
/* Zero the padding so the bitstream reader never reads stale bytes. */
739 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
740 FF_INPUT_BUFFER_PADDING_SIZE);
743 // linesize dependend scratch buffer allocation
744 if (!s->edge_emu_buffer)
746 if (frame_size_alloc(s, s1->linesize) < 0) {
747 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
748 "scratch buffers.\n");
749 return AVERROR(ENOMEM);
/* else branch (elided): source linesize unknown, cannot size buffers. */
752 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
753 "be allocated due to unknown size.\n");
757 // MPEG2/interlacing info
758 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
759 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
761 if (!s1->first_field) {
762 s->last_pict_type = s1->pict_type;
763 if (s1->current_picture_ptr)
764 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
771 * Set the given MpegEncContext to common defaults
772 * (same for encoding and decoding).
773 * The changed fields will not depend upon the
774 * prior state of the MpegEncContext.
776 void ff_MPV_common_defaults(MpegEncContext *s)
/* MPEG-1 DC scale and identity chroma qscale as the baseline. */
778 s->y_dc_scale_table =
779 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
780 s->chroma_qscale_table = ff_default_chroma_qscale_table;
781 s->progressive_frame = 1;
782 s->progressive_sequence = 1;
783 s->picture_structure = PICT_FRAME;
785 s->coded_picture_number = 0;
786 s->picture_number = 0;
791 s->slice_context_count = 1;
795 * Set the given MpegEncContext to defaults for decoding.
796 * the changed fields will not depend upon
797 * the prior state of the MpegEncContext.
799 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Currently identical to the common defaults. */
801 ff_MPV_common_defaults(s);
/* Initialize the error-resilience context from the current macroblock
 * geometry and wire up the decode-MB callback. Returns 0 or
 * AVERROR(ENOMEM). */
804 static int init_er(MpegEncContext *s)
806 ERContext *er = &s->er;
807 int mb_array_size = s->mb_height * s->mb_stride;
810 er->avctx = s->avctx;
/* Share the geometry tables with the main context (no copies). */
813 er->mb_index2xy = s->mb_index2xy;
814 er->mb_num = s->mb_num;
815 er->mb_width = s->mb_width;
816 er->mb_height = s->mb_height;
817 er->mb_stride = s->mb_stride;
818 er->b8_stride = s->b8_stride;
820 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
821 er->error_status_table = av_mallocz(mb_array_size);
822 if (!er->er_temp_buffer || !er->error_status_table)
825 er->mbskip_table = s->mbskip_table;
826 er->mbintra_table = s->mbintra_table;
828 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
829 er->dc_val[i] = s->dc_val[i];
831 er->decode_mb = mpeg_er_decode_mb;
/* fail label (elided): release both ER buffers and report OOM. */
836 av_freep(&er->er_temp_buffer);
837 av_freep(&er->error_status_table);
838 return AVERROR(ENOMEM);
842 * Initialize and allocates MpegEncContext fields dependent on the resolution.
844 static int init_context_frame(MpegEncContext *s)
846 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Derive macroblock geometry from the pixel dimensions; the extra +1 in
 * the strides leaves room for edge/guard entries. */
848 s->mb_width = (s->width + 15) / 16;
849 s->mb_stride = s->mb_width + 1;
850 s->b8_stride = s->mb_width * 2 + 1;
851 s->b4_stride = s->mb_width * 4 + 1;
852 mb_array_size = s->mb_height * s->mb_stride;
853 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
855 /* set default edge pos, will be overriden
856 * in decode_header if needed */
857 s->h_edge_pos = s->mb_width * 16;
858 s->v_edge_pos = s->mb_height * 16;
860 s->mb_num = s->mb_width * s->mb_height;
/* Block wrap values per plane (entries 0-2 and 4 elided in this view). */
865 s->block_wrap[3] = s->b8_stride;
867 s->block_wrap[5] = s->mb_stride;
869 y_size = s->b8_stride * (2 * s->mb_height + 1);
870 c_size = s->mb_stride * (s->mb_height + 1);
871 yc_size = y_size + 2 * c_size;
873 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
874 fail); // error ressilience code looks cleaner with this
875 for (y = 0; y < s->mb_height; y++)
876 for (x = 0; x < s->mb_width; x++)
877 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
879 s->mb_index2xy[s->mb_height * s->mb_width] =
880 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
/* Encoder-only section (guard elided — presumably if (s->encoding)). */
883 /* Allocate MV tables */
884 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
885 mv_table_size * 2 * sizeof(int16_t), fail);
886 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
887 mv_table_size * 2 * sizeof(int16_t), fail);
888 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
889 mv_table_size * 2 * sizeof(int16_t), fail);
890 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
891 mv_table_size * 2 * sizeof(int16_t), fail);
892 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
893 mv_table_size * 2 * sizeof(int16_t), fail);
894 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
895 mv_table_size * 2 * sizeof(int16_t), fail);
/* Working pointers skip the guard row/column of each base table. */
896 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
897 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
898 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
899 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
901 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
903 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
905 /* Allocate MB type table */
906 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
907 sizeof(uint16_t), fail); // needed for encoding
909 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
912 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
913 mb_array_size * sizeof(float), fail);
914 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
915 mb_array_size * sizeof(float), fail);
919 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
920 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
921 /* interlaced direct mode decoding tables */
922 for (i = 0; i < 2; i++) {
924 for (j = 0; j < 2; j++) {
925 for (k = 0; k < 2; k++) {
926 FF_ALLOCZ_OR_GOTO(s->avctx,
927 s->b_field_mv_table_base[i][j][k],
928 mv_table_size * 2 * sizeof(int16_t),
930 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
933 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
934 mb_array_size * 2 * sizeof(uint8_t), fail);
935 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
936 mv_table_size * 2 * sizeof(int16_t), fail);
937 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
940 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
941 mb_array_size * 2 * sizeof(uint8_t), fail);
944 if (s->out_format == FMT_H263) {
/* ac values */
946 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
947 s->coded_block = s->coded_block_base + s->b8_stride + 1;
949 /* cbp, ac_pred, pred_dir */
950 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
951 mb_array_size * sizeof(uint8_t), fail);
952 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
953 mb_array_size * sizeof(uint8_t), fail);
956 if (s->h263_pred || s->h263_plus || !s->encoding) {
958 // MN: we need these for error resilience of intra-frames
959 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
960 yc_size * sizeof(int16_t), fail);
/* Layout: luma plane first, then the two chroma planes. */
961 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
962 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
963 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 = 128 * 8 is the neutral DC predictor value. */
964 for (i = 0; i < yc_size; i++)
965 s->dc_val_base[i] = 1024;
968 /* which mb is a intra block */
969 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
970 memset(s->mbintra_table, 1, mb_array_size);
972 /* init macroblock skip table */
973 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
974 // Note the + 1 is for a quicker mpeg4 slice_end detection
/* fail label (elided): report OOM; cleanup happens in free_context_frame(). */
978 return AVERROR(ENOMEM);
982 * init common structure for both encoder and decoder.
983 * this assumes that some variables like width/height are already set
985 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* Slice threading uses one duplicated context per thread. */
988 int nb_slices = (HAVE_THREADS &&
989 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
990 s->avctx->thread_count : 1;
992 if (s->encoding && s->avctx->slices)
993 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 rounds mb_height up to a multiple of two MB rows. */
995 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
996 s->mb_height = (s->height + 31) / 32 * 2;
998 s->mb_height = (s->height + 15) / 16;
1000 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1001 av_log(s->avctx, AV_LOG_ERROR,
1002 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp the slice count to what the geometry and MAX_THREADS allow. */
1006 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1009 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1011 max_slices = MAX_THREADS;
1012 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1013 " reducing to %d\n", nb_slices, max_slices);
1014 nb_slices = max_slices;
1017 if ((s->width || s->height) &&
1018 av_image_check_size(s->width, s->height, 0, s->avctx))
1021 ff_dct_common_init(s);
1023 s->flags = s->avctx->flags;
1024 s->flags2 = s->avctx->flags2;
1026 /* set chroma shifts */
1027 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1029 &s->chroma_y_shift);
1031 /* convert fourcc to upper case */
1032 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1034 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
/* Allocate and reset the picture pool. */
1036 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1037 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1038 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1039 avcodec_get_frame_defaults(&s->picture[i].f);
1041 memset(&s->next_picture, 0, sizeof(s->next_picture));
1042 memset(&s->last_picture, 0, sizeof(s->last_picture));
1043 memset(&s->current_picture, 0, sizeof(s->current_picture));
1044 avcodec_get_frame_defaults(&s->next_picture.f);
1045 avcodec_get_frame_defaults(&s->last_picture.f);
1046 avcodec_get_frame_defaults(&s->current_picture.f);
1048 if (s->width && s->height) {
1049 if (init_context_frame(s))
1052 s->parse_context.state = -1;
1055 s->context_initialized = 1;
1056 s->thread_context[0] = s;
1058 if (s->width && s->height) {
/* Clone one context per additional slice thread and split the MB rows
 * evenly across them. */
1059 if (nb_slices > 1) {
1060 for (i = 1; i < nb_slices; i++) {
1061 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1062 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1065 for (i = 0; i < nb_slices; i++) {
1066 if (init_duplicate_context(s->thread_context[i]) < 0)
1068 s->thread_context[i]->start_mb_y =
1069 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1070 s->thread_context[i]->end_mb_y =
1071 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
/* Single-slice path: the master context is the only worker. */
1074 if (init_duplicate_context(s) < 0)
1077 s->end_mb_y = s->mb_height;
1079 s->slice_context_count = nb_slices;
/* fail label (elided): full teardown. */
1084 ff_MPV_common_end(s);
1089 * Frees and resets MpegEncContext fields depending on the resolution.
1090 * Is used during resolution changes to avoid a full reinitialization of the
1093 static int free_context_frame(MpegEncContext *s)
1097 av_freep(&s->mb_type);
/* MV tables: free the base allocations, then clear the offset pointers
 * that pointed into them. */
1098 av_freep(&s->p_mv_table_base);
1099 av_freep(&s->b_forw_mv_table_base);
1100 av_freep(&s->b_back_mv_table_base);
1101 av_freep(&s->b_bidir_forw_mv_table_base);
1102 av_freep(&s->b_bidir_back_mv_table_base);
1103 av_freep(&s->b_direct_mv_table_base);
1104 s->p_mv_table = NULL;
1105 s->b_forw_mv_table = NULL;
1106 s->b_back_mv_table = NULL;
1107 s->b_bidir_forw_mv_table = NULL;
1108 s->b_bidir_back_mv_table = NULL;
1109 s->b_direct_mv_table = NULL;
1110 for (i = 0; i < 2; i++) {
1111 for (j = 0; j < 2; j++) {
1112 for (k = 0; k < 2; k++) {
1113 av_freep(&s->b_field_mv_table_base[i][j][k]);
1114 s->b_field_mv_table[i][j][k] = NULL;
1116 av_freep(&s->b_field_select_table[i][j]);
1117 av_freep(&s->p_field_mv_table_base[i][j]);
1118 s->p_field_mv_table[i][j] = NULL;
1120 av_freep(&s->p_field_select_table[i]);
1123 av_freep(&s->dc_val_base);
1124 av_freep(&s->coded_block_base);
1125 av_freep(&s->mbintra_table);
1126 av_freep(&s->cbp_table);
1127 av_freep(&s->pred_dir_table);
1129 av_freep(&s->mbskip_table);
1131 av_freep(&s->er.error_status_table);
1132 av_freep(&s->er.er_temp_buffer);
1133 av_freep(&s->mb_index2xy);
1134 av_freep(&s->lambda_table);
1135 av_freep(&s->cplx_tab);
1136 av_freep(&s->bits_tab);
/* Strides reset so the next allocation re-establishes them. */
1138 s->linesize = s->uvlinesize = 0;
/* Rebuild the resolution-dependent parts of the context after a frame
 * size change, without a full reinitialization: tear down the slice
 * contexts and frame tables, recompute geometry, then rebuild. */
1143 int ff_MPV_common_frame_size_change(MpegEncContext *s)
1147 if (s->slice_context_count > 1) {
1148 for (i = 0; i < s->slice_context_count; i++) {
1149 free_duplicate_context(s->thread_context[i]);
/* thread_context[0] is s itself — only free the clones. */
1151 for (i = 1; i < s->slice_context_count; i++) {
1152 av_freep(&s->thread_context[i]);
1155 free_duplicate_context(s);
1157 if ((err = free_context_frame(s)) < 0)
/* Mark pooled pictures for table reallocation at next use. */
1161 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1162 s->picture[i].needs_realloc = 1;
1165 s->last_picture_ptr =
1166 s->next_picture_ptr =
1167 s->current_picture_ptr = NULL;
/* Recompute mb_height (same rule as ff_MPV_common_init()). */
1170 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1171 s->mb_height = (s->height + 31) / 32 * 2;
1173 s->mb_height = (s->height + 15) / 16;
1175 if ((s->width || s->height) &&
1176 av_image_check_size(s->width, s->height, 0, s->avctx))
1177 return AVERROR_INVALIDDATA;
1179 if ((err = init_context_frame(s)))
1182 s->thread_context[0] = s;
1184 if (s->width && s->height) {
1185 int nb_slices = s->slice_context_count;
1186 if (nb_slices > 1) {
1187 for (i = 1; i < nb_slices; i++) {
1188 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1189 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1192 for (i = 0; i < nb_slices; i++) {
1193 if (init_duplicate_context(s->thread_context[i]) < 0)
1195 s->thread_context[i]->start_mb_y =
1196 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1197 s->thread_context[i]->end_mb_y =
1198 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1201 if (init_duplicate_context(s) < 0)
1204 s->end_mb_y = s->mb_height;
1206 s->slice_context_count = nb_slices;
/* fail label (elided): full teardown. */
1211 ff_MPV_common_end(s);
1215 /* Free the common structure shared by encoder and decoder;
 * counterpart of ff_MPV_common_init().  Releases slice thread
 * contexts, parse/bitstream buffers, all pictures and their tables,
 * then the frame-size-dependent state. */
1216 void ff_MPV_common_end(MpegEncContext *s)
1220 if (s->slice_context_count > 1) {
1221 for (i = 0; i < s->slice_context_count; i++) {
1222 free_duplicate_context(s->thread_context[i]);
/* thread_context[0] aliases s, so only free indices >= 1. */
1224 for (i = 1; i < s->slice_context_count; i++) {
1225 av_freep(&s->thread_context[i]);
1227 s->slice_context_count = 1;
1228 } else free_duplicate_context(s);
1230 av_freep(&s->parse_context.buffer);
1231 s->parse_context.buffer_size = 0;
1233 av_freep(&s->bitstream_buffer);
1234 s->allocated_bitstream_buffer_size = 0;
1237 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1238 ff_free_picture_tables(&s->picture[i]);
1239 ff_mpeg_unref_picture(s, &s->picture[i]);
1242 av_freep(&s->picture);
1243 ff_free_picture_tables(&s->last_picture);
1244 ff_mpeg_unref_picture(s, &s->last_picture);
1245 ff_free_picture_tables(&s->current_picture);
1246 ff_mpeg_unref_picture(s, &s->current_picture);
1247 ff_free_picture_tables(&s->next_picture);
1248 ff_mpeg_unref_picture(s, &s->next_picture);
1250 free_context_frame(s);
1252 s->context_initialized = 0;
1253 s->last_picture_ptr =
1254 s->next_picture_ptr =
1255 s->current_picture_ptr = NULL;
1256 s->linesize = s->uvlinesize = 0;
/* Build the run/level lookup tables (max_level[], max_run[],
 * index_run[]) for an RLTable, for both the "not last" and "last"
 * coefficient groups.  If static_store is non-NULL the tables live in
 * the caller-provided static buffer and the function is idempotent;
 * otherwise they are heap-allocated.
 * NOTE(review): the lines computing start/end per group and the early
 * return are elided from this view. */
1259 av_cold void ff_init_rl(RLTable *rl,
1260 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1262 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1263 uint8_t index_run[MAX_RUN + 1];
1264 int last, run, level, start, end, i;
1266 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1267 if (static_store && rl->max_level[0])
1270 /* compute max_level[], max_run[] and index_run[] */
1271 for (last = 0; last < 2; last++) {
/* index_run is initialized to rl->n (an out-of-range sentinel) so the
 * first occurrence of each run value wins below. */
1280 memset(max_level, 0, MAX_RUN + 1);
1281 memset(max_run, 0, MAX_LEVEL + 1);
1282 memset(index_run, rl->n, MAX_RUN + 1);
1283 for (i = start; i < end; i++) {
1284 run = rl->table_run[i];
1285 level = rl->table_level[i];
1286 if (index_run[run] == rl->n)
1288 if (level > max_level[run])
1289 max_level[run] = level;
1290 if (run > max_run[level])
1291 max_run[level] = run;
/* Static case: the three tables are packed back-to-back inside
 * static_store[last]; heap case: one allocation per table. */
1294 rl->max_level[last] = static_store[last];
1296 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1297 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1299 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1301 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1302 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1304 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1306 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1307 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Pre-expand the RL VLC into per-qscale tables (rl_vlc[q]) so the
 * decoder can fetch run, dequantized level and code length in a single
 * table lookup.  qmul/qadd follow the H.263-style dequantization
 * (level * 2q + (q-1)|1).
 * NOTE(review): the qmul assignment and several branch bodies are
 * elided from this view. */
1311 av_cold void ff_init_vlc_rl(RLTable *rl)
1315 for (q = 0; q < 32; q++) {
1317 int qadd = (q - 1) | 1;
1323 for (i = 0; i < rl->vlc.table_size; i++) {
1324 int code = rl->vlc.table[i][0];
1325 int len = rl->vlc.table[i][1];
1328 if (len == 0) { // illegal code
1331 } else if (len < 0) { // more bits needed
1335 if (code == rl->n) { // esc
/* Normal code: look up run/level; runs for "last" codes are offset
 * by 192 to distinguish them in the decoder's fast path. */
1339 run = rl->table_run[code] + 1;
1340 level = rl->table_level[code] * qmul + qadd;
1341 if (code >= rl->last) run += 192;
1344 rl->rl_vlc[q][i].len = len;
1345 rl->rl_vlc[q][i].level = level;
1346 rl->rl_vlc[q][i].run = run;
/* Unreference every picture in the pool that is not a reference frame,
 * returning its buffers for reuse. */
1351 static void release_unused_pictures(MpegEncContext *s)
1355 /* release non reference frames */
1356 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1357 if (!s->picture[i].reference)
1358 ff_mpeg_unref_picture(s, &s->picture[i]);
/* Return nonzero if this picture slot can be (re)used: either it holds
 * no buffer at all, or it is flagged for reallocation and is not held
 * as a delayed reference.  (Return statements are elided from view.) */
1362 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1364 if (pic->f.buf[0] == NULL)
1366 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
/* Find a free slot in the picture pool.  Preference order: a slot with
 * no buffer at all, then any slot pic_is_unused() accepts.  Returns the
 * slot index, or AVERROR_INVALIDDATA when the pool is exhausted.
 * NOTE(review): the `shared` parameter's use and the return statements
 * inside the loops are elided from this view. */
1371 static int find_unused_picture(MpegEncContext *s, int shared)
1376 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1377 if (s->picture[i].f.buf[0] == NULL)
1381 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1382 if (pic_is_unused(s, &s->picture[i]))
1387 return AVERROR_INVALIDDATA;
/* Public wrapper around find_unused_picture(): if the chosen slot was
 * flagged needs_realloc, drop its old tables/buffers and reset the
 * embedded AVFrame so the caller gets a clean slot. */
1390 int ff_find_unused_picture(MpegEncContext *s, int shared)
1392 int ret = find_unused_picture(s, shared);
1394 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1395 if (s->picture[ret].needs_realloc) {
1396 s->picture[ret].needs_realloc = 0;
1397 ff_free_picture_tables(&s->picture[ret]);
1398 ff_mpeg_unref_picture(s, &s->picture[ret]);
1399 avcodec_get_frame_defaults(&s->picture[ret].f);
/* Refresh the per-coefficient noise-reduction offsets from the running
 * DCT error statistics (encoder side).  Counts/sums are halved once
 * they exceed 2^16 so the statistics stay a decaying average. */
1405 static void update_noise_reduction(MpegEncContext *s)
1409 for (intra = 0; intra < 2; intra++) {
1410 if (s->dct_count[intra] > (1 << 16)) {
1411 for (i = 0; i < 64; i++) {
1412 s->dct_error_sum[intra][i] >>= 1;
1414 s->dct_count[intra] >>= 1;
/* offset = nr_strength * count / error_sum, rounded; +1 avoids /0. */
1417 for (i = 0; i < 64; i++) {
1418 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1419 s->dct_count[intra] +
1420 s->dct_error_sum[intra][i] / 2) /
1421 (s->dct_error_sum[intra][i] + 1);
1427 * generic function for encode/decode called after coding/decoding
1428 * the header and before a frame is coded/decoded.
/* Responsibilities: recycle old pictures, pick/allocate the current
 * picture, allocate dummy last/next references when the stream starts
 * on a P/B frame or a field-based keyframe, set up field-picture data
 * pointers, and select the dequantizers.  Returns 0 on success, a
 * negative AVERROR on failure (several error returns are elided from
 * this view). */
1430 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1436 /* mark & release old frames */
1437 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1438 s->last_picture_ptr != s->next_picture_ptr &&
1439 s->last_picture_ptr->f.buf[0]) {
1440 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1443 /* release forgotten pictures */
1444 /* if (mpeg124/h263) */
1446 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1447 if (&s->picture[i] != s->last_picture_ptr &&
1448 &s->picture[i] != s->next_picture_ptr &&
1449 s->picture[i].reference && !s->picture[i].needs_realloc) {
/* With frame threading another thread may legitimately still hold
 * references, so only warn in the single-threaded case. */
1450 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1451 av_log(avctx, AV_LOG_ERROR,
1452 "releasing zombie picture\n");
1453 ff_mpeg_unref_picture(s, &s->picture[i]);
1458 ff_mpeg_unref_picture(s, &s->current_picture);
1461 release_unused_pictures(s);
1463 if (s->current_picture_ptr &&
1464 s->current_picture_ptr->f.buf[0] == NULL) {
1465 // we already have a unused image
1466 // (maybe it was set before reading the header)
1467 pic = s->current_picture_ptr;
1469 i = ff_find_unused_picture(s, 0);
1471 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1474 pic = &s->picture[i];
1478 if (!s->droppable) {
1479 if (s->pict_type != AV_PICTURE_TYPE_B)
1483 pic->f.coded_picture_number = s->coded_picture_number++;
1485 if (ff_alloc_picture(s, pic, 0) < 0)
1488 s->current_picture_ptr = pic;
1489 // FIXME use only the vars from current_pic
1490 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1491 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1492 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* For field pictures, field order is derived from which field is
 * coded first rather than from the TFF bit. */
1493 if (s->picture_structure != PICT_FRAME)
1494 s->current_picture_ptr->f.top_field_first =
1495 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1497 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1498 !s->progressive_sequence;
1499 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1502 s->current_picture_ptr->f.pict_type = s->pict_type;
1503 // if (s->flags && CODEC_FLAG_QSCALE)
1504 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1505 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1507 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1508 s->current_picture_ptr)) < 0)
/* Advance the reference chain: for non-B frames the previous "next"
 * becomes "last" and the new frame becomes "next". */
1511 if (s->pict_type != AV_PICTURE_TYPE_B) {
1512 s->last_picture_ptr = s->next_picture_ptr;
1514 s->next_picture_ptr = s->current_picture_ptr;
1516 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1517 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1518 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1519 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1520 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1521 s->pict_type, s->droppable);
/* Missing last reference (stream starts on P/B, or a field-coded
 * keyframe): synthesize a gray dummy picture so MC has valid data. */
1523 if ((s->last_picture_ptr == NULL ||
1524 s->last_picture_ptr->f.buf[0] == NULL) &&
1525 (s->pict_type != AV_PICTURE_TYPE_I ||
1526 s->picture_structure != PICT_FRAME)) {
1527 int h_chroma_shift, v_chroma_shift;
1528 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1529 &h_chroma_shift, &v_chroma_shift);
1530 if (s->pict_type != AV_PICTURE_TYPE_I)
1531 av_log(avctx, AV_LOG_ERROR,
1532 "warning: first frame is no keyframe\n");
1533 else if (s->picture_structure != PICT_FRAME)
1534 av_log(avctx, AV_LOG_INFO,
1535 "allocate dummy last picture for field based first keyframe\n");
1537 /* Allocate a dummy frame */
1538 i = ff_find_unused_picture(s, 0);
1540 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1543 s->last_picture_ptr = &s->picture[i];
1544 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1545 s->last_picture_ptr = NULL;
/* Luma = 0 (black), chroma = 0x80 (neutral) -> mid gray. */
1549 memset(s->last_picture_ptr->f.data[0], 0,
1550 avctx->height * s->last_picture_ptr->f.linesize[0]);
1551 memset(s->last_picture_ptr->f.data[1], 0x80,
1552 (avctx->height >> v_chroma_shift) *
1553 s->last_picture_ptr->f.linesize[1]);
1554 memset(s->last_picture_ptr->f.data[2], 0x80,
1555 (avctx->height >> v_chroma_shift) *
1556 s->last_picture_ptr->f.linesize[2]);
/* Mark both fields complete so frame-threaded consumers don't wait. */
1558 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1559 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1561 if ((s->next_picture_ptr == NULL ||
1562 s->next_picture_ptr->f.buf[0] == NULL) &&
1563 s->pict_type == AV_PICTURE_TYPE_B) {
1564 /* Allocate a dummy frame */
1565 i = ff_find_unused_picture(s, 0);
1567 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1570 s->next_picture_ptr = &s->picture[i];
1571 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1572 s->next_picture_ptr = NULL;
1575 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1576 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1579 if (s->last_picture_ptr) {
1580 ff_mpeg_unref_picture(s, &s->last_picture);
1581 if (s->last_picture_ptr->f.buf[0] &&
1582 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1583 s->last_picture_ptr)) < 0)
1586 if (s->next_picture_ptr) {
1587 ff_mpeg_unref_picture(s, &s->next_picture);
1588 if (s->next_picture_ptr->f.buf[0] &&
1589 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1590 s->next_picture_ptr)) < 0)
1594 if (s->pict_type != AV_PICTURE_TYPE_I &&
1595 !(s->last_picture_ptr && s->last_picture_ptr->f.buf[0])) {
1596 av_log(s, AV_LOG_ERROR,
1597 "Non-reference picture received and no reference available\n");
1598 return AVERROR_INVALIDDATA;
/* Field picture: point data at the requested field and double the
 * strides so MB addressing steps over the other field's lines. */
1601 if (s->picture_structure!= PICT_FRAME) {
1603 for (i = 0; i < 4; i++) {
1604 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1605 s->current_picture.f.data[i] +=
1606 s->current_picture.f.linesize[i];
1608 s->current_picture.f.linesize[i] *= 2;
1609 s->last_picture.f.linesize[i] *= 2;
1610 s->next_picture.f.linesize[i] *= 2;
1614 s->err_recognition = avctx->err_recognition;
1616 /* set dequantizer, we can't do it during init as
1617 * it might change for mpeg4 and we can't do it in the header
1618 * decode as init is not called for mpeg4 there yet */
1619 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1620 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1621 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1622 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1623 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1624 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1626 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1627 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1630 if (s->dct_error_sum) {
1631 assert(s->avctx->noise_reduction && s->encoding);
1632 update_noise_reduction(s);
1636 FF_DISABLE_DEPRECATION_WARNINGS
1637 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1638 return ff_xvmc_field_start(s, avctx);
1639 FF_ENABLE_DEPRECATION_WARNINGS
1640 #endif /* FF_API_XVMC */
1645 /* called after a frame has been decoded. */
/* Finishes XvMC rendering if active, draws edge padding around the
 * reconstructed picture when error concealment may need out-of-frame
 * motion compensation, and reports decode completion to other frame
 * threads. */
1646 void ff_MPV_frame_end(MpegEncContext *s)
1649 FF_DISABLE_DEPRECATION_WARNINGS
1650 /* redraw edges for the frame if decoding didn't complete */
1651 // just to make sure that all data is rendered.
1652 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1653 ff_xvmc_field_end(s);
1655 FF_ENABLE_DEPRECATION_WARNINGS
1656 #endif /* FF_API_XVMC */
/* Only pad edges when errors occurred, unrestricted MVs are allowed,
 * the frame is a reference, and the user did not request EMU_EDGE. */
1657 if (s->er.error_count &&
1658 !s->avctx->hwaccel &&
1659 s->unrestricted_mv &&
1660 s->current_picture.reference &&
1662 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1663 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1664 int hshift = desc->log2_chroma_w;
1665 int vshift = desc->log2_chroma_h;
1666 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1667 s->h_edge_pos, s->v_edge_pos,
1668 EDGE_WIDTH, EDGE_WIDTH,
1669 EDGE_TOP | EDGE_BOTTOM);
1670 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1671 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1672 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1673 EDGE_TOP | EDGE_BOTTOM);
1674 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1675 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1676 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1677 EDGE_TOP | EDGE_BOTTOM);
/* Unblock frame-threaded consumers waiting on this reference. */
1682 if (s->current_picture.reference)
1683 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1687 * Print debugging info for the given picture.
/* Dumps, per macroblock, the skip count, qscale and/or a compact
 * mb_type glyph map depending on avctx->debug flags.  No-op for
 * hwaccel or when the picture has no mb_type array.
 * NOTE(review): line 1700 switches on `pict->pict_type` while the
 * parameter is named `p` — the declaration of `pict` is elided from
 * this view; confirm against the full file. */
1689 void ff_print_debug_info(MpegEncContext *s, Picture *p)
1692 if (s->avctx->hwaccel || !p || !p->mb_type)
1696 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1699 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1700 switch (pict->pict_type) {
1701 case AV_PICTURE_TYPE_I:
1702 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1704 case AV_PICTURE_TYPE_P:
1705 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1707 case AV_PICTURE_TYPE_B:
1708 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1710 case AV_PICTURE_TYPE_S:
1711 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1713 case AV_PICTURE_TYPE_SI:
1714 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1716 case AV_PICTURE_TYPE_SP:
1717 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1720 for (y = 0; y < s->mb_height; y++) {
1721 for (x = 0; x < s->mb_width; x++) {
1722 if (s->avctx->debug & FF_DEBUG_SKIP) {
1723 int count = s->mbskip_table[x + y * s->mb_stride];
1726 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1728 if (s->avctx->debug & FF_DEBUG_QP) {
1729 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1730 p->qscale_table[x + y * s->mb_stride]);
1732 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1733 int mb_type = p->mb_type[x + y * s->mb_stride];
1734 // Type & MV direction
1735 if (IS_PCM(mb_type))
1736 av_log(s->avctx, AV_LOG_DEBUG, "P");
1737 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1738 av_log(s->avctx, AV_LOG_DEBUG, "A");
1739 else if (IS_INTRA4x4(mb_type))
1740 av_log(s->avctx, AV_LOG_DEBUG, "i");
1741 else if (IS_INTRA16x16(mb_type))
1742 av_log(s->avctx, AV_LOG_DEBUG, "I");
1743 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1744 av_log(s->avctx, AV_LOG_DEBUG, "d");
1745 else if (IS_DIRECT(mb_type))
1746 av_log(s->avctx, AV_LOG_DEBUG, "D");
1747 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1748 av_log(s->avctx, AV_LOG_DEBUG, "g");
1749 else if (IS_GMC(mb_type))
1750 av_log(s->avctx, AV_LOG_DEBUG, "G");
1751 else if (IS_SKIP(mb_type))
1752 av_log(s->avctx, AV_LOG_DEBUG, "S");
1753 else if (!USES_LIST(mb_type, 1))
1754 av_log(s->avctx, AV_LOG_DEBUG, ">");
1755 else if (!USES_LIST(mb_type, 0))
1756 av_log(s->avctx, AV_LOG_DEBUG, "<");
1758 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1759 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second glyph: partition shape (+ 8x8, - 16x8, | 8x16, space 16x16). */
1763 if (IS_8X8(mb_type))
1764 av_log(s->avctx, AV_LOG_DEBUG, "+");
1765 else if (IS_16X8(mb_type))
1766 av_log(s->avctx, AV_LOG_DEBUG, "-");
1767 else if (IS_8X16(mb_type))
1768 av_log(s->avctx, AV_LOG_DEBUG, "|");
1769 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1770 av_log(s->avctx, AV_LOG_DEBUG, " ");
1772 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* Third glyph: '=' marks interlaced macroblocks. */
1775 if (IS_INTERLACED(mb_type))
1776 av_log(s->avctx, AV_LOG_DEBUG, "=");
1778 av_log(s->avctx, AV_LOG_DEBUG, " ");
1781 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1787 * find the lowest MB row referenced in the MVs
/* Used for frame-threading: tells how far the reference picture must be
 * decoded before this MB's motion compensation can run.  Field pictures
 * and GMC fall back to "whole frame" (the last MB row).
 * NOTE(review): the `mvs` count per mv_type and the early returns are
 * elided from this view. */
1789 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1791 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1792 int my, off, i, mvs;
1794 if (s->picture_structure != PICT_FRAME || s->mcsel)
1797 switch (s->mv_type) {
1811 for (i = 0; i < mvs; i++) {
/* Normalize half-pel MVs to quarter-pel units before comparing. */
1812 my = s->mv[dir][i][1]<<qpel_shift;
1813 my_max = FFMAX(my_max, my);
1814 my_min = FFMIN(my_min, my);
/* Convert the largest vertical displacement (quarter-pel) to MB rows,
 * rounding up; 64 = 16 pixels * 4 quarter-pel units. */
1817 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1819 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1821 return s->mb_height-1;
1824 /* put block[] to dest[] */
/* Dequantize an intra block with the current intra dequantizer, then
 * IDCT and store (overwrite) the result at dest. */
1825 static inline void put_dct(MpegEncContext *s,
1826 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1828 s->dct_unquantize_intra(s, block, i, qscale);
1829 s->dsp.idct_put (dest, line_size, block);
1832 /* add block[] to dest[] */
/* IDCT the (already dequantized) block and add it to the prediction at
 * dest; skipped entirely when the block has no coded coefficients. */
1833 static inline void add_dct(MpegEncContext *s,
1834 int16_t *block, int i, uint8_t *dest, int line_size)
1836 if (s->block_last_index[i] >= 0) {
1837 s->dsp.idct_add (dest, line_size, block);
/* Like add_dct(), but the block still needs inter dequantization first
 * (codecs whose bitstream parsing leaves blocks quantized). */
1841 static inline void add_dequant_dct(MpegEncContext *s,
1842 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1844 if (s->block_last_index[i] >= 0) {
1845 s->dct_unquantize_inter(s, block, i, qscale);
1847 s->dsp.idct_add (dest, line_size, block);
1852 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra-prediction state (DC predictors to 1024, AC
 * predictors to 0, coded_block flags for MSMPEG4v3+) at the current
 * block position, then clears the mbintra flag for this MB.
 * NOTE(review): the luma dc_val[0][xy] reset and the loop/brace lines
 * around it are elided from this view. */
1854 void ff_clean_intra_table_entries(MpegEncContext *s)
1856 int wrap = s->b8_stride;
1857 int xy = s->block_index[0];
/* Luma: reset DC predictors for the four 8x8 blocks of this MB. */
1860 s->dc_val[0][xy + 1 ] =
1861 s->dc_val[0][xy + wrap] =
1862 s->dc_val[0][xy + 1 + wrap] = 1024;
1864 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1865 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1866 if (s->msmpeg4_version>=3) {
1867 s->coded_block[xy ] =
1868 s->coded_block[xy + 1 ] =
1869 s->coded_block[xy + wrap] =
1870 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma: one 8x8 block per plane, addressed on the MB grid. */
1873 wrap = s->mb_stride;
1874 xy = s->mb_x + s->mb_y * wrap;
1876 s->dc_val[2][xy] = 1024;
1878 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1879 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1881 s->mbintra_table[xy]= 0;
1884 /* generic function called after a macroblock has been parsed by the
1885 decoder or after it has been encoded by the encoder.
1887 Important variables used:
1888 s->mb_intra : true if intra macroblock
1889 s->mv_dir : motion vector direction
1890 s->mv_type : motion vector type
1891 s->mv : motion vector
1892 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* NOTE(review): this is the always-inlined worker specialized by the
 * is_mpeg12 compile-time flag (see ff_MPV_decode_mb below); a number
 * of physical lines (braces, else branches, skip-handling) are elided
 * from this view. */
1894 static av_always_inline
1895 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
1898 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1901 FF_DISABLE_DEPRECATION_WARNINGS
1902 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1903 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1906 FF_ENABLE_DEPRECATION_WARNINGS
1907 #endif /* FF_API_XVMC */
1909 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1910 /* print DCT coefficients */
1912 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1914 for(j=0; j<64; j++){
1915 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
1917 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1921 s->current_picture.qscale_table[mb_xy] = s->qscale;
1923 /* update DC predictors for P macroblocks */
/* Non-intra MB after an intra one: reset the intra prediction state. */
1925 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1926 if(s->mbintra_table[mb_xy])
1927 ff_clean_intra_table_entries(s);
1931 s->last_dc[2] = 128 << s->intra_dc_precision;
1934 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1935 s->mbintra_table[mb_xy]=1;
/* Reconstruction is skipped for encoder-internal B/intra-only passes
 * unless PSNR reporting or RD mb_decision needs the decoded picture. */
1937 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1938 uint8_t *dest_y, *dest_cb, *dest_cr;
1939 int dct_linesize, dct_offset;
1940 op_pixels_func (*op_pix)[4];
1941 qpel_mc_func (*op_qpix)[16];
1942 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
1943 const int uvlinesize = s->current_picture.f.linesize[1];
1944 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
1945 const int block_size = 8;
1947 /* avoid copy if macroblock skipped in last frame too */
1948 /* skip only during decoding as we might trash the buffers during encoding a bit */
1950 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1952 if (s->mb_skipped) {
1954 assert(s->pict_type!=AV_PICTURE_TYPE_I);
1956 } else if(!s->current_picture.reference) {
1959 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT: double the luma stride and offset by one line so
 * the bottom-field blocks land on alternate lines. */
1963 dct_linesize = linesize << s->interlaced_dct;
1964 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
1968 dest_cb= s->dest[1];
1969 dest_cr= s->dest[2];
/* Non-readable B frames are reconstructed into the scratchpad and
 * copied out at the end (see hdsp.put_pixels_tab below). */
1971 dest_y = s->b_scratchpad;
1972 dest_cb= s->b_scratchpad+16*linesize;
1973 dest_cr= s->b_scratchpad+32*linesize;
1977 /* motion handling */
1978 /* decoding or more than one mb_type (MC was already done otherwise) */
/* Frame threading: wait until the reference rows this MB needs have
 * actually been decoded by the other thread. */
1981 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
1982 if (s->mv_dir & MV_DIR_FORWARD) {
1983 ff_thread_await_progress(&s->last_picture_ptr->tf,
1984 ff_MPV_lowest_referenced_row(s, 0),
1987 if (s->mv_dir & MV_DIR_BACKWARD) {
1988 ff_thread_await_progress(&s->next_picture_ptr->tf,
1989 ff_MPV_lowest_referenced_row(s, 1),
1994 op_qpix= s->me.qpel_put;
1995 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
1996 op_pix = s->hdsp.put_pixels_tab;
1998 op_pix = s->hdsp.put_no_rnd_pixels_tab;
/* Forward MC writes, backward MC then averages on top (B frames). */
2000 if (s->mv_dir & MV_DIR_FORWARD) {
2001 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2002 op_pix = s->hdsp.avg_pixels_tab;
2003 op_qpix= s->me.qpel_avg;
2005 if (s->mv_dir & MV_DIR_BACKWARD) {
2006 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2010 /* skip dequant / idct if we are really late ;) */
2011 if(s->avctx->skip_idct){
2012 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2013 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2014 || s->avctx->skip_idct >= AVDISCARD_ALL)
2018 /* add dct residue */
/* Path 1: blocks still quantized -> dequant + IDCT-add. */
2019 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2020 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2021 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2022 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2023 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2024 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2026 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2027 if (s->chroma_y_shift){
2028 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2029 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2033 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2034 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2035 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2036 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* Path 2: blocks already dequantized -> plain IDCT-add. */
2039 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2040 add_dct(s, block[0], 0, dest_y , dct_linesize);
2041 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2042 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2043 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2045 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2046 if(s->chroma_y_shift){//Chroma420
2047 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2048 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2051 dct_linesize = uvlinesize << s->interlaced_dct;
2052 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2054 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2055 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2056 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2057 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2058 if(!s->chroma_x_shift){//Chroma444
2059 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2060 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2061 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2062 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
/* Path 3: WMV2 has its own MB reconstruction. */
2067 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2068 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2071 /* dct only in intra block */
/* Intra MB: no prediction to add, IDCT result is stored directly
 * (with dequant for codecs that still need it). */
2072 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2073 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2074 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2075 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2076 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2078 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2079 if(s->chroma_y_shift){
2080 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2081 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2085 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2086 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2087 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2088 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2092 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2093 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2094 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2095 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2097 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2098 if(s->chroma_y_shift){
2099 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2100 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2103 dct_linesize = uvlinesize << s->interlaced_dct;
2104 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2106 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2107 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2108 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2109 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2110 if(!s->chroma_x_shift){//Chroma444
2111 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2112 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2113 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2114 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* Copy the scratchpad reconstruction to the real picture. */
2122 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2123 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2124 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public MB reconstruction entry point: dispatch to the is_mpeg12
 * compile-time specialization of MPV_decode_mb_internal(). */
2129 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2131 if(s->out_format == FMT_MPEG1) {
2132 MPV_decode_mb_internal(s, block, 1);
2135 MPV_decode_mb_internal(s, block, 0);
2139 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* Draw edge padding for the just-decoded band (when allowed) and
 * invoke the user's draw_horiz_band callback with the proper source
 * picture and per-plane offsets.
 * NOTE(review): the conditions guarding draw_edges, the selection of
 * `src` (cur vs last) and offset[1] are elided from this view. */
2141 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2142 Picture *last, int y, int h, int picture_structure,
2143 int first_field, int draw_edges, int low_delay,
2144 int v_edge_pos, int h_edge_pos)
2146 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2147 int hshift = desc->log2_chroma_w;
2148 int vshift = desc->log2_chroma_h;
2149 const int field_pic = picture_structure != PICT_FRAME;
2155 if (!avctx->hwaccel &&
2158 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2159 int *linesize = cur->f.linesize;
2160 int sides = 0, edge_h;
2161 if (y==0) sides |= EDGE_TOP;
2162 if (y + h >= v_edge_pos)
2163 sides |= EDGE_BOTTOM;
2165 edge_h= FFMIN(h, v_edge_pos - y);
2167 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2168 linesize[0], h_edge_pos, edge_h,
2169 EDGE_WIDTH, EDGE_WIDTH, sides);
2170 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2171 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2172 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2173 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2174 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2175 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
/* Clip the band to the visible picture height. */
2178 h = FFMIN(h, avctx->height - y);
2180 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2182 if (avctx->draw_horiz_band) {
2184 int offset[AV_NUM_DATA_POINTERS];
2187 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2188 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
/* Frame-coded B pictures are output immediately, so the band can be
 * passed with zero offsets (SVQ3 excluded). */
2195 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2196 picture_structure == PICT_FRAME &&
2197 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2198 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2201 offset[0]= y * src->linesize[0];
2203 offset[2]= (y >> vshift) * src->linesize[1];
2204 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2210 avctx->draw_horiz_band(avctx, src, offset,
2211 y, picture_structure, h);
/* Convenience wrapper: forward to ff_draw_horiz_band() using the
 * MpegEncContext's current state; edge drawing is only enabled for
 * unrestricted-MV, non-intra-only streams. */
2215 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2217 int draw_edges = s->unrestricted_mv && !s->intra_only;
2218 ff_draw_horiz_band(s->avctx, &s->dsp, &s->current_picture,
2219 &s->last_picture, y, h, s->picture_structure,
2220 s->first_field, draw_edges, s->low_delay,
2221 s->v_edge_pos, s->h_edge_pos);
/* Compute the per-MB block_index[] (positions of the six 8x8 blocks in
 * the prediction arrays: 4 luma on the b8 grid, 2 chroma on the MB
 * grid) and the dest[] pointers for the current MB column/row. */
2224 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2225 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2226 const int uvlinesize = s->current_picture.f.linesize[1];
2227 const int mb_size= 4;
2229 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2230 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2231 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2232 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* Chroma indices live after the luma area (b8_stride*mb_height*2). */
2233 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2234 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2235 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* mb_size = 4 -> shift by 4 = multiply by 16 (MB width in pixels). */
2237 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2238 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2239 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2241 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2243 if(s->picture_structure==PICT_FRAME){
2244 s->dest[0] += s->mb_y * linesize << mb_size;
2245 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2246 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* Field picture: rows advance by half the MB rows (mb_y>>1). */
2248 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2249 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2250 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2251 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2257 * Permute an 8x8 block.
2258 * @param block the block which will be permuted according to the given permutation vector
2259 * @param permutation the permutation vector
2260 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
2261 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
2262 * (inverse) permutated to scantable order!
/* Two-pass, in-place-safe permutation: copy the live coefficients to a
 * temp buffer first (the in-between lines clearing block[] and
 * declaring temp are elided from this view), then scatter them through
 * the permutation vector. */
2264 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
2270 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* Pass 1: stash coefficients 0..last (scan order) in temp[]. */
2272 for(i=0; i<=last; i++){
2273 const int j= scantable[i];
/* Pass 2: write them back at their permuted positions. */
2278 for(i=0; i<=last; i++){
2279 const int j= scantable[i];
2280 const int perm_j= permutation[j];
2281 block[perm_j]= temp[j];
/**
 * Flush all decoder-side picture and parser state.
 * Unreferences every picture in s->picture[] plus the current/last/next
 * picture, clears the reference pointers, resets the start-code parser and
 * drops any buffered bitstream data. Called on seek/flush.
 */
2285 void ff_mpeg_flush(AVCodecContext *avctx){
2287 MpegEncContext *s = avctx->priv_data;
/* nothing to flush if the context was never fully initialized */
2289 if(s==NULL || s->picture==NULL)
2292 for (i = 0; i < MAX_PICTURE_COUNT; i++)
2293 ff_mpeg_unref_picture(s, &s->picture[i]);
2294 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2296 ff_mpeg_unref_picture(s, &s->current_picture);
2297 ff_mpeg_unref_picture(s, &s->last_picture);
2298 ff_mpeg_unref_picture(s, &s->next_picture);
2300 s->mb_x= s->mb_y= 0;
/* reset the start-code parser: -1 state means "no start code seen yet" */
2302 s->parse_context.state= -1;
2303 s->parse_context.frame_start_found= 0;
2304 s->parse_context.overread= 0;
2305 s->parse_context.overread_index= 0;
2306 s->parse_context.index= 0;
2307 s->parse_context.last_index= 0;
2308 s->bitstream_buffer_size=0;
/**
 * Dequantize an intra block with MPEG-1 semantics.
 * DC (block[0]) is scaled by the luma or chroma DC scale; AC coefficients
 * are multiplied by qscale and the intra matrix, then forced odd
 * ((level - 1) | 1) — MPEG-1 mismatch control.
 * @param block coefficients in permuted (scantable) order, modified in place
 * @param n     block index within the MB (selects luma vs chroma DC scale)
 * @param qscale quantizer scale
 */
2312 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2313 int16_t *block, int n, int qscale)
2315 int i, level, nCoeffs;
2316 const uint16_t *quant_matrix;
2318 nCoeffs= s->block_last_index[n];
/* DC handling: luma scale vs chroma scale (the selecting condition on n
 * is not visible in this excerpt) */
2321 block[0] = block[0] * s->y_dc_scale;
2323 block[0] = block[0] * s->c_dc_scale;
2324 /* XXX: only mpeg1 */
2325 quant_matrix = s->intra_matrix;
2326 for(i=1;i<=nCoeffs;i++) {
2327 int j= s->intra_scantable.permutated[i];
/* two branches below handle negative vs positive level; the surrounding
 * sign tests are not visible in this excerpt */
2332 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* force odd: MPEG-1 oddification mismatch control */
2333 level = (level - 1) | 1;
2336 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2337 level = (level - 1) | 1;
/**
 * Dequantize an inter block with MPEG-1 semantics.
 * Uses the inter matrix and the inter reconstruction formula
 * ((2*level + 1) * qscale * matrix) >> 4, then forces the result odd
 * (MPEG-1 mismatch control). Starts at coefficient 0 — inter blocks have
 * no special DC handling.
 */
2344 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2345 int16_t *block, int n, int qscale)
2347 int i, level, nCoeffs;
2348 const uint16_t *quant_matrix;
2350 nCoeffs= s->block_last_index[n];
2352 quant_matrix = s->inter_matrix;
2353 for(i=0; i<=nCoeffs; i++) {
/* intra_scantable is used here for inter blocks as well; the permutation
 * is the same for both in this decoder */
2354 int j= s->intra_scantable.permutated[i];
/* negative and positive level branches (sign tests not visible in excerpt) */
2359 level = (((level << 1) + 1) * qscale *
2360 ((int) (quant_matrix[j]))) >> 4;
/* force odd: MPEG-1 oddification mismatch control */
2361 level = (level - 1) | 1;
2364 level = (((level << 1) + 1) * qscale *
2365 ((int) (quant_matrix[j]))) >> 4;
2366 level = (level - 1) | 1;
/**
 * Dequantize an intra block with MPEG-2 semantics.
 * Same reconstruction as MPEG-1 intra but WITHOUT oddification — MPEG-2
 * uses a different (sum-parity) mismatch control, not applied in this
 * non-bitexact variant. With alternate_scan all 64 coefficients are
 * processed regardless of block_last_index.
 */
2373 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2374 int16_t *block, int n, int qscale)
2376 int i, level, nCoeffs;
2377 const uint16_t *quant_matrix;
2379 if(s->alternate_scan) nCoeffs= 63;
2380 else nCoeffs= s->block_last_index[n];
/* DC: luma vs chroma scale (selecting condition on n not visible here) */
2383 block[0] = block[0] * s->y_dc_scale;
2385 block[0] = block[0] * s->c_dc_scale;
2386 quant_matrix = s->intra_matrix;
2387 for(i=1;i<=nCoeffs;i++) {
2388 int j= s->intra_scantable.permutated[i];
/* negative / positive level branches (sign tests not visible in excerpt) */
2393 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2396 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Bit-exact MPEG-2 intra dequantization.
 * Like dct_unquantize_mpeg2_intra_c, but intended to implement the spec's
 * mismatch control exactly (the sum/parity correction applied to the last
 * coefficient is in lines not visible in this excerpt — confirm in full
 * file).
 */
2403 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2404 int16_t *block, int n, int qscale)
2406 int i, level, nCoeffs;
2407 const uint16_t *quant_matrix;
2410 if(s->alternate_scan) nCoeffs= 63;
2411 else nCoeffs= s->block_last_index[n];
/* DC: luma vs chroma scale (selecting condition on n not visible here) */
2414 block[0] = block[0] * s->y_dc_scale;
2416 block[0] = block[0] * s->c_dc_scale;
2417 quant_matrix = s->intra_matrix;
2418 for(i=1;i<=nCoeffs;i++) {
2419 int j= s->intra_scantable.permutated[i];
/* negative / positive level branches (sign tests not visible in excerpt) */
2424 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2427 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Dequantize an inter block with MPEG-2 semantics.
 * Inter reconstruction ((2*level + 1) * qscale * matrix) >> 4 with no
 * oddification (MPEG-2 mismatch control differs from MPEG-1; any parity
 * correction lives in lines not visible in this excerpt). With
 * alternate_scan all 64 coefficients are processed.
 */
2436 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2437 int16_t *block, int n, int qscale)
2439 int i, level, nCoeffs;
2440 const uint16_t *quant_matrix;
2443 if(s->alternate_scan) nCoeffs= 63;
2444 else nCoeffs= s->block_last_index[n];
2446 quant_matrix = s->inter_matrix;
2447 for(i=0; i<=nCoeffs; i++) {
/* intra_scantable also serves inter blocks here (same permutation) */
2448 int j= s->intra_scantable.permutated[i];
/* negative / positive level branches (sign tests not visible in excerpt) */
2453 level = (((level << 1) + 1) * qscale *
2454 ((int) (quant_matrix[j]))) >> 4;
2457 level = (((level << 1) + 1) * qscale *
2458 ((int) (quant_matrix[j]))) >> 4;
/**
 * Dequantize an intra block with H.263 semantics (no quant matrix):
 * level' = level * qmul ± qadd, sign-dependent. DC is scaled by the
 * luma/chroma DC scale. qadd = (qscale - 1) | 1 per H.263; qmul is set
 * in lines not visible in this excerpt (presumably qscale << 1 — confirm
 * in full file).
 */
2467 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2468 int16_t *block, int n, int qscale)
2470 int i, level, qmul, qadd;
2473 assert(s->block_last_index[n]>=0);
/* DC: luma vs chroma scale (selecting condition on n not visible here) */
2479 block[0] = block[0] * s->y_dc_scale;
2481 block[0] = block[0] * s->c_dc_scale;
2482 qadd = (qscale - 1) | 1;
/* iterate in raster order up to the last nonzero coefficient */
2489 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2491 for(i=1; i<=nCoeffs; i++) {
/* sign-dependent reconstruction (sign tests not visible in excerpt) */
2495 level = level * qmul - qadd;
2497 level = level * qmul + qadd;
/**
 * Dequantize an inter block with H.263 semantics.
 * Identical formula to the intra variant but starting at coefficient 0
 * and with no DC scaling. qmul is set in lines not visible in this
 * excerpt (presumably qscale << 1 — confirm in full file).
 */
2504 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2505 int16_t *block, int n, int qscale)
2507 int i, level, qmul, qadd;
2510 assert(s->block_last_index[n]>=0);
2512 qadd = (qscale - 1) | 1;
/* iterate in raster order up to the last nonzero coefficient */
2515 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2517 for(i=0; i<=nCoeffs; i++) {
/* sign-dependent reconstruction (sign tests not visible in excerpt) */
2521 level = level * qmul - qadd;
2523 level = level * qmul + qadd;
* set qscale and update qscale dependent variables.
* Clamps qscale to the valid range (upper bound 31 visible here; the
* lower-bound clamp is in lines not visible in this excerpt), then derives
* chroma_qscale and the luma/chroma DC scale factors from their tables.
2533 void ff_set_qscale(MpegEncContext * s, int qscale)
2537 else if (qscale > 31)
2541 s->chroma_qscale= s->chroma_qscale_table[qscale];
2543 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
/* chroma DC scale is indexed by the (possibly remapped) chroma qscale */
2544 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/**
 * Report row-level decode progress (current mb_y) for frame threading.
 * Suppressed for B-frames, partitioned frames and after a decode error,
 * since in those cases rows may be revisited or be unreliable.
 */
2547 void ff_MPV_report_decode_progress(MpegEncContext *s)
2549 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2550 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
2553 #if CONFIG_ERROR_RESILIENCE
/**
 * Copy the per-frame state the error-resilience module needs
 * (picture pointers, timing, motion precision, partitioning) into the
 * ERContext, then start error-resilience bookkeeping for this frame.
 */
2554 void ff_mpeg_er_frame_start(MpegEncContext *s)
2556 ERContext *er = &s->er;
2558 er->cur_pic = s->current_picture_ptr;
2559 er->last_pic = s->last_picture_ptr;
2560 er->next_pic = s->next_picture_ptr;
2562 er->pp_time = s->pp_time;
2563 er->pb_time = s->pb_time;
2564 er->quarter_sample = s->quarter_sample;
2565 er->partitioned_frame = s->partitioned_frame;
2567 ff_er_frame_start(er);
2569 #endif /* CONFIG_ERROR_RESILIENCE */