2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
35 #include "h264chroma.h"
38 #include "mpegvideo.h"
41 #include "xvmc_internal.h"
/* Forward declarations of the per-standard inverse-quantization routines.
 * ff_dct_common_init() installs these as function pointers on the
 * MpegEncContext so arch-specific code can later override them. */
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
/* Identity mapping from luma qscale to chroma qscale (indices 0..31);
 * codecs with a non-linear chroma quantizer substitute their own table. */
60 static const uint8_t ff_default_chroma_qscale_table[32] = {
61 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
62 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
63 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale. */
66 const uint8_t ff_mpeg1_dc_scale_table[128] = {
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
68 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
69 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale for intra_dc_precision == 1 (divisor 4). */
78 static const uint8_t mpeg2_dc_scale_table1[128] = {
79 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
80 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
81 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
82 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale for intra_dc_precision == 2 (divisor 2). */
90 static const uint8_t mpeg2_dc_scale_table2[128] = {
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
96 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale for intra_dc_precision == 3 (divisor 1). */
102 static const uint8_t mpeg2_dc_scale_table3[128] = {
103 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
104 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
106 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
107 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
108 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Indexed by intra_dc_precision (0..3): selects the matching DC scale table. */
114 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
115 ff_mpeg1_dc_scale_table,
116 mpeg2_dc_scale_table1,
117 mpeg2_dc_scale_table2,
118 mpeg2_dc_scale_table3,
/* Pixel-format list advertised by YUV420-only decoders.
 * NOTE(review): the initializer is not visible in this extract. */
121 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Error-resilience callback: reconstruct one macroblock with the given
 * motion parameters. 'opaque' is the MpegEncContext; the ER core calls this
 * to conceal damaged regions. */
126 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
128 int mb_x, int mb_y, int mb_intra, int mb_skipped)
130 MpegEncContext *s = opaque;
133 s->mv_type = mv_type;
134 s->mb_intra = mb_intra;
135 s->mb_skipped = mb_skipped;
// Copy the full MV array (2 dirs x 2 blocks x 2 components) into the context.
138 memcpy(s->mv, mv, sizeof(*mv));
140 ff_init_block_index(s);
141 ff_update_block_index(s);
143 s->dsp.clear_blocks(s->block[0]);
// Recompute destination pointers for this MB position; chroma planes are
// shifted by the chroma subsampling factors.
145 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
146 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
147 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
// Interlaced concealment is only partially supported; warn rather than fail.
150 av_log(s->avctx, AV_LOG_DEBUG, "Interlaced error concealment is not fully implemented\n");
151 ff_MPV_decode_mb(s, s->block);
154 /* init common dct for both encoder and decoder */
155 av_cold int ff_dct_common_init(MpegEncContext *s)
// Initialize the generic DSP helpers used by both paths.
157 ff_dsputil_init(&s->dsp, s->avctx);
158 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
159 ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
160 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
// Install the C reference dequantizers; arch init below may override them.
162 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
163 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
164 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
165 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
166 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
// Bit-exact mode uses a variant that matches the reference rounding.
167 if (s->flags & CODEC_FLAG_BITEXACT)
168 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
169 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
// Per-architecture overrides (each guarded by ARCH_* in the full file).
172 ff_MPV_common_init_axp(s);
174 ff_MPV_common_init_arm(s);
176 ff_MPV_common_init_bfin(s);
178 ff_MPV_common_init_ppc(s);
180 ff_MPV_common_init_x86(s);
182 /* load & permutate scantables
183 * note: only wmv uses different ones
// Alternate (vertical) scan is used for interlaced content; zigzag otherwise.
185 if (s->alternate_scan) {
186 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
187 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
189 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
190 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
192 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads). Returns 0 on success, AVERROR(ENOMEM)
 * on failure (partial allocations are freed). */
198 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
// 32-byte aligned, with 64 bytes of slack past the widest line.
200 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
202 // edge emu needs blocksize + filter length - 1
203 // (= 17x17 for halfpel / 21x21 for h264)
204 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
205 // at uvlinesize. It supports only YUV420 so 24x24 is enough
206 // linesize * interlaced * MBsize
207 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
210 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
// The remaining scratch pointers alias into the single me.scratchpad
// allocation; only me.scratchpad itself is ever freed.
212 s->me.temp = s->me.scratchpad;
213 s->rd_scratchpad = s->me.scratchpad;
214 s->b_scratchpad = s->me.scratchpad;
215 s->obmc_scratchpad = s->me.scratchpad + 16;
// fail path: release the partially-allocated edge buffer.
219 av_freep(&s->edge_emu_buffer);
220 return AVERROR(ENOMEM);
224 * Allocate a frame buffer
/* Obtain pixel buffers for 'pic', either through the (possibly user-supplied,
 * possibly threaded) get_buffer path or, for the WM image/screen codecs,
 * through the default allocator with codec-internal dimensions. Also
 * validates strides and ensures the context scratch buffers exist. */
226 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
// WMV3IMAGE/VC1IMAGE/MSS2 allocate internal buffers with different
// dimensions, so user get_buffer callbacks must be bypassed for them.
231 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
232 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
233 s->codec_id != AV_CODEC_ID_MSS2)
234 r = ff_thread_get_buffer(s->avctx, &pic->tf,
235 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
// Internal-allocation path: size the frame from the codec context directly.
237 pic->f.width = s->avctx->width;
238 pic->f.height = s->avctx->height;
239 pic->f.format = s->avctx->pix_fmt;
240 r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
243 if (r < 0 || !pic->f.data[0]) {
244 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
// Hardware acceleration: allocate the per-picture private data, if any.
249 if (s->avctx->hwaccel) {
250 assert(!pic->hwaccel_picture_private);
251 if (s->avctx->hwaccel->priv_data_size) {
252 pic->hwaccel_priv_buf = av_buffer_allocz(s->avctx->hwaccel->priv_data_size);
253 if (!pic->hwaccel_priv_buf) {
254 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
257 pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
// Strides must stay constant across frames once established.
261 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
262 s->uvlinesize != pic->f.linesize[1])) {
263 av_log(s->avctx, AV_LOG_ERROR,
264 "get_buffer() failed (stride changed)\n");
265 ff_mpeg_unref_picture(s, pic);
// The code assumes both chroma planes share one stride.
269 if (pic->f.linesize[1] != pic->f.linesize[2]) {
270 av_log(s->avctx, AV_LOG_ERROR,
271 "get_buffer() failed (uv stride mismatch)\n");
272 ff_mpeg_unref_picture(s, pic);
// Lazily create the linesize-dependent scratch buffers.
276 if (!s->edge_emu_buffer &&
277 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
278 av_log(s->avctx, AV_LOG_ERROR,
279 "get_buffer() failed to allocate context scratch buffers.\n");
280 ff_mpeg_unref_picture(s, pic);
/* Release all per-picture side-data buffers (variance, skip, qscale,
 * mb_type, motion vectors, ref indices) and reset the recorded
 * allocation dimensions so the tables get re-allocated on next use. */
287 static void free_picture_tables(Picture *pic)
291 pic->alloc_mb_width =
292 pic->alloc_mb_height = 0;
294 av_buffer_unref(&pic->mb_var_buf);
295 av_buffer_unref(&pic->mc_mb_var_buf);
296 av_buffer_unref(&pic->mb_mean_buf);
297 av_buffer_unref(&pic->mbskip_table_buf);
298 av_buffer_unref(&pic->qscale_table_buf);
299 av_buffer_unref(&pic->mb_type_buf);
// Per-direction (forward/backward) MV and reference-index buffers.
301 for (i = 0; i < 2; i++) {
302 av_buffer_unref(&pic->motion_val_buf[i]);
303 av_buffer_unref(&pic->ref_index_buf[i]);
/* Allocate the per-picture side-data tables sized from the current
 * macroblock geometry. Returns 0 or AVERROR(ENOMEM); on failure the
 * caller is responsible for cleanup via free_picture_tables(). */
307 static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
// +1 row / +1 element of padding for edge macroblocks.
309 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
310 const int mb_array_size = s->mb_stride * s->mb_height;
311 const int b8_array_size = s->b8_stride * s->mb_height * 2;
315 pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
316 pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
317 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
319 if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
320 return AVERROR(ENOMEM);
// Encoder-only statistics tables (guard not visible in this extract).
323 pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
324 pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
325 pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
326 if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
327 return AVERROR(ENOMEM);
// MV/ref tables are needed for H.263-family output, encoding, or MV debug.
330 if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv) {
331 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
332 int ref_index_size = 4 * mb_array_size;
334 for (i = 0; mv_size && i < 2; i++) {
335 pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
336 pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
337 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
338 return AVERROR(ENOMEM);
// Remember the geometry the tables were sized for, so a resolution
// change can trigger re-allocation (checked in ff_alloc_picture()).
342 pic->alloc_mb_width = s->mb_width;
343 pic->alloc_mb_height = s->mb_height;
/* Ensure every per-picture table buffer is writable (i.e. not shared with
 * another Picture via av_buffer refs), copying on write where needed.
 * Returns 0 or a negative AVERROR. */
348 static int make_tables_writable(Picture *pic)
351 #define MAKE_WRITABLE(table) \
354 (ret = av_buffer_make_writable(&pic->table)) < 0)\
358 MAKE_WRITABLE(mb_var_buf);
359 MAKE_WRITABLE(mc_mb_var_buf);
360 MAKE_WRITABLE(mb_mean_buf);
361 MAKE_WRITABLE(mbskip_table_buf);
362 MAKE_WRITABLE(qscale_table_buf);
363 MAKE_WRITABLE(mb_type_buf);
365 for (i = 0; i < 2; i++) {
366 MAKE_WRITABLE(motion_val_buf[i]);
367 MAKE_WRITABLE(ref_index_buf[i]);
374 * Allocate a Picture.
375 * The pixels are allocated/set by calling get_buffer() if shared = 0
377 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
// Geometry changed since the tables were allocated: drop and re-allocate.
381 if (pic->qscale_table_buf)
382 if ( pic->alloc_mb_width != s->mb_width
383 || pic->alloc_mb_height != s->mb_height)
384 free_picture_tables(pic);
// shared != 0: caller supplied the pixel data; otherwise we allocate it.
387 av_assert0(pic->f.data[0]);
390 av_assert0(!pic->f.data[0]);
392 if (alloc_frame_buffer(s, pic) < 0)
// Record the strides for subsequent consistency checks.
395 s->linesize = pic->f.linesize[0];
396 s->uvlinesize = pic->f.linesize[1];
399 if (!pic->qscale_table_buf)
400 ret = alloc_picture_tables(s, pic);
402 ret = make_tables_writable(pic);
// Point the convenience pointers into the (possibly padded) buffers.
407 pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
408 pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
409 pic->mb_mean = pic->mb_mean_buf->data;
// +2*mb_stride+1 skips the padding row/column added in alloc_picture_tables.
412 pic->mbskip_table = pic->mbskip_table_buf->data;
413 pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
414 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
416 if (pic->motion_val_buf[0]) {
417 for (i = 0; i < 2; i++) {
418 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
419 pic->ref_index[i] = pic->ref_index_buf[i]->data;
// fail path: fully release the picture and its tables.
425 av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
426 ff_mpeg_unref_picture(s, pic);
427 free_picture_tables(pic);
428 return AVERROR(ENOMEM);
432 * Deallocate a picture.
434 void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
// Everything past mb_mean in the Picture struct is zeroed below; the
// fields before it (buffer refs, pointers) are released explicitly.
436 int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
439 /* WM Image / Screen codecs allocate internal buffers with different
440 * dimensions / colorspaces; ignore user-defined callbacks for these. */
441 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
442 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
443 s->codec_id != AV_CODEC_ID_MSS2)
444 ff_thread_release_buffer(s->avctx, &pic->tf);
446 av_frame_unref(&pic->f);
448 av_buffer_unref(&pic->hwaccel_priv_buf);
// Geometry-mismatched tables are dropped so the next alloc resizes them.
450 if (pic->needs_realloc)
451 free_picture_tables(pic);
// Reset all remaining state while preserving the buffer-ref fields above.
453 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
/* Make dst's side-data tables reference src's table buffers (frame
 * threading / picture duplication). On any ref failure all dst tables
 * are freed and AVERROR(ENOMEM) is returned. */
456 static int update_picture_tables(Picture *dst, Picture *src)
// Re-ref only when dst does not already share src's underlying buffer.
460 #define UPDATE_TABLE(table)\
463 (!dst->table || dst->table->buffer != src->table->buffer)) {\
464 av_buffer_unref(&dst->table);\
465 dst->table = av_buffer_ref(src->table);\
467 free_picture_tables(dst);\
468 return AVERROR(ENOMEM);\
473 UPDATE_TABLE(mb_var_buf);
474 UPDATE_TABLE(mc_mb_var_buf);
475 UPDATE_TABLE(mb_mean_buf);
476 UPDATE_TABLE(mbskip_table_buf);
477 UPDATE_TABLE(qscale_table_buf);
478 UPDATE_TABLE(mb_type_buf);
479 for (i = 0; i < 2; i++) {
480 UPDATE_TABLE(motion_val_buf[i]);
481 UPDATE_TABLE(ref_index_buf[i]);
// Copy the derived pointers (they point into the shared buffers above).
484 dst->mb_var = src->mb_var;
485 dst->mc_mb_var = src->mc_mb_var;
486 dst->mb_mean = src->mb_mean;
487 dst->mbskip_table = src->mbskip_table;
488 dst->qscale_table = src->qscale_table;
489 dst->mb_type = src->mb_type;
490 for (i = 0; i < 2; i++) {
491 dst->motion_val[i] = src->motion_val[i];
492 dst->ref_index[i] = src->ref_index[i];
495 dst->alloc_mb_width = src->alloc_mb_width;
496 dst->alloc_mb_height = src->alloc_mb_height;
/* Create a new reference of src in dst: frame data, side-data tables,
 * hwaccel private data and the scalar metadata. dst must be empty on
 * entry; on failure dst is fully unreferenced. */
501 int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
505 av_assert0(!dst->f.buf[0]);
506 av_assert0(src->f.buf[0]);
510 ret = ff_thread_ref_frame(&dst->tf, &src->tf);
514 ret = update_picture_tables(dst, src);
518 if (src->hwaccel_picture_private) {
519 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
520 if (!dst->hwaccel_priv_buf)
522 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
// Scalar per-picture metadata is copied by value.
525 dst->field_picture = src->field_picture;
526 dst->mb_var_sum = src->mb_var_sum;
527 dst->mc_mb_var_sum = src->mc_mb_var_sum;
528 dst->b_frame_score = src->b_frame_score;
529 dst->needs_realloc = src->needs_realloc;
530 dst->reference = src->reference;
531 dst->shared = src->shared;
// fail path: leave dst in a clean, unreferenced state.
535 ff_mpeg_unref_picture(s, dst);
/* Allocate the per-slice-thread state (ME maps, DCT blocks, AC prediction
 * values). Called once per thread context. Returns 0 or -1; partial
 * allocations are released later by ff_MPV_common_end(). */
539 static int init_duplicate_context(MpegEncContext *s)
541 int y_size = s->b8_stride * (2 * s->mb_height + 1);
542 int c_size = s->mb_stride * (s->mb_height + 1);
543 int yc_size = y_size + 2 * c_size;
551 s->obmc_scratchpad = NULL;
// Motion-estimation hash map and score map.
554 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
555 ME_MAP_SIZE * sizeof(uint32_t), fail)
556 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
557 ME_MAP_SIZE * sizeof(uint32_t), fail)
558 if (s->avctx->noise_reduction) {
559 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
560 2 * 64 * sizeof(int), fail)
// 12 blocks of 64 coefficients, double-buffered.
563 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
564 s->block = s->blocks[0];
566 for (i = 0; i < 12; i++) {
567 s->pblocks[i] = &s->block[i];
// H.263-family needs per-block AC prediction values (Y + 2 chroma planes).
570 if (s->out_format == FMT_H263) {
572 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
573 yc_size * sizeof(int16_t) * 16, fail);
574 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
575 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
576 s->ac_val[2] = s->ac_val[1] + c_size;
581 return -1; // free() through ff_MPV_common_end()
/* Free everything allocated by init_duplicate_context() and
 * ff_mpv_frame_size_alloc() for one thread context. */
584 static void free_duplicate_context(MpegEncContext *s)
589 av_freep(&s->edge_emu_buffer);
590 av_freep(&s->me.scratchpad);
// obmc_scratchpad aliases into me.scratchpad, so it is only cleared here.
594 s->obmc_scratchpad = NULL;
596 av_freep(&s->dct_error_sum);
597 av_freep(&s->me.map);
598 av_freep(&s->me.score_map);
599 av_freep(&s->blocks);
600 av_freep(&s->ac_val_base);
/* Copy the per-thread fields from src into bak so they survive the
 * whole-struct memcpy in ff_update_duplicate_context().
 * NOTE(review): most COPY() lines are not visible in this extract. */
604 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
606 #define COPY(a) bak->a = src->a
607 COPY(edge_emu_buffer);
612 COPY(obmc_scratchpad);
619 COPY(me.map_generation);
/* Refresh a slice-thread context from the master: copy the whole struct,
 * then restore dst's own per-thread buffers and fix up the interior
 * pointers. Returns 0 or a negative AVERROR. */
631 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
635 // FIXME copy only needed parts
// Save dst's thread-local pointers, overwrite with src, restore them.
637 backup_duplicate_context(&bak, dst);
638 memcpy(dst, src, sizeof(MpegEncContext));
639 backup_duplicate_context(dst, &bak);
// pblocks must point into dst's own block array, not src's.
640 for (i = 0; i < 12; i++) {
641 dst->pblocks[i] = &dst->block[i];
643 if (!dst->edge_emu_buffer &&
644 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
645 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
646 "scratch buffers.\n");
649 // STOP_TIMER("update_duplicate_context")
650 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: synchronize decoder state from the source thread's
 * context (s1) into the destination thread's context (s). Copies picture
 * references, bitstream buffer, timing/resilience fields and MPEG-2
 * interlacing state. Returns 0 or a negative AVERROR. */
654 int ff_mpeg_update_thread_context(AVCodecContext *dst,
655 const AVCodecContext *src)
658 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
665 // FIXME can parameters change on I-frames?
666 // in that case dst may need a reinit
667 if (!s->context_initialized) {
// First sync: clone the whole context, then re-initialize the parts
// that must be private to this thread.
668 memcpy(s, s1, sizeof(MpegEncContext));
671 s->bitstream_buffer = NULL;
672 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
674 if (s1->context_initialized){
675 // s->picture_range_start += MAX_PICTURE_COUNT;
676 // s->picture_range_end += MAX_PICTURE_COUNT;
677 if((ret = ff_MPV_common_init(s)) < 0){
678 memset(s, 0, sizeof(MpegEncContext));
// Resolution change (or explicit reinit request): rebuild frame-size state.
685 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
686 s->context_reinit = 0;
687 s->height = s1->height;
688 s->width = s1->width;
689 if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
693 s->avctx->coded_height = s1->avctx->coded_height;
694 s->avctx->coded_width = s1->avctx->coded_width;
695 s->avctx->width = s1->avctx->width;
696 s->avctx->height = s1->avctx->height;
698 s->coded_picture_number = s1->coded_picture_number;
699 s->picture_number = s1->picture_number;
700 s->input_picture_number = s1->input_picture_number;
702 av_assert0(!s->picture || s->picture != s1->picture);
// Re-reference every picture slot from the source thread.
704 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
705 ff_mpeg_unref_picture(s, &s->picture[i]);
706 if (s1->picture[i].f.data[0] &&
707 (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
711 #define UPDATE_PICTURE(pic)\
713 ff_mpeg_unref_picture(s, &s->pic);\
714 if (s1->pic.f.data[0])\
715 ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
717 ret = update_picture_tables(&s->pic, &s1->pic);\
722 UPDATE_PICTURE(current_picture);
723 UPDATE_PICTURE(last_picture);
724 UPDATE_PICTURE(next_picture);
// REBASE_PICTURE translates s1-relative Picture pointers into s's array.
726 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
727 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
728 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
730 // Error/bug resilience
731 s->next_p_frame_damaged = s1->next_p_frame_damaged;
732 s->workaround_bugs = s1->workaround_bugs;
733 s->padding_bug_score = s1->padding_bug_score;
// Bulk-copy the MPEG-4 field range [time_increment_bits, shape).
736 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
737 (char *) &s1->shape - (char *) &s1->time_increment_bits);
740 s->max_b_frames = s1->max_b_frames;
741 s->low_delay = s1->low_delay;
742 s->droppable = s1->droppable;
744 // DivX handling (doesn't work)
745 s->divx_packed = s1->divx_packed;
747 if (s1->bitstream_buffer) {
748 if (s1->bitstream_buffer_size +
749 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
// NOTE(review): av_fast_malloc() can fail and leave s->bitstream_buffer
// NULL, yet the memcpy below runs unconditionally — needs a NULL check.
// Also, the allocation is sized from s1->allocated_bitstream_buffer_size
// while only bitstream_buffer_size + padding bytes are required; confirm
// the intended size against upstream.
750 av_fast_malloc(&s->bitstream_buffer,
751 &s->allocated_bitstream_buffer_size,
752 s1->allocated_bitstream_buffer_size);
753 s->bitstream_buffer_size = s1->bitstream_buffer_size;
754 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
755 s1->bitstream_buffer_size);
756 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
757 FF_INPUT_BUFFER_PADDING_SIZE);
760 // linesize dependend scratch buffer allocation
761 if (!s->edge_emu_buffer)
763 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
764 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
765 "scratch buffers.\n");
766 return AVERROR(ENOMEM);
769 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
770 "be allocated due to unknown size.\n");
773 // MPEG2/interlacing info
// Bulk-copy the field range [progressive_sequence, rtp_mode).
774 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
775 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
777 if (!s1->first_field) {
778 s->last_pict_type = s1->pict_type;
779 if (s1->current_picture_ptr)
780 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
782 if (s1->pict_type != AV_PICTURE_TYPE_B) {
783 s->last_non_b_pict_type = s1->pict_type;
791 * Set the given MpegEncContext to common defaults
792 * (same for encoding and decoding).
793 * The changed fields will not depend upon the
794 * prior state of the MpegEncContext.
796 void ff_MPV_common_defaults(MpegEncContext *s)
// MPEG-1 DC scale and identity chroma-qscale as the baseline tables.
798 s->y_dc_scale_table =
799 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
800 s->chroma_qscale_table = ff_default_chroma_qscale_table;
// Assume progressive full-frame coding until headers say otherwise.
801 s->progressive_frame = 1;
802 s->progressive_sequence = 1;
803 s->picture_structure = PICT_FRAME;
805 s->coded_picture_number = 0;
806 s->picture_number = 0;
807 s->input_picture_number = 0;
809 s->picture_in_gop_number = 0;
814 s->slice_context_count = 1;
818 * Set the given MpegEncContext to defaults for decoding.
819 * the changed fields will not depend upon
820 * the prior state of the MpegEncContext.
822 void ff_MPV_decode_defaults(MpegEncContext *s)
// Decoding currently has no extra defaults beyond the common ones.
824 ff_MPV_common_defaults(s);
/* Wire up the error-resilience context from the MpegEncContext geometry
 * and allocate its status/temp tables. Returns 0 or AVERROR(ENOMEM). */
827 static int init_er(MpegEncContext *s)
829 ERContext *er = &s->er;
830 int mb_array_size = s->mb_height * s->mb_stride;
833 er->avctx = s->avctx;
// Share the macroblock geometry and index mapping with the ER core.
836 er->mb_index2xy = s->mb_index2xy;
837 er->mb_num = s->mb_num;
838 er->mb_width = s->mb_width;
839 er->mb_height = s->mb_height;
840 er->mb_stride = s->mb_stride;
841 er->b8_stride = s->b8_stride;
843 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
844 er->error_status_table = av_mallocz(mb_array_size);
845 if (!er->er_temp_buffer || !er->error_status_table)
// The skip/intra tables are owned by the MpegEncContext, only borrowed here.
848 er->mbskip_table = s->mbskip_table;
849 er->mbintra_table = s->mbintra_table;
851 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
852 er->dc_val[i] = s->dc_val[i];
// Concealment is performed through the MB-reconstruction callback above.
854 er->decode_mb = mpeg_er_decode_mb;
// fail path: release whichever of the two tables was allocated.
859 av_freep(&er->er_temp_buffer);
860 av_freep(&er->error_status_table);
861 return AVERROR(ENOMEM);
865 * Initialize and allocates MpegEncContext fields dependent on the resolution.
867 static int init_context_frame(MpegEncContext *s)
869 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
// Derive macroblock geometry from the pixel dimensions. The strides carry
// one extra column of padding for edge handling.
871 s->mb_width = (s->width + 15) / 16;
872 s->mb_stride = s->mb_width + 1;
873 s->b8_stride = s->mb_width * 2 + 1;
874 s->b4_stride = s->mb_width * 4 + 1;
875 mb_array_size = s->mb_height * s->mb_stride;
876 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
878 /* set default edge pos, will be overriden
879 * in decode_header if needed */
880 s->h_edge_pos = s->mb_width * 16;
881 s->v_edge_pos = s->mb_height * 16;
883 s->mb_num = s->mb_width * s->mb_height;
888 s->block_wrap[3] = s->b8_stride;
890 s->block_wrap[5] = s->mb_stride;
892 y_size = s->b8_stride * (2 * s->mb_height + 1);
893 c_size = s->mb_stride * (s->mb_height + 1);
894 yc_size = y_size + 2 * c_size;
896 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
897 for (y = 0; y < s->mb_height; y++)
898 for (x = 0; x < s->mb_width; x++)
899 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
901 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
904 /* Allocate MV tables */
905 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
906 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
907 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
908 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
909 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
910 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
// Working pointers skip the padding row/column of each base table.
911 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
912 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
913 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
914 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
915 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
916 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
918 /* Allocate MB type table */
919 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
921 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
923 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
924 mb_array_size * sizeof(float), fail);
925 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
926 mb_array_size * sizeof(float), fail);
// MPEG-4 or interlaced ME: per-field MV tables, indexed [dir][field][mv].
930 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
931 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
932 /* interlaced direct mode decoding tables */
933 for (i = 0; i < 2; i++) {
935 for (j = 0; j < 2; j++) {
936 for (k = 0; k < 2; k++) {
937 FF_ALLOCZ_OR_GOTO(s->avctx,
938 s->b_field_mv_table_base[i][j][k],
939 mv_table_size * 2 * sizeof(int16_t),
941 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
944 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
945 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
946 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
948 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
951 if (s->out_format == FMT_H263) {
953 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
954 s->coded_block = s->coded_block_base + s->b8_stride + 1;
956 /* cbp, ac_pred, pred_dir */
957 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
958 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
961 if (s->h263_pred || s->h263_plus || !s->encoding) {
963 // MN: we need these for error resilience of intra-frames
964 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
965 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
966 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
967 s->dc_val[2] = s->dc_val[1] + c_size;
// 1024 is the neutral DC predictor (128 << 3).
968 for (i = 0; i < yc_size; i++)
969 s->dc_val_base[i] = 1024;
972 /* which mb is a intra block */
973 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
974 memset(s->mbintra_table, 1, mb_array_size);
976 /* init macroblock skip table */
977 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
978 // Note the + 1 is for a quicker mpeg4 slice_end detection
982 return AVERROR(ENOMEM);
986 * init common structure for both encoder and decoder.
987 * this assumes that some variables like width/height are already set
989 av_cold int ff_MPV_common_init(MpegEncContext *s)
// Number of slice contexts: thread count when slice threading is active.
992 int nb_slices = (HAVE_THREADS &&
993 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
994 s->avctx->thread_count : 1;
996 if (s->encoding && s->avctx->slices)
997 nb_slices = s->avctx->slices;
// Interlaced MPEG-2 rounds MB height to a multiple of two field rows.
999 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1000 s->mb_height = (s->height + 31) / 32 * 2;
1002 s->mb_height = (s->height + 15) / 16;
1004 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1005 av_log(s->avctx, AV_LOG_ERROR,
1006 "decoding to AV_PIX_FMT_NONE is not supported.\n");
// Cap slice count at MAX_THREADS and at one slice per MB row.
1010 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1013 max_slices = FFMIN(MAX_THREADS, s->mb_height);
1015 max_slices = MAX_THREADS;
1016 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1017 " reducing to %d\n", nb_slices, max_slices);
1018 nb_slices = max_slices;
1021 if ((s->width || s->height) &&
1022 av_image_check_size(s->width, s->height, 0, s->avctx))
1025 ff_dct_common_init(s);
1027 s->flags = s->avctx->flags;
1028 s->flags2 = s->avctx->flags2;
1030 /* set chroma shifts */
1031 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1033 &s->chroma_y_shift);
1035 /* convert fourcc to upper case */
1036 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
1037 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
1039 s->avctx->coded_frame = &s->current_picture.f;
// Encoder-only allocations (statistics, quant matrices, reorder queues).
1042 if (s->msmpeg4_version) {
1043 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1044 2 * 2 * (MAX_LEVEL + 1) *
1045 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1047 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1049 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
1050 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
1051 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
1052 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1053 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1054 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
1055 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1056 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
1058 if (s->avctx->noise_reduction) {
1059 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
// Allocate the picture pool and reset the current/last/next frames.
1063 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1064 MAX_PICTURE_COUNT * sizeof(Picture), fail);
1065 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1066 avcodec_get_frame_defaults(&s->picture[i].f);
1068 memset(&s->next_picture, 0, sizeof(s->next_picture));
1069 memset(&s->last_picture, 0, sizeof(s->last_picture));
1070 memset(&s->current_picture, 0, sizeof(s->current_picture));
1071 avcodec_get_frame_defaults(&s->next_picture.f);
1072 avcodec_get_frame_defaults(&s->last_picture.f);
1073 avcodec_get_frame_defaults(&s->current_picture.f);
1075 if (init_context_frame(s))
1078 s->parse_context.state = -1;
1080 s->context_initialized = 1;
1081 s->thread_context[0] = s;
1083 // if (s->width && s->height) {
// Slice threading: clone the context for each extra slice thread and
// assign each thread an even share of MB rows.
// NOTE(review): the av_malloc() result is not checked before the memcpy
// on the next line — an OOM here would dereference NULL.
1084 if (nb_slices > 1) {
1085 for (i = 1; i < nb_slices; i++) {
1086 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1087 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1090 for (i = 0; i < nb_slices; i++) {
1091 if (init_duplicate_context(s->thread_context[i]) < 0)
1093 s->thread_context[i]->start_mb_y =
1094 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1095 s->thread_context[i]->end_mb_y =
1096 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1099 if (init_duplicate_context(s) < 0)
1102 s->end_mb_y = s->mb_height;
1104 s->slice_context_count = nb_slices;
// fail path: tear down everything allocated so far.
1109 ff_MPV_common_end(s);
1114 * Frees and resets MpegEncContext fields depending on the resolution.
1115 * Is used during resolution changes to avoid a full reinitialization of the
1118 static int free_context_frame(MpegEncContext *s)
// Release every table allocated by init_context_frame(), in kind.
1122 av_freep(&s->mb_type);
1123 av_freep(&s->p_mv_table_base);
1124 av_freep(&s->b_forw_mv_table_base);
1125 av_freep(&s->b_back_mv_table_base);
1126 av_freep(&s->b_bidir_forw_mv_table_base);
1127 av_freep(&s->b_bidir_back_mv_table_base);
1128 av_freep(&s->b_direct_mv_table_base);
// Clear the derived pointers which aliased into the freed base tables.
1129 s->p_mv_table = NULL;
1130 s->b_forw_mv_table = NULL;
1131 s->b_back_mv_table = NULL;
1132 s->b_bidir_forw_mv_table = NULL;
1133 s->b_bidir_back_mv_table = NULL;
1134 s->b_direct_mv_table = NULL;
1135 for (i = 0; i < 2; i++) {
1136 for (j = 0; j < 2; j++) {
1137 for (k = 0; k < 2; k++) {
1138 av_freep(&s->b_field_mv_table_base[i][j][k]);
1139 s->b_field_mv_table[i][j][k] = NULL;
1141 av_freep(&s->b_field_select_table[i][j]);
1142 av_freep(&s->p_field_mv_table_base[i][j]);
1143 s->p_field_mv_table[i][j] = NULL;
1145 av_freep(&s->p_field_select_table[i]);
1148 av_freep(&s->dc_val_base);
1149 av_freep(&s->coded_block_base);
1150 av_freep(&s->mbintra_table);
1151 av_freep(&s->cbp_table);
1152 av_freep(&s->pred_dir_table);
1154 av_freep(&s->mbskip_table);
1156 av_freep(&s->er.error_status_table);
1157 av_freep(&s->er.er_temp_buffer);
1158 av_freep(&s->mb_index2xy);
1159 av_freep(&s->lambda_table);
1161 av_freep(&s->cplx_tab);
1162 av_freep(&s->bits_tab);
// Zeroed strides force re-validation on the next frame allocation.
1164 s->linesize = s->uvlinesize = 0;
// Re-initialize the context for a new frame size: tear down all
// size-dependent state (slice threads, frame tables), recompute the
// macroblock grid, then rebuild via init_context_frame(). On failure the
// fail path (visible at the end) tears the whole context down.
1169 int ff_MPV_common_frame_size_change(MpegEncContext *s)
// Free per-slice duplicate contexts; index 0 is `s` itself, so only
// indices >= 1 are heap-freed.
1173 if (s->slice_context_count > 1) {
1174 for (i = 0; i < s->slice_context_count; i++) {
1175 free_duplicate_context(s->thread_context[i]);
1177 for (i = 1; i < s->slice_context_count; i++) {
1178 av_freep(&s->thread_context[i]);
1181 free_duplicate_context(s);
1183 if ((err = free_context_frame(s)) < 0)
// Existing pictures keep their buffers but are flagged for reallocation
// at the new size on next use (see ff_find_unused_picture()).
1187 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1188 s->picture[i].needs_realloc = 1;
1191 s->last_picture_ptr =
1192 s->next_picture_ptr =
1193 s->current_picture_ptr = NULL;
// Interlaced MPEG-2 rounds the MB height up to an even number of
// 16-line field macroblock rows; everything else uses plain 16x16 MBs.
1196 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1197 s->mb_height = (s->height + 31) / 32 * 2;
1199 s->mb_height = (s->height + 15) / 16;
1201 if ((s->width || s->height) &&
1202 av_image_check_size(s->width, s->height, 0, s->avctx))
1203 return AVERROR_INVALIDDATA;
1205 if ((err = init_context_frame(s)))
1208 s->thread_context[0] = s;
1210 if (s->width && s->height) {
1211 int nb_slices = s->slice_context_count;
1212 if (nb_slices > 1) {
// NOTE(review): av_malloc() result is used by memcpy without a NULL
// check — an OOM here would crash; confirm against upstream handling.
1213 for (i = 1; i < nb_slices; i++) {
1214 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1215 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
// Divide the MB rows evenly (with rounding) among the slice contexts.
1218 for (i = 0; i < nb_slices; i++) {
1219 if (init_duplicate_context(s->thread_context[i]) < 0)
1221 s->thread_context[i]->start_mb_y =
1222 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1223 s->thread_context[i]->end_mb_y =
1224 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1227 err = init_duplicate_context(s);
1231 s->end_mb_y = s->mb_height;
1233 s->slice_context_count = nb_slices;
// fail path: full teardown so the context is not left half-initialized.
1238 ff_MPV_common_end(s);
1242 /* init common structure for both encoder and decoder */
// NOTE(review): the comment above is stale — this function *frees* the
// common structure (the teardown counterpart of ff_MPV_common_init()).
1243 void ff_MPV_common_end(MpegEncContext *s)
// Release slice thread contexts; index 0 is `s` itself and is not freed.
1247 if (s->slice_context_count > 1) {
1248 for (i = 0; i < s->slice_context_count; i++) {
1249 free_duplicate_context(s->thread_context[i]);
1251 for (i = 1; i < s->slice_context_count; i++) {
1252 av_freep(&s->thread_context[i]);
1254 s->slice_context_count = 1;
1255 } else free_duplicate_context(s);
1257 av_freep(&s->parse_context.buffer);
1258 s->parse_context.buffer_size = 0;
1260 av_freep(&s->bitstream_buffer);
1261 s->allocated_bitstream_buffer_size = 0;
1263 av_freep(&s->avctx->stats_out);
1264 av_freep(&s->ac_stats);
// The chroma matrices may alias the luma ones (shared allocation in the
// encoder init); only free them when they are distinct to avoid a
// double free, then NULL both aliases unconditionally.
1266 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1267 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1268 s->q_chroma_intra_matrix= NULL;
1269 s->q_chroma_intra_matrix16= NULL;
1270 av_freep(&s->q_intra_matrix);
1271 av_freep(&s->q_inter_matrix);
1272 av_freep(&s->q_intra_matrix16);
1273 av_freep(&s->q_inter_matrix16);
1274 av_freep(&s->input_picture);
1275 av_freep(&s->reordered_input_picture);
1276 av_freep(&s->dct_offset);
// Unreference every picture (and its side-data tables) before freeing
// the picture array itself.
1279 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1280 free_picture_tables(&s->picture[i]);
1281 ff_mpeg_unref_picture(s, &s->picture[i]);
1284 av_freep(&s->picture);
1285 free_picture_tables(&s->last_picture);
1286 ff_mpeg_unref_picture(s, &s->last_picture);
1287 free_picture_tables(&s->current_picture);
1288 ff_mpeg_unref_picture(s, &s->current_picture);
1289 free_picture_tables(&s->next_picture);
1290 ff_mpeg_unref_picture(s, &s->next_picture);
1291 free_picture_tables(&s->new_picture);
1292 ff_mpeg_unref_picture(s, &s->new_picture);
1294 free_context_frame(s);
// Reset state so a later re-init starts from a clean slate.
1296 s->context_initialized = 0;
1297 s->last_picture_ptr =
1298 s->next_picture_ptr =
1299 s->current_picture_ptr = NULL;
1300 s->linesize = s->uvlinesize = 0;
// Build the run/level helper tables (max_level[], max_run[],
// index_run[]) for an RL (run-length) VLC table. When static_store is
// non-NULL the tables live in the caller-provided static buffer and the
// function is idempotent; otherwise they are heap-allocated.
1303 av_cold void ff_init_rl(RLTable *rl,
1304 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1306 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1307 uint8_t index_run[MAX_RUN + 1];
1308 int last, run, level, start, end, i;
1310 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1311 if (static_store && rl->max_level[0])
1314 /* compute max_level[], max_run[] and index_run[] */
// Pass 0 covers non-last codes, pass 1 the "last coefficient" codes
// (start/end are set in hidden lines between the loop head and here).
1315 for (last = 0; last < 2; last++) {
// index_run is initialized to rl->n (== "no code") as a sentinel.
1324 memset(max_level, 0, MAX_RUN + 1);
1325 memset(max_run, 0, MAX_LEVEL + 1);
1326 memset(index_run, rl->n, MAX_RUN + 1);
1327 for (i = start; i < end; i++) {
1328 run = rl->table_run[i];
1329 level = rl->table_level[i];
1330 if (index_run[run] == rl->n)
1332 if (level > max_level[run])
1333 max_level[run] = level;
1334 if (run > max_run[level])
1335 max_run[level] = run;
// Static layout: [max_level | max_run | index_run] packed contiguously
// inside static_store[last] (offsets MAX_RUN+1 and MAX_RUN+MAX_LEVEL+2).
1338 rl->max_level[last] = static_store[last];
1340 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1341 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1343 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1345 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1346 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1348 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1350 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1351 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
// Pre-expand the RL VLC into per-qscale tables rl->rl_vlc[q]: for every
// VLC entry the dequantized level (level * qmul + qadd) is baked in so
// the decoder's hot loop avoids the multiply per coefficient.
1355 av_cold void ff_init_vlc_rl(RLTable *rl)
1359 for (q = 0; q < 32; q++) {
// qadd = (q - 1) | 1 is the standard H.263-style odd rounding term
// (qmul is set in a hidden line between these two).
1361 int qadd = (q - 1) | 1;
1367 for (i = 0; i < rl->vlc.table_size; i++) {
1368 int code = rl->vlc.table[i][0];
1369 int len = rl->vlc.table[i][1];
1372 if (len == 0) { // illegal code
1375 } else if (len < 0) { // more bits needed
1379 if (code == rl->n) { // esc
// run is stored +1; codes past rl->last additionally carry +192 to
// flag "last coefficient in block" for the decoder.
1383 run = rl->table_run[code] + 1;
1384 level = rl->table_level[code] * qmul + qadd;
1385 if (code >= rl->last) run += 192;
1388 rl->rl_vlc[q][i].len = len;
1389 rl->rl_vlc[q][i].level = level;
1390 rl->rl_vlc[q][i].run = run;
// Unreference every non-reference picture in the pool; the current
// picture is kept unless remove_current is set.
1395 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1399 /* release non reference frames */
1400 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1401 if (!s->picture[i].reference &&
1402 (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1403 ff_mpeg_unref_picture(s, &s->picture[i]);
// Whether a pool slot may be recycled: never the last (reference)
// picture; free if it has no data, or if it is flagged for realloc and
// is not held as a delayed reference. (Return statements are on hidden
// lines between the visible conditions.)
1408 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1410 if (pic == s->last_picture_ptr)
1412 if (pic->f.data[0] == NULL)
1414 if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
// Find a recyclable slot in s->picture[]. Shared mode only accepts
// fully empty slots; otherwise any slot passing pic_is_unused() will
// do. Exhaustion is a hard internal error (see rationale below) rather
// than a recoverable failure.
1419 static int find_unused_picture(MpegEncContext *s, int shared)
1424 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1425 if (s->picture[i].f.data[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1429 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1430 if (pic_is_unused(s, &s->picture[i]))
1435 av_log(s->avctx, AV_LOG_FATAL,
1436 "Internal error, picture buffer overflow\n");
1437 /* We could return -1, but the codec would crash trying to draw into a
1438 * non-existing frame anyway. This is safer than waiting for a random crash.
1439 * Also the return of this is never useful, an encoder must only allocate
1440 * as much as allowed in the specification. This has no relationship to how
1441 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1442 * enough for such valid streams).
1443 * Plus, a decoder has to check stream validity and remove frames if too
1444 * many reference frames are around. Waiting for "OOM" is not correct at
1445 * all. Similarly, missing reference frames have to be replaced by
1446 * interpolated/MC frames, anything else is a bug in the codec ...
// Public wrapper around find_unused_picture(): if the chosen slot was
// flagged needs_realloc (e.g. after a resolution change), drop its old
// buffers and tables here so the caller gets a clean slot.
1452 int ff_find_unused_picture(MpegEncContext *s, int shared)
1454 int ret = find_unused_picture(s, shared);
1456 if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1457 if (s->picture[ret].needs_realloc) {
1458 s->picture[ret].needs_realloc = 0;
1459 free_picture_tables(&s->picture[ret]);
1460 ff_mpeg_unref_picture(s, &s->picture[ret]);
1461 avcodec_get_frame_defaults(&s->picture[ret].f);
// Refresh the per-coefficient DCT noise-reduction offsets from the
// running error statistics, separately for intra and inter blocks.
1467 static void update_noise_reduction(MpegEncContext *s)
1471 for (intra = 0; intra < 2; intra++) {
// Halve both counters once they grow large so the statistics stay an
// exponentially-decaying average and never overflow.
1472 if (s->dct_count[intra] > (1 << 16)) {
1473 for (i = 0; i < 64; i++) {
1474 s->dct_error_sum[intra][i] >>= 1;
1476 s->dct_count[intra] >>= 1;
// offset = strength * count / avg_error, computed with rounding; the
// +1 in the divisor guards against division by zero.
1479 for (i = 0; i < 64; i++) {
1480 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1481 s->dct_count[intra] +
1482 s->dct_error_sum[intra][i] / 2) /
1483 (s->dct_error_sum[intra][i] + 1);
1489 * generic function for encode/decode called after coding/decoding
1490 * the header and before a frame is coded/decoded.
// (review) Responsibilities visible below: recycle stale reference
// pictures, pick/allocate the current picture, rotate last/next
// reference pointers, synthesize dummy references when a stream starts
// on a non-keyframe, adjust data/linesize for field pictures, and
// select the dct_unquantize functions for the active codec.
1492 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1498 if (!ff_thread_can_start_frame(avctx)) {
1499 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1503 /* mark & release old frames */
1504 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1505 s->last_picture_ptr != s->next_picture_ptr &&
1506 s->last_picture_ptr->f.data[0]) {
1507 ff_mpeg_unref_picture(s, s->last_picture_ptr);
1510 /* release forgotten pictures */
1511 /* if (mpeg124/h263) */
// A referenced picture that is neither last nor next should not exist
// outside frame threading — log and reclaim it.
1513 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1514 if (&s->picture[i] != s->last_picture_ptr &&
1515 &s->picture[i] != s->next_picture_ptr &&
1516 s->picture[i].reference && !s->picture[i].needs_realloc) {
1517 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1518 av_log(avctx, AV_LOG_ERROR,
1519 "releasing zombie picture\n");
1520 ff_mpeg_unref_picture(s, &s->picture[i]);
1525 ff_mpeg_unref_picture(s, &s->current_picture);
1528 ff_release_unused_pictures(s, 1);
1530 if (s->current_picture_ptr &&
1531 s->current_picture_ptr->f.data[0] == NULL) {
1532 // we already have a unused image
1533 // (maybe it was set before reading the header)
1534 pic = s->current_picture_ptr;
1536 i = ff_find_unused_picture(s, 0);
1538 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1541 pic = &s->picture[i];
// Only non-droppable, non-B pictures become references.
1545 if (!s->droppable) {
1546 if (s->pict_type != AV_PICTURE_TYPE_B)
1550 pic->f.coded_picture_number = s->coded_picture_number++;
1552 if (ff_alloc_picture(s, pic, 0) < 0)
1555 s->current_picture_ptr = pic;
1556 // FIXME use only the vars from current_pic
1557 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1558 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1559 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
// For field pictures, top_field_first is derived from which field
// arrives first rather than from the sequence header flag.
1560 if (s->picture_structure != PICT_FRAME)
1561 s->current_picture_ptr->f.top_field_first =
1562 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1564 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1565 !s->progressive_sequence;
1566 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1569 s->current_picture_ptr->f.pict_type = s->pict_type;
1570 // if (s->flags && CODEC_FLAG_QSCALE)
1571 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1572 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1574 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1575 s->current_picture_ptr)) < 0)
// Reference rotation: a non-B picture pushes next -> last and becomes
// the new next reference.
1578 if (s->pict_type != AV_PICTURE_TYPE_B) {
1579 s->last_picture_ptr = s->next_picture_ptr;
1581 s->next_picture_ptr = s->current_picture_ptr;
1583 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1584 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1585 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1586 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1587 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1588 s->pict_type, s->droppable);
// Missing last reference (stream starts on P/B, or a field-based first
// keyframe): allocate a dummy picture so motion compensation has
// something to read from.
1590 if ((s->last_picture_ptr == NULL ||
1591 s->last_picture_ptr->f.data[0] == NULL) &&
1592 (s->pict_type != AV_PICTURE_TYPE_I ||
1593 s->picture_structure != PICT_FRAME)) {
1594 int h_chroma_shift, v_chroma_shift;
1595 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1596 &h_chroma_shift, &v_chroma_shift);
1597 if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f.data[0])
1598 av_log(avctx, AV_LOG_DEBUG,
1599 "allocating dummy last picture for B frame\n");
1600 else if (s->pict_type != AV_PICTURE_TYPE_I)
1601 av_log(avctx, AV_LOG_ERROR,
1602 "warning: first frame is no keyframe\n");
1603 else if (s->picture_structure != PICT_FRAME)
1604 av_log(avctx, AV_LOG_DEBUG,
1605 "allocate dummy last picture for field based first keyframe\n");
1607 /* Allocate a dummy frame */
1608 i = ff_find_unused_picture(s, 0);
1610 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1613 s->last_picture_ptr = &s->picture[i];
1614 s->last_picture_ptr->f.key_frame = 0;
1615 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1616 s->last_picture_ptr = NULL;
// Fill the dummy with mid-grey (0x80 in all planes).
1620 memset(s->last_picture_ptr->f.data[0], 0x80,
1621 avctx->height * s->last_picture_ptr->f.linesize[0]);
1622 memset(s->last_picture_ptr->f.data[1], 0x80,
1623 (avctx->height >> v_chroma_shift) *
1624 s->last_picture_ptr->f.linesize[1]);
1625 memset(s->last_picture_ptr->f.data[2], 0x80,
1626 (avctx->height >> v_chroma_shift) *
1627 s->last_picture_ptr->f.linesize[2]);
// FLV1/H.263 concealment expects luma 16 (black) instead of grey.
1629 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1630 for(i=0; i<avctx->height; i++)
1631 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
// Mark both fields complete so frame-threaded consumers never wait.
1634 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1635 ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
// Same dummy-allocation dance for a missing next (future) reference of
// a B picture.
1637 if ((s->next_picture_ptr == NULL ||
1638 s->next_picture_ptr->f.data[0] == NULL) &&
1639 s->pict_type == AV_PICTURE_TYPE_B) {
1640 /* Allocate a dummy frame */
1641 i = ff_find_unused_picture(s, 0);
1643 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1646 s->next_picture_ptr = &s->picture[i];
1647 s->next_picture_ptr->f.key_frame = 0;
1648 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1649 s->next_picture_ptr = NULL;
1652 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1653 ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1656 #if 0 // BUFREF-FIXME
1657 memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1658 memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
// Re-take local refs on the (possibly new) last/next pictures.
1660 if (s->last_picture_ptr) {
1661 ff_mpeg_unref_picture(s, &s->last_picture);
1662 if (s->last_picture_ptr->f.data[0] &&
1663 (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1664 s->last_picture_ptr)) < 0)
1667 if (s->next_picture_ptr) {
1668 ff_mpeg_unref_picture(s, &s->next_picture);
1669 if (s->next_picture_ptr->f.data[0] &&
1670 (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1671 s->next_picture_ptr)) < 0)
1675 av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1676 s->last_picture_ptr->f.data[0]));
// Field pictures: point data at the selected field and double the
// linesizes so the field is addressed as a half-height frame.
1678 if (s->picture_structure!= PICT_FRAME) {
1680 for (i = 0; i < 4; i++) {
1681 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1682 s->current_picture.f.data[i] +=
1683 s->current_picture.f.linesize[i];
1685 s->current_picture.f.linesize[i] *= 2;
1686 s->last_picture.f.linesize[i] *= 2;
1687 s->next_picture.f.linesize[i] *= 2;
1691 s->err_recognition = avctx->err_recognition;
1693 /* set dequantizer, we can't do it during init as
1694 * it might change for mpeg4 and we can't do it in the header
1695 * decode as init is not called for mpeg4 there yet */
1696 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1697 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1698 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1699 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1700 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1701 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1703 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1704 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1707 if (s->dct_error_sum) {
1708 av_assert2(s->avctx->noise_reduction && s->encoding);
1709 update_noise_reduction(s);
1712 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1713 return ff_xvmc_field_start(s, avctx);
1718 /* generic function for encode/decode called after a
1719 * frame has been coded/decoded. */
1720 void ff_MPV_frame_end(MpegEncContext *s)
1722 /* redraw edges for the frame if decoding didn't complete */
1723 // just to make sure that all data is rendered.
1724 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1725 ff_xvmc_field_end(s);
// Edge extension is only needed for reference pictures decoded in
// software with unrestricted MVs and no caller-provided edge buffer.
1726 } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1727 !s->avctx->hwaccel &&
1728 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1729 s->unrestricted_mv &&
1730 s->current_picture.reference &&
1732 !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1735 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1736 int hshift = desc->log2_chroma_w;
1737 int vshift = desc->log2_chroma_h;
// Replicate border pixels into the EDGE_WIDTH margin of each plane so
// out-of-frame motion vectors read valid data.
1738 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1739 s->h_edge_pos, s->v_edge_pos,
1740 EDGE_WIDTH, EDGE_WIDTH,
1741 EDGE_TOP | EDGE_BOTTOM);
1742 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1743 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1744 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1745 EDGE_TOP | EDGE_BOTTOM);
1746 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1747 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1748 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1749 EDGE_TOP | EDGE_BOTTOM);
1754 s->last_pict_type = s->pict_type;
1755 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1756 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1757 s->last_non_b_pict_type = s->pict_type;
1760 /* copy back current_picture variables */
// Find the pool slot backing current_picture (matched by data pointer)
// and copy the updated fields back into it.
1761 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1762 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1763 s->picture[i] = s->current_picture;
1767 av_assert0(i < MAX_PICTURE_COUNT);
1770 // clear copies, to avoid confusion
1772 memset(&s->last_picture, 0, sizeof(Picture));
1773 memset(&s->next_picture, 0, sizeof(Picture));
1774 memset(&s->current_picture, 0, sizeof(Picture));
1776 s->avctx->coded_frame = &s->current_picture_ptr->f;
1778 if (s->current_picture.reference)
1779 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1783 * Draw a line from (ex, ey) -> (sx, sy).
1784 * @param w width of the image
1785 * @param h height of the image
1786 * @param stride stride/linesize of the image
1787 * @param color color of the arrow
// (review) Anti-aliased line in 16.16 fixed point: the major axis is
// stepped one pixel at a time and `color` is split between the two
// pixels adjacent on the minor axis according to the fractional part.
1789 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1790 int w, int h, int stride, int color)
1794 sx = av_clip(sx, 0, w - 1);
1795 sy = av_clip(sy, 0, h - 1);
1796 ex = av_clip(ex, 0, w - 1);
1797 ey = av_clip(ey, 0, h - 1);
1799 buf[sy * stride + sx] += color;
// Mostly-horizontal case: ensure sx <= ex (swap endpoints), then walk x.
1801 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1803 FFSWAP(int, sx, ex);
1804 FFSWAP(int, sy, ey);
1806 buf += sx + sy * stride;
1808 f = ((ey - sy) << 16) / ex;
1809 for (x = 0; x <= ex; x++) {
1811 fr = (x * f) & 0xFFFF;
1812 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1813 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
// Mostly-vertical case: same scheme with x and y roles exchanged.
1817 FFSWAP(int, sx, ex);
1818 FFSWAP(int, sy, ey);
1820 buf += sx + sy * stride;
1823 f = ((ex - sx) << 16) / ey;
1826 for(y= 0; y <= ey; y++){
1828 fr = (y*f) & 0xFFFF;
1829 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1830 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1836 * Draw an arrow from (ex, ey) -> (sx, sy).
1837 * @param w width of the image
1838 * @param h height of the image
1839 * @param stride stride/linesize of the image
1840 * @param color color of the arrow
1842 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1843 int ey, int w, int h, int stride, int color)
// Allow endpoints slightly outside the frame; draw_line clips per pixel.
1847 sx = av_clip(sx, -100, w + 100);
1848 sy = av_clip(sy, -100, h + 100);
1849 ex = av_clip(ex, -100, w + 100);
1850 ey = av_clip(ey, -100, h + 100);
// Only draw the arrow head for vectors longer than 3 pixels; rx/ry are
// computed (hidden lines) from the normalized direction.
1855 if (dx * dx + dy * dy > 3 * 3) {
1858 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1860 // FIXME subpixel accuracy
1861 rx = ROUNDED_DIV(rx * 3 << 4, length);
1862 ry = ROUNDED_DIV(ry * 3 << 4, length);
// Two short strokes rotated +/-90 degrees form the head at (sx, sy).
1864 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1865 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1867 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1871 * Print debugging info for the given picture.
// (review) Two independent debug facilities: (1) a textual per-MB dump
// (skip count / qscale / MB-type characters) via av_log, and (2) visual
// overlays drawn directly into `pict` (motion-vector arrows, QP
// shading, MB-type coloring and partition grid).
1873 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1875 int mb_width, int mb_height, int mb_stride, int quarter_sample)
// Nothing useful to print for hwaccel output or when MB metadata is absent.
1877 if (avctx->hwaccel || !p || !p->mb_type
1878 || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
1882 if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1885 av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1886 av_get_picture_type_char(pict->pict_type));
1887 for (y = 0; y < mb_height; y++) {
1888 for (x = 0; x < mb_width; x++) {
1889 if (avctx->debug & FF_DEBUG_SKIP) {
1890 int count = mbskip_table[x + y * mb_stride];
1893 av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1895 if (avctx->debug & FF_DEBUG_QP) {
1896 av_log(avctx, AV_LOG_DEBUG, "%2d",
1897 p->qscale_table[x + y * mb_stride]);
1899 if (avctx->debug & FF_DEBUG_MB_TYPE) {
1900 int mb_type = p->mb_type[x + y * mb_stride];
1901 // Type & MV direction
1902 if (IS_PCM(mb_type))
1903 av_log(avctx, AV_LOG_DEBUG, "P");
1904 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1905 av_log(avctx, AV_LOG_DEBUG, "A");
1906 else if (IS_INTRA4x4(mb_type))
1907 av_log(avctx, AV_LOG_DEBUG, "i");
1908 else if (IS_INTRA16x16(mb_type))
1909 av_log(avctx, AV_LOG_DEBUG, "I");
1910 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1911 av_log(avctx, AV_LOG_DEBUG, "d");
1912 else if (IS_DIRECT(mb_type))
1913 av_log(avctx, AV_LOG_DEBUG, "D");
1914 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1915 av_log(avctx, AV_LOG_DEBUG, "g");
1916 else if (IS_GMC(mb_type))
1917 av_log(avctx, AV_LOG_DEBUG, "G");
1918 else if (IS_SKIP(mb_type))
1919 av_log(avctx, AV_LOG_DEBUG, "S");
1920 else if (!USES_LIST(mb_type, 1))
1921 av_log(avctx, AV_LOG_DEBUG, ">");
1922 else if (!USES_LIST(mb_type, 0))
1923 av_log(avctx, AV_LOG_DEBUG, "<");
1925 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1926 av_log(avctx, AV_LOG_DEBUG, "X");
// Second character: partition segmentation of the macroblock.
1930 if (IS_8X8(mb_type))
1931 av_log(avctx, AV_LOG_DEBUG, "+");
1932 else if (IS_16X8(mb_type))
1933 av_log(avctx, AV_LOG_DEBUG, "-");
1934 else if (IS_8X16(mb_type))
1935 av_log(avctx, AV_LOG_DEBUG, "|");
1936 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1937 av_log(avctx, AV_LOG_DEBUG, " ");
1939 av_log(avctx, AV_LOG_DEBUG, "?");
// Third character: interlaced coding marker.
1942 if (IS_INTERLACED(mb_type))
1943 av_log(avctx, AV_LOG_DEBUG, "=");
1945 av_log(avctx, AV_LOG_DEBUG, " ");
1948 av_log(avctx, AV_LOG_DEBUG, "\n");
// --- Visual overlays drawn into the output frame itself ---
1952 if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1953 (avctx->debug_mv)) {
1954 const int shift = 1 + quarter_sample;
1958 int h_chroma_shift, v_chroma_shift, block_height;
1959 const int width = avctx->width;
1960 const int height = avctx->height;
// H.264/SVQ3 store MVs on a 4x4 grid (log2=2), others on 8x8 (log2=1).
1961 const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1962 const int mv_stride = (mb_width << mv_sample_log2) +
1963 (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1965 *low_delay = 0; // needed to see the vectors without trashing the buffers
1967 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1969 av_frame_make_writable(pict);
1971 pict->opaque = NULL;
1972 ptr = pict->data[0];
1973 block_height = 16 >> v_chroma_shift;
1975 for (mb_y = 0; mb_y < mb_height; mb_y++) {
1977 for (mb_x = 0; mb_x < mb_width; mb_x++) {
1978 const int mb_index = mb_x + mb_y * mb_stride;
// Motion-vector arrows: type 0/1/2 select P-forward, B-forward and
// B-backward vectors, each gated by its own debug_mv flag.
1979 if ((avctx->debug_mv) && p->motion_val[0]) {
1981 for (type = 0; type < 3; type++) {
1985 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1986 (pict->pict_type!= AV_PICTURE_TYPE_P))
1991 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1992 (pict->pict_type!= AV_PICTURE_TYPE_B))
1997 if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1998 (pict->pict_type!= AV_PICTURE_TYPE_B))
2003 if (!USES_LIST(p->mb_type[mb_index], direction))
// One arrow per partition, anchored at the partition center.
2006 if (IS_8X8(p->mb_type[mb_index])) {
2008 for (i = 0; i < 4; i++) {
2009 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2010 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2011 int xy = (mb_x * 2 + (i & 1) +
2012 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2013 int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
2014 int my = (p->motion_val[direction][xy][1] >> shift) + sy;
2015 draw_arrow(ptr, sx, sy, mx, my, width,
2016 height, pict->linesize[0], 100);
2018 } else if (IS_16X8(p->mb_type[mb_index])) {
2020 for (i = 0; i < 2; i++) {
2021 int sx = mb_x * 16 + 8;
2022 int sy = mb_y * 16 + 4 + 8 * i;
2023 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2024 int mx = (p->motion_val[direction][xy][0] >> shift);
2025 int my = (p->motion_val[direction][xy][1] >> shift);
2027 if (IS_INTERLACED(p->mb_type[mb_index]))
2030 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2031 height, pict->linesize[0], 100);
2033 } else if (IS_8X16(p->mb_type[mb_index])) {
2035 for (i = 0; i < 2; i++) {
2036 int sx = mb_x * 16 + 4 + 8 * i;
2037 int sy = mb_y * 16 + 8;
2038 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2039 int mx = p->motion_val[direction][xy][0] >> shift;
2040 int my = p->motion_val[direction][xy][1] >> shift;
2042 if (IS_INTERLACED(p->mb_type[mb_index]))
2045 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2046 height, pict->linesize[0], 100);
2049 int sx= mb_x * 16 + 8;
2050 int sy= mb_y * 16 + 8;
2051 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2052 int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2053 int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2054 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
// QP visualization: paint both chroma planes of the MB with a grey
// level proportional to qscale (qscale ranges up to 31).
2058 if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2059 uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2060 0x0101010101010101ULL;
2062 for (y = 0; y < block_height; y++) {
2063 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2064 (block_height * mb_y + y) *
2065 pict->linesize[1]) = c;
2066 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2067 (block_height * mb_y + y) *
2068 pict->linesize[2]) = c;
2071 if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2073 int mb_type = p->mb_type[mb_index];
// COLOR() maps a hue angle/saturation to U/V chroma values.
2076 #define COLOR(theta, r) \
2077 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2078 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2082 if (IS_PCM(mb_type)) {
2084 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2085 IS_INTRA16x16(mb_type)) {
2087 } else if (IS_INTRA4x4(mb_type)) {
2089 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2091 } else if (IS_DIRECT(mb_type)) {
2093 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2095 } else if (IS_GMC(mb_type)) {
2097 } else if (IS_SKIP(mb_type)) {
2099 } else if (!USES_LIST(mb_type, 1)) {
2101 } else if (!USES_LIST(mb_type, 0)) {
2104 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
// Broadcast the chosen U/V bytes across 8 pixels at once.
2108 u *= 0x0101010101010101ULL;
2109 v *= 0x0101010101010101ULL;
2110 for (y = 0; y < block_height; y++) {
2111 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2112 (block_height * mb_y + y) * pict->linesize[1]) = u;
2113 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2114 (block_height * mb_y + y) * pict->linesize[2]) = v;
// Partition grid: XOR luma with 0x80 along internal partition edges.
2118 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2119 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2120 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2121 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2122 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2124 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2125 for (y = 0; y < 16; y++)
2126 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2127 pict->linesize[0]] ^= 0x80;
// For 4x4-MV codecs, additionally mark 8x8 blocks whose inner 4x4
// vectors differ (sub-partitioning is actually in use).
2129 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2130 int dm = 1 << (mv_sample_log2 - 2);
2131 for (i = 0; i < 4; i++) {
2132 int sx = mb_x * 16 + 8 * (i & 1);
2133 int sy = mb_y * 16 + 8 * (i >> 1);
2134 int xy = (mb_x * 2 + (i & 1) +
2135 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2137 int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2138 if (mv[0] != mv[dm] ||
2139 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2140 for (y = 0; y < 8; y++)
2141 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2142 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2143 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2144 pict->linesize[0]) ^= 0x8080808080808080ULL;
2148 if (IS_INTERLACED(mb_type) &&
2149 avctx->codec->id == AV_CODEC_ID_H264) {
// Consume the skip flag so the overlay is redrawn on the next frame.
2153 mbskip_table[mb_index] = 0;
// Convenience wrapper: forward the MpegEncContext geometry and tables
// to the context-independent ff_print_debug_info2().
2159 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
2161 ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2162 s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
// Export the picture's qscale table as frame side data. A new ref to
// the underlying buffer is taken and its view is advanced by
// 2*mb_stride + 1 entries — presumably to skip the padding rows/column
// before the first real macroblock (TODO confirm against the table
// layout set up in init_context_frame()).
2165 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
2167 AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
2168 int offset = 2*s->mb_stride + 1;
2170 return AVERROR(ENOMEM);
2171 av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2172 ref->size -= offset;
2173 ref->data += offset;
// On failure av_frame_set_qp_table() is expected to own/free `ref`.
2174 return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
// Half-pel motion compensation for one block in low-resolution
// decoding (lowres): motion vectors are scaled down by the lowres
// factor and the sub-pel phase is fed to the h264 chroma MC routines,
// which support arbitrary 1/8-pel phases.
2177 static inline int hpel_motion_lowres(MpegEncContext *s,
2178 uint8_t *dest, uint8_t *src,
2179 int field_based, int field_select,
2180 int src_x, int src_y,
2181 int width, int height, ptrdiff_t stride,
2182 int h_edge_pos, int v_edge_pos,
2183 int w, int h, h264_chroma_mc_func *pix_op,
2184 int motion_x, int motion_y)
2186 const int lowres = s->avctx->lowres;
2187 const int op_index = FFMIN(lowres, 3);
// s_mask keeps the sub-pel fraction bits of the downscaled vector.
2188 const int s_mask = (2 << lowres) - 1;
2192 if (s->quarter_sample) {
2197 sx = motion_x & s_mask;
2198 sy = motion_y & s_mask;
// NOTE: `>> lowres + 1` parses as `>> (lowres + 1)` (binary + binds
// tighter than >>) — intentional here: integer-pel part of a half-pel
// vector at 1/(2^lowres) scale.
2199 src_x += motion_x >> lowres + 1;
2200 src_y += motion_y >> lowres + 1;
2202 src += src_y * stride + src_x;
// Source block would read outside the decodable area: go through the
// edge-emulation buffer instead.
2204 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2205 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2206 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->linesize,
2207 src, s->linesize, w + 1,
2208 (h + 1) << field_based, src_x,
2209 src_y << field_based,
2212 src = s->edge_emu_buffer;
// Rescale the sub-pel phase into the 1/8-pel domain of pix_op.
2216 sx = (sx << 2) >> lowres;
2217 sy = (sy << 2) >> lowres;
2220 pix_op[op_index](dest, src, stride, h, sx, sy);
2224 /* apply one mpeg motion vector to the three components */
/*
 * Motion-compensate the Y, Cb and Cr planes of one macroblock from a single
 * MPEG motion vector, in lowres (reduced-resolution) decoding.
 * ref_picture: array of the 3 reference planes; pix_op: h264-chroma-style MC
 * function table used for all planes at lowres; motion_x/motion_y: the MV.
 * NOTE(review): gaps in the embedded numbering show lines are missing from
 * this excerpt (e.g. several parameter lines such as field_based,
 * bottom_field, field_select, h, mb_y are referenced below but their
 * declarations are not visible); only the visible code is documented.
 */
2225 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2232 uint8_t **ref_picture,
2233 h264_chroma_mc_func *pix_op,
2234 int motion_x, int motion_y,
2237 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2238 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2239 ptrdiff_t uvlinesize, linesize;
2240 const int lowres = s->avctx->lowres;
/* chroma op index: clamped to 3, shifted by chroma_x_shift for 4:4:4 vs 4:2:0 */
2241 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2242 const int block_s = 8>>lowres;
/* s_mask selects the sub-pel fraction bits of an MV at this lowres level */
2243 const int s_mask = (2 << lowres) - 1;
2244 const int h_edge_pos = s->h_edge_pos >> lowres;
2245 const int v_edge_pos = s->v_edge_pos >> lowres;
2246 linesize = s->current_picture.f.linesize[0] << field_based;
2247 uvlinesize = s->current_picture.f.linesize[1] << field_based;
2249 // FIXME obviously not perfect but qpel will not work in lowres anyway
2250 if (s->quarter_sample) {
2256 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
/* split MV into integer source position and sub-pel fraction */
2259 sx = motion_x & s_mask;
2260 sy = motion_y & s_mask;
2261 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2262 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2264 if (s->out_format == FMT_H263) {
2265 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2266 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2267 uvsrc_x = src_x >> 1;
2268 uvsrc_y = src_y >> 1;
2269 } else if (s->out_format == FMT_H261) {
2270 // even chroma mv's are full pel in H261
2273 uvsx = (2 * mx) & s_mask;
2274 uvsy = (2 * my) & s_mask;
2275 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2276 uvsrc_y = mb_y * block_s + (my >> lowres);
2278 if(s->chroma_y_shift){
2283 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2284 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2286 if(s->chroma_x_shift){
2290 uvsy = motion_y & s_mask;
2292 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2295 uvsx = motion_x & s_mask;
2296 uvsy = motion_y & s_mask;
2303 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2304 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2305 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* if the MV reaches outside the reference picture, MC from a padded copy */
2307 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2308 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2309 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, linesize >> field_based, ptr_y,
2310 linesize >> field_based, 17, 17 + field_based,
2311 src_x, src_y << field_based, h_edge_pos,
2313 ptr_y = s->edge_emu_buffer;
2314 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2315 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2316 s->vdsp.emulated_edge_mc(uvbuf, uvlinesize >> field_based,
2317 ptr_cb, uvlinesize >> field_based, 9,
2319 uvsrc_x, uvsrc_y << field_based,
2320 h_edge_pos >> 1, v_edge_pos >> 1);
2321 s->vdsp.emulated_edge_mc(uvbuf + 16, uvlinesize >> field_based,
2322 ptr_cr, uvlinesize >> field_based, 9,
2324 uvsrc_x, uvsrc_y << field_based,
2325 h_edge_pos >> 1, v_edge_pos >> 1);
2327 ptr_cr = uvbuf + 16;
2331 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
2333 dest_y += s->linesize;
2334 dest_cb += s->uvlinesize;
2335 dest_cr += s->uvlinesize;
2339 ptr_y += s->linesize;
2340 ptr_cb += s->uvlinesize;
2341 ptr_cr += s->uvlinesize;
/* rescale sub-pel fractions to the units expected by pix_op */
2344 sx = (sx << 2) >> lowres;
2345 sy = (sy << 2) >> lowres;
2346 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2348 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2349 int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2350 uvsx = (uvsx << 2) >> lowres;
2351 uvsy = (uvsy << 2) >> lowres;
2353 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2354 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2357 // FIXME h261 lowres loop filter
/*
 * Chroma motion compensation for a 4MV (four luma vectors) macroblock in
 * lowres mode: the four luma MVs are merged into one chroma MV (special
 * rounding via ff_h263_round_chroma) and both chroma planes are MC'd with it.
 * NOTE(review): gaps in the embedded numbering show lines are missing from
 * this excerpt (mx/my/sx/sy setup, some declarations, closing braces);
 * only the visible code is documented.
 */
2360 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2361 uint8_t *dest_cb, uint8_t *dest_cr,
2362 uint8_t **ref_picture,
2363 h264_chroma_mc_func * pix_op,
2366 const int lowres = s->avctx->lowres;
2367 const int op_index = FFMIN(lowres, 3);
2368 const int block_s = 8 >> lowres;
2369 const int s_mask = (2 << lowres) - 1;
/* chroma is subsampled, hence the extra +1 shift on the edge positions */
2370 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2371 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2372 int emu = 0, src_x, src_y, sx, sy;
2376 if (s->quarter_sample) {
2381 /* In case of 8X8, we construct a single chroma motion vector
2382 with a special rounding */
2383 mx = ff_h263_round_chroma(mx);
2384 my = ff_h263_round_chroma(my);
2388 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2389 src_y = s->mb_y * block_s + (my >> lowres + 1);
2391 offset = src_y * s->uvlinesize + src_x;
2392 ptr = ref_picture[1] + offset;
/* out-of-picture reference: MC the Cb block from an edge-padded copy */
2393 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2394 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2395 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->uvlinesize, ptr, s->uvlinesize,
2396 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2397 ptr = s->edge_emu_buffer;
2400 sx = (sx << 2) >> lowres;
2401 sy = (sy << 2) >> lowres;
2402 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* same offset and sub-pel fraction reused for the Cr plane */
2404 ptr = ref_picture[2] + offset;
2406 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->uvlinesize,
2407 ptr, s->uvlinesize, 9, 9,
2408 src_x, src_y, h_edge_pos, v_edge_pos);
2409 ptr = s->edge_emu_buffer;
2411 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2415 * motion compensation of a single macroblock
2417 * @param dest_y luma destination pointer
2418 * @param dest_cb chroma cb/u destination pointer
2419 * @param dest_cr chroma cr/v destination pointer
2420 * @param dir direction (0->forward, 1->backward)
2421 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2422 * @param pix_op halfpel motion compensation function (average or put normally)
2423 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/*
 * Lowres counterpart of ff_MPV_motion(): dispatches on s->mv_type
 * (16x16 / 8x8 / field / 16x8 / dual-prime cases are visible below) and
 * forwards to mpeg_motion_lowres()/hpel_motion_lowres()/
 * chroma_4mv_motion_lowres().
 * NOTE(review): gaps in the embedded numbering show the case labels and
 * several statements are missing from this excerpt; only the visible code
 * is documented.
 */
2425 static inline void MPV_motion_lowres(MpegEncContext *s,
2426 uint8_t *dest_y, uint8_t *dest_cb,
2428 int dir, uint8_t **ref_picture,
2429 h264_chroma_mc_func *pix_op)
2433 const int lowres = s->avctx->lowres;
2434 const int block_s = 8 >>lowres;
2439 switch (s->mv_type) {
2441 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2443 ref_picture, pix_op,
2444 s->mv[dir][0][0], s->mv[dir][0][1],
/* 4MV: one 8x8 luma block per iteration, chroma handled afterwards */
2450 for (i = 0; i < 4; i++) {
2451 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2452 s->linesize) * block_s,
2453 ref_picture[0], 0, 0,
2454 (2 * mb_x + (i & 1)) * block_s,
2455 (2 * mb_y + (i >> 1)) * block_s,
2456 s->width, s->height, s->linesize,
2457 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2458 block_s, block_s, pix_op,
2459 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate the 4 luma MVs for the derived chroma MV */
2461 mx += s->mv[dir][i][0];
2462 my += s->mv[dir][i][1];
2465 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2466 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
2470 if (s->picture_structure == PICT_FRAME) {
/* frame picture with field MVs: top field then bottom field */
2472 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2473 1, 0, s->field_select[dir][0],
2474 ref_picture, pix_op,
2475 s->mv[dir][0][0], s->mv[dir][0][1],
2478 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2479 1, 1, s->field_select[dir][1],
2480 ref_picture, pix_op,
2481 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture referencing the opposite parity of the current frame */
2484 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2485 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2486 ref_picture = s->current_picture_ptr->f.data;
2489 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2490 0, 0, s->field_select[dir][0],
2491 ref_picture, pix_op,
2493 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
2497 for (i = 0; i < 2; i++) {
2498 uint8_t **ref2picture;
2500 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2501 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2502 ref2picture = ref_picture;
2504 ref2picture = s->current_picture_ptr->f.data;
2507 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2508 0, 0, s->field_select[dir][i],
2509 ref2picture, pix_op,
2510 s->mv[dir][i][0], s->mv[dir][i][1] +
2511 2 * block_s * i, block_s, mb_y >> 1);
2513 dest_y += 2 * block_s * s->linesize;
2514 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2515 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2519 if (s->picture_structure == PICT_FRAME) {
2520 for (i = 0; i < 2; i++) {
2522 for (j = 0; j < 2; j++) {
2523 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2525 ref_picture, pix_op,
2526 s->mv[dir][2 * i + j][0],
2527 s->mv[dir][2 * i + j][1],
2530 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2533 for (i = 0; i < 2; i++) {
2534 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2535 0, 0, s->picture_structure != i + 1,
2536 ref_picture, pix_op,
2537 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2538 2 * block_s, mb_y >> 1);
2540 // after put we make avg of the same block
2541 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2543 // opposite parity is always in the same
2544 // frame if this is second field
2545 if (!s->first_field) {
2546 ref_picture = s->current_picture_ptr->f.data;
2557 * find the lowest MB row referenced in the MVs
/*
 * Scan the MVs of the current MB in direction 'dir' and return the lowest
 * (largest y) macroblock row they reference, clamped to the picture; used
 * for frame-threading progress waits. Returns the last MB row for cases
 * that cannot be bounded (field pictures / mcsel, per the early check).
 * NOTE(review): the mv_type switch bodies setting 'mvs'/'off' adjustments
 * are missing from this excerpt; only the visible code is documented.
 */
2559 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2561 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2562 int my, off, i, mvs;
2564 if (s->picture_structure != PICT_FRAME || s->mcsel)
2567 switch (s->mv_type) {
2581 for (i = 0; i < mvs; i++) {
/* normalize half-pel MVs to quarter-pel units before taking extrema */
2582 my = s->mv[dir][i][1]<<qpel_shift;
2583 my_max = FFMAX(my_max, my);
2584 my_min = FFMIN(my_min, my);
/* convert the largest quarter-pel displacement to whole MB rows (/64) */
2587 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2589 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2591 return s->mb_height-1;
2594 /* put block[] to dest[] */
/* Dequantize an intra block and write (overwrite, not add) its IDCT
 * result to dest. 'i' is the block index, used to pick luma/chroma scale. */
2595 static inline void put_dct(MpegEncContext *s,
2596 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2598 s->dct_unquantize_intra(s, block, i, qscale);
2599 s->dsp.idct_put (dest, line_size, block);
2602 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block to dest; skipped entirely
 * when the block has no coded coefficients (block_last_index < 0). */
2603 static inline void add_dct(MpegEncContext *s,
2604 int16_t *block, int i, uint8_t *dest, int line_size)
2606 if (s->block_last_index[i] >= 0) {
2607 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block and add its IDCT result to dest; no-op when
 * the block has no coded coefficients (block_last_index < 0). */
2611 static inline void add_dequant_dct(MpegEncContext *s,
2612 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2614 if (s->block_last_index[i] >= 0) {
2615 s->dct_unquantize_inter(s, block, i, qscale);
2617 s->dsp.idct_add (dest, line_size, block);
2622 * Clean dc, ac, coded_block for the current non-intra MB.
/*
 * Reset the intra-prediction state (DC predictors to 1024, AC values to 0,
 * coded_block flags to 0 for MSMPEG4 v3+) for the current macroblock, so a
 * following intra MB does not predict from stale data.
 * NOTE(review): gaps in the embedded numbering show some lines are missing
 * from this excerpt (e.g. the dc_val[0][xy] store and the chroma dc_val[1]
 * reset are not visible); only the visible code is documented.
 */
2624 void ff_clean_intra_table_entries(MpegEncContext *s)
2626 int wrap = s->b8_stride;
2627 int xy = s->block_index[0];
/* luma DC predictors back to the reset value (1024 = 128 << 3) */
2630 s->dc_val[0][xy + 1 ] =
2631 s->dc_val[0][xy + wrap] =
2632 s->dc_val[0][xy + 1 + wrap] = 1024;
/* clear luma AC prediction rows (two 8x8 rows of two blocks each) */
2634 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2635 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2636 if (s->msmpeg4_version>=3) {
2637 s->coded_block[xy ] =
2638 s->coded_block[xy + 1 ] =
2639 s->coded_block[xy + wrap] =
2640 s->coded_block[xy + 1 + wrap] = 0;
/* switch to MB-granularity indexing for the chroma planes */
2643 wrap = s->mb_stride;
2644 xy = s->mb_x + s->mb_y * wrap;
2646 s->dc_val[2][xy] = 1024;
2648 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2649 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2651 s->mbintra_table[xy]= 0;
2654 /* generic function called after a macroblock has been parsed by the
2655 decoder or after it has been encoded by the encoder.
2657 Important variables used:
2658 s->mb_intra : true if intra macroblock
2659 s->mv_dir : motion vector direction
2660 s->mv_type : motion vector type
2661 s->mv : motion vector
2662 s->interlaced_dct : true if interlaced dct used (mpeg2)
/*
 * Reconstruct one macroblock: MC (normal or lowres path), then dequant+IDCT
 * of the residual blocks, with special cases for MPEG-1/2, skipped MBs,
 * gray-only decoding, interlaced DCT and 4:2:0/4:2:2/4:4:4 chroma layouts.
 * lowres_flag/is_mpeg12 are compile-time-ish flags selecting the code paths.
 * NOTE(review): gaps in the embedded numbering show many lines are missing
 * from this excerpt (loop headers, else branches, closing braces); only the
 * visible code is documented.
 */
2664 static av_always_inline
2665 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2666 int lowres_flag, int is_mpeg12)
2668 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2669 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2670 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2674 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2675 /* print DCT coefficients */
2677 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2679 for(j=0; j<64; j++){
2680 av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2682 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2686 s->current_picture.qscale_table[mb_xy] = s->qscale;
2688 /* update DC predictors for P macroblocks */
2690 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2691 if(s->mbintra_table[mb_xy])
2692 ff_clean_intra_table_entries(s);
2696 s->last_dc[2] = 128 << s->intra_dc_precision;
2699 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2700 s->mbintra_table[mb_xy]=1;
2702 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2703 uint8_t *dest_y, *dest_cb, *dest_cr;
2704 int dct_linesize, dct_offset;
2705 op_pixels_func (*op_pix)[4];
2706 qpel_mc_func (*op_qpix)[16];
2707 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2708 const int uvlinesize = s->current_picture.f.linesize[1];
2709 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2710 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2712 /* avoid copy if macroblock skipped in last frame too */
2713 /* skip only during decoding as we might trash the buffers during encoding a bit */
2715 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2717 if (s->mb_skipped) {
2719 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2721 } else if(!s->current_picture.reference) {
2724 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: fields interleave, so double the stride, offset by 1 line */
2728 dct_linesize = linesize << s->interlaced_dct;
2729 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2733 dest_cb= s->dest[1];
2734 dest_cr= s->dest[2];
/* non-readable destination: reconstruct into a scratchpad instead */
2736 dest_y = s->b_scratchpad;
2737 dest_cb= s->b_scratchpad+16*linesize;
2738 dest_cr= s->b_scratchpad+32*linesize;
2742 /* motion handling */
2743 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the referenced rows of the ref frames exist */
2746 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2747 if (s->mv_dir & MV_DIR_FORWARD) {
2748 ff_thread_await_progress(&s->last_picture_ptr->tf,
2749 ff_MPV_lowest_referenced_row(s, 0),
2752 if (s->mv_dir & MV_DIR_BACKWARD) {
2753 ff_thread_await_progress(&s->next_picture_ptr->tf,
2754 ff_MPV_lowest_referenced_row(s, 1),
/* lowres MC path: put first direction, then average in the second */
2760 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2762 if (s->mv_dir & MV_DIR_FORWARD) {
2763 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2764 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2766 if (s->mv_dir & MV_DIR_BACKWARD) {
2767 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* full-resolution MC path */
2770 op_qpix= s->me.qpel_put;
2771 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2772 op_pix = s->hdsp.put_pixels_tab;
2774 op_pix = s->hdsp.put_no_rnd_pixels_tab;
2776 if (s->mv_dir & MV_DIR_FORWARD) {
2777 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2778 op_pix = s->hdsp.avg_pixels_tab;
2779 op_qpix= s->me.qpel_avg;
2781 if (s->mv_dir & MV_DIR_BACKWARD) {
2782 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2787 /* skip dequant / idct if we are really late ;) */
2788 if(s->avctx->skip_idct){
2789 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2790 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2791 || s->avctx->skip_idct >= AVDISCARD_ALL)
2795 /* add dct residue */
2796 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2797 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2798 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2799 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2800 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2801 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2803 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2804 if (s->chroma_y_shift){
2805 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2806 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2810 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2811 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2812 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2813 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2816 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2817 add_dct(s, block[0], 0, dest_y , dct_linesize);
2818 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2819 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2820 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2822 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2823 if(s->chroma_y_shift){//Chroma420
2824 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2825 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2828 dct_linesize = uvlinesize << s->interlaced_dct;
2829 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2831 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2832 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2833 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2834 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2835 if(!s->chroma_x_shift){//Chroma444
2836 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2837 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2838 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2839 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2844 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2845 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2848 /* dct only in intra block */
2849 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2850 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2851 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2852 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2853 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2855 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2856 if(s->chroma_y_shift){
2857 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2858 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2862 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2863 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2864 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2865 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra path: blocks are already dequantized, plain idct_put */
2869 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2870 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2871 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2872 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2874 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2875 if(s->chroma_y_shift){
2876 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2877 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2880 dct_linesize = uvlinesize << s->interlaced_dct;
2881 dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
2883 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2884 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2885 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2886 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2887 if(!s->chroma_x_shift){//Chroma444
2888 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2889 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2890 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2891 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* reconstructed into the scratchpad: copy back to the visible picture */
2899 s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2900 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2901 s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatch MB reconstruction to the specialized
 * MPV_decode_mb_internal() instantiation (lowres on/off x mpeg12 on/off),
 * so the compile-time flags are constant inside the always-inline body. */
2906 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2908 if(s->out_format == FMT_MPEG1) {
2909 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2910 else MPV_decode_mb_internal(s, block, 0, 1);
2913 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2914 else MPV_decode_mb_internal(s, block, 0, 0);
2918 * @param h is the normal height, this will be reduced automatically if needed for the last row
/*
 * Draw the picture edges for a finished band of rows and, if the caller
 * installed a draw_horiz_band callback, invoke it with per-plane offsets.
 * For frame B-pictures (non-SVQ3) the band is reported from a different
 * source picture (the selection lines are partly missing from this excerpt).
 * NOTE(review): gaps in the embedded numbering show lines are missing
 * (e.g. the 'src' selection and offset[1]); only the visible code is
 * documented.
 */
2920 void ff_draw_horiz_band(AVCodecContext *avctx, DSPContext *dsp, Picture *cur,
2921 Picture *last, int y, int h, int picture_structure,
2922 int first_field, int draw_edges, int low_delay,
2923 int v_edge_pos, int h_edge_pos)
2925 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2926 int hshift = desc->log2_chroma_w;
2927 int vshift = desc->log2_chroma_h;
2928 const int field_pic = picture_structure != PICT_FRAME;
/* edge drawing is skipped for hwaccel/VDPAU output and EMU_EDGE mode */
2934 if (!avctx->hwaccel &&
2935 !(avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
2938 !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2939 int *linesize = cur->f.linesize;
2940 int sides = 0, edge_h;
2941 if (y==0) sides |= EDGE_TOP;
2942 if (y + h >= v_edge_pos)
2943 sides |= EDGE_BOTTOM;
2945 edge_h= FFMIN(h, v_edge_pos - y);
2947 dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2948 linesize[0], h_edge_pos, edge_h,
2949 EDGE_WIDTH, EDGE_WIDTH, sides);
2950 dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2951 linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2952 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2953 dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2954 linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2955 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
/* clip the band height to the visible picture for the callback */
2958 h = FFMIN(h, avctx->height - y);
2960 if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2962 if (avctx->draw_horiz_band) {
2964 int offset[AV_NUM_DATA_POINTERS];
2967 if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2968 (avctx->slice_flags & SLICE_FLAG_CODED_ORDER))
2975 if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2976 picture_structure == PICT_FRAME &&
2977 avctx->codec_id != AV_CODEC_ID_SVQ3) {
2978 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2981 offset[0]= y * src->linesize[0];
2983 offset[2]= (y >> vshift) * src->linesize[1];
2984 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2990 avctx->draw_horiz_band(avctx, src, offset,
2991 y, picture_structure, h);
/* Convenience wrapper: forward a finished band of the current picture to
 * ff_draw_horiz_band() using the state kept in MpegEncContext; edges are
 * drawn only for unrestricted-MV, non-intra-only streams. */
2995 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2997 int draw_edges = s->unrestricted_mv && !s->intra_only;
2998 ff_draw_horiz_band(s->avctx, &s->dsp, s->current_picture_ptr,
2999 s->last_picture_ptr, y, h, s->picture_structure,
3000 s->first_field, draw_edges, s->low_delay,
3001 s->v_edge_pos, s->h_edge_pos);
/*
 * Set up s->block_index[] (per-8x8-block indices into the dc/ac prediction
 * arrays: 4 luma, then Cb, then Cr) and s->dest[] (plane pointers for the
 * macroblock left of the current one) for the current MB row, honouring
 * lowres scaling, chroma subsampling and field pictures.
 */
3004 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
3005 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
3006 const int uvlinesize = s->current_picture.f.linesize[1];
/* MB is 16 pixels = 1<<4, reduced by the lowres shift */
3007 const int mb_size= 4 - s->avctx->lowres;
3009 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3010 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3011 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3012 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* chroma blocks live after the luma area, one entry per MB */
3013 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3014 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3015 //block_index is not used by mpeg2, so it is not affected by chroma_format
3017 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
3018 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3019 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3021 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
3023 if(s->picture_structure==PICT_FRAME){
3024 s->dest[0] += s->mb_y * linesize << mb_size;
3025 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3026 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: rows advance at half the MB-row rate */
3028 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3029 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3030 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3031 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
3037 * Permute an 8x8 block.
3038 * @param block the block which will be permuted according to the given permutation vector
3039 * @param permutation the permutation vector
3040 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
3041 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
3042 * (inverse) permutated to scantable order!
/* NOTE(review): the temp[] declaration, the copy-out loop body and the
 * block[] clearing between the two loops are not visible in this excerpt
 * (gaps in the embedded numbering); only the visible code is documented. */
3044 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
3050 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* first pass: stash the significant coefficients (scan order up to 'last') */
3052 for(i=0; i<=last; i++){
3053 const int j= scantable[i];
/* second pass: write them back at their permuted positions */
3058 for(i=0; i<=last; i++){
3059 const int j= scantable[i];
3060 const int perm_j= permutation[j];
3061 block[perm_j]= temp[j];
/*
 * Flush decoder state on seek/reset: drop all picture references, reset the
 * MB position, clear the parse context and discard buffered bitstream data.
 */
3065 void ff_mpeg_flush(AVCodecContext *avctx){
3067 MpegEncContext *s = avctx->priv_data;
/* tolerate being called before init/after close */
3069 if(s==NULL || s->picture==NULL)
3072 for (i = 0; i < MAX_PICTURE_COUNT; i++)
3073 ff_mpeg_unref_picture(s, &s->picture[i]);
3074 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
3076 ff_mpeg_unref_picture(s, &s->current_picture);
3077 ff_mpeg_unref_picture(s, &s->last_picture);
3078 ff_mpeg_unref_picture(s, &s->next_picture);
3080 s->mb_x= s->mb_y= 0;
3083 s->parse_context.state= -1;
3084 s->parse_context.frame_start_found= 0;
3085 s->parse_context.overread= 0;
3086 s->parse_context.overread_index= 0;
3087 s->parse_context.index= 0;
3088 s->parse_context.last_index= 0;
3089 s->bitstream_buffer_size=0;
/*
 * MPEG-1 intra dequantizer: scale the DC by the luma/chroma DC scale and
 * each AC coefficient by qscale * intra matrix (>>3), then apply MPEG-1's
 * odd-ification "(level - 1) | 1" (mismatch control toward odd values).
 * NOTE(review): the sign handling branches around the two symmetric level
 * computations are not visible in this excerpt; only the visible code is
 * documented.
 */
3093 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
3094 int16_t *block, int n, int qscale)
3096 int i, level, nCoeffs;
3097 const uint16_t *quant_matrix;
3099 nCoeffs= s->block_last_index[n];
/* n < 4 are luma blocks, the rest chroma */
3101 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3102 /* XXX: only mpeg1 */
3103 quant_matrix = s->intra_matrix;
3104 for(i=1;i<=nCoeffs;i++) {
3105 int j= s->intra_scantable.permutated[i];
3110 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3111 level = (level - 1) | 1;
3114 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3115 level = (level - 1) | 1;
/*
 * MPEG-1 inter dequantizer: reconstruct each coefficient as
 * ((2*level + 1) * qscale * inter_matrix[j]) >> 4 and force the result odd
 * via "(level - 1) | 1" (MPEG-1 mismatch control). Starts at i=0 — inter
 * blocks have no special DC handling.
 * NOTE(review): sign branches around the symmetric computations are not
 * visible in this excerpt.
 */
3122 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
3123 int16_t *block, int n, int qscale)
3125 int i, level, nCoeffs;
3126 const uint16_t *quant_matrix;
3128 nCoeffs= s->block_last_index[n];
3130 quant_matrix = s->inter_matrix;
3131 for(i=0; i<=nCoeffs; i++) {
3132 int j= s->intra_scantable.permutated[i];
3137 level = (((level << 1) + 1) * qscale *
3138 ((int) (quant_matrix[j]))) >> 4;
3139 level = (level - 1) | 1;
3142 level = (((level << 1) + 1) * qscale *
3143 ((int) (quant_matrix[j]))) >> 4;
3144 level = (level - 1) | 1;
/*
 * MPEG-2 intra dequantizer: like the MPEG-1 variant but without the
 * odd-ification step (MPEG-2 uses a different mismatch control). With
 * alternate_scan all 64 coefficients are processed regardless of
 * block_last_index.
 * NOTE(review): sign branches around the symmetric computations are not
 * visible in this excerpt.
 */
3151 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
3152 int16_t *block, int n, int qscale)
3154 int i, level, nCoeffs;
3155 const uint16_t *quant_matrix;
3157 if(s->alternate_scan) nCoeffs= 63;
3158 else nCoeffs= s->block_last_index[n];
3160 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3161 quant_matrix = s->intra_matrix;
3162 for(i=1;i<=nCoeffs;i++) {
3163 int j= s->intra_scantable.permutated[i];
3168 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3171 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * Bit-exact variant of the MPEG-2 intra dequantizer; the visible arithmetic
 * matches dct_unquantize_mpeg2_intra_c, with the distinguishing parts
 * (presumably the mismatch/sum accumulation — TODO confirm) in lines not
 * visible in this excerpt.
 */
3178 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
3179 int16_t *block, int n, int qscale)
3181 int i, level, nCoeffs;
3182 const uint16_t *quant_matrix;
3185 if(s->alternate_scan) nCoeffs= 63;
3186 else nCoeffs= s->block_last_index[n];
3188 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3190 quant_matrix = s->intra_matrix;
3191 for(i=1;i<=nCoeffs;i++) {
3192 int j= s->intra_scantable.permutated[i];
3197 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3200 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * MPEG-2 inter dequantizer: ((2*level + 1) * qscale * inter_matrix[j]) >> 4
 * per coefficient, no odd-ification (unlike MPEG-1). With alternate_scan
 * all 64 coefficients are processed.
 * NOTE(review): sign branches and the mismatch-control tail are not visible
 * in this excerpt.
 */
3209 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
3210 int16_t *block, int n, int qscale)
3212 int i, level, nCoeffs;
3213 const uint16_t *quant_matrix;
3216 if(s->alternate_scan) nCoeffs= 63;
3217 else nCoeffs= s->block_last_index[n];
3219 quant_matrix = s->inter_matrix;
3220 for(i=0; i<=nCoeffs; i++) {
3221 int j= s->intra_scantable.permutated[i];
3226 level = (((level << 1) + 1) * qscale *
3227 ((int) (quant_matrix[j]))) >> 4;
3230 level = (((level << 1) + 1) * qscale *
3231 ((int) (quant_matrix[j]))) >> 4;
/*
 * H.263 intra dequantizer: uniform reconstruction level * qmul +/- qadd
 * (sign-dependent), with qadd forced odd; DC is scaled by the luma/chroma
 * DC scale. May be entered with block_last_index < 0 only under h263_aic.
 * NOTE(review): the qmul setup, the nCoeffs fallback for the AIC case and
 * the sign branches are not visible in this excerpt.
 */
3240 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
3241 int16_t *block, int n, int qscale)
3243 int i, level, qmul, qadd;
3246 av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
3251 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3252 qadd = (qscale - 1) | 1;
/* raster_end maps the scan-order last index to a raster-order bound */
3259 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3261 for(i=1; i<=nCoeffs; i++) {
3265 level = level * qmul - qadd;
3267 level = level * qmul + qadd;
/*
 * H.263 inter dequantizer: same uniform level * qmul +/- qadd reconstruction
 * as the intra variant but starting at i=0 (no DC special case) and
 * requiring a coded block (block_last_index >= 0).
 * NOTE(review): the qmul setup and sign branches are not visible in this
 * excerpt.
 */
3274 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
3275 int16_t *block, int n, int qscale)
3277 int i, level, qmul, qadd;
3280 av_assert2(s->block_last_index[n]>=0);
3282 qadd = (qscale - 1) | 1;
3285 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3287 for(i=0; i<=nCoeffs; i++) {
3291 level = level * qmul - qadd;
3293 level = level * qmul + qadd;
3301 * set qscale and update qscale dependent variables.
/* Clamp qscale to the valid range (upper bound 31 visible below; the lower
 * clamp line is not visible in this excerpt) and refresh the derived
 * chroma-qscale and DC-scale values from their lookup tables. */
3303 void ff_set_qscale(MpegEncContext * s, int qscale)
3307 else if (qscale > 31)
3311 s->chroma_qscale= s->chroma_qscale_table[qscale];
3313 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3314 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report the current MB row as decoded to waiting frame threads; only for
 * reference-usable pictures (not B frames, not partitioned frames, and not
 * after an error has been flagged by the error-resilience code). */
3317 void ff_MPV_report_decode_progress(MpegEncContext *s)
3319 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
3320 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
3323 #if CONFIG_ERROR_RESILIENCE
/* Mirror the per-frame state the error-resilience module needs out of the
 * MpegEncContext into its ERContext, then start ER for this frame. */
3324 void ff_mpeg_er_frame_start(MpegEncContext *s)
3326 ERContext *er = &s->er;
3328 er->cur_pic = s->current_picture_ptr;
3329 er->last_pic = s->last_picture_ptr;
3330 er->next_pic = s->next_picture_ptr;
3332 er->pp_time = s->pp_time;
3333 er->pb_time = s->pb_time;
3334 er->quarter_sample = s->quarter_sample;
3335 er->partitioned_frame = s->partitioned_frame;
3337 ff_er_frame_start(er);
3339 #endif /* CONFIG_ERROR_RESILIENCE */