2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
39 #include "xvmc_internal.h"
/* Forward declarations of the C reference implementations of the
 * per-standard inverse-quantization routines. ff_dct_common_init()
 * installs these into the MpegEncContext function pointers, where
 * arch-specific init code may later override them. */
46 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
47 DCTELEM *block, int n, int qscale);
48 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
49 DCTELEM *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
51 DCTELEM *block, int n, int qscale);
52 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
53 DCTELEM *block, int n, int qscale);
54 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
55 DCTELEM *block, int n, int qscale);
56 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
57 DCTELEM *block, int n, int qscale);
58 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
59 DCTELEM *block, int n, int qscale);
62 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default chroma qscale mapping: the identity, i.e. chroma uses the
 * same quantizer scale as luma. Installed in ff_MPV_common_defaults(). */
68 static const uint8_t ff_default_chroma_qscale_table[32] = {
69 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
70 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
71 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* DC coefficient scale tables, indexed by qscale (all entries are
 * constant per table — the DC scale does not depend on qscale here).
 * MPEG-1 always divides the intra DC coefficient by 8. */
74 const uint8_t ff_mpeg1_dc_scale_table[128] = {
75 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale for intra_dc_precision == 1 (divide by 4). */
86 static const uint8_t mpeg2_dc_scale_table1[128] = {
87 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale for intra_dc_precision == 2 (divide by 2). */
98 static const uint8_t mpeg2_dc_scale_table2[128] = {
99 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale for intra_dc_precision == 3 (no division). */
110 static const uint8_t mpeg2_dc_scale_table3[128] = {
111 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Table of the four DC scale tables above, indexed by the MPEG-2
 * intra_dc_precision field (0..3). */
122 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
123 ff_mpeg1_dc_scale_table,
124 mpeg2_dc_scale_table1,
125 mpeg2_dc_scale_table2,
126 mpeg2_dc_scale_table3,
/* Supported pixel-format lists for 4:2:0 decoding (software and
 * hwaccel variants). NOTE(review): the initializer contents are not
 * visible in this chunk — presumably YUV420P plus hwaccel formats,
 * terminated by PIX_FMT_NONE; confirm against the full file. */
129 const enum PixelFormat ff_pixfmt_list_420[] = {
134 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/* Scan [p, end) for an MPEG start code (0x000001xx), maintaining a
 * rolling 32-bit shift register in *state across calls so a start
 * code split between buffers is still found. Returns a pointer just
 * past the start code (or end if none was found). */
142 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
144 uint32_t * restrict state)
/* Prime the shift register with up to 3 bytes so a code straddling
 * the previous buffer boundary is detected. */
152 for (i = 0; i < 3; i++) {
153 uint32_t tmp = *state << 8;
154 *state = tmp + *(p++);
155 if (tmp == 0x100 || p == end)
/* Skip ahead based on how many of the trailing bytes can possibly be
 * part of a 00 00 01 prefix; this lets the scan advance 1-3 bytes at
 * a time instead of byte-by-byte. */
160 if (p[-1] > 1 ) p += 3;
161 else if (p[-2] ) p += 2;
162 else if (p[-3]|(p[-1]-1)) p++;
/* Clamp to the buffer and step back so the final per-byte loop
 * re-examines the last candidate window. */
169 p = FFMIN(p, end) - 4;
175 /* init common dct for both encoder and decoder */
176 av_cold int ff_dct_common_init(MpegEncContext *s)
178 ff_dsputil_init(&s->dsp, s->avctx);
/* Install the C reference unquantizers; arch-specific init below may
 * replace them with optimized versions. */
180 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
181 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
182 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
183 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
184 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
185 if (s->flags & CODEC_FLAG_BITEXACT)
186 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
187 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture overrides (each call is presumably guarded by an
 * ARCH_*/HAVE_* condition elided from this chunk). */
190 ff_MPV_common_init_mmx(s);
192 ff_MPV_common_init_axp(s);
194 ff_MPV_common_init_mmi(s);
196 ff_MPV_common_init_arm(s);
198 ff_MPV_common_init_altivec(s);
200 ff_MPV_common_init_bfin(s);
203 /* load & permutate scantables
204 * note: only wmv uses different ones
206 if (s->alternate_scan) {
207 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
208 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
210 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
211 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
213 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
214 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy a Picture; the copy's frame is marked FF_BUFFER_TYPE_COPY so
 * it will not be released through the user buffer callbacks. */
219 void ff_copy_picture(Picture *dst, Picture *src)
222 dst->f.type = FF_BUFFER_TYPE_COPY;
226 * Release a frame buffer
228 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
230 /* Windows Media Image codecs allocate internal buffers with different
231 * dimensions; ignore user defined callbacks for these
233 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE && s->codec_id != AV_CODEC_ID_VC1IMAGE)
234 ff_thread_release_buffer(s->avctx, &pic->f);
/* WMV3IMAGE/VC1IMAGE path: release through the default callback. */
236 avcodec_default_release_buffer(s->avctx, &pic->f);
/* Free any hwaccel private data attached to the frame. */
237 av_freep(&pic->f.hwaccel_picture_private);
241 * Allocate a frame buffer
243 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* Allocate hwaccel private data first, if the active hwaccel needs it. */
247 if (s->avctx->hwaccel) {
248 assert(!pic->f.hwaccel_picture_private);
249 if (s->avctx->hwaccel->priv_data_size) {
250 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
251 if (!pic->f.hwaccel_picture_private) {
252 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* WMV3IMAGE/VC1IMAGE bypass user callbacks (see free_frame_buffer). */
258 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE && s->codec_id != AV_CODEC_ID_VC1IMAGE)
259 r = ff_thread_get_buffer(s->avctx, &pic->f);
261 r = avcodec_default_get_buffer(s->avctx, &pic->f);
/* Validate that get_buffer() actually delivered a usable frame. */
263 if (r < 0 || !pic->f.type || !pic->f.data[0]) {
264 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
265 r, pic->f.type, pic->f.data[0]);
266 av_freep(&pic->f.hwaccel_picture_private);
/* The context-wide strides must stay constant across frames. */
270 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
271 s->uvlinesize != pic->f.linesize[1])) {
272 av_log(s->avctx, AV_LOG_ERROR,
273 "get_buffer() failed (stride changed)\n");
274 free_frame_buffer(s, pic);
/* U and V planes must share a stride. */
278 if (pic->f.linesize[1] != pic->f.linesize[2]) {
279 av_log(s->avctx, AV_LOG_ERROR,
280 "get_buffer() failed (uv stride mismatch)\n");
281 free_frame_buffer(s, pic);
289 * Allocate a Picture.
290 * The pixels are allocated/set by calling get_buffer() if shared = 0
292 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
294 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
296 // the + 1 is needed so memset(,,stride*height) does not sig11
298 const int mb_array_size = s->mb_stride * s->mb_height;
299 const int b8_array_size = s->b8_stride * s->mb_height * 2;
300 const int b4_array_size = s->b4_stride * s->mb_height * 4;
/* shared: pixel data was supplied by the caller; just tag the type. */
305 assert(pic->f.data[0]);
306 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
307 pic->f.type = FF_BUFFER_TYPE_SHARED;
/* non-shared: allocate pixels via get_buffer() and latch strides. */
309 assert(!pic->f.data[0]);
311 if (alloc_frame_buffer(s, pic) < 0)
314 s->linesize = pic->f.linesize[0];
315 s->uvlinesize = pic->f.linesize[1];
/* First-time allocation of the per-picture side-data tables. */
318 if (pic->f.qscale_table == NULL) {
/* Encoder-only statistics tables (variance, mean). */
320 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
321 mb_array_size * sizeof(int16_t), fail)
322 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
323 mb_array_size * sizeof(int16_t), fail)
324 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
325 mb_array_size * sizeof(int8_t ), fail)
328 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
329 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
330 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
331 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
333 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
334 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
/* Offset into the base arrays so out-of-frame neighbors (row -1,
 * column -1) can be addressed without bounds checks. */
336 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
337 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
/* H.264: 4x4 motion granularity (motion_subsample_log2 = 2). */
338 if (s->out_format == FMT_H264) {
339 for (i = 0; i < 2; i++) {
340 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
341 2 * (b4_array_size + 4) * sizeof(int16_t),
343 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
344 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
345 4 * mb_array_size * sizeof(uint8_t), fail)
347 pic->f.motion_subsample_log2 = 2;
/* H.263-family / encoder / MV-debug: 8x8 granularity. */
348 } else if (s->out_format == FMT_H263 || s->encoding ||
349 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
350 for (i = 0; i < 2; i++) {
351 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
352 2 * (b8_array_size + 4) * sizeof(int16_t),
354 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
355 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
356 4 * mb_array_size * sizeof(uint8_t), fail)
358 pic->f.motion_subsample_log2 = 3;
360 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
361 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
362 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
364 pic->f.qstride = s->mb_stride;
365 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
366 1 * sizeof(AVPanScan), fail)
372 fail: // for the FF_ALLOCZ_OR_GOTO macro
374 free_frame_buffer(s, pic);
379 * Deallocate a picture.
381 static void free_picture(MpegEncContext *s, Picture *pic)
/* Release the pixel buffer unless the caller owns it (shared type). */
385 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
386 free_frame_buffer(s, pic);
/* Free every side-data table allocated in ff_alloc_picture(). */
389 av_freep(&pic->mb_var);
390 av_freep(&pic->mc_mb_var);
391 av_freep(&pic->mb_mean);
392 av_freep(&pic->f.mbskip_table);
393 av_freep(&pic->qscale_table_base);
394 av_freep(&pic->mb_type_base);
395 av_freep(&pic->f.dct_coeff);
396 av_freep(&pic->f.pan_scan);
397 pic->f.mb_type = NULL;
398 for (i = 0; i < 2; i++) {
399 av_freep(&pic->motion_val_base[i]);
400 av_freep(&pic->f.ref_index[i]);
/* Shared pixels are not ours to free; just drop the pointers. */
403 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
404 for (i = 0; i < 4; i++) {
406 pic->f.data[i] = NULL;
/* Allocate the per-slice-thread scratch buffers of a (possibly
 * duplicated) context. Returns 0 on success, -1 on allocation failure
 * (partial allocations are released later by ff_MPV_common_end()). */
412 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
414 int y_size = s->b8_stride * (2 * s->mb_height + 1);
415 int c_size = s->mb_stride * (s->mb_height + 1);
416 int yc_size = y_size + 2 * c_size;
419 // edge emu needs blocksize + filter length - 1
420 // (= 17x17 for halfpel / 21x21 for h264)
421 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
422 (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
424 // FIXME should be linesize instead of s->width * 2
425 // but that is not known before get_buffer()
426 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
427 (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
/* The scratchpads all alias one allocation; only me.scratchpad is
 * freed (see free_duplicate_context()). */
428 s->me.temp = s->me.scratchpad;
429 s->rd_scratchpad = s->me.scratchpad;
430 s->b_scratchpad = s->me.scratchpad;
431 s->obmc_scratchpad = s->me.scratchpad + 16;
433 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
434 ME_MAP_SIZE * sizeof(uint32_t), fail)
435 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
436 ME_MAP_SIZE * sizeof(uint32_t), fail)
437 if (s->avctx->noise_reduction) {
438 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
439 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered. */
442 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
443 s->block = s->blocks[0];
445 for (i = 0; i < 12; i++) {
446 s->pblocks[i] = &s->block[i];
/* H.263-family AC prediction values: one Y plane then two chroma
 * planes, each offset by one row+column for out-of-frame neighbors. */
449 if (s->out_format == FMT_H263) {
451 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
452 yc_size * sizeof(int16_t) * 16, fail);
453 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
454 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
455 s->ac_val[2] = s->ac_val[1] + c_size;
460 return -1; // free() through ff_MPV_common_end()
/* Free the scratch buffers allocated by init_duplicate_context().
 * Note only me.scratchpad is freed; the other scratchpads alias it
 * and are merely cleared. */
463 static void free_duplicate_context(MpegEncContext *s)
468 av_freep(&s->edge_emu_buffer);
469 av_freep(&s->me.scratchpad);
473 s->obmc_scratchpad = NULL;
475 av_freep(&s->dct_error_sum);
476 av_freep(&s->me.map);
477 av_freep(&s->me.score_map);
478 av_freep(&s->blocks);
479 av_freep(&s->ac_val_base);
/* Save the per-thread fields of src into bak so they survive a whole-
 * struct memcpy in ff_update_duplicate_context(). Further COPY()
 * lines are elided from this chunk. */
483 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
485 #define COPY(a) bak->a = src->a
486 COPY(edge_emu_buffer);
491 COPY(obmc_scratchpad);
498 COPY(me.map_generation);
/* Refresh a slice-thread context from the master: copy the whole
 * struct, then restore dst's own per-thread buffers (saved in bak)
 * and re-point pblocks into dst's block array. */
510 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
514 // FIXME copy only needed parts
516 backup_duplicate_context(&bak, dst);
517 memcpy(dst, src, sizeof(MpegEncContext));
518 backup_duplicate_context(dst, &bak);
519 for (i = 0; i < 12; i++) {
520 dst->pblocks[i] = &dst->block[i];
522 // STOP_TIMER("update_duplicate_context")
523 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: synchronize the destination thread's decoding
 * state from the source thread after it finished a frame. */
526 int ff_mpeg_update_thread_context(AVCodecContext *dst,
527 const AVCodecContext *src)
529 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
531 if (dst == src || !s1->context_initialized)
534 // FIXME can parameters change on I-frames?
535 // in that case dst may need a reinit
536 if (!s->context_initialized) {
/* First use of this thread context: clone the source wholesale,
 * then give it its own picture range and bitstream buffer. */
537 memcpy(s, s1, sizeof(MpegEncContext));
540 s->picture_range_start += MAX_PICTURE_COUNT;
541 s->picture_range_end += MAX_PICTURE_COUNT;
542 s->bitstream_buffer = NULL;
543 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
545 ff_MPV_common_init(s);
548 s->avctx->coded_height = s1->avctx->coded_height;
549 s->avctx->coded_width = s1->avctx->coded_width;
550 s->avctx->width = s1->avctx->width;
551 s->avctx->height = s1->avctx->height;
553 s->coded_picture_number = s1->coded_picture_number;
554 s->picture_number = s1->picture_number;
555 s->input_picture_number = s1->input_picture_number;
/* Copy the picture array and the last/next/current Picture structs
 * (the field range between last_picture and last_picture_ptr). */
557 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
558 memcpy(&s->last_picture, &s1->last_picture,
559 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
/* Pointers must be rebased from s1's picture array into s's. */
561 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
562 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
563 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
565 // Error/bug resilience
566 s->next_p_frame_damaged = s1->next_p_frame_damaged;
567 s->workaround_bugs = s1->workaround_bugs;
/* MPEG-4 timing fields: bulk-copy the range up to 'shape'. */
570 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
571 (char *) &s1->shape - (char *) &s1->time_increment_bits);
574 s->max_b_frames = s1->max_b_frames;
575 s->low_delay = s1->low_delay;
576 s->dropable = s1->dropable;
578 // DivX handling (doesn't work)
579 s->divx_packed = s1->divx_packed;
581 if (s1->bitstream_buffer) {
582 if (s1->bitstream_buffer_size +
583 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
/* NOTE(review): the size passed is s1->allocated_bitstream_buffer_size;
 * s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE would seem
 * to be the required amount — verify against upstream history. */
584 av_fast_malloc(&s->bitstream_buffer,
585 &s->allocated_bitstream_buffer_size,
586 s1->allocated_bitstream_buffer_size);
587 s->bitstream_buffer_size = s1->bitstream_buffer_size;
588 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
589 s1->bitstream_buffer_size);
590 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
591 FF_INPUT_BUFFER_PADDING_SIZE);
594 // MPEG2/interlacing info
595 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
596 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
598 if (!s1->first_field) {
599 s->last_pict_type = s1->pict_type;
600 if (s1->current_picture_ptr)
601 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
603 if (s1->pict_type != AV_PICTURE_TYPE_B) {
604 s->last_non_b_pict_type = s1->pict_type;
612 * Set the given MpegEncContext to common defaults
613 * (same for encoding and decoding).
614 * The changed fields will not depend upon the
615 * prior state of the MpegEncContext.
617 void ff_MPV_common_defaults(MpegEncContext *s)
/* Default quantizer-related tables (MPEG-1 DC scale, identity chroma
 * qscale mapping). */
619 s->y_dc_scale_table =
620 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
621 s->chroma_qscale_table = ff_default_chroma_qscale_table;
622 s->progressive_frame = 1;
623 s->progressive_sequence = 1;
624 s->picture_structure = PICT_FRAME;
626 s->coded_picture_number = 0;
627 s->picture_number = 0;
628 s->input_picture_number = 0;
630 s->picture_in_gop_number = 0;
635 s->picture_range_start = 0;
636 s->picture_range_end = MAX_PICTURE_COUNT;
638 s->slice_context_count = 1;
642 * Set the given MpegEncContext to defaults for decoding.
643 * the changed fields will not depend upon
644 * the prior state of the MpegEncContext.
646 void ff_MPV_decode_defaults(MpegEncContext *s)
648 ff_MPV_common_defaults(s);
652 * init common structure for both encoder and decoder.
653 * this assumes that some variables like width/height are already set
655 av_cold int ff_MPV_common_init(MpegEncContext *s)
657 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
658 int nb_slices = (HAVE_THREADS &&
659 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
660 s->avctx->thread_count : 1;
662 if (s->encoding && s->avctx->slices)
663 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 needs mb_height rounded to macroblock pairs. */
665 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
666 s->mb_height = (s->height + 31) / 32 * 2;
667 else if (s->codec_id != AV_CODEC_ID_H264)
668 s->mb_height = (s->height + 15) / 16;
670 if (s->avctx->pix_fmt == PIX_FMT_NONE) {
671 av_log(s->avctx, AV_LOG_ERROR,
672 "decoding to PIX_FMT_NONE is not supported.\n");
/* Clamp the slice-thread count to what the frame height allows. */
676 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
679 max_slices = FFMIN(MAX_THREADS, s->mb_height);
681 max_slices = MAX_THREADS;
682 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
683 " reducing to %d\n", nb_slices, max_slices);
684 nb_slices = max_slices;
687 if ((s->width || s->height) &&
688 av_image_check_size(s->width, s->height, 0, s->avctx))
691 ff_dct_common_init(s);
693 s->flags = s->avctx->flags;
694 s->flags2 = s->avctx->flags2;
/* Derive macroblock/block geometry from the frame dimensions.
 * The + 1 strides leave room for one out-of-frame neighbor column. */
696 if (s->width && s->height) {
697 s->mb_width = (s->width + 15) / 16;
698 s->mb_stride = s->mb_width + 1;
699 s->b8_stride = s->mb_width * 2 + 1;
700 s->b4_stride = s->mb_width * 4 + 1;
701 mb_array_size = s->mb_height * s->mb_stride;
702 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
704 /* set chroma shifts */
705 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
708 /* set default edge pos, will be overriden
709 * in decode_header if needed */
710 s->h_edge_pos = s->mb_width * 16;
711 s->v_edge_pos = s->mb_height * 16;
713 s->mb_num = s->mb_width * s->mb_height;
718 s->block_wrap[3] = s->b8_stride;
720 s->block_wrap[5] = s->mb_stride;
722 y_size = s->b8_stride * (2 * s->mb_height + 1);
723 c_size = s->mb_stride * (s->mb_height + 1);
724 yc_size = y_size + 2 * c_size;
726 /* convert fourcc to upper case */
727 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
729 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
731 s->avctx->coded_frame = &s->current_picture.f;
/* mb index (raster order without stride padding) -> xy (with stride). */
733 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
734 fail); // error ressilience code looks cleaner with this
735 for (y = 0; y < s->mb_height; y++)
736 for (x = 0; x < s->mb_width; x++)
737 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
739 s->mb_index2xy[s->mb_height * s->mb_width] =
740 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
743 /* Allocate MV tables */
744 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
745 mv_table_size * 2 * sizeof(int16_t), fail);
746 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
747 mv_table_size * 2 * sizeof(int16_t), fail);
748 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
749 mv_table_size * 2 * sizeof(int16_t), fail);
750 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
751 mv_table_size * 2 * sizeof(int16_t), fail);
752 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
753 mv_table_size * 2 * sizeof(int16_t), fail);
754 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
755 mv_table_size * 2 * sizeof(int16_t), fail);
/* The working pointers are offset into the bases (offset expressions
 * are elided from this chunk). */
756 s->p_mv_table = s->p_mv_table_base +
758 s->b_forw_mv_table = s->b_forw_mv_table_base +
760 s->b_back_mv_table = s->b_back_mv_table_base +
762 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
764 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
766 s->b_direct_mv_table = s->b_direct_mv_table_base +
/* MSMPEG4 AC statistics (encoder). */
769 if (s->msmpeg4_version) {
770 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
771 2 * 2 * (MAX_LEVEL + 1) *
772 (MAX_RUN + 1) * 2 * sizeof(int), fail);
774 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
776 /* Allocate MB type table */
777 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
778 sizeof(uint16_t), fail); // needed for encoding
780 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
/* Quantization matrices: 32 qscales x 64 coefficients each. */
783 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
784 64 * 32 * sizeof(int), fail);
785 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
786 64 * 32 * sizeof(int), fail);
787 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
788 64 * 32 * 2 * sizeof(uint16_t), fail);
789 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
790 64 * 32 * 2 * sizeof(uint16_t), fail);
791 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
792 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
793 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
794 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
796 if (s->avctx->noise_reduction) {
797 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
798 2 * 64 * sizeof(uint16_t), fail);
801 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
802 mb_array_size * sizeof(float), fail);
803 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
804 mb_array_size * sizeof(float), fail);
/* Picture pool: one window of MAX_PICTURE_COUNT per frame thread. */
808 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
809 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
810 s->picture_count * sizeof(Picture), fail);
811 for (i = 0; i < s->picture_count; i++) {
812 avcodec_get_frame_defaults(&s->picture[i].f);
815 if (s->width && s->height) {
816 FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
817 mb_array_size * sizeof(uint8_t), fail);
818 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
819 mb_array_size * sizeof(uint8_t), fail);
821 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
822 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
823 /* interlaced direct mode decoding tables */
824 for (i = 0; i < 2; i++) {
826 for (j = 0; j < 2; j++) {
827 for (k = 0; k < 2; k++) {
828 FF_ALLOCZ_OR_GOTO(s->avctx,
829 s->b_field_mv_table_base[i][j][k],
830 mv_table_size * 2 * sizeof(int16_t),
832 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
835 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
836 mb_array_size * 2 * sizeof(uint8_t),
838 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
839 mv_table_size * 2 * sizeof(int16_t),
841 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
844 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
845 mb_array_size * 2 * sizeof(uint8_t),
849 if (s->out_format == FMT_H263) {
851 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
852 s->coded_block = s->coded_block_base + s->b8_stride + 1;
854 /* cbp, ac_pred, pred_dir */
855 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
856 mb_array_size * sizeof(uint8_t), fail);
857 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
858 mb_array_size * sizeof(uint8_t), fail);
861 if (s->h263_pred || s->h263_plus || !s->encoding) {
863 // MN: we need these for error resilience of intra-frames
864 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
865 yc_size * sizeof(int16_t), fail);
866 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
867 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
868 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor (128 << 3). */
869 for (i = 0; i < yc_size; i++)
870 s->dc_val_base[i] = 1024;
873 /* which mb is a intra block */
874 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
875 memset(s->mbintra_table, 1, mb_array_size);
877 /* init macroblock skip table */
878 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
879 // Note the + 1 is for a quicker mpeg4 slice_end detection
881 s->parse_context.state = -1;
882 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
883 s->avctx->debug_mv) {
/* NOTE(review): these av_malloc() results are not checked here —
 * presumably tolerated because visualization is debug-only. */
884 s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
885 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
886 s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
887 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
888 s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
889 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
893 s->context_initialized = 1;
894 s->thread_context[0] = s;
/* Create and initialize one duplicate context per extra slice thread;
 * each gets a contiguous band of macroblock rows. */
896 if (s->width && s->height) {
898 for (i = 1; i < nb_slices; i++) {
899 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
900 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
903 for (i = 0; i < nb_slices; i++) {
904 if (init_duplicate_context(s->thread_context[i], s) < 0)
906 s->thread_context[i]->start_mb_y =
907 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
908 s->thread_context[i]->end_mb_y =
909 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
912 if (init_duplicate_context(s, s) < 0)
915 s->end_mb_y = s->mb_height;
917 s->slice_context_count = nb_slices;
/* On any allocation failure, unwind everything. */
922 ff_MPV_common_end(s);
926 /* init common structure for both encoder and decoder */
927 void ff_MPV_common_end(MpegEncContext *s)
/* Tear down slice-thread duplicate contexts (index 0 is s itself and
 * is only freed of its scratch buffers, not the struct). */
931 if (s->slice_context_count > 1) {
932 for (i = 0; i < s->slice_context_count; i++) {
933 free_duplicate_context(s->thread_context[i]);
935 for (i = 1; i < s->slice_context_count; i++) {
936 av_freep(&s->thread_context[i]);
938 s->slice_context_count = 1;
939 } else free_duplicate_context(s);
941 av_freep(&s->parse_context.buffer);
942 s->parse_context.buffer_size = 0;
/* Free every table allocated in ff_MPV_common_init(); av_freep() on
 * NULL is a no-op, so partially-initialized contexts are safe. */
944 av_freep(&s->mb_type);
945 av_freep(&s->p_mv_table_base);
946 av_freep(&s->b_forw_mv_table_base);
947 av_freep(&s->b_back_mv_table_base);
948 av_freep(&s->b_bidir_forw_mv_table_base);
949 av_freep(&s->b_bidir_back_mv_table_base);
950 av_freep(&s->b_direct_mv_table_base);
951 s->p_mv_table = NULL;
952 s->b_forw_mv_table = NULL;
953 s->b_back_mv_table = NULL;
954 s->b_bidir_forw_mv_table = NULL;
955 s->b_bidir_back_mv_table = NULL;
956 s->b_direct_mv_table = NULL;
957 for (i = 0; i < 2; i++) {
958 for (j = 0; j < 2; j++) {
959 for (k = 0; k < 2; k++) {
960 av_freep(&s->b_field_mv_table_base[i][j][k]);
961 s->b_field_mv_table[i][j][k] = NULL;
963 av_freep(&s->b_field_select_table[i][j]);
964 av_freep(&s->p_field_mv_table_base[i][j]);
965 s->p_field_mv_table[i][j] = NULL;
967 av_freep(&s->p_field_select_table[i]);
970 av_freep(&s->dc_val_base);
971 av_freep(&s->coded_block_base);
972 av_freep(&s->mbintra_table);
973 av_freep(&s->cbp_table);
974 av_freep(&s->pred_dir_table);
976 av_freep(&s->mbskip_table);
977 av_freep(&s->bitstream_buffer);
978 s->allocated_bitstream_buffer_size = 0;
980 av_freep(&s->avctx->stats_out);
981 av_freep(&s->ac_stats);
982 av_freep(&s->error_status_table);
983 av_freep(&s->er_temp_buffer);
984 av_freep(&s->mb_index2xy);
985 av_freep(&s->lambda_table);
986 av_freep(&s->q_intra_matrix);
987 av_freep(&s->q_inter_matrix);
988 av_freep(&s->q_intra_matrix16);
989 av_freep(&s->q_inter_matrix16);
990 av_freep(&s->input_picture);
991 av_freep(&s->reordered_input_picture);
992 av_freep(&s->dct_offset);
993 av_freep(&s->cplx_tab);
994 av_freep(&s->bits_tab);
/* Free the picture pool unless this context is a frame-thread copy
 * that does not own the buffers. */
996 if (s->picture && !s->avctx->internal->is_copy) {
997 for (i = 0; i < s->picture_count; i++) {
998 free_picture(s, &s->picture[i]);
1001 av_freep(&s->picture);
1002 s->context_initialized = 0;
1003 s->last_picture_ptr =
1004 s->next_picture_ptr =
1005 s->current_picture_ptr = NULL;
1006 s->linesize = s->uvlinesize = 0;
1008 for (i = 0; i < 3; i++)
1009 av_freep(&s->visualization_buffer[i]);
1011 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
1012 avcodec_default_free_buffers(s->avctx);
/* Build the derived lookup tables (max_level, max_run, index_run) of
 * an RLTable from its run/level arrays. With static_store the tables
 * live in caller-provided static storage and are built only once;
 * otherwise they are heap-allocated per table. */
1015 void ff_init_rl(RLTable *rl,
1016 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1018 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1019 uint8_t index_run[MAX_RUN + 1];
1020 int last, run, level, start, end, i;
1022 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1023 if (static_store && rl->max_level[0])
1026 /* compute max_level[], max_run[] and index_run[] */
1027 for (last = 0; last < 2; last++) {
/* rl->n marks "no code with this run" in index_run. */
1036 memset(max_level, 0, MAX_RUN + 1);
1037 memset(max_run, 0, MAX_LEVEL + 1);
1038 memset(index_run, rl->n, MAX_RUN + 1);
1039 for (i = start; i < end; i++) {
1040 run = rl->table_run[i];
1041 level = rl->table_level[i];
1042 if (index_run[run] == rl->n)
1044 if (level > max_level[run])
1045 max_level[run] = level;
1046 if (run > max_run[level])
1047 max_run[level] = run;
/* Commit the scratch arrays to static or heap storage. */
1050 rl->max_level[last] = static_store[last];
1052 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1053 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1055 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1057 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1058 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1060 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1062 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1063 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute the per-qscale RL_VLC tables: for each of the 32 qscale
 * values, bake the dequantized level (level * qmul + qadd) directly
 * into the VLC entries so decoding skips a multiply per coefficient. */
1067 void ff_init_vlc_rl(RLTable *rl)
1071 for (q = 0; q < 32; q++) {
1073 int qadd = (q - 1) | 1;
1079 for (i = 0; i < rl->vlc.table_size; i++) {
1080 int code = rl->vlc.table[i][0];
1081 int len = rl->vlc.table[i][1];
1084 if (len == 0) { // illegal code
1087 } else if (len < 0) { // more bits needed
1091 if (code == rl->n) { // esc
/* run is stored +1; codes past rl->last get +192 to flag "last". */
1095 run = rl->table_run[code] + 1;
1096 level = rl->table_level[code] * qmul + qadd;
1097 if (code >= rl->last) run += 192;
1100 rl->rl_vlc[q][i].len = len;
1101 rl->rl_vlc[q][i].level = level;
1102 rl->rl_vlc[q][i].run = run;
/* Free the frame buffers of all non-reference pictures owned by this
 * context; with remove_current == 0 the current picture is kept. */
1107 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1111 /* release non reference frames */
1112 for (i = 0; i < s->picture_count; i++) {
1113 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1114 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1115 (remove_current || &s->picture[i] != s->current_picture_ptr)
1116 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1117 free_frame_buffer(s, &s->picture[i]);
/* Find a free slot in this context's picture range. Preference order:
 * first a completely untouched slot (data NULL, type 0), then a slot
 * whose buffer was released but keeps a type, then any slot without
 * data. Returns the index, or AVERROR_INVALIDDATA if none is free. */
1122 int ff_find_unused_picture(MpegEncContext *s, int shared)
1127 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1128 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1132 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1133 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1136 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1137 if (s->picture[i].f.data[0] == NULL)
1142 return AVERROR_INVALIDDATA;
/* Encoder noise reduction: refresh the per-coefficient DCT offsets
 * from the accumulated error sums, halving the accumulators once the
 * sample count exceeds 2^16 to keep a moving average. */
1145 static void update_noise_reduction(MpegEncContext *s)
1149 for (intra = 0; intra < 2; intra++) {
1150 if (s->dct_count[intra] > (1 << 16)) {
1151 for (i = 0; i < 64; i++) {
1152 s->dct_error_sum[intra][i] >>= 1;
1154 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, rounded; the + 1
 * in the divisor avoids division by zero. */
1157 for (i = 0; i < 64; i++) {
1158 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1159 s->dct_count[intra] +
1160 s->dct_error_sum[intra][i] / 2) /
1161 (s->dct_error_sum[intra][i] + 1);
1167 * generic function for encode/decode called after coding/decoding
1168 * the header and before a frame is coded/decoded.
1170 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1176 assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
1177 s->codec_id == AV_CODEC_ID_SVQ3);
1179 /* mark & release old frames */
1180 if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
1181 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1182 s->last_picture_ptr != s->next_picture_ptr &&
1183 s->last_picture_ptr->f.data[0]) {
1184 if (s->last_picture_ptr->owner2 == s)
1185 free_frame_buffer(s, s->last_picture_ptr);
1188 /* release forgotten pictures */
1189 /* if (mpeg124/h263) */
1191 for (i = 0; i < s->picture_count; i++) {
1192 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1193 &s->picture[i] != s->last_picture_ptr &&
1194 &s->picture[i] != s->next_picture_ptr &&
1195 s->picture[i].f.reference) {
1196 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1197 av_log(avctx, AV_LOG_ERROR,
1198 "releasing zombie picture\n");
1199 free_frame_buffer(s, &s->picture[i]);
1206 ff_release_unused_pictures(s, 1);
1208 if (s->current_picture_ptr &&
1209 s->current_picture_ptr->f.data[0] == NULL) {
1210 // we already have a unused image
1211 // (maybe it was set before reading the header)
1212 pic = s->current_picture_ptr;
1214 i = ff_find_unused_picture(s, 0);
1215 pic = &s->picture[i];
1218 pic->f.reference = 0;
1220 if (s->codec_id == AV_CODEC_ID_H264)
1221 pic->f.reference = s->picture_structure;
1222 else if (s->pict_type != AV_PICTURE_TYPE_B)
1223 pic->f.reference = 3;
1226 pic->f.coded_picture_number = s->coded_picture_number++;
1228 if (ff_alloc_picture(s, pic, 0) < 0)
1231 s->current_picture_ptr = pic;
1232 // FIXME use only the vars from current_pic
1233 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1234 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1235 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1236 if (s->picture_structure != PICT_FRAME)
1237 s->current_picture_ptr->f.top_field_first =
1238 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1240 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1241 !s->progressive_sequence;
1242 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1245 s->current_picture_ptr->f.pict_type = s->pict_type;
1246 // if (s->flags && CODEC_FLAG_QSCALE)
1247 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1248 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1250 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1252 if (s->pict_type != AV_PICTURE_TYPE_B) {
1253 s->last_picture_ptr = s->next_picture_ptr;
1255 s->next_picture_ptr = s->current_picture_ptr;
1257 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1258 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1259 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1260 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1261 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1262 s->pict_type, s->dropable); */
1264 if (s->codec_id != AV_CODEC_ID_H264) {
1265 if ((s->last_picture_ptr == NULL ||
1266 s->last_picture_ptr->f.data[0] == NULL) &&
1267 (s->pict_type != AV_PICTURE_TYPE_I ||
1268 s->picture_structure != PICT_FRAME)) {
1269 if (s->pict_type != AV_PICTURE_TYPE_I)
1270 av_log(avctx, AV_LOG_ERROR,
1271 "warning: first frame is no keyframe\n");
1272 else if (s->picture_structure != PICT_FRAME)
1273 av_log(avctx, AV_LOG_INFO,
1274 "allocate dummy last picture for field based first keyframe\n");
1276 /* Allocate a dummy frame */
1277 i = ff_find_unused_picture(s, 0);
1278 s->last_picture_ptr = &s->picture[i];
1279 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1280 s->last_picture_ptr = NULL;
1283 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1284 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1285 s->last_picture_ptr->f.reference = 3;
1287 if ((s->next_picture_ptr == NULL ||
1288 s->next_picture_ptr->f.data[0] == NULL) &&
1289 s->pict_type == AV_PICTURE_TYPE_B) {
1290 /* Allocate a dummy frame */
1291 i = ff_find_unused_picture(s, 0);
1292 s->next_picture_ptr = &s->picture[i];
1293 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1294 s->next_picture_ptr = NULL;
1297 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1298 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1299 s->next_picture_ptr->f.reference = 3;
1303 if (s->last_picture_ptr)
1304 ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1305 if (s->next_picture_ptr)
1306 ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1308 if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
1309 (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3)) {
1310 if (s->next_picture_ptr)
1311 s->next_picture_ptr->owner2 = s;
1312 if (s->last_picture_ptr)
1313 s->last_picture_ptr->owner2 = s;
1316 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1317 s->last_picture_ptr->f.data[0]));
1319 if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1321 for (i = 0; i < 4; i++) {
1322 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1323 s->current_picture.f.data[i] +=
1324 s->current_picture.f.linesize[i];
1326 s->current_picture.f.linesize[i] *= 2;
1327 s->last_picture.f.linesize[i] *= 2;
1328 s->next_picture.f.linesize[i] *= 2;
1332 s->err_recognition = avctx->err_recognition;
1334 /* set dequantizer, we can't do it during init as
1335 * it might change for mpeg4 and we can't do it in the header
1336 * decode as init is not called for mpeg4 there yet */
1337 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1338 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1339 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1340 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1341 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1342 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1344 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1345 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1348 if (s->dct_error_sum) {
1349 assert(s->avctx->noise_reduction && s->encoding);
1350 update_noise_reduction(s);
1353 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1354 return ff_xvmc_field_start(s, avctx);
1359 /* generic function for encode/decode called after a
1360 * frame has been coded/decoded. */
1361 void ff_MPV_frame_end(MpegEncContext *s)
1364 /* redraw edges for the frame if decoding didn't complete */
1365 // just to make sure that all data is rendered.
1366 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1367 ff_xvmc_field_end(s);
1368 } else if ((s->error_count || s->encoding) &&
1369 !s->avctx->hwaccel &&
1370 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1371 s->unrestricted_mv &&
1372 s->current_picture.f.reference &&
1374 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
/* pad the picture borders (luma, then both chroma planes scaled by the
 * chroma subsampling shifts) so unrestricted MVs can reference them */
1375 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1376 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1377 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1378 s->h_edge_pos, s->v_edge_pos,
1379 EDGE_WIDTH, EDGE_WIDTH,
1380 EDGE_TOP | EDGE_BOTTOM);
1381 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1382 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1383 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1384 EDGE_TOP | EDGE_BOTTOM);
1385 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1386 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1387 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1388 EDGE_TOP | EDGE_BOTTOM);
/* remember encoder statistics of this frame for rate control */
1393 s->last_pict_type = s->pict_type;
1394 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1395 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1396 s->last_non_b_pict_type = s->pict_type;
1399 /* copy back current_picture variables */
1400 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1401 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1402 s->picture[i] = s->current_picture;
1406 assert(i < MAX_PICTURE_COUNT);
1410 /* release non-reference frames */
1411 for (i = 0; i < s->picture_count; i++) {
1412 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1413 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1414 free_frame_buffer(s, &s->picture[i]);
1418 // clear copies, to avoid confusion
1420 memset(&s->last_picture, 0, sizeof(Picture));
1421 memset(&s->next_picture, 0, sizeof(Picture));
1422 memset(&s->current_picture, 0, sizeof(Picture));
1424 s->avctx->coded_frame = &s->current_picture_ptr->f;
/* signal frame-threading waiters that this reference is complete */
1426 if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
1427 ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
1432 * Draw a line from (ex, ey) -> (sx, sy).
1433 * @param w width of the image
1434 * @param h height of the image
1435 * @param stride stride/linesize of the image
1436 * @param color color of the arrow
1438 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1439 int w, int h, int stride, int color)
1443 sx = av_clip(sx, 0, w - 1);
1444 sy = av_clip(sy, 0, h - 1);
1445 ex = av_clip(ex, 0, w - 1);
1446 ey = av_clip(ey, 0, h - 1);
1448 buf[sy * stride + sx] += color;
1450 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1452 FFSWAP(int, sx, ex);
1453 FFSWAP(int, sy, ey);
1455 buf += sx + sy * stride;
1457 f = ((ey - sy) << 16) / ex;
1458 for (x = 0; x = ex; x++) {
1460 fr = (x * f) & 0xFFFF;
1461 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1462 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1466 FFSWAP(int, sx, ex);
1467 FFSWAP(int, sy, ey);
1469 buf += sx + sy * stride;
1472 f = ((ex - sx) << 16) / ey;
1475 for (y = 0; y = ey; y++) {
1477 fr = (y * f) & 0xFFFF;
1478 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1479 buf[y * stride + x + 1] += (color * fr ) >> 16;
1485 * Draw an arrow from (ex, ey) -> (sx, sy).
1486 * @param w width of the image
1487 * @param h height of the image
1488 * @param stride stride/linesize of the image
1489 * @param color color of the arrow
1491 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1492 int ey, int w, int h, int stride, int color)
/* clip loosely (up to 100 px outside the frame); draw_line() performs
 * the exact clipping to the image bounds */
1496 sx = av_clip(sx, -100, w + 100);
1497 sy = av_clip(sy, -100, h + 100);
1498 ex = av_clip(ex, -100, w + 100);
1499 ey = av_clip(ey, -100, h + 100);
/* draw the two arrow-head strokes only when the vector is longer than
 * 3 pixels; rx/ry are presumably the rotated head directions computed
 * from dx/dy above this excerpt -- confirm in full source */
1504 if (dx * dx + dy * dy > 3 * 3) {
1507 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1509 // FIXME subpixel accuracy
/* scale the head strokes to a fixed length */
1510 rx = ROUNDED_DIV(rx * 3 << 4, length);
1511 ry = ROUNDED_DIV(ry * 3 << 4, length);
1513 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1514 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
/* main shaft */
1516 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1520 * Print debugging info for the given picture.
1522 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
1524 if (s->avctx->hwaccel || !pict || !pict->mb_type)
1527 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1530 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1531 switch (pict->pict_type) {
1532 case AV_PICTURE_TYPE_I:
1533 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1535 case AV_PICTURE_TYPE_P:
1536 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1538 case AV_PICTURE_TYPE_B:
1539 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1541 case AV_PICTURE_TYPE_S:
1542 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1544 case AV_PICTURE_TYPE_SI:
1545 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1547 case AV_PICTURE_TYPE_SP:
1548 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1551 for (y = 0; y < s->mb_height; y++) {
1552 for (x = 0; x < s->mb_width; x++) {
1553 if (s->avctx->debug & FF_DEBUG_SKIP) {
1554 int count = s->mbskip_table[x + y * s->mb_stride];
1557 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1559 if (s->avctx->debug & FF_DEBUG_QP) {
1560 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1561 pict->qscale_table[x + y * s->mb_stride]);
1563 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1564 int mb_type = pict->mb_type[x + y * s->mb_stride];
1565 // Type & MV direction
1566 if (IS_PCM(mb_type))
1567 av_log(s->avctx, AV_LOG_DEBUG, "P");
1568 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1569 av_log(s->avctx, AV_LOG_DEBUG, "A");
1570 else if (IS_INTRA4x4(mb_type))
1571 av_log(s->avctx, AV_LOG_DEBUG, "i");
1572 else if (IS_INTRA16x16(mb_type))
1573 av_log(s->avctx, AV_LOG_DEBUG, "I");
1574 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1575 av_log(s->avctx, AV_LOG_DEBUG, "d");
1576 else if (IS_DIRECT(mb_type))
1577 av_log(s->avctx, AV_LOG_DEBUG, "D");
1578 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1579 av_log(s->avctx, AV_LOG_DEBUG, "g");
1580 else if (IS_GMC(mb_type))
1581 av_log(s->avctx, AV_LOG_DEBUG, "G");
1582 else if (IS_SKIP(mb_type))
1583 av_log(s->avctx, AV_LOG_DEBUG, "S");
1584 else if (!USES_LIST(mb_type, 1))
1585 av_log(s->avctx, AV_LOG_DEBUG, ">");
1586 else if (!USES_LIST(mb_type, 0))
1587 av_log(s->avctx, AV_LOG_DEBUG, "<");
1589 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1590 av_log(s->avctx, AV_LOG_DEBUG, "X");
1594 if (IS_8X8(mb_type))
1595 av_log(s->avctx, AV_LOG_DEBUG, "+");
1596 else if (IS_16X8(mb_type))
1597 av_log(s->avctx, AV_LOG_DEBUG, "-");
1598 else if (IS_8X16(mb_type))
1599 av_log(s->avctx, AV_LOG_DEBUG, "|");
1600 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1601 av_log(s->avctx, AV_LOG_DEBUG, " ");
1603 av_log(s->avctx, AV_LOG_DEBUG, "?");
1606 if (IS_INTERLACED(mb_type))
1607 av_log(s->avctx, AV_LOG_DEBUG, "=");
1609 av_log(s->avctx, AV_LOG_DEBUG, " ");
1611 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1613 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1617 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1618 (s->avctx->debug_mv)) {
1619 const int shift = 1 + s->quarter_sample;
1623 int h_chroma_shift, v_chroma_shift, block_height;
1624 const int width = s->avctx->width;
1625 const int height = s->avctx->height;
1626 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1627 const int mv_stride = (s->mb_width << mv_sample_log2) +
1628 (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
1629 s->low_delay = 0; // needed to see the vectors without trashing the buffers
1631 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1632 &h_chroma_shift, &v_chroma_shift);
1633 for (i = 0; i < 3; i++) {
1634 memcpy(s->visualization_buffer[i], pict->data[i],
1635 (i == 0) ? pict->linesize[i] * height:
1636 pict->linesize[i] * height >> v_chroma_shift);
1637 pict->data[i] = s->visualization_buffer[i];
1639 pict->type = FF_BUFFER_TYPE_COPY;
1640 ptr = pict->data[0];
1641 block_height = 16 >> v_chroma_shift;
1643 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1645 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1646 const int mb_index = mb_x + mb_y * s->mb_stride;
1647 if ((s->avctx->debug_mv) && pict->motion_val) {
1649 for (type = 0; type < 3; type++) {
1653 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1654 (pict->pict_type!= AV_PICTURE_TYPE_P))
1659 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1660 (pict->pict_type!= AV_PICTURE_TYPE_B))
1665 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1666 (pict->pict_type!= AV_PICTURE_TYPE_B))
1671 if (!USES_LIST(pict->mb_type[mb_index], direction))
1674 if (IS_8X8(pict->mb_type[mb_index])) {
1676 for (i = 0; i < 4; i++) {
1677 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1678 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1679 int xy = (mb_x * 2 + (i & 1) +
1680 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1681 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1682 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1683 draw_arrow(ptr, sx, sy, mx, my, width,
1684 height, s->linesize, 100);
1686 } else if (IS_16X8(pict->mb_type[mb_index])) {
1688 for (i = 0; i < 2; i++) {
1689 int sx = mb_x * 16 + 8;
1690 int sy = mb_y * 16 + 4 + 8 * i;
1691 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1692 int mx = (pict->motion_val[direction][xy][0] >> shift);
1693 int my = (pict->motion_val[direction][xy][1] >> shift);
1695 if (IS_INTERLACED(pict->mb_type[mb_index]))
1698 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1699 height, s->linesize, 100);
1701 } else if (IS_8X16(pict->mb_type[mb_index])) {
1703 for (i = 0; i < 2; i++) {
1704 int sx = mb_x * 16 + 4 + 8 * i;
1705 int sy = mb_y * 16 + 8;
1706 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1707 int mx = pict->motion_val[direction][xy][0] >> shift;
1708 int my = pict->motion_val[direction][xy][1] >> shift;
1710 if (IS_INTERLACED(pict->mb_type[mb_index]))
1713 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1714 height, s->linesize, 100);
1717 int sx = mb_x * 16 + 8;
1718 int sy = mb_y * 16 + 8;
1719 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
1720 int mx = pict->motion_val[direction][xy][0] >> shift + sx;
1721 int my = pict->motion_val[direction][xy][1] >> shift + sy;
1722 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1726 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1727 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1728 0x0101010101010101ULL;
1730 for (y = 0; y < block_height; y++) {
1731 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1732 (block_height * mb_y + y) *
1733 pict->linesize[1]) = c;
1734 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1735 (block_height * mb_y + y) *
1736 pict->linesize[2]) = c;
1739 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1741 int mb_type = pict->mb_type[mb_index];
1744 #define COLOR(theta, r) \
1745 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1746 v = (int)(128 + r * sin(theta * 3.141592 / 180));
1750 if (IS_PCM(mb_type)) {
1752 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1753 IS_INTRA16x16(mb_type)) {
1755 } else if (IS_INTRA4x4(mb_type)) {
1757 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1759 } else if (IS_DIRECT(mb_type)) {
1761 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1763 } else if (IS_GMC(mb_type)) {
1765 } else if (IS_SKIP(mb_type)) {
1767 } else if (!USES_LIST(mb_type, 1)) {
1769 } else if (!USES_LIST(mb_type, 0)) {
1772 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1776 u *= 0x0101010101010101ULL;
1777 v *= 0x0101010101010101ULL;
1778 for (y = 0; y < block_height; y++) {
1779 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1780 (block_height * mb_y + y) * pict->linesize[1]) = u;
1781 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1782 (block_height * mb_y + y) * pict->linesize[2]) = v;
1786 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1787 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1788 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1789 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1790 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1792 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1793 for (y = 0; y < 16; y++)
1794 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1795 pict->linesize[0]] ^= 0x80;
1797 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
1798 int dm = 1 << (mv_sample_log2 - 2);
1799 for (i = 0; i < 4; i++) {
1800 int sx = mb_x * 16 + 8 * (i & 1);
1801 int sy = mb_y * 16 + 8 * (i >> 1);
1802 int xy = (mb_x * 2 + (i & 1) +
1803 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1805 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
1806 if (mv[0] != mv[dm] ||
1807 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
1808 for (y = 0; y < 8; y++)
1809 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
1810 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
1811 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
1812 pict->linesize[0]) ^= 0x8080808080808080ULL;
1816 if (IS_INTERLACED(mb_type) &&
1817 s->codec_id == AV_CODEC_ID_H264) {
1821 s->mbskip_table[mb_index] = 0;
1828 * find the lowest MB row referenced in the MVs
1830 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1832 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1833 int my, off, i, mvs;
/* only frame pictures are handled; everything else takes the
 * conservative answer below (last MB row) */
1835 if (s->picture_structure != PICT_FRAME) goto unhandled;
/* mvs = number of vectors for the current mv_type (set in the elided
 * switch cases) */
1837 switch (s->mv_type) {
/* track the extreme vertical components, normalized to qpel units */
1851 for (i = 0; i < mvs; i++) {
1852 my = s->mv[dir][i][1]<<qpel_shift;
1853 my_max = FFMAX(my_max, my);
1854 my_min = FFMIN(my_min, my);
/* worst-case displacement in MB rows: qpel units per 16-pel row is
 * 64, hence the round-up by 63 and shift by 6 */
1857 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1859 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1861 return s->mb_height-1;
1864 /* put block[] to dest[] */
1865 static inline void put_dct(MpegEncContext *s,
1866 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
/* intra path: dequantize, then overwrite dest with the IDCT result */
1868 s->dct_unquantize_intra(s, block, i, qscale);
1869 s->dsp.idct_put (dest, line_size, block);
1872 /* add block[] to dest[] */
1873 static inline void add_dct(MpegEncContext *s,
1874 DCTELEM *block, int i, uint8_t *dest, int line_size)
/* skip the IDCT entirely when the block has no nonzero coefficient */
1876 if (s->block_last_index[i] >= 0) {
1877 s->dsp.idct_add (dest, line_size, block);
1881 static inline void add_dequant_dct(MpegEncContext *s,
1882 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
/* like add_dct(), but the coefficients still need inter dequantization */
1884 if (s->block_last_index[i] >= 0) {
1885 s->dct_unquantize_inter(s, block, i, qscale);
1887 s->dsp.idct_add (dest, line_size, block);
1892 * Clean dc, ac, coded_block for the current non-intra MB.
1894 void ff_clean_intra_table_entries(MpegEncContext *s)
1896 int wrap = s->b8_stride;
1897 int xy = s->block_index[0];
/* reset the luma DC predictors of this MB's four 8x8 blocks to the
 * "no prediction" value 1024 */
1900 s->dc_val[0][xy + 1 ] =
1901 s->dc_val[0][xy + wrap] =
1902 s->dc_val[0][xy + 1 + wrap] = 1024;
/* clear the luma AC prediction coefficients (two rows of blocks) */
1904 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1905 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
/* MSMPEG4 v3+ additionally tracks coded-block flags per 8x8 block */
1906 if (s->msmpeg4_version>=3) {
1907 s->coded_block[xy ] =
1908 s->coded_block[xy + 1 ] =
1909 s->coded_block[xy + wrap] =
1910 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: one DC/AC entry per component at macroblock resolution */
1913 wrap = s->mb_stride;
1914 xy = s->mb_x + s->mb_y * wrap;
1916 s->dc_val[2][xy] = 1024;
1918 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1919 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
/* mark the MB as non-intra so stale predictors are not reused */
1921 s->mbintra_table[xy]= 0;
1924 /* generic function called after a macroblock has been parsed by the
1925 decoder or after it has been encoded by the encoder.
1927 Important variables used:
1928 s->mb_intra : true if intra macroblock
1929 s->mv_dir : motion vector direction
1930 s->mv_type : motion vector type
1931 s->mv : motion vector
1932 s->interlaced_dct : true if interlaced dct used (mpeg2)
1934 static av_always_inline
1935 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
1938 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1939 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1940 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1944 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1945 /* save DCT coefficients */
1947 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
1948 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1950 for(j=0; j<64; j++){
1951 *dct++ = block[i][s->dsp.idct_permutation[j]];
1952 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
1954 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1958 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
1960 /* update DC predictors for P macroblocks */
1962 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1963 if(s->mbintra_table[mb_xy])
1964 ff_clean_intra_table_entries(s);
1968 s->last_dc[2] = 128 << s->intra_dc_precision;
1971 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1972 s->mbintra_table[mb_xy]=1;
1974 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1975 uint8_t *dest_y, *dest_cb, *dest_cr;
1976 int dct_linesize, dct_offset;
1977 op_pixels_func (*op_pix)[4];
1978 qpel_mc_func (*op_qpix)[16];
1979 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
1980 const int uvlinesize = s->current_picture.f.linesize[1];
1981 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
1982 const int block_size = 8;
1984 /* avoid copy if macroblock skipped in last frame too */
1985 /* skip only during decoding as we might trash the buffers during encoding a bit */
1987 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1989 if (s->mb_skipped) {
1991 assert(s->pict_type!=AV_PICTURE_TYPE_I);
1993 } else if(!s->current_picture.f.reference) {
1996 *mbskip_ptr = 0; /* not skipped */
2000 dct_linesize = linesize << s->interlaced_dct;
2001 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2005 dest_cb= s->dest[1];
2006 dest_cr= s->dest[2];
2008 dest_y = s->b_scratchpad;
2009 dest_cb= s->b_scratchpad+16*linesize;
2010 dest_cr= s->b_scratchpad+32*linesize;
2014 /* motion handling */
2015 /* decoding or more than one mb_type (MC was already done otherwise) */
2018 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2019 if (s->mv_dir & MV_DIR_FORWARD) {
2020 ff_thread_await_progress(&s->last_picture_ptr->f,
2021 ff_MPV_lowest_referenced_row(s, 0),
2024 if (s->mv_dir & MV_DIR_BACKWARD) {
2025 ff_thread_await_progress(&s->next_picture_ptr->f,
2026 ff_MPV_lowest_referenced_row(s, 1),
2031 op_qpix= s->me.qpel_put;
2032 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2033 op_pix = s->dsp.put_pixels_tab;
2035 op_pix = s->dsp.put_no_rnd_pixels_tab;
2037 if (s->mv_dir & MV_DIR_FORWARD) {
2038 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2039 op_pix = s->dsp.avg_pixels_tab;
2040 op_qpix= s->me.qpel_avg;
2042 if (s->mv_dir & MV_DIR_BACKWARD) {
2043 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2047 /* skip dequant / idct if we are really late ;) */
2048 if(s->avctx->skip_idct){
2049 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2050 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2051 || s->avctx->skip_idct >= AVDISCARD_ALL)
2055 /* add dct residue */
2056 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2057 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2058 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2059 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2060 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2061 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2063 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2064 if (s->chroma_y_shift){
2065 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2066 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2070 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2071 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2072 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2073 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2076 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2077 add_dct(s, block[0], 0, dest_y , dct_linesize);
2078 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2079 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2080 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2082 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2083 if(s->chroma_y_shift){//Chroma420
2084 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2085 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2088 dct_linesize = uvlinesize << s->interlaced_dct;
2089 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2091 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2092 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2093 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2094 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2095 if(!s->chroma_x_shift){//Chroma444
2096 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2097 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2098 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2099 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2104 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2105 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2108 /* dct only in intra block */
2109 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2110 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2111 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2112 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2113 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2115 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2116 if(s->chroma_y_shift){
2117 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2118 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2122 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2123 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2124 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2125 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2129 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2130 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2131 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2132 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2134 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2135 if(s->chroma_y_shift){
2136 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2137 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2140 dct_linesize = uvlinesize << s->interlaced_dct;
2141 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2143 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2144 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2145 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2146 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2147 if(!s->chroma_x_shift){//Chroma444
2148 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2149 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2150 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2151 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2159 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2160 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2161 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2166 void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
/* dispatch to the always-inline worker with a compile-time is_mpeg12
 * flag so each specialization has its dead branches optimized away */
2168 if(s->out_format == FMT_MPEG1) {
2169 MPV_decode_mb_internal(s, block, 1);
2172 MPV_decode_mb_internal(s, block, 0);
2176 * @param h is the normal height, this will be reduced automatically if needed for the last row
2178 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2179 const int field_pic= s->picture_structure != PICT_FRAME;
/* pad the borders of the finished band so later unrestricted MVs can
 * reference pixels outside the picture */
2185 if (!s->avctx->hwaccel
2186 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2187 && s->unrestricted_mv
2188 && s->current_picture.f.reference
2190 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2191 int sides = 0, edge_h;
2192 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2193 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2194 if (y==0) sides |= EDGE_TOP;
2195 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2197 edge_h= FFMIN(h, s->v_edge_pos - y);
2199 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2200 s->linesize, s->h_edge_pos, edge_h,
2201 EDGE_WIDTH, EDGE_WIDTH, sides);
2202 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2203 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2204 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2205 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2206 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2207 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
/* clamp the band height to what is actually inside the picture */
2210 h= FFMIN(h, s->avctx->height - y);
2212 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2214 if (s->avctx->draw_horiz_band) {
2216 int offset[AV_NUM_DATA_POINTERS];
/* choose which frame to hand to the user callback: the current frame
 * when displaying in coded order / low delay, otherwise the previous
 * (already complete) picture */
2219 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2220 src = &s->current_picture_ptr->f;
2221 else if(s->last_picture_ptr)
2222 src = &s->last_picture_ptr->f;
/* compute per-plane byte offsets of the band start (luma, then the two
 * subsampled chroma planes) */
2226 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2227 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2230 offset[0]= y * s->linesize;
2232 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2233 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2239 s->avctx->draw_horiz_band(s->avctx, src, offset,
2240 y, s->picture_structure, h);
/* Precompute block_index[] and the dest[] plane pointers for the
 * current macroblock position (mb_x, mb_y). mb_size == 4 because a
 * macroblock is 16 (1<<4) luma pixels wide.
 * NOTE(review): numbering gaps (2255->2257, 2259->2261, 2266->2268)
 * suggest a "return" / "else" and braces were lost in extraction. */
2244 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2245 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2246 const int uvlinesize = s->current_picture.f.linesize[1];
2247 const int mb_size= 4;
/* Indices 0-3: the four 8x8 luma blocks of this MB (two per b8 row);
   the "-2"/"-1" address the blocks to the left of the current MB. */
2249 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2250 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2251 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2252 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* Indices 4-5: the two chroma blocks, stored after the luma area. */
2253 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2254 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2255 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* Destination pointers start one MB to the left of mb_x (the "-1"). */
2257 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2258 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2259 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2261 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2263 if(s->picture_structure==PICT_FRAME){
/* Frame pictures: advance by mb_y full MB rows in each plane. */
2264 s->dest[0] += s->mb_y * linesize << mb_size;
2265 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2266 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* Field pictures: only every other MB row belongs to this field. */
2268 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2269 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2270 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
/* Parity of mb_y must match bottom-field selection. */
2271 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/* Flush all decoder state (e.g. on seek): release every internally or
 * user-allocated picture buffer, drop the current/last/next picture
 * references and reset the bitstream parser.
 * NOTE(review): numbering gaps (2280->2283, 2287->2289) — the early
 * "return" and some braces appear lost in extraction. */
2276 void ff_mpeg_flush(AVCodecContext *avctx){
2278 MpegEncContext *s = avctx->priv_data;
/* Nothing to flush before init (or after close). */
2280 if(s==NULL || s->picture==NULL)
2283 for(i=0; i<s->picture_count; i++){
/* Only frames we own (internal) or that the user handed us are freed. */
2284 if (s->picture[i].f.data[0] &&
2285 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2286 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2287 free_frame_buffer(s, &s->picture[i]);
2289 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2291 s->mb_x= s->mb_y= 0;
/* Reset the start-code parser so stale partial data is discarded. */
2293 s->parse_context.state= -1;
2294 s->parse_context.frame_start_found= 0;
2295 s->parse_context.overread= 0;
2296 s->parse_context.overread_index= 0;
2297 s->parse_context.index= 0;
2298 s->parse_context.last_index= 0;
2299 s->bitstream_buffer_size=0;
/* MPEG-1 intra inverse quantization.
 * DC (block[0]) is scaled by the luma/chroma DC scale; each AC level is
 * multiplied by qscale and the intra matrix, >>3, then made odd via
 * (level-1)|1 (MPEG-1 oddification).
 * NOTE(review): numbering gaps (2305, 2308, 2310/2311, 2313,
 * 2319-2322, 2325/2326, 2329-2334) — braces, the luma/chroma branch
 * condition, sign handling and the block[j] store are missing here. */
2303 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2304 DCTELEM *block, int n, int qscale)
2306 int i, level, nCoeffs;
2307 const uint16_t *quant_matrix;
2309 nCoeffs= s->block_last_index[n];
/* presumably n<4 selects luma — the branch lines are missing; verify */
2312 block[0] = block[0] * s->y_dc_scale;
2314 block[0] = block[0] * s->c_dc_scale;
2315 /* XXX: only mpeg1 */
2316 quant_matrix = s->intra_matrix;
2317 for(i=1;i<=nCoeffs;i++) {
2318 int j= s->intra_scantable.permutated[i];
/* negative-level branch (mirrored below for positive levels) */
2323 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2324 level = (level - 1) | 1;
2327 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2328 level = (level - 1) | 1;
/* MPEG-1 inter inverse quantization:
 *   level' = ((2*|level| + 1) * qscale * matrix) >> 4, then made odd.
 * The intra scantable is used for the permutation here as well (same
 * zigzag permutation applies to inter blocks).
 * NOTE(review): numbering gaps — sign handling and the block[j] store
 * are missing from this extraction. */
2335 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2336 DCTELEM *block, int n, int qscale)
2338 int i, level, nCoeffs;
2339 const uint16_t *quant_matrix;
2341 nCoeffs= s->block_last_index[n];
2343 quant_matrix = s->inter_matrix;
/* inter blocks have no separately-coded DC, so start at i=0 */
2344 for(i=0; i<=nCoeffs; i++) {
2345 int j= s->intra_scantable.permutated[i];
/* negative-level branch (mirrored below for positive levels) */
2350 level = (((level << 1) + 1) * qscale *
2351 ((int) (quant_matrix[j]))) >> 4;
2352 level = (level - 1) | 1;
2355 level = (((level << 1) + 1) * qscale *
2356 ((int) (quant_matrix[j]))) >> 4;
2357 level = (level - 1) | 1;
/* MPEG-2 intra inverse quantization. Unlike the MPEG-1 variant there is
 * no (level-1)|1 oddification; with alternate scan all 64 coefficients
 * are processed regardless of block_last_index.
 * NOTE(review): numbering gaps — braces, the luma/chroma branch, sign
 * handling and the block[j] store are missing from this extraction. */
2364 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2365 DCTELEM *block, int n, int qscale)
2367 int i, level, nCoeffs;
2368 const uint16_t *quant_matrix;
2370 if(s->alternate_scan) nCoeffs= 63;
2371 else nCoeffs= s->block_last_index[n];
/* presumably n<4 selects luma — branch lines missing; verify */
2374 block[0] = block[0] * s->y_dc_scale;
2376 block[0] = block[0] * s->c_dc_scale;
2377 quant_matrix = s->intra_matrix;
2378 for(i=1;i<=nCoeffs;i++) {
2379 int j= s->intra_scantable.permutated[i];
/* negative-level branch (mirrored below for positive levels) */
2384 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2387 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of MPEG-2 intra inverse quantization. The visible
 * arithmetic matches dct_unquantize_mpeg2_intra_c; the bit-exact
 * mismatch-control (coefficient-sum parity) handling presumably lives
 * in the lines missing from this extraction — verify before relying
 * on it.
 * NOTE(review): numbering gaps (2396, 2399/2400, 2403/2404, 2406,
 * 2411-2414, 2416/2417, 2419-2426) indicate missing code. */
2394 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2395 DCTELEM *block, int n, int qscale)
2397 int i, level, nCoeffs;
2398 const uint16_t *quant_matrix;
2401 if(s->alternate_scan) nCoeffs= 63;
2402 else nCoeffs= s->block_last_index[n];
/* presumably n<4 selects luma — branch lines missing; verify */
2405 block[0] = block[0] * s->y_dc_scale;
2407 block[0] = block[0] * s->c_dc_scale;
2408 quant_matrix = s->intra_matrix;
2409 for(i=1;i<=nCoeffs;i++) {
2410 int j= s->intra_scantable.permutated[i];
/* negative-level branch (mirrored below for positive levels) */
2415 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2418 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter inverse quantization:
 *   level' = ((2*|level| + 1) * qscale * matrix) >> 4, no oddification.
 * MPEG-2 mismatch control (XOR of all levels folded into the last
 * coefficient) presumably sits in the missing trailing lines — verify.
 * NOTE(review): numbering gaps — sign handling, the store and the
 * function tail are missing from this extraction. */
2427 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2428 DCTELEM *block, int n, int qscale)
2430 int i, level, nCoeffs;
2431 const uint16_t *quant_matrix;
2434 if(s->alternate_scan) nCoeffs= 63;
2435 else nCoeffs= s->block_last_index[n];
2437 quant_matrix = s->inter_matrix;
2438 for(i=0; i<=nCoeffs; i++) {
2439 int j= s->intra_scantable.permutated[i];
/* negative-level branch (mirrored below for positive levels) */
2444 level = (((level << 1) + 1) * qscale *
2445 ((int) (quant_matrix[j]))) >> 4;
2448 level = (((level << 1) + 1) * qscale *
2449 ((int) (quant_matrix[j]))) >> 4;
/* H.263 intra inverse quantization: level' = |level|*qmul +/- qadd
 * with qadd = (qscale-1)|1 (always odd). The qmul assignment
 * (presumably qscale<<1) and the sign/store lines are missing from
 * this extraction — verify against the original.
 * NOTE(review): numbering gaps (2460, 2462/2463, 2465-2469, 2471,
 * 2474-2479, 2481, 2483-2485, 2487, 2489-2494). */
2458 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2459 DCTELEM *block, int n, int qscale)
2461 int i, level, qmul, qadd;
2464 assert(s->block_last_index[n]>=0);
/* DC coefficient scaled separately (luma/chroma branch lines missing) */
2470 block[0] = block[0] * s->y_dc_scale;
2472 block[0] = block[0] * s->c_dc_scale;
2473 qadd = (qscale - 1) | 1;
/* raster_end maps the last zigzag index to a raster-order bound */
2480 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2482 for(i=1; i<=nCoeffs; i++) {
/* negative levels subtract qadd, positive levels add it */
2486 level = level * qmul - qadd;
2488 level = level * qmul + qadd;
/* H.263 inter inverse quantization: same level'*qmul +/- qadd scheme as
 * the intra variant but with no DC special-casing and starting at i=0.
 * NOTE(review): numbering gaps — the qmul assignment, sign handling and
 * the store are missing from this extraction. */
2495 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2496 DCTELEM *block, int n, int qscale)
2498 int i, level, qmul, qadd;
2501 assert(s->block_last_index[n]>=0);
2503 qadd = (qscale - 1) | 1;
2506 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2508 for(i=0; i<=nCoeffs; i++) {
/* negative levels subtract qadd, positive levels add it */
2512 level = level * qmul - qadd;
2514 level = level * qmul + qadd;
/**
 * set qscale and update qscale dependent variables.
 * Clamps qscale (upper bound 31 visible; the clamp bodies and the
 * s->qscale assignment are in lines missing from this extraction —
 * NOTE(review): numbering jumps 2524->2528 and 2528->2532).
 */
2524 void ff_set_qscale(MpegEncContext * s, int qscale)
2528 else if (qscale > 31)
/* chroma qscale is derived via a codec-specific mapping table */
2532 s->chroma_qscale= s->chroma_qscale_table[qscale];
/* DC scale factors depend on the (possibly different) luma/chroma qp */
2534 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2535 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report decode progress (up to macroblock row mb_y) of the current
 * picture to frame-threading consumers. Skipped for B-frames,
 * data-partitioned frames, and after an error, where row-wise progress
 * would not be reliable.
 * NOTE(review): braces (orig 2539/2542) are missing from this extraction. */
2538 void ff_MPV_report_decode_progress(MpegEncContext *s)
2540 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2541 ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);