2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the per-standard DCT coefficient dequantization
 * routines. ff_dct_common_init() below installs these into the
 * MpegEncContext function pointers (with a bitexact MPEG-2 intra variant
 * selected when CODEC_FLAG_BITEXACT is set). */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Identity mapping: chroma qscale equals luma qscale for indices 0..31.
 * Installed as the default in ff_MPV_common_defaults(). */
69 static const uint8_t ff_default_chroma_qscale_table[32] = {
70 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
72 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: a constant 8 for every qscale value.
 * Also entry 0 of ff_mpeg2_dc_scale_table[] below. */
75 const uint8_t ff_mpeg1_dc_scale_table[128] = {
76 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
84 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* DC scale = 4 for all qscale values; entry 1 of ff_mpeg2_dc_scale_table[]. */
87 static const uint8_t mpeg2_dc_scale_table1[128] = {
88 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
96 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* DC scale = 2 for all qscale values; entry 2 of ff_mpeg2_dc_scale_table[]. */
99 static const uint8_t mpeg2_dc_scale_table2[128] = {
100 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
108 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* DC scale = 1 for all qscale values; entry 3 of ff_mpeg2_dc_scale_table[]. */
111 static const uint8_t mpeg2_dc_scale_table3[128] = {
112 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
120 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale tables giving divisors 8, 4, 2, 1 at indices 0..3.
 * Presumably indexed by the MPEG-2 intra_dc_precision field —
 * TODO(review): confirm against the callers. */
123 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
124 ff_mpeg1_dc_scale_table,
125 mpeg2_dc_scale_table1,
126 mpeg2_dc_scale_table2,
127 mpeg2_dc_scale_table3,
/* Pixel format candidate lists (4:2:0, plain and hwaccel variants).
 * NOTE(review): the initializer entries are on lines not visible in this
 * excerpt. */
130 const enum PixelFormat ff_pixfmt_list_420[] = {
135 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/* Scan [p, end) for an MPEG start code (0x000001xx).
 * *state is a sliding 32-bit window of the last bytes seen, carried
 * across calls so a start code split over buffer boundaries is found.
 * NOTE(review): several body lines (loop setup, returns) are not visible
 * in this excerpt. */
143 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
145 uint32_t * restrict state)
/* Feed up to 3 bytes through the sliding window to finish a start code
 * that began in a previous buffer. */
153 for (i = 0; i < 3; i++) {
154 uint32_t tmp = *state << 8;
155 *state = tmp + *(p++);
156 if (tmp == 0x100 || p == end)
/* Skip ahead based on the last bytes examined; the amounts depend on how
 * many of the trailing bytes can still be part of a 00 00 01 prefix. */
161 if (p[-1] > 1 ) p += 3;
162 else if (p[-2] ) p += 2;
163 else if (p[-3]|(p[-1]-1)) p++;
/* Clamp to the buffer end and step back over the 4 start-code bytes. */
170 p = FFMIN(p, end) - 4;
176 /* init common dct for both encoder and decoder */
/* Initialize DSP helpers, dequantization function pointers and scan
 * tables shared by the encoder and decoder.
 * NOTE(review): the #if/HAVE_* guards around the per-architecture init
 * calls are on lines not visible in this excerpt. */
177 av_cold int ff_dct_common_init(MpegEncContext *s)
179 ff_dsputil_init(&s->dsp, s->avctx);
/* Default C implementations of the dequantizers declared above. */
181 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
182 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
183 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
184 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
185 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
186 if (s->flags & CODEC_FLAG_BITEXACT)
187 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
188 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Architecture-specific overrides (each presumably compile-time gated). */
191 ff_MPV_common_init_mmx(s);
193 ff_MPV_common_init_axp(s);
195 ff_MPV_common_init_mmi(s);
197 ff_MPV_common_init_arm(s);
199 ff_MPV_common_init_altivec(s);
201 ff_MPV_common_init_bfin(s);
204 /* load & permutate scantables
205 * note: only wmv uses different ones
/* Permute the scan order to match the IDCT's coefficient layout. */
207 if (s->alternate_scan) {
208 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
209 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
211 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
212 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
214 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
215 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy a Picture and mark the copy's buffer type as FF_BUFFER_TYPE_COPY.
 * NOTE(review): the struct copy itself is on a line not visible here. */
220 void ff_copy_picture(Picture *dst, Picture *src)
223 dst->f.type = FF_BUFFER_TYPE_COPY;
227 * Release a frame buffer
/* Release the frame buffer backing @pic and its hwaccel private data.
 * NOTE(review): line 237 is presumably the else-branch of the codec_id
 * check (WMV3IMAGE/VC1IMAGE use the default release path) — the `else`
 * line itself is not visible in this excerpt. */
229 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
231 /* Windows Media Image codecs allocate internal buffers with different
232 * dimensions; ignore user defined callbacks for these
234 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
235 ff_thread_release_buffer(s->avctx, &pic->f);
237 avcodec_default_release_buffer(s->avctx, &pic->f);
238 av_freep(&pic->f.hwaccel_picture_private);
242 * Allocate a frame buffer
/* Obtain a frame buffer for @pic via the get_buffer callbacks, allocating
 * hwaccel private data first if needed, and validate the returned strides.
 * Returns 0 on success; error paths free what was allocated.
 * NOTE(review): return statements and some braces are on lines not visible
 * in this excerpt. */
244 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
248 if (s->avctx->hwaccel) {
249 assert(!pic->f.hwaccel_picture_private);
250 if (s->avctx->hwaccel->priv_data_size) {
251 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
252 if (!pic->f.hwaccel_picture_private) {
253 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* WMV3IMAGE/VC1IMAGE bypass the threaded get_buffer (see free_frame_buffer). */
259 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
260 r = ff_thread_get_buffer(s->avctx, &pic->f);
262 r = avcodec_default_get_buffer(s->avctx, &pic->f);
264 if (r < 0 || !pic->f.type || !pic->f.data[0]) {
265 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
266 r, pic->f.type, pic->f.data[0]);
267 av_freep(&pic->f.hwaccel_picture_private);
/* The strides must stay constant across frames once established. */
271 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
272 s->uvlinesize != pic->f.linesize[1])) {
273 av_log(s->avctx, AV_LOG_ERROR,
274 "get_buffer() failed (stride changed)\n");
275 free_frame_buffer(s, pic);
/* Both chroma planes must share one stride. */
279 if (pic->f.linesize[1] != pic->f.linesize[2]) {
280 av_log(s->avctx, AV_LOG_ERROR,
281 "get_buffer() failed (uv stride mismatch)\n");
282 free_frame_buffer(s, pic);
290 * Allocate a Picture.
291 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Allocate a Picture and its per-macroblock side tables (qscale, mb_type,
 * motion vectors, ref indices, etc.).
 * @shared: nonzero means the pixel data is already provided by the caller
 * (FF_BUFFER_TYPE_SHARED); otherwise alloc_frame_buffer() is called.
 * Returns 0 on success; `fail:` frees the frame buffer and returns error.
 * NOTE(review): some braces/returns are on lines not visible here. */
293 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
295 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
297 // the + 1 is needed so memset(,,stride*height) does not sig11
299 const int mb_array_size = s->mb_stride * s->mb_height;
300 const int b8_array_size = s->b8_stride * s->mb_height * 2;
301 const int b4_array_size = s->b4_stride * s->mb_height * 4;
306 assert(pic->f.data[0]);
307 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
308 pic->f.type = FF_BUFFER_TYPE_SHARED;
310 assert(!pic->f.data[0]);
312 if (alloc_frame_buffer(s, pic) < 0)
/* Record the strides established by the first successful get_buffer(). */
315 s->linesize = pic->f.linesize[0];
316 s->uvlinesize = pic->f.linesize[1];
/* Encoder-only statistics tables, allocated once per Picture. */
319 if (pic->f.qscale_table == NULL) {
321 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
322 mb_array_size * sizeof(int16_t), fail)
323 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
324 mb_array_size * sizeof(int16_t), fail)
325 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
326 mb_array_size * sizeof(int8_t ), fail)
329 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
330 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
331 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
332 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
334 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
335 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
/* Offset into the padded base arrays so index 0 is the first real MB. */
337 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
338 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
/* H.264 uses 4x4 motion granularity (subsample_log2 = 2, b4 stride)... */
339 if (s->out_format == FMT_H264) {
340 for (i = 0; i < 2; i++) {
341 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
342 2 * (b4_array_size + 4) * sizeof(int16_t),
344 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
345 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
346 4 * mb_array_size * sizeof(uint8_t), fail)
348 pic->f.motion_subsample_log2 = 2;
/* ...while H.263-family / encoding / MV debugging use 8x8 (b8 stride). */
349 } else if (s->out_format == FMT_H263 || s->encoding ||
350 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
351 for (i = 0; i < 2; i++) {
352 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
353 2 * (b8_array_size + 4) * sizeof(int16_t),
355 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
356 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
357 4 * mb_array_size * sizeof(uint8_t), fail)
359 pic->f.motion_subsample_log2 = 3;
361 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
362 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
363 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
365 pic->f.qstride = s->mb_stride;
366 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
367 1 * sizeof(AVPanScan), fail)
373 fail: // for the FF_ALLOCZ_OR_GOTO macro
375 free_frame_buffer(s, pic);
380 * Deallocate a picture.
/* Free a Picture: release the frame buffer (unless shared) and all the
 * side tables allocated by ff_alloc_picture(). For shared pictures the
 * data pointers are merely cleared — the caller owns the pixels. */
382 static void free_picture(MpegEncContext *s, Picture *pic)
386 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
387 free_frame_buffer(s, pic);
390 av_freep(&pic->mb_var);
391 av_freep(&pic->mc_mb_var);
392 av_freep(&pic->mb_mean);
393 av_freep(&pic->f.mbskip_table);
394 av_freep(&pic->qscale_table_base);
395 av_freep(&pic->mb_type_base);
396 av_freep(&pic->f.dct_coeff);
397 av_freep(&pic->f.pan_scan);
398 pic->f.mb_type = NULL;
399 for (i = 0; i < 2; i++) {
400 av_freep(&pic->motion_val_base[i]);
401 av_freep(&pic->f.ref_index[i]);
/* Shared pixels are not freed here, only forgotten. */
404 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
405 for (i = 0; i < 4; i++) {
407 pic->f.data[i] = NULL;
/* Allocate the per-slice-thread scratch buffers of @s (edge emulation,
 * motion-estimation scratchpads and maps, DCT blocks, and — for H.263 —
 * the AC prediction values). Returns 0 on success, -1 on allocation
 * failure; partially allocated buffers are freed via ff_MPV_common_end().
 * NOTE(review): some lines (success return, braces) are not visible in
 * this excerpt. */
413 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
415 int y_size = s->b8_stride * (2 * s->mb_height + 1);
416 int c_size = s->mb_stride * (s->mb_height + 1);
417 int yc_size = y_size + 2 * c_size;
420 // edge emu needs blocksize + filter length - 1
421 // (= 17x17 for halfpel / 21x21 for h264)
422 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
423 (s->width + 95) * 2 * 21 * 4, fail); // (width + edge + align)*interlaced*MBsize*tolerance
425 // FIXME should be linesize instead of s->width * 2
426 // but that is not known before get_buffer()
427 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
428 (s->width + 95) * 4 * 16 * 2 * sizeof(uint8_t), fail)
/* These scratch areas all alias the single me.scratchpad allocation. */
429 s->me.temp = s->me.scratchpad;
430 s->rd_scratchpad = s->me.scratchpad;
431 s->b_scratchpad = s->me.scratchpad;
432 s->obmc_scratchpad = s->me.scratchpad + 16;
434 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
435 ME_MAP_SIZE * sizeof(uint32_t), fail)
436 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
437 ME_MAP_SIZE * sizeof(uint32_t), fail)
438 if (s->avctx->noise_reduction) {
439 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
440 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered. */
443 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
444 s->block = s->blocks[0];
446 for (i = 0; i < 12; i++) {
447 s->pblocks[i] = &s->block[i];
/* H.263-family AC prediction values: one luma plane then two chroma. */
450 if (s->out_format == FMT_H263) {
452 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
453 yc_size * sizeof(int16_t) * 16, fail);
454 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
455 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
456 s->ac_val[2] = s->ac_val[1] + c_size;
461 return -1; // free() through ff_MPV_common_end()
/* Free everything allocated by init_duplicate_context(). Safe to call on
 * a partially initialized context (av_freep tolerates NULL). */
464 static void free_duplicate_context(MpegEncContext *s)
469 av_freep(&s->edge_emu_buffer);
470 av_freep(&s->me.scratchpad);
/* obmc_scratchpad aliased me.scratchpad (+16); clear the stale pointer. */
474 s->obmc_scratchpad = NULL;
476 av_freep(&s->dct_error_sum);
477 av_freep(&s->me.map);
478 av_freep(&s->me.score_map);
479 av_freep(&s->blocks);
480 av_freep(&s->ac_val_base);
/* Copy the per-thread fields of @src into @bak so they survive a whole-
 * struct memcpy in ff_update_duplicate_context().
 * NOTE(review): most COPY() lines are not visible in this excerpt. */
484 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
486 #define COPY(a) bak->a = src->a
487 COPY(edge_emu_buffer);
492 COPY(obmc_scratchpad);
499 COPY(me.map_generation);
/* Refresh a slice-thread context from the master: save dst's own buffers,
 * memcpy the whole struct from src, then restore dst's buffers and
 * re-point pblocks[] into dst's own block array. */
511 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
515 // FIXME copy only needed parts
517 backup_duplicate_context(&bak, dst);
518 memcpy(dst, src, sizeof(MpegEncContext));
519 backup_duplicate_context(dst, &bak);
520 for (i = 0; i < 12; i++) {
521 dst->pblocks[i] = &dst->block[i];
523 // STOP_TIMER("update_duplicate_context")
524 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: copy decoding state from the source thread's context
 * (@src/s1) into the destination thread's context (@s), rebasing Picture
 * pointers and duplicating the bitstream buffer.
 * NOTE(review): early returns and several braces are on lines not visible
 * in this excerpt. */
527 int ff_mpeg_update_thread_context(AVCodecContext *dst,
528 const AVCodecContext *src)
530 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
535 // FIXME can parameters change on I-frames?
536 // in that case dst may need a reinit
/* First call for this thread: clone s1 wholesale, then give s its own
 * (empty) bitstream buffer and picture index range before common init. */
537 if (!s->context_initialized) {
538 memcpy(s, s1, sizeof(MpegEncContext));
541 s->bitstream_buffer = NULL;
542 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
544 if (s1->context_initialized){
545 s->picture_range_start += MAX_PICTURE_COUNT;
546 s->picture_range_end += MAX_PICTURE_COUNT;
547 ff_MPV_common_init(s);
551 s->avctx->coded_height = s1->avctx->coded_height;
552 s->avctx->coded_width = s1->avctx->coded_width;
553 s->avctx->width = s1->avctx->width;
554 s->avctx->height = s1->avctx->height;
556 s->coded_picture_number = s1->coded_picture_number;
557 s->picture_number = s1->picture_number;
558 s->input_picture_number = s1->input_picture_number;
560 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
/* Byte-range copy of the Picture fields between last_picture and
 * last_picture_ptr (relies on struct layout). */
561 memcpy(&s->last_picture, &s1->last_picture,
562 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
/* Translate s1's picture pointers into s's own picture array. */
564 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
565 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
566 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
568 // Error/bug resilience
569 s->next_p_frame_damaged = s1->next_p_frame_damaged;
570 s->workaround_bugs = s1->workaround_bugs;
571 s->padding_bug_score = s1->padding_bug_score;
/* Byte-range copy of the MPEG-4 fields between time_increment_bits and
 * shape (relies on struct layout). */
574 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
575 (char *) &s1->shape - (char *) &s1->time_increment_bits);
578 s->max_b_frames = s1->max_b_frames;
579 s->low_delay = s1->low_delay;
580 s->dropable = s1->dropable;
582 // DivX handling (doesn't work)
583 s->divx_packed = s1->divx_packed;
/* Duplicate the pending bitstream data, zero-padding the tail. */
585 if (s1->bitstream_buffer) {
586 if (s1->bitstream_buffer_size +
587 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
588 av_fast_malloc(&s->bitstream_buffer,
589 &s->allocated_bitstream_buffer_size,
590 s1->allocated_bitstream_buffer_size);
591 s->bitstream_buffer_size = s1->bitstream_buffer_size;
592 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
593 s1->bitstream_buffer_size);
594 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
595 FF_INPUT_BUFFER_PADDING_SIZE);
598 // MPEG2/interlacing info
599 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
600 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
602 if (!s1->first_field) {
603 s->last_pict_type = s1->pict_type;
604 if (s1->current_picture_ptr)
605 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
607 if (s1->pict_type != AV_PICTURE_TYPE_B) {
608 s->last_non_b_pict_type = s1->pict_type;
616 * Set the given MpegEncContext to common defaults
617 * (same for encoding and decoding).
618 * The changed fields will not depend upon the
619 * prior state of the MpegEncContext.
/* Reset @s to the defaults shared by encoding and decoding: MPEG-1 DC
 * scale tables, identity chroma qscale, progressive frame-coded input,
 * zeroed picture counters, the full picture index range, and a single
 * slice context. */
621 void ff_MPV_common_defaults(MpegEncContext *s)
623 s->y_dc_scale_table =
624 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
625 s->chroma_qscale_table = ff_default_chroma_qscale_table;
626 s->progressive_frame = 1;
627 s->progressive_sequence = 1;
628 s->picture_structure = PICT_FRAME;
630 s->coded_picture_number = 0;
631 s->picture_number = 0;
632 s->input_picture_number = 0;
634 s->picture_in_gop_number = 0;
639 s->picture_range_start = 0;
640 s->picture_range_end = MAX_PICTURE_COUNT;
642 s->slice_context_count = 1;
646 * Set the given MpegEncContext to defaults for decoding.
647 * the changed fields will not depend upon
648 * the prior state of the MpegEncContext.
/* Decoder defaults — currently identical to the common defaults. */
650 void ff_MPV_decode_defaults(MpegEncContext *s)
652 ff_MPV_common_defaults(s);
656 * init common structure for both encoder and decoder.
657 * this assumes that some variables like width/height are already set
/* Initialize the common MpegEncContext state for encoder and decoder:
 * derive macroblock geometry from width/height, allocate all shared
 * tables (MV tables, MB type, quant matrices, picture array, etc.) and
 * set up the per-slice thread contexts.
 * Assumes width/height (and codec_id/out_format) are already set.
 * On failure jumps to a `fail:` label (outside this excerpt) that calls
 * ff_MPV_common_end().
 * NOTE(review): braces, returns and some statements are on lines not
 * visible in this excerpt. */
659 av_cold int ff_MPV_common_init(MpegEncContext *s)
661 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
662 int nb_slices = (HAVE_THREADS &&
663 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
664 s->avctx->thread_count : 1;
666 if (s->encoding && s->avctx->slices)
667 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 rounds mb_height up to a multiple of two MB rows. */
669 if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
670 s->mb_height = (s->height + 31) / 32 * 2;
671 else if (s->codec_id != CODEC_ID_H264)
672 s->mb_height = (s->height + 15) / 16;
674 if (s->avctx->pix_fmt == PIX_FMT_NONE) {
675 av_log(s->avctx, AV_LOG_ERROR,
676 "decoding to PIX_FMT_NONE is not supported.\n");
/* Clamp the slice-thread count to what the frame height allows. */
680 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
683 max_slices = FFMIN(MAX_THREADS, s->mb_height);
685 max_slices = MAX_THREADS;
686 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
687 " reducing to %d\n", nb_slices, max_slices);
688 nb_slices = max_slices;
691 if ((s->width || s->height) &&
692 av_image_check_size(s->width, s->height, 0, s->avctx))
695 ff_dct_common_init(s);
697 s->flags = s->avctx->flags;
698 s->flags2 = s->avctx->flags2;
/* Strides are padded by one extra MB/block column. */
700 s->mb_width = (s->width + 15) / 16;
701 s->mb_stride = s->mb_width + 1;
702 s->b8_stride = s->mb_width * 2 + 1;
703 s->b4_stride = s->mb_width * 4 + 1;
704 mb_array_size = s->mb_height * s->mb_stride;
705 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
707 /* set chroma shifts */
708 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
711 /* set default edge pos, will be overriden in decode_header if needed */
712 s->h_edge_pos = s->mb_width * 16;
713 s->v_edge_pos = s->mb_height * 16;
715 s->mb_num = s->mb_width * s->mb_height;
720 s->block_wrap[3] = s->b8_stride;
722 s->block_wrap[5] = s->mb_stride;
724 y_size = s->b8_stride * (2 * s->mb_height + 1);
725 c_size = s->mb_stride * (s->mb_height + 1);
726 yc_size = y_size + 2 * c_size;
728 /* convert fourcc to upper case */
729 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
730 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
732 s->avctx->coded_frame = &s->current_picture.f;
/* Map raster MB index -> position in the stride-padded MB arrays. */
734 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
735 for (y = 0; y < s->mb_height; y++)
736 for (x = 0; x < s->mb_width; x++)
737 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
739 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
742 /* Allocate MV tables */
743 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
744 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
745 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
746 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
747 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
748 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
/* Working pointers skip the first padded row/column of each table. */
749 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
750 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
751 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
752 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
753 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
754 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
756 if(s->msmpeg4_version){
757 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
759 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
761 /* Allocate MB type table */
762 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
764 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
/* Quantization matrices: 64 coefficients x 32 qscale values. */
766 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
767 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix , 64*32 * sizeof(int), fail)
768 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
769 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16 , 64*32*2 * sizeof(uint16_t), fail)
770 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
771 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16 , 64*32*2 * sizeof(uint16_t), fail)
772 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
773 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
775 if(s->avctx->noise_reduction){
776 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
/* One picture slot block per (frame) thread; see picture_range_*. */
780 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
781 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
782 s->picture_count * sizeof(Picture), fail);
783 for (i = 0; i < s->picture_count; i++) {
784 avcodec_get_frame_defaults(&s->picture[i].f);
787 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
789 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
790 /* interlaced direct mode decoding tables */
791 for (i = 0; i < 2; i++) {
793 for (j = 0; j < 2; j++) {
794 for (k = 0; k < 2; k++) {
795 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
796 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
798 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
799 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
800 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
802 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
805 if (s->out_format == FMT_H263) {
807 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
808 s->coded_block = s->coded_block_base + s->b8_stride + 1;
810 /* cbp, ac_pred, pred_dir */
811 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
812 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
815 if (s->h263_pred || s->h263_plus || !s->encoding) {
817 // MN: we need these for error resilience of intra-frames
818 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
819 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
820 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
821 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the "reset" DC prediction value. */
822 for (i = 0; i < yc_size; i++)
823 s->dc_val_base[i] = 1024;
826 /* which mb is a intra block */
827 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
828 memset(s->mbintra_table, 1, mb_array_size);
830 /* init macroblock skip table */
831 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
832 // Note the + 1 is for a quicker mpeg4 slice_end detection
834 s->parse_context.state = -1;
836 s->context_initialized = 1;
837 s->thread_context[0] = s;
839 // if (s->width && s->height) {
/* Clone a context per additional slice thread, then give each its own
 * scratch buffers and MB-row range. */
841 for (i = 1; i < nb_slices; i++) {
842 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
843 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
846 for (i = 0; i < nb_slices; i++) {
847 if (init_duplicate_context(s->thread_context[i], s) < 0)
849 s->thread_context[i]->start_mb_y =
850 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
851 s->thread_context[i]->end_mb_y =
852 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
855 if (init_duplicate_context(s, s) < 0)
858 s->end_mb_y = s->mb_height;
860 s->slice_context_count = nb_slices;
865 ff_MPV_common_end(s);
869 /* init common structure for both encoder and decoder */
/* Tear down everything ff_MPV_common_init() allocated: slice thread
 * contexts, MV/MB tables, quant matrices, the picture array and parse
 * buffers. Safe on a partially initialized context (av_freep on NULL
 * pointers is a no-op). */
870 void ff_MPV_common_end(MpegEncContext *s)
874 if (s->slice_context_count > 1) {
875 for (i = 0; i < s->slice_context_count; i++) {
876 free_duplicate_context(s->thread_context[i]);
/* thread_context[0] is s itself, so only indices >= 1 are freed. */
878 for (i = 1; i < s->slice_context_count; i++) {
879 av_freep(&s->thread_context[i]);
881 s->slice_context_count = 1;
882 } else free_duplicate_context(s);
884 av_freep(&s->parse_context.buffer);
885 s->parse_context.buffer_size = 0;
887 av_freep(&s->mb_type);
888 av_freep(&s->p_mv_table_base);
889 av_freep(&s->b_forw_mv_table_base);
890 av_freep(&s->b_back_mv_table_base);
891 av_freep(&s->b_bidir_forw_mv_table_base);
892 av_freep(&s->b_bidir_back_mv_table_base);
893 av_freep(&s->b_direct_mv_table_base);
/* The working pointers were offsets into the bases; clear them too. */
894 s->p_mv_table = NULL;
895 s->b_forw_mv_table = NULL;
896 s->b_back_mv_table = NULL;
897 s->b_bidir_forw_mv_table = NULL;
898 s->b_bidir_back_mv_table = NULL;
899 s->b_direct_mv_table = NULL;
900 for (i = 0; i < 2; i++) {
901 for (j = 0; j < 2; j++) {
902 for (k = 0; k < 2; k++) {
903 av_freep(&s->b_field_mv_table_base[i][j][k]);
904 s->b_field_mv_table[i][j][k] = NULL;
906 av_freep(&s->b_field_select_table[i][j]);
907 av_freep(&s->p_field_mv_table_base[i][j]);
908 s->p_field_mv_table[i][j] = NULL;
910 av_freep(&s->p_field_select_table[i]);
913 av_freep(&s->dc_val_base);
914 av_freep(&s->coded_block_base);
915 av_freep(&s->mbintra_table);
916 av_freep(&s->cbp_table);
917 av_freep(&s->pred_dir_table);
919 av_freep(&s->mbskip_table);
920 av_freep(&s->bitstream_buffer);
921 s->allocated_bitstream_buffer_size = 0;
923 av_freep(&s->avctx->stats_out);
924 av_freep(&s->ac_stats);
925 av_freep(&s->error_status_table);
926 av_freep(&s->mb_index2xy);
927 av_freep(&s->lambda_table);
/* The chroma matrices may alias the luma ones; free only when distinct. */
928 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
929 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
930 s->q_chroma_intra_matrix= NULL;
931 s->q_chroma_intra_matrix16= NULL;
932 av_freep(&s->q_intra_matrix);
933 av_freep(&s->q_inter_matrix);
934 av_freep(&s->q_intra_matrix16);
935 av_freep(&s->q_inter_matrix16);
936 av_freep(&s->input_picture);
937 av_freep(&s->reordered_input_picture);
938 av_freep(&s->dct_offset);
/* Frame-thread copies do not own the pictures; only the original frees. */
940 if (s->picture && !s->avctx->internal->is_copy) {
941 for (i = 0; i < s->picture_count; i++) {
942 free_picture(s, &s->picture[i]);
945 av_freep(&s->picture);
946 s->context_initialized = 0;
947 s->last_picture_ptr =
948 s->next_picture_ptr =
949 s->current_picture_ptr = NULL;
950 s->linesize = s->uvlinesize = 0;
952 for (i = 0; i < 3; i++)
953 av_freep(&s->visualization_buffer[i]);
955 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
956 avcodec_default_free_buffers(s->avctx);
/* Build the derived run/level lookup tables of an RLTable: max_level[run],
 * max_run[level] and index_run[run], separately for the "not last" and
 * "last" coefficient halves of the table.
 * @static_store: if non-NULL, tables live in this static buffer (and the
 * function is a no-op if they were already built); otherwise av_malloc.
 * NOTE(review): loop-bound setup for start/end and some braces are on
 * lines not visible in this excerpt. */
959 void ff_init_rl(RLTable *rl,
960 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
962 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
963 uint8_t index_run[MAX_RUN + 1];
964 int last, run, level, start, end, i;
966 /* If table is static, we can quit if rl->max_level[0] is not NULL */
967 if (static_store && rl->max_level[0])
970 /* compute max_level[], max_run[] and index_run[] */
971 for (last = 0; last < 2; last++) {
/* rl->n acts as the "unset" sentinel in index_run. */
980 memset(max_level, 0, MAX_RUN + 1);
981 memset(max_run, 0, MAX_LEVEL + 1);
982 memset(index_run, rl->n, MAX_RUN + 1);
983 for (i = start; i < end; i++) {
984 run = rl->table_run[i];
985 level = rl->table_level[i];
986 if (index_run[run] == rl->n)
988 if (level > max_level[run])
989 max_level[run] = level;
990 if (run > max_run[level])
991 max_run[level] = run;
/* Publish the scratch arrays into static or heap storage. */
994 rl->max_level[last] = static_store[last];
996 rl->max_level[last] = av_malloc(MAX_RUN + 1);
997 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
999 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1001 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1002 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1004 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1006 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1007 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute the per-qscale RL_VLC tables of an RLTable: for each of the
 * 32 qscale values, fold the dequantization (level * qmul + qadd) into
 * the decoded level so the decoder can skip it per coefficient.
 * NOTE(review): qmul setup and special-case bodies are on lines not
 * visible in this excerpt. */
1011 void ff_init_vlc_rl(RLTable *rl)
1015 for (q = 0; q < 32; q++) {
1017 int qadd = (q - 1) | 1;
1023 for (i = 0; i < rl->vlc.table_size; i++) {
1024 int code = rl->vlc.table[i][0];
1025 int len = rl->vlc.table[i][1];
1028 if (len == 0) { // illegal code
1031 } else if (len < 0) { // more bits needed
1035 if (code == rl->n) { // esc
/* "last" codes get run biased by 192 to flag end-of-block. */
1039 run = rl->table_run[code] + 1;
1040 level = rl->table_level[code] * qmul + qadd;
1041 if (code >= rl->last) run += 192;
1044 rl->rl_vlc[q][i].len = len;
1045 rl->rl_vlc[q][i].level = level;
1046 rl->rl_vlc[q][i].run = run;
/* Free every allocated non-reference picture owned by this context;
 * the current picture is kept unless @remove_current is set. */
1051 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1055 /* release non reference frames */
1056 for (i = 0; i < s->picture_count; i++) {
1057 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1058 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1059 (remove_current || &s->picture[i] != s->current_picture_ptr)
1060 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1061 free_frame_buffer(s, &s->picture[i]);
/* Find a free slot in s->picture[] within this context's index range.
 * @shared selects which slots qualify first (unused type 0 slots vs.
 * previously-used ones); exhaustion is a fatal internal error — see the
 * rationale comment below.
 * NOTE(review): the returns and if/else structure around the three scan
 * loops are on lines not visible in this excerpt. */
1066 int ff_find_unused_picture(MpegEncContext *s, int shared)
1071 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1072 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1076 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1077 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1080 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1081 if (s->picture[i].f.data[0] == NULL)
1086 av_log(s->avctx, AV_LOG_FATAL,
1087 "Internal error, picture buffer overflow\n");
1088 /* We could return -1, but the codec would crash trying to draw into a
1089 * non-existing frame anyway. This is safer than waiting for a random crash.
1090 * Also the return of this is never useful, an encoder must only allocate
1091 * as much as allowed in the specification. This has no relationship to how
1092 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1093 * enough for such valid streams).
1094 * Plus, a decoder has to check stream validity and remove frames if too
1095 * many reference frames are around. Waiting for "OOM" is not correct at
1096 * all. Similarly, missing reference frames have to be replaced by
1097 * interpolated/MC frames, anything else is a bug in the codec ...
1103 static void update_noise_reduction(MpegEncContext *s)
1107 for (intra = 0; intra < 2; intra++) {
1108 if (s->dct_count[intra] > (1 << 16)) {
1109 for (i = 0; i < 64; i++) {
1110 s->dct_error_sum[intra][i] >>= 1;
1112 s->dct_count[intra] >>= 1;
1115 for (i = 0; i < 64; i++) {
1116 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1117 s->dct_count[intra] +
1118 s->dct_error_sum[intra][i] / 2) /
1119 (s->dct_error_sum[intra][i] + 1);
/*
 * NOTE(review): extraction-damaged copy -- each line carries the original
 * file's line number and many lines are missing (braces, declarations such
 * as "Picture *pic;" / "int i;", return statements, blank lines).  Code is
 * left byte-identical; only comments added.  Recover from pristine source
 * before editing.
 */
1125 * generic function for encode/decode called after coding/decoding
1126 * the header and before a frame is coded/decoded.
1128 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* H.264 manages its own reference lists; only SVQ3 goes through here */
1134 assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
1135 s->codec_id == CODEC_ID_SVQ3);
1137 if (!ff_thread_can_start_frame(avctx)) {
1138 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1142 /* mark & release old frames */
1143 if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
1144 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1145 s->last_picture_ptr != s->next_picture_ptr &&
1146 s->last_picture_ptr->f.data[0]) {
1147 if (s->last_picture_ptr->owner2 == s)
1148 free_frame_buffer(s, s->last_picture_ptr);
1151 /* release forgotten pictures */
1152 /* if (mpeg124/h263) */
1154 for (i = 0; i < s->picture_count; i++) {
1155 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1156 &s->picture[i] != s->last_picture_ptr &&
1157 &s->picture[i] != s->next_picture_ptr &&
1158 s->picture[i].f.reference) {
/* with frame threading another thread may legitimately own it; only
 * complain in the single-threaded case */
1159 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1160 av_log(avctx, AV_LOG_ERROR,
1161 "releasing zombie picture\n");
1162 free_frame_buffer(s, &s->picture[i]);
1169 ff_release_unused_pictures(s, 1);
/* pick (or reuse) the slot for the picture about to be coded/decoded */
1171 if (s->current_picture_ptr &&
1172 s->current_picture_ptr->f.data[0] == NULL) {
1173 // we already have a unused image
1174 // (maybe it was set before reading the header)
1175 pic = s->current_picture_ptr;
1177 i = ff_find_unused_picture(s, 0);
1180 pic = &s->picture[i];
1183 pic->f.reference = 0;
1185 if (s->codec_id == CODEC_ID_H264)
1186 pic->f.reference = s->picture_structure;
1187 else if (s->pict_type != AV_PICTURE_TYPE_B)
1188 pic->f.reference = 3;
1191 pic->f.coded_picture_number = s->coded_picture_number++;
1193 if (ff_alloc_picture(s, pic, 0) < 0)
1196 s->current_picture_ptr = pic;
1197 // FIXME use only the vars from current_pic
1198 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1199 if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
1200 s->codec_id == CODEC_ID_MPEG2VIDEO) {
1201 if (s->picture_structure != PICT_FRAME)
1202 s->current_picture_ptr->f.top_field_first =
1203 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1205 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1206 !s->progressive_sequence;
1207 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1210 s->current_picture_ptr->f.pict_type = s->pict_type;
1211 // if (s->flags && CODEC_FLAG_QSCALE)
1212 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1213 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1215 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* advance the last/next reference chain on non-B frames */
1217 if (s->pict_type != AV_PICTURE_TYPE_B) {
1218 s->last_picture_ptr = s->next_picture_ptr;
1220 s->next_picture_ptr = s->current_picture_ptr;
1222 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1223 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1224 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1225 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1226 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1227 s->pict_type, s->dropable); */
/* for broken/truncated streams: fabricate dummy reference pictures so
 * prediction has something to read from */
1229 if (s->codec_id != CODEC_ID_H264) {
1230 if ((s->last_picture_ptr == NULL ||
1231 s->last_picture_ptr->f.data[0] == NULL) &&
1232 (s->pict_type != AV_PICTURE_TYPE_I ||
1233 s->picture_structure != PICT_FRAME)) {
1234 if (s->pict_type != AV_PICTURE_TYPE_I)
1235 av_log(avctx, AV_LOG_ERROR,
1236 "warning: first frame is no keyframe\n");
1237 else if (s->picture_structure != PICT_FRAME)
1238 av_log(avctx, AV_LOG_INFO,
1239 "allocate dummy last picture for field based first keyframe\n");
1241 /* Allocate a dummy frame */
1242 i = ff_find_unused_picture(s, 0);
1245 s->last_picture_ptr = &s->picture[i];
1246 s->last_picture_ptr->f.key_frame = 0;
1247 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1248 s->last_picture_ptr = NULL;
/* FLV1/H.263: grey (16) luma so the dummy frame looks neutral */
1252 if(s->codec_id == CODEC_ID_FLV1 || s->codec_id == CODEC_ID_H263){
1253 for(i=0; i<avctx->height; i++)
1254 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
1257 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1258 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1259 s->last_picture_ptr->f.reference = 3;
1261 if ((s->next_picture_ptr == NULL ||
1262 s->next_picture_ptr->f.data[0] == NULL) &&
1263 s->pict_type == AV_PICTURE_TYPE_B) {
1264 /* Allocate a dummy frame */
1265 i = ff_find_unused_picture(s, 0);
1268 s->next_picture_ptr = &s->picture[i];
1269 s->next_picture_ptr->f.key_frame = 0;
1270 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1271 s->next_picture_ptr = NULL;
1274 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1275 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1276 s->next_picture_ptr->f.reference = 3;
1280 if (s->last_picture_ptr)
1281 ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1282 if (s->next_picture_ptr)
1283 ff_copy_picture(&s->next_picture, s->next_picture_ptr);
/* frame threading: record which thread context owns the references */
1285 if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
1286 (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
1287 if (s->next_picture_ptr)
1288 s->next_picture_ptr->owner2 = s;
1289 if (s->last_picture_ptr)
1290 s->last_picture_ptr->owner2 = s;
1293 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1294 s->last_picture_ptr->f.data[0]));
/* field pictures: point data at the right field and double the strides */
1296 if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1298 for (i = 0; i < 4; i++) {
1299 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1300 s->current_picture.f.data[i] +=
1301 s->current_picture.f.linesize[i];
1303 s->current_picture.f.linesize[i] *= 2;
1304 s->last_picture.f.linesize[i] *= 2;
1305 s->next_picture.f.linesize[i] *= 2;
1309 s->err_recognition = avctx->err_recognition;
1311 /* set dequantizer, we can't do it during init as
1312 * it might change for mpeg4 and we can't do it in the header
1313 * decode as init is not called for mpeg4 there yet */
1314 if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1315 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1316 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1317 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1318 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1319 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1321 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1322 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1325 if (s->dct_error_sum) {
1326 assert(s->avctx->noise_reduction && s->encoding);
1327 update_noise_reduction(s);
1330 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1331 return ff_xvmc_field_start(s, avctx);
/*
 * NOTE(review): extraction-damaged copy -- embedded line numbers, missing
 * braces/declarations ("int i;") and some lines absent.  Code left
 * byte-identical; only comments added.
 */
1336 /* generic function for encode/decode called after a
1337 * frame has been coded/decoded. */
1338 void ff_MPV_frame_end(MpegEncContext *s)
1341 /* redraw edges for the frame if decoding didn't complete */
1342 // just to make sure that all data is rendered.
1343 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1344 ff_xvmc_field_end(s);
/* pad out the picture edges (for unrestricted MV prediction) unless a
 * hwaccel owns the surfaces or edge emulation is requested */
1345 } else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1346 !s->avctx->hwaccel &&
1347 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1348 s->unrestricted_mv &&
1349 s->current_picture.f.reference &&
1351 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1352 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1353 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
/* luma plane, then the two chroma planes scaled by the subsampling */
1354 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1355 s->h_edge_pos, s->v_edge_pos,
1356 EDGE_WIDTH, EDGE_WIDTH,
1357 EDGE_TOP | EDGE_BOTTOM);
1358 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1359 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1360 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1361 EDGE_TOP | EDGE_BOTTOM);
1362 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1363 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1364 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1365 EDGE_TOP | EDGE_BOTTOM);
/* remember per-type state used by rate control / header coding */
1370 s->last_pict_type = s->pict_type;
1371 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1372 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1373 s->last_non_b_pict_type = s->pict_type;
1376 /* copy back current_picture variables */
1377 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1378 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1379 s->picture[i] = s->current_picture;
1383 assert(i < MAX_PICTURE_COUNT);
1387 /* release non-reference frames */
1388 for (i = 0; i < s->picture_count; i++) {
1389 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1390 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1391 free_frame_buffer(s, &s->picture[i]);
1395 // clear copies, to avoid confusion
1397 memset(&s->last_picture, 0, sizeof(Picture));
1398 memset(&s->next_picture, 0, sizeof(Picture));
1399 memset(&s->current_picture, 0, sizeof(Picture));
1401 s->avctx->coded_frame = &s->current_picture_ptr->f;
/* let other frame threads know this reference is fully decoded */
1403 if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
1404 ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
/*
 * NOTE(review): extraction-damaged copy -- embedded line numbers; missing
 * lines include the local declarations (looks like x, y, f, fr), the
 * "else" of the steep/shallow split and the guards around the divisions
 * by ex/ey.  Code left byte-identical; only comments added.
 */
1409 * Draw a line from (ex, ey) -> (sx, sy).
1410 * @param w width of the image
1411 * @param h height of the image
1412 * @param stride stride/linesize of the image
1413 * @param color color of the arrow
1415 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1416 int w, int h, int stride, int color)
/* clamp both endpoints into the image */
1420 sx = av_clip(sx, 0, w - 1);
1421 sy = av_clip(sy, 0, h - 1);
1422 ex = av_clip(ex, 0, w - 1);
1423 ey = av_clip(ey, 0, h - 1);
1425 buf[sy * stride + sx] += color;
/* mostly-horizontal case: step in x, blend between two rows using a
 * 16.16 fixed-point fraction (anti-aliasing) */
1427 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1429 FFSWAP(int, sx, ex);
1430 FFSWAP(int, sy, ey);
1432 buf += sx + sy * stride;
1434 f = ((ey - sy) << 16) / ex;
1435 for(x= 0; x <= ex; x++){
1437 fr = (x * f) & 0xFFFF;
1438 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1439 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* mostly-vertical case: step in y, blend between two columns */
1443 FFSWAP(int, sx, ex);
1444 FFSWAP(int, sy, ey);
1446 buf += sx + sy * stride;
1449 f = ((ex - sx) << 16) / ey;
1452 for(y= 0; y <= ey; y++){
1454 fr = (y*f) & 0xFFFF;
1455 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1456 buf[y * stride + x + 1] += (color * fr ) >> 16;
/*
 * NOTE(review): extraction-damaged copy -- the lines computing dx/dy
 * (used at the length test below) are missing.  Code left byte-identical;
 * only comments added.
 */
1462 * Draw an arrow from (ex, ey) -> (sx, sy).
1463 * @param w width of the image
1464 * @param h height of the image
1465 * @param stride stride/linesize of the image
1466 * @param color color of the arrow
1468 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1469 int ey, int w, int h, int stride, int color)
/* loosely clamp endpoints; draw_line() does the strict clamping */
1473 sx = av_clip(sx, -100, w + 100);
1474 sy = av_clip(sy, -100, h + 100);
1475 ex = av_clip(ex, -100, w + 100);
1476 ey = av_clip(ey, -100, h + 100);
/* only draw the arrow head when the vector is longer than 3 pixels */
1481 if (dx * dx + dy * dy > 3 * 3) {
1484 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1486 // FIXME subpixel accuracy
/* normalize the head vector to a fixed length (4.4 fixed point) */
1487 rx = ROUNDED_DIV(rx * 3 << 4, length);
1488 ry = ROUNDED_DIV(ry * 3 << 4, length);
/* two short strokes at right angles form the arrow head */
1490 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1491 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1493 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
/*
 * NOTE(review): extraction-damaged copy -- embedded line numbers; many
 * lines (braces, declarations, else branches, early returns) are missing.
 * Code left byte-identical; only comments added.  Recover from pristine
 * source before editing.
 */
1497 * Print debugging info for the given picture.
1499 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
1501 if (s->avctx->hwaccel || !pict || !pict->mb_type)
/* --- textual per-macroblock dump (skip count / qp / mb type) --- */
1504 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1507 av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1508 av_get_picture_type_char(pict->pict_type));
1509 for (y = 0; y < s->mb_height; y++) {
1510 for (x = 0; x < s->mb_width; x++) {
1511 if (s->avctx->debug & FF_DEBUG_SKIP) {
1512 int count = s->mbskip_table[x + y * s->mb_stride];
1515 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1517 if (s->avctx->debug & FF_DEBUG_QP) {
1518 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1519 pict->qscale_table[x + y * s->mb_stride]);
1521 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1522 int mb_type = pict->mb_type[x + y * s->mb_stride];
1523 // Type & MV direction
1524 if (IS_PCM(mb_type))
1525 av_log(s->avctx, AV_LOG_DEBUG, "P");
1526 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1527 av_log(s->avctx, AV_LOG_DEBUG, "A");
1528 else if (IS_INTRA4x4(mb_type))
1529 av_log(s->avctx, AV_LOG_DEBUG, "i");
1530 else if (IS_INTRA16x16(mb_type))
1531 av_log(s->avctx, AV_LOG_DEBUG, "I");
1532 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1533 av_log(s->avctx, AV_LOG_DEBUG, "d");
1534 else if (IS_DIRECT(mb_type))
1535 av_log(s->avctx, AV_LOG_DEBUG, "D");
1536 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1537 av_log(s->avctx, AV_LOG_DEBUG, "g");
1538 else if (IS_GMC(mb_type))
1539 av_log(s->avctx, AV_LOG_DEBUG, "G");
1540 else if (IS_SKIP(mb_type))
1541 av_log(s->avctx, AV_LOG_DEBUG, "S");
1542 else if (!USES_LIST(mb_type, 1))
1543 av_log(s->avctx, AV_LOG_DEBUG, ">");
1544 else if (!USES_LIST(mb_type, 0))
1545 av_log(s->avctx, AV_LOG_DEBUG, "<");
1547 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1548 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second character: partitioning */
1552 if (IS_8X8(mb_type))
1553 av_log(s->avctx, AV_LOG_DEBUG, "+");
1554 else if (IS_16X8(mb_type))
1555 av_log(s->avctx, AV_LOG_DEBUG, "-");
1556 else if (IS_8X16(mb_type))
1557 av_log(s->avctx, AV_LOG_DEBUG, "|");
1558 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1559 av_log(s->avctx, AV_LOG_DEBUG, " ");
1561 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* third character: interlacing */
1564 if (IS_INTERLACED(mb_type))
1565 av_log(s->avctx, AV_LOG_DEBUG, "=");
1567 av_log(s->avctx, AV_LOG_DEBUG, " ");
1569 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1571 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* --- visual overlays drawn directly into a copy of the picture --- */
1575 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1576 (s->avctx->debug_mv)) {
1577 const int shift = 1 + s->quarter_sample;
1581 int h_chroma_shift, v_chroma_shift, block_height;
1582 const int width = s->avctx->width;
1583 const int height = s->avctx->height;
1584 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1585 const int mv_stride = (s->mb_width << mv_sample_log2) +
1586 (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1587 s->low_delay = 0; // needed to see the vectors without trashing the buffers
/* copy the planes into a private buffer so the overlay never touches
 * the decoder's working frames */
1589 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1590 &h_chroma_shift, &v_chroma_shift);
1591 for (i = 0; i < 3; i++) {
1592 size_t size= (i == 0) ? pict->linesize[i] * height:
1593 pict->linesize[i] * height >> v_chroma_shift;
1594 s->visualization_buffer[i]= av_realloc(s->visualization_buffer[i], size);
1595 memcpy(s->visualization_buffer[i], pict->data[i], size);
1596 pict->data[i] = s->visualization_buffer[i];
1598 pict->type = FF_BUFFER_TYPE_COPY;
1600 ptr = pict->data[0];
1601 block_height = 16 >> v_chroma_shift;
1603 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1605 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1606 const int mb_index = mb_x + mb_y * s->mb_stride;
/* motion-vector overlay: one pass per vector class (P fwd, B fwd,
 * B back), each gated by its own debug_mv flag */
1607 if ((s->avctx->debug_mv) && pict->motion_val) {
1609 for (type = 0; type < 3; type++) {
1613 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1614 (pict->pict_type!= AV_PICTURE_TYPE_P))
1619 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1620 (pict->pict_type!= AV_PICTURE_TYPE_B))
1625 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1626 (pict->pict_type!= AV_PICTURE_TYPE_B))
1631 if (!USES_LIST(pict->mb_type[mb_index], direction))
/* draw one arrow per partition, at the partition center */
1634 if (IS_8X8(pict->mb_type[mb_index])) {
1636 for (i = 0; i < 4; i++) {
1637 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1638 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1639 int xy = (mb_x * 2 + (i & 1) +
1640 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1641 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1642 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1643 draw_arrow(ptr, sx, sy, mx, my, width,
1644 height, s->linesize, 100);
1646 } else if (IS_16X8(pict->mb_type[mb_index])) {
1648 for (i = 0; i < 2; i++) {
1649 int sx = mb_x * 16 + 8;
1650 int sy = mb_y * 16 + 4 + 8 * i;
1651 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1652 int mx = (pict->motion_val[direction][xy][0] >> shift);
1653 int my = (pict->motion_val[direction][xy][1] >> shift);
1655 if (IS_INTERLACED(pict->mb_type[mb_index]))
1658 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1659 height, s->linesize, 100);
1661 } else if (IS_8X16(pict->mb_type[mb_index])) {
1663 for (i = 0; i < 2; i++) {
1664 int sx = mb_x * 16 + 4 + 8 * i;
1665 int sy = mb_y * 16 + 8;
1666 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1667 int mx = pict->motion_val[direction][xy][0] >> shift;
1668 int my = pict->motion_val[direction][xy][1] >> shift;
1670 if (IS_INTERLACED(pict->mb_type[mb_index]))
1673 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1674 height, s->linesize, 100);
1677 int sx= mb_x * 16 + 8;
1678 int sy= mb_y * 16 + 8;
1679 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
1680 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1681 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1682 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP overlay: paint the chroma of each MB proportional to its qscale */
1686 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1687 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1688 0x0101010101010101ULL;
1690 for (y = 0; y < block_height; y++) {
1691 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1692 (block_height * mb_y + y) *
1693 pict->linesize[1]) = c;
1694 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1695 (block_height * mb_y + y) *
1696 pict->linesize[2]) = c;
/* MB-type overlay: color-code each MB type in U/V */
1699 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1701 int mb_type = pict->mb_type[mb_index];
1704 #define COLOR(theta, r) \
1705 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1706 v = (int)(128 + r * sin(theta * 3.141592 / 180));
1710 if (IS_PCM(mb_type)) {
1712 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1713 IS_INTRA16x16(mb_type)) {
1715 } else if (IS_INTRA4x4(mb_type)) {
1717 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1719 } else if (IS_DIRECT(mb_type)) {
1721 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1723 } else if (IS_GMC(mb_type)) {
1725 } else if (IS_SKIP(mb_type)) {
1727 } else if (!USES_LIST(mb_type, 1)) {
1729 } else if (!USES_LIST(mb_type, 0)) {
1732 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1736 u *= 0x0101010101010101ULL;
1737 v *= 0x0101010101010101ULL;
1738 for (y = 0; y < block_height; y++) {
1739 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1740 (block_height * mb_y + y) * pict->linesize[1]) = u;
1741 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1742 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* segmentation lines: XOR pixel rows/columns at partition borders */
1746 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1747 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1748 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1749 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1750 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1752 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1753 for (y = 0; y < 16; y++)
1754 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1755 pict->linesize[0]] ^= 0x80;
1757 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
1758 int dm = 1 << (mv_sample_log2 - 2);
1759 for (i = 0; i < 4; i++) {
1760 int sx = mb_x * 16 + 8 * (i & 1);
1761 int sy = mb_y * 16 + 8 * (i >> 1);
1762 int xy = (mb_x * 2 + (i & 1) +
1763 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1765 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
1766 if (mv[0] != mv[dm] ||
1767 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
1768 for (y = 0; y < 8; y++)
1769 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
1770 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
1771 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
1772 pict->linesize[0]) ^= 0x8080808080808080ULL;
1776 if (IS_INTERLACED(mb_type) &&
1777 s->codec_id == CODEC_ID_H264) {
/* reset skip flag so the next frame's dump starts clean */
1781 s->mbskip_table[mb_index] = 0;
/*
 * NOTE(review): extraction-damaged copy -- the switch cases setting "mvs"
 * per mv_type and the "unhandled:" label are missing.  Code left
 * byte-identical; only comments added.
 */
1788 * find the lowest MB row referenced in the MVs
1790 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* qpel streams already store MVs in 1/4-pel; halfpel MVs get one extra
 * shift so both end up in the same (1/4-pel-like) scale */
1792 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1793 int my, off, i, mvs;
/* field pictures are not handled by this fast path */
1795 if (s->picture_structure != PICT_FRAME) goto unhandled;
1797 switch (s->mv_type) {
/* track the extreme vertical MV components over all "mvs" vectors */
1811 for (i = 0; i < mvs; i++) {
1812 my = s->mv[dir][i][1]<<qpel_shift;
1813 my_max = FFMAX(my_max, my);
1814 my_min = FFMIN(my_min, my);
/* convert the worst-case displacement to whole MB rows (64 = 16<<2) */
1817 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1819 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* fallback: assume the whole reference frame may be needed */
1821 return s->mb_height-1;
1824 /* put block[] to dest[] */
1825 static inline void put_dct(MpegEncContext *s,
1826 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1828 s->dct_unquantize_intra(s, block, i, qscale);
1829 s->dsp.idct_put (dest, line_size, block);
1832 /* add block[] to dest[] */
1833 static inline void add_dct(MpegEncContext *s,
1834 DCTELEM *block, int i, uint8_t *dest, int line_size)
1836 if (s->block_last_index[i] >= 0) {
1837 s->dsp.idct_add (dest, line_size, block);
1841 static inline void add_dequant_dct(MpegEncContext *s,
1842 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1844 if (s->block_last_index[i] >= 0) {
1845 s->dct_unquantize_inter(s, block, i, qscale);
1847 s->dsp.idct_add (dest, line_size, block);
/*
 * NOTE(review): extraction-damaged copy -- embedded line numbers; missing
 * lines appear to include the opening brace, the first element of the
 * chained dc_val assignment and the companion dc_val[1] line before the
 * dc_val[2] store.  Code left byte-identical; only comments added.
 */
1852 * Clean dc, ac, coded_block for the current non-intra MB.
1854 void ff_clean_intra_table_entries(MpegEncContext *s)
1856 int wrap = s->b8_stride;
1857 int xy = s->block_index[0];
/* reset the four luma DC predictors of this MB to the neutral 1024 */
1860 s->dc_val[0][xy + 1 ] =
1861 s->dc_val[0][xy + wrap] =
1862 s->dc_val[0][xy + 1 + wrap] = 1024;
/* clear the stored AC prediction coefficients (two 8x8 rows) */
1864 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1865 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1866 if (s->msmpeg4_version>=3) {
1867 s->coded_block[xy ] =
1868 s->coded_block[xy + 1 ] =
1869 s->coded_block[xy + wrap] =
1870 s->coded_block[xy + 1 + wrap] = 0;
/* chroma predictors are indexed on the MB grid, not the 8x8 grid */
1873 wrap = s->mb_stride;
1874 xy = s->mb_x + s->mb_y * wrap;
1876 s->dc_val[2][xy] = 1024;
1878 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1879 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1881 s->mbintra_table[xy]= 0;
/*
 * NOTE(review): extraction-damaged copy -- embedded line numbers; many
 * lines are missing, including the is_mpeg12 template parameter on the
 * signature's continuation line, braces and several else branches.  Code
 * left byte-identical; only comments added.  Recover from pristine source
 * before editing.
 */
1884 /* generic function called after a macroblock has been parsed by the
1885 decoder or after it has been encoded by the encoder.
1887 Important variables used:
1888 s->mb_intra : true if intra macroblock
1889 s->mv_dir : motion vector direction
1890 s->mv_type : motion vector type
1891 s->mv : motion vector
1892 s->interlaced_dct : true if interlaced dct used (mpeg2)
1894 static av_always_inline
1895 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
1898 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XvMC does its own reconstruction from pblocks; nothing more to do */
1899 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1900 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1904 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1905 /* save DCT coefficients */
1907 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
1908 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1910 for(j=0; j<64; j++){
1911 *dct++ = block[i][s->dsp.idct_permutation[j]];
1912 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
1914 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1918 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
1920 /* update DC predictors for P macroblocks */
1922 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1923 if(s->mbintra_table[mb_xy])
1924 ff_clean_intra_table_entries(s);
1928 s->last_dc[2] = 128 << s->intra_dc_precision;
1931 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1932 s->mbintra_table[mb_xy]=1;
/* full reconstruction, unless encoding and the pixels are never needed */
1934 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1935 uint8_t *dest_y, *dest_cb, *dest_cr;
1936 int dct_linesize, dct_offset;
1937 op_pixels_func (*op_pix)[4];
1938 qpel_mc_func (*op_qpix)[16];
1939 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
1940 const int uvlinesize = s->current_picture.f.linesize[1];
1941 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || s->avctx->lowres;
1942 const int block_size= 8 >> s->avctx->lowres;
1944 /* avoid copy if macroblock skipped in last frame too */
1945 /* skip only during decoding as we might trash the buffers during encoding a bit */
1947 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1949 if (s->mb_skipped) {
1951 assert(s->pict_type!=AV_PICTURE_TYPE_I);
1953 } else if(!s->current_picture.f.reference) {
1956 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT interleaves the two fields: double stride, offset by
 * one line instead of block_size lines */
1960 dct_linesize = linesize << s->interlaced_dct;
1961 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
1965 dest_cb= s->dest[1];
1966 dest_cr= s->dest[2];
/* non-readable B-frame: reconstruct into scratch, copy out at the end */
1968 dest_y = s->b_scratchpad;
1969 dest_cb= s->b_scratchpad+16*linesize;
1970 dest_cr= s->b_scratchpad+32*linesize;
1974 /* motion handling */
1975 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the reference rows we need are decoded */
1978 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
1979 if (s->mv_dir & MV_DIR_FORWARD) {
1980 ff_thread_await_progress(&s->last_picture_ptr->f,
1981 ff_MPV_lowest_referenced_row(s, 0),
1984 if (s->mv_dir & MV_DIR_BACKWARD) {
1985 ff_thread_await_progress(&s->next_picture_ptr->f,
1986 ff_MPV_lowest_referenced_row(s, 1),
/* forward prediction writes (put), backward then averages in (avg) */
1991 op_qpix= s->me.qpel_put;
1992 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
1993 op_pix = s->dsp.put_pixels_tab;
1995 op_pix = s->dsp.put_no_rnd_pixels_tab;
1997 if (s->mv_dir & MV_DIR_FORWARD) {
1998 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
1999 op_pix = s->dsp.avg_pixels_tab;
2000 op_qpix= s->me.qpel_avg;
2002 if (s->mv_dir & MV_DIR_BACKWARD) {
2003 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2007 /* skip dequant / idct if we are really late ;) */
2008 if(s->avctx->skip_idct){
2009 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2010 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2011 || s->avctx->skip_idct >= AVDISCARD_ALL)
2015 /* add dct residue */
/* codecs that keep blocks quantized until now: dequantize + add */
2016 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2017 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2018 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2019 add_dequant_dct(s, block[1], 1, dest_y + 8 , dct_linesize, s->qscale);
2020 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2021 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8 , dct_linesize, s->qscale);
2023 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2024 if (s->chroma_y_shift){
2025 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2026 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2030 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2031 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2032 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2033 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* codecs whose blocks are already dequantized: plain idct-add */
2036 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2037 add_dct(s, block[0], 0, dest_y , dct_linesize);
2038 add_dct(s, block[1], 1, dest_y + 8 , dct_linesize);
2039 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2040 add_dct(s, block[3], 3, dest_y + dct_offset + 8 , dct_linesize);
2042 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2043 if(s->chroma_y_shift){//Chroma420
2044 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2045 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2048 dct_linesize = uvlinesize << s->interlaced_dct;
2049 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*8;
2051 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2052 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2053 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2054 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2055 if(!s->chroma_x_shift){//Chroma444
2056 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2057 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2058 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2059 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2064 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2065 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2068 /* dct only in intra block */
2069 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2070 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2071 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2072 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2073 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2075 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2076 if(s->chroma_y_shift){
2077 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2078 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2082 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2083 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2084 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2085 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* intra, already dequantized (MPEG-1/2 decode path): plain idct-put */
2089 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2090 s->dsp.idct_put(dest_y + 8 , dct_linesize, block[1]);
2091 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2092 s->dsp.idct_put(dest_y + dct_offset + 8 , dct_linesize, block[3]);
2094 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2095 if(s->chroma_y_shift){
2096 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2097 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2100 dct_linesize = uvlinesize << s->interlaced_dct;
2101 dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*8;
2103 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2104 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2105 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2106 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2107 if(!s->chroma_x_shift){//Chroma444
2108 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2109 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2110 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2111 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* scratchpad path: copy the reconstructed MB into the real frame */
2119 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2120 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2121 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2126 void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2128 if(s->out_format == FMT_MPEG1) {
2129 MPV_decode_mb_internal(s, block, 1);
2132 MPV_decode_mb_internal(s, block, 0);
/*
 * NOTE(review): extraction-damaged copy -- embedded line numbers; missing
 * lines include braces, an early return after the doc line, the offset[1]
 * assignment and the "src" fallback branch.  Code left byte-identical;
 * only comments added.
 */
2136 * @param h is the normal height, this will be reduced automatically if needed for the last row
2138 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2139 const int field_pic= s->picture_structure != PICT_FRAME;
/* pad the edges of the just-decoded band (same conditions as the
 * whole-frame edge drawing in ff_MPV_frame_end) */
2145 if (!s->avctx->hwaccel
2146 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2147 && s->unrestricted_mv
2148 && s->current_picture.f.reference
2150 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2151 int sides = 0, edge_h;
2152 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2153 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2154 if (y==0) sides |= EDGE_TOP;
2155 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2157 edge_h= FFMIN(h, s->v_edge_pos - y);
2159 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2160 s->linesize, s->h_edge_pos, edge_h,
2161 EDGE_WIDTH, EDGE_WIDTH, sides);
2162 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2163 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2164 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2165 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2166 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2167 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2170 h= FFMIN(h, s->avctx->height - y);
2172 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2174 if (s->avctx->draw_horiz_band) {
2176 int offset[AV_NUM_DATA_POINTERS];
/* in display order (low delay / B frames) hand out the current frame,
 * otherwise the previous one */
2179 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2180 src = &s->current_picture_ptr->f;
2181 else if(s->last_picture_ptr)
2182 src = &s->last_picture_ptr->f;
2186 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2187 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2190 offset[0]= y * s->linesize;
2192 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2193 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2199 s->avctx->draw_horiz_band(s->avctx, src, offset,
2200 y, s->picture_structure, h);
2204 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2205 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2206 const int uvlinesize = s->current_picture.f.linesize[1];
2207 const int mb_size= 4 - s->avctx->lowres;
2209 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2210 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2211 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2212 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2213 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2214 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2215 //block_index is not used by mpeg2, so it is not affected by chroma_format
2217 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2218 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2219 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2221 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2223 if(s->picture_structure==PICT_FRAME){
2224 s->dest[0] += s->mb_y * linesize << mb_size;
2225 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2226 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2228 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2229 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2230 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2231 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/* NOTE(review): extraction-garbled span — the original file's line numbers
 * are fused into each statement and some lines are missing (the opening
 * brace / 'int i;' around 2237, 'return;' indentation, the for-loop closing
 * brace at 2248, and trailing resets after 2260). Code left byte-identical;
 * restore from a pristine copy before compiling. */
2236 void ff_mpeg_flush(AVCodecContext *avctx){
/* Flush the decoder on seek: release every picture buffer we own (or that
 * the user gave us) and reset all inter-frame parsing state. */
2238 MpegEncContext *s = avctx->priv_data;
/* nothing to do when the context was never fully initialized */
2240 if(s==NULL || s->picture==NULL)
2243 for(i=0; i<s->picture_count; i++){
/* only free buffers that actually hold data and whose type we may release */
2244 if (s->picture[i].f.data[0] &&
2245 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2246 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2247 free_frame_buffer(s, &s->picture[i]);
/* drop all reference-picture pointers: nothing survives a flush */
2249 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2251 s->mb_x= s->mb_y= 0;
/* reset the bitstream parser so no stale partial frame survives */
2254 s->parse_context.state= -1;
2255 s->parse_context.frame_start_found= 0;
2256 s->parse_context.overread= 0;
2257 s->parse_context.overread_index= 0;
2258 s->parse_context.index= 0;
2259 s->parse_context.last_index= 0;
2260 s->bitstream_buffer_size=0;
2264 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2265 DCTELEM *block, int n, int qscale)
2267 int i, level, nCoeffs;
2268 const uint16_t *quant_matrix;
2270 nCoeffs= s->block_last_index[n];
2272 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
2273 /* XXX: only mpeg1 */
2274 quant_matrix = s->intra_matrix;
2275 for(i=1;i<=nCoeffs;i++) {
2276 int j= s->intra_scantable.permutated[i];
2281 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2282 level = (level - 1) | 1;
2285 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2286 level = (level - 1) | 1;
2293 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2294 DCTELEM *block, int n, int qscale)
2296 int i, level, nCoeffs;
2297 const uint16_t *quant_matrix;
2299 nCoeffs= s->block_last_index[n];
2301 quant_matrix = s->inter_matrix;
2302 for(i=0; i<=nCoeffs; i++) {
2303 int j= s->intra_scantable.permutated[i];
2308 level = (((level << 1) + 1) * qscale *
2309 ((int) (quant_matrix[j]))) >> 4;
2310 level = (level - 1) | 1;
2313 level = (((level << 1) + 1) * qscale *
2314 ((int) (quant_matrix[j]))) >> 4;
2315 level = (level - 1) | 1;
2322 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2323 DCTELEM *block, int n, int qscale)
2325 int i, level, nCoeffs;
2326 const uint16_t *quant_matrix;
2328 if(s->alternate_scan) nCoeffs= 63;
2329 else nCoeffs= s->block_last_index[n];
2331 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
2332 quant_matrix = s->intra_matrix;
2333 for(i=1;i<=nCoeffs;i++) {
2334 int j= s->intra_scantable.permutated[i];
2339 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2342 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2349 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2350 DCTELEM *block, int n, int qscale)
2352 int i, level, nCoeffs;
2353 const uint16_t *quant_matrix;
2356 if(s->alternate_scan) nCoeffs= 63;
2357 else nCoeffs= s->block_last_index[n];
2359 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
2361 quant_matrix = s->intra_matrix;
2362 for(i=1;i<=nCoeffs;i++) {
2363 int j= s->intra_scantable.permutated[i];
2368 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2371 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2380 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2381 DCTELEM *block, int n, int qscale)
2383 int i, level, nCoeffs;
2384 const uint16_t *quant_matrix;
2387 if(s->alternate_scan) nCoeffs= 63;
2388 else nCoeffs= s->block_last_index[n];
2390 quant_matrix = s->inter_matrix;
2391 for(i=0; i<=nCoeffs; i++) {
2392 int j= s->intra_scantable.permutated[i];
2397 level = (((level << 1) + 1) * qscale *
2398 ((int) (quant_matrix[j]))) >> 4;
2401 level = (((level << 1) + 1) * qscale *
2402 ((int) (quant_matrix[j]))) >> 4;
2411 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2412 DCTELEM *block, int n, int qscale)
2414 int i, level, qmul, qadd;
2417 assert(s->block_last_index[n]>=0);
2422 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
2423 qadd = (qscale - 1) | 1;
2430 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2432 for(i=1; i<=nCoeffs; i++) {
2436 level = level * qmul - qadd;
2438 level = level * qmul + qadd;
2445 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2446 DCTELEM *block, int n, int qscale)
2448 int i, level, qmul, qadd;
2451 assert(s->block_last_index[n]>=0);
2453 qadd = (qscale - 1) | 1;
2456 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2458 for(i=0; i<=nCoeffs; i++) {
2462 level = level * qmul - qadd;
2464 level = level * qmul + qadd;
2472 * set qscale and update qscale dependent variables.
2474 void ff_set_qscale(MpegEncContext * s, int qscale)
2478 else if (qscale > 31)
2482 s->chroma_qscale= s->chroma_qscale_table[qscale];
2484 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2485 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2488 void ff_MPV_report_decode_progress(MpegEncContext *s)
2490 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2491 ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);