2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
38 #include "xvmc_internal.h"
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 DCTELEM *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
61 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default luma-qscale -> chroma-qscale mapping: the identity, i.e. chroma
 * is quantised with the same scale as luma unless a codec installs its own
 * (nonlinear) table. */
67 static const uint8_t ff_default_chroma_qscale_table[32] = {
68 //  0   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15
69  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
70 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* DC scale table indexed by qscale: MPEG-1 always uses a constant DC
 * quantiser of 8, independent of the macroblock qscale. */
73 const uint8_t ff_mpeg1_dc_scale_table[128] = {
74 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
75     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82     8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* Constant DC scale of 4 for every qscale; selected via
 * ff_mpeg2_dc_scale_table[] below (entry 1). */
85 static const uint8_t mpeg2_dc_scale_table1[128] = {
86 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
87     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94     4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* Constant DC scale of 2 for every qscale; selected via
 * ff_mpeg2_dc_scale_table[] below (entry 2). */
97 static const uint8_t mpeg2_dc_scale_table2[128] = {
98 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
99     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106     2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* Constant DC scale of 1 (no DC scaling) for every qscale; selected via
 * ff_mpeg2_dc_scale_table[] below (entry 3). */
109 static const uint8_t mpeg2_dc_scale_table3[128] = {
110 //  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
111     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Table of the four DC-scale tables above (divisors 8, 4, 2, 1).
 * NOTE(review): presumably indexed by the MPEG-2 intra_dc_precision field
 * (0..3) — confirm against the callers / ISO/IEC 13818-2. */
121 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
122     ff_mpeg1_dc_scale_table,
123     mpeg2_dc_scale_table1,
124     mpeg2_dc_scale_table2,
125     mpeg2_dc_scale_table3,
/* Pixel-format lists for 4:2:0 decoding (plain and hwaccel variants).
 * NOTE(review): the initializer contents are not visible in this extract. */
128 const enum PixelFormat ff_pixfmt_list_420[] = {
133 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/* Scan [p, end) for an MPEG start-code prefix (00 00 01 xx).
 * *state is a rolling 32-bit window of the most recent bytes, carried
 * across calls so a start code split between buffers is still detected.
 * NOTE(review): several lines of this function are elided in this extract;
 * comments below describe only the visible statements. */
141 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
143 uint32_t * restrict state)
// Prime the rolling state with up to 3 bytes (tmp == 0x100 means the
// previous bytes already formed the 00 00 01 prefix).
151 for (i = 0; i < 3; i++) {
152 uint32_t tmp = *state << 8;
153 *state = tmp + *(p++);
154 if (tmp == 0x100 || p == end)
// Advance by as many bytes as the trailing context rules out: the skip
// distance depends on which of the last bytes could still begin a prefix.
159 if (p[-1] > 1 ) p += 3;
160 else if (p[-2] ) p += 2;
161 else if (p[-3]|(p[-1]-1)) p++;
// Clamp to the buffer and step back over the 4 bytes just consumed.
168 p = FFMIN(p, end) - 4;
174 /* init common dct for both encoder and decoder */
/* Initialize DSP utils and the DCT/scantable machinery shared by the
 * encoder and decoder: install the C dequantizers (possibly replaced by
 * the per-arch init calls below) and build the permuted scan tables. */
175 av_cold int ff_dct_common_init(MpegEncContext *s)
177 ff_dsputil_init(&s->dsp, s->avctx);
// Default C implementations of the per-codec dequantizers.
179 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
180 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
181 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
182 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
183 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
// Bit-exact output requested: use the exact (slower) MPEG-2 intra path.
184 if (s->flags & CODEC_FLAG_BITEXACT)
185 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
186 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
// Architecture-specific overrides (guarded by #if blocks elided here).
189 ff_MPV_common_init_x86(s);
191 ff_MPV_common_init_axp(s);
193 ff_MPV_common_init_mmi(s);
195 ff_MPV_common_init_arm(s);
197 ff_MPV_common_init_altivec(s);
199 ff_MPV_common_init_bfin(s);
202 /* load & permutate scantables
203 * note: only wmv uses different ones
// Scan tables are permuted to match the IDCT's coefficient ordering.
205 if (s->alternate_scan) {
206 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
207 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
209 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
210 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
212 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
213 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy a Picture and tag the destination as a copy so buffer-release
 * logic does not free the shared underlying data twice. */
218 void ff_copy_picture(Picture *dst, Picture *src)
221 dst->f.type = FF_BUFFER_TYPE_COPY;
225 * Release a frame buffer
/* Release a frame buffer back to its owner (threaded release path), except
 * for the WM image/screen codecs which manage buffers internally and use
 * the default release instead. Also frees hwaccel private data. */
227 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
229 /* WM Image / Screen codecs allocate internal buffers with different
230 * dimensions / colorspaces; ignore user-defined callbacks for these. */
231 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
232 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
233 s->codec_id != AV_CODEC_ID_MSS2)
234 ff_thread_release_buffer(s->avctx, &pic->f);
// else branch (elided line): internal-buffer codecs take the default path.
236 avcodec_default_release_buffer(s->avctx, &pic->f);
237 av_freep(&pic->f.hwaccel_picture_private);
241 * Allocate a frame buffer
/* Allocate a frame buffer via get_buffer() (threaded or default), plus any
 * hwaccel private data. Validates that the returned strides match the
 * context and that the two chroma planes share a stride; frees the buffer
 * and fails otherwise. Returns < 0 on error. */
243 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
// Allocate hwaccel-private storage before requesting the buffer.
247 if (s->avctx->hwaccel) {
248 assert(!pic->f.hwaccel_picture_private);
249 if (s->avctx->hwaccel->priv_data_size) {
250 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
251 if (!pic->f.hwaccel_picture_private) {
252 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
// Same WM image/screen codec exception as in free_frame_buffer().
258 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
259 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
260 s->codec_id != AV_CODEC_ID_MSS2)
261 r = ff_thread_get_buffer(s->avctx, &pic->f);
263 r = avcodec_default_get_buffer(s->avctx, &pic->f);
// A buffer with no type or no data is as bad as an outright failure.
265 if (r < 0 || !pic->f.type || !pic->f.data[0]) {
266 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
267 r, pic->f.type, pic->f.data[0]);
268 av_freep(&pic->f.hwaccel_picture_private);
// Strides must stay constant across frames once established.
272 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
273 s->uvlinesize != pic->f.linesize[1])) {
274 av_log(s->avctx, AV_LOG_ERROR,
275 "get_buffer() failed (stride changed)\n");
276 free_frame_buffer(s, pic);
// U and V planes are assumed to share one stride throughout this codebase.
280 if (pic->f.linesize[1] != pic->f.linesize[2]) {
281 av_log(s->avctx, AV_LOG_ERROR,
282 "get_buffer() failed (uv stride mismatch)\n");
283 free_frame_buffer(s, pic);
291 * Allocate a Picture.
292 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Allocate a Picture and all of its per-macroblock side tables (variance,
 * qscale, mb_type, motion vectors, ref indices, ...). With shared != 0 the
 * pixel data is supplied by the caller; otherwise get_buffer() is used via
 * alloc_frame_buffer(). Returns < 0 / error on failure. */
294 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
296 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
298 // the + 1 is needed so memset(,,stride*height) does not sig11
300 const int mb_array_size = s->mb_stride * s->mb_height;
301 const int b8_array_size = s->b8_stride * s->mb_height * 2;
302 const int b4_array_size = s->b4_stride * s->mb_height * 4;
// Shared path: pixels already exist, just tag the picture.
307 assert(pic->f.data[0]);
308 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
309 pic->f.type = FF_BUFFER_TYPE_SHARED;
// Non-shared path: the picture must not already own pixel data.
311 assert(!pic->f.data[0]);
313 if (alloc_frame_buffer(s, pic) < 0)
// Record the strides the first successful allocation established.
316 s->linesize = pic->f.linesize[0];
317 s->uvlinesize = pic->f.linesize[1];
// Side tables are only allocated once per Picture (qscale_table is the marker).
320 if (pic->f.qscale_table == NULL) {
322 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
323 mb_array_size * sizeof(int16_t), fail)
324 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
325 mb_array_size * sizeof(int16_t), fail)
326 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
327 mb_array_size * sizeof(int8_t ), fail)
330 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
331 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
332 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
333 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
335 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
336 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
// Offset the working pointers past the guard row/column of the base arrays.
338 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
339 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
// H.264 stores motion vectors at 4x4 (b4) granularity ...
340 if (s->out_format == FMT_H264) {
341 for (i = 0; i < 2; i++) {
342 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
343 2 * (b4_array_size + 4) * sizeof(int16_t),
345 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
346 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
347 4 * mb_array_size * sizeof(uint8_t), fail)
349 pic->f.motion_subsample_log2 = 2;
// ... while H.263-family / encoder / MV-debug use 8x8 (b8) granularity.
350 } else if (s->out_format == FMT_H263 || s->encoding ||
351 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
352 for (i = 0; i < 2; i++) {
353 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
354 2 * (b8_array_size + 4) * sizeof(int16_t),
356 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
357 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
358 4 * mb_array_size * sizeof(uint8_t), fail)
360 pic->f.motion_subsample_log2 = 3;
// Optional DCT coefficient dump buffer for debugging.
362 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
363 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
364 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
366 pic->f.qstride = s->mb_stride;
367 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
368 1 * sizeof(AVPanScan), fail)
374 fail: // for the FF_ALLOCZ_OR_GOTO macro
376 free_frame_buffer(s, pic);
381 * Deallocate a picture.
/* Deallocate a Picture: release the frame buffer (unless the pixels are
 * caller-owned / shared) and free every per-macroblock side table that
 * ff_alloc_picture() created, NULLing the derived pointers. */
383 static void free_picture(MpegEncContext *s, Picture *pic)
// Only release pixel data this module allocated itself.
387 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
388 free_frame_buffer(s, pic);
391 av_freep(&pic->mb_var);
392 av_freep(&pic->mc_mb_var);
393 av_freep(&pic->mb_mean);
394 av_freep(&pic->f.mbskip_table);
395 av_freep(&pic->qscale_table_base);
396 pic->f.qscale_table = NULL;
397 av_freep(&pic->mb_type_base);
398 pic->f.mb_type = NULL;
399 av_freep(&pic->f.dct_coeff);
400 av_freep(&pic->f.pan_scan);
401 pic->f.mb_type = NULL;
402 for (i = 0; i < 2; i++) {
403 av_freep(&pic->motion_val_base[i]);
404 av_freep(&pic->f.ref_index[i]);
405 pic->f.motion_val[i] = NULL;
// Shared pictures never owned the planes; just drop the pointers.
408 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
409 for (i = 0; i < 4; i++) {
411 pic->f.data[i] = NULL;
/* Allocate the per-thread (slice-context) scratch buffers: edge emulation,
 * motion-estimation scratchpads and maps, DCT blocks, and (for H.263-style
 * codecs) the AC prediction values. Returns -1 on allocation failure; the
 * partially allocated state is cleaned up later by ff_MPV_common_end(). */
417 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
419 int y_size = s->b8_stride * (2 * s->mb_height + 1);
420 int c_size = s->mb_stride * (s->mb_height + 1);
421 int yc_size = y_size + 2 * c_size;
424 // edge emu needs blocksize + filter length - 1
425 // (= 17x17 for halfpel / 21x21 for h264)
426 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
427 (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
429 // FIXME should be linesize instead of s->width * 2
430 // but that is not known before get_buffer()
431 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
432 (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
// The scratchpads deliberately alias one allocation (never live at once).
433 s->me.temp = s->me.scratchpad;
434 s->rd_scratchpad = s->me.scratchpad;
435 s->b_scratchpad = s->me.scratchpad;
436 s->obmc_scratchpad = s->me.scratchpad + 16;
438 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
439 ME_MAP_SIZE * sizeof(uint32_t), fail)
440 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
441 ME_MAP_SIZE * sizeof(uint32_t), fail)
442 if (s->avctx->noise_reduction) {
443 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
444 2 * 64 * sizeof(int), fail)
// 12 blocks of 64 coefficients, double-buffered.
447 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
448 s->block = s->blocks[0];
450 for (i = 0; i < 12; i++) {
451 s->pblocks[i] = &s->block[i];
// H.263-family codecs additionally need AC prediction storage.
454 if (s->out_format == FMT_H263) {
456 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
457 yc_size * sizeof(int16_t) * 16, fail);
458 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
459 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
460 s->ac_val[2] = s->ac_val[1] + c_size;
465 return -1; // free() through ff_MPV_common_end()
/* Free everything init_duplicate_context() allocated for one slice
 * context. The aliased scratchpad pointers share one allocation, so only
 * me.scratchpad is freed and obmc_scratchpad is merely cleared. */
468 static void free_duplicate_context(MpegEncContext *s)
473 av_freep(&s->edge_emu_buffer);
474 av_freep(&s->me.scratchpad);
478 s->obmc_scratchpad = NULL;
480 av_freep(&s->dct_error_sum);
481 av_freep(&s->me.map);
482 av_freep(&s->me.score_map);
483 av_freep(&s->blocks);
484 av_freep(&s->ac_val_base);
/* Save the per-thread pointers/fields of src into bak so they survive the
 * wholesale memcpy in ff_update_duplicate_context(). Additional COPY()
 * lines are elided in this extract. */
488 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
490 #define COPY(a) bak->a = src->a
491 COPY(edge_emu_buffer);
496 COPY(obmc_scratchpad);
503 COPY(me.map_generation);
/* Overwrite dst with src's state while preserving dst's own per-thread
 * buffers: back them up, memcpy the whole context, then restore them and
 * rebase the block pointers into dst's own storage. */
515 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
519 // FIXME copy only needed parts
521 backup_duplicate_context(&bak, dst);
522 memcpy(dst, src, sizeof(MpegEncContext));
523 backup_duplicate_context(dst, &bak);
// pblocks must point into dst's block array, not src's.
524 for (i = 0; i < 12; i++) {
525 dst->pblocks[i] = &dst->block[i];
527 // STOP_TIMER("update_duplicate_context")
528 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading context update: copy the decoding state of src's context
 * (s1) into dst's (s), (re)initializing dst on first use or on a frame
 * size change, rebasing picture pointers, and duplicating the bitstream
 * buffer. Several lines are elided in this extract. */
531 int ff_mpeg_update_thread_context(AVCodecContext *dst,
532 const AVCodecContext *src)
534 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
// Nothing to do when copying onto itself or before the source is ready.
536 if (dst == src || !s1->context_initialized)
539 // FIXME can parameters change on I-frames?
540 // in that case dst may need a reinit
// First use of this thread's context: clone the source, then give the
// clone its own picture range and bitstream buffer before full init.
541 if (!s->context_initialized) {
542 memcpy(s, s1, sizeof(MpegEncContext));
545 s->picture_range_start += MAX_PICTURE_COUNT;
546 s->picture_range_end += MAX_PICTURE_COUNT;
547 s->bitstream_buffer = NULL;
548 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
550 ff_MPV_common_init(s);
// Resolution changed mid-stream: tear down and rebuild the frame-size
// dependent state.
553 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
555 s->context_reinit = 0;
556 s->height = s1->height;
557 s->width = s1->width;
558 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
562 s->avctx->coded_height = s1->avctx->coded_height;
563 s->avctx->coded_width = s1->avctx->coded_width;
564 s->avctx->width = s1->avctx->width;
565 s->avctx->height = s1->avctx->height;
567 s->coded_picture_number = s1->coded_picture_number;
568 s->picture_number = s1->picture_number;
569 s->input_picture_number = s1->input_picture_number;
// Copy the Picture array and the contiguous last/next/current block
// (field-range memcpy relies on struct layout).
571 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
572 memcpy(&s->last_picture, &s1->last_picture,
573 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
// Translate picture pointers from s1's array into s's array.
575 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
576 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
577 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
579 // Error/bug resilience
580 s->next_p_frame_damaged = s1->next_p_frame_damaged;
581 s->workaround_bugs = s1->workaround_bugs;
// MPEG-4 timing/shape fields, copied as one contiguous range.
584 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
585 (char *) &s1->shape - (char *) &s1->time_increment_bits);
588 s->max_b_frames = s1->max_b_frames;
589 s->low_delay = s1->low_delay;
590 s->dropable = s1->dropable;
592 // DivX handling (doesn't work)
593 s->divx_packed = s1->divx_packed;
// Duplicate the pending bitstream data (with zeroed padding) so this
// thread can parse independently.
595 if (s1->bitstream_buffer) {
596 if (s1->bitstream_buffer_size +
597 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
598 av_fast_malloc(&s->bitstream_buffer,
599 &s->allocated_bitstream_buffer_size,
600 s1->allocated_bitstream_buffer_size);
601 s->bitstream_buffer_size = s1->bitstream_buffer_size;
602 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
603 s1->bitstream_buffer_size);
604 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
605 FF_INPUT_BUFFER_PADDING_SIZE);
608 // MPEG2/interlacing info
609 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
610 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
// Track last picture type / lambda only once a full frame has been seen.
612 if (!s1->first_field) {
613 s->last_pict_type = s1->pict_type;
614 if (s1->current_picture_ptr)
615 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
617 if (s1->pict_type != AV_PICTURE_TYPE_B) {
618 s->last_non_b_pict_type = s1->pict_type;
626 * Set the given MpegEncContext to common defaults
627 * (same for encoding and decoding).
628 * The changed fields will not depend upon the
629 * prior state of the MpegEncContext.
/* Reset an MpegEncContext to defaults shared by encoder and decoder;
 * every field set here is independent of the context's prior state. */
631 void ff_MPV_common_defaults(MpegEncContext *s)
// MPEG-1 DC scaling and identity chroma qscale are the baseline tables.
633 s->y_dc_scale_table =
634 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
635 s->chroma_qscale_table = ff_default_chroma_qscale_table;
636 s->progressive_frame = 1;
637 s->progressive_sequence = 1;
638 s->picture_structure = PICT_FRAME;
640 s->coded_picture_number = 0;
641 s->picture_number = 0;
642 s->input_picture_number = 0;
644 s->picture_in_gop_number = 0;
649 s->picture_range_start = 0;
650 s->picture_range_end = MAX_PICTURE_COUNT;
652 s->slice_context_count = 1;
656 * Set the given MpegEncContext to defaults for decoding.
657 * the changed fields will not depend upon
658 * the prior state of the MpegEncContext.
/* Decoder defaults: currently just the common encoder/decoder defaults
 * (decoder-specific lines, if any, are elided in this extract). */
660 void ff_MPV_decode_defaults(MpegEncContext *s)
662 ff_MPV_common_defaults(s);
666 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Allocate every MpegEncContext table whose size depends on the picture
 * resolution: MB index map, MV tables, MB type / lambda / complexity
 * tables, error-resilience tables, interlaced direct-mode tables, H.263
 * coded-block and prediction tables, DC values, skip tables and the debug
 * visualization buffers. Returns AVERROR(ENOMEM) via the fail label. */
668 static int init_context_frame(MpegEncContext *s)
670 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
// Derive MB grid geometry from the pixel dimensions (stride has +1 guard).
672 s->mb_width = (s->width + 15) / 16;
673 s->mb_stride = s->mb_width + 1;
674 s->b8_stride = s->mb_width * 2 + 1;
675 s->b4_stride = s->mb_width * 4 + 1;
676 mb_array_size = s->mb_height * s->mb_stride;
677 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
679 /* set default edge pos, will be overriden
680 * in decode_header if needed */
681 s->h_edge_pos = s->mb_width * 16;
682 s->v_edge_pos = s->mb_height * 16;
684 s->mb_num = s->mb_width * s->mb_height;
689 s->block_wrap[3] = s->b8_stride;
691 s->block_wrap[5] = s->mb_stride;
693 y_size = s->b8_stride * (2 * s->mb_height + 1);
694 c_size = s->mb_stride * (s->mb_height + 1);
695 yc_size = y_size + 2 * c_size;
// Map linear MB number -> x + y*mb_stride position.
697 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
698 fail); // error ressilience code looks cleaner with this
699 for (y = 0; y < s->mb_height; y++)
700 for (x = 0; x < s->mb_width; x++)
701 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
703 s->mb_index2xy[s->mb_height * s->mb_width] =
704 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
707 /* Allocate MV tables */
708 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
709 mv_table_size * 2 * sizeof(int16_t), fail);
710 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
711 mv_table_size * 2 * sizeof(int16_t), fail);
712 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
713 mv_table_size * 2 * sizeof(int16_t), fail);
714 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
715 mv_table_size * 2 * sizeof(int16_t), fail);
716 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
717 mv_table_size * 2 * sizeof(int16_t), fail);
718 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
719 mv_table_size * 2 * sizeof(int16_t), fail);
// Working pointers skip the guard row/column of each base table.
720 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
721 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
722 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
723 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
725 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
727 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
729 /* Allocate MB type table */
730 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
731 sizeof(uint16_t), fail); // needed for encoding
733 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
736 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
737 mb_array_size * sizeof(float), fail);
738 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
739 mb_array_size * sizeof(float), fail);
// Error-resilience state, one byte per MB.
743 FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
744 mb_array_size * sizeof(uint8_t), fail);
745 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
746 mb_array_size * sizeof(uint8_t), fail);
// Interlaced direct-mode tables (MPEG-4 or interlaced ME): per
// direction/field/parity MV tables plus field select tables.
748 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
749 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
750 /* interlaced direct mode decoding tables */
751 for (i = 0; i < 2; i++) {
753 for (j = 0; j < 2; j++) {
754 for (k = 0; k < 2; k++) {
755 FF_ALLOCZ_OR_GOTO(s->avctx,
756 s->b_field_mv_table_base[i][j][k],
757 mv_table_size * 2 * sizeof(int16_t),
759 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
762 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
763 mb_array_size * 2 * sizeof(uint8_t), fail);
764 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
765 mv_table_size * 2 * sizeof(int16_t), fail);
766 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
769 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
770 mb_array_size * 2 * sizeof(uint8_t), fail);
// H.263-family: coded-block bits plus cbp / prediction-direction tables.
773 if (s->out_format == FMT_H263) {
775 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
776 s->coded_block = s->coded_block_base + s->b8_stride + 1;
778 /* cbp, ac_pred, pred_dir */
779 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
780 mb_array_size * sizeof(uint8_t), fail);
781 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
782 mb_array_size * sizeof(uint8_t), fail);
785 if (s->h263_pred || s->h263_plus || !s->encoding) {
787 // MN: we need these for error resilience of intra-frames
788 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
789 yc_size * sizeof(int16_t), fail);
790 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
791 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
792 s->dc_val[2] = s->dc_val[1] + c_size;
// 1024 is the neutral DC predictor value.
793 for (i = 0; i < yc_size; i++)
794 s->dc_val_base[i] = 1024;
797 /* which mb is a intra block */
798 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
799 memset(s->mbintra_table, 1, mb_array_size);
801 /* init macroblock skip table */
802 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
803 // Note the + 1 is for a quicker mpeg4 slice_end detection
// Debug-only visualization planes sized with edge padding.
805 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
806 s->avctx->debug_mv) {
807 s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
808 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
809 s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
810 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
811 s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
812 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
817 return AVERROR(ENOMEM);
821 * init common structure for both encoder and decoder.
822 * this assumes that some variables like width/height are already set
/* Initialize the common encoder/decoder state: derive MB geometry, clamp
 * the slice-thread count, allocate encoder tables and the Picture array,
 * set up resolution-dependent tables via init_context_frame(), and create
 * one duplicate context per slice thread. Assumes width/height etc. are
 * already set. Cleans up through ff_MPV_common_end() on failure. */
824 av_cold int ff_MPV_common_init(MpegEncContext *s)
827 int nb_slices = (HAVE_THREADS &&
828 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
829 s->avctx->thread_count : 1;
831 if (s->encoding && s->avctx->slices)
832 nb_slices = s->avctx->slices;
// Interlaced MPEG-2 rounds MB height to a multiple of two field rows;
// H.264 computes its own mb_height elsewhere.
834 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
835 s->mb_height = (s->height + 31) / 32 * 2;
836 else if (s->codec_id != AV_CODEC_ID_H264)
837 s->mb_height = (s->height + 15) / 16;
839 if (s->avctx->pix_fmt == PIX_FMT_NONE) {
840 av_log(s->avctx, AV_LOG_ERROR,
841 "decoding to PIX_FMT_NONE is not supported.\n");
// Cap slice count at MAX_THREADS and at one slice per MB row.
845 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
848 max_slices = FFMIN(MAX_THREADS, s->mb_height);
850 max_slices = MAX_THREADS;
851 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
852 " reducing to %d\n", nb_slices, max_slices);
853 nb_slices = max_slices;
856 if ((s->width || s->height) &&
857 av_image_check_size(s->width, s->height, 0, s->avctx))
860 ff_dct_common_init(s);
862 s->flags = s->avctx->flags;
863 s->flags2 = s->avctx->flags2;
865 if (s->width && s->height) {
866 /* set chroma shifts */
867 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
870 /* convert fourcc to upper case */
871 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
873 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
875 s->avctx->coded_frame = &s->current_picture.f;
// Encoder-only tables (this region sits in an encoding branch; the
// enclosing condition is elided in this extract).
878 if (s->msmpeg4_version) {
879 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
880 2 * 2 * (MAX_LEVEL + 1) *
881 (MAX_RUN + 1) * 2 * sizeof(int), fail);
883 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
885 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
886 64 * 32 * sizeof(int), fail);
887 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
888 64 * 32 * sizeof(int), fail);
889 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
890 64 * 32 * 2 * sizeof(uint16_t), fail);
891 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
892 64 * 32 * 2 * sizeof(uint16_t), fail);
893 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
894 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
895 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
896 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
898 if (s->avctx->noise_reduction) {
899 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
900 2 * 64 * sizeof(uint16_t), fail);
// One picture pool slot group per frame thread.
905 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
906 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
907 s->picture_count * sizeof(Picture), fail);
908 for (i = 0; i < s->picture_count; i++) {
909 avcodec_get_frame_defaults(&s->picture[i].f);
912 if (s->width && s->height) {
913 if ((err = init_context_frame(s)))
916 s->parse_context.state = -1;
919 s->context_initialized = 1;
920 s->thread_context[0] = s;
// Create slice-thread contexts and split MB rows evenly among them.
922 if (s->width && s->height) {
924 for (i = 1; i < nb_slices; i++) {
925 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
926 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
929 for (i = 0; i < nb_slices; i++) {
930 if (init_duplicate_context(s->thread_context[i], s) < 0)
932 s->thread_context[i]->start_mb_y =
933 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
934 s->thread_context[i]->end_mb_y =
935 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
938 if (init_duplicate_context(s, s) < 0)
941 s->end_mb_y = s->mb_height;
943 s->slice_context_count = nb_slices;
948 ff_MPV_common_end(s);
953 * Frees and resets MpegEncContext fields depending on the resolution.
954 * Is used during resolution changes to avoid a full reinitialization of the
/* Free and reset every resolution-dependent table allocated by
 * init_context_frame(); used on resolution changes to avoid a full
 * context reinitialization. */
957 static int free_context_frame(MpegEncContext *s)
961 av_freep(&s->mb_type);
962 av_freep(&s->p_mv_table_base);
963 av_freep(&s->b_forw_mv_table_base);
964 av_freep(&s->b_back_mv_table_base);
965 av_freep(&s->b_bidir_forw_mv_table_base);
966 av_freep(&s->b_bidir_back_mv_table_base);
967 av_freep(&s->b_direct_mv_table_base);
// The derived (offset) pointers must not dangle after the bases go.
968 s->p_mv_table = NULL;
969 s->b_forw_mv_table = NULL;
970 s->b_back_mv_table = NULL;
971 s->b_bidir_forw_mv_table = NULL;
972 s->b_bidir_back_mv_table = NULL;
973 s->b_direct_mv_table = NULL;
974 for (i = 0; i < 2; i++) {
975 for (j = 0; j < 2; j++) {
976 for (k = 0; k < 2; k++) {
977 av_freep(&s->b_field_mv_table_base[i][j][k]);
978 s->b_field_mv_table[i][j][k] = NULL;
980 av_freep(&s->b_field_select_table[i][j]);
981 av_freep(&s->p_field_mv_table_base[i][j]);
982 s->p_field_mv_table[i][j] = NULL;
984 av_freep(&s->p_field_select_table[i]);
987 av_freep(&s->dc_val_base);
988 av_freep(&s->coded_block_base);
989 av_freep(&s->mbintra_table);
990 av_freep(&s->cbp_table);
991 av_freep(&s->pred_dir_table);
993 av_freep(&s->mbskip_table);
995 av_freep(&s->error_status_table);
996 av_freep(&s->er_temp_buffer);
997 av_freep(&s->mb_index2xy);
998 av_freep(&s->lambda_table);
999 av_freep(&s->cplx_tab);
1000 av_freep(&s->bits_tab);
1002 s->linesize = s->uvlinesize = 0;
1004 for (i = 0; i < 3; i++)
1005 av_freep(&s->visualization_buffer[i]);
// Frame-threaded decoders keep their buffers; others release them here.
1007 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
1008 avcodec_default_free_buffers(s->avctx);
/* Rebuild the resolution-dependent state after a mid-stream frame size
 * change: free the slice contexts and frame tables, mark all pictures for
 * reallocation, recompute mb_height, then re-run init_context_frame() and
 * recreate the slice contexts (mirrors ff_MPV_common_init()). */
1013 int ff_MPV_common_frame_size_change(MpegEncContext *s)
// Tear down the old per-slice contexts (thread_context[0] is s itself).
1017 if (s->slice_context_count > 1) {
1018 for (i = 0; i < s->slice_context_count; i++) {
1019 free_duplicate_context(s->thread_context[i]);
1021 for (i = 1; i < s->slice_context_count; i++) {
1022 av_freep(&s->thread_context[i]);
1025 free_duplicate_context(s);
1027 free_context_frame(s);
// Existing pictures keep their data but must be reallocated on next use.
1030 for (i = 0; i < s->picture_count; i++) {
1031 s->picture[i].needs_realloc = 1;
1034 s->last_picture_ptr =
1035 s->next_picture_ptr =
1036 s->current_picture_ptr = NULL;
// Same mb_height derivation as ff_MPV_common_init().
1039 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1040 s->mb_height = (s->height + 31) / 32 * 2;
1041 else if (s->codec_id != AV_CODEC_ID_H264)
1042 s->mb_height = (s->height + 15) / 16;
1044 if ((s->width || s->height) &&
1045 av_image_check_size(s->width, s->height, 0, s->avctx))
1046 return AVERROR_INVALIDDATA;
1048 if ((err = init_context_frame(s)))
1051 s->thread_context[0] = s;
// Recreate the slice contexts and re-split MB rows among them.
1053 if (s->width && s->height) {
1054 int nb_slices = s->slice_context_count;
1055 if (nb_slices > 1) {
1056 for (i = 1; i < nb_slices; i++) {
1057 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1058 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1061 for (i = 0; i < nb_slices; i++) {
1062 if (init_duplicate_context(s->thread_context[i], s) < 0)
1064 s->thread_context[i]->start_mb_y =
1065 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1066 s->thread_context[i]->end_mb_y =
1067 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1070 if (init_duplicate_context(s, s) < 0)
1073 s->end_mb_y = s->mb_height;
1075 s->slice_context_count = nb_slices;
1080 ff_MPV_common_end(s);
1084 /* init common structure for both encoder and decoder */
/* Tear down everything ff_MPV_common_init() created: slice contexts,
 * parse/bitstream buffers, encoder tables, the Picture pool and the
 * frame-size-dependent tables. Safe to call on a partially initialized
 * context (it is the failure path of the init functions). */
1085 void ff_MPV_common_end(MpegEncContext *s)
1089 if (s->slice_context_count > 1) {
1090 for (i = 0; i < s->slice_context_count; i++) {
1091 free_duplicate_context(s->thread_context[i]);
1093 for (i = 1; i < s->slice_context_count; i++) {
1094 av_freep(&s->thread_context[i]);
1096 s->slice_context_count = 1;
1097 } else free_duplicate_context(s);
1099 av_freep(&s->parse_context.buffer);
1100 s->parse_context.buffer_size = 0;
1102 av_freep(&s->bitstream_buffer);
1103 s->allocated_bitstream_buffer_size = 0;
1105 av_freep(&s->avctx->stats_out);
1106 av_freep(&s->ac_stats);
1108 av_freep(&s->q_intra_matrix);
1109 av_freep(&s->q_inter_matrix);
1110 av_freep(&s->q_intra_matrix16);
1111 av_freep(&s->q_inter_matrix16);
1112 av_freep(&s->input_picture);
1113 av_freep(&s->reordered_input_picture);
1114 av_freep(&s->dct_offset);
// Thread copies share the picture pool; only the owner frees it.
1116 if (s->picture && !s->avctx->internal->is_copy) {
1117 for (i = 0; i < s->picture_count; i++) {
1118 free_picture(s, &s->picture[i]);
1121 av_freep(&s->picture);
1123 free_context_frame(s);
1125 s->context_initialized = 0;
1126 s->last_picture_ptr =
1127 s->next_picture_ptr =
1128 s->current_picture_ptr = NULL;
1129 s->linesize = s->uvlinesize = 0;
/* Build the derived run/level lookup tables (max_level[], max_run[],
 * index_run[]) of an RLTable from its raw run/level arrays, for both the
 * "not last" and "last" coefficient groups. With static_store the tables
 * live in caller-provided static memory (and the function is idempotent);
 * otherwise they are av_malloc()ed. */
1132 void ff_init_rl(RLTable *rl,
1133 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1135 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1136 uint8_t index_run[MAX_RUN + 1];
1137 int last, run, level, start, end, i;
1139 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1140 if (static_store && rl->max_level[0])
1143 /* compute max_level[], max_run[] and index_run[] */
1144 for (last = 0; last < 2; last++) {
// (start/end selection for this group is elided in this extract)
1153 memset(max_level, 0, MAX_RUN + 1);
1154 memset(max_run, 0, MAX_LEVEL + 1);
// rl->n doubles as the "no entry" sentinel in index_run.
1155 memset(index_run, rl->n, MAX_RUN + 1);
1156 for (i = start; i < end; i++) {
1157 run = rl->table_run[i];
1158 level = rl->table_level[i];
1159 if (index_run[run] == rl->n)
1161 if (level > max_level[run])
1162 max_level[run] = level;
1163 if (run > max_run[level])
1164 max_run[level] = run;
// Copy the scratch tables into static storage or fresh allocations;
// the three tables are packed consecutively inside static_store[last].
1167 rl->max_level[last] = static_store[last];
1169 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1170 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1172 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1174 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1175 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1177 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1179 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1180 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Build the per-qscale RL VLC tables (rl->rl_vlc[q]) from the table's
 * generic VLC, pre-multiplying levels by the quantizer (qmul/qadd) so
 * the decoder avoids a separate dequantization step per coefficient.
 * NOTE(review): declarations (i, q, run, level), the "qmul" computation,
 * and the branches that fill run/level for illegal / continuation /
 * escape entries are elided in this excerpt.
 */
void ff_init_vlc_rl(RLTable *rl)
    for (q = 0; q < 32; q++) {
        int qadd = (q - 1) | 1;   /* forces an odd rounding offset */
        for (i = 0; i < rl->vlc.table_size; i++) {
            int code = rl->vlc.table[i][0];
            int len = rl->vlc.table[i][1];
            if (len == 0) { // illegal code
            } else if (len < 0) { // more bits needed
                if (code == rl->n) { // esc
                /* normal code: pre-scale level, bias run by 1 */
                run = rl->table_run[code] + 1;
                level = rl->table_level[code] * qmul + qadd;
                if (code >= rl->last) run += 192;   /* marks "last" codes */
            rl->rl_vlc[q][i].len = len;
            rl->rl_vlc[q][i].level = level;
            rl->rl_vlc[q][i].run = run;
/**
 * Release the frame buffers of all non-reference pictures owned by this
 * context; the current picture is kept unless remove_current is set.
 */
void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
    /* release non reference frames */
    for (i = 0; i < s->picture_count; i++) {
        if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
            /* only touch pictures we own (slice/frame threading) */
            (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
            (remove_current || &s->picture[i] != s->current_picture_ptr)
            /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
            free_frame_buffer(s, &s->picture[i]);
/**
 * Check whether a picture slot can be (re)used for a new frame:
 * empty, or flagged for reallocation and owned by this context.
 * NOTE(review): the return statements of these branches are elided in
 * this excerpt.
 */
static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
    if (pic->f.data[0] == NULL)
    if (pic->needs_realloc)
    if (!pic->owner2 || pic->owner2 == s)
/**
 * Scan the managed picture range for a reusable slot.
 * Three passes in decreasing preference: completely empty internal slot,
 * then unused slot of matching buffer type, then any unused slot.
 * NOTE(review): the returns and the shared/non-shared branch framing are
 * elided in this excerpt — presumably the passes selected depend on
 * the `shared` flag; verify against the full source.
 * @return picture index, or AVERROR_INVALIDDATA when no slot is free.
 */
static int find_unused_picture(MpegEncContext *s, int shared)
    for (i = s->picture_range_start; i < s->picture_range_end; i++) {
        if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
    for (i = s->picture_range_start; i < s->picture_range_end; i++) {
        if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
    for (i = s->picture_range_start; i < s->picture_range_end; i++) {
        if (pic_is_unused(s, &s->picture[i]))

    return AVERROR_INVALIDDATA;
/**
 * Public wrapper around find_unused_picture(): when the chosen slot is
 * flagged needs_realloc, free its old contents and reset the frame to
 * defaults before handing the index back.
 * NOTE(review): the final "return ret;" is elided in this excerpt.
 */
int ff_find_unused_picture(MpegEncContext *s, int shared)
    int ret = find_unused_picture(s, shared);

    if (ret >= 0 && ret < s->picture_range_end) {
        if (s->picture[ret].needs_realloc) {
            s->picture[ret].needs_realloc = 0;
            free_picture(s, &s->picture[ret]);
            avcodec_get_frame_defaults(&s->picture[ret].f);
/**
 * Refresh the per-coefficient DCT offset table used for noise reduction
 * from the accumulated DCT error statistics. Accumulators are halved
 * once the sample count exceeds 2^16 so recent frames dominate.
 */
static void update_noise_reduction(MpegEncContext *s)
    /* two independent statistic sets, indexed by `intra` (0/1) */
    for (intra = 0; intra < 2; intra++) {
        if (s->dct_count[intra] > (1 << 16)) {
            /* decay: halve error sums and count together */
            for (i = 0; i < 64; i++) {
                s->dct_error_sum[intra][i] >>= 1;
            s->dct_count[intra] >>= 1;

        for (i = 0; i < 64; i++) {
            /* offset ~ noise_reduction * count / error_sum, with rounding;
             * +1 in the divisor avoids division by zero */
            s->dct_offset[intra][i] = (s->avctx->noise_reduction *
                                       s->dct_count[intra] +
                                       s->dct_error_sum[intra][i] / 2) /
                                      (s->dct_error_sum[intra][i] + 1);
/**
 * generic function for encode/decode called after coding/decoding
 * the header and before a frame is coded/decoded.
 *
 * Releases stale reference frames, picks/allocates the current picture,
 * sets up last/next picture pointers (allocating dummy frames where a
 * reference is missing), applies field-picture addressing, and selects
 * the dequantizers for the stream type.
 * NOTE(review): many original lines (declarations "int i; Picture *pic;",
 * error returns, closing braces) are elided in this excerpt.
 */
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
    /* mark & release old frames */
    if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
        if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
            s->last_picture_ptr != s->next_picture_ptr &&
            s->last_picture_ptr->f.data[0]) {
            if (s->last_picture_ptr->owner2 == s)
                free_frame_buffer(s, s->last_picture_ptr);

        /* release forgotten pictures */
        /* if (mpeg124/h263) */
            for (i = 0; i < s->picture_count; i++) {
                if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
                    &s->picture[i] != s->last_picture_ptr &&
                    &s->picture[i] != s->next_picture_ptr &&
                    s->picture[i].f.reference && !s->picture[i].needs_realloc) {
                    /* with frame threading another thread may still own it,
                     * so only warn in the single-thread case */
                    if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                        av_log(avctx, AV_LOG_ERROR,
                               "releasing zombie picture\n");
                    free_frame_buffer(s, &s->picture[i]);

    ff_release_unused_pictures(s, 1);

    if (s->current_picture_ptr &&
        s->current_picture_ptr->f.data[0] == NULL) {
        // we already have a unused image
        // (maybe it was set before reading the header)
        pic = s->current_picture_ptr;
        i = ff_find_unused_picture(s, 0);
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
        pic = &s->picture[i];

    pic->f.reference = 0;
        /* H.264 uses the picture structure as the reference mask;
         * other codecs mark full-frame references with 3 */
        if (s->codec_id == AV_CODEC_ID_H264)
            pic->f.reference = s->picture_structure;
        else if (s->pict_type != AV_PICTURE_TYPE_B)
            pic->f.reference = 3;

    pic->f.coded_picture_number = s->coded_picture_number++;

    if (ff_alloc_picture(s, pic, 0) < 0)

    s->current_picture_ptr = pic;
    // FIXME use only the vars from current_pic
    s->current_picture_ptr->f.top_field_first = s->top_field_first;
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        if (s->picture_structure != PICT_FRAME)
            s->current_picture_ptr->f.top_field_first =
                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
    s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
                                                 !s->progressive_sequence;
    s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;

    s->current_picture_ptr->f.pict_type = s->pict_type;
    // if (s->flags && CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    /* advance the reference chain (B-frames do not become references) */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
            s->next_picture_ptr = s->current_picture_ptr;
    /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
       s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
       s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
       s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
       s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
       s->pict_type, s->dropable); */

    if (s->codec_id != AV_CODEC_ID_H264) {
        /* a P/B frame with no usable last picture: synthesize a gray
         * dummy reference so decoding can continue */
        if ((s->last_picture_ptr == NULL ||
             s->last_picture_ptr->f.data[0] == NULL) &&
            (s->pict_type != AV_PICTURE_TYPE_I ||
             s->picture_structure != PICT_FRAME)) {
            if (s->pict_type != AV_PICTURE_TYPE_I)
                av_log(avctx, AV_LOG_ERROR,
                       "warning: first frame is no keyframe\n");
            else if (s->picture_structure != PICT_FRAME)
                av_log(avctx, AV_LOG_INFO,
                       "allocate dummy last picture for field based first keyframe\n");

            /* Allocate a dummy frame */
            i = ff_find_unused_picture(s, 0);
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            s->last_picture_ptr = &s->picture[i];
            if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
                s->last_picture_ptr = NULL;
            /* mark both fields fully decoded for frame threading */
            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
            s->last_picture_ptr->f.reference = 3;
        /* a B frame with no next picture gets a dummy too */
        if ((s->next_picture_ptr == NULL ||
             s->next_picture_ptr->f.data[0] == NULL) &&
            s->pict_type == AV_PICTURE_TYPE_B) {
            /* Allocate a dummy frame */
            i = ff_find_unused_picture(s, 0);
                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            s->next_picture_ptr = &s->picture[i];
            if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
                s->next_picture_ptr = NULL;
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
            s->next_picture_ptr->f.reference = 3;

    if (s->last_picture_ptr)
        ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if (s->next_picture_ptr)
        ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    /* under frame threading, claim ownership of the references we use */
    if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
        (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3)) {
        if (s->next_picture_ptr)
            s->next_picture_ptr->owner2 = s;
        if (s->last_picture_ptr)
            s->last_picture_ptr->owner2 = s;

    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                 s->last_picture_ptr->f.data[0]));

    /* field pictures: offset to the selected field and double the
     * line strides so plane addressing sees only that field */
    if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f.data[i] +=
                    s->current_picture.f.linesize[i];
            s->current_picture.f.linesize[i] *= 2;
            s->last_picture.f.linesize[i] *= 2;
            s->next_picture.f.linesize[i] *= 2;

    s->err_recognition = avctx->err_recognition;

    /* set dequantizer, we can't do it during init as
     * it might change for mpeg4 and we can't do it in the header
     * decode as init is not called for mpeg4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;

    if (s->dct_error_sum) {
        assert(s->avctx->noise_reduction && s->encoding);
        update_noise_reduction(s);

    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);
/* generic function for encode/decode called after a
 * frame has been coded/decoded.
 * Draws edge padding around the reconstructed frame when needed,
 * updates last-frame bookkeeping, writes the working current_picture
 * copy back into the picture pool, releases non-reference frames and
 * reports decode completion to waiting frame threads.
 * NOTE(review): declarations ("int i;"), closing braces and some
 * framing lines are elided in this excerpt. */
void ff_MPV_frame_end(MpegEncContext *s)
    /* redraw edges for the frame if decoding didn't complete */
    // just to make sure that all data is rendered.
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        ff_xvmc_field_end(s);
    } else if ((s->error_count || s->encoding) &&
               !s->avctx->hwaccel &&
               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
               s->unrestricted_mv &&
               s->current_picture.f.reference &&
               !(s->flags & CODEC_FLAG_EMU_EDGE)) {
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        /* pad luma, then the two chroma planes scaled by the subsampling */
        s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
                          s->h_edge_pos, s->v_edge_pos,
                          EDGE_WIDTH, EDGE_WIDTH,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);

    s->last_pict_type = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B) {
        s->last_non_b_pict_type = s->pict_type;
    /* copy back current_picture variables */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
            s->picture[i] = s->current_picture;
    assert(i < MAX_PICTURE_COUNT);

        /* release non-reference frames */
        for (i = 0; i < s->picture_count; i++) {
            if (s->picture[i].f.data[0] && !s->picture[i].f.reference
                /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
                free_frame_buffer(s, &s->picture[i]);
    // clear copies, to avoid confusion
        memset(&s->last_picture, 0, sizeof(Picture));
        memset(&s->next_picture, 0, sizeof(Picture));
        memset(&s->current_picture, 0, sizeof(Picture));
    s->avctx->coded_frame = &s->current_picture_ptr->f;

    /* tell frame threads this reference is fully decoded */
    if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
1582 * Draw a line from (ex, ey) -> (sx, sy).
1583 * @param w width of the image
1584 * @param h height of the image
1585 * @param stride stride/linesize of the image
1586 * @param color color of the arrow
1588 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1589 int w, int h, int stride, int color)
1593 sx = av_clip(sx, 0, w - 1);
1594 sy = av_clip(sy, 0, h - 1);
1595 ex = av_clip(ex, 0, w - 1);
1596 ey = av_clip(ey, 0, h - 1);
1598 buf[sy * stride + sx] += color;
1600 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1602 FFSWAP(int, sx, ex);
1603 FFSWAP(int, sy, ey);
1605 buf += sx + sy * stride;
1607 f = ((ey - sy) << 16) / ex;
1608 for (x = 0; x = ex; x++) {
1610 fr = (x * f) & 0xFFFF;
1611 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1612 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1616 FFSWAP(int, sx, ex);
1617 FFSWAP(int, sy, ey);
1619 buf += sx + sy * stride;
1622 f = ((ex - sx) << 16) / ey;
1625 for (y = 0; y = ey; y++) {
1627 fr = (y * f) & 0xFFFF;
1628 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1629 buf[y * stride + x + 1] += (color * fr ) >> 16;
/**
 * Draw an arrow from (ex, ey) -> (sx, sy).
 * The shaft is a plain line; when it is longer than 3 pixels, two short
 * head strokes are added at the (sx, sy) end.
 * @param w      width of the image
 * @param h      height of the image
 * @param stride stride/linesize of the image
 * @param color  color of the arrow
 * NOTE(review): the declarations of dx/dy/rx/ry and their assignments
 * (dx = ex - sx etc.) are elided in this excerpt.
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color)
    /* loosely clamp endpoints; draw_line() does the hard clipping */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    /* only draw a head when the shaft is longer than 3 pixels */
    if (dx * dx + dy * dy > 3 * 3) {
        int length = ff_sqrt((rx * rx + ry * ry) << 8);

        // FIXME subpixel accuracy
        /* normalize the head strokes to a fixed length */
        rx = ROUNDED_DIV(rx * 3 << 4, length);
        ry = ROUNDED_DIV(ry * 3 << 4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
/**
 * Print debugging info for the given picture.
 * Depending on s->avctx->debug / debug_mv flags this logs per-MB skip
 * counts, qscale values and macroblock-type characters, and/or draws
 * motion vectors, QP and MB-type visualizations directly into a copy
 * of the picture (s->visualization_buffer).
 * NOTE(review): numerous original lines (early return, declarations,
 * break statements, closing braces, continue statements) are elided in
 * this excerpt.
 */
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
    if (s->avctx->hwaccel || !pict || !pict->mb_type)

    if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
        case AV_PICTURE_TYPE_I:
            av_log(s->avctx,AV_LOG_DEBUG,"I\n");
        case AV_PICTURE_TYPE_P:
            av_log(s->avctx,AV_LOG_DEBUG,"P\n");
        case AV_PICTURE_TYPE_B:
            av_log(s->avctx,AV_LOG_DEBUG,"B\n");
        case AV_PICTURE_TYPE_S:
            av_log(s->avctx,AV_LOG_DEBUG,"S\n");
        case AV_PICTURE_TYPE_SI:
            av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
        case AV_PICTURE_TYPE_SP:
            av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
        /* one text row per MB row; each MB contributes up to 3 columns
         * (skip count, qscale, type characters) */
        for (y = 0; y < s->mb_height; y++) {
            for (x = 0; x < s->mb_width; x++) {
                if (s->avctx->debug & FF_DEBUG_SKIP) {
                    int count = s->mbskip_table[x + y * s->mb_stride];
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                if (s->avctx->debug & FF_DEBUG_QP) {
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d",
                           pict->qscale_table[x + y * s->mb_stride]);
                if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
                    int mb_type = pict->mb_type[x + y * s->mb_stride];
                    // Type & MV direction
                    if (IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if (IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if (IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if (IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if (IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if (IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if (!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if (!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");

                    /* partition shape character */
                    if (IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if (IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if (IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                        av_log(s->avctx, AV_LOG_DEBUG, "?");

                    if (IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
            av_log(s->avctx, AV_LOG_DEBUG, "\n");

    if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
        (s->avctx->debug_mv)) {
        const int shift = 1 + s->quarter_sample;
        int h_chroma_shift, v_chroma_shift, block_height;
        const int width = s->avctx->width;
        const int height = s->avctx->height;
        const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
        const int mv_stride = (s->mb_width << mv_sample_log2) +
                              (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
        s->low_delay = 0; // needed to see the vectors without trashing the buffers
        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
                                      &h_chroma_shift, &v_chroma_shift);
        /* work on a private copy of the three planes so the real output
         * buffers are not trashed */
        for (i = 0; i < 3; i++) {
            memcpy(s->visualization_buffer[i], pict->data[i],
                   (i == 0) ? pict->linesize[i] * height:
                              pict->linesize[i] * height >> v_chroma_shift);
            pict->data[i] = s->visualization_buffer[i];
        pict->type = FF_BUFFER_TYPE_COPY;
        ptr = pict->data[0];
        block_height = 16 >> v_chroma_shift;

        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_index = mb_x + mb_y * s->mb_stride;
                if ((s->avctx->debug_mv) && pict->motion_val) {
                    /* type selects which MV set is visualized:
                     * P forward, B forward, B backward (elided case bodies
                     * set `direction`) */
                    for (type = 0; type < 3; type++) {
                        if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
                            (pict->pict_type!= AV_PICTURE_TYPE_P))
                        if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
                            (pict->pict_type!= AV_PICTURE_TYPE_B))
                        if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
                            (pict->pict_type!= AV_PICTURE_TYPE_B))
                        if (!USES_LIST(pict->mb_type[mb_index], direction))
                        if (IS_8X8(pict->mb_type[mb_index])) {
                            /* one arrow per 8x8 sub-block */
                            for (i = 0; i < 4; i++) {
                                int sx = mb_x * 16 + 4 + 8 * (i & 1);
                                int sy = mb_y * 16 + 4 + 8 * (i >> 1);
                                int xy = (mb_x * 2 + (i & 1) +
                                          (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                                int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
                                int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width,
                                           height, s->linesize, 100);
                        } else if (IS_16X8(pict->mb_type[mb_index])) {
                            for (i = 0; i < 2; i++) {
                                int sx = mb_x * 16 + 8;
                                int sy = mb_y * 16 + 4 + 8 * i;
                                int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
                                int mx = (pict->motion_val[direction][xy][0] >> shift);
                                int my = (pict->motion_val[direction][xy][1] >> shift);

                                if (IS_INTERLACED(pict->mb_type[mb_index]))

                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
                                           height, s->linesize, 100);
                        } else if (IS_8X16(pict->mb_type[mb_index])) {
                            for (i = 0; i < 2; i++) {
                                int sx = mb_x * 16 + 4 + 8 * i;
                                int sy = mb_y * 16 + 8;
                                int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
                                int mx = pict->motion_val[direction][xy][0] >> shift;
                                int my = pict->motion_val[direction][xy][1] >> shift;

                                if (IS_INTERLACED(pict->mb_type[mb_index]))

                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
                                           height, s->linesize, 100);
                            /* 16x16 MB: single arrow from the MB center.
                             * NOTE(review): ">> shift + sx" parses as
                             * ">> (shift + sx)" because >> binds looser than +;
                             * the 8x8 case above writes "(... >> shift) + sx" —
                             * the parentheses look accidentally dropped here.
                             * Confirm against upstream before relying on this. */
                            int sx = mb_x * 16 + 8;
                            int sy = mb_y * 16 + 8;
                            int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
                            int mx = pict->motion_val[direction][xy][0] >> shift + sx;
                            int my = pict->motion_val[direction][xy][1] >> shift + sy;
                            draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);

                if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
                    /* paint both chroma planes with a gray level derived
                     * from the MB qscale (max qscale 31 -> mid gray 128) */
                    uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
                                 0x0101010101010101ULL;
                    for (y = 0; y < block_height; y++) {
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
                                      (block_height * mb_y + y) *
                                      pict->linesize[1]) = c;
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
                                      (block_height * mb_y + y) *
                                      pict->linesize[2]) = c;
                if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
                    int mb_type = pict->mb_type[mb_index];
/* map an angle/radius to a U/V chroma pair — each MB type gets a hue */
#define COLOR(theta, r) \
    u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
    v = (int)(128 + r * sin(theta * 3.141592 / 180));

                    /* elided COLOR(...) invocations pick the hue per type */
                    if (IS_PCM(mb_type)) {
                    } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
                               IS_INTRA16x16(mb_type)) {
                    } else if (IS_INTRA4x4(mb_type)) {
                    } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
                    } else if (IS_DIRECT(mb_type)) {
                    } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
                    } else if (IS_GMC(mb_type)) {
                    } else if (IS_SKIP(mb_type)) {
                    } else if (!USES_LIST(mb_type, 1)) {
                    } else if (!USES_LIST(mb_type, 0)) {
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));

                    /* replicate the single-byte U/V values across 8 pixels */
                    u *= 0x0101010101010101ULL;
                    v *= 0x0101010101010101ULL;
                    for (y = 0; y < block_height; y++) {
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
                                      (block_height * mb_y + y) * pict->linesize[1]) = u;
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
                                      (block_height * mb_y + y) * pict->linesize[2]) = v;

                    /* segmentation: mark partition boundaries in luma */
                    if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                    if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
                        for (y = 0; y < 16; y++)
                            pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
                                          pict->linesize[0]] ^= 0x80;
                    if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
                        int dm = 1 << (mv_sample_log2 - 2);
                        for (i = 0; i < 4; i++) {
                            int sx = mb_x * 16 + 8 * (i & 1);
                            int sy = mb_y * 16 + 8 * (i >> 1);
                            int xy = (mb_x * 2 + (i & 1) +
                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                            /* mark sub-partition borders where MVs differ */
                            int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
                            if (mv[0] != mv[dm] ||
                                mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
                                for (y = 0; y < 8; y++)
                                    pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
                            if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
                                *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
                                              pict->linesize[0]) ^= 0x8080808080808080ULL;

                    if (IS_INTERLACED(mb_type) &&
                        s->codec_id == AV_CODEC_ID_H264) {
                s->mbskip_table[mb_index] = 0;
/**
 * find the lowest MB row referenced in the MVs
 * Scans the current MB's motion vectors (for direction `dir`) and
 * converts the largest vertical displacement into an MB-row index,
 * clamped to [0, mb_height-1]; used by frame threading to know how far
 * the reference frame must be decoded.
 * NOTE(review): the switch cases that set `mvs` (and off adjustment)
 * for the different mv_type values are elided in this excerpt.
 */
int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int my, off, i, mvs;

    /* field pictures are not handled; fall back to "whole frame" */
    if (s->picture_structure != PICT_FRAME) goto unhandled;

    switch (s->mv_type) {
    for (i = 0; i < mvs; i++) {
        /* normalize half-pel vectors to quarter-pel units */
        my = s->mv[dir][i][1]<<qpel_shift;
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    /* quarter-pel to MB rows: divide by 64, rounding up */
    off = (FFMAX(-my_min, my_max) + 63) >> 6;

    return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
    return s->mb_height-1;
/* put block[] to dest[] */
/* Dequantize an intra block with the codec's intra dequantizer, then
 * IDCT it straight into dest (overwrite, no residual add). */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
/* add block[] to dest[] */
/* IDCT an already-dequantized block and add the residual into dest;
 * skipped when the block has no coded coefficients
 * (block_last_index < 0). */
static inline void add_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size)
    if (s->block_last_index[i] >= 0) {
        s->dsp.idct_add (dest, line_size, block);
/* Like add_dct(), but runs the codec's inter dequantizer first; used
 * when coefficients are still in quantized form. Skipped when the
 * block has no coded coefficients. */
static inline void add_dequant_dct(MpegEncContext *s,
                                   DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
    if (s->block_last_index[i] >= 0) {
        s->dct_unquantize_inter(s, block, i, qscale);

        s->dsp.idct_add (dest, line_size, block);
/**
 * Clean dc, ac, coded_block for the current non-intra MB.
 * Resets the luma and chroma DC predictors to their neutral value
 * (1024), zeroes the AC prediction buffers, clears the coded_block
 * flags for MSMPEG4 v3+, and marks the MB as non-intra in
 * mbintra_table.
 * NOTE(review): some original lines (framing comments/braces, the
 * dc_val[1] reset matching dc_val[2]) appear elided in this excerpt.
 */
void ff_clean_intra_table_entries(MpegEncContext *s)
    int wrap = s->b8_stride;
    int xy = s->block_index[0];

    /* luma DC predictors back to the neutral reset value */
    s->dc_val[0][xy + 1 ] =
    s->dc_val[0][xy + wrap] =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* luma AC prediction buffers (two 8x8 rows of the MB) */
    memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
    if (s->msmpeg4_version>=3) {
        s->coded_block[xy ] =
        s->coded_block[xy + 1 ] =
        s->coded_block[xy + wrap] =
        s->coded_block[xy + 1 + wrap] = 0;
    /* chroma predictors use MB-granular addressing */
    wrap = s->mb_stride;
    xy = s->mb_x + s->mb_y * wrap;
    s->dc_val[2][xy] = 1024;
    /* chroma AC prediction buffers */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    s->mbintra_table[xy]= 0;
2073 /* generic function called after a macroblock has been parsed by the
2074 decoder or after it has been encoded by the encoder.
2076 Important variables used:
2077 s->mb_intra : true if intra macroblock
2078 s->mv_dir : motion vector direction
2079 s->mv_type : motion vector type
2080 s->mv : motion vector
2081 s->interlaced_dct : true if interlaced dct used (mpeg2)
2083 static av_always_inline
2084 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2087 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2088 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2089 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2093 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2094 /* save DCT coefficients */
2096 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2097 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2099 for(j=0; j<64; j++){
2100 *dct++ = block[i][s->dsp.idct_permutation[j]];
2101 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2103 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2107 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2109 /* update DC predictors for P macroblocks */
2111 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2112 if(s->mbintra_table[mb_xy])
2113 ff_clean_intra_table_entries(s);
2117 s->last_dc[2] = 128 << s->intra_dc_precision;
2120 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2121 s->mbintra_table[mb_xy]=1;
2123 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2124 uint8_t *dest_y, *dest_cb, *dest_cr;
2125 int dct_linesize, dct_offset;
2126 op_pixels_func (*op_pix)[4];
2127 qpel_mc_func (*op_qpix)[16];
2128 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2129 const int uvlinesize = s->current_picture.f.linesize[1];
2130 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2131 const int block_size = 8;
2133 /* avoid copy if macroblock skipped in last frame too */
2134 /* skip only during decoding as we might trash the buffers during encoding a bit */
2136 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2138 if (s->mb_skipped) {
2140 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2142 } else if(!s->current_picture.f.reference) {
2145 *mbskip_ptr = 0; /* not skipped */
2149 dct_linesize = linesize << s->interlaced_dct;
2150 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2154 dest_cb= s->dest[1];
2155 dest_cr= s->dest[2];
2157 dest_y = s->b_scratchpad;
2158 dest_cb= s->b_scratchpad+16*linesize;
2159 dest_cr= s->b_scratchpad+32*linesize;
2163 /* motion handling */
2164 /* decoding or more than one mb_type (MC was already done otherwise) */
2167 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2168 if (s->mv_dir & MV_DIR_FORWARD) {
2169 ff_thread_await_progress(&s->last_picture_ptr->f,
2170 ff_MPV_lowest_referenced_row(s, 0),
2173 if (s->mv_dir & MV_DIR_BACKWARD) {
2174 ff_thread_await_progress(&s->next_picture_ptr->f,
2175 ff_MPV_lowest_referenced_row(s, 1),
2180 op_qpix= s->me.qpel_put;
2181 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2182 op_pix = s->dsp.put_pixels_tab;
2184 op_pix = s->dsp.put_no_rnd_pixels_tab;
2186 if (s->mv_dir & MV_DIR_FORWARD) {
2187 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2188 op_pix = s->dsp.avg_pixels_tab;
2189 op_qpix= s->me.qpel_avg;
2191 if (s->mv_dir & MV_DIR_BACKWARD) {
2192 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2196 /* skip dequant / idct if we are really late ;) */
2197 if(s->avctx->skip_idct){
2198 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2199 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2200 || s->avctx->skip_idct >= AVDISCARD_ALL)
2204 /* add dct residue */
2205 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2206 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2207 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2208 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2209 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2210 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2212 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2213 if (s->chroma_y_shift){
2214 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2215 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2219 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2220 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2221 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2222 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2225 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2226 add_dct(s, block[0], 0, dest_y , dct_linesize);
2227 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2228 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2229 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2231 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2232 if(s->chroma_y_shift){//Chroma420
2233 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2234 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2237 dct_linesize = uvlinesize << s->interlaced_dct;
2238 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2240 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2241 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2242 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2243 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2244 if(!s->chroma_x_shift){//Chroma444
2245 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2246 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2247 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2248 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2253 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2254 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2257 /* dct only in intra block */
2258 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2259 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2260 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2261 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2262 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2264 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2265 if(s->chroma_y_shift){
2266 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2267 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2271 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2272 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2273 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2274 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2278 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2279 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2280 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2281 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2283 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2284 if(s->chroma_y_shift){
2285 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2286 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2289 dct_linesize = uvlinesize << s->interlaced_dct;
2290 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2292 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2293 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2294 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2295 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2296 if(!s->chroma_x_shift){//Chroma444
2297 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2298 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2299 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2300 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2308 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2309 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2310 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2315 void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2317 if(s->out_format == FMT_MPEG1) {
2318 MPV_decode_mb_internal(s, block, 1);
2321 MPV_decode_mb_internal(s, block, 0);
2325 * @param h is the normal height, this will be reduced automatically if needed for the last row
2327 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2328 const int field_pic= s->picture_structure != PICT_FRAME;
2334 if (!s->avctx->hwaccel
2335 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2336 && s->unrestricted_mv
2337 && s->current_picture.f.reference
2339 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2340 int sides = 0, edge_h;
2341 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2342 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2343 if (y==0) sides |= EDGE_TOP;
2344 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2346 edge_h= FFMIN(h, s->v_edge_pos - y);
2348 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2349 s->linesize, s->h_edge_pos, edge_h,
2350 EDGE_WIDTH, EDGE_WIDTH, sides);
2351 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2352 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2353 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2354 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2355 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2356 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2359 h= FFMIN(h, s->avctx->height - y);
2361 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2363 if (s->avctx->draw_horiz_band) {
2365 int offset[AV_NUM_DATA_POINTERS];
2368 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2369 src = &s->current_picture_ptr->f;
2370 else if(s->last_picture_ptr)
2371 src = &s->last_picture_ptr->f;
2375 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2376 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2379 offset[0]= y * s->linesize;
2381 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2382 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2388 s->avctx->draw_horiz_band(s->avctx, src, offset,
2389 y, s->picture_structure, h);
2393 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2394 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2395 const int uvlinesize = s->current_picture.f.linesize[1];
2396 const int mb_size= 4;
2398 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2399 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2400 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2401 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2402 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2403 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2404 //block_index is not used by mpeg2, so it is not affected by chroma_format
2406 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2407 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2408 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2410 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2412 if(s->picture_structure==PICT_FRAME){
2413 s->dest[0] += s->mb_y * linesize << mb_size;
2414 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2415 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2417 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2418 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2419 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2420 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2425 void ff_mpeg_flush(AVCodecContext *avctx){
2427 MpegEncContext *s = avctx->priv_data;
2429 if(s==NULL || s->picture==NULL)
2432 for(i=0; i<s->picture_count; i++){
2433 if (s->picture[i].f.data[0] &&
2434 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2435 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2436 free_frame_buffer(s, &s->picture[i]);
2438 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2440 s->mb_x= s->mb_y= 0;
2442 s->parse_context.state= -1;
2443 s->parse_context.frame_start_found= 0;
2444 s->parse_context.overread= 0;
2445 s->parse_context.overread_index= 0;
2446 s->parse_context.index= 0;
2447 s->parse_context.last_index= 0;
2448 s->bitstream_buffer_size=0;
2452 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2453 DCTELEM *block, int n, int qscale)
2455 int i, level, nCoeffs;
2456 const uint16_t *quant_matrix;
2458 nCoeffs= s->block_last_index[n];
2461 block[0] = block[0] * s->y_dc_scale;
2463 block[0] = block[0] * s->c_dc_scale;
2464 /* XXX: only mpeg1 */
2465 quant_matrix = s->intra_matrix;
2466 for(i=1;i<=nCoeffs;i++) {
2467 int j= s->intra_scantable.permutated[i];
2472 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2473 level = (level - 1) | 1;
2476 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2477 level = (level - 1) | 1;
2484 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2485 DCTELEM *block, int n, int qscale)
2487 int i, level, nCoeffs;
2488 const uint16_t *quant_matrix;
2490 nCoeffs= s->block_last_index[n];
2492 quant_matrix = s->inter_matrix;
2493 for(i=0; i<=nCoeffs; i++) {
2494 int j= s->intra_scantable.permutated[i];
2499 level = (((level << 1) + 1) * qscale *
2500 ((int) (quant_matrix[j]))) >> 4;
2501 level = (level - 1) | 1;
2504 level = (((level << 1) + 1) * qscale *
2505 ((int) (quant_matrix[j]))) >> 4;
2506 level = (level - 1) | 1;
2513 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2514 DCTELEM *block, int n, int qscale)
2516 int i, level, nCoeffs;
2517 const uint16_t *quant_matrix;
2519 if(s->alternate_scan) nCoeffs= 63;
2520 else nCoeffs= s->block_last_index[n];
2523 block[0] = block[0] * s->y_dc_scale;
2525 block[0] = block[0] * s->c_dc_scale;
2526 quant_matrix = s->intra_matrix;
2527 for(i=1;i<=nCoeffs;i++) {
2528 int j= s->intra_scantable.permutated[i];
2533 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2536 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2543 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2544 DCTELEM *block, int n, int qscale)
2546 int i, level, nCoeffs;
2547 const uint16_t *quant_matrix;
2550 if(s->alternate_scan) nCoeffs= 63;
2551 else nCoeffs= s->block_last_index[n];
2554 block[0] = block[0] * s->y_dc_scale;
2556 block[0] = block[0] * s->c_dc_scale;
2557 quant_matrix = s->intra_matrix;
2558 for(i=1;i<=nCoeffs;i++) {
2559 int j= s->intra_scantable.permutated[i];
2564 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2567 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2576 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2577 DCTELEM *block, int n, int qscale)
2579 int i, level, nCoeffs;
2580 const uint16_t *quant_matrix;
2583 if(s->alternate_scan) nCoeffs= 63;
2584 else nCoeffs= s->block_last_index[n];
2586 quant_matrix = s->inter_matrix;
2587 for(i=0; i<=nCoeffs; i++) {
2588 int j= s->intra_scantable.permutated[i];
2593 level = (((level << 1) + 1) * qscale *
2594 ((int) (quant_matrix[j]))) >> 4;
2597 level = (((level << 1) + 1) * qscale *
2598 ((int) (quant_matrix[j]))) >> 4;
2607 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2608 DCTELEM *block, int n, int qscale)
2610 int i, level, qmul, qadd;
2613 assert(s->block_last_index[n]>=0);
2619 block[0] = block[0] * s->y_dc_scale;
2621 block[0] = block[0] * s->c_dc_scale;
2622 qadd = (qscale - 1) | 1;
2629 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2631 for(i=1; i<=nCoeffs; i++) {
2635 level = level * qmul - qadd;
2637 level = level * qmul + qadd;
2644 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2645 DCTELEM *block, int n, int qscale)
2647 int i, level, qmul, qadd;
2650 assert(s->block_last_index[n]>=0);
2652 qadd = (qscale - 1) | 1;
2655 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2657 for(i=0; i<=nCoeffs; i++) {
2661 level = level * qmul - qadd;
2663 level = level * qmul + qadd;
2671 * set qscale and update qscale dependent variables.
2673 void ff_set_qscale(MpegEncContext * s, int qscale)
2677 else if (qscale > 31)
2681 s->chroma_qscale= s->chroma_qscale_table[qscale];
2683 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2684 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2687 void ff_MPV_report_decode_progress(MpegEncContext *s)
2689 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2690 ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);