2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
38 #include "xvmc_internal.h"
/* Forward declarations of the C reference inverse-quantization routines.
 * ff_dct_common_init() installs these as function pointers on the
 * MpegEncContext; per-architecture init may later override them. */
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 DCTELEM *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
61 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default chroma qscale mapping: identity (chroma uses the same qscale
 * as luma). Installed in ff_MPV_common_defaults(). */
67 static const uint8_t ff_default_chroma_qscale_table[32] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
70 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale: a constant 8 regardless of qscale. Also reused as
 * entry 0 of ff_mpeg2_dc_scale_table below. */
73 const uint8_t ff_mpeg1_dc_scale_table[128] = {
74 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* Constant DC scale of 4 — entry 1 of ff_mpeg2_dc_scale_table below.
 * NOTE(review): presumably selected by MPEG-2 intra_dc_precision == 1;
 * confirm against the callers. */
85 static const uint8_t mpeg2_dc_scale_table1[128] = {
86 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* Constant DC scale of 2 — entry 2 of ff_mpeg2_dc_scale_table below. */
97 static const uint8_t mpeg2_dc_scale_table2[128] = {
98 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* Constant DC scale of 1 — entry 3 of ff_mpeg2_dc_scale_table below. */
109 static const uint8_t mpeg2_dc_scale_table3[128] = {
110 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale tables in decreasing scale order (8, 4, 2, 1); entry 0
 * reuses the MPEG-1 table. NOTE(review): presumably indexed by the
 * MPEG-2 intra_dc_precision field — confirm at the call sites. */
121 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
122 ff_mpeg1_dc_scale_table,
123 mpeg2_dc_scale_table1,
124 mpeg2_dc_scale_table2,
125 mpeg2_dc_scale_table3,
/* Candidate 4:2:0 pixel-format lists (software-only, and including
 * hwaccel formats). NOTE(review): presumably offered during
 * get_format negotiation — confirm at the callers. */
128 const enum PixelFormat ff_pixfmt_list_420[] = {
133 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/**
 * Find the next MPEG start code (byte sequence 00 00 01 xx) in [p, end).
 * The most recent bytes are carried in *state across calls, so a start
 * code split between two input buffers is still detected.
 */
141 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
143 uint32_t * restrict state)
/* First feed up to 3 bytes through the state carried over from the
 * previous call; *state == 0x100 means a start-code prefix was seen. */
151 for (i = 0; i < 3; i++) {
152 uint32_t tmp = *state << 8;
153 *state = tmp + *(p++);
154 if (tmp == 0x100 || p == end)
/* Skip ahead as far as the byte values allow: a byte > 1 at p[-1]
 * rules out the next three positions, a non-zero p[-2] the next two. */
159 if (p[-1] > 1 ) p += 3;
160 else if (p[-2] ) p += 2;
161 else if (p[-3]|(p[-1]-1)) p++;
/* No start code found inside the buffer. */
168 p = FFMIN(p, end) - 4;
174 /* init common dct for both encoder and decoder */
175 av_cold int ff_dct_common_init(MpegEncContext *s)
177 ff_dsputil_init(&s->dsp, s->avctx);
/* Install the C reference unquantizers; the per-architecture init
 * calls below may replace them with optimized versions. */
179 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
180 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
181 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
182 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
183 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
184 if (s->flags & CODEC_FLAG_BITEXACT)
185 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
186 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture initialization (each guarded by build config). */
189 ff_MPV_common_init_x86(s);
191 ff_MPV_common_init_axp(s);
193 ff_MPV_common_init_mmi(s);
195 ff_MPV_common_init_arm(s);
197 ff_MPV_common_init_altivec(s);
199 ff_MPV_common_init_bfin(s);
202 /* load & permute scantables
203 * note: only wmv uses different ones
205 if (s->alternate_scan) {
206 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
207 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
209 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
210 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
212 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
213 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/** Copy picture metadata from src to dst; dst is tagged
 *  FF_BUFFER_TYPE_COPY so it is distinguishable from the original. */
218 void ff_copy_picture(Picture *dst, Picture *src)
221 dst->f.type = FF_BUFFER_TYPE_COPY;
225 * Release a frame buffer
227 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
229 /* WM Image / Screen codecs allocate internal buffers with different
230 * dimensions / colorspaces; ignore user-defined callbacks for these. */
231 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
232 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
233 s->codec_id != AV_CODEC_ID_MSS2)
234 ff_thread_release_buffer(s->avctx, &pic->f);
/* The image codecs above bypass user callbacks entirely. */
236 avcodec_default_release_buffer(s->avctx, &pic->f);
237 av_freep(&pic->f.hwaccel_picture_private);
241 * Allocate a frame buffer
243 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* Allocate the hwaccel's per-picture private data first, if any. */
247 if (s->avctx->hwaccel) {
248 assert(!pic->f.hwaccel_picture_private);
249 if (s->avctx->hwaccel->priv_data_size) {
250 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
251 if (!pic->f.hwaccel_picture_private) {
252 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* WM image/screen codecs use the default get_buffer; everything else
 * goes through the thread-aware path (mirrors free_frame_buffer()). */
258 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
259 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
260 s->codec_id != AV_CODEC_ID_MSS2)
261 r = ff_thread_get_buffer(s->avctx, &pic->f);
263 r = avcodec_default_get_buffer(s->avctx, &pic->f);
265 if (r < 0 || !pic->f.type || !pic->f.data[0]) {
266 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
267 r, pic->f.type, pic->f.data[0]);
268 av_freep(&pic->f.hwaccel_picture_private);
/* Once strides are known they must not change between frames. */
272 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
273 s->uvlinesize != pic->f.linesize[1])) {
274 av_log(s->avctx, AV_LOG_ERROR,
275 "get_buffer() failed (stride changed)\n");
276 free_frame_buffer(s, pic);
/* Both chroma planes must share a single stride. */
280 if (pic->f.linesize[1] != pic->f.linesize[2]) {
281 av_log(s->avctx, AV_LOG_ERROR,
282 "get_buffer() failed (uv stride mismatch)\n");
283 free_frame_buffer(s, pic);
291 * Allocate a Picture.
292 * The pixels are allocated/set by calling get_buffer() if shared = 0
294 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
296 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
298 // the + 1 is needed so memset(,,stride*height) does not sig11
300 const int mb_array_size = s->mb_stride * s->mb_height;
301 const int b8_array_size = s->b8_stride * s->mb_height * 2;
302 const int b4_array_size = s->b4_stride * s->mb_height * 4;
/* shared picture: pixels are supplied by the caller, just tag it */
307 assert(pic->f.data[0]);
308 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
309 pic->f.type = FF_BUFFER_TYPE_SHARED;
/* non-shared: grab pixels through get_buffer() */
311 assert(!pic->f.data[0]);
313 if (alloc_frame_buffer(s, pic) < 0)
316 s->linesize = pic->f.linesize[0];
317 s->uvlinesize = pic->f.linesize[1];
/* First-time side-data allocation for this Picture. */
320 if (pic->f.qscale_table == NULL) {
322 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
323 mb_array_size * sizeof(int16_t), fail)
324 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
325 mb_array_size * sizeof(int16_t), fail)
326 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
327 mb_array_size * sizeof(int8_t ), fail)
330 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
331 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
332 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
333 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
335 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
336 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
/* offset into the base arrays so out-of-frame neighbors are valid */
338 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
339 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
/* H.264 uses 4x4 motion granularity (subsample_log2 == 2) ... */
340 if (s->out_format == FMT_H264) {
341 for (i = 0; i < 2; i++) {
342 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
343 2 * (b4_array_size + 4) * sizeof(int16_t),
345 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
346 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
347 4 * mb_array_size * sizeof(uint8_t), fail)
349 pic->f.motion_subsample_log2 = 2;
/* ... while H.263-family / encoder / MV debug use 8x8 blocks. */
350 } else if (s->out_format == FMT_H263 || s->encoding ||
351 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
352 for (i = 0; i < 2; i++) {
353 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
354 2 * (b8_array_size + 4) * sizeof(int16_t),
356 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
357 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
358 4 * mb_array_size * sizeof(uint8_t), fail)
360 pic->f.motion_subsample_log2 = 3;
362 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
363 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
364 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
366 pic->f.qstride = s->mb_stride;
367 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
368 1 * sizeof(AVPanScan), fail)
374 fail: // for the FF_ALLOCZ_OR_GOTO macro
376 free_frame_buffer(s, pic);
381 * Deallocate a picture.
383 static void free_picture(MpegEncContext *s, Picture *pic)
/* Release the pixel buffers unless the pixels are caller-owned. */
387 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
388 free_frame_buffer(s, pic);
/* Free all side data allocated in ff_alloc_picture(). */
391 av_freep(&pic->mb_var);
392 av_freep(&pic->mc_mb_var);
393 av_freep(&pic->mb_mean);
394 av_freep(&pic->f.mbskip_table);
395 av_freep(&pic->qscale_table_base);
396 pic->f.qscale_table = NULL;
397 av_freep(&pic->mb_type_base);
398 pic->f.mb_type = NULL;
399 av_freep(&pic->f.dct_coeff);
400 av_freep(&pic->f.pan_scan);
/* NOTE(review): mb_type is cleared twice (also above) — harmless,
 * but redundant. */
401 pic->f.mb_type = NULL;
402 for (i = 0; i < 2; i++) {
403 av_freep(&pic->motion_val_base[i]);
404 av_freep(&pic->f.ref_index[i]);
405 pic->f.motion_val[i] = NULL;
/* Shared pictures never owned their data pointers; just clear them. */
408 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
409 for (i = 0; i < 4; i++) {
411 pic->f.data[i] = NULL;
/**
 * Allocate the per-slice-context scratch buffers (motion estimation
 * maps, edge emulation buffer, block storage, H.263 AC prediction).
 * Returns -1 on allocation failure; freeing is deferred to
 * ff_MPV_common_end() / free_duplicate_context().
 */
417 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
419 int y_size = s->b8_stride * (2 * s->mb_height + 1);
420 int c_size = s->mb_stride * (s->mb_height + 1);
421 int yc_size = y_size + 2 * c_size;
424 // edge emu needs blocksize + filter length - 1
425 // (= 17x17 for halfpel / 21x21 for h264)
426 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
427 (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
429 // FIXME should be linesize instead of s->width * 2
430 // but that is not known before get_buffer()
431 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
432 (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
/* The other scratchpads alias (parts of) me.scratchpad — only one
 * allocation backs all of them. */
433 s->me.temp = s->me.scratchpad;
434 s->rd_scratchpad = s->me.scratchpad;
435 s->b_scratchpad = s->me.scratchpad;
436 s->obmc_scratchpad = s->me.scratchpad + 16;
438 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
439 ME_MAP_SIZE * sizeof(uint32_t), fail)
440 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
441 ME_MAP_SIZE * sizeof(uint32_t), fail)
442 if (s->avctx->noise_reduction) {
443 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
444 2 * 64 * sizeof(int), fail)
447 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
448 s->block = s->blocks[0];
450 for (i = 0; i < 12; i++) {
451 s->pblocks[i] = &s->block[i];
/* H.263-family AC prediction values: one luma plane plus two chroma. */
454 if (s->out_format == FMT_H263) {
456 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
457 yc_size * sizeof(int16_t) * 16, fail);
458 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
459 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
460 s->ac_val[2] = s->ac_val[1] + c_size;
465 return -1; // free() through ff_MPV_common_end()
/** Free the scratch buffers allocated by init_duplicate_context(). */
468 static void free_duplicate_context(MpegEncContext *s)
473 av_freep(&s->edge_emu_buffer);
474 av_freep(&s->me.scratchpad);
/* obmc_scratchpad aliases me.scratchpad — clear the pointer only. */
478 s->obmc_scratchpad = NULL;
480 av_freep(&s->dct_error_sum);
481 av_freep(&s->me.map);
482 av_freep(&s->me.score_map);
483 av_freep(&s->blocks);
484 av_freep(&s->ac_val_base);
/** Save src's per-thread buffer pointers into bak so that the
 *  struct-wide memcpy in ff_update_duplicate_context() can be undone. */
488 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
490 #define COPY(a) bak->a = src->a
491 COPY(edge_emu_buffer);
496 COPY(obmc_scratchpad);
503 COPY(me.map_generation);
/** Copy the whole context from src into dst, then restore dst's own
 *  per-thread buffers (scratchpads, maps) saved beforehand. */
515 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
519 // FIXME copy only needed parts
521 backup_duplicate_context(&bak, dst);
522 memcpy(dst, src, sizeof(MpegEncContext));
523 backup_duplicate_context(dst, &bak);
/* pblocks must point into dst's own block array, not src's. */
524 for (i = 0; i < 12; i++) {
525 dst->pblocks[i] = &dst->block[i];
527 // STOP_TIMER("update_duplicate_context")
528 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/**
 * Synchronize the decoding state of dst with src for frame threading:
 * picture lists/pointers, header-derived parameters, and any pending
 * bitstream buffer are carried over from the source thread's context.
 */
531 int ff_mpeg_update_thread_context(AVCodecContext *dst,
532 const AVCodecContext *src)
535 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
537 if (dst == src || !s1->context_initialized)
540 // FIXME can parameters change on I-frames?
541 // in that case dst may need a reinit
/* First-time setup: clone src wholesale, then give dst its own
 * picture index range and bitstream buffer before common init. */
542 if (!s->context_initialized) {
543 memcpy(s, s1, sizeof(MpegEncContext));
546 s->picture_range_start += MAX_PICTURE_COUNT;
547 s->picture_range_end += MAX_PICTURE_COUNT;
548 s->bitstream_buffer = NULL;
549 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
551 ff_MPV_common_init(s);
/* Resolution change (or explicit reinit request) on the source side. */
554 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
556 s->context_reinit = 0;
557 s->height = s1->height;
558 s->width = s1->width;
559 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
563 s->avctx->coded_height = s1->avctx->coded_height;
564 s->avctx->coded_width = s1->avctx->coded_width;
565 s->avctx->width = s1->avctx->width;
566 s->avctx->height = s1->avctx->height;
568 s->coded_picture_number = s1->coded_picture_number;
569 s->picture_number = s1->picture_number;
570 s->input_picture_number = s1->input_picture_number;
572 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
/* Copy the contiguous field range [last_picture, last_picture_ptr). */
573 memcpy(&s->last_picture, &s1->last_picture,
574 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
576 // reset s->picture[].f.extended_data to s->picture[].f.data
577 for (i = 0; i < s->picture_count; i++)
578 s->picture[i].f.extended_data = s->picture[i].f.data;
/* Re-point the picture pointers into s's own picture array. */
580 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
581 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
582 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
584 // Error/bug resilience
585 s->next_p_frame_damaged = s1->next_p_frame_damaged;
586 s->workaround_bugs = s1->workaround_bugs;
/* Copy the MPEG-4 field range [time_increment_bits, shape). */
589 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
590 (char *) &s1->shape - (char *) &s1->time_increment_bits);
593 s->max_b_frames = s1->max_b_frames;
594 s->low_delay = s1->low_delay;
595 s->dropable = s1->dropable;
597 // DivX handling (doesn't work)
598 s->divx_packed = s1->divx_packed;
600 if (s1->bitstream_buffer) {
601 if (s1->bitstream_buffer_size +
602 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
603 av_fast_malloc(&s->bitstream_buffer,
604 &s->allocated_bitstream_buffer_size,
605 s1->allocated_bitstream_buffer_size);
/* NOTE(review): s->bitstream_buffer is not checked for NULL after
 * av_fast_malloc before the memcpy below — verify against the full
 * source whether an allocation-failure check exists here. */
606 s->bitstream_buffer_size = s1->bitstream_buffer_size;
607 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
608 s1->bitstream_buffer_size);
609 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
610 FF_INPUT_BUFFER_PADDING_SIZE);
613 // MPEG2/interlacing info
614 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
615 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
617 if (!s1->first_field) {
618 s->last_pict_type = s1->pict_type;
619 if (s1->current_picture_ptr)
620 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
622 if (s1->pict_type != AV_PICTURE_TYPE_B) {
623 s->last_non_b_pict_type = s1->pict_type;
631 * Set the given MpegEncContext to common defaults
632 * (same for encoding and decoding).
633 * The changed fields will not depend upon the
634 * prior state of the MpegEncContext.
636 void ff_MPV_common_defaults(MpegEncContext *s)
/* MPEG-1-style DC scaling and identity chroma qscale by default. */
638 s->y_dc_scale_table =
639 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
640 s->chroma_qscale_table = ff_default_chroma_qscale_table;
641 s->progressive_frame = 1;
642 s->progressive_sequence = 1;
643 s->picture_structure = PICT_FRAME;
645 s->coded_picture_number = 0;
646 s->picture_number = 0;
647 s->input_picture_number = 0;
649 s->picture_in_gop_number = 0;
654 s->picture_range_start = 0;
655 s->picture_range_end = MAX_PICTURE_COUNT;
657 s->slice_context_count = 1;
661 * Set the given MpegEncContext to defaults for decoding.
662 * the changed fields will not depend upon
663 * the prior state of the MpegEncContext.
665 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Currently identical to the common defaults. */
667 ff_MPV_common_defaults(s);
671 * Initialize and allocate MpegEncContext fields dependent on the resolution.
673 static int init_context_frame(MpegEncContext *s)
675 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Derive macroblock grid geometry; strides include one extra column
 * so neighbor accesses on the frame edge stay in bounds. */
677 s->mb_width = (s->width + 15) / 16;
678 s->mb_stride = s->mb_width + 1;
679 s->b8_stride = s->mb_width * 2 + 1;
680 s->b4_stride = s->mb_width * 4 + 1;
681 mb_array_size = s->mb_height * s->mb_stride;
682 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
684 /* set default edge pos, will be overridden
685 * in decode_header if needed */
686 s->h_edge_pos = s->mb_width * 16;
687 s->v_edge_pos = s->mb_height * 16;
689 s->mb_num = s->mb_width * s->mb_height;
694 s->block_wrap[3] = s->b8_stride;
696 s->block_wrap[5] = s->mb_stride;
698 y_size = s->b8_stride * (2 * s->mb_height + 1);
699 c_size = s->mb_stride * (s->mb_height + 1);
700 yc_size = y_size + 2 * c_size;
702 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
703 fail); // error resilience code looks cleaner with this
704 for (y = 0; y < s->mb_height; y++)
705 for (x = 0; x < s->mb_width; x++)
706 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
708 s->mb_index2xy[s->mb_height * s->mb_width] =
709 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
712 /* Allocate MV tables */
713 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
714 mv_table_size * 2 * sizeof(int16_t), fail);
715 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
716 mv_table_size * 2 * sizeof(int16_t), fail);
717 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
718 mv_table_size * 2 * sizeof(int16_t), fail);
719 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
720 mv_table_size * 2 * sizeof(int16_t), fail);
721 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
722 mv_table_size * 2 * sizeof(int16_t), fail);
723 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
724 mv_table_size * 2 * sizeof(int16_t), fail);
/* Offset the working pointers past the guard row/column. */
725 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
726 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
727 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
728 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
730 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
732 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
734 /* Allocate MB type table */
735 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
736 sizeof(uint16_t), fail); // needed for encoding
738 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
741 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
742 mb_array_size * sizeof(float), fail);
743 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
744 mb_array_size * sizeof(float), fail);
748 FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
749 mb_array_size * sizeof(uint8_t), fail);
750 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
751 mb_array_size * sizeof(uint8_t), fail);
753 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
754 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
755 /* interlaced direct mode decoding tables */
756 for (i = 0; i < 2; i++) {
758 for (j = 0; j < 2; j++) {
759 for (k = 0; k < 2; k++) {
760 FF_ALLOCZ_OR_GOTO(s->avctx,
761 s->b_field_mv_table_base[i][j][k],
762 mv_table_size * 2 * sizeof(int16_t),
764 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
767 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
768 mb_array_size * 2 * sizeof(uint8_t), fail);
769 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
770 mv_table_size * 2 * sizeof(int16_t), fail);
771 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
774 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
775 mb_array_size * 2 * sizeof(uint8_t), fail);
778 if (s->out_format == FMT_H263) {
780 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
781 s->coded_block = s->coded_block_base + s->b8_stride + 1;
783 /* cbp, ac_pred, pred_dir */
784 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
785 mb_array_size * sizeof(uint8_t), fail);
786 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
787 mb_array_size * sizeof(uint8_t), fail);
790 if (s->h263_pred || s->h263_plus || !s->encoding) {
792 // MN: we need these for error resilience of intra-frames
793 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
794 yc_size * sizeof(int16_t), fail);
795 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
796 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
797 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC prediction value. */
798 for (i = 0; i < yc_size; i++)
799 s->dc_val_base[i] = 1024;
802 /* which mb is an intra block */
803 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
804 memset(s->mbintra_table, 1, mb_array_size);
806 /* init macroblock skip table */
807 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
808 // Note the + 1 is for a quicker mpeg4 slice_end detection
/* NOTE(review): the comment above says "+ 1" but the allocation adds
 * 2 bytes — the comment appears stale. */
810 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
811 s->avctx->debug_mv) {
812 s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
813 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
814 s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
815 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
816 s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
817 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
822 return AVERROR(ENOMEM);
826 * init common structure for both encoder and decoder.
827 * this assumes that some variables like width/height are already set
829 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* One slice context per slice thread; 1 otherwise. */
832 int nb_slices = (HAVE_THREADS &&
833 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
834 s->avctx->thread_count : 1;
836 if (s->encoding && s->avctx->slices)
837 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 needs an even mb_height rounded to 32 lines. */
839 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
840 s->mb_height = (s->height + 31) / 32 * 2;
841 else if (s->codec_id != AV_CODEC_ID_H264)
842 s->mb_height = (s->height + 15) / 16;
844 if (s->avctx->pix_fmt == PIX_FMT_NONE) {
845 av_log(s->avctx, AV_LOG_ERROR,
846 "decoding to PIX_FMT_NONE is not supported.\n");
/* Clamp the slice/thread count to something usable. */
850 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
853 max_slices = FFMIN(MAX_THREADS, s->mb_height);
855 max_slices = MAX_THREADS;
856 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
857 " reducing to %d\n", nb_slices, max_slices);
858 nb_slices = max_slices;
861 if ((s->width || s->height) &&
862 av_image_check_size(s->width, s->height, 0, s->avctx))
865 ff_dct_common_init(s);
867 s->flags = s->avctx->flags;
868 s->flags2 = s->avctx->flags2;
870 if (s->width && s->height) {
871 /* set chroma shifts */
872 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
875 /* convert fourcc to upper case */
876 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
878 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
880 s->avctx->coded_frame = &s->current_picture.f;
/* Encoder-only allocations. */
883 if (s->msmpeg4_version) {
884 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
885 2 * 2 * (MAX_LEVEL + 1) *
886 (MAX_RUN + 1) * 2 * sizeof(int), fail);
888 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
890 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
891 64 * 32 * sizeof(int), fail);
892 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
893 64 * 32 * sizeof(int), fail);
894 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
895 64 * 32 * 2 * sizeof(uint16_t), fail);
896 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
897 64 * 32 * 2 * sizeof(uint16_t), fail);
898 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
899 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
900 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
901 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
903 if (s->avctx->noise_reduction) {
904 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
905 2 * 64 * sizeof(uint16_t), fail);
/* Shared picture pool, sized per thread for frame threading. */
910 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
911 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
912 s->picture_count * sizeof(Picture), fail);
913 for (i = 0; i < s->picture_count; i++) {
914 avcodec_get_frame_defaults(&s->picture[i].f);
917 if (s->width && s->height) {
918 if ((err = init_context_frame(s)))
921 s->parse_context.state = -1;
924 s->context_initialized = 1;
925 s->thread_context[0] = s;
927 if (s->width && s->height) {
/* Clone one context per additional slice thread and carve up the
 * macroblock rows between them. */
929 for (i = 1; i < nb_slices; i++) {
930 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
931 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
934 for (i = 0; i < nb_slices; i++) {
935 if (init_duplicate_context(s->thread_context[i], s) < 0)
937 s->thread_context[i]->start_mb_y =
938 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
939 s->thread_context[i]->end_mb_y =
940 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
943 if (init_duplicate_context(s, s) < 0)
946 s->end_mb_y = s->mb_height;
948 s->slice_context_count = nb_slices;
953 ff_MPV_common_end(s);
958 * Frees and resets MpegEncContext fields depending on the resolution.
959 * Is used during resolution changes to avoid a full reinitialization of the
962 static int free_context_frame(MpegEncContext *s)
/* Mirror of init_context_frame(): free every table it allocated and
 * NULL the derived working pointers. */
966 av_freep(&s->mb_type);
967 av_freep(&s->p_mv_table_base);
968 av_freep(&s->b_forw_mv_table_base);
969 av_freep(&s->b_back_mv_table_base);
970 av_freep(&s->b_bidir_forw_mv_table_base);
971 av_freep(&s->b_bidir_back_mv_table_base);
972 av_freep(&s->b_direct_mv_table_base);
973 s->p_mv_table = NULL;
974 s->b_forw_mv_table = NULL;
975 s->b_back_mv_table = NULL;
976 s->b_bidir_forw_mv_table = NULL;
977 s->b_bidir_back_mv_table = NULL;
978 s->b_direct_mv_table = NULL;
979 for (i = 0; i < 2; i++) {
980 for (j = 0; j < 2; j++) {
981 for (k = 0; k < 2; k++) {
982 av_freep(&s->b_field_mv_table_base[i][j][k]);
983 s->b_field_mv_table[i][j][k] = NULL;
985 av_freep(&s->b_field_select_table[i][j]);
986 av_freep(&s->p_field_mv_table_base[i][j]);
987 s->p_field_mv_table[i][j] = NULL;
989 av_freep(&s->p_field_select_table[i]);
992 av_freep(&s->dc_val_base);
993 av_freep(&s->coded_block_base);
994 av_freep(&s->mbintra_table);
995 av_freep(&s->cbp_table);
996 av_freep(&s->pred_dir_table);
998 av_freep(&s->mbskip_table);
1000 av_freep(&s->error_status_table);
1001 av_freep(&s->er_temp_buffer);
1002 av_freep(&s->mb_index2xy);
1003 av_freep(&s->lambda_table);
1004 av_freep(&s->cplx_tab);
1005 av_freep(&s->bits_tab);
1007 s->linesize = s->uvlinesize = 0;
1009 for (i = 0; i < 3; i++)
1010 av_freep(&s->visualization_buffer[i]);
/* Frame-threaded decoders keep their buffers across a reinit. */
1012 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
1013 avcodec_default_free_buffers(s->avctx);
/**
 * Re-initialize all resolution-dependent state after a frame size
 * change: tear down slice contexts and per-resolution tables, flag
 * every pooled picture for reallocation, then rebuild.
 */
1018 int ff_MPV_common_frame_size_change(MpegEncContext *s)
1022 if (s->slice_context_count > 1) {
1023 for (i = 0; i < s->slice_context_count; i++) {
1024 free_duplicate_context(s->thread_context[i]);
1026 for (i = 1; i < s->slice_context_count; i++) {
1027 av_freep(&s->thread_context[i]);
1030 free_duplicate_context(s);
1032 free_context_frame(s);
/* Pictures keep their slots but must be reallocated at the new size. */
1035 for (i = 0; i < s->picture_count; i++) {
1036 s->picture[i].needs_realloc = 1;
1039 s->last_picture_ptr =
1040 s->next_picture_ptr =
1041 s->current_picture_ptr = NULL;
/* Recompute mb_height exactly as in ff_MPV_common_init(). */
1044 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1045 s->mb_height = (s->height + 31) / 32 * 2;
1046 else if (s->codec_id != AV_CODEC_ID_H264)
1047 s->mb_height = (s->height + 15) / 16;
1049 if ((s->width || s->height) &&
1050 av_image_check_size(s->width, s->height, 0, s->avctx))
1051 return AVERROR_INVALIDDATA;
1053 if ((err = init_context_frame(s)))
1056 s->thread_context[0] = s;
1058 if (s->width && s->height) {
1059 int nb_slices = s->slice_context_count;
1060 if (nb_slices > 1) {
1061 for (i = 1; i < nb_slices; i++) {
1062 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1063 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1066 for (i = 0; i < nb_slices; i++) {
1067 if (init_duplicate_context(s->thread_context[i], s) < 0)
1069 s->thread_context[i]->start_mb_y =
1070 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1071 s->thread_context[i]->end_mb_y =
1072 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1075 if (init_duplicate_context(s, s) < 0)
1078 s->end_mb_y = s->mb_height;
1080 s->slice_context_count = nb_slices;
1085 ff_MPV_common_end(s);
1089 /* init common structure for both encoder and decoder */
1090 void ff_MPV_common_end(MpegEncContext *s)
/* Tear down slice contexts (slot 0 is s itself — free its scratch
 * buffers but not the struct). */
1094 if (s->slice_context_count > 1) {
1095 for (i = 0; i < s->slice_context_count; i++) {
1096 free_duplicate_context(s->thread_context[i]);
1098 for (i = 1; i < s->slice_context_count; i++) {
1099 av_freep(&s->thread_context[i]);
1101 s->slice_context_count = 1;
1102 } else free_duplicate_context(s);
1104 av_freep(&s->parse_context.buffer);
1105 s->parse_context.buffer_size = 0;
1107 av_freep(&s->bitstream_buffer);
1108 s->allocated_bitstream_buffer_size = 0;
/* Encoder-side allocations. */
1110 av_freep(&s->avctx->stats_out);
1111 av_freep(&s->ac_stats);
1113 av_freep(&s->q_intra_matrix);
1114 av_freep(&s->q_inter_matrix);
1115 av_freep(&s->q_intra_matrix16);
1116 av_freep(&s->q_inter_matrix16);
1117 av_freep(&s->input_picture);
1118 av_freep(&s->reordered_input_picture);
1119 av_freep(&s->dct_offset);
/* Copies made for frame threading do not own the picture pool. */
1121 if (s->picture && !s->avctx->internal->is_copy) {
1122 for (i = 0; i < s->picture_count; i++) {
1123 free_picture(s, &s->picture[i]);
1126 av_freep(&s->picture);
1128 free_context_frame(s);
1130 s->context_initialized = 0;
1131 s->last_picture_ptr =
1132 s->next_picture_ptr =
1133 s->current_picture_ptr = NULL;
1134 s->linesize = s->uvlinesize = 0;
/**
 * Build the derived run/level lookup tables (max_level, max_run,
 * index_run) of an RLTable. When static_store is given the tables are
 * placed there and built only once; otherwise they are heap-allocated.
 */
1137 void ff_init_rl(RLTable *rl,
1138 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1140 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1141 uint8_t index_run[MAX_RUN + 1];
1142 int last, run, level, start, end, i;
1144 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1145 if (static_store && rl->max_level[0])
1148 /* compute max_level[], max_run[] and index_run[] */
1149 for (last = 0; last < 2; last++) {
/* rl->n marks "no entry" in index_run. */
1158 memset(max_level, 0, MAX_RUN + 1);
1159 memset(max_run, 0, MAX_LEVEL + 1);
1160 memset(index_run, rl->n, MAX_RUN + 1);
1161 for (i = start; i < end; i++) {
1162 run = rl->table_run[i];
1163 level = rl->table_level[i];
1164 if (index_run[run] == rl->n)
1166 if (level > max_level[run])
1167 max_level[run] = level;
1168 if (run > max_run[level])
1169 max_run[level] = run;
/* Commit the scratch tables, either into static_store or the heap. */
1172 rl->max_level[last] = static_store[last];
1174 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1175 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1177 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1179 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1180 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1182 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1184 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1185 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute, for every qscale 0..31, an RL_VLC table that folds the VLC
 * decode and the dequantization (level * qmul + qadd) into one lookup. */
1189 void ff_init_vlc_rl(RLTable *rl)
1193 for (q = 0; q < 32; q++) {
/* (q - 1) | 1 yields the odd rounding term used by H.263-style
 * dequantization; assumes qmul is derived from q — TODO confirm, the
 * qmul setup line is not visible here. */
1195 int qadd = (q - 1) | 1;
1201 for (i = 0; i < rl->vlc.table_size; i++) {
1202 int code = rl->vlc.table[i][0];
1203 int len = rl->vlc.table[i][1];
1206 if (len == 0) { // illegal code
1209 } else if (len < 0) { // more bits needed
1213 if (code == rl->n) { // esc
/* Regular code: decode run/level and pre-apply the dequantizer. */
1217 run = rl->table_run[code] + 1;
1218 level = rl->table_level[code] * qmul + qadd;
/* Codes at or past rl->last mark the final coefficient; the +192
 * run offset encodes that in the combined table. */
1219 if (code >= rl->last) run += 192;
1222 rl->rl_vlc[q][i].len = len;
1223 rl->rl_vlc[q][i].level = level;
1224 rl->rl_vlc[q][i].run = run;
/* Free the frame buffers of all non-reference pictures owned by this
 * context. The current picture is kept unless remove_current is set. */
1229 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1233 /* release non reference frames */
1234 for (i = 0; i < s->picture_count; i++) {
/* Only touch pictures this context owns (owner2 NULL or self). */
1235 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1236 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1237 (remove_current || &s->picture[i] != s->current_picture_ptr)
1238 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1239 free_frame_buffer(s, &s->picture[i]);
/* Return nonzero if the Picture slot can be (re)used: it either has no
 * allocated data, or is flagged for reallocation and owned by us. */
1244 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1246 if (pic->f.data[0] == NULL)
1248 if (pic->needs_realloc)
1249 if (!pic->owner2 || pic->owner2 == s)
/* Pick a free slot in s->picture[]. Three passes with decreasing
 * preference: (1) never-allocated internal slots, (2) reusable slots of
 * user/shared type, (3) any reusable slot. Returns the slot index, or
 * AVERROR_INVALIDDATA when every slot is in use. The 'shared' selection
 * path is handled before these loops — not visible here, TODO confirm. */
1254 static int find_unused_picture(MpegEncContext *s, int shared)
1259 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1260 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1264 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1265 if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
1268 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1269 if (pic_is_unused(s, &s->picture[i]))
1274 return AVERROR_INVALIDDATA;
/* Public wrapper around find_unused_picture(): when the chosen slot is
 * marked needs_realloc, free its old contents and reset the frame to
 * defaults so the caller gets a clean slot. Returns the slot index or a
 * negative AVERROR code. */
1277 int ff_find_unused_picture(MpegEncContext *s, int shared)
1279 int ret = find_unused_picture(s, shared);
1281 if (ret >= 0 && ret < s->picture_range_end) {
1282 if (s->picture[ret].needs_realloc) {
1283 s->picture[ret].needs_realloc = 0;
1284 free_picture(s, &s->picture[ret]);
1285 avcodec_get_frame_defaults(&s->picture[ret].f);
/* Recompute the per-coefficient DCT offsets used for noise reduction from
 * the running error statistics, separately for intra and inter blocks. */
1291 static void update_noise_reduction(MpegEncContext *s)
1295 for (intra = 0; intra < 2; intra++) {
/* Halve the accumulators once the sample count exceeds 2^16 so the
 * statistics keep adapting instead of saturating. */
1296 if (s->dct_count[intra] > (1 << 16)) {
1297 for (i = 0; i < 64; i++) {
1298 s->dct_error_sum[intra][i] >>= 1;
1300 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / avg_error, computed with rounding;
 * the +1 in the divisor avoids division by zero. */
1303 for (i = 0; i < 64; i++) {
1304 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1305 s->dct_count[intra] +
1306 s->dct_error_sum[intra][i] / 2) /
1307 (s->dct_error_sum[intra][i] + 1);
1313 * generic function for encode/decode called after coding/decoding
1314 * the header and before a frame is coded/decoded.
1316 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1322 /* mark & release old frames */
/* For non-H264 codecs (SVQ3 reuses this path): once a non-B frame starts,
 * the previous last_picture can be released if we own it. */
1323 if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
1324 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1325 s->last_picture_ptr != s->next_picture_ptr &&
1326 s->last_picture_ptr->f.data[0]) {
1327 if (s->last_picture_ptr->owner2 == s)
1328 free_frame_buffer(s, s->last_picture_ptr);
1331 /* release forgotten pictures */
1332 /* if (mpeg124/h263) */
/* Reference pictures that are neither last nor next should not exist at
 * this point; with frame threading this can legitimately happen, hence
 * the error log is suppressed in that case. */
1334 for (i = 0; i < s->picture_count; i++) {
1335 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1336 &s->picture[i] != s->last_picture_ptr &&
1337 &s->picture[i] != s->next_picture_ptr &&
1338 s->picture[i].f.reference && !s->picture[i].needs_realloc) {
1339 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1340 av_log(avctx, AV_LOG_ERROR,
1341 "releasing zombie picture\n");
1342 free_frame_buffer(s, &s->picture[i]);
1349 ff_release_unused_pictures(s, 1);
/* Pick (or reuse) the Picture that will hold the current frame. */
1351 if (s->current_picture_ptr &&
1352 s->current_picture_ptr->f.data[0] == NULL) {
1353 // we already have a unused image
1354 // (maybe it was set before reading the header)
1355 pic = s->current_picture_ptr;
1357 i = ff_find_unused_picture(s, 0);
1359 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1362 pic = &s->picture[i];
/* Reference flags: H.264 uses the picture structure; otherwise any
 * non-B picture is a (frame) reference (3 == both fields). */
1365 pic->f.reference = 0;
1367 if (s->codec_id == AV_CODEC_ID_H264)
1368 pic->f.reference = s->picture_structure;
1369 else if (s->pict_type != AV_PICTURE_TYPE_B)
1370 pic->f.reference = 3;
1373 pic->f.coded_picture_number = s->coded_picture_number++;
1375 if (ff_alloc_picture(s, pic, 0) < 0)
1378 s->current_picture_ptr = pic;
1379 // FIXME use only the vars from current_pic
/* Propagate interlacing metadata into the output frame. */
1380 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1381 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1382 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1383 if (s->picture_structure != PICT_FRAME)
1384 s->current_picture_ptr->f.top_field_first =
1385 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1387 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1388 !s->progressive_sequence;
1389 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1392 s->current_picture_ptr->f.pict_type = s->pict_type;
1393 // if (s->flags && CODEC_FLAG_QSCALE)
1394 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1395 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1397 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* Shift the reference window: last <- next, next <- current (for non-B). */
1399 if (s->pict_type != AV_PICTURE_TYPE_B) {
1400 s->last_picture_ptr = s->next_picture_ptr;
1402 s->next_picture_ptr = s->current_picture_ptr;
1404 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1405 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1406 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1407 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1408 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1409 s->pict_type, s->dropable);
/* Non-H264: a stream may start on a non-keyframe or on a field; in both
 * cases a dummy last (and, for B frames, next) picture is allocated so
 * motion compensation has something to reference. */
1411 if (s->codec_id != AV_CODEC_ID_H264) {
1412 if ((s->last_picture_ptr == NULL ||
1413 s->last_picture_ptr->f.data[0] == NULL) &&
1414 (s->pict_type != AV_PICTURE_TYPE_I ||
1415 s->picture_structure != PICT_FRAME)) {
1416 if (s->pict_type != AV_PICTURE_TYPE_I)
1417 av_log(avctx, AV_LOG_ERROR,
1418 "warning: first frame is no keyframe\n");
1419 else if (s->picture_structure != PICT_FRAME)
1420 av_log(avctx, AV_LOG_INFO,
1421 "allocate dummy last picture for field based first keyframe\n");
1423 /* Allocate a dummy frame */
1424 i = ff_find_unused_picture(s, 0);
1426 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1429 s->last_picture_ptr = &s->picture[i];
1430 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1431 s->last_picture_ptr = NULL;
/* Mark both fields of the dummy frame fully decoded so frame-threaded
 * consumers never wait on it. */
1434 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1435 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1436 s->last_picture_ptr->f.reference = 3;
1438 if ((s->next_picture_ptr == NULL ||
1439 s->next_picture_ptr->f.data[0] == NULL) &&
1440 s->pict_type == AV_PICTURE_TYPE_B) {
1441 /* Allocate a dummy frame */
1442 i = ff_find_unused_picture(s, 0);
1444 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1447 s->next_picture_ptr = &s->picture[i];
1448 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1449 s->next_picture_ptr = NULL;
1452 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1453 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1454 s->next_picture_ptr->f.reference = 3;
1458 if (s->last_picture_ptr)
1459 ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1460 if (s->next_picture_ptr)
1461 ff_copy_picture(&s->next_picture, s->next_picture_ptr);
/* Under frame threading this context takes ownership of its references. */
1463 if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
1464 (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3)) {
1465 if (s->next_picture_ptr)
1466 s->next_picture_ptr->owner2 = s;
1467 if (s->last_picture_ptr)
1468 s->last_picture_ptr->owner2 = s;
1471 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1472 s->last_picture_ptr->f.data[0]));
/* Field pictures: address only one field by offsetting bottom-field data
 * pointers by one line and doubling every linesize. */
1474 if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1476 for (i = 0; i < 4; i++) {
1477 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1478 s->current_picture.f.data[i] +=
1479 s->current_picture.f.linesize[i];
1481 s->current_picture.f.linesize[i] *= 2;
1482 s->last_picture.f.linesize[i] *= 2;
1483 s->next_picture.f.linesize[i] *= 2;
1487 s->err_recognition = avctx->err_recognition;
1489 /* set dequantizer, we can't do it during init as
1490 * it might change for mpeg4 and we can't do it in the header
1491 * decode as init is not called for mpeg4 there yet */
1492 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1493 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1494 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1495 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1496 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1497 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1499 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1500 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
/* dct_error_sum is only allocated when encoding with noise reduction. */
1503 if (s->dct_error_sum) {
1504 assert(s->avctx->noise_reduction && s->encoding);
1505 update_noise_reduction(s);
1508 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1509 return ff_xvmc_field_start(s, avctx);
1514 /* generic function for encode/decode called after a
1515 * frame has been coded/decoded. */
1516 void ff_MPV_frame_end(MpegEncContext *s)
1519 /* redraw edges for the frame if decoding didn't complete */
1520 // just to make sure that all data is rendered.
1521 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1522 ff_xvmc_field_end(s);
/* Pad the picture edges (needed by unrestricted MV prediction) unless a
 * hwaccel owns the buffers or edge emulation is requested. Only done for
 * reference pictures and when there were errors or we are encoding. */
1523 } else if ((s->error_count || s->encoding) &&
1524 !s->avctx->hwaccel &&
1525 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1526 s->unrestricted_mv &&
1527 s->current_picture.f.reference &&
1529 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1530 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1531 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1532 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1533 s->h_edge_pos, s->v_edge_pos,
1534 EDGE_WIDTH, EDGE_WIDTH,
1535 EDGE_TOP | EDGE_BOTTOM);
/* Chroma planes: positions and edge widths scaled by the subsampling. */
1536 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1537 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1538 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1539 EDGE_TOP | EDGE_BOTTOM);
1540 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1541 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1542 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1543 EDGE_TOP | EDGE_BOTTOM);
/* Remember per-type state for the next frame's rate control / headers. */
1548 s->last_pict_type = s->pict_type;
1549 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1550 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1551 s->last_non_b_pict_type = s->pict_type;
1554 /* copy back current_picture variables */
1555 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1556 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1557 s->picture[i] = s->current_picture;
1561 assert(i < MAX_PICTURE_COUNT);
1565 /* release non-reference frames */
1566 for (i = 0; i < s->picture_count; i++) {
1567 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1568 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1569 free_frame_buffer(s, &s->picture[i]);
1573 // clear copies, to avoid confusion
1575 memset(&s->last_picture, 0, sizeof(Picture));
1576 memset(&s->next_picture, 0, sizeof(Picture));
1577 memset(&s->current_picture, 0, sizeof(Picture));
1579 s->avctx->coded_frame = &s->current_picture_ptr->f;
/* Signal full completion of this reference frame to waiting threads. */
1581 if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
1582 ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
1587 * Draw a line from (ex, ey) -> (sx, sy).
1588 * @param w width of the image
1589 * @param h height of the image
1590 * @param stride stride/linesize of the image
1591 * @param color color of the arrow
1593 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1594 int w, int h, int stride, int color)
1598 sx = av_clip(sx, 0, w - 1);
1599 sy = av_clip(sy, 0, h - 1);
1600 ex = av_clip(ex, 0, w - 1);
1601 ey = av_clip(ey, 0, h - 1);
1603 buf[sy * stride + sx] += color;
1605 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1607 FFSWAP(int, sx, ex);
1608 FFSWAP(int, sy, ey);
1610 buf += sx + sy * stride;
1612 f = ((ey - sy) << 16) / ex;
1613 for (x = 0; x = ex; x++) {
1615 fr = (x * f) & 0xFFFF;
1616 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1617 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1621 FFSWAP(int, sx, ex);
1622 FFSWAP(int, sy, ey);
1624 buf += sx + sy * stride;
1627 f = ((ex - sx) << 16) / ey;
1630 for (y = 0; y = ey; y++) {
1632 fr = (y * f) & 0xFFFF;
1633 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1634 buf[y * stride + x + 1] += (color * fr ) >> 16;
1640 * Draw an arrow from (ex, ey) -> (sx, sy).
1641 * @param w width of the image
1642 * @param h height of the image
1643 * @param stride stride/linesize of the image
1644 * @param color color of the arrow
1646 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1647 int ey, int w, int h, int stride, int color)
/* Wide clamp (+/-100 beyond the image) keeps the direction of off-screen
 * vectors roughly intact; draw_line() does the hard clipping. */
1651 sx = av_clip(sx, -100, w + 100);
1652 sy = av_clip(sy, -100, h + 100);
1653 ex = av_clip(ex, -100, w + 100);
1654 ey = av_clip(ey, -100, h + 100);
/* Only draw the two head strokes for vectors longer than 3 pixels. */
1659 if (dx * dx + dy * dy > 3 * 3) {
/* length in 4-bit fractional precision (ff_sqrt of a <<8 value). */
1662 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1664 // FIXME subpixel accuracy
/* Scale the head vector to a fixed 3-pixel size, then draw the head as
 * the vector and its perpendicular. */
1665 rx = ROUNDED_DIV(rx * 3 << 4, length);
1666 ry = ROUNDED_DIV(ry * 3 << 4, length);
1668 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1669 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1671 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1675 * Print debugging info for the given picture.
1677 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
/* Nothing to print for hwaccel frames or frames without MB metadata. */
1679 if (s->avctx->hwaccel || !pict || !pict->mb_type)
/* --- Textual dump: per-MB skip count, qscale and type characters. --- */
1682 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1685 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1686 switch (pict->pict_type) {
1687 case AV_PICTURE_TYPE_I:
1688 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1690 case AV_PICTURE_TYPE_P:
1691 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1693 case AV_PICTURE_TYPE_B:
1694 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1696 case AV_PICTURE_TYPE_S:
1697 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1699 case AV_PICTURE_TYPE_SI:
1700 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1702 case AV_PICTURE_TYPE_SP:
1703 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1706 for (y = 0; y < s->mb_height; y++) {
1707 for (x = 0; x < s->mb_width; x++) {
1708 if (s->avctx->debug & FF_DEBUG_SKIP) {
1709 int count = s->mbskip_table[x + y * s->mb_stride];
1712 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1714 if (s->avctx->debug & FF_DEBUG_QP) {
1715 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1716 pict->qscale_table[x + y * s->mb_stride]);
1718 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1719 int mb_type = pict->mb_type[x + y * s->mb_stride];
1720 // Type & MV direction
1721 if (IS_PCM(mb_type))
1722 av_log(s->avctx, AV_LOG_DEBUG, "P");
1723 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1724 av_log(s->avctx, AV_LOG_DEBUG, "A");
1725 else if (IS_INTRA4x4(mb_type))
1726 av_log(s->avctx, AV_LOG_DEBUG, "i");
1727 else if (IS_INTRA16x16(mb_type))
1728 av_log(s->avctx, AV_LOG_DEBUG, "I");
1729 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1730 av_log(s->avctx, AV_LOG_DEBUG, "d");
1731 else if (IS_DIRECT(mb_type))
1732 av_log(s->avctx, AV_LOG_DEBUG, "D");
1733 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1734 av_log(s->avctx, AV_LOG_DEBUG, "g");
1735 else if (IS_GMC(mb_type))
1736 av_log(s->avctx, AV_LOG_DEBUG, "G");
1737 else if (IS_SKIP(mb_type))
1738 av_log(s->avctx, AV_LOG_DEBUG, "S");
1739 else if (!USES_LIST(mb_type, 1))
1740 av_log(s->avctx, AV_LOG_DEBUG, ">");
1741 else if (!USES_LIST(mb_type, 0))
1742 av_log(s->avctx, AV_LOG_DEBUG, "<");
1744 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1745 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second character: block partitioning of the MB. */
1749 if (IS_8X8(mb_type))
1750 av_log(s->avctx, AV_LOG_DEBUG, "+");
1751 else if (IS_16X8(mb_type))
1752 av_log(s->avctx, AV_LOG_DEBUG, "-");
1753 else if (IS_8X16(mb_type))
1754 av_log(s->avctx, AV_LOG_DEBUG, "|");
1755 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1756 av_log(s->avctx, AV_LOG_DEBUG, " ");
1758 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* Third character: interlaced flag. */
1761 if (IS_INTERLACED(mb_type))
1762 av_log(s->avctx, AV_LOG_DEBUG, "=");
1764 av_log(s->avctx, AV_LOG_DEBUG, " ");
1767 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* --- Visual overlay: draw MVs / QP / MB types into a copy of the frame. --- */
1771 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1772 (s->avctx->debug_mv)) {
1773 const int shift = 1 + s->quarter_sample;
1777 int h_chroma_shift, v_chroma_shift, block_height;
1778 const int width = s->avctx->width;
1779 const int height = s->avctx->height;
1780 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1781 const int mv_stride = (s->mb_width << mv_sample_log2) +
1782 (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
1783 s->low_delay = 0; // needed to see the vectors without trashing the buffers
/* Work on a private copy so the reference planes are left untouched. */
1785 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1786 &h_chroma_shift, &v_chroma_shift);
1787 for (i = 0; i < 3; i++) {
1788 memcpy(s->visualization_buffer[i], pict->data[i],
1789 (i == 0) ? pict->linesize[i] * height:
1790 pict->linesize[i] * height >> v_chroma_shift);
1791 pict->data[i] = s->visualization_buffer[i];
1793 pict->type = FF_BUFFER_TYPE_COPY;
1794 ptr = pict->data[0];
1795 block_height = 16 >> v_chroma_shift;
1797 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1799 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1800 const int mb_index = mb_x + mb_y * s->mb_stride;
1801 if ((s->avctx->debug_mv) && pict->motion_val) {
/* type 0/1/2 = P forward, B forward, B backward vectors. */
1803 for (type = 0; type < 3; type++) {
1807 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1808 (pict->pict_type!= AV_PICTURE_TYPE_P))
1813 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1814 (pict->pict_type!= AV_PICTURE_TYPE_B))
1819 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1820 (pict->pict_type!= AV_PICTURE_TYPE_B))
1825 if (!USES_LIST(pict->mb_type[mb_index], direction))
1828 if (IS_8X8(pict->mb_type[mb_index])) {
1830 for (i = 0; i < 4; i++) {
1831 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1832 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1833 int xy = (mb_x * 2 + (i & 1) +
1834 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1835 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1836 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1837 draw_arrow(ptr, sx, sy, mx, my, width,
1838 height, s->linesize, 100);
1840 } else if (IS_16X8(pict->mb_type[mb_index])) {
1842 for (i = 0; i < 2; i++) {
1843 int sx = mb_x * 16 + 8;
1844 int sy = mb_y * 16 + 4 + 8 * i;
1845 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1846 int mx = (pict->motion_val[direction][xy][0] >> shift);
1847 int my = (pict->motion_val[direction][xy][1] >> shift);
1849 if (IS_INTERLACED(pict->mb_type[mb_index]))
1852 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1853 height, s->linesize, 100);
1855 } else if (IS_8X16(pict->mb_type[mb_index])) {
1857 for (i = 0; i < 2; i++) {
1858 int sx = mb_x * 16 + 4 + 8 * i;
1859 int sy = mb_y * 16 + 8;
1860 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1861 int mx = pict->motion_val[direction][xy][0] >> shift;
1862 int my = pict->motion_val[direction][xy][1] >> shift;
1864 if (IS_INTERLACED(pict->mb_type[mb_index]))
1867 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1868 height, s->linesize, 100);
1871 int sx = mb_x * 16 + 8;
1872 int sy = mb_y * 16 + 8;
1873 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
/* NOTE(review): unlike the 8x8 case above, the shift here is not
 * parenthesized, so ">> shift + sx" parses as ">> (shift + sx)" — an
 * almost certainly wrong (and potentially UB) shift amount. This
 * looks like missing parentheses: confirm the intent is
 * "(... >> shift) + sx" / "(... >> shift) + sy". */
1874 int mx = pict->motion_val[direction][xy][0] >> shift + sx;
1875 int my = pict->motion_val[direction][xy][1] >> shift + sy;
1876 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP visualization: paint both chroma planes with a gray level
 * proportional to the MB's qscale. */
1880 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1881 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1882 0x0101010101010101ULL;
1884 for (y = 0; y < block_height; y++) {
1885 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1886 (block_height * mb_y + y) *
1887 pict->linesize[1]) = c;
1888 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1889 (block_height * mb_y + y) *
1890 pict->linesize[2]) = c;
/* MB-type visualization: chroma hue encodes the macroblock type. */
1893 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1895 int mb_type = pict->mb_type[mb_index];
1898 #define COLOR(theta, r) \
1899 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1900 v = (int)(128 + r * sin(theta * 3.141592 / 180));
1904 if (IS_PCM(mb_type)) {
1906 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1907 IS_INTRA16x16(mb_type)) {
1909 } else if (IS_INTRA4x4(mb_type)) {
1911 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1913 } else if (IS_DIRECT(mb_type)) {
1915 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1917 } else if (IS_GMC(mb_type)) {
1919 } else if (IS_SKIP(mb_type)) {
1921 } else if (!USES_LIST(mb_type, 1)) {
1923 } else if (!USES_LIST(mb_type, 0)) {
1926 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1930 u *= 0x0101010101010101ULL;
1931 v *= 0x0101010101010101ULL;
1932 for (y = 0; y < block_height; y++) {
1933 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1934 (block_height * mb_y + y) * pict->linesize[1]) = u;
1935 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1936 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* Mark partition boundaries in luma with XOR patterns. */
1940 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1941 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1942 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1943 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1944 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1946 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1947 for (y = 0; y < 16; y++)
1948 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1949 pict->linesize[0]] ^= 0x80;
1951 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
1952 int dm = 1 << (mv_sample_log2 - 2);
1953 for (i = 0; i < 4; i++) {
1954 int sx = mb_x * 16 + 8 * (i & 1);
1955 int sy = mb_y * 16 + 8 * (i >> 1);
1956 int xy = (mb_x * 2 + (i & 1) +
1957 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
/* Sub-divide further only where the 4 sub-block MVs differ. */
1959 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
1960 if (mv[0] != mv[dm] ||
1961 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
1962 for (y = 0; y < 8; y++)
1963 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
1964 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
1965 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
1966 pict->linesize[0]) ^= 0x8080808080808080ULL;
1970 if (IS_INTERLACED(mb_type) &&
1971 s->codec_id == AV_CODEC_ID_H264) {
/* Reset so each frame's skip counter starts fresh. */
1975 s->mbskip_table[mb_index] = 0;
1982 * find the lowest MB row referenced in the MVs
1984 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* MVs are in half-pel units when quarter_sample is 0, hence the extra
 * shift to normalize everything to quarter-pel before taking the max. */
1986 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1987 int my, off, i, mvs;
/* Field pictures are not handled; fall back to the conservative answer. */
1989 if (s->picture_structure != PICT_FRAME) goto unhandled;
/* mvs is set per mv_type in the (elided) switch cases. */
1991 switch (s->mv_type) {
2005 for (i = 0; i < mvs; i++) {
2006 my = s->mv[dir][i][1]<<qpel_shift;
2007 my_max = FFMAX(my_max, my);
2008 my_min = FFMIN(my_min, my);
/* Convert the largest quarter-pel displacement to whole MB rows (64
 * quarter-pels per 16-pixel MB row), rounding up. */
2011 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2013 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* Unhandled cases: assume the whole reference frame may be needed. */
2015 return s->mb_height-1;
2018 /* Dequantize intra block i with qscale, then overwrite dest[] with its IDCT. */
2019 static inline void put_dct(MpegEncContext *s,
2020 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2022 s->dct_unquantize_intra(s, block, i, qscale);
2023 s->dsp.idct_put (dest, line_size, block);
2026 /* add block[] to dest[] */
2027 static inline void add_dct(MpegEncContext *s,
2028 DCTELEM *block, int i, uint8_t *dest, int line_size)
2030 if (s->block_last_index[i] >= 0) {
2031 s->dsp.idct_add (dest, line_size, block);
2035 static inline void add_dequant_dct(MpegEncContext *s,
2036 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2038 if (s->block_last_index[i] >= 0) {
2039 s->dct_unquantize_inter(s, block, i, qscale);
2041 s->dsp.idct_add (dest, line_size, block);
2046 * Clean dc, ac, coded_block for the current non-intra MB.
2048 void ff_clean_intra_table_entries(MpegEncContext *s)
/* Luma prediction state is indexed on the 8x8-block grid (b8_stride). */
2050 int wrap = s->b8_stride;
2051 int xy = s->block_index[0];
/* Reset the four luma DC predictors to the neutral value 1024. */
2054 s->dc_val[0][xy + 1 ] =
2055 s->dc_val[0][xy + wrap] =
2056 s->dc_val[0][xy + 1 + wrap] = 1024;
/* Clear two rows of luma AC predictors (2 blocks x 16 coeffs each). */
2058 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2059 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2060 if (s->msmpeg4_version>=3) {
2061 s->coded_block[xy ] =
2062 s->coded_block[xy + 1 ] =
2063 s->coded_block[xy + wrap] =
2064 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma prediction state is indexed on the macroblock grid. */
2067 wrap = s->mb_stride;
2068 xy = s->mb_x + s->mb_y * wrap;
2070 s->dc_val[2][xy] = 1024;
/* ac pred */
2072 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2073 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
/* Mark this MB as no longer intra so the cleanup is not repeated. */
2075 s->mbintra_table[xy]= 0;
2078 /* generic function called after a macroblock has been parsed by the
2079 decoder or after it has been encoded by the encoder.
2081 Important variables used:
2082 s->mb_intra : true if intra macroblock
2083 s->mv_dir : motion vector direction
2084 s->mv_type : motion vector type
2085 s->mv : motion vector
2086 s->interlaced_dct : true if interlaced dct used (mpeg2)
2088 static av_always_inline
2089 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2092 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2093 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2094 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2098 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2099 /* save DCT coefficients */
2101 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2102 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2104 for(j=0; j<64; j++){
2105 *dct++ = block[i][s->dsp.idct_permutation[j]];
2106 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2108 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2112 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2114 /* update DC predictors for P macroblocks */
2116 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2117 if(s->mbintra_table[mb_xy])
2118 ff_clean_intra_table_entries(s);
2122 s->last_dc[2] = 128 << s->intra_dc_precision;
2125 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2126 s->mbintra_table[mb_xy]=1;
2128 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2129 uint8_t *dest_y, *dest_cb, *dest_cr;
2130 int dct_linesize, dct_offset;
2131 op_pixels_func (*op_pix)[4];
2132 qpel_mc_func (*op_qpix)[16];
2133 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2134 const int uvlinesize = s->current_picture.f.linesize[1];
2135 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2136 const int block_size = 8;
2138 /* avoid copy if macroblock skipped in last frame too */
2139 /* skip only during decoding as we might trash the buffers during encoding a bit */
2141 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2143 if (s->mb_skipped) {
2145 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2147 } else if(!s->current_picture.f.reference) {
2150 *mbskip_ptr = 0; /* not skipped */
2154 dct_linesize = linesize << s->interlaced_dct;
2155 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2159 dest_cb= s->dest[1];
2160 dest_cr= s->dest[2];
2162 dest_y = s->b_scratchpad;
2163 dest_cb= s->b_scratchpad+16*linesize;
2164 dest_cr= s->b_scratchpad+32*linesize;
2168 /* motion handling */
2169 /* decoding or more than one mb_type (MC was already done otherwise) */
2172 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2173 if (s->mv_dir & MV_DIR_FORWARD) {
2174 ff_thread_await_progress(&s->last_picture_ptr->f,
2175 ff_MPV_lowest_referenced_row(s, 0),
2178 if (s->mv_dir & MV_DIR_BACKWARD) {
2179 ff_thread_await_progress(&s->next_picture_ptr->f,
2180 ff_MPV_lowest_referenced_row(s, 1),
2185 op_qpix= s->me.qpel_put;
2186 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2187 op_pix = s->dsp.put_pixels_tab;
2189 op_pix = s->dsp.put_no_rnd_pixels_tab;
2191 if (s->mv_dir & MV_DIR_FORWARD) {
2192 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2193 op_pix = s->dsp.avg_pixels_tab;
2194 op_qpix= s->me.qpel_avg;
2196 if (s->mv_dir & MV_DIR_BACKWARD) {
2197 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2201 /* skip dequant / idct if we are really late ;) */
2202 if(s->avctx->skip_idct){
2203 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2204 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2205 || s->avctx->skip_idct >= AVDISCARD_ALL)
2209 /* add dct residue */
2210 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2211 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2212 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2213 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2214 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2215 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2217 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2218 if (s->chroma_y_shift){
2219 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2220 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2224 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2225 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2226 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2227 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2230 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2231 add_dct(s, block[0], 0, dest_y , dct_linesize);
2232 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2233 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2234 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2236 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2237 if(s->chroma_y_shift){//Chroma420
2238 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2239 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2242 dct_linesize = uvlinesize << s->interlaced_dct;
2243 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2245 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2246 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2247 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2248 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2249 if(!s->chroma_x_shift){//Chroma444
2250 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2251 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2252 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2253 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2258 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2259 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2262 /* dct only in intra block */
2263 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2264 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2265 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2266 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2267 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2269 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2270 if(s->chroma_y_shift){
2271 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2272 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2276 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2277 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2278 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2279 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2283 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2284 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2285 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2286 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2288 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2289 if(s->chroma_y_shift){
2290 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2291 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2294 dct_linesize = uvlinesize << s->interlaced_dct;
2295 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2297 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2298 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2299 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2300 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2301 if(!s->chroma_x_shift){//Chroma444
2302 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2303 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2304 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2305 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2313 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2314 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2315 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/**
 * Reconstruct one macroblock from its decoded DCT blocks.
 * Dispatches to MPV_decode_mb_internal with is_mpeg12 as a compile-time
 * constant (1 for FMT_MPEG1 streams, 0 otherwise) so the compiler can
 * specialise the two hot paths.
 * NOTE(review): the else/closing lines of this function appear to be
 * missing from this extraction; code lines kept verbatim.
 */
2320 void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2322 if(s->out_format == FMT_MPEG1) {
2323 MPV_decode_mb_internal(s, block, 1);
2326 MPV_decode_mb_internal(s, block, 0);
2330 * @param h is the normal height, this will be reduced automatically if needed for the last row
/**
 * Notify the user of a completed band of decoded rows and, when the frame
 * will be used as a reference, extend its edges for unrestricted MV search.
 * @param y first row of the band
 * @param h nominal band height; clipped to the picture height below
 * NOTE(review): several interior lines (offset[1], zero-offset branch,
 * closing braces) are missing from this extraction; code kept verbatim.
 */
2332 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2333 const int field_pic= s->picture_structure != PICT_FRAME;
/* edge extension only for software decoding of reference frames without
 * CODEC_FLAG_EMU_EDGE (hwaccel/VDPAU surfaces are not touched) */
2339 if (!s->avctx->hwaccel
2340 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2341 && s->unrestricted_mv
2342 && s->current_picture.f.reference
2344 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2345 int sides = 0, edge_h;
/* chroma subsampling shifts taken from the pixel format descriptor */
2346 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2347 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
/* extend top/bottom only when the band touches the respective border */
2348 if (y==0) sides |= EDGE_TOP;
2349 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2351 edge_h= FFMIN(h, s->v_edge_pos - y);
/* draw edges for luma and both chroma planes */
2353 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2354 s->linesize, s->h_edge_pos, edge_h,
2355 EDGE_WIDTH, EDGE_WIDTH, sides);
2356 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2357 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2358 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2359 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2360 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2361 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
/* clip the band to the visible picture height */
2364 h= FFMIN(h, s->avctx->height - y);
/* field pictures: skip the callback for the first field unless the user
 * explicitly allows per-field delivery */
2366 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2368 if (s->avctx->draw_horiz_band) {
2370 int offset[AV_NUM_DATA_POINTERS];
/* choose which picture the band comes from: current for B/low-delay or
 * coded-order delivery, otherwise the last (display-order) picture */
2373 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2374 src = &s->current_picture_ptr->f;
2375 else if(s->last_picture_ptr)
2376 src = &s->last_picture_ptr->f;
2380 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2381 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
/* plane offsets of the band within the source frame */
2384 offset[0]= y * s->linesize;
2386 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2387 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2393 s->avctx->draw_horiz_band(s->avctx, src, offset,
2394 y, s->picture_structure, h);
/**
 * Initialise per-macroblock block indices and destination plane pointers
 * for the macroblock at (s->mb_x, s->mb_y).
 * NOTE(review): some lines (else, closing braces) are missing from this
 * extraction; code lines kept verbatim.
 */
2398 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2399 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2400 const int uvlinesize = s->current_picture.f.linesize[1];
/* mb_size = log2 of the 16-pixel macroblock width, used as a shift */
2401 const int mb_size= 4;
/* luma 8x8 block indices on the b8 grid (2x2 per macroblock) */
2403 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2404 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2405 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2406 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* chroma block indices live after the luma area, on the mb grid */
2407 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2408 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2409 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* horizontal destination offset: one macroblock left of mb_x (advanced
 * as the row is decoded) */
2411 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2412 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2413 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2415 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2417 if(s->picture_structure==PICT_FRAME){
/* frame picture: advance by whole macroblock rows */
2418 s->dest[0] += s->mb_y * linesize << mb_size;
2419 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2420 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: rows are interleaved, so halve the vertical step */
2422 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2423 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2424 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2425 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Flush decoder state: release all internally/user-allocated picture
 * buffers, reset reference pointers, macroblock position, parser state
 * and the bitstream buffer. Called on seek/stream discontinuity.
 * NOTE(review): a few lines (early return, loop brace) are missing from
 * this extraction; code lines kept verbatim.
 */
2430 void ff_mpeg_flush(AVCodecContext *avctx){
2432 MpegEncContext *s = avctx->priv_data;
/* nothing to flush if the context was never (fully) initialised */
2434 if(s==NULL || s->picture==NULL)
2437 for(i=0; i<s->picture_count; i++){
2438 if (s->picture[i].f.data[0] &&
2439 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2440 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2441 free_frame_buffer(s, &s->picture[i]);
/* drop all reference-picture pointers */
2443 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2445 s->mb_x= s->mb_y= 0;
/* reset the frame-boundary parser */
2447 s->parse_context.state= -1;
2448 s->parse_context.frame_start_found= 0;
2449 s->parse_context.overread= 0;
2450 s->parse_context.overread_index= 0;
2451 s->parse_context.index= 0;
2452 s->parse_context.last_index= 0;
2453 s->bitstream_buffer_size=0;
/**
 * MPEG-1 intra-block inverse quantisation (C reference implementation).
 * Rescales the coefficients of one 8x8 block in place.
 * @param block  coefficients of one 8x8 block
 * @param n      block index (selects luma vs chroma DC scale)
 * @param qscale quantiser scale for this block
 * NOTE(review): the sign-handling/saturation lines are missing from this
 * extraction; code lines kept verbatim.
 */
2457 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2458 DCTELEM *block, int n, int qscale)
2460 int i, level, nCoeffs;
2461 const uint16_t *quant_matrix;
/* only coefficients up to the last non-zero index need rescaling */
2463 nCoeffs= s->block_last_index[n];
/* DC is scaled by the dc-scale tables, not by the quant matrix */
2466 block[0] = block[0] * s->y_dc_scale;
2468 block[0] = block[0] * s->c_dc_scale;
2469 /* XXX: only mpeg1 */
2470 quant_matrix = s->intra_matrix;
2471 for(i=1;i<=nCoeffs;i++) {
/* walk in scan order via the permutation table */
2472 int j= s->intra_scantable.permutated[i];
2477 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-1 mismatch control: force the dequantised value to be odd */
2478 level = (level - 1) | 1;
2481 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2482 level = (level - 1) | 1;
/**
 * MPEG-1 inter-block inverse quantisation (C reference implementation).
 * Rescales coefficients in place using the inter quant matrix; unlike the
 * intra path there is no DC special case, so the loop starts at i=0.
 * NOTE(review): sign-handling lines are missing from this extraction;
 * code lines kept verbatim.
 */
2489 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2490 DCTELEM *block, int n, int qscale)
2492 int i, level, nCoeffs;
2493 const uint16_t *quant_matrix;
2495 nCoeffs= s->block_last_index[n];
2497 quant_matrix = s->inter_matrix;
2498 for(i=0; i<=nCoeffs; i++) {
2499 int j= s->intra_scantable.permutated[i];
/* inter reconstruction: (2*level + 1) * qscale * matrix / 16 */
2504 level = (((level << 1) + 1) * qscale *
2505 ((int) (quant_matrix[j]))) >> 4;
/* MPEG-1 mismatch control: force the result to be odd */
2506 level = (level - 1) | 1;
2509 level = (((level << 1) + 1) * qscale *
2510 ((int) (quant_matrix[j]))) >> 4;
2511 level = (level - 1) | 1;
/**
 * MPEG-2 intra-block inverse quantisation (C reference implementation).
 * Same structure as the MPEG-1 intra version but without the oddification
 * step on each coefficient; with alternate scan all 64 coefficients are
 * processed regardless of block_last_index.
 * NOTE(review): sign-handling and closing lines are missing from this
 * extraction; code lines kept verbatim.
 */
2518 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2519 DCTELEM *block, int n, int qscale)
2521 int i, level, nCoeffs;
2522 const uint16_t *quant_matrix;
/* alternate scan: the last index is unreliable, process the whole block */
2524 if(s->alternate_scan) nCoeffs= 63;
2525 else nCoeffs= s->block_last_index[n];
/* DC uses the dc-scale tables */
2528 block[0] = block[0] * s->y_dc_scale;
2530 block[0] = block[0] * s->c_dc_scale;
2531 quant_matrix = s->intra_matrix;
2532 for(i=1;i<=nCoeffs;i++) {
2533 int j= s->intra_scantable.permutated[i];
2538 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2541 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Bit-exact MPEG-2 intra inverse quantisation.
 * Variant of dct_unquantize_mpeg2_intra_c; presumably it additionally
 * accumulates the coefficient parity for MPEG-2 mismatch control on the
 * last coefficient — the relevant lines are missing from this extraction,
 * TODO confirm against the full source. Code lines kept verbatim.
 */
2548 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2549 DCTELEM *block, int n, int qscale)
2551 int i, level, nCoeffs;
2552 const uint16_t *quant_matrix;
/* alternate scan: process all 64 coefficients */
2555 if(s->alternate_scan) nCoeffs= 63;
2556 else nCoeffs= s->block_last_index[n];
/* DC uses the dc-scale tables */
2559 block[0] = block[0] * s->y_dc_scale;
2561 block[0] = block[0] * s->c_dc_scale;
2562 quant_matrix = s->intra_matrix;
2563 for(i=1;i<=nCoeffs;i++) {
2564 int j= s->intra_scantable.permutated[i];
2569 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2572 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * MPEG-2 inter-block inverse quantisation (C reference implementation).
 * Uses the inter matrix and the (2*level + 1) reconstruction formula;
 * no per-coefficient oddification (unlike MPEG-1).
 * NOTE(review): sign handling and the final mismatch-control lines are
 * missing from this extraction; code lines kept verbatim.
 */
2581 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2582 DCTELEM *block, int n, int qscale)
2584 int i, level, nCoeffs;
2585 const uint16_t *quant_matrix;
/* alternate scan: process all 64 coefficients */
2588 if(s->alternate_scan) nCoeffs= 63;
2589 else nCoeffs= s->block_last_index[n];
2591 quant_matrix = s->inter_matrix;
2592 for(i=0; i<=nCoeffs; i++) {
2593 int j= s->intra_scantable.permutated[i];
2598 level = (((level << 1) + 1) * qscale *
2599 ((int) (quant_matrix[j]))) >> 4;
2602 level = (((level << 1) + 1) * qscale *
2603 ((int) (quant_matrix[j]))) >> 4;
/**
 * H.263-style intra inverse quantisation: level' = level*qmul +/- qadd
 * depending on sign (no quant matrix).
 * NOTE(review): the qmul assignment and sign-test lines are missing from
 * this extraction; code lines kept verbatim.
 */
2612 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2613 DCTELEM *block, int n, int qscale)
2615 int i, level, qmul, qadd;
2618 assert(s->block_last_index[n]>=0);
/* DC uses the dc-scale tables */
2624 block[0] = block[0] * s->y_dc_scale;
2626 block[0] = block[0] * s->c_dc_scale;
/* qadd is forced odd */
2627 qadd = (qscale - 1) | 1;
/* raster_end maps the last scan-order index to a raster bound, so the
 * loop can run in raster order */
2634 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2636 for(i=1; i<=nCoeffs; i++) {
/* negative levels move toward zero by qadd, positive away from zero */
2640 level = level * qmul - qadd;
2642 level = level * qmul + qadd;
/**
 * H.263-style inter inverse quantisation: identical formula to the intra
 * version but with no DC special case, so the loop starts at i=0.
 * NOTE(review): the qmul assignment and sign-test lines are missing from
 * this extraction; code lines kept verbatim.
 */
2649 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2650 DCTELEM *block, int n, int qscale)
2652 int i, level, qmul, qadd;
2655 assert(s->block_last_index[n]>=0);
/* qadd is forced odd */
2657 qadd = (qscale - 1) | 1;
2660 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2662 for(i=0; i<=nCoeffs; i++) {
/* negative levels move toward zero by qadd, positive away from zero */
2666 level = level * qmul - qadd;
2668 level = level * qmul + qadd;
2676 * set qscale and update qscale dependent variables.
/**
 * Set the quantiser scale and refresh everything derived from it:
 * the chroma qscale (via the chroma table) and the luma/chroma DC scales.
 * The visible `else if (qscale > 31)` clamps to the legal 1..31 range;
 * the lower-bound clamp line is missing from this extraction.
 */
2678 void ff_set_qscale(MpegEncContext * s, int qscale)
2682 else if (qscale > 31)
2686 s->chroma_qscale= s->chroma_qscale_table[qscale];
2688 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2689 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/**
 * Report row-level decode progress of the current picture to other
 * frame threads. Suppressed for B-frames (not used as references here),
 * partitioned frames and after a decode error, where per-row progress
 * would be meaningless or misleading.
 */
2692 void ff_MPV_report_decode_progress(MpegEncContext *s)
2694 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2695 ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);