2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
38 #include "xvmc_internal.h"
/* Forward declarations of the C reference implementations of DCT
 * coefficient dequantization, one pair (intra/inter) per standard.
 * They are installed as function pointers in ff_dct_common_init(). */
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 DCTELEM *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
61 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default luma-qscale -> chroma-qscale mapping: the identity.
 * Codecs with a non-linear chroma scale install their own table. */
67 static const uint8_t ff_default_chroma_qscale_table[32] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
70 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale
 * (indexed by qscale, 128 entries). */
73 const uint8_t ff_mpeg1_dc_scale_table[128] = {
74 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale for intra_dc_precision == 1: constant 4. */
85 static const uint8_t mpeg2_dc_scale_table1[128] = {
86 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale for intra_dc_precision == 2: constant 2. */
97 static const uint8_t mpeg2_dc_scale_table2[128] = {
98 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale for intra_dc_precision == 3: constant 1. */
109 static const uint8_t mpeg2_dc_scale_table3[128] = {
110 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale table selected by the MPEG-2 intra_dc_precision field (0..3);
 * precision 0 reuses the MPEG-1 table (constant 8). */
121 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
122 ff_mpeg1_dc_scale_table,
123 mpeg2_dc_scale_table1,
124 mpeg2_dc_scale_table2,
125 mpeg2_dc_scale_table3,
/* Pixel format list for plain 4:2:0 decoders (initializer elided in
 * this view — presumably YUV420P terminated by PIX_FMT_NONE; verify). */
128 const enum PixelFormat ff_pixfmt_list_420[] = {
/* Same list extended with hardware-acceleration formats — confirm
 * contents against the full file. */
133 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/**
 * Scan [p, end) for an MPEG start code (0x000001xx), carrying partial
 * matches across calls in *state (a sliding 32-bit window of the last
 * four bytes seen).
 * Returns a pointer just past the start code, or a position near end
 * if none was found; *state is updated so scanning can resume.
 */
141 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
143 uint32_t * restrict state)
/* Feed up to 3 bytes through the sliding window to complete any
 * start code that straddles the previous buffer boundary. */
151 for (i = 0; i < 3; i++) {
152 uint32_t tmp = *state << 8;
153 *state = tmp + *(p++);
154 if (tmp == 0x100 || p == end)
/* Step over non-zero bytes quickly: the distance we may skip depends
 * on which of the last bytes could begin a 00 00 01 sequence. */
159 if (p[-1] > 1 ) p += 3;
160 else if (p[-2] ) p += 2;
161 else if (p[-3]|(p[-1]-1)) p++;
/* Clamp and back up so the caller re-reads the last 4 bytes. */
168 p = FFMIN(p, end) - 4;
174 /* init common dct for both encoder and decoder */
175 av_cold int ff_dct_common_init(MpegEncContext *s)
177 ff_dsputil_init(&s->dsp, s->avctx);
/* Install the C dequantizers; the arch-specific init calls below may
 * override them with optimized versions. */
179 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
180 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
181 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
182 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
183 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
184 if (s->flags & CODEC_FLAG_BITEXACT)
185 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
186 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture overrides (presumably guarded by ARCH_*/HAVE_*
 * preprocessor conditionals elided from this view — confirm). */
189 ff_MPV_common_init_x86(s);
191 ff_MPV_common_init_axp(s);
193 ff_MPV_common_init_mmi(s);
195 ff_MPV_common_init_arm(s);
197 ff_MPV_common_init_altivec(s);
199 ff_MPV_common_init_bfin(s);
202 /* load & permutate scantables
203 * note: only wmv uses different ones
205 if (s->alternate_scan) {
206 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
207 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
209 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
210 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
212 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
213 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy a Picture and mark the destination's buffers as a copy so they
 * are not released through the regular buffer-release path. */
218 void ff_copy_picture(Picture *dst, Picture *src)
221 dst->f.type = FF_BUFFER_TYPE_COPY;
225 * Release a frame buffer
227 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
229 /* WM Image / Screen codecs allocate internal buffers with different
230 * dimensions / colorspaces; ignore user-defined callbacks for these. */
231 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
232 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
233 s->codec_id != AV_CODEC_ID_MSS2)
234 ff_thread_release_buffer(s->avctx, &pic->f);
/* The WM Image/Screen codecs above fall through to the default
 * release path instead. */
236 avcodec_default_release_buffer(s->avctx, &pic->f);
/* Any hwaccel private data attached in alloc_frame_buffer() dies
 * with the buffer. */
237 av_freep(&pic->f.hwaccel_picture_private);
241 * Allocate a frame buffer
243 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* Allocate per-picture hwaccel private data first so a get_buffer
 * failure below can clean it up in one place. */
247 if (s->avctx->hwaccel) {
248 assert(!pic->f.hwaccel_picture_private);
249 if (s->avctx->hwaccel->priv_data_size) {
250 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
251 if (!pic->f.hwaccel_picture_private) {
252 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* Mirror of free_frame_buffer(): WM Image/Screen codecs bypass the
 * threaded get_buffer and use the default allocator. */
258 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
259 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
260 s->codec_id != AV_CODEC_ID_MSS2)
261 r = ff_thread_get_buffer(s->avctx, &pic->f);
263 r = avcodec_default_get_buffer(s->avctx, &pic->f);
265 if (r < 0 || !pic->f.type || !pic->f.data[0]) {
266 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
267 r, pic->f.type, pic->f.data[0]);
268 av_freep(&pic->f.hwaccel_picture_private);
/* The context caches linesizes; a user get_buffer that changes stride
 * mid-stream would corrupt all stride-derived tables, so reject it. */
272 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
273 s->uvlinesize != pic->f.linesize[1])) {
274 av_log(s->avctx, AV_LOG_ERROR,
275 "get_buffer() failed (stride changed)\n");
276 free_frame_buffer(s, pic);
/* Both chroma planes must share one stride (code elsewhere assumes
 * a single uvlinesize). */
280 if (pic->f.linesize[1] != pic->f.linesize[2]) {
281 av_log(s->avctx, AV_LOG_ERROR,
282 "get_buffer() failed (uv stride mismatch)\n");
283 free_frame_buffer(s, pic);
291 * Allocate a Picture.
292 * The pixels are allocated/set by calling get_buffer() if shared = 0
294 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
296 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
298 // the + 1 is needed so memset(,,stride*height) does not sig11
300 const int mb_array_size = s->mb_stride * s->mb_height;
301 const int b8_array_size = s->b8_stride * s->mb_height * 2;
302 const int b4_array_size = s->b4_stride * s->mb_height * 4;
/* shared: caller already owns the pixel buffers; just tag them. */
307 assert(pic->f.data[0]);
308 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
309 pic->f.type = FF_BUFFER_TYPE_SHARED;
311 assert(!pic->f.data[0]);
313 if (alloc_frame_buffer(s, pic) < 0)
/* Cache the strides chosen by get_buffer(); alloc_frame_buffer()
 * enforces that they never change afterwards. */
316 s->linesize = pic->f.linesize[0];
317 s->uvlinesize = pic->f.linesize[1];
/* First-time per-picture table allocation (skipped on reuse). */
320 if (pic->f.qscale_table == NULL) {
322 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
323 mb_array_size * sizeof(int16_t), fail)
324 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
325 mb_array_size * sizeof(int16_t), fail)
326 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
327 mb_array_size * sizeof(int8_t ), fail)
330 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
331 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
332 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
333 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
335 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
336 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
/* Tables are offset so that index -1 / row -1 accesses (used by
 * prediction and error resilience) stay inside the allocation. */
338 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
339 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
/* H.264 uses 4x4 motion granularity (b4), H.263-family 8x8 (b8). */
340 if (s->out_format == FMT_H264) {
341 for (i = 0; i < 2; i++) {
342 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
343 2 * (b4_array_size + 4) * sizeof(int16_t),
345 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
346 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
347 4 * mb_array_size * sizeof(uint8_t), fail)
349 pic->f.motion_subsample_log2 = 2;
350 } else if (s->out_format == FMT_H263 || s->encoding ||
351 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
352 for (i = 0; i < 2; i++) {
353 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
354 2 * (b8_array_size + 4) * sizeof(int16_t),
356 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
357 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
358 4 * mb_array_size * sizeof(uint8_t), fail)
360 pic->f.motion_subsample_log2 = 3;
362 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
363 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
364 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
366 pic->f.qstride = s->mb_stride;
367 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
368 1 * sizeof(AVPanScan), fail)
374 fail: // for the FF_ALLOCZ_OR_GOTO macro
376 free_frame_buffer(s, pic);
381 * Deallocate a picture.
383 static void free_picture(MpegEncContext *s, Picture *pic)
/* Release pixel buffers unless they are shared (owned by the caller). */
387 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
388 free_frame_buffer(s, pic);
391 av_freep(&pic->mb_var);
392 av_freep(&pic->mc_mb_var);
393 av_freep(&pic->mb_mean);
394 av_freep(&pic->f.mbskip_table);
395 av_freep(&pic->qscale_table_base);
396 pic->f.qscale_table = NULL;
397 av_freep(&pic->mb_type_base);
398 pic->f.mb_type = NULL;
399 av_freep(&pic->f.dct_coeff);
400 av_freep(&pic->f.pan_scan);
/* NOTE(review): mb_type was already cleared above; this second
 * assignment is redundant (harmless, but a candidate for removal). */
401 pic->f.mb_type = NULL;
402 for (i = 0; i < 2; i++) {
403 av_freep(&pic->motion_val_base[i]);
404 av_freep(&pic->f.ref_index[i]);
405 pic->f.motion_val[i] = NULL;
/* Shared buffers: only the pointers are dropped, never the memory. */
408 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
409 for (i = 0; i < 4; i++) {
411 pic->f.data[i] = NULL;
/* Allocate the per-slice-thread scratch buffers of a (possibly
 * duplicated) MpegEncContext. Returns 0 on success, -1 on OOM; the
 * caller is expected to clean up via ff_MPV_common_end(). */
417 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
419 int y_size = s->b8_stride * (2 * s->mb_height + 1);
420 int c_size = s->mb_stride * (s->mb_height + 1);
421 int yc_size = y_size + 2 * c_size;
424 // edge emu needs blocksize + filter length - 1
425 // (= 17x17 for halfpel / 21x21 for h264)
426 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
427 (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
429 // FIXME should be linesize instead of s->width * 2
430 // but that is not known before get_buffer()
431 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
432 (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
/* The remaining scratchpads alias the ME scratchpad; they are never
 * live at the same time. */
433 s->me.temp = s->me.scratchpad;
434 s->rd_scratchpad = s->me.scratchpad;
435 s->b_scratchpad = s->me.scratchpad;
436 s->obmc_scratchpad = s->me.scratchpad + 16;
438 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
439 ME_MAP_SIZE * sizeof(uint32_t), fail)
440 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
441 ME_MAP_SIZE * sizeof(uint32_t), fail)
442 if (s->avctx->noise_reduction) {
443 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
444 2 * 64 * sizeof(int), fail)
447 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
448 s->block = s->blocks[0];
450 for (i = 0; i < 12; i++) {
451 s->pblocks[i] = &s->block[i];
/* H.263-family AC prediction values, offset for -1 row/column access. */
454 if (s->out_format == FMT_H263) {
456 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
457 yc_size * sizeof(int16_t) * 16, fail);
458 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
459 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
460 s->ac_val[2] = s->ac_val[1] + c_size;
465 return -1; // free() through ff_MPV_common_end()
/* Free everything allocated by init_duplicate_context(). The aliased
 * scratchpads (me.temp, rd/b/obmc) share me.scratchpad's memory, so
 * only that one allocation is freed. */
468 static void free_duplicate_context(MpegEncContext *s)
473 av_freep(&s->edge_emu_buffer);
474 av_freep(&s->me.scratchpad);
478 s->obmc_scratchpad = NULL;
480 av_freep(&s->dct_error_sum);
481 av_freep(&s->me.map);
482 av_freep(&s->me.score_map);
483 av_freep(&s->blocks);
484 av_freep(&s->ac_val_base);
/* Copy the per-thread fields of src into bak, so a wholesale memcpy of
 * the context (in ff_update_duplicate_context) can be followed by
 * restoring these thread-local pointers. */
488 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
490 #define COPY(a) bak->a = src->a
491 COPY(edge_emu_buffer);
496 COPY(obmc_scratchpad);
503 COPY(me.map_generation);
/* Refresh a slice-thread context from the master: copy the whole
 * struct, then restore dst's own scratch buffers and re-point pblocks
 * into dst's block array. */
515 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
519 // FIXME copy only needed parts
521 backup_duplicate_context(&bak, dst);
522 memcpy(dst, src, sizeof(MpegEncContext));
523 backup_duplicate_context(dst, &bak);
524 for (i = 0; i < 12; i++) {
525 dst->pblocks[i] = &dst->block[i];
527 // STOP_TIMER("update_duplicate_context")
528 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: synchronize decoder state from the source thread's
 * context (s1) into this thread's context (s). */
531 int ff_mpeg_update_thread_context(AVCodecContext *dst,
532 const AVCodecContext *src)
534 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
536 if (dst == src || !s1->context_initialized)
539 // FIXME can parameters change on I-frames?
540 // in that case dst may need a reinit
541 if (!s->context_initialized) {
/* First sync: clone the whole context, then rebase/clear the fields
 * that must stay thread-local before a fresh common_init. */
542 memcpy(s, s1, sizeof(MpegEncContext));
545 s->picture_range_start += MAX_PICTURE_COUNT;
546 s->picture_range_end += MAX_PICTURE_COUNT;
547 s->bitstream_buffer = NULL;
548 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
550 ff_MPV_common_init(s);
553 s->avctx->coded_height = s1->avctx->coded_height;
554 s->avctx->coded_width = s1->avctx->coded_width;
555 s->avctx->width = s1->avctx->width;
556 s->avctx->height = s1->avctx->height;
558 s->coded_picture_number = s1->coded_picture_number;
559 s->picture_number = s1->picture_number;
560 s->input_picture_number = s1->input_picture_number;
/* Bulk-copy the picture array plus the last/current/next Picture
 * structs (the struct-offset memcpy below relies on field layout). */
562 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
563 memcpy(&s->last_picture, &s1->last_picture,
564 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
/* Picture pointers must be rebased from s1's array into s's array. */
566 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
567 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
568 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
570 // Error/bug resilience
571 s->next_p_frame_damaged = s1->next_p_frame_damaged;
572 s->workaround_bugs = s1->workaround_bugs;
/* MPEG-4 field block copied by struct offsets — layout-dependent. */
575 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
576 (char *) &s1->shape - (char *) &s1->time_increment_bits);
579 s->max_b_frames = s1->max_b_frames;
580 s->low_delay = s1->low_delay;
581 s->dropable = s1->dropable;
583 // DivX handling (doesn't work)
584 s->divx_packed = s1->divx_packed;
/* Carry over any buffered bitstream bytes, growing our buffer to the
 * source's allocated size if needed; keep padding zeroed for readers. */
586 if (s1->bitstream_buffer) {
587 if (s1->bitstream_buffer_size +
588 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
589 av_fast_malloc(&s->bitstream_buffer,
590 &s->allocated_bitstream_buffer_size,
591 s1->allocated_bitstream_buffer_size);
592 s->bitstream_buffer_size = s1->bitstream_buffer_size;
593 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
594 s1->bitstream_buffer_size);
595 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
596 FF_INPUT_BUFFER_PADDING_SIZE);
599 // MPEG2/interlacing info
600 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
601 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
603 if (!s1->first_field) {
604 s->last_pict_type = s1->pict_type;
605 if (s1->current_picture_ptr)
606 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
608 if (s1->pict_type != AV_PICTURE_TYPE_B) {
609 s->last_non_b_pict_type = s1->pict_type;
617 * Set the given MpegEncContext to common defaults
618 * (same for encoding and decoding).
619 * The changed fields will not depend upon the
620 * prior state of the MpegEncContext.
622 void ff_MPV_common_defaults(MpegEncContext *s)
/* DC scale tables default to MPEG-1 (constant 8); codecs override. */
624 s->y_dc_scale_table =
625 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
626 s->chroma_qscale_table = ff_default_chroma_qscale_table;
627 s->progressive_frame = 1;
628 s->progressive_sequence = 1;
629 s->picture_structure = PICT_FRAME;
631 s->coded_picture_number = 0;
632 s->picture_number = 0;
633 s->input_picture_number = 0;
635 s->picture_in_gop_number = 0;
640 s->picture_range_start = 0;
641 s->picture_range_end = MAX_PICTURE_COUNT;
643 s->slice_context_count = 1;
647 * Set the given MpegEncContext to defaults for decoding.
648 * the changed fields will not depend upon
649 * the prior state of the MpegEncContext.
651 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Currently identical to the common defaults. */
653 ff_MPV_common_defaults(s);
657 * Initialize and allocates MpegEncContext fields dependent on the resolution.
659 static int init_context_frame(MpegEncContext *s)
661 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Derived geometry: strides include +1 guard column so that -1
 * indexing (left neighbour of column 0) stays in bounds. */
663 s->mb_width = (s->width + 15) / 16;
664 s->mb_stride = s->mb_width + 1;
665 s->b8_stride = s->mb_width * 2 + 1;
666 s->b4_stride = s->mb_width * 4 + 1;
667 mb_array_size = s->mb_height * s->mb_stride;
668 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
670 /* set default edge pos, will be overriden
671 * in decode_header if needed */
672 s->h_edge_pos = s->mb_width * 16;
673 s->v_edge_pos = s->mb_height * 16;
675 s->mb_num = s->mb_width * s->mb_height;
680 s->block_wrap[3] = s->b8_stride;
682 s->block_wrap[5] = s->mb_stride;
684 y_size = s->b8_stride * (2 * s->mb_height + 1);
685 c_size = s->mb_stride * (s->mb_height + 1);
686 yc_size = y_size + 2 * c_size;
688 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
689 fail); // error ressilience code looks cleaner with this
690 for (y = 0; y < s->mb_height; y++)
691 for (x = 0; x < s->mb_width; x++)
692 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
694 s->mb_index2xy[s->mb_height * s->mb_width] =
695 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
698 /* Allocate MV tables */
699 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
700 mv_table_size * 2 * sizeof(int16_t), fail);
701 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
702 mv_table_size * 2 * sizeof(int16_t), fail);
703 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
704 mv_table_size * 2 * sizeof(int16_t), fail);
705 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
706 mv_table_size * 2 * sizeof(int16_t), fail);
707 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
708 mv_table_size * 2 * sizeof(int16_t), fail);
709 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
710 mv_table_size * 2 * sizeof(int16_t), fail);
/* Working pointers are offset past the guard row/column. */
711 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
712 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
713 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
714 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
716 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
718 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
720 /* Allocate MB type table */
721 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
722 sizeof(uint16_t), fail); // needed for encoding
724 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
727 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
728 mb_array_size * sizeof(float), fail);
729 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
730 mb_array_size * sizeof(float), fail);
734 FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
735 mb_array_size * sizeof(uint8_t), fail);
736 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
737 mb_array_size * sizeof(uint8_t), fail);
739 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
740 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
741 /* interlaced direct mode decoding tables */
742 for (i = 0; i < 2; i++) {
744 for (j = 0; j < 2; j++) {
745 for (k = 0; k < 2; k++) {
746 FF_ALLOCZ_OR_GOTO(s->avctx,
747 s->b_field_mv_table_base[i][j][k],
748 mv_table_size * 2 * sizeof(int16_t),
750 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
753 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
754 mb_array_size * 2 * sizeof(uint8_t), fail);
755 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
756 mv_table_size * 2 * sizeof(int16_t), fail);
757 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
760 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
761 mb_array_size * 2 * sizeof(uint8_t), fail);
764 if (s->out_format == FMT_H263) {
766 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
767 s->coded_block = s->coded_block_base + s->b8_stride + 1;
769 /* cbp, ac_pred, pred_dir */
770 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
771 mb_array_size * sizeof(uint8_t), fail);
772 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
773 mb_array_size * sizeof(uint8_t), fail);
776 if (s->h263_pred || s->h263_plus || !s->encoding) {
778 // MN: we need these for error resilience of intra-frames
779 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
780 yc_size * sizeof(int16_t), fail);
781 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
782 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
783 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 = neutral DC predictor (128 << 3). */
784 for (i = 0; i < yc_size; i++)
785 s->dc_val_base[i] = 1024;
788 /* which mb is a intra block */
789 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
790 memset(s->mbintra_table, 1, mb_array_size);
792 /* init macroblock skip table */
793 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
794 // Note the + 1 is for a quicker mpeg4 slice_end detection
796 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
797 s->avctx->debug_mv) {
798 s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
799 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
800 s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
801 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
802 s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
803 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
808 return AVERROR(ENOMEM);
812 * init common structure for both encoder and decoder.
813 * this assumes that some variables like width/height are already set
815 av_cold int ff_MPV_common_init(MpegEncContext *s)
818 int nb_slices = (HAVE_THREADS &&
819 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
820 s->avctx->thread_count : 1;
822 if (s->encoding && s->avctx->slices)
823 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 needs mb_height rounded to a multiple of two
 * 16-line macroblock rows (32-line units). */
825 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
826 s->mb_height = (s->height + 31) / 32 * 2;
827 else if (s->codec_id != AV_CODEC_ID_H264)
828 s->mb_height = (s->height + 15) / 16;
830 if (s->avctx->pix_fmt == PIX_FMT_NONE) {
831 av_log(s->avctx, AV_LOG_ERROR,
832 "decoding to PIX_FMT_NONE is not supported.\n");
/* Clamp slice count: at most one slice context per MB row and per
 * available thread slot. */
836 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
839 max_slices = FFMIN(MAX_THREADS, s->mb_height);
841 max_slices = MAX_THREADS;
842 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
843 " reducing to %d\n", nb_slices, max_slices);
844 nb_slices = max_slices;
847 if ((s->width || s->height) &&
848 av_image_check_size(s->width, s->height, 0, s->avctx))
851 ff_dct_common_init(s);
853 s->flags = s->avctx->flags;
854 s->flags2 = s->avctx->flags2;
856 if (s->width && s->height) {
857 /* set chroma shifts */
858 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
861 /* convert fourcc to upper case */
862 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
864 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
866 s->avctx->coded_frame = &s->current_picture.f;
/* Encoder-only tables (presumably inside an s->encoding branch whose
 * brace is elided from this view — confirm against the full file). */
869 if (s->msmpeg4_version) {
870 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
871 2 * 2 * (MAX_LEVEL + 1) *
872 (MAX_RUN + 1) * 2 * sizeof(int), fail);
874 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
876 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
877 64 * 32 * sizeof(int), fail);
878 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
879 64 * 32 * sizeof(int), fail);
880 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
881 64 * 32 * 2 * sizeof(uint16_t), fail);
882 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
883 64 * 32 * 2 * sizeof(uint16_t), fail);
884 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
885 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
886 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
887 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
889 if (s->avctx->noise_reduction) {
890 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
891 2 * 64 * sizeof(uint16_t), fail);
/* Picture pool is sized per thread for frame threading. */
896 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
897 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
898 s->picture_count * sizeof(Picture), fail);
899 for (i = 0; i < s->picture_count; i++) {
900 avcodec_get_frame_defaults(&s->picture[i].f);
903 if (s->width && s->height) {
904 if ((err = init_context_frame(s)))
907 s->parse_context.state = -1;
910 s->context_initialized = 1;
911 s->thread_context[0] = s;
913 if (s->width && s->height) {
/* Slice threading: clone the master context, then carve the MB rows
 * into nb_slices roughly equal [start_mb_y, end_mb_y) bands. */
915 for (i = 1; i < nb_slices; i++) {
916 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
917 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
920 for (i = 0; i < nb_slices; i++) {
921 if (init_duplicate_context(s->thread_context[i], s) < 0)
923 s->thread_context[i]->start_mb_y =
924 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
925 s->thread_context[i]->end_mb_y =
926 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
929 if (init_duplicate_context(s, s) < 0)
932 s->end_mb_y = s->mb_height;
934 s->slice_context_count = nb_slices;
939 ff_MPV_common_end(s);
944 * Frees and resets MpegEncContext fields depending on the resolution.
945 * Is used during resolution changes to avoid a full reinitialization of the
948 static int free_context_frame(MpegEncContext *s)
952 av_freep(&s->mb_type);
/* Free via the *_base pointers — the working pointers are offset into
 * the same allocations and must only be NULLed. */
953 av_freep(&s->p_mv_table_base);
954 av_freep(&s->b_forw_mv_table_base);
955 av_freep(&s->b_back_mv_table_base);
956 av_freep(&s->b_bidir_forw_mv_table_base);
957 av_freep(&s->b_bidir_back_mv_table_base);
958 av_freep(&s->b_direct_mv_table_base);
959 s->p_mv_table = NULL;
960 s->b_forw_mv_table = NULL;
961 s->b_back_mv_table = NULL;
962 s->b_bidir_forw_mv_table = NULL;
963 s->b_bidir_back_mv_table = NULL;
964 s->b_direct_mv_table = NULL;
965 for (i = 0; i < 2; i++) {
966 for (j = 0; j < 2; j++) {
967 for (k = 0; k < 2; k++) {
968 av_freep(&s->b_field_mv_table_base[i][j][k]);
969 s->b_field_mv_table[i][j][k] = NULL;
971 av_freep(&s->b_field_select_table[i][j]);
972 av_freep(&s->p_field_mv_table_base[i][j]);
973 s->p_field_mv_table[i][j] = NULL;
975 av_freep(&s->p_field_select_table[i]);
978 av_freep(&s->dc_val_base);
979 av_freep(&s->coded_block_base);
980 av_freep(&s->mbintra_table);
981 av_freep(&s->cbp_table);
982 av_freep(&s->pred_dir_table);
984 av_freep(&s->mbskip_table);
986 av_freep(&s->error_status_table);
987 av_freep(&s->er_temp_buffer);
988 av_freep(&s->mb_index2xy);
989 av_freep(&s->lambda_table);
990 av_freep(&s->cplx_tab);
991 av_freep(&s->bits_tab);
993 s->linesize = s->uvlinesize = 0;
995 for (i = 0; i < 3; i++)
996 av_freep(&s->visualization_buffer[i]);
/* Frame threading owns its buffers elsewhere; only release the
 * default buffers when frame threading is not active. */
998 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
999 avcodec_default_free_buffers(s->avctx);
/* Reinitialize the resolution-dependent parts of an already-initialized
 * context after a mid-stream frame size change: tear down slice
 * contexts and frame tables, recompute geometry, rebuild both. */
1004 int ff_MPV_common_frame_size_change(MpegEncContext *s)
1008 if (s->slice_context_count > 1) {
1009 for (i = 0; i < s->slice_context_count; i++) {
1010 free_duplicate_context(s->thread_context[i]);
1012 for (i = 1; i < s->slice_context_count; i++) {
1013 av_freep(&s->thread_context[i]);
1016 free_duplicate_context(s);
1018 free_context_frame(s);
/* Existing pictures keep their memory but are flagged for realloc at
 * the new dimensions on next use. */
1021 for (i = 0; i < s->picture_count; i++) {
1022 s->picture[i].needs_realloc = 1;
1025 s->last_picture_ptr =
1026 s->next_picture_ptr =
1027 s->current_picture_ptr = NULL;
/* Same mb_height rounding rules as ff_MPV_common_init(). */
1030 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1031 s->mb_height = (s->height + 31) / 32 * 2;
1032 else if (s->codec_id != AV_CODEC_ID_H264)
1033 s->mb_height = (s->height + 15) / 16;
1035 if ((s->width || s->height) &&
1036 av_image_check_size(s->width, s->height, 0, s->avctx))
1037 return AVERROR_INVALIDDATA;
1039 if ((err = init_context_frame(s)))
1042 s->thread_context[0] = s;
1044 if (s->width && s->height) {
1045 int nb_slices = s->slice_context_count;
1046 if (nb_slices > 1) {
1047 for (i = 1; i < nb_slices; i++) {
1048 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1049 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1052 for (i = 0; i < nb_slices; i++) {
1053 if (init_duplicate_context(s->thread_context[i], s) < 0)
1055 s->thread_context[i]->start_mb_y =
1056 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1057 s->thread_context[i]->end_mb_y =
1058 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1061 if (init_duplicate_context(s, s) < 0)
1064 s->end_mb_y = s->mb_height;
1066 s->slice_context_count = nb_slices;
1071 ff_MPV_common_end(s);
1075 /* init common structure for both encoder and decoder */
1076 void ff_MPV_common_end(MpegEncContext *s)
1080 if (s->slice_context_count > 1) {
1081 for (i = 0; i < s->slice_context_count; i++) {
1082 free_duplicate_context(s->thread_context[i]);
1084 for (i = 1; i < s->slice_context_count; i++) {
1085 av_freep(&s->thread_context[i]);
1087 s->slice_context_count = 1;
1088 } else free_duplicate_context(s);
1090 av_freep(&s->parse_context.buffer);
1091 s->parse_context.buffer_size = 0;
1093 av_freep(&s->bitstream_buffer);
1094 s->allocated_bitstream_buffer_size = 0;
1096 av_freep(&s->avctx->stats_out);
1097 av_freep(&s->ac_stats);
1099 av_freep(&s->q_intra_matrix);
1100 av_freep(&s->q_inter_matrix);
1101 av_freep(&s->q_intra_matrix16);
1102 av_freep(&s->q_inter_matrix16);
1103 av_freep(&s->input_picture);
1104 av_freep(&s->reordered_input_picture);
1105 av_freep(&s->dct_offset);
1107 free_context_frame(s);
/* Frame-thread copies share the picture pool with the master; only
 * the owner (is_copy == 0) frees the individual pictures. */
1109 if (s->picture && !s->avctx->internal->is_copy) {
1110 for (i = 0; i < s->picture_count; i++) {
1111 free_picture(s, &s->picture[i]);
1114 av_freep(&s->picture);
1115 s->context_initialized = 0;
1116 s->last_picture_ptr =
1117 s->next_picture_ptr =
1118 s->current_picture_ptr = NULL;
1119 s->linesize = s->uvlinesize = 0;
/* Build the derived max_level[], max_run[] and index_run[] lookup
 * arrays of an RLTable from its raw run/level tables, for both the
 * "not last" (0) and "last" (1) coefficient classes. When
 * static_store is non-NULL the arrays live there (and the call is a
 * no-op once initialized); otherwise they are av_malloc()ed. */
1122 void ff_init_rl(RLTable *rl,
1123 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1125 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1126 uint8_t index_run[MAX_RUN + 1];
1127 int last, run, level, start, end, i;
1129 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1130 if (static_store && rl->max_level[0])
1133 /* compute max_level[], max_run[] and index_run[] */
1134 for (last = 0; last < 2; last++) {
/* rl->n in index_run[] marks "no entry for this run". */
1143 memset(max_level, 0, MAX_RUN + 1);
1144 memset(max_run, 0, MAX_LEVEL + 1);
1145 memset(index_run, rl->n, MAX_RUN + 1);
1146 for (i = start; i < end; i++) {
1147 run = rl->table_run[i];
1148 level = rl->table_level[i];
1149 if (index_run[run] == rl->n)
1151 if (level > max_level[run])
1152 max_level[run] = level;
1153 if (run > max_run[level])
1154 max_run[level] = run;
/* Static path: slice the caller-provided row into three regions. */
1157 rl->max_level[last] = static_store[last];
1159 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1160 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1162 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1164 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1165 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1167 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1169 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1170 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Build the per-qscale RL_VLC tables: for each of the 32 quantizer values,
 * pre-apply dequantization (qmul/qadd) to every VLC table entry so the
 * decoder can read run/level/len in one lookup.
 * NOTE(review): excerpt is missing lines (declaration of qmul, the special
 * handling of illegal/escape/continuation codes); code kept verbatim.
 */
1174 void ff_init_vlc_rl(RLTable *rl)
1178 for (q = 0; q < 32; q++) {
/* qadd = (q - 1) | 1 gives an odd rounding offset; presumably paired with a
 * qmul derived from q on a missing line — verify against upstream. */
1180 int qadd = (q - 1) | 1;
1186 for (i = 0; i < rl->vlc.table_size; i++) {
1187 int code = rl->vlc.table[i][0];
1188 int len = rl->vlc.table[i][1];
1191 if (len == 0) { // illegal code
1194 } else if (len < 0) { // more bits needed
1198 if (code == rl->n) { // esc
/* Normal code: run is stored off-by-one; codes at/after rl->last get +192
 * to flag "last coefficient in block". */
1202 run = rl->table_run[code] + 1;
1203 level = rl->table_level[code] * qmul + qadd;
1204 if (code >= rl->last) run += 192;
1207 rl->rl_vlc[q][i].len = len;
1208 rl->rl_vlc[q][i].level = level;
1209 rl->rl_vlc[q][i].run = run;
/**
 * Release the frame buffers of all non-reference pictures owned by this
 * context. The current picture is kept unless remove_current is set.
 */
1214 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1218 /* release non reference frames */
1219 for (i = 0; i < s->picture_count; i++) {
/* owner2 check: only free pictures this (thread) context owns. */
1220 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1221 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1222 (remove_current || &s->picture[i] != s->current_picture_ptr)
1223 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1224 free_frame_buffer(s, &s->picture[i]);
/**
 * Return whether a picture slot can be reused by this context.
 * NOTE(review): the excerpt is missing the return statements that followed
 * each condition (empty buffer -> reusable; needs_realloc and owned by this
 * context -> reusable) — verify against upstream before relying on this text.
 */
1229 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1231 if (pic->f.data[0] == NULL)
1233 if (pic->needs_realloc)
1234 if (!pic->owner2 || pic->owner2 == s)
/**
 * Find a free slot in s->picture[] within [picture_range_start,
 * picture_range_end). Three passes with decreasing strictness:
 * 1) completely empty, internally-typed slots;
 * 2) reusable slots with a non-default buffer type;
 * 3) any reusable slot.
 * Returns the slot index, or AVERROR_INVALIDDATA if none is free.
 * NOTE(review): the return statements inside each loop and the `shared`
 * handling are missing from this excerpt.
 */
1239 static int find_unused_picture(MpegEncContext *s, int shared)
1244 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1245 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1249 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1250 if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
1253 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1254 if (pic_is_unused(s, &s->picture[i]))
1259 return AVERROR_INVALIDDATA;
/**
 * Public wrapper around find_unused_picture(): if the chosen slot is marked
 * needs_realloc, free its old contents and reset the frame defaults so the
 * caller gets a clean slot. Returns the slot index or a negative error code.
 */
1262 int ff_find_unused_picture(MpegEncContext *s, int shared)
1264 int ret = find_unused_picture(s, shared);
1266 if (ret >= 0 && ret < s->picture_range_end) {
1267 if (s->picture[ret].needs_realloc) {
1268 s->picture[ret].needs_realloc = 0;
1269 free_picture(s, &s->picture[ret]);
1270 avcodec_get_frame_defaults(&s->picture[ret].f);
/**
 * Refresh the encoder's per-coefficient noise-reduction offsets from the
 * accumulated DCT error statistics, separately for intra and inter blocks.
 */
1276 static void update_noise_reduction(MpegEncContext *s)
1280 for (intra = 0; intra < 2; intra++) {
/* Halve the accumulators once the sample count exceeds 2^16 so the
 * statistics keep adapting instead of saturating. */
1281 if (s->dct_count[intra] > (1 << 16)) {
1282 for (i = 0; i < 64; i++) {
1283 s->dct_error_sum[intra][i] >>= 1;
1285 s->dct_count[intra] >>= 1;
/* offset[i] ~ noise_reduction * count / error_sum[i]; the +1 and the /2
 * rounding term guard against division by zero and bias. */
1288 for (i = 0; i < 64; i++) {
1289 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1290 s->dct_count[intra] +
1291 s->dct_error_sum[intra][i] / 2) /
1292 (s->dct_error_sum[intra][i] + 1);
/**
1298 * generic function for encode/decode called after coding/decoding
1299 * the header and before a frame is coded/decoded.
 *
 * Selects/allocates the current picture, rotates last/next reference
 * pictures, allocates dummy references when the stream starts on a non-key
 * frame, adjusts data/linesize for field pictures, and picks the
 * dequantizer functions for the codec in use.
 * NOTE(review): this excerpt is missing many intermediate lines (braces,
 * else branches, declarations, return statements); code kept verbatim.
 */
1301 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1307 /* mark & release old frames */
1308 if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
1309 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1310 s->last_picture_ptr != s->next_picture_ptr &&
1311 s->last_picture_ptr->f.data[0]) {
1312 if (s->last_picture_ptr->owner2 == s)
1313 free_frame_buffer(s, s->last_picture_ptr);
1316 /* release forgotten pictures */
1317 /* if (mpeg124/h263) */
/* "Zombie" = a referenced, owned picture that is neither last nor next;
 * with frame threading this can be legitimate, hence the log guard. */
1319 for (i = 0; i < s->picture_count; i++) {
1320 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1321 &s->picture[i] != s->last_picture_ptr &&
1322 &s->picture[i] != s->next_picture_ptr &&
1323 s->picture[i].f.reference && !s->picture[i].needs_realloc) {
1324 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1325 av_log(avctx, AV_LOG_ERROR,
1326 "releasing zombie picture\n");
1327 free_frame_buffer(s, &s->picture[i]);
1334 ff_release_unused_pictures(s, 1);
1336 if (s->current_picture_ptr &&
1337 s->current_picture_ptr->f.data[0] == NULL) {
1338 // we already have a unused image
1339 // (maybe it was set before reading the header)
1340 pic = s->current_picture_ptr;
1342 i = ff_find_unused_picture(s, 0);
1343 pic = &s->picture[i];
/* reference flag: H.264 marks by picture structure, other codecs mark every
 * non-B picture as a full-frame reference (3 = both fields). */
1346 pic->f.reference = 0;
1348 if (s->codec_id == AV_CODEC_ID_H264)
1349 pic->f.reference = s->picture_structure;
1350 else if (s->pict_type != AV_PICTURE_TYPE_B)
1351 pic->f.reference = 3;
1354 pic->f.coded_picture_number = s->coded_picture_number++;
1356 if (ff_alloc_picture(s, pic, 0) < 0)
1359 s->current_picture_ptr = pic;
1360 // FIXME use only the vars from current_pic
1361 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1362 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1363 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1364 if (s->picture_structure != PICT_FRAME)
1365 s->current_picture_ptr->f.top_field_first =
1366 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1368 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1369 !s->progressive_sequence;
1370 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1373 s->current_picture_ptr->f.pict_type = s->pict_type;
1374 // if (s->flags && CODEC_FLAG_QSCALE)
1375 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1376 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1378 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* Reference rotation: non-B pictures push next -> last and become the new
 * next reference. */
1380 if (s->pict_type != AV_PICTURE_TYPE_B) {
1381 s->last_picture_ptr = s->next_picture_ptr;
1383 s->next_picture_ptr = s->current_picture_ptr;
1385 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1386 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1387 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1388 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1389 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1390 s->pict_type, s->dropable); */
/* If the stream starts on a P/B frame (or a field-based first keyframe),
 * allocate grey dummy reference pictures so motion compensation has
 * something valid to read from. */
1392 if (s->codec_id != AV_CODEC_ID_H264) {
1393 if ((s->last_picture_ptr == NULL ||
1394 s->last_picture_ptr->f.data[0] == NULL) &&
1395 (s->pict_type != AV_PICTURE_TYPE_I ||
1396 s->picture_structure != PICT_FRAME)) {
1397 if (s->pict_type != AV_PICTURE_TYPE_I)
1398 av_log(avctx, AV_LOG_ERROR,
1399 "warning: first frame is no keyframe\n");
1400 else if (s->picture_structure != PICT_FRAME)
1401 av_log(avctx, AV_LOG_INFO,
1402 "allocate dummy last picture for field based first keyframe\n");
1404 /* Allocate a dummy frame */
1405 i = ff_find_unused_picture(s, 0);
1406 s->last_picture_ptr = &s->picture[i];
1407 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1408 s->last_picture_ptr = NULL;
1411 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1412 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1413 s->last_picture_ptr->f.reference = 3;
1415 if ((s->next_picture_ptr == NULL ||
1416 s->next_picture_ptr->f.data[0] == NULL) &&
1417 s->pict_type == AV_PICTURE_TYPE_B) {
1418 /* Allocate a dummy frame */
1419 i = ff_find_unused_picture(s, 0);
1420 s->next_picture_ptr = &s->picture[i];
1421 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1422 s->next_picture_ptr = NULL;
1425 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1426 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1427 s->next_picture_ptr->f.reference = 3;
1431 if (s->last_picture_ptr)
1432 ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1433 if (s->next_picture_ptr)
1434 ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1436 if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
1437 (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3)) {
1438 if (s->next_picture_ptr)
1439 s->next_picture_ptr->owner2 = s;
1440 if (s->last_picture_ptr)
1441 s->last_picture_ptr->owner2 = s;
1444 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1445 s->last_picture_ptr->f.data[0]));
/* Field pictures: point data at the selected field and double linesizes so
 * the MC/IDCT code steps over the interleaved other field. */
1447 if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1449 for (i = 0; i < 4; i++) {
1450 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1451 s->current_picture.f.data[i] +=
1452 s->current_picture.f.linesize[i];
1454 s->current_picture.f.linesize[i] *= 2;
1455 s->last_picture.f.linesize[i] *= 2;
1456 s->next_picture.f.linesize[i] *= 2;
1460 s->err_recognition = avctx->err_recognition;
1462 /* set dequantizer, we can't do it during init as
1463 * it might change for mpeg4 and we can't do it in the header
1464 * decode as init is not called for mpeg4 there yet */
1465 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1466 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1467 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1468 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1469 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1470 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1472 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1473 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1476 if (s->dct_error_sum) {
1477 assert(s->avctx->noise_reduction && s->encoding);
1478 update_noise_reduction(s);
1481 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1482 return ff_xvmc_field_start(s, avctx);
1487 /* generic function for encode/decode called after a
1488 * frame has been coded/decoded. */
/* Draws edge padding around the finished reference picture (needed by
 * unrestricted motion vectors), updates last-picture statistics, writes the
 * current picture back into s->picture[], and releases non-reference frames.
 * NOTE(review): excerpt is missing intermediate lines (braces, declarations,
 * possibly preprocessor conditionals); code kept verbatim. */
1489 void ff_MPV_frame_end(MpegEncContext *s)
1492 /* redraw edges for the frame if decoding didn't complete */
1493 // just to make sure that all data is rendered.
1494 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1495 ff_xvmc_field_end(s);
1496 } else if ((s->error_count || s->encoding) &&
1497 !s->avctx->hwaccel &&
1498 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1499 s->unrestricted_mv &&
1500 s->current_picture.f.reference &&
1502 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
/* Chroma planes are padded with EDGE_WIDTH scaled by the pixel format's
 * chroma subsampling shifts. */
1503 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1504 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1505 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1506 s->h_edge_pos, s->v_edge_pos,
1507 EDGE_WIDTH, EDGE_WIDTH,
1508 EDGE_TOP | EDGE_BOTTOM);
1509 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1510 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1511 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1512 EDGE_TOP | EDGE_BOTTOM);
1513 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1514 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1515 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1516 EDGE_TOP | EDGE_BOTTOM);
1521 s->last_pict_type = s->pict_type;
1522 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1523 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1524 s->last_non_b_pict_type = s->pict_type;
1527 /* copy back current_picture variables */
1528 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1529 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1530 s->picture[i] = s->current_picture;
1534 assert(i < MAX_PICTURE_COUNT);
1538 /* release non-reference frames */
1539 for (i = 0; i < s->picture_count; i++) {
1540 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1541 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1542 free_frame_buffer(s, &s->picture[i]);
1546 // clear copies, to avoid confusion
1548 memset(&s->last_picture, 0, sizeof(Picture));
1549 memset(&s->next_picture, 0, sizeof(Picture));
1550 memset(&s->current_picture, 0, sizeof(Picture));
1552 s->avctx->coded_frame = &s->current_picture_ptr->f;
/* NOTE(review): s->current_picture was just zeroed above, so this test of
 * s->current_picture.f.reference would read 0 as written — the upstream file
 * likely guards the memset block or orders these differently; confirm before
 * relying on this excerpt. */
1554 if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
1555 ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
1560 * Draw a line from (ex, ey) -> (sx, sy).
1561 * @param w width of the image
1562 * @param h height of the image
1563 * @param stride stride/linesize of the image
1564 * @param color color of the arrow
1566 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1567 int w, int h, int stride, int color)
1571 sx = av_clip(sx, 0, w - 1);
1572 sy = av_clip(sy, 0, h - 1);
1573 ex = av_clip(ex, 0, w - 1);
1574 ey = av_clip(ey, 0, h - 1);
1576 buf[sy * stride + sx] += color;
1578 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1580 FFSWAP(int, sx, ex);
1581 FFSWAP(int, sy, ey);
1583 buf += sx + sy * stride;
1585 f = ((ey - sy) << 16) / ex;
1586 for (x = 0; x = ex; x++) {
1588 fr = (x * f) & 0xFFFF;
1589 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1590 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1594 FFSWAP(int, sx, ex);
1595 FFSWAP(int, sy, ey);
1597 buf += sx + sy * stride;
1600 f = ((ex - sx) << 16) / ey;
1603 for (y = 0; y = ey; y++) {
1605 fr = (y * f) & 0xFFFF;
1606 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1607 buf[y * stride + x + 1] += (color * fr ) >> 16;
/**
1613 * Draw an arrow from (ex, ey) -> (sx, sy).
1614 * @param w width of the image
1615 * @param h height of the image
1616 * @param stride stride/linesize of the image
1617 * @param color color of the arrow
 * Draws the shaft via draw_line() plus two short head strokes when the
 * vector is long enough to warrant one.
 * NOTE(review): excerpt is missing the declarations/assignments of dx, dy,
 * rx, ry and some braces; code kept verbatim.
 */
1619 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1620 int ey, int w, int h, int stride, int color)
/* Loose clip (±100 beyond the image) keeps the arithmetic bounded; the
 * hard clipping to the plane happens inside draw_line(). */
1624 sx = av_clip(sx, -100, w + 100);
1625 sy = av_clip(sy, -100, h + 100);
1626 ex = av_clip(ex, -100, w + 100);
1627 ey = av_clip(ey, -100, h + 100);
/* Only draw a head if the vector is longer than 3 pixels. */
1632 if (dx * dx + dy * dy > 3 * 3) {
1635 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1637 // FIXME subpixel accuracy
1638 rx = ROUNDED_DIV(rx * 3 << 4, length);
1639 ry = ROUNDED_DIV(ry * 3 << 4, length);
1641 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1642 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1644 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
/**
1648 * Print debugging info for the given picture.
 * Depending on avctx->debug / debug_mv flags this logs per-MB skip/QP/type
 * tables and/or paints motion vectors, QP and MB-type visualizations
 * directly into a copy of the picture (s->visualization_buffer).
 * NOTE(review): this excerpt is missing many lines (breaks, braces, else
 * keywords, declarations); code kept verbatim.
 */
1650 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
1652 if (s->avctx->hwaccel || !pict || !pict->mb_type)
1655 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1658 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1659 switch (pict->pict_type) {
1660 case AV_PICTURE_TYPE_I:
1661 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1663 case AV_PICTURE_TYPE_P:
1664 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1666 case AV_PICTURE_TYPE_B:
1667 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1669 case AV_PICTURE_TYPE_S:
1670 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1672 case AV_PICTURE_TYPE_SI:
1673 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1675 case AV_PICTURE_TYPE_SP:
1676 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
/* Textual per-macroblock dump: one character cell per MB. */
1679 for (y = 0; y < s->mb_height; y++) {
1680 for (x = 0; x < s->mb_width; x++) {
1681 if (s->avctx->debug & FF_DEBUG_SKIP) {
1682 int count = s->mbskip_table[x + y * s->mb_stride];
1685 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1687 if (s->avctx->debug & FF_DEBUG_QP) {
1688 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1689 pict->qscale_table[x + y * s->mb_stride]);
1691 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1692 int mb_type = pict->mb_type[x + y * s->mb_stride];
1693 // Type & MV direction
1694 if (IS_PCM(mb_type))
1695 av_log(s->avctx, AV_LOG_DEBUG, "P");
1696 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1697 av_log(s->avctx, AV_LOG_DEBUG, "A");
1698 else if (IS_INTRA4x4(mb_type))
1699 av_log(s->avctx, AV_LOG_DEBUG, "i");
1700 else if (IS_INTRA16x16(mb_type))
1701 av_log(s->avctx, AV_LOG_DEBUG, "I");
1702 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1703 av_log(s->avctx, AV_LOG_DEBUG, "d");
1704 else if (IS_DIRECT(mb_type))
1705 av_log(s->avctx, AV_LOG_DEBUG, "D");
1706 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1707 av_log(s->avctx, AV_LOG_DEBUG, "g");
1708 else if (IS_GMC(mb_type))
1709 av_log(s->avctx, AV_LOG_DEBUG, "G");
1710 else if (IS_SKIP(mb_type))
1711 av_log(s->avctx, AV_LOG_DEBUG, "S");
1712 else if (!USES_LIST(mb_type, 1))
1713 av_log(s->avctx, AV_LOG_DEBUG, ">");
1714 else if (!USES_LIST(mb_type, 0))
1715 av_log(s->avctx, AV_LOG_DEBUG, "<");
1717 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1718 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second character: partition segmentation of the MB. */
1722 if (IS_8X8(mb_type))
1723 av_log(s->avctx, AV_LOG_DEBUG, "+");
1724 else if (IS_16X8(mb_type))
1725 av_log(s->avctx, AV_LOG_DEBUG, "-");
1726 else if (IS_8X16(mb_type))
1727 av_log(s->avctx, AV_LOG_DEBUG, "|");
1728 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1729 av_log(s->avctx, AV_LOG_DEBUG, " ");
1731 av_log(s->avctx, AV_LOG_DEBUG, "?");
1734 if (IS_INTERLACED(mb_type))
1735 av_log(s->avctx, AV_LOG_DEBUG, "=");
1737 av_log(s->avctx, AV_LOG_DEBUG, " ");
1739 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1741 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* Visual overlays: copy the frame into visualization_buffer and paint on it. */
1745 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1746 (s->avctx->debug_mv)) {
1747 const int shift = 1 + s->quarter_sample;
1751 int h_chroma_shift, v_chroma_shift, block_height;
1752 const int width = s->avctx->width;
1753 const int height = s->avctx->height;
1754 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1755 const int mv_stride = (s->mb_width << mv_sample_log2) +
1756 (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
1757 s->low_delay = 0; // needed to see the vectors without trashing the buffers
1759 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1760 &h_chroma_shift, &v_chroma_shift);
1761 for (i = 0; i < 3; i++) {
1762 memcpy(s->visualization_buffer[i], pict->data[i],
1763 (i == 0) ? pict->linesize[i] * height:
1764 pict->linesize[i] * height >> v_chroma_shift);
1765 pict->data[i] = s->visualization_buffer[i];
1767 pict->type = FF_BUFFER_TYPE_COPY;
1768 ptr = pict->data[0];
1769 block_height = 16 >> v_chroma_shift;
1771 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1773 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1774 const int mb_index = mb_x + mb_y * s->mb_stride;
1775 if ((s->avctx->debug_mv) && pict->motion_val) {
/* type 0/1/2 = P forward, B forward, B backward vector overlays. */
1777 for (type = 0; type < 3; type++) {
1781 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1782 (pict->pict_type!= AV_PICTURE_TYPE_P))
1787 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1788 (pict->pict_type!= AV_PICTURE_TYPE_B))
1793 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1794 (pict->pict_type!= AV_PICTURE_TYPE_B))
1799 if (!USES_LIST(pict->mb_type[mb_index], direction))
1802 if (IS_8X8(pict->mb_type[mb_index])) {
1804 for (i = 0; i < 4; i++) {
1805 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1806 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1807 int xy = (mb_x * 2 + (i & 1) +
1808 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1809 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1810 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1811 draw_arrow(ptr, sx, sy, mx, my, width,
1812 height, s->linesize, 100);
1814 } else if (IS_16X8(pict->mb_type[mb_index])) {
1816 for (i = 0; i < 2; i++) {
1817 int sx = mb_x * 16 + 8;
1818 int sy = mb_y * 16 + 4 + 8 * i;
1819 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1820 int mx = (pict->motion_val[direction][xy][0] >> shift);
1821 int my = (pict->motion_val[direction][xy][1] >> shift);
1823 if (IS_INTERLACED(pict->mb_type[mb_index]))
1826 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1827 height, s->linesize, 100);
1829 } else if (IS_8X16(pict->mb_type[mb_index])) {
1831 for (i = 0; i < 2; i++) {
1832 int sx = mb_x * 16 + 4 + 8 * i;
1833 int sy = mb_y * 16 + 8;
1834 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1835 int mx = pict->motion_val[direction][xy][0] >> shift;
1836 int my = pict->motion_val[direction][xy][1] >> shift;
1838 if (IS_INTERLACED(pict->mb_type[mb_index]))
1841 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1842 height, s->linesize, 100);
1845 int sx = mb_x * 16 + 8;
1846 int sy = mb_y * 16 + 8;
1847 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
/* NOTE(review): ">> shift + sx" parses as ">> (shift + sx)" because + binds
 * tighter than >> — the 8x8 case above uses "(... >> shift) + sx". The
 * parentheses appear to have been lost here; confirm against upstream. */
1848 int mx = pict->motion_val[direction][xy][0] >> shift + sx;
1849 int my = pict->motion_val[direction][xy][1] >> shift + sy;
1850 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* Paint QP as a flat grey level into both chroma planes of the MB. */
1854 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1855 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1856 0x0101010101010101ULL;
1858 for (y = 0; y < block_height; y++) {
1859 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1860 (block_height * mb_y + y) *
1861 pict->linesize[1]) = c;
1862 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1863 (block_height * mb_y + y) *
1864 pict->linesize[2]) = c;
1867 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1869 int mb_type = pict->mb_type[mb_index];
/* COLOR maps an (angle, radius) pair to U/V chroma values on a color wheel. */
1872 #define COLOR(theta, r) \
1873 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1874 v = (int)(128 + r * sin(theta * 3.141592 / 180));
1878 if (IS_PCM(mb_type)) {
1880 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1881 IS_INTRA16x16(mb_type)) {
1883 } else if (IS_INTRA4x4(mb_type)) {
1885 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1887 } else if (IS_DIRECT(mb_type)) {
1889 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1891 } else if (IS_GMC(mb_type)) {
1893 } else if (IS_SKIP(mb_type)) {
1895 } else if (!USES_LIST(mb_type, 1)) {
1897 } else if (!USES_LIST(mb_type, 0)) {
1900 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1904 u *= 0x0101010101010101ULL;
1905 v *= 0x0101010101010101ULL;
1906 for (y = 0; y < block_height; y++) {
1907 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1908 (block_height * mb_y + y) * pict->linesize[1]) = u;
1909 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1910 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* Segmentation markers: XOR luma so partition borders stand out. */
1914 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1915 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1916 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1917 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1918 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1920 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1921 for (y = 0; y < 16; y++)
1922 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1923 pict->linesize[0]] ^= 0x80;
1925 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
1926 int dm = 1 << (mv_sample_log2 - 2);
1927 for (i = 0; i < 4; i++) {
1928 int sx = mb_x * 16 + 8 * (i & 1);
1929 int sy = mb_y * 16 + 8 * (i >> 1);
1930 int xy = (mb_x * 2 + (i & 1) +
1931 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1933 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
1934 if (mv[0] != mv[dm] ||
1935 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
1936 for (y = 0; y < 8; y++)
1937 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
1938 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
1939 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
1940 pict->linesize[0]) ^= 0x8080808080808080ULL;
1944 if (IS_INTERLACED(mb_type) &&
1945 s->codec_id == AV_CODEC_ID_H264) {
1949 s->mbskip_table[mb_index] = 0;
/**
1956 * find the lowest MB row referenced in the MVs
 * Used by frame-threaded decoding to know how far the reference picture in
 * direction `dir` must be decoded before this MB can be motion-compensated.
 * NOTE(review): the switch cases that set `mvs` and the unhandled label are
 * missing from this excerpt; code kept verbatim.
 */
1958 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* MVs are in half-pel units unless quarter_sample; qpel_shift normalizes
 * both to quarter-pel before the max/min scan. */
1960 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1961 int my, off, i, mvs;
1963 if (s->picture_structure != PICT_FRAME) goto unhandled;
1965 switch (s->mv_type) {
1979 for (i = 0; i < mvs; i++) {
1980 my = s->mv[dir][i][1]<<qpel_shift;
1981 my_max = FFMAX(my_max, my);
1982 my_min = FFMIN(my_min, my);
/* +63 >> 6: convert the largest quarter-pel displacement to whole MB rows,
 * rounding up; clamp the result into the valid row range. */
1985 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1987 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* Fallback for unhandled cases: require the whole reference picture. */
1989 return s->mb_height-1;
1992 /* put block[] to dest[] */
/* Dequantize an intra block with the codec-selected dequantizer, then
 * overwrite dest with its IDCT (idct_put = store, not accumulate). */
1993 static inline void put_dct(MpegEncContext *s,
1994 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1996 s->dct_unquantize_intra(s, block, i, qscale);
1997 s->dsp.idct_put (dest, line_size, block);
2000 /* add block[] to dest[] */
/* Accumulate the IDCT of an already-dequantized block onto dest; skipped
 * entirely when the block has no coded coefficients (last_index < 0). */
2001 static inline void add_dct(MpegEncContext *s,
2002 DCTELEM *block, int i, uint8_t *dest, int line_size)
2004 if (s->block_last_index[i] >= 0) {
2005 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block and accumulate its IDCT onto dest; no-op for
 * blocks with no coded coefficients. */
2009 static inline void add_dequant_dct(MpegEncContext *s,
2010 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2012 if (s->block_last_index[i] >= 0) {
2013 s->dct_unquantize_inter(s, block, i, qscale);
2015 s->dsp.idct_add (dest, line_size, block);
/**
2020 * Clean dc, ac, coded_block for the current non-intra MB.
 * Resets the intra prediction state (DC values to the mid-point 1024, AC
 * coefficients to 0) for the luma 8x8 blocks and the chroma blocks of the
 * current macroblock, so a later intra MB does not predict from stale data.
 * NOTE(review): some lines are missing from this excerpt (e.g. the write to
 * dc_val[0][xy] itself and dc_val[1]); code kept verbatim.
 */
2022 void ff_clean_intra_table_entries(MpegEncContext *s)
/* Luma: indices are in 8x8-block units, stride b8_stride. */
2024 int wrap = s->b8_stride;
2025 int xy = s->block_index[0];
2028 s->dc_val[0][xy + 1 ] =
2029 s->dc_val[0][xy + wrap] =
2030 s->dc_val[0][xy + 1 + wrap] = 1024;
2032 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2033 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2034 if (s->msmpeg4_version>=3) {
2035 s->coded_block[xy ] =
2036 s->coded_block[xy + 1 ] =
2037 s->coded_block[xy + wrap] =
2038 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma: one entry per MB, stride mb_stride. */
2041 wrap = s->mb_stride;
2042 xy = s->mb_x + s->mb_y * wrap;
2044 s->dc_val[2][xy] = 1024;
2046 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2047 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2049 s->mbintra_table[xy]= 0;
2052 /* generic function called after a macroblock has been parsed by the
2053 decoder or after it has been encoded by the encoder.
2055 Important variables used:
2056 s->mb_intra : true if intra macroblock
2057 s->mv_dir : motion vector direction
2058 s->mv_type : motion vector type
2059 s->mv : motion vector
2060 s->interlaced_dct : true if interlaced dct used (mpeg2)
 NOTE(review): this excerpt is missing many intermediate lines (braces, else
 branches, declarations, goto targets); code kept verbatim. */
2062 static av_always_inline
2063 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2066 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XvMC does its own block handling; hand the MB over and skip the rest. */
2067 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2068 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2072 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2073 /* save DCT coefficients */
2075 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2076 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2078 for(j=0; j<64; j++){
2079 *dct++ = block[i][s->dsp.idct_permutation[j]];
2080 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2082 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2086 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2088 /* update DC predictors for P macroblocks */
2090 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2091 if(s->mbintra_table[mb_xy])
2092 ff_clean_intra_table_entries(s);
/* 128 << intra_dc_precision is the neutral DC predictor value. */
2096 s->last_dc[2] = 128 << s->intra_dc_precision;
2099 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2100 s->mbintra_table[mb_xy]=1;
2102 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2103 uint8_t *dest_y, *dest_cb, *dest_cr;
2104 int dct_linesize, dct_offset;
2105 op_pixels_func (*op_pix)[4];
2106 qpel_mc_func (*op_qpix)[16];
2107 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2108 const int uvlinesize = s->current_picture.f.linesize[1];
2109 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2110 const int block_size = 8;
2112 /* avoid copy if macroblock skipped in last frame too */
2113 /* skip only during decoding as we might trash the buffers during encoding a bit */
2115 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2117 if (s->mb_skipped) {
2119 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2121 } else if(!s->current_picture.f.reference) {
2124 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT: rows of the two fields interleave, so the effective
 * linesize doubles and the second block starts one (field) row down. */
2128 dct_linesize = linesize << s->interlaced_dct;
2129 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
/* Non-readable B-frames render into the scratchpad instead of s->dest. */
2133 dest_cb= s->dest[1];
2134 dest_cr= s->dest[2];
2136 dest_y = s->b_scratchpad;
2137 dest_cb= s->b_scratchpad+16*linesize;
2138 dest_cr= s->b_scratchpad+32*linesize;
2142 /* motion handling */
2143 /* decoding or more than one mb_type (MC was already done otherwise) */
2146 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
/* Frame threading: wait until the referenced rows of last/next pictures
 * have been decoded before reading them for MC. */
2147 if (s->mv_dir & MV_DIR_FORWARD) {
2148 ff_thread_await_progress(&s->last_picture_ptr->f,
2149 ff_MPV_lowest_referenced_row(s, 0),
2152 if (s->mv_dir & MV_DIR_BACKWARD) {
2153 ff_thread_await_progress(&s->next_picture_ptr->f,
2154 ff_MPV_lowest_referenced_row(s, 1),
/* First prediction uses "put"; a second (backward) prediction averages. */
2159 op_qpix= s->me.qpel_put;
2160 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2161 op_pix = s->dsp.put_pixels_tab;
2163 op_pix = s->dsp.put_no_rnd_pixels_tab;
2165 if (s->mv_dir & MV_DIR_FORWARD) {
2166 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2167 op_pix = s->dsp.avg_pixels_tab;
2168 op_qpix= s->me.qpel_avg;
2170 if (s->mv_dir & MV_DIR_BACKWARD) {
2171 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2175 /* skip dequant / idct if we are really late ;) */
2176 if(s->avctx->skip_idct){
2177 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2178 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2179 || s->avctx->skip_idct >= AVDISCARD_ALL)
2183 /* add dct residue */
/* Inter path A: codecs whose bitstream coefficients still need
 * dequantization here (h263-family unless handled elsewhere). */
2184 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2185 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2186 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2187 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2188 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2189 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2191 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2192 if (s->chroma_y_shift){
2193 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2194 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2198 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2199 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2200 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2201 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* Inter path B: coefficients already dequantized during parsing. */
2204 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2205 add_dct(s, block[0], 0, dest_y , dct_linesize);
2206 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2207 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2208 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2210 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2211 if(s->chroma_y_shift){//Chroma420
2212 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2213 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2216 dct_linesize = uvlinesize << s->interlaced_dct;
2217 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2219 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2220 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2221 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2222 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2223 if(!s->chroma_x_shift){//Chroma444
2224 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2225 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2226 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2227 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2232 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2233 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2236 /* dct only in intra block */
/* Intra path: either dequantize+put (h263-family) or plain idct_put
 * (mpeg1/2, already dequantized). */
2237 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2238 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2239 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2240 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2241 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2243 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2244 if(s->chroma_y_shift){
2245 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2246 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2250 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2251 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2252 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2253 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2257 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2258 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2259 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2260 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2262 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2263 if(s->chroma_y_shift){
2264 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2265 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2268 dct_linesize = uvlinesize << s->interlaced_dct;
2269 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2271 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2272 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2273 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2274 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2275 if(!s->chroma_x_shift){//Chroma444
2276 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2277 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2278 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2279 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* When a scratchpad was used, copy the finished MB back to s->dest. */
2287 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2288 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2289 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2294 void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2296 if(s->out_format == FMT_MPEG1) {
2297 MPV_decode_mb_internal(s, block, 1);
2300 MPV_decode_mb_internal(s, block, 0);
2304 * @param h is the normal height, this will be reduced automatically if needed for the last row
2306 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2307 const int field_pic= s->picture_structure != PICT_FRAME;
2313 if (!s->avctx->hwaccel
2314 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2315 && s->unrestricted_mv
2316 && s->current_picture.f.reference
2318 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2319 int sides = 0, edge_h;
2320 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2321 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2322 if (y==0) sides |= EDGE_TOP;
2323 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2325 edge_h= FFMIN(h, s->v_edge_pos - y);
2327 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2328 s->linesize, s->h_edge_pos, edge_h,
2329 EDGE_WIDTH, EDGE_WIDTH, sides);
2330 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2331 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2332 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2333 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2334 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2335 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2338 h= FFMIN(h, s->avctx->height - y);
2340 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2342 if (s->avctx->draw_horiz_band) {
2344 int offset[AV_NUM_DATA_POINTERS];
2347 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2348 src = &s->current_picture_ptr->f;
2349 else if(s->last_picture_ptr)
2350 src = &s->last_picture_ptr->f;
2354 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2355 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2358 offset[0]= y * s->linesize;
2360 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2361 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2367 s->avctx->draw_horiz_band(s->avctx, src, offset,
2368 y, s->picture_structure, h);
2372 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2373 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2374 const int uvlinesize = s->current_picture.f.linesize[1];
2375 const int mb_size= 4;
2377 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2378 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2379 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2380 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2381 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2382 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2383 //block_index is not used by mpeg2, so it is not affected by chroma_format
2385 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2386 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2387 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2389 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2391 if(s->picture_structure==PICT_FRAME){
2392 s->dest[0] += s->mb_y * linesize << mb_size;
2393 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2394 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2396 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2397 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2398 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2399 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2404 void ff_mpeg_flush(AVCodecContext *avctx){
2406 MpegEncContext *s = avctx->priv_data;
2408 if(s==NULL || s->picture==NULL)
2411 for(i=0; i<s->picture_count; i++){
2412 if (s->picture[i].f.data[0] &&
2413 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2414 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2415 free_frame_buffer(s, &s->picture[i]);
2417 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2419 s->mb_x= s->mb_y= 0;
2421 s->parse_context.state= -1;
2422 s->parse_context.frame_start_found= 0;
2423 s->parse_context.overread= 0;
2424 s->parse_context.overread_index= 0;
2425 s->parse_context.index= 0;
2426 s->parse_context.last_index= 0;
2427 s->bitstream_buffer_size=0;
2431 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2432 DCTELEM *block, int n, int qscale)
2434 int i, level, nCoeffs;
2435 const uint16_t *quant_matrix;
2437 nCoeffs= s->block_last_index[n];
2440 block[0] = block[0] * s->y_dc_scale;
2442 block[0] = block[0] * s->c_dc_scale;
2443 /* XXX: only mpeg1 */
2444 quant_matrix = s->intra_matrix;
2445 for(i=1;i<=nCoeffs;i++) {
2446 int j= s->intra_scantable.permutated[i];
2451 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2452 level = (level - 1) | 1;
2455 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2456 level = (level - 1) | 1;
2463 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2464 DCTELEM *block, int n, int qscale)
2466 int i, level, nCoeffs;
2467 const uint16_t *quant_matrix;
2469 nCoeffs= s->block_last_index[n];
2471 quant_matrix = s->inter_matrix;
2472 for(i=0; i<=nCoeffs; i++) {
2473 int j= s->intra_scantable.permutated[i];
2478 level = (((level << 1) + 1) * qscale *
2479 ((int) (quant_matrix[j]))) >> 4;
2480 level = (level - 1) | 1;
2483 level = (((level << 1) + 1) * qscale *
2484 ((int) (quant_matrix[j]))) >> 4;
2485 level = (level - 1) | 1;
2492 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2493 DCTELEM *block, int n, int qscale)
2495 int i, level, nCoeffs;
2496 const uint16_t *quant_matrix;
2498 if(s->alternate_scan) nCoeffs= 63;
2499 else nCoeffs= s->block_last_index[n];
2502 block[0] = block[0] * s->y_dc_scale;
2504 block[0] = block[0] * s->c_dc_scale;
2505 quant_matrix = s->intra_matrix;
2506 for(i=1;i<=nCoeffs;i++) {
2507 int j= s->intra_scantable.permutated[i];
2512 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2515 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2522 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2523 DCTELEM *block, int n, int qscale)
2525 int i, level, nCoeffs;
2526 const uint16_t *quant_matrix;
2529 if(s->alternate_scan) nCoeffs= 63;
2530 else nCoeffs= s->block_last_index[n];
2533 block[0] = block[0] * s->y_dc_scale;
2535 block[0] = block[0] * s->c_dc_scale;
2536 quant_matrix = s->intra_matrix;
2537 for(i=1;i<=nCoeffs;i++) {
2538 int j= s->intra_scantable.permutated[i];
2543 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2546 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2555 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2556 DCTELEM *block, int n, int qscale)
2558 int i, level, nCoeffs;
2559 const uint16_t *quant_matrix;
2562 if(s->alternate_scan) nCoeffs= 63;
2563 else nCoeffs= s->block_last_index[n];
2565 quant_matrix = s->inter_matrix;
2566 for(i=0; i<=nCoeffs; i++) {
2567 int j= s->intra_scantable.permutated[i];
2572 level = (((level << 1) + 1) * qscale *
2573 ((int) (quant_matrix[j]))) >> 4;
2576 level = (((level << 1) + 1) * qscale *
2577 ((int) (quant_matrix[j]))) >> 4;
2586 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2587 DCTELEM *block, int n, int qscale)
2589 int i, level, qmul, qadd;
2592 assert(s->block_last_index[n]>=0);
2598 block[0] = block[0] * s->y_dc_scale;
2600 block[0] = block[0] * s->c_dc_scale;
2601 qadd = (qscale - 1) | 1;
2608 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2610 for(i=1; i<=nCoeffs; i++) {
2614 level = level * qmul - qadd;
2616 level = level * qmul + qadd;
2623 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2624 DCTELEM *block, int n, int qscale)
2626 int i, level, qmul, qadd;
2629 assert(s->block_last_index[n]>=0);
2631 qadd = (qscale - 1) | 1;
2634 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2636 for(i=0; i<=nCoeffs; i++) {
2640 level = level * qmul - qadd;
2642 level = level * qmul + qadd;
2650 * set qscale and update qscale dependent variables.
2652 void ff_set_qscale(MpegEncContext * s, int qscale)
2656 else if (qscale > 31)
2660 s->chroma_qscale= s->chroma_qscale_table[qscale];
2662 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2663 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2666 void ff_MPV_report_decode_progress(MpegEncContext *s)
2668 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2669 ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);