2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
38 #include "xvmc_internal.h"
/* Forward declarations of the reference-C dequantization routines defined
 * later in this file; ff_dct_common_init() installs them as function
 * pointers on the MpegEncContext (arch-specific init may override them). */
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 DCTELEM *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
61 /* enable all paranoid tests for rounding, overflows, etc... */
/* Identity mapping: chroma qscale == luma qscale unless a codec installs
 * its own table (see ff_MPV_common_defaults()). */
67 static const uint8_t ff_default_chroma_qscale_table[32] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
70 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale: constant 8 for every qscale (indexed by qscale). */
73 const uint8_t ff_mpeg1_dc_scale_table[128] = {
74 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale tables for intra_dc_precision 1..3 (divisor 4/2/1). */
85 static const uint8_t mpeg2_dc_scale_table1[128] = {
86 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
97 static const uint8_t mpeg2_dc_scale_table2[128] = {
98 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
109 static const uint8_t mpeg2_dc_scale_table3[128] = {
110 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Lookup indexed by intra_dc_precision (0..3): entry 0 is the MPEG-1
 * table (scale 8), entries 1-3 the MPEG-2 tables above. */
121 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
122 ff_mpeg1_dc_scale_table,
123 mpeg2_dc_scale_table1,
124 mpeg2_dc_scale_table2,
125 mpeg2_dc_scale_table3,
/* Pixel-format candidate lists exposed to get_format(); software 4:2:0
 * and the hwaccel variants respectively. */
128 const enum AVPixelFormat ff_pixfmt_list_420[] = {
133 const enum AVPixelFormat ff_hwaccel_pixfmt_list_420[] = {
134 AV_PIX_FMT_DXVA2_VLD,
135 AV_PIX_FMT_VAAPI_VLD,
/* Find the next MPEG start code (00 00 01 xx) in [p, end).
 * *state carries the last bytes seen so the scan can resume across
 * buffer boundaries; the caller owns and persists it between calls. */
141 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
143 uint32_t * restrict state)
/* Feed up to 3 bytes through the rolling 32-bit state; tmp == 0x100
 * means the previous 3 bytes completed a 00 00 01 prefix. */
151 for (i = 0; i < 3; i++) {
152 uint32_t tmp = *state << 8;
153 *state = tmp + *(p++);
154 if (tmp == 0x100 || p == end)
/* Stride forward based on how many of the last 3 bytes could still be
 * part of a 00 00 01 prefix (branch on p[-1], p[-2], p[-3]). */
159 if (p[-1] > 1 ) p += 3;
160 else if (p[-2] ) p += 2;
161 else if (p[-3]|(p[-1]-1)) p++;
/* Clamp to the buffer and step back over the 4 bytes that must be
 * re-read into *state before returning. */
168 p = FFMIN(p, end) - 4;
174 /* init common dct for both encoder and decoder */
175 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Initialize DSP utils, then install the reference-C dequantizers. */
177 ff_dsputil_init(&s->dsp, s->avctx);
179 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
180 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
181 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
182 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
183 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* Bit-exact mode swaps in a variant that avoids the mismatch control. */
184 if (s->flags & CODEC_FLAG_BITEXACT)
185 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
186 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Let each architecture override the C implementations where available. */
189 ff_MPV_common_init_x86(s);
191 ff_MPV_common_init_axp(s);
193 ff_MPV_common_init_arm(s);
195 ff_MPV_common_init_altivec(s);
197 ff_MPV_common_init_bfin(s);
200 /* load & permutate scantables
201 * note: only wmv uses different ones
/* Scantables are permuted with the IDCT's permutation so coefficients
 * land where the (possibly SIMD) IDCT expects them. */
203 if (s->alternate_scan) {
204 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
205 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
207 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
208 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
210 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
211 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Shallow-copy src into dst, then mark dst as a COPY so the buffer
 * release logic knows dst does not own the underlying frame data. */
216 void ff_copy_picture(Picture *dst, Picture *src)
219 dst->f.type = FF_BUFFER_TYPE_COPY;
223 * Release a frame buffer
/* Release the frame data owned by pic and its hwaccel private data. */
225 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
227 /* WM Image / Screen codecs allocate internal buffers with different
228 * dimensions / colorspaces; ignore user-defined callbacks for these. */
229 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
230 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
231 s->codec_id != AV_CODEC_ID_MSS2)
232 ff_thread_release_buffer(s->avctx, &pic->f);
/* For the codecs above, bypass the user callbacks entirely. */
234 avcodec_default_release_buffer(s->avctx, &pic->f);
235 av_freep(&pic->f.hwaccel_picture_private);
/* Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads). Returns 0 on success, AVERROR(ENOMEM)
 * on failure (partially allocated buffers are freed). */
238 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
240 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
242 // edge emu needs blocksize + filter length - 1
243 // (= 17x17 for halfpel / 21x21 for h264)
244 // linesize * interlaced * MBsize
245 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 21,
248 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 2,
/* The remaining scratch pointers all alias me.scratchpad; only
 * edge_emu_buffer and me.scratchpad are independently owned/freed. */
250 s->me.temp = s->me.scratchpad;
251 s->rd_scratchpad = s->me.scratchpad;
252 s->b_scratchpad = s->me.scratchpad;
253 s->obmc_scratchpad = s->me.scratchpad + 16;
257 av_freep(&s->edge_emu_buffer);
258 return AVERROR(ENOMEM);
262 * Allocate a frame buffer
/* Acquire the pixel buffers for pic via hwaccel/thread/default get_buffer
 * and validate strides; frees everything again on any failure. */
264 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
268 if (s->avctx->hwaccel) {
269 assert(!pic->f.hwaccel_picture_private);
270 if (s->avctx->hwaccel->priv_data_size) {
271 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
272 if (!pic->f.hwaccel_picture_private) {
273 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* Same codec exclusion as free_frame_buffer(): the WM image/screen
 * codecs must not go through user-supplied buffer callbacks. */
279 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
280 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
281 s->codec_id != AV_CODEC_ID_MSS2)
282 r = ff_thread_get_buffer(s->avctx, &pic->f);
284 r = avcodec_default_get_buffer(s->avctx, &pic->f);
286 if (r < 0 || !pic->f.type || !pic->f.data[0]) {
287 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
288 r, pic->f.type, pic->f.data[0]);
289 av_freep(&pic->f.hwaccel_picture_private);
/* The context caches linesizes; a user get_buffer() that changes the
 * stride mid-stream would desynchronize all derived tables. */
293 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
294 s->uvlinesize != pic->f.linesize[1])) {
295 av_log(s->avctx, AV_LOG_ERROR,
296 "get_buffer() failed (stride changed)\n");
297 free_frame_buffer(s, pic);
/* U and V planes are assumed to share a stride throughout this file. */
301 if (pic->f.linesize[1] != pic->f.linesize[2]) {
302 av_log(s->avctx, AV_LOG_ERROR,
303 "get_buffer() failed (uv stride mismatch)\n");
304 free_frame_buffer(s, pic);
308 if (!s->edge_emu_buffer &&
309 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
310 av_log(s->avctx, AV_LOG_ERROR,
311 "get_buffer() failed to allocate context scratch buffers.\n");
312 free_frame_buffer(s, pic);
320 * Allocate a Picture.
321 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Allocate a Picture and all its per-MB side tables (qscale, mb_type,
 * motion vectors, etc.). If shared != 0 the pixel data is assumed to be
 * externally provided; otherwise alloc_frame_buffer() is called. */
323 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
325 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
327 // the + 1 is needed so memset(,,stride*height) does not sig11
329 const int mb_array_size = s->mb_stride * s->mb_height;
330 const int b8_array_size = s->b8_stride * s->mb_height * 2;
331 const int b4_array_size = s->b4_stride * s->mb_height * 4;
336 assert(pic->f.data[0]);
337 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
338 pic->f.type = FF_BUFFER_TYPE_SHARED;
340 assert(!pic->f.data[0]);
342 if (alloc_frame_buffer(s, pic) < 0)
/* Cache the strides on the context on first successful allocation. */
345 s->linesize = pic->f.linesize[0];
346 s->uvlinesize = pic->f.linesize[1];
/* Side tables are only allocated once per Picture (qscale_table is the
 * sentinel); subsequent calls reuse them. */
349 if (pic->f.qscale_table == NULL) {
351 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
352 mb_array_size * sizeof(int16_t), fail)
353 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
354 mb_array_size * sizeof(int16_t), fail)
355 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
356 mb_array_size * sizeof(int8_t ), fail)
359 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
360 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
361 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
362 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
364 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
365 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
/* The +2*mb_stride+1 offset leaves a guard row/column so neighbor
 * accesses at the frame border stay inside the allocation. */
367 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
368 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
/* H.264 uses 4x4 motion granularity (subsample_log2 = 2)... */
369 if (s->out_format == FMT_H264) {
370 for (i = 0; i < 2; i++) {
371 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
372 2 * (b4_array_size + 4) * sizeof(int16_t),
374 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
375 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
376 4 * mb_array_size * sizeof(uint8_t), fail)
378 pic->f.motion_subsample_log2 = 2;
/* ...everything else (and encoding / MV debug) uses 8x8 blocks. */
379 } else if (s->out_format == FMT_H263 || s->encoding ||
380 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
381 for (i = 0; i < 2; i++) {
382 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
383 2 * (b8_array_size + 4) * sizeof(int16_t),
385 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
386 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
387 4 * mb_array_size * sizeof(uint8_t), fail)
389 pic->f.motion_subsample_log2 = 3;
391 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
392 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
393 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
395 pic->f.qstride = s->mb_stride;
396 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
397 1 * sizeof(AVPanScan), fail)
403 fail: // for the FF_ALLOCZ_OR_GOTO macro
405 free_frame_buffer(s, pic);
410 * Deallocate a picture.
/* Free a Picture's frame data (unless shared) and all its side tables,
 * NULLing the derived pointers so stale aliases cannot be used. */
412 static void free_picture(MpegEncContext *s, Picture *pic)
416 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
417 free_frame_buffer(s, pic);
420 av_freep(&pic->mb_var);
421 av_freep(&pic->mc_mb_var);
422 av_freep(&pic->mb_mean);
423 av_freep(&pic->f.mbskip_table);
424 av_freep(&pic->qscale_table_base);
425 pic->f.qscale_table = NULL;
426 av_freep(&pic->mb_type_base);
427 pic->f.mb_type = NULL;
428 av_freep(&pic->f.dct_coeff);
429 av_freep(&pic->f.pan_scan);
/* NOTE(review): mb_type is already cleared above (line 427); this
 * second clear is redundant but harmless. */
430 pic->f.mb_type = NULL;
431 for (i = 0; i < 2; i++) {
432 av_freep(&pic->motion_val_base[i]);
433 av_freep(&pic->f.ref_index[i]);
434 pic->f.motion_val[i] = NULL;
/* Shared pictures never owned data[]; just drop the pointers. */
437 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
438 for (i = 0; i < 4; i++) {
440 pic->f.data[i] = NULL;
/* Allocate the per-thread (slice context) working buffers: ME maps,
 * optional noise-reduction accumulator, DCT blocks and, for H.263-family
 * codecs, the AC prediction values. Returns 0 / -1. */
446 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
448 int y_size = s->b8_stride * (2 * s->mb_height + 1);
449 int c_size = s->mb_stride * (s->mb_height + 1);
450 int yc_size = y_size + 2 * c_size;
458 s->obmc_scratchpad = NULL;
461 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
462 ME_MAP_SIZE * sizeof(uint32_t), fail)
463 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
464 ME_MAP_SIZE * sizeof(uint32_t), fail)
465 if (s->avctx->noise_reduction) {
466 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
467 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coeffs; pblocks[] gives per-block aliases into it. */
470 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
471 s->block = s->blocks[0];
473 for (i = 0; i < 12; i++) {
474 s->pblocks[i] = &s->block[i];
477 if (s->out_format == FMT_H263) {
479 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
480 yc_size * sizeof(int16_t) * 16, fail);
/* ac_val[0] = luma plane, [1]/[2] = chroma planes, each offset past a
 * guard row/column like the other per-MB tables. */
481 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
482 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
483 s->ac_val[2] = s->ac_val[1] + c_size;
488 return -1; // free() through ff_MPV_common_end()
/* Free everything init_duplicate_context() (and the frame-size scratch
 * allocation) attached to this slice context. */
491 static void free_duplicate_context(MpegEncContext *s)
496 av_freep(&s->edge_emu_buffer);
497 av_freep(&s->me.scratchpad);
/* obmc_scratchpad aliases me.scratchpad, so only the pointer is reset. */
501 s->obmc_scratchpad = NULL;
503 av_freep(&s->dct_error_sum);
504 av_freep(&s->me.map);
505 av_freep(&s->me.score_map);
506 av_freep(&s->blocks);
507 av_freep(&s->ac_val_base);
/* Save the per-thread pointers/state of src into bak so that a full
 * struct memcpy can be undone for those fields afterwards
 * (see ff_update_duplicate_context()). */
511 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
513 #define COPY(a) bak->a = src->a
514 COPY(edge_emu_buffer);
519 COPY(obmc_scratchpad);
526 COPY(me.map_generation);
/* Sync a slice-thread context with the master: copy the whole struct,
 * then restore dst's own thread-local buffers via the backup/restore
 * trick, and fix up the pblocks aliases into dst's block array. */
538 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
542 // FIXME copy only needed parts
544 backup_duplicate_context(&bak, dst);
545 memcpy(dst, src, sizeof(MpegEncContext));
546 backup_duplicate_context(dst, &bak);
547 for (i = 0; i < 12; i++) {
548 dst->pblocks[i] = &dst->block[i];
550 if (!dst->edge_emu_buffer &&
551 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
552 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
553 "scratch buffers.\n");
556 // STOP_TIMER("update_duplicate_context")
557 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading update callback: copy decoding state from the source
 * thread's context (s1) into this thread's context (s), initializing or
 * resizing s first when needed. Picture pointers are rebased into s's
 * own picture array. */
561 int ff_mpeg_update_thread_context(AVCodecContext *dst,
562 const AVCodecContext *src)
565 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
567 if (dst == src || !s1->context_initialized)
570 // FIXME can parameters change on I-frames?
571 // in that case dst may need a reinit
572 if (!s->context_initialized) {
/* First use of this thread context: clone s1 wholesale, then give s
 * its own picture range and bitstream buffer before common init. */
573 memcpy(s, s1, sizeof(MpegEncContext));
576 s->picture_range_start += MAX_PICTURE_COUNT;
577 s->picture_range_end += MAX_PICTURE_COUNT;
578 s->bitstream_buffer = NULL;
579 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
581 ff_MPV_common_init(s);
584 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
586 s->context_reinit = 0;
587 s->height = s1->height;
588 s->width = s1->width;
589 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
593 s->avctx->coded_height = s1->avctx->coded_height;
594 s->avctx->coded_width = s1->avctx->coded_width;
595 s->avctx->width = s1->avctx->width;
596 s->avctx->height = s1->avctx->height;
598 s->coded_picture_number = s1->coded_picture_number;
599 s->picture_number = s1->picture_number;
600 s->input_picture_number = s1->input_picture_number;
602 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
/* Struct-range memcpy: copies every field between last_picture and
 * last_picture_ptr as laid out in MpegEncContext. */
603 memcpy(&s->last_picture, &s1->last_picture,
604 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
606 // reset s->picture[].f.extended_data to s->picture[].f.data
607 for (i = 0; i < s->picture_count; i++)
608 s->picture[i].f.extended_data = s->picture[i].f.data;
/* Translate s1's picture pointers into indices and back into s's array. */
610 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
611 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
612 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
614 // Error/bug resilience
615 s->next_p_frame_damaged = s1->next_p_frame_damaged;
616 s->workaround_bugs = s1->workaround_bugs;
/* Another struct-range copy: the MPEG-4 fields between
 * time_increment_bits and shape. */
619 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
620 (char *) &s1->shape - (char *) &s1->time_increment_bits);
623 s->max_b_frames = s1->max_b_frames;
624 s->low_delay = s1->low_delay;
625 s->droppable = s1->droppable;
627 // DivX handling (doesn't work)
628 s->divx_packed = s1->divx_packed;
630 if (s1->bitstream_buffer) {
631 if (s1->bitstream_buffer_size +
632 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
633 av_fast_malloc(&s->bitstream_buffer,
634 &s->allocated_bitstream_buffer_size,
635 s1->allocated_bitstream_buffer_size);
636 s->bitstream_buffer_size = s1->bitstream_buffer_size;
637 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
638 s1->bitstream_buffer_size);
/* Zero the padding so the bit reader never reads uninitialized bytes. */
639 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
640 FF_INPUT_BUFFER_PADDING_SIZE);
643 // linesize dependend scratch buffer allocation
644 if (!s->edge_emu_buffer)
646 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
647 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
648 "scratch buffers.\n");
649 return AVERROR(ENOMEM);
652 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
653 "be allocated due to unknown size.\n");
657 // MPEG2/interlacing info
658 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
659 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
661 if (!s1->first_field) {
662 s->last_pict_type = s1->pict_type;
663 if (s1->current_picture_ptr)
664 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
666 if (s1->pict_type != AV_PICTURE_TYPE_B) {
667 s->last_non_b_pict_type = s1->pict_type;
675 * Set the given MpegEncContext to common defaults
676 * (same for encoding and decoding).
677 * The changed fields will not depend upon the
678 * prior state of the MpegEncContext.
680 void ff_MPV_common_defaults(MpegEncContext *s)
/* Default tables: MPEG-1 DC scale and identity chroma-qscale mapping. */
682 s->y_dc_scale_table =
683 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
684 s->chroma_qscale_table = ff_default_chroma_qscale_table;
685 s->progressive_frame = 1;
686 s->progressive_sequence = 1;
687 s->picture_structure = PICT_FRAME;
689 s->coded_picture_number = 0;
690 s->picture_number = 0;
691 s->input_picture_number = 0;
693 s->picture_in_gop_number = 0;
698 s->picture_range_start = 0;
699 s->picture_range_end = MAX_PICTURE_COUNT;
/* Single slice context until ff_MPV_common_init() decides otherwise. */
701 s->slice_context_count = 1;
705 * Set the given MpegEncContext to defaults for decoding.
706 * the changed fields will not depend upon
707 * the prior state of the MpegEncContext.
/* Decoder-specific defaults; currently just the common defaults. */
709 void ff_MPV_decode_defaults(MpegEncContext *s)
711 ff_MPV_common_defaults(s);
715 * Initialize and allocates MpegEncContext fields dependent on the resolution.
/* Allocate all resolution-dependent tables of the context (MV tables,
 * MB-type / error-status tables, DC/AC prediction state, ...).
 * Counterpart of free_context_frame(); returns 0 or AVERROR(ENOMEM). */
717 static int init_context_frame(MpegEncContext *s)
719 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Strides carry a +1 guard column; b8/b4 are 8x8- and 4x4-block units. */
721 s->mb_width = (s->width + 15) / 16;
722 s->mb_stride = s->mb_width + 1;
723 s->b8_stride = s->mb_width * 2 + 1;
724 s->b4_stride = s->mb_width * 4 + 1;
725 mb_array_size = s->mb_height * s->mb_stride;
726 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
728 /* set default edge pos, will be overriden
729 * in decode_header if needed */
730 s->h_edge_pos = s->mb_width * 16;
731 s->v_edge_pos = s->mb_height * 16;
733 s->mb_num = s->mb_width * s->mb_height;
738 s->block_wrap[3] = s->b8_stride;
740 s->block_wrap[5] = s->mb_stride;
742 y_size = s->b8_stride * (2 * s->mb_height + 1);
743 c_size = s->mb_stride * (s->mb_height + 1);
744 yc_size = y_size + 2 * c_size;
746 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
747 fail); // error ressilience code looks cleaner with this
748 for (y = 0; y < s->mb_height; y++)
749 for (x = 0; x < s->mb_width; x++)
750 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
752 s->mb_index2xy[s->mb_height * s->mb_width] =
753 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
756 /* Allocate MV tables */
757 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
758 mv_table_size * 2 * sizeof(int16_t), fail);
759 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
760 mv_table_size * 2 * sizeof(int16_t), fail);
761 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
762 mv_table_size * 2 * sizeof(int16_t), fail);
763 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
764 mv_table_size * 2 * sizeof(int16_t), fail);
765 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
766 mv_table_size * 2 * sizeof(int16_t), fail);
767 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
768 mv_table_size * 2 * sizeof(int16_t), fail);
/* Working pointers are offset past the guard row/column of each base. */
769 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
770 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
771 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
772 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
774 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
776 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
778 /* Allocate MB type table */
779 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
780 sizeof(uint16_t), fail); // needed for encoding
782 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
785 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
786 mb_array_size * sizeof(float), fail);
787 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
788 mb_array_size * sizeof(float), fail);
792 FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
793 mb_array_size * sizeof(uint8_t), fail);
794 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
795 mb_array_size * sizeof(uint8_t), fail);
797 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
798 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
799 /* interlaced direct mode decoding tables */
800 for (i = 0; i < 2; i++) {
802 for (j = 0; j < 2; j++) {
803 for (k = 0; k < 2; k++) {
804 FF_ALLOCZ_OR_GOTO(s->avctx,
805 s->b_field_mv_table_base[i][j][k],
806 mv_table_size * 2 * sizeof(int16_t),
808 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
811 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
812 mb_array_size * 2 * sizeof(uint8_t), fail);
813 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
814 mv_table_size * 2 * sizeof(int16_t), fail);
815 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
818 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
819 mb_array_size * 2 * sizeof(uint8_t), fail);
822 if (s->out_format == FMT_H263) {
824 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
825 s->coded_block = s->coded_block_base + s->b8_stride + 1;
827 /* cbp, ac_pred, pred_dir */
828 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
829 mb_array_size * sizeof(uint8_t), fail);
830 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
831 mb_array_size * sizeof(uint8_t), fail);
834 if (s->h263_pred || s->h263_plus || !s->encoding) {
836 // MN: we need these for error resilience of intra-frames
837 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
838 yc_size * sizeof(int16_t), fail);
839 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
840 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
841 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor (128 << 3). */
842 for (i = 0; i < yc_size; i++)
843 s->dc_val_base[i] = 1024;
846 /* which mb is a intra block */
847 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
848 memset(s->mbintra_table, 1, mb_array_size);
850 /* init macroblock skip table */
851 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
852 // Note the + 1 is for a quicker mpeg4 slice_end detection
854 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
855 s->avctx->debug_mv) {
/* NOTE(review): these av_malloc() results are not checked here;
 * presumably callers tolerate NULL visualization buffers — confirm. */
856 s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
857 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
858 s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
859 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
860 s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
861 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
866 return AVERROR(ENOMEM);
870 * init common structure for both encoder and decoder.
871 * this assumes that some variables like width/height are already set
/* Initialize the common encoder/decoder state: DSP, picture array,
 * resolution-dependent tables (via init_context_frame()) and one
 * duplicate context per slice thread. Assumes width/height are set. */
873 av_cold int ff_MPV_common_init(MpegEncContext *s)
876 int nb_slices = (HAVE_THREADS &&
877 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
878 s->avctx->thread_count : 1;
880 if (s->encoding && s->avctx->slices)
881 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 needs mb_height rounded to macroblock-pair units. */
883 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
884 s->mb_height = (s->height + 31) / 32 * 2;
885 else if (s->codec_id != AV_CODEC_ID_H264)
886 s->mb_height = (s->height + 15) / 16;
888 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
889 av_log(s->avctx, AV_LOG_ERROR,
890 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Cap the slice count at MAX_THREADS and at one slice per MB row. */
894 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
897 max_slices = FFMIN(MAX_THREADS, s->mb_height);
899 max_slices = MAX_THREADS;
900 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
901 " reducing to %d\n", nb_slices, max_slices);
902 nb_slices = max_slices;
905 if ((s->width || s->height) &&
906 av_image_check_size(s->width, s->height, 0, s->avctx))
909 ff_dct_common_init(s);
911 s->flags = s->avctx->flags;
912 s->flags2 = s->avctx->flags2;
914 if (s->width && s->height) {
915 /* set chroma shifts */
916 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
920 /* convert fourcc to upper case */
921 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
923 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
925 s->avctx->coded_frame = &s->current_picture.f;
/* Encoder-only allocations (stats, quant matrices, reorder queues). */
928 if (s->msmpeg4_version) {
929 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
930 2 * 2 * (MAX_LEVEL + 1) *
931 (MAX_RUN + 1) * 2 * sizeof(int), fail);
933 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
935 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
936 64 * 32 * sizeof(int), fail);
937 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
938 64 * 32 * sizeof(int), fail);
939 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
940 64 * 32 * 2 * sizeof(uint16_t), fail);
941 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
942 64 * 32 * 2 * sizeof(uint16_t), fail);
943 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
944 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
945 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
946 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
948 if (s->avctx->noise_reduction) {
949 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
950 2 * 64 * sizeof(uint16_t), fail);
/* One picture range per frame thread (see ff_mpeg_update_thread_context). */
955 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
956 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
957 s->picture_count * sizeof(Picture), fail);
958 for (i = 0; i < s->picture_count; i++) {
959 avcodec_get_frame_defaults(&s->picture[i].f);
962 if (s->width && s->height) {
963 if (init_context_frame(s))
966 s->parse_context.state = -1;
969 s->context_initialized = 1;
970 s->thread_context[0] = s;
972 if (s->width && s->height) {
/* Slice contexts 1..n-1 are clones of the master; each then gets its
 * own duplicate buffers and a contiguous range of MB rows. */
974 for (i = 1; i < nb_slices; i++) {
975 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
976 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
979 for (i = 0; i < nb_slices; i++) {
980 if (init_duplicate_context(s->thread_context[i], s) < 0)
982 s->thread_context[i]->start_mb_y =
983 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
984 s->thread_context[i]->end_mb_y =
985 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
988 if (init_duplicate_context(s, s) < 0)
991 s->end_mb_y = s->mb_height;
993 s->slice_context_count = nb_slices;
998 ff_MPV_common_end(s);
1003 * Frees and resets MpegEncContext fields depending on the resolution.
1004 * Is used during resolution changes to avoid a full reinitialization of the
/* Free everything init_context_frame() allocated and NULL the derived
 * pointers; used both on teardown and on resolution change. */
1007 static int free_context_frame(MpegEncContext *s)
1011 av_freep(&s->mb_type);
1012 av_freep(&s->p_mv_table_base);
1013 av_freep(&s->b_forw_mv_table_base);
1014 av_freep(&s->b_back_mv_table_base);
1015 av_freep(&s->b_bidir_forw_mv_table_base);
1016 av_freep(&s->b_bidir_back_mv_table_base);
1017 av_freep(&s->b_direct_mv_table_base);
/* The offset working pointers alias the freed bases; clear them. */
1018 s->p_mv_table = NULL;
1019 s->b_forw_mv_table = NULL;
1020 s->b_back_mv_table = NULL;
1021 s->b_bidir_forw_mv_table = NULL;
1022 s->b_bidir_back_mv_table = NULL;
1023 s->b_direct_mv_table = NULL;
1024 for (i = 0; i < 2; i++) {
1025 for (j = 0; j < 2; j++) {
1026 for (k = 0; k < 2; k++) {
1027 av_freep(&s->b_field_mv_table_base[i][j][k]);
1028 s->b_field_mv_table[i][j][k] = NULL;
1030 av_freep(&s->b_field_select_table[i][j]);
1031 av_freep(&s->p_field_mv_table_base[i][j]);
1032 s->p_field_mv_table[i][j] = NULL;
1034 av_freep(&s->p_field_select_table[i]);
1037 av_freep(&s->dc_val_base);
1038 av_freep(&s->coded_block_base);
1039 av_freep(&s->mbintra_table);
1040 av_freep(&s->cbp_table);
1041 av_freep(&s->pred_dir_table);
1043 av_freep(&s->mbskip_table);
1045 av_freep(&s->error_status_table);
1046 av_freep(&s->er_temp_buffer);
1047 av_freep(&s->mb_index2xy);
1048 av_freep(&s->lambda_table);
1049 av_freep(&s->cplx_tab);
1050 av_freep(&s->bits_tab);
1052 s->linesize = s->uvlinesize = 0;
1054 for (i = 0; i < 3; i++)
1055 av_freep(&s->visualization_buffer[i]);
/* Re-initialize the resolution-dependent parts of the context after a
 * mid-stream size change: tear down per-slice and per-frame tables,
 * recompute mb_height, then rebuild via init_context_frame(). */
1060 int ff_MPV_common_frame_size_change(MpegEncContext *s)
1064 if (s->slice_context_count > 1) {
1065 for (i = 0; i < s->slice_context_count; i++) {
1066 free_duplicate_context(s->thread_context[i]);
1068 for (i = 1; i < s->slice_context_count; i++) {
1069 av_freep(&s->thread_context[i]);
1072 free_duplicate_context(s);
1074 free_context_frame(s);
/* Existing Pictures keep their data but are flagged for reallocation
 * at the new size on next use. */
1077 for (i = 0; i < s->picture_count; i++) {
1078 s->picture[i].needs_realloc = 1;
1081 s->last_picture_ptr =
1082 s->next_picture_ptr =
1083 s->current_picture_ptr = NULL;
/* Same mb_height computation as in ff_MPV_common_init(). */
1086 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1087 s->mb_height = (s->height + 31) / 32 * 2;
1088 else if (s->codec_id != AV_CODEC_ID_H264)
1089 s->mb_height = (s->height + 15) / 16;
1091 if ((s->width || s->height) &&
1092 av_image_check_size(s->width, s->height, 0, s->avctx))
1093 return AVERROR_INVALIDDATA;
1095 if ((err = init_context_frame(s)))
1098 s->thread_context[0] = s;
1100 if (s->width && s->height) {
1101 int nb_slices = s->slice_context_count;
/* Mirror of the slice-context setup in ff_MPV_common_init(). */
1102 if (nb_slices > 1) {
1103 for (i = 1; i < nb_slices; i++) {
1104 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1105 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1108 for (i = 0; i < nb_slices; i++) {
1109 if (init_duplicate_context(s->thread_context[i], s) < 0)
1111 s->thread_context[i]->start_mb_y =
1112 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1113 s->thread_context[i]->end_mb_y =
1114 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1117 if (init_duplicate_context(s, s) < 0)
1120 s->end_mb_y = s->mb_height;
1122 s->slice_context_count = nb_slices;
1127 ff_MPV_common_end(s);
1131 /* init common structure for both encoder and decoder */
/* Full teardown of the common context: slice contexts, parse buffer,
 * encoder allocations, pictures and resolution-dependent tables.
 * Safe to call on a partially initialized context (used as the error
 * path of ff_MPV_common_init()). */
1132 void ff_MPV_common_end(MpegEncContext *s)
1136 if (s->slice_context_count > 1) {
1137 for (i = 0; i < s->slice_context_count; i++) {
1138 free_duplicate_context(s->thread_context[i]);
1140 for (i = 1; i < s->slice_context_count; i++) {
1141 av_freep(&s->thread_context[i]);
1143 s->slice_context_count = 1;
1144 } else free_duplicate_context(s);
1146 av_freep(&s->parse_context.buffer);
1147 s->parse_context.buffer_size = 0;
1149 av_freep(&s->bitstream_buffer);
1150 s->allocated_bitstream_buffer_size = 0;
1152 av_freep(&s->avctx->stats_out);
1153 av_freep(&s->ac_stats);
1155 av_freep(&s->q_intra_matrix);
1156 av_freep(&s->q_inter_matrix);
1157 av_freep(&s->q_intra_matrix16);
1158 av_freep(&s->q_inter_matrix16);
1159 av_freep(&s->input_picture);
1160 av_freep(&s->reordered_input_picture);
1161 av_freep(&s->dct_offset);
/* Frame-thread copies share the picture array; only the owner frees it. */
1163 if (s->picture && !s->avctx->internal->is_copy) {
1164 for (i = 0; i < s->picture_count; i++) {
1165 free_picture(s, &s->picture[i]);
1168 av_freep(&s->picture);
1170 free_context_frame(s);
1172 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
1173 avcodec_default_free_buffers(s->avctx);
1175 s->context_initialized = 0;
1176 s->last_picture_ptr =
1177 s->next_picture_ptr =
1178 s->current_picture_ptr = NULL;
1179 s->linesize = s->uvlinesize = 0;
/* Initialize the derived lookup tables (max_level[], max_run[],
 * index_run[]) of a run-length table, for both the "not last" (0) and
 * "last" (1) halves of the VLC table. If static_store is non-NULL the
 * results live in the caller-provided static buffer (and the function
 * returns early when already initialized); otherwise they are
 * av_malloc()ed. */
1182 void ff_init_rl(RLTable *rl,
1183 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1185 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1186 uint8_t index_run[MAX_RUN + 1];
1187 int last, run, level, start, end, i;
1189 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1190 if (static_store && rl->max_level[0])
1193 /* compute max_level[], max_run[] and index_run[] */
1194 for (last = 0; last < 2; last++) {
/* rl->n is used as the "unset" sentinel for index_run[]. */
1203 memset(max_level, 0, MAX_RUN + 1);
1204 memset(max_run, 0, MAX_LEVEL + 1);
1205 memset(index_run, rl->n, MAX_RUN + 1);
1206 for (i = start; i < end; i++) {
1207 run = rl->table_run[i];
1208 level = rl->table_level[i];
/* Record only the first table index seen for each run value. */
1209 if (index_run[run] == rl->n)
1211 if (level > max_level[run])
1212 max_level[run] = level;
1213 if (run > max_run[level])
1214 max_run[level] = run;
/* Static path: carve the three tables out of the single static buffer
 * at fixed offsets; dynamic path: allocate and copy each one. */
1217 rl->max_level[last] = static_store[last];
1219 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1220 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1222 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1224 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1225 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1227 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1229 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1230 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Build the per-qscale combined run/level/length VLC tables
 * (rl->rl_vlc[q]) from the generic VLC table, pre-applying the
 * dequantization (level * qmul + qadd) for each of the 32 qscales so the
 * decoder's inner loop avoids a multiply per coefficient. */
1234 void ff_init_vlc_rl(RLTable *rl)
1238 for (q = 0; q < 32; q++) {
/* qadd = (q - 1) | 1: standard H.263-style rounding offset (always odd). */
1240 int qadd = (q - 1) | 1;
1246 for (i = 0; i < rl->vlc.table_size; i++) {
1247 int code = rl->vlc.table[i][0];
1248 int len = rl->vlc.table[i][1];
/* len == 0 marks an illegal code, len < 0 a link to a sub-table;
 * both get sentinel entries rather than decoded run/level pairs. */
1251 if (len == 0) { // illegal code
1254 } else if (len < 0) { // more bits needed
1258 if (code == rl->n) { // esc
/* Normal symbol: runs are stored 0-based, +192 flags "last" codes. */
1262 run = rl->table_run[code] + 1;
1263 level = rl->table_level[code] * qmul + qadd;
1264 if (code >= rl->last) run += 192;
1267 rl->rl_vlc[q][i].len = len;
1268 rl->rl_vlc[q][i].level = level;
1269 rl->rl_vlc[q][i].run = run;
/* Release the frame buffers of all non-reference pictures owned by this
 * context. The current picture is kept unless remove_current is set. */
1274 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1278 /* release non reference frames */
1279 for (i = 0; i < s->picture_count; i++) {
/* Only touch pictures this (thread) context owns (owner2 NULL or == s). */
1280 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1281 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1282 (remove_current || &s->picture[i] != s->current_picture_ptr)
1283 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1284 free_frame_buffer(s, &s->picture[i]);
/* Return nonzero if this picture slot can be (re)used: either it has no
 * frame data at all, or it is flagged for reallocation, is not delayed
 * for output, and belongs to this context. */
1289 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1291 if (pic->f.data[0] == NULL)
1293 if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
1294 if (!pic->owner2 || pic->owner2 == s)
/* Search the picture pool for a free slot within this context's range.
 * Non-shared callers try three passes in decreasing preference:
 * (1) completely empty internal slots, (2) reusable non-internal slots,
 * (3) any reusable slot. Returns an index or AVERROR_INVALIDDATA when
 * the pool is exhausted. */
1299 static int find_unused_picture(MpegEncContext *s, int shared)
1304 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1305 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1309 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1310 if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
1313 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1314 if (pic_is_unused(s, &s->picture[i]))
1319 return AVERROR_INVALIDDATA;
/* Public wrapper around find_unused_picture(): if the chosen slot is
 * marked needs_realloc, free its old storage and reset the frame to
 * defaults before handing the index back. Propagates negative errors. */
1322 int ff_find_unused_picture(MpegEncContext *s, int shared)
1324 int ret = find_unused_picture(s, shared);
1326 if (ret >= 0 && ret < s->picture_range_end) {
1327 if (s->picture[ret].needs_realloc) {
1328 s->picture[ret].needs_realloc = 0;
1329 free_picture(s, &s->picture[ret]);
1330 avcodec_get_frame_defaults(&s->picture[ret].f);
/* Recompute the per-coefficient DCT noise-reduction offsets from the
 * accumulated error statistics, separately for intra and inter blocks.
 * Counters are halved once dct_count exceeds 2^16 so the statistics form
 * a decaying average rather than growing without bound. */
1336 static void update_noise_reduction(MpegEncContext *s)
1340 for (intra = 0; intra < 2; intra++) {
1341 if (s->dct_count[intra] > (1 << 16)) {
1342 for (i = 0; i < 64; i++) {
1343 s->dct_error_sum[intra][i] >>= 1;
1345 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, rounded; the +1 in the
 * divisor guards against division by zero for unused coefficients. */
1348 for (i = 0; i < 64; i++) {
1349 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1350 s->dct_count[intra] +
1351 s->dct_error_sum[intra][i] / 2) /
1352 (s->dct_error_sum[intra][i] + 1);
1358 * generic function for encode/decode called after coding/decoding
1359 * the header and before a frame is coded/decoded.
1361 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1367 /* mark & release old frames */
1368 if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
1369 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1370 s->last_picture_ptr != s->next_picture_ptr &&
1371 s->last_picture_ptr->f.data[0]) {
1372 if (s->last_picture_ptr->owner2 == s)
1373 free_frame_buffer(s, s->last_picture_ptr);
1376 /* release forgotten pictures */
1377 /* if (mpeg124/h263) */
/* "Zombie" pictures: owned reference frames that are neither last nor
 * next — they should not exist; log (except with frame threading, where
 * this can legitimately occur) and free them. */
1379 for (i = 0; i < s->picture_count; i++) {
1380 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1381 &s->picture[i] != s->last_picture_ptr &&
1382 &s->picture[i] != s->next_picture_ptr &&
1383 s->picture[i].f.reference && !s->picture[i].needs_realloc) {
1384 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1385 av_log(avctx, AV_LOG_ERROR,
1386 "releasing zombie picture\n");
1387 free_frame_buffer(s, &s->picture[i]);
1394 ff_release_unused_pictures(s, 1);
/* Pick (or reuse) a Picture slot for the frame about to be coded. */
1396 if (s->current_picture_ptr &&
1397 s->current_picture_ptr->f.data[0] == NULL) {
1398 // we already have a unused image
1399 // (maybe it was set before reading the header)
1400 pic = s->current_picture_ptr;
1402 i = ff_find_unused_picture(s, 0);
1404 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1407 pic = &s->picture[i];
/* reference == 3 means both fields reference; droppable frames are
 * never marked as references. */
1410 pic->f.reference = 0;
1411 if (!s->droppable) {
1412 if (s->codec_id == AV_CODEC_ID_H264)
1413 pic->f.reference = s->picture_structure;
1414 else if (s->pict_type != AV_PICTURE_TYPE_B)
1415 pic->f.reference = 3;
1418 pic->f.coded_picture_number = s->coded_picture_number++;
1420 if (ff_alloc_picture(s, pic, 0) < 0)
1423 s->current_picture_ptr = pic;
1424 // FIXME use only the vars from current_pic
1425 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1426 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1427 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* For field pictures MPEG-1/2 derive top_field_first from which field
 * arrives first rather than from the header flag. */
1428 if (s->picture_structure != PICT_FRAME)
1429 s->current_picture_ptr->f.top_field_first =
1430 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1432 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1433 !s->progressive_sequence;
1434 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1437 s->current_picture_ptr->f.pict_type = s->pict_type;
1438 // if (s->flags && CODEC_FLAG_QSCALE)
1439 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1440 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1442 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* Rotate the reference pointers: B-frames keep last/next as-is. */
1444 if (s->pict_type != AV_PICTURE_TYPE_B) {
1445 s->last_picture_ptr = s->next_picture_ptr;
1447 s->next_picture_ptr = s->current_picture_ptr;
1449 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1450 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1451 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1452 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1453 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1454 s->pict_type, s->droppable);
/* Non-H.264: make sure last/next references exist even on broken
 * streams, allocating dummy frames so motion compensation has valid
 * (if garbage) data to read from. */
1456 if (s->codec_id != AV_CODEC_ID_H264) {
1457 if ((s->last_picture_ptr == NULL ||
1458 s->last_picture_ptr->f.data[0] == NULL) &&
1459 (s->pict_type != AV_PICTURE_TYPE_I ||
1460 s->picture_structure != PICT_FRAME)) {
1461 if (s->pict_type != AV_PICTURE_TYPE_I)
1462 av_log(avctx, AV_LOG_ERROR,
1463 "warning: first frame is no keyframe\n");
1464 else if (s->picture_structure != PICT_FRAME)
1465 av_log(avctx, AV_LOG_INFO,
1466 "allocate dummy last picture for field based first keyframe\n");
1468 /* Allocate a dummy frame */
1469 i = ff_find_unused_picture(s, 0);
1471 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1474 s->last_picture_ptr = &s->picture[i];
1475 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1476 s->last_picture_ptr = NULL;
/* Mark both fields complete so frame-threaded consumers never wait. */
1479 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1480 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1481 s->last_picture_ptr->f.reference = 3;
1483 if ((s->next_picture_ptr == NULL ||
1484 s->next_picture_ptr->f.data[0] == NULL) &&
1485 s->pict_type == AV_PICTURE_TYPE_B) {
1486 /* Allocate a dummy frame */
1487 i = ff_find_unused_picture(s, 0);
1489 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1492 s->next_picture_ptr = &s->picture[i];
1493 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1494 s->next_picture_ptr = NULL;
1497 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1498 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1499 s->next_picture_ptr->f.reference = 3;
1503 if (s->last_picture_ptr)
1504 ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1505 if (s->next_picture_ptr)
1506 ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1508 if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
1509 if (s->next_picture_ptr)
1510 s->next_picture_ptr->owner2 = s;
1511 if (s->last_picture_ptr)
1512 s->last_picture_ptr->owner2 = s;
1515 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1516 s->last_picture_ptr->f.data[0]));
/* Field pictures: offset data pointers to the wanted field and double
 * the linesizes so the field is addressed like a half-height frame. */
1518 if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1520 for (i = 0; i < 4; i++) {
1521 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1522 s->current_picture.f.data[i] +=
1523 s->current_picture.f.linesize[i];
1525 s->current_picture.f.linesize[i] *= 2;
1526 s->last_picture.f.linesize[i] *= 2;
1527 s->next_picture.f.linesize[i] *= 2;
1531 s->err_recognition = avctx->err_recognition;
1533 /* set dequantizer, we can't do it during init as
1534 * it might change for mpeg4 and we can't do it in the header
1535 * decode as init is not called for mpeg4 there yet */
1536 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1537 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1538 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1539 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1540 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1541 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1543 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1544 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1547 if (s->dct_error_sum) {
1548 assert(s->avctx->noise_reduction && s->encoding);
1549 update_noise_reduction(s);
1552 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1553 return ff_xvmc_field_start(s, avctx);
1558 /* generic function for encode/decode called after a
1559 * frame has been coded/decoded. */
1560 void ff_MPV_frame_end(MpegEncContext *s)
1563 /* redraw edges for the frame if decoding didn't complete */
1564 // just to make sure that all data is rendered.
/* Edge drawing is skipped for hardware-accelerated paths and for
 * CODEC_FLAG_EMU_EDGE; only reference frames need padded borders for
 * unrestricted motion vectors. */
1565 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1566 ff_xvmc_field_end(s);
1567 } else if ((s->error_count || s->encoding) &&
1568 !s->avctx->hwaccel &&
1569 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1570 s->unrestricted_mv &&
1571 s->current_picture.f.reference &&
1573 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1574 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1575 int hshift = desc->log2_chroma_w;
1576 int vshift = desc->log2_chroma_h;
/* Pad luma, then both chroma planes (dimensions scaled by the chroma
 * subsampling shifts). */
1577 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1578 s->h_edge_pos, s->v_edge_pos,
1579 EDGE_WIDTH, EDGE_WIDTH,
1580 EDGE_TOP | EDGE_BOTTOM);
1581 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1582 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1583 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1584 EDGE_TOP | EDGE_BOTTOM);
1585 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1586 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1587 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1588 EDGE_TOP | EDGE_BOTTOM);
1593 s->last_pict_type = s->pict_type;
1594 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1595 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1596 s->last_non_b_pict_type = s->pict_type;
1599 /* copy back current_picture variables */
1600 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1601 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1602 s->picture[i] = s->current_picture;
1606 assert(i < MAX_PICTURE_COUNT);
1610 /* release non-reference frames */
1611 for (i = 0; i < s->picture_count; i++) {
1612 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1613 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1614 free_frame_buffer(s, &s->picture[i]);
1618 // clear copies, to avoid confusion
1620 memset(&s->last_picture, 0, sizeof(Picture));
1621 memset(&s->next_picture, 0, sizeof(Picture));
1622 memset(&s->current_picture, 0, sizeof(Picture));
1624 s->avctx->coded_frame = &s->current_picture_ptr->f;
/* Signal full completion of this frame to any waiting decode threads. */
1626 if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
1627 ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
1632 * Draw a line from (ex, ey) -> (sx, sy).
1633 * @param w width of the image
1634 * @param h height of the image
1635 * @param stride stride/linesize of the image
1636 * @param color color of the arrow
1638 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1639 int w, int h, int stride, int color)
1643 sx = av_clip(sx, 0, w - 1);
1644 sy = av_clip(sy, 0, h - 1);
1645 ex = av_clip(ex, 0, w - 1);
1646 ey = av_clip(ey, 0, h - 1);
1648 buf[sy * stride + sx] += color;
1650 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1652 FFSWAP(int, sx, ex);
1653 FFSWAP(int, sy, ey);
1655 buf += sx + sy * stride;
1657 f = ((ey - sy) << 16) / ex;
1658 for (x = 0; x = ex; x++) {
1660 fr = (x * f) & 0xFFFF;
1661 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1662 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1666 FFSWAP(int, sx, ex);
1667 FFSWAP(int, sy, ey);
1669 buf += sx + sy * stride;
1672 f = ((ex - sx) << 16) / ey;
1675 for (y = 0; y = ey; y++) {
1677 fr = (y * f) & 0xFFFF;
1678 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1679 buf[y * stride + x + 1] += (color * fr ) >> 16;
1685 * Draw an arrow from (ex, ey) -> (sx, sy).
1686 * @param w width of the image
1687 * @param h height of the image
1688 * @param stride stride/linesize of the image
1689 * @param color color of the arrow
1691 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1692 int ey, int w, int h, int stride, int color)
/* Loosely clamp endpoints (a 100-pixel margin is tolerated; draw_line
 * does the hard in-bounds clipping itself). */
1696 sx = av_clip(sx, -100, w + 100);
1697 sy = av_clip(sy, -100, h + 100);
1698 ex = av_clip(ex, -100, w + 100);
1699 ey = av_clip(ey, -100, h + 100);
/* Only draw the two arrow-head strokes if the vector is longer than
 * 3 pixels; the head direction is the vector rotated by +/-45 degrees,
 * normalized via ff_sqrt on a fixed-point squared length. */
1704 if (dx * dx + dy * dy > 3 * 3) {
1707 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1709 // FIXME subpixel accuracy
1710 rx = ROUNDED_DIV(rx * 3 << 4, length);
1711 ry = ROUNDED_DIV(ry * 3 << 4, length);
1713 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1714 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
/* The shaft is always drawn. */
1716 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1720 * Print debugging info for the given picture.
1722 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
/* Nothing to print for hardware decoding (no MB metadata available). */
1724 if (s->avctx->hwaccel || !pict || !pict->mb_type)
/* Textual per-MB dump: skip counts, qscale, and macroblock type/partition
 * characters, one row of characters per MB row. */
1727 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1730 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1731 switch (pict->pict_type) {
1732 case AV_PICTURE_TYPE_I:
1733 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1735 case AV_PICTURE_TYPE_P:
1736 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1738 case AV_PICTURE_TYPE_B:
1739 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1741 case AV_PICTURE_TYPE_S:
1742 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1744 case AV_PICTURE_TYPE_SI:
1745 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1747 case AV_PICTURE_TYPE_SP:
1748 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1751 for (y = 0; y < s->mb_height; y++) {
1752 for (x = 0; x < s->mb_width; x++) {
1753 if (s->avctx->debug & FF_DEBUG_SKIP) {
1754 int count = s->mbskip_table[x + y * s->mb_stride];
1757 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1759 if (s->avctx->debug & FF_DEBUG_QP) {
1760 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1761 pict->qscale_table[x + y * s->mb_stride]);
1763 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1764 int mb_type = pict->mb_type[x + y * s->mb_stride];
1765 // Type & MV direction
1766 if (IS_PCM(mb_type))
1767 av_log(s->avctx, AV_LOG_DEBUG, "P");
1768 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1769 av_log(s->avctx, AV_LOG_DEBUG, "A");
1770 else if (IS_INTRA4x4(mb_type))
1771 av_log(s->avctx, AV_LOG_DEBUG, "i");
1772 else if (IS_INTRA16x16(mb_type))
1773 av_log(s->avctx, AV_LOG_DEBUG, "I");
1774 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1775 av_log(s->avctx, AV_LOG_DEBUG, "d");
1776 else if (IS_DIRECT(mb_type))
1777 av_log(s->avctx, AV_LOG_DEBUG, "D");
1778 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1779 av_log(s->avctx, AV_LOG_DEBUG, "g");
1780 else if (IS_GMC(mb_type))
1781 av_log(s->avctx, AV_LOG_DEBUG, "G");
1782 else if (IS_SKIP(mb_type))
1783 av_log(s->avctx, AV_LOG_DEBUG, "S");
1784 else if (!USES_LIST(mb_type, 1))
1785 av_log(s->avctx, AV_LOG_DEBUG, ">");
1786 else if (!USES_LIST(mb_type, 0))
1787 av_log(s->avctx, AV_LOG_DEBUG, "<");
1789 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1790 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second character: partition segmentation of the MB. */
1794 if (IS_8X8(mb_type))
1795 av_log(s->avctx, AV_LOG_DEBUG, "+");
1796 else if (IS_16X8(mb_type))
1797 av_log(s->avctx, AV_LOG_DEBUG, "-");
1798 else if (IS_8X16(mb_type))
1799 av_log(s->avctx, AV_LOG_DEBUG, "|");
1800 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1801 av_log(s->avctx, AV_LOG_DEBUG, " ");
1803 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* Third character: interlaced flag. */
1806 if (IS_INTERLACED(mb_type))
1807 av_log(s->avctx, AV_LOG_DEBUG, "=");
1809 av_log(s->avctx, AV_LOG_DEBUG, " ");
1812 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* Visual overlays: motion vectors, qscale and MB type are painted
 * directly into a copy of the frame (visualization_buffer). */
1816 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1817 (s->avctx->debug_mv)) {
1818 const int shift = 1 + s->quarter_sample;
1822 int h_chroma_shift, v_chroma_shift, block_height;
1823 const int width = s->avctx->width;
1824 const int height = s->avctx->height;
1825 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1826 const int mv_stride = (s->mb_width << mv_sample_log2) +
1827 (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
1828 s->low_delay = 0; // needed to see the vectors without trashing the buffers
1830 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1831 &h_chroma_shift, &v_chroma_shift);
1832 for (i = 0; i < 3; i++) {
1833 memcpy(s->visualization_buffer[i], pict->data[i],
1834 (i == 0) ? pict->linesize[i] * height:
1835 pict->linesize[i] * height >> v_chroma_shift);
1836 pict->data[i] = s->visualization_buffer[i];
1838 pict->type = FF_BUFFER_TYPE_COPY;
1839 ptr = pict->data[0];
1840 block_height = 16 >> v_chroma_shift;
1842 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1844 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1845 const int mb_index = mb_x + mb_y * s->mb_stride;
1846 if ((s->avctx->debug_mv) && pict->motion_val) {
/* type 0/1/2 = P forward, B forward, B backward vectors. */
1848 for (type = 0; type < 3; type++) {
1852 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1853 (pict->pict_type!= AV_PICTURE_TYPE_P))
1858 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1859 (pict->pict_type!= AV_PICTURE_TYPE_B))
1864 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1865 (pict->pict_type!= AV_PICTURE_TYPE_B))
1870 if (!USES_LIST(pict->mb_type[mb_index], direction))
1873 if (IS_8X8(pict->mb_type[mb_index])) {
1875 for (i = 0; i < 4; i++) {
1876 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1877 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1878 int xy = (mb_x * 2 + (i & 1) +
1879 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1880 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1881 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1882 draw_arrow(ptr, sx, sy, mx, my, width,
1883 height, s->linesize, 100);
1885 } else if (IS_16X8(pict->mb_type[mb_index])) {
1887 for (i = 0; i < 2; i++) {
1888 int sx = mb_x * 16 + 8;
1889 int sy = mb_y * 16 + 4 + 8 * i;
1890 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1891 int mx = (pict->motion_val[direction][xy][0] >> shift);
1892 int my = (pict->motion_val[direction][xy][1] >> shift);
1894 if (IS_INTERLACED(pict->mb_type[mb_index]))
1897 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1898 height, s->linesize, 100);
1900 } else if (IS_8X16(pict->mb_type[mb_index])) {
1902 for (i = 0; i < 2; i++) {
1903 int sx = mb_x * 16 + 4 + 8 * i;
1904 int sy = mb_y * 16 + 8;
1905 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1906 int mx = pict->motion_val[direction][xy][0] >> shift;
1907 int my = pict->motion_val[direction][xy][1] >> shift;
1909 if (IS_INTERLACED(pict->mb_type[mb_index]))
1912 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1913 height, s->linesize, 100);
1916 int sx = mb_x * 16 + 8;
1917 int sy = mb_y * 16 + 8;
1918 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
/* NOTE(review): '+' binds tighter than '>>' in C, so these two lines
 * shift by (shift + sx) / (shift + sy) instead of adding sx/sy after
 * the shift. The 8x8 branch above writes "(... >> shift) + sx" — this
 * looks like a missing-parentheses bug; confirm against upstream. */
1919 int mx = pict->motion_val[direction][xy][0] >> shift + sx;
1920 int my = pict->motion_val[direction][xy][1] >> shift + sy;
1921 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* Qscale overlay: paint chroma planes with a brightness proportional
 * to the MB's quantizer. */
1925 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1926 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1927 0x0101010101010101ULL;
1929 for (y = 0; y < block_height; y++) {
1930 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1931 (block_height * mb_y + y) *
1932 pict->linesize[1]) = c;
1933 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1934 (block_height * mb_y + y) *
1935 pict->linesize[2]) = c;
1938 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1940 int mb_type = pict->mb_type[mb_index];
/* MB-type overlay: encode the type as a hue via the U/V planes. */
1943 #define COLOR(theta, r) \
1944 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1945 v = (int)(128 + r * sin(theta * 3.141592 / 180));
1949 if (IS_PCM(mb_type)) {
1951 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1952 IS_INTRA16x16(mb_type)) {
1954 } else if (IS_INTRA4x4(mb_type)) {
1956 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1958 } else if (IS_DIRECT(mb_type)) {
1960 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1962 } else if (IS_GMC(mb_type)) {
1964 } else if (IS_SKIP(mb_type)) {
1966 } else if (!USES_LIST(mb_type, 1)) {
1968 } else if (!USES_LIST(mb_type, 0)) {
1971 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1975 u *= 0x0101010101010101ULL;
1976 v *= 0x0101010101010101ULL;
1977 for (y = 0; y < block_height; y++) {
1978 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1979 (block_height * mb_y + y) * pict->linesize[1]) = u;
1980 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1981 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* Segmentation lines: XOR luma so partition boundaries stay visible
 * on any background. */
1985 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1986 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1987 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1988 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1989 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1991 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1992 for (y = 0; y < 16; y++)
1993 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1994 pict->linesize[0]] ^= 0x80;
1996 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
1997 int dm = 1 << (mv_sample_log2 - 2);
1998 for (i = 0; i < 4; i++) {
1999 int sx = mb_x * 16 + 8 * (i & 1);
2000 int sy = mb_y * 16 + 8 * (i >> 1);
2001 int xy = (mb_x * 2 + (i & 1) +
2002 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2004 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
2005 if (mv[0] != mv[dm] ||
2006 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2007 for (y = 0; y < 8; y++)
2008 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2009 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2010 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2011 pict->linesize[0]) ^= 0x8080808080808080ULL;
2015 if (IS_INTERLACED(mb_type) &&
2016 s->codec_id == AV_CODEC_ID_H264) {
/* Clear the skip flag so a later pass redraws this MB. */
2020 s->mbskip_table[mb_index] = 0;
2027 * find the lowest MB row referenced in the MVs
/* Used by frame threading to know how far the reference frame must be
 * decoded before this MB can be motion-compensated. Returns a clamped MB
 * row index, or the last row as a conservative fallback for field
 * pictures / GMC (where the mapping is not straightforward). */
2029 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* qpel_shift normalizes half-pel vectors to the quarter-pel scale. */
2031 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2032 int my, off, i, mvs;
2034 if (s->picture_structure != PICT_FRAME || s->mcsel)
/* mvs (the number of vectors to scan) is set per mv_type by the switch. */
2037 switch (s->mv_type) {
2051 for (i = 0; i < mvs; i++) {
2052 my = s->mv[dir][i][1]<<qpel_shift;
2053 my_max = FFMAX(my_max, my);
2054 my_min = FFMIN(my_min, my);
/* Quarter-pel units -> MB rows: divide by 64 (16 pixels * 4), rounding up. */
2057 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2059 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2061 return s->mb_height-1;
2064 /* put block[] to dest[] */
/* Dequantize an intra block in place, then IDCT and *store* (overwrite)
 * the result into dest. */
2065 static inline void put_dct(MpegEncContext *s,
2066 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2068 s->dct_unquantize_intra(s, block, i, qscale);
2069 s->dsp.idct_put (dest, line_size, block);
2072 /* add block[] to dest[] */
/* IDCT an already-dequantized residual block and *add* it onto the
 * prediction in dest. Skipped entirely when the block has no coded
 * coefficients (block_last_index < 0). */
2073 static inline void add_dct(MpegEncContext *s,
2074 DCTELEM *block, int i, uint8_t *dest, int line_size)
2076 if (s->block_last_index[i] >= 0) {
2077 s->dsp.idct_add (dest, line_size, block);
/* Like add_dct(), but the inter block still needs dequantization first.
 * No-op for blocks without coded coefficients. */
2081 static inline void add_dequant_dct(MpegEncContext *s,
2082 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2084 if (s->block_last_index[i] >= 0) {
2085 s->dct_unquantize_inter(s, block, i, qscale);
2087 s->dsp.idct_add (dest, line_size, block);
2092 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra prediction state (DC predictors to 1024, AC
 * coefficients to 0, coded_block flags) for the current macroblock so
 * that a following intra MB does not predict from stale inter data. */
2094 void ff_clean_intra_table_entries(MpegEncContext *s)
2096 int wrap = s->b8_stride;
2097 int xy = s->block_index[0];
/* Luma: four 8x8 blocks at xy, xy+1, xy+wrap, xy+1+wrap. */
2100 s->dc_val[0][xy + 1 ] =
2101 s->dc_val[0][xy + wrap] =
2102 s->dc_val[0][xy + 1 + wrap] = 1024;
2104 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2105 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
/* coded_block is only maintained for MSMPEG4 v3 and later. */
2106 if (s->msmpeg4_version>=3) {
2107 s->coded_block[xy ] =
2108 s->coded_block[xy + 1 ] =
2109 s->coded_block[xy + wrap] =
2110 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma: one block per plane, addressed on the MB grid. */
2113 wrap = s->mb_stride;
2114 xy = s->mb_x + s->mb_y * wrap;
2116 s->dc_val[2][xy] = 1024;
2118 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2119 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2121 s->mbintra_table[xy]= 0;
2124 /* generic function called after a macroblock has been parsed by the
2125 decoder or after it has been encoded by the encoder.
2127 Important variables used:
2128 s->mb_intra : true if intra macroblock
2129 s->mv_dir : motion vector direction
2130 s->mv_type : motion vector type
2131 s->mv : motion vector
2132 s->interlaced_dct : true if interlaced dct used (mpeg2)
2134 static av_always_inline
2135 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2138 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2139 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2140 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2144 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2145 /* save DCT coefficients */
2147 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2148 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2150 for(j=0; j<64; j++){
2151 *dct++ = block[i][s->dsp.idct_permutation[j]];
2152 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2154 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2158 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2160 /* update DC predictors for P macroblocks */
2162 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2163 if(s->mbintra_table[mb_xy])
2164 ff_clean_intra_table_entries(s);
2168 s->last_dc[2] = 128 << s->intra_dc_precision;
2171 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2172 s->mbintra_table[mb_xy]=1;
2174 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2175 uint8_t *dest_y, *dest_cb, *dest_cr;
2176 int dct_linesize, dct_offset;
2177 op_pixels_func (*op_pix)[4];
2178 qpel_mc_func (*op_qpix)[16];
2179 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2180 const int uvlinesize = s->current_picture.f.linesize[1];
2181 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2182 const int block_size = 8;
2184 /* avoid copy if macroblock skipped in last frame too */
2185 /* skip only during decoding as we might trash the buffers during encoding a bit */
2187 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2189 if (s->mb_skipped) {
2191 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2193 } else if(!s->current_picture.f.reference) {
2196 *mbskip_ptr = 0; /* not skipped */
2200 dct_linesize = linesize << s->interlaced_dct;
2201 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2205 dest_cb= s->dest[1];
2206 dest_cr= s->dest[2];
2208 dest_y = s->b_scratchpad;
2209 dest_cb= s->b_scratchpad+16*linesize;
2210 dest_cr= s->b_scratchpad+32*linesize;
2214 /* motion handling */
2215 /* decoding or more than one mb_type (MC was already done otherwise) */
2218 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2219 if (s->mv_dir & MV_DIR_FORWARD) {
2220 ff_thread_await_progress(&s->last_picture_ptr->f,
2221 ff_MPV_lowest_referenced_row(s, 0),
2224 if (s->mv_dir & MV_DIR_BACKWARD) {
2225 ff_thread_await_progress(&s->next_picture_ptr->f,
2226 ff_MPV_lowest_referenced_row(s, 1),
2231 op_qpix= s->me.qpel_put;
2232 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2233 op_pix = s->dsp.put_pixels_tab;
2235 op_pix = s->dsp.put_no_rnd_pixels_tab;
2237 if (s->mv_dir & MV_DIR_FORWARD) {
2238 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2239 op_pix = s->dsp.avg_pixels_tab;
2240 op_qpix= s->me.qpel_avg;
2242 if (s->mv_dir & MV_DIR_BACKWARD) {
2243 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2247 /* skip dequant / idct if we are really late ;) */
2248 if(s->avctx->skip_idct){
2249 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2250 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2251 || s->avctx->skip_idct >= AVDISCARD_ALL)
2255 /* add dct residue */
2256 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2257 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2258 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2259 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2260 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2261 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2263 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2264 if (s->chroma_y_shift){
2265 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2266 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2270 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2271 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2272 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2273 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2276 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2277 add_dct(s, block[0], 0, dest_y , dct_linesize);
2278 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2279 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2280 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2282 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2283 if(s->chroma_y_shift){//Chroma420
2284 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2285 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2288 dct_linesize = uvlinesize << s->interlaced_dct;
2289 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2291 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2292 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2293 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2294 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2295 if(!s->chroma_x_shift){//Chroma444
2296 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2297 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2298 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2299 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2304 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2305 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2308 /* dct only in intra block */
2309 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2310 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2311 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2312 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2313 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2315 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2316 if(s->chroma_y_shift){
2317 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2318 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2322 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2323 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2324 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2325 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2329 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2330 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2331 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2332 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2334 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2335 if(s->chroma_y_shift){
2336 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2337 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2340 dct_linesize = uvlinesize << s->interlaced_dct;
2341 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2343 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2344 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2345 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2346 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2347 if(!s->chroma_x_shift){//Chroma444
2348 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2349 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2350 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2351 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2359 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2360 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2361 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2366 void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2368 if(s->out_format == FMT_MPEG1) {
2369 MPV_decode_mb_internal(s, block, 1);
2372 MPV_decode_mb_internal(s, block, 0);
2376 * @param h is the normal height, this will be reduced automatically if needed for the last row
2378 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2379 const int field_pic= s->picture_structure != PICT_FRAME;
2385 if (!s->avctx->hwaccel
2386 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2387 && s->unrestricted_mv
2388 && s->current_picture.f.reference
2390 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2391 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
2392 int sides = 0, edge_h;
2393 int hshift = desc->log2_chroma_w;
2394 int vshift = desc->log2_chroma_h;
2395 if (y==0) sides |= EDGE_TOP;
2396 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2398 edge_h= FFMIN(h, s->v_edge_pos - y);
2400 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2401 s->linesize, s->h_edge_pos, edge_h,
2402 EDGE_WIDTH, EDGE_WIDTH, sides);
2403 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2404 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2405 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2406 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2407 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2408 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2411 h= FFMIN(h, s->avctx->height - y);
2413 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2415 if (s->avctx->draw_horiz_band) {
2417 int offset[AV_NUM_DATA_POINTERS];
2420 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2421 src = &s->current_picture_ptr->f;
2422 else if(s->last_picture_ptr)
2423 src = &s->last_picture_ptr->f;
2427 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2428 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2431 offset[0]= y * s->linesize;
2433 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2434 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2440 s->avctx->draw_horiz_band(s->avctx, src, offset,
2441 y, s->picture_structure, h);
2445 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2446 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2447 const int uvlinesize = s->current_picture.f.linesize[1];
2448 const int mb_size= 4;
2450 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2451 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2452 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2453 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2454 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2455 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2456 //block_index is not used by mpeg2, so it is not affected by chroma_format
2458 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2459 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2460 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2462 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2464 if(s->picture_structure==PICT_FRAME){
2465 s->dest[0] += s->mb_y * linesize << mb_size;
2466 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2467 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2469 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2470 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2471 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2472 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2477 void ff_mpeg_flush(AVCodecContext *avctx){
2479 MpegEncContext *s = avctx->priv_data;
2481 if(s==NULL || s->picture==NULL)
2484 for(i=0; i<s->picture_count; i++){
2485 if (s->picture[i].f.data[0] &&
2486 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2487 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2488 free_frame_buffer(s, &s->picture[i]);
2490 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2492 s->mb_x= s->mb_y= 0;
2494 s->parse_context.state= -1;
2495 s->parse_context.frame_start_found= 0;
2496 s->parse_context.overread= 0;
2497 s->parse_context.overread_index= 0;
2498 s->parse_context.index= 0;
2499 s->parse_context.last_index= 0;
2500 s->bitstream_buffer_size=0;
2504 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2505 DCTELEM *block, int n, int qscale)
2507 int i, level, nCoeffs;
2508 const uint16_t *quant_matrix;
2510 nCoeffs= s->block_last_index[n];
2513 block[0] = block[0] * s->y_dc_scale;
2515 block[0] = block[0] * s->c_dc_scale;
2516 /* XXX: only mpeg1 */
2517 quant_matrix = s->intra_matrix;
2518 for(i=1;i<=nCoeffs;i++) {
2519 int j= s->intra_scantable.permutated[i];
2524 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2525 level = (level - 1) | 1;
2528 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2529 level = (level - 1) | 1;
2536 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2537 DCTELEM *block, int n, int qscale)
2539 int i, level, nCoeffs;
2540 const uint16_t *quant_matrix;
2542 nCoeffs= s->block_last_index[n];
2544 quant_matrix = s->inter_matrix;
2545 for(i=0; i<=nCoeffs; i++) {
2546 int j= s->intra_scantable.permutated[i];
2551 level = (((level << 1) + 1) * qscale *
2552 ((int) (quant_matrix[j]))) >> 4;
2553 level = (level - 1) | 1;
2556 level = (((level << 1) + 1) * qscale *
2557 ((int) (quant_matrix[j]))) >> 4;
2558 level = (level - 1) | 1;
2565 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2566 DCTELEM *block, int n, int qscale)
2568 int i, level, nCoeffs;
2569 const uint16_t *quant_matrix;
2571 if(s->alternate_scan) nCoeffs= 63;
2572 else nCoeffs= s->block_last_index[n];
2575 block[0] = block[0] * s->y_dc_scale;
2577 block[0] = block[0] * s->c_dc_scale;
2578 quant_matrix = s->intra_matrix;
2579 for(i=1;i<=nCoeffs;i++) {
2580 int j= s->intra_scantable.permutated[i];
2585 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2588 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2595 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2596 DCTELEM *block, int n, int qscale)
2598 int i, level, nCoeffs;
2599 const uint16_t *quant_matrix;
2602 if(s->alternate_scan) nCoeffs= 63;
2603 else nCoeffs= s->block_last_index[n];
2606 block[0] = block[0] * s->y_dc_scale;
2608 block[0] = block[0] * s->c_dc_scale;
2609 quant_matrix = s->intra_matrix;
2610 for(i=1;i<=nCoeffs;i++) {
2611 int j= s->intra_scantable.permutated[i];
2616 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2619 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2628 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2629 DCTELEM *block, int n, int qscale)
2631 int i, level, nCoeffs;
2632 const uint16_t *quant_matrix;
2635 if(s->alternate_scan) nCoeffs= 63;
2636 else nCoeffs= s->block_last_index[n];
2638 quant_matrix = s->inter_matrix;
2639 for(i=0; i<=nCoeffs; i++) {
2640 int j= s->intra_scantable.permutated[i];
2645 level = (((level << 1) + 1) * qscale *
2646 ((int) (quant_matrix[j]))) >> 4;
2649 level = (((level << 1) + 1) * qscale *
2650 ((int) (quant_matrix[j]))) >> 4;
2659 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2660 DCTELEM *block, int n, int qscale)
2662 int i, level, qmul, qadd;
2665 assert(s->block_last_index[n]>=0);
2671 block[0] = block[0] * s->y_dc_scale;
2673 block[0] = block[0] * s->c_dc_scale;
2674 qadd = (qscale - 1) | 1;
2681 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2683 for(i=1; i<=nCoeffs; i++) {
2687 level = level * qmul - qadd;
2689 level = level * qmul + qadd;
2696 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2697 DCTELEM *block, int n, int qscale)
2699 int i, level, qmul, qadd;
2702 assert(s->block_last_index[n]>=0);
2704 qadd = (qscale - 1) | 1;
2707 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2709 for(i=0; i<=nCoeffs; i++) {
2713 level = level * qmul - qadd;
2715 level = level * qmul + qadd;
2723 * set qscale and update qscale dependent variables.
2725 void ff_set_qscale(MpegEncContext * s, int qscale)
2729 else if (qscale > 31)
2733 s->chroma_qscale= s->chroma_qscale_table[qscale];
2735 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2736 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2739 void ff_MPV_report_decode_progress(MpegEncContext *s)
2741 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2742 ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);