2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
38 #include "xvmc_internal.h"
/* Forward declarations for the per-standard DCT coefficient dequantization
 * routines (MPEG-1, MPEG-2 and H.263, intra and inter variants).  They are
 * installed as function pointers on the context in ff_dct_common_init()
 * below, where arch-specific init may later override them. */
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
61 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default luma-to-chroma qscale mapping: the identity (chroma qscale equals
 * luma qscale).  Installed in ff_MPV_common_defaults() below.
 * NOTE(review): the closing "};" is not visible in this excerpt. */
67 static const uint8_t ff_default_chroma_qscale_table[32] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
70 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale table: the DC coefficient scale is a constant 8 for every
 * qscale value (index is the quantizer scale, 0..127).
 * NOTE(review): the closing "};" is not visible in this excerpt. */
73 const uint8_t ff_mpeg1_dc_scale_table[128] = {
74 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table, constant 4 for all qscale indices.  Presumably the
 * table for intra_dc_precision == 1 (see ff_mpeg2_dc_scale_table below) —
 * confirm against ISO/IEC 13818-2.
 * NOTE(review): the closing "};" is not visible in this excerpt. */
85 static const uint8_t mpeg2_dc_scale_table1[128] = {
86 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table, constant 2 for all qscale indices.  Presumably the
 * table for intra_dc_precision == 2 — confirm against ISO/IEC 13818-2.
 * NOTE(review): the closing "};" is not visible in this excerpt. */
97 static const uint8_t mpeg2_dc_scale_table2[128] = {
98 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table, constant 1 for all qscale indices.  Presumably the
 * table for intra_dc_precision == 3 — confirm against ISO/IEC 13818-2.
 * NOTE(review): the closing "};" is not visible in this excerpt. */
109 static const uint8_t mpeg2_dc_scale_table3[128] = {
110 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Selector for the MPEG-2 DC scale tables above; indexed, presumably, by
 * intra_dc_precision (0..3) — TODO confirm against callers.  Entry 0 reuses
 * the constant-8 MPEG-1 table.
 * NOTE(review): the closing "};" is not visible in this excerpt. */
121 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
122 ff_mpeg1_dc_scale_table,
123 mpeg2_dc_scale_table1,
124 mpeg2_dc_scale_table2,
125 mpeg2_dc_scale_table3,
/* Pixel-format candidate lists advertised by 4:2:0 decoders: a plain
 * software list and a hardware-accel list (DXVA2/VA-API entries visible).
 * NOTE(review): the software-list entries, remaining hwaccel entries and
 * the terminating "};" are missing from this excerpt. */
128 const enum AVPixelFormat ff_pixfmt_list_420[] = {
133 const enum AVPixelFormat ff_hwaccel_pixfmt_list_420[] = {
135 AV_PIX_FMT_DXVA2_VLD,
138 AV_PIX_FMT_VAAPI_VLD,
/* Error-resilience callback: reconstruct a single macroblock at (mb_x, mb_y)
 * with the supplied motion parameters, then decode it via ff_MPV_decode_mb().
 * Registered as er->decode_mb in init_er() below.
 * NOTE(review): several lines (opening brace, some assignments, closing
 * brace) are missing from this excerpt. */
150 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
152 int mb_x, int mb_y, int mb_intra, int mb_skipped)
154 MpegEncContext *s = opaque;
157 s->mv_type = mv_type;
158 s->mb_intra = mb_intra;
159 s->mb_skipped = mb_skipped;
/* copy the caller-provided motion vectors into the context */
162 memcpy(s->mv, mv, sizeof(*mv));
164 ff_init_block_index(s);
165 ff_update_block_index(s);
167 s->dsp.clear_blocks(s->block[0]);
/* destination pointers: luma at 16x16 MB granularity; chroma scaled by the
 * chroma subsampling shifts of the current pixel format */
169 s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
170 s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
171 s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
174 ff_MPV_decode_mb(s, s->block);
/* Scan the buffer for an MPEG start code (0x000001xx), carrying the last
 * bytes seen across calls in *state as a sliding 32-bit window.
 * NOTE(review): loop body braces, the main scan loop and the return
 * statements are missing from this excerpt — the comments below describe
 * only what is visible. */
177 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
179 uint32_t * restrict state)
/* first, continue a start code that may straddle the previous buffer */
187 for (i = 0; i < 3; i++) {
188 uint32_t tmp = *state << 8;
189 *state = tmp + *(p++);
190 if (tmp == 0x100 || p == end)
/* fast skip: step 1-3 bytes depending on which trailing bytes rule out a
 * 00 00 01 prefix ending here */
195 if (p[-1] > 1 ) p += 3;
196 else if (p[-2] ) p += 2;
197 else if (p[-3]|(p[-1]-1)) p++;
/* clamp to the buffer end before re-reading the last 4 bytes */
204 p = FFMIN(p, end) - 4;
210 /* init common dct for both encoder and decoder */
211 av_cold int ff_dct_common_init(MpegEncContext *s)
213 ff_dsputil_init(&s->dsp, s->avctx);
214 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* install the portable C dequantizers; the arch-specific init calls below
 * may replace them with optimized versions */
216 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
217 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
218 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
219 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
220 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* bit-exact output requested: use the bitexact MPEG-2 intra variant */
221 if (s->flags & CODEC_FLAG_BITEXACT)
222 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
223 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* per-architecture overrides (x86, Alpha, ARM, AltiVec, Blackfin);
 * NOTE(review): the ARCH_*/HAVE_* preprocessor guards around these calls
 * are missing from this excerpt */
226 ff_MPV_common_init_x86(s);
228 ff_MPV_common_init_axp(s);
230 ff_MPV_common_init_arm(s);
232 ff_MPV_common_init_altivec(s);
234 ff_MPV_common_init_bfin(s);
237 /* load & permutate scantables
238 * note: only wmv uses different ones
/* inter/intra scantables depend on alternate_scan; the h/v intra tables
 * are always the alternate horizontal/vertical scans */
240 if (s->alternate_scan) {
241 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
242 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
244 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
245 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
247 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
248 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy a Picture and tag the destination as a copy (FF_BUFFER_TYPE_COPY) so
 * buffer-release logic does not free it twice.
 * NOTE(review): the body is only partially visible in this excerpt — the
 * actual struct copy is missing. */
253 void ff_copy_picture(Picture *dst, Picture *src)
256 dst->f.type = FF_BUFFER_TYPE_COPY;
260 * Release a frame buffer
262 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
264 /* WM Image / Screen codecs allocate internal buffers with different
265 * dimensions / colorspaces; ignore user-defined callbacks for these. */
266 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
267 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
268 s->codec_id != AV_CODEC_ID_MSS2)
269 ff_thread_release_buffer(s->avctx, &pic->f);
/* NOTE(review): the "else" line pairing this with the branch above is
 * missing from this excerpt; presumably the WM codecs fall through to the
 * default release callback here. */
271 avcodec_default_release_buffer(s->avctx, &pic->f);
/* hwaccel private data is owned by the Picture; free it on release */
272 av_freep(&pic->f.hwaccel_picture_private);
/* Allocate the linesize-dependent scratch buffers (edge emulation buffer and
 * the motion-estimation / RD / OBMC scratchpads).  Returns 0 on success,
 * AVERROR(ENOMEM) on allocation failure (the FF_ALLOCZ_OR_GOTO macros jump
 * to a fail label not visible in this excerpt). */
275 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
277 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
279 // edge emu needs blocksize + filter length - 1
280 // (= 17x17 for halfpel / 21x21 for h264)
281 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
282 // at uvlinesize. It supports only YUV420 so 24x24 is enough
283 // linesize * interlaced * MBsize
284 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
287 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 2,
/* the scratchpads all alias the single me.scratchpad allocation; only the
 * OBMC scratchpad is offset into it */
289 s->me.temp = s->me.scratchpad;
290 s->rd_scratchpad = s->me.scratchpad;
291 s->b_scratchpad = s->me.scratchpad;
292 s->obmc_scratchpad = s->me.scratchpad + 16;
/* failure path: drop the edge buffer so callers can detect the half-done
 * state and retry */
296 av_freep(&s->edge_emu_buffer);
297 return AVERROR(ENOMEM);
301 * Allocate a frame buffer
303 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* allocate hwaccel private data first, if the hwaccel declares any */
307 if (s->avctx->hwaccel) {
308 assert(!pic->f.hwaccel_picture_private);
309 if (s->avctx->hwaccel->priv_data_size) {
310 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
311 if (!pic->f.hwaccel_picture_private) {
312 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* WM Image / Screen codecs bypass user get_buffer callbacks (see
 * free_frame_buffer for the matching release path) */
318 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
319 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
320 s->codec_id != AV_CODEC_ID_MSS2)
321 r = ff_thread_get_buffer(s->avctx, &pic->f);
323 r = avcodec_default_get_buffer(s->avctx, &pic->f);
/* sanity-check the buffer the callback returned */
325 if (r < 0 || !pic->f.type || !pic->f.data[0]) {
326 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
327 r, pic->f.type, pic->f.data[0]);
328 av_freep(&pic->f.hwaccel_picture_private);
/* strides must not change between frames once the context recorded them */
332 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
333 s->uvlinesize != pic->f.linesize[1])) {
334 av_log(s->avctx, AV_LOG_ERROR,
335 "get_buffer() failed (stride changed)\n");
336 free_frame_buffer(s, pic);
/* U and V planes must share a stride */
340 if (pic->f.linesize[1] != pic->f.linesize[2]) {
341 av_log(s->avctx, AV_LOG_ERROR,
342 "get_buffer() failed (uv stride mismatch)\n");
343 free_frame_buffer(s, pic);
/* lazily allocate linesize-dependent scratch buffers now that the real
 * stride is known */
347 if (!s->edge_emu_buffer &&
348 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
349 av_log(s->avctx, AV_LOG_ERROR,
350 "get_buffer() failed to allocate context scratch buffers.\n");
351 free_frame_buffer(s, pic);
359 * Allocate a Picture.
360 * The pixels are allocated/set by calling get_buffer() if shared = 0
362 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
364 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
366 // the + 1 is needed so memset(,,stride*height) does not sig11
368 const int mb_array_size = s->mb_stride * s->mb_height;
369 const int b8_array_size = s->b8_stride * s->mb_height * 2;
370 const int b4_array_size = s->b4_stride * s->mb_height * 4;
/* shared path: the caller supplied the pixels, just mark the buffer type;
 * NOTE(review): the if (shared) / else lines are missing from this excerpt */
375 assert(pic->f.data[0]);
376 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
377 pic->f.type = FF_BUFFER_TYPE_SHARED;
379 assert(!pic->f.data[0]);
381 if (alloc_frame_buffer(s, pic) < 0)
/* record the strides the buffer callback chose */
384 s->linesize = pic->f.linesize[0];
385 s->uvlinesize = pic->f.linesize[1];
/* first-time side-data allocation for this Picture */
388 if (pic->f.qscale_table == NULL) {
/* encoder statistics tables; NOTE(review): the s->encoding guard around
 * these appears to be missing from this excerpt */
390 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
391 mb_array_size * sizeof(int16_t), fail)
392 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
393 mb_array_size * sizeof(int16_t), fail)
394 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
395 mb_array_size * sizeof(int8_t ), fail)
398 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
399 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
400 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
401 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
403 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
404 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
/* the public pointers skip one MB row plus one column of padding */
406 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
407 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
/* H.264 stores motion vectors at 4x4 (b4) granularity ... */
408 if (s->out_format == FMT_H264) {
409 for (i = 0; i < 2; i++) {
410 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
411 2 * (b4_array_size + 4) * sizeof(int16_t),
413 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
414 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
415 4 * mb_array_size * sizeof(uint8_t), fail)
417 pic->f.motion_subsample_log2 = 2;
/* ... while H.263-family and the encoder use 8x8 (b8) granularity */
418 } else if (s->out_format == FMT_H263 || s->encoding ||
419 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
420 for (i = 0; i < 2; i++) {
421 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
422 2 * (b8_array_size + 4) * sizeof(int16_t),
424 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
425 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
426 4 * mb_array_size * sizeof(uint8_t), fail)
428 pic->f.motion_subsample_log2 = 3;
430 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
431 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
432 64 * mb_array_size * sizeof(int16_t) * 6, fail)
434 pic->f.qstride = s->mb_stride;
435 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
436 1 * sizeof(AVPanScan), fail)
442 fail: // for the FF_ALLOCZ_OR_GOTO macro
444 free_frame_buffer(s, pic);
449 * Deallocate a picture.
451 static void free_picture(MpegEncContext *s, Picture *pic)
/* release the pixel buffer unless it is caller-owned (shared) */
455 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
456 free_frame_buffer(s, pic);
/* free all per-picture side-data tables allocated in ff_alloc_picture() */
459 av_freep(&pic->mb_var);
460 av_freep(&pic->mc_mb_var);
461 av_freep(&pic->mb_mean);
462 av_freep(&pic->f.mbskip_table);
463 av_freep(&pic->qscale_table_base);
464 pic->f.qscale_table = NULL;
465 av_freep(&pic->mb_type_base);
466 pic->f.mb_type = NULL;
467 av_freep(&pic->f.dct_coeff);
468 av_freep(&pic->f.pan_scan);
/* NOTE(review): duplicate of the mb_type = NULL assignment above —
 * harmless, but redundant */
469 pic->f.mb_type = NULL;
470 for (i = 0; i < 2; i++) {
471 av_freep(&pic->motion_val_base[i]);
472 av_freep(&pic->f.ref_index[i]);
473 pic->f.motion_val[i] = NULL;
/* for shared buffers only clear the data pointers; the memory belongs to
 * the caller */
476 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
477 for (i = 0; i < 4; i++) {
479 pic->f.data[i] = NULL;
/* Allocate the per-thread (slice-context) buffers: motion-estimation maps,
 * DCT error sums, the block arrays and (for H.263-family) the AC prediction
 * values.  Returns 0 on success, -1 on allocation failure. */
485 static int init_duplicate_context(MpegEncContext *s)
487 int y_size = s->b8_stride * (2 * s->mb_height + 1);
488 int c_size = s->mb_stride * (s->mb_height + 1);
489 int yc_size = y_size + 2 * c_size;
/* scratch buffers are allocated lazily once the linesize is known
 * (see ff_mpv_frame_size_alloc) */
497 s->obmc_scratchpad = NULL;
/* encoder-only buffers; NOTE(review): the s->encoding guard appears to be
 * missing from this excerpt */
500 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
501 ME_MAP_SIZE * sizeof(uint32_t), fail)
502 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
503 ME_MAP_SIZE * sizeof(uint32_t), fail)
504 if (s->avctx->noise_reduction) {
505 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
506 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered */
509 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
510 s->block = s->blocks[0];
512 for (i = 0; i < 12; i++) {
513 s->pblocks[i] = &s->block[i];
516 if (s->out_format == FMT_H263) {
/* AC prediction values: luma plane followed by the two chroma planes */
518 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
519 yc_size * sizeof(int16_t) * 16, fail);
520 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
521 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
522 s->ac_val[2] = s->ac_val[1] + c_size;
527 return -1; // free() through ff_MPV_common_end()
/* Free everything allocated by init_duplicate_context() plus the lazily
 * allocated scratch buffers.  Safe to call on a partially initialized
 * context (av_freep tolerates NULL). */
530 static void free_duplicate_context(MpegEncContext *s)
535 av_freep(&s->edge_emu_buffer);
536 av_freep(&s->me.scratchpad);
/* the rd/b/obmc scratchpads alias me.scratchpad, so only clear the
 * pointer here; NOTE(review): the sibling NULL assignments are missing
 * from this excerpt */
540 s->obmc_scratchpad = NULL;
542 av_freep(&s->dct_error_sum);
543 av_freep(&s->me.map);
544 av_freep(&s->me.score_map);
545 av_freep(&s->blocks);
546 av_freep(&s->ac_val_base);
/* Save the per-thread pointers/state of src into bak so that a wholesale
 * memcpy of the context (see ff_update_duplicate_context) can restore them
 * afterwards.  NOTE(review): most COPY() lines are missing from this
 * excerpt. */
550 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
552 #define COPY(a) bak->a = src->a
553 COPY(edge_emu_buffer);
558 COPY(obmc_scratchpad);
565 COPY(me.map_generation);
/* Synchronize a slice-thread context with the master: copy the whole struct,
 * then restore dst's own per-thread buffers and re-point the block pointers.
 * Returns 0 on success; allocation failure returns an error (return line not
 * visible in this excerpt). */
577 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
581 // FIXME copy only needed parts
583 backup_duplicate_context(&bak, dst);
584 memcpy(dst, src, sizeof(MpegEncContext));
585 backup_duplicate_context(dst, &bak);
/* pblocks must point into dst's own block array, not src's */
586 for (i = 0; i < 12; i++) {
587 dst->pblocks[i] = &dst->block[i];
/* re-create the linesize-dependent scratch buffers if dst lost them */
589 if (!dst->edge_emu_buffer &&
590 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
591 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
592 "scratch buffers.\n");
595 // STOP_TIMER("update_duplicate_context")
596 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading update callback: copy decoding state from the source
 * thread's context (s1) into the destination thread's context (s), doing a
 * full init / reinit when needed.  NOTE(review): several lines (returns,
 * some closing braces, #if guards) are missing from this excerpt. */
600 int ff_mpeg_update_thread_context(AVCodecContext *dst,
601 const AVCodecContext *src)
604 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
606 if (dst == src || !s1->context_initialized)
609 // FIXME can parameters change on I-frames?
610 // in that case dst may need a reinit
/* first call for this thread: clone the whole context, then give this copy
 * its own picture range and bitstream buffer before common init */
611 if (!s->context_initialized) {
612 memcpy(s, s1, sizeof(MpegEncContext));
615 s->picture_range_start += MAX_PICTURE_COUNT;
616 s->picture_range_end += MAX_PICTURE_COUNT;
617 s->bitstream_buffer = NULL;
618 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
620 ff_MPV_common_init(s);
/* resolution changed (or explicit reinit requested): redo the
 * frame-size-dependent allocations */
623 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
625 s->context_reinit = 0;
626 s->height = s1->height;
627 s->width = s1->width;
628 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
632 s->avctx->coded_height = s1->avctx->coded_height;
633 s->avctx->coded_width = s1->avctx->coded_width;
634 s->avctx->width = s1->avctx->width;
635 s->avctx->height = s1->avctx->height;
637 s->coded_picture_number = s1->coded_picture_number;
638 s->picture_number = s1->picture_number;
639 s->input_picture_number = s1->input_picture_number;
/* copy picture state; the struct-range memcpy relies on field layout
 * between last_picture and last_picture_ptr */
641 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
642 memcpy(&s->last_picture, &s1->last_picture,
643 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
645 // reset s->picture[].f.extended_data to s->picture[].f.data
646 for (i = 0; i < s->picture_count; i++)
647 s->picture[i].f.extended_data = s->picture[i].f.data;
/* picture pointers must be rebased into this thread's picture array */
649 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
650 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
651 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
653 // Error/bug resilience
654 s->next_p_frame_damaged = s1->next_p_frame_damaged;
655 s->workaround_bugs = s1->workaround_bugs;
/* MPEG-4 state block copy; the range relies on field layout between
 * time_increment_bits and shape */
658 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
659 (char *) &s1->shape - (char *) &s1->time_increment_bits);
662 s->max_b_frames = s1->max_b_frames;
663 s->low_delay = s1->low_delay;
664 s->droppable = s1->droppable;
666 // DivX handling (doesn't work)
667 s->divx_packed = s1->divx_packed;
/* duplicate any leftover bitstream data, padded for the bit reader */
669 if (s1->bitstream_buffer) {
670 if (s1->bitstream_buffer_size +
671 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
672 av_fast_malloc(&s->bitstream_buffer,
673 &s->allocated_bitstream_buffer_size,
674 s1->allocated_bitstream_buffer_size);
675 s->bitstream_buffer_size = s1->bitstream_buffer_size;
676 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
677 s1->bitstream_buffer_size);
678 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
679 FF_INPUT_BUFFER_PADDING_SIZE);
682 // linesize dependend scratch buffer allocation
683 if (!s->edge_emu_buffer)
/* NOTE(review): the "if (s1->linesize)" guard between these lines appears
 * to be missing from this excerpt */
685 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
686 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
687 "scratch buffers.\n");
688 return AVERROR(ENOMEM);
691 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
692 "be allocated due to unknown size.\n");
696 // MPEG2/interlacing info
697 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
698 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
700 if (!s1->first_field) {
701 s->last_pict_type = s1->pict_type;
702 if (s1->current_picture_ptr)
703 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
705 if (s1->pict_type != AV_PICTURE_TYPE_B) {
706 s->last_non_b_pict_type = s1->pict_type;
714 * Set the given MpegEncContext to common defaults
715 * (same for encoding and decoding).
716 * The changed fields will not depend upon the
717 * prior state of the MpegEncContext.
719 void ff_MPV_common_defaults(MpegEncContext *s)
/* default tables: MPEG-1 DC scale and identity chroma qscale */
721 s->y_dc_scale_table =
722 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
723 s->chroma_qscale_table = ff_default_chroma_qscale_table;
724 s->progressive_frame = 1;
725 s->progressive_sequence = 1;
726 s->picture_structure = PICT_FRAME;
728 s->coded_picture_number = 0;
729 s->picture_number = 0;
730 s->input_picture_number = 0;
732 s->picture_in_gop_number = 0;
737 s->picture_range_start = 0;
738 s->picture_range_end = MAX_PICTURE_COUNT;
740 s->slice_context_count = 1;
744 * Set the given MpegEncContext to defaults for decoding.
745 * the changed fields will not depend upon
746 * the prior state of the MpegEncContext.
748 void ff_MPV_decode_defaults(MpegEncContext *s)
/* currently just the shared defaults; decoder-specific defaults would go
 * here */
750 ff_MPV_common_defaults(s);
/* Initialize the error-resilience context from the MpegEncContext: mirror
 * the MB geometry, share the skip/intra/DC tables, allocate the ER status
 * buffers and register the decode callback.  Returns 0 on success or
 * AVERROR(ENOMEM). */
753 static int init_er(MpegEncContext *s)
755 ERContext *er = &s->er;
756 int mb_array_size = s->mb_height * s->mb_stride;
759 er->avctx = s->avctx;
/* mirror macroblock geometry into the ER context */
762 er->mb_index2xy = s->mb_index2xy;
763 er->mb_num = s->mb_num;
764 er->mb_width = s->mb_width;
765 er->mb_height = s->mb_height;
766 er->mb_stride = s->mb_stride;
767 er->b8_stride = s->b8_stride;
769 er->er_temp_buffer = av_malloc(s->mb_height * s->mb_stride);
770 er->error_status_table = av_mallocz(mb_array_size);
771 if (!er->er_temp_buffer || !er->error_status_table)
/* these tables are shared with (owned by) the MpegEncContext */
774 er->mbskip_table = s->mbskip_table;
775 er->mbintra_table = s->mbintra_table;
777 for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
778 er->dc_val[i] = s->dc_val[i];
780 er->decode_mb = mpeg_er_decode_mb;
/* failure path: release whichever buffer did get allocated */
785 av_freep(&er->er_temp_buffer);
786 av_freep(&er->error_status_table);
787 return AVERROR(ENOMEM);
791 * Initialize and allocates MpegEncContext fields dependent on the resolution.
793 static int init_context_frame(MpegEncContext *s)
795 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* derive MB geometry from the (already set) width/height; the +1 strides
 * leave a guard column for prediction code */
797 s->mb_width = (s->width + 15) / 16;
798 s->mb_stride = s->mb_width + 1;
799 s->b8_stride = s->mb_width * 2 + 1;
800 s->b4_stride = s->mb_width * 4 + 1;
801 mb_array_size = s->mb_height * s->mb_stride;
802 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
804 /* set default edge pos, will be overriden
805 * in decode_header if needed */
806 s->h_edge_pos = s->mb_width * 16;
807 s->v_edge_pos = s->mb_height * 16;
809 s->mb_num = s->mb_width * s->mb_height;
/* NOTE(review): block_wrap[0..2] and [4] assignments are missing from this
 * excerpt */
814 s->block_wrap[3] = s->b8_stride;
816 s->block_wrap[5] = s->mb_stride;
818 y_size = s->b8_stride * (2 * s->mb_height + 1);
819 c_size = s->mb_stride * (s->mb_height + 1);
820 yc_size = y_size + 2 * c_size;
822 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
823 fail); // error ressilience code looks cleaner with this
824 for (y = 0; y < s->mb_height; y++)
825 for (x = 0; x < s->mb_width; x++)
826 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
828 s->mb_index2xy[s->mb_height * s->mb_width] =
829 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
/* encoder-side tables below; NOTE(review): the s->encoding guard appears
 * to be missing from this excerpt */
832 /* Allocate MV tables */
833 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
834 mv_table_size * 2 * sizeof(int16_t), fail);
835 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
836 mv_table_size * 2 * sizeof(int16_t), fail);
837 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
838 mv_table_size * 2 * sizeof(int16_t), fail);
839 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
840 mv_table_size * 2 * sizeof(int16_t), fail);
841 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
842 mv_table_size * 2 * sizeof(int16_t), fail);
843 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
844 mv_table_size * 2 * sizeof(int16_t), fail);
/* public MV table pointers skip one row + one column of guard entries */
845 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
846 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
847 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
848 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
850 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
852 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
854 /* Allocate MB type table */
855 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
856 sizeof(uint16_t), fail); // needed for encoding
858 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
861 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
862 mb_array_size * sizeof(float), fail);
863 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
864 mb_array_size * sizeof(float), fail);
868 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
869 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
870 /* interlaced direct mode decoding tables */
871 for (i = 0; i < 2; i++) {
873 for (j = 0; j < 2; j++) {
874 for (k = 0; k < 2; k++) {
875 FF_ALLOCZ_OR_GOTO(s->avctx,
876 s->b_field_mv_table_base[i][j][k],
877 mv_table_size * 2 * sizeof(int16_t),
879 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
882 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
883 mb_array_size * 2 * sizeof(uint8_t), fail);
884 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
885 mv_table_size * 2 * sizeof(int16_t), fail);
886 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
889 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
890 mb_array_size * 2 * sizeof(uint8_t), fail);
893 if (s->out_format == FMT_H263) {
/* coded-block pattern prediction */
895 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
896 s->coded_block = s->coded_block_base + s->b8_stride + 1;
898 /* cbp, ac_pred, pred_dir */
899 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
900 mb_array_size * sizeof(uint8_t), fail);
901 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
902 mb_array_size * sizeof(uint8_t), fail);
905 if (s->h263_pred || s->h263_plus || !s->encoding) {
907 // MN: we need these for error resilience of intra-frames
908 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
909 yc_size * sizeof(int16_t), fail);
910 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
911 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
912 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the mid-range DC reset value used by the DC predictors */
913 for (i = 0; i < yc_size; i++)
914 s->dc_val_base[i] = 1024;
917 /* which mb is a intra block */
918 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
919 memset(s->mbintra_table, 1, mb_array_size);
921 /* init macroblock skip table */
922 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
923 // Note the + 1 is for a quicker mpeg4 slice_end detection
925 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
926 s->avctx->debug_mv) {
927 s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
928 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
929 s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
930 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
931 s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
932 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
/* fail label target (jumped to by the FF_ALLOC*_OR_GOTO macros above);
 * NOTE(review): the label line itself is missing from this excerpt */
937 return AVERROR(ENOMEM);
941 * init common structure for both encoder and decoder.
942 * this assumes that some variables like width/height are already set
944 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* number of slice contexts: thread_count when slice threading is active,
 * otherwise 1; encoders may force it via avctx->slices */
947 int nb_slices = (HAVE_THREADS &&
948 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
949 s->avctx->thread_count : 1;
951 if (s->encoding && s->avctx->slices)
952 nb_slices = s->avctx->slices;
/* interlaced MPEG-2 rounds mb_height up to an even number of MB rows */
954 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
955 s->mb_height = (s->height + 31) / 32 * 2;
956 else if (s->codec_id != AV_CODEC_ID_H264)
957 s->mb_height = (s->height + 15) / 16;
959 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
960 av_log(s->avctx, AV_LOG_ERROR,
961 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* clamp the slice count to what the thread pool and the frame height
 * can actually use */
965 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
968 max_slices = FFMIN(MAX_THREADS, s->mb_height);
970 max_slices = MAX_THREADS;
971 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
972 " reducing to %d\n", nb_slices, max_slices);
973 nb_slices = max_slices;
976 if ((s->width || s->height) &&
977 av_image_check_size(s->width, s->height, 0, s->avctx))
980 ff_dct_common_init(s);
982 s->flags = s->avctx->flags;
983 s->flags2 = s->avctx->flags2;
985 if (s->width && s->height) {
986 /* set chroma shifts */
987 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
991 /* convert fourcc to upper case */
992 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
994 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
996 s->avctx->coded_frame = &s->current_picture.f;
/* encoder-only allocations below; NOTE(review): the s->encoding guard
 * appears to be missing from this excerpt */
999 if (s->msmpeg4_version) {
1000 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
1001 2 * 2 * (MAX_LEVEL + 1) *
1002 (MAX_RUN + 1) * 2 * sizeof(int), fail);
1004 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
1006 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
1007 64 * 32 * sizeof(int), fail);
1008 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
1009 64 * 32 * sizeof(int), fail);
1010 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
1011 64 * 32 * 2 * sizeof(uint16_t), fail);
1012 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
1013 64 * 32 * 2 * sizeof(uint16_t), fail);
1014 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
1015 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1016 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
1017 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
1019 if (s->avctx->noise_reduction) {
1020 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
1021 2 * 64 * sizeof(uint16_t), fail);
/* picture pool sized for frame threading (one set per thread) */
1026 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
1027 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
1028 s->picture_count * sizeof(Picture), fail);
1029 for (i = 0; i < s->picture_count; i++) {
1030 avcodec_get_frame_defaults(&s->picture[i].f);
1033 if (s->width && s->height) {
1034 if (init_context_frame(s))
1037 s->parse_context.state = -1;
1040 s->context_initialized = 1;
1041 s->thread_context[0] = s;
/* create and initialize the per-slice duplicate contexts, splitting the
 * MB rows evenly between them */
1043 if (s->width && s->height) {
1044 if (nb_slices > 1) {
1045 for (i = 1; i < nb_slices; i++) {
1046 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1047 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1050 for (i = 0; i < nb_slices; i++) {
1051 if (init_duplicate_context(s->thread_context[i]) < 0)
1053 s->thread_context[i]->start_mb_y =
1054 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1055 s->thread_context[i]->end_mb_y =
1056 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1059 if (init_duplicate_context(s) < 0)
1062 s->end_mb_y = s->mb_height;
1064 s->slice_context_count = nb_slices;
/* failure path tears down everything allocated so far */
1069 ff_MPV_common_end(s);
1074 * Frees and resets MpegEncContext fields depending on the resolution.
1075 * Is used during resolution changes to avoid a full reinitialization of the
1078 static int free_context_frame(MpegEncContext *s)
/* mirror of init_context_frame(): free every resolution-dependent table
 * and NULL the derived pointers into the freed bases */
1082 av_freep(&s->mb_type);
1083 av_freep(&s->p_mv_table_base);
1084 av_freep(&s->b_forw_mv_table_base);
1085 av_freep(&s->b_back_mv_table_base);
1086 av_freep(&s->b_bidir_forw_mv_table_base);
1087 av_freep(&s->b_bidir_back_mv_table_base);
1088 av_freep(&s->b_direct_mv_table_base);
1089 s->p_mv_table = NULL;
1090 s->b_forw_mv_table = NULL;
1091 s->b_back_mv_table = NULL;
1092 s->b_bidir_forw_mv_table = NULL;
1093 s->b_bidir_back_mv_table = NULL;
1094 s->b_direct_mv_table = NULL;
1095 for (i = 0; i < 2; i++) {
1096 for (j = 0; j < 2; j++) {
1097 for (k = 0; k < 2; k++) {
1098 av_freep(&s->b_field_mv_table_base[i][j][k]);
1099 s->b_field_mv_table[i][j][k] = NULL;
1101 av_freep(&s->b_field_select_table[i][j]);
1102 av_freep(&s->p_field_mv_table_base[i][j]);
1103 s->p_field_mv_table[i][j] = NULL;
1105 av_freep(&s->p_field_select_table[i]);
1108 av_freep(&s->dc_val_base);
1109 av_freep(&s->coded_block_base);
1110 av_freep(&s->mbintra_table);
1111 av_freep(&s->cbp_table);
1112 av_freep(&s->pred_dir_table);
1114 av_freep(&s->mbskip_table);
1116 av_freep(&s->er.error_status_table);
1117 av_freep(&s->er.er_temp_buffer);
1118 av_freep(&s->mb_index2xy);
1119 av_freep(&s->lambda_table);
1120 av_freep(&s->cplx_tab);
1121 av_freep(&s->bits_tab);
/* strides are invalid once the frame tables are gone */
1123 s->linesize = s->uvlinesize = 0;
1125 for (i = 0; i < 3; i++)
1126 av_freep(&s->visualization_buffer[i]);
/* Reinitialize the resolution-dependent parts of the context after a frame
 * size change: tear down slice contexts and frame tables, recompute the MB
 * geometry, then rebuild them — avoiding a full ff_MPV_common_init().
 * NOTE(review): some lines (returns, closing braces) are missing from this
 * excerpt. */
1131 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* tear down the per-slice duplicate contexts (index 0 is s itself and is
 * not freed) */
1135 if (s->slice_context_count > 1) {
1136 for (i = 0; i < s->slice_context_count; i++) {
1137 free_duplicate_context(s->thread_context[i]);
1139 for (i = 1; i < s->slice_context_count; i++) {
1140 av_freep(&s->thread_context[i]);
1143 free_duplicate_context(s);
1145 free_context_frame(s);
/* pictures keep their buffers but must be reallocated at the new size */
1148 for (i = 0; i < s->picture_count; i++) {
1149 s->picture[i].needs_realloc = 1;
1152 s->last_picture_ptr =
1153 s->next_picture_ptr =
1154 s->current_picture_ptr = NULL;
/* recompute mb_height with the same rules as ff_MPV_common_init() */
1157 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1158 s->mb_height = (s->height + 31) / 32 * 2;
1159 else if (s->codec_id != AV_CODEC_ID_H264)
1160 s->mb_height = (s->height + 15) / 16;
1162 if ((s->width || s->height) &&
1163 av_image_check_size(s->width, s->height, 0, s->avctx))
1164 return AVERROR_INVALIDDATA;
1166 if ((err = init_context_frame(s)))
1169 s->thread_context[0] = s;
/* rebuild the slice contexts exactly as in ff_MPV_common_init() */
1171 if (s->width && s->height) {
1172 int nb_slices = s->slice_context_count;
1173 if (nb_slices > 1) {
1174 for (i = 1; i < nb_slices; i++) {
1175 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1176 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1179 for (i = 0; i < nb_slices; i++) {
1180 if (init_duplicate_context(s->thread_context[i]) < 0)
1182 s->thread_context[i]->start_mb_y =
1183 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1184 s->thread_context[i]->end_mb_y =
1185 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1188 if (init_duplicate_context(s) < 0)
1191 s->end_mb_y = s->mb_height;
1193 s->slice_context_count = nb_slices;
/* failure path tears down everything */
1198 ff_MPV_common_end(s);
1202 /* init common structure for both encoder and decoder */
/**
 * Free everything allocated in the common (encoder+decoder) context:
 * slice contexts, parse/bitstream buffers, quantizer tables, the
 * picture array and size-dependent frame state; reset the pointers.
 * NOTE(review): extraction dropped some closing braces in this chunk;
 * code tokens are preserved unchanged.
 */
void ff_MPV_common_end(MpegEncContext *s)
    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        /* thread_context[0] is s itself; free only the duplicates */
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);

    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* frame-thread copies do not own the pictures; the owner frees them */
    if (s->picture && !s->avctx->internal->is_copy) {
        for (i = 0; i < s->picture_count; i++) {
            free_picture(s, &s->picture[i]);
    av_freep(&s->picture);

    free_context_frame(s);

    if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);

    s->context_initialized = 0;
    s->last_picture_ptr =
    s->next_picture_ptr =
    s->current_picture_ptr = NULL;
    s->linesize = s->uvlinesize = 0;
/**
 * Build the run/level lookup arrays (max_level[], max_run[], index_run[])
 * of an RLTable for both the "not last" and "last" coefficient classes.
 * When static_store is non-NULL the three arrays are placed inside the
 * caller-provided static buffer, otherwise they are av_malloc()ed.
 * NOTE(review): extraction dropped the start/end range selection and the
 * static-vs-heap branch lines; code tokens are preserved unchanged.
 */
void ff_init_rl(RLTable *rl,
                uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
    int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
    uint8_t index_run[MAX_RUN + 1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if (static_store && rl->max_level[0])

    /* compute max_level[], max_run[] and index_run[] */
    for (last = 0; last < 2; last++) {
        /* index_run is filled with rl->n as the "unset" sentinel */
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for (i = start; i < end; i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        /* static layout: [max_level | max_run | index_run] per class */
        rl->max_level[last] = static_store[last];
        rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Precompute the per-qscale RL_VLC tables from the RLTable's VLC:
 * for each of the 32 quantizer values the run/level of every VLC code
 * is dequantized up front so the decoder can skip that work per symbol.
 * NOTE(review): extraction dropped declarations (e.g. qmul) and several
 * branch/brace lines; code tokens are preserved unchanged.
 */
void ff_init_vlc_rl(RLTable *rl)
    for (q = 0; q < 32; q++) {
        int qadd = (q - 1) | 1;
        for (i = 0; i < rl->vlc.table_size; i++) {
            int code = rl->vlc.table[i][0];
            int len = rl->vlc.table[i][1];
            if (len == 0) { // illegal code
            } else if (len < 0) { // more bits needed
                if (code == rl->n) { // esc
                    run = rl->table_run[code] + 1;
                    level = rl->table_level[code] * qmul + qadd;
                    /* codes past rl->last carry the "last coeff" marker */
                    if (code >= rl->last) run += 192;
            rl->rl_vlc[q][i].len = len;
            rl->rl_vlc[q][i].level = level;
            rl->rl_vlc[q][i].run = run;
1345 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1349 /* release non reference frames */
1350 for (i = 0; i < s->picture_count; i++) {
1351 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1352 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1353 (remove_current || &s->picture[i] != s->current_picture_ptr)
1354 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1355 free_frame_buffer(s, &s->picture[i]);
1360 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1362 if (pic->f.data[0] == NULL)
1364 if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
1365 if (!pic->owner2 || pic->owner2 == s)
1370 static int find_unused_picture(MpegEncContext *s, int shared)
1375 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1376 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1380 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1381 if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
1384 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1385 if (pic_is_unused(s, &s->picture[i]))
1390 return AVERROR_INVALIDDATA;
1393 int ff_find_unused_picture(MpegEncContext *s, int shared)
1395 int ret = find_unused_picture(s, shared);
1397 if (ret >= 0 && ret < s->picture_range_end) {
1398 if (s->picture[ret].needs_realloc) {
1399 s->picture[ret].needs_realloc = 0;
1400 free_picture(s, &s->picture[ret]);
1401 avcodec_get_frame_defaults(&s->picture[ret].f);
1407 static void update_noise_reduction(MpegEncContext *s)
1411 for (intra = 0; intra < 2; intra++) {
1412 if (s->dct_count[intra] > (1 << 16)) {
1413 for (i = 0; i < 64; i++) {
1414 s->dct_error_sum[intra][i] >>= 1;
1416 s->dct_count[intra] >>= 1;
1419 for (i = 0; i < 64; i++) {
1420 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1421 s->dct_count[intra] +
1422 s->dct_error_sum[intra][i] / 2) /
1423 (s->dct_error_sum[intra][i] + 1);
1429 * generic function for encode/decode called after coding/decoding
1430 * the header and before a frame is coded/decoded.
/**
 * Generic per-frame setup, called after the header is parsed and before
 * the frame is coded/decoded: recycles old reference frames, picks and
 * allocates the current Picture, allocates grey dummy references when
 * the stream starts on a non-keyframe, doubles linesizes for field
 * pictures, and selects the dequantizer functions.
 * Returns 0 on success, negative on failure.
 * NOTE(review): extraction dropped many lines in this chunk — variable
 * declarations, error checks (e.g. "if (i < 0)" after
 * ff_find_unused_picture()), returns and closing braces; the remaining
 * code tokens are preserved unchanged.
 */
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
    /* mark & release old frames */
    if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
        if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
            s->last_picture_ptr != s->next_picture_ptr &&
            s->last_picture_ptr->f.data[0]) {
            if (s->last_picture_ptr->owner2 == s)
                free_frame_buffer(s, s->last_picture_ptr);

        /* release forgotten pictures */
        /* if (mpeg124/h263) */
        for (i = 0; i < s->picture_count; i++) {
            if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
                &s->picture[i] != s->last_picture_ptr &&
                &s->picture[i] != s->next_picture_ptr &&
                s->picture[i].f.reference && !s->picture[i].needs_realloc) {
                if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                    av_log(avctx, AV_LOG_ERROR,
                           "releasing zombie picture\n");
                free_frame_buffer(s, &s->picture[i]);

    ff_release_unused_pictures(s, 1);

    if (s->current_picture_ptr &&
        s->current_picture_ptr->f.data[0] == NULL) {
        // we already have a unused image
        // (maybe it was set before reading the header)
        pic = s->current_picture_ptr;
        i = ff_find_unused_picture(s, 0);
        av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
        pic = &s->picture[i];

    pic->f.reference = 0;
    if (!s->droppable) {
        if (s->codec_id == AV_CODEC_ID_H264)
            pic->f.reference = s->picture_structure;
        else if (s->pict_type != AV_PICTURE_TYPE_B)
            pic->f.reference = 3;

    pic->f.coded_picture_number = s->coded_picture_number++;

    if (ff_alloc_picture(s, pic, 0) < 0)

    s->current_picture_ptr = pic;
    // FIXME use only the vars from current_pic
    s->current_picture_ptr->f.top_field_first = s->top_field_first;
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        if (s->picture_structure != PICT_FRAME)
            s->current_picture_ptr->f.top_field_first =
                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
    s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
                                                 !s->progressive_sequence;
    s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;

    s->current_picture_ptr->f.pict_type = s->pict_type;
    // if (s->flags && CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    /* non-B frames advance the reference chain */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        s->next_picture_ptr = s->current_picture_ptr;

    av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
            s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
            s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
            s->pict_type, s->droppable);

    if (s->codec_id != AV_CODEC_ID_H264) {
        /* missing last reference: allocate a grey dummy frame */
        if ((s->last_picture_ptr == NULL ||
             s->last_picture_ptr->f.data[0] == NULL) &&
            (s->pict_type != AV_PICTURE_TYPE_I ||
             s->picture_structure != PICT_FRAME)) {
            int h_chroma_shift, v_chroma_shift;
            av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                             &h_chroma_shift, &v_chroma_shift);
            if (s->pict_type != AV_PICTURE_TYPE_I)
                av_log(avctx, AV_LOG_ERROR,
                       "warning: first frame is no keyframe\n");
            else if (s->picture_structure != PICT_FRAME)
                av_log(avctx, AV_LOG_INFO,
                       "allocate dummy last picture for field based first keyframe\n");

            /* Allocate a dummy frame */
            i = ff_find_unused_picture(s, 0);
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            s->last_picture_ptr = &s->picture[i];
            if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
                s->last_picture_ptr = NULL;

            /* luma 0, chroma 0x80: mid-grey frame */
            memset(s->last_picture_ptr->f.data[0], 0,
                   avctx->height * s->last_picture_ptr->f.linesize[0]);
            memset(s->last_picture_ptr->f.data[1], 0x80,
                   (avctx->height >> v_chroma_shift) *
                   s->last_picture_ptr->f.linesize[1]);
            memset(s->last_picture_ptr->f.data[2], 0x80,
                   (avctx->height >> v_chroma_shift) *
                   s->last_picture_ptr->f.linesize[2]);

            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
            s->last_picture_ptr->f.reference = 3;
        /* B-frame without a next reference: another dummy frame */
        if ((s->next_picture_ptr == NULL ||
             s->next_picture_ptr->f.data[0] == NULL) &&
            s->pict_type == AV_PICTURE_TYPE_B) {
            /* Allocate a dummy frame */
            i = ff_find_unused_picture(s, 0);
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            s->next_picture_ptr = &s->picture[i];
            if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
                s->next_picture_ptr = NULL;
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
            s->next_picture_ptr->f.reference = 3;

    if (s->last_picture_ptr)
        ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if (s->next_picture_ptr)
        ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
        if (s->next_picture_ptr)
            s->next_picture_ptr->owner2 = s;
        if (s->last_picture_ptr)
            s->last_picture_ptr->owner2 = s;

    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                 s->last_picture_ptr->f.data[0]));

    /* field pictures: point at the right field and double the strides */
    if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f.data[i] +=
                    s->current_picture.f.linesize[i];
            s->current_picture.f.linesize[i] *= 2;
            s->last_picture.f.linesize[i] *= 2;
            s->next_picture.f.linesize[i] *= 2;

    s->err_recognition = avctx->err_recognition;

    /* set dequantizer, we can't do it during init as
     * it might change for mpeg4 and we can't do it in the header
     * decode as init is not called for mpeg4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;

    if (s->dct_error_sum) {
        assert(s->avctx->noise_reduction && s->encoding);
        update_noise_reduction(s);

    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);
1642 /* generic function for encode/decode called after a
1643 * frame has been coded/decoded. */
/**
 * Generic per-frame teardown, called after a frame has been
 * coded/decoded: pads the edges of the reconstructed reference frame,
 * records last-picture statistics, copies the current picture back into
 * the picture array, releases non-reference frames and reports decode
 * progress to frame threads.
 * NOTE(review): extraction dropped several closing braces and `break`
 * lines here; code tokens are preserved unchanged.
 */
void ff_MPV_frame_end(MpegEncContext *s)
    /* redraw edges for the frame if decoding didn't complete */
    // just to make sure that all data is rendered.
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        ff_xvmc_field_end(s);
    } else if ((s->er.error_count || s->encoding) &&
               !s->avctx->hwaccel &&
               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
               s->unrestricted_mv &&
               s->current_picture.f.reference &&
               !(s->flags & CODEC_FLAG_EMU_EDGE)) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        int hshift = desc->log2_chroma_w;
        int vshift = desc->log2_chroma_h;
        s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
                          s->h_edge_pos, s->v_edge_pos,
                          EDGE_WIDTH, EDGE_WIDTH,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);

    s->last_pict_type = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B) {
        s->last_non_b_pict_type = s->pict_type;

    /* copy back current_picture variables */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
            s->picture[i] = s->current_picture;
    assert(i < MAX_PICTURE_COUNT);

    /* release non-reference frames */
    for (i = 0; i < s->picture_count; i++) {
        if (s->picture[i].f.data[0] && !s->picture[i].f.reference
            /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
            free_frame_buffer(s, &s->picture[i]);

    // clear copies, to avoid confusion
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));

    s->avctx->coded_frame = &s->current_picture_ptr->f;

    if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
1716 * Draw a line from (ex, ey) -> (sx, sy).
1717 * @param w width of the image
1718 * @param h height of the image
1719 * @param stride stride/linesize of the image
1720 * @param color color of the arrow
1722 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1723 int w, int h, int stride, int color)
1727 sx = av_clip(sx, 0, w - 1);
1728 sy = av_clip(sy, 0, h - 1);
1729 ex = av_clip(ex, 0, w - 1);
1730 ey = av_clip(ey, 0, h - 1);
1732 buf[sy * stride + sx] += color;
1734 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1736 FFSWAP(int, sx, ex);
1737 FFSWAP(int, sy, ey);
1739 buf += sx + sy * stride;
1741 f = ((ey - sy) << 16) / ex;
1742 for (x = 0; x <= ex; x++) {
1744 fr = (x * f) & 0xFFFF;
1745 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1746 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1750 FFSWAP(int, sx, ex);
1751 FFSWAP(int, sy, ey);
1753 buf += sx + sy * stride;
1756 f = ((ex - sx) << 16) / ey;
1759 for (y = 0; y = ey; y++) {
1761 fr = (y * f) & 0xFFFF;
1762 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1763 buf[y * stride + x + 1] += (color * fr ) >> 16;
1769 * Draw an arrow from (ex, ey) -> (sx, sy).
1770 * @param w width of the image
1771 * @param h height of the image
1772 * @param stride stride/linesize of the image
1773 * @param color color of the arrow
/**
 * Draw an arrow from (sx, sy) to (ex, ey): the shaft plus, for arrows
 * longer than 3 pixels, two short head strokes rotated +-45 degrees.
 * (Restores the dx/dy declarations and assignments lost to truncation.)
 *
 * @param w      width of the image
 * @param h      height of the image
 * @param stride stride/linesize of the image
 * @param color  color of the arrow
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color)
{
    int dx, dy;

    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    dx = ex - sx;
    dy = ey - sy;

    if (dx * dx + dy * dy > 3 * 3) {
        /* head strokes: direction rotated by +-45 degrees, fixed length */
        int rx =  dx + dy;
        int ry = -dx + dy;
        int length = ff_sqrt((rx * rx + ry * ry) << 8);

        // FIXME subpixel accuracy
        rx = ROUNDED_DIV(rx * 3 << 4, length);
        ry = ROUNDED_DIV(ry * 3 << 4, length);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
1804 * Print debugging info for the given picture.
/**
 * Print debugging info for the given picture: per-macroblock skip/QP/type
 * maps to the log and, when requested, motion-vector / QP / MB-type
 * overlays drawn into a copy of the frame (visualization_buffer).
 * NOTE(review): extraction dropped many lines in this chunk
 * (declarations, `break`/`continue` statements, closing braces); the
 * remaining code tokens are preserved unchanged.
 */
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
    if (s->avctx->hwaccel || !pict || !pict->mb_type)

    if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
        case AV_PICTURE_TYPE_I:
            av_log(s->avctx,AV_LOG_DEBUG,"I\n");
        case AV_PICTURE_TYPE_P:
            av_log(s->avctx,AV_LOG_DEBUG,"P\n");
        case AV_PICTURE_TYPE_B:
            av_log(s->avctx,AV_LOG_DEBUG,"B\n");
        case AV_PICTURE_TYPE_S:
            av_log(s->avctx,AV_LOG_DEBUG,"S\n");
        case AV_PICTURE_TYPE_SI:
            av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
        case AV_PICTURE_TYPE_SP:
            av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
        /* one character column per macroblock */
        for (y = 0; y < s->mb_height; y++) {
            for (x = 0; x < s->mb_width; x++) {
                if (s->avctx->debug & FF_DEBUG_SKIP) {
                    int count = s->mbskip_table[x + y * s->mb_stride];
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                if (s->avctx->debug & FF_DEBUG_QP) {
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d",
                           pict->qscale_table[x + y * s->mb_stride]);
                if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
                    int mb_type = pict->mb_type[x + y * s->mb_stride];
                    // Type & MV direction
                    if (IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if (IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if (IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if (IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if (IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if (IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if (!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if (!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");

                    // segmentation
                    if (IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if (IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if (IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                        av_log(s->avctx, AV_LOG_DEBUG, "?");

                    if (IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
            av_log(s->avctx, AV_LOG_DEBUG, "\n");

    if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
        (s->avctx->debug_mv)) {
        const int shift = 1 + s->quarter_sample;
        int h_chroma_shift, v_chroma_shift, block_height;
        const int width = s->avctx->width;
        const int height = s->avctx->height;
        const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
        const int mv_stride = (s->mb_width << mv_sample_log2) +
                              (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
        s->low_delay = 0; // needed to see the vectors without trashing the buffers

        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &h_chroma_shift, &v_chroma_shift);
        /* draw into a private copy so the reference frames stay intact */
        for (i = 0; i < 3; i++) {
            memcpy(s->visualization_buffer[i], pict->data[i],
                   (i == 0) ? pict->linesize[i] * height:
                              pict->linesize[i] * height >> v_chroma_shift);
            pict->data[i] = s->visualization_buffer[i];
        pict->type = FF_BUFFER_TYPE_COPY;
        ptr = pict->data[0];
        block_height = 16 >> v_chroma_shift;

        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_index = mb_x + mb_y * s->mb_stride;
                if ((s->avctx->debug_mv) && pict->motion_val) {
                    for (type = 0; type < 3; type++) {
                        if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
                            (pict->pict_type!= AV_PICTURE_TYPE_P))
                        if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
                            (pict->pict_type!= AV_PICTURE_TYPE_B))
                        if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
                            (pict->pict_type!= AV_PICTURE_TYPE_B))
                        if (!USES_LIST(pict->mb_type[mb_index], direction))

                        if (IS_8X8(pict->mb_type[mb_index])) {
                            /* one arrow per 8x8 block */
                            for (i = 0; i < 4; i++) {
                                int sx = mb_x * 16 + 4 + 8 * (i & 1);
                                int sy = mb_y * 16 + 4 + 8 * (i >> 1);
                                int xy = (mb_x * 2 + (i & 1) +
                                          (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                                int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
                                int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width,
                                           height, s->linesize, 100);
                        } else if (IS_16X8(pict->mb_type[mb_index])) {
                            for (i = 0; i < 2; i++) {
                                int sx = mb_x * 16 + 8;
                                int sy = mb_y * 16 + 4 + 8 * i;
                                int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
                                int mx = (pict->motion_val[direction][xy][0] >> shift);
                                int my = (pict->motion_val[direction][xy][1] >> shift);

                                if (IS_INTERLACED(pict->mb_type[mb_index]))

                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
                                           height, s->linesize, 100);
                        } else if (IS_8X16(pict->mb_type[mb_index])) {
                            for (i = 0; i < 2; i++) {
                                int sx = mb_x * 16 + 4 + 8 * i;
                                int sy = mb_y * 16 + 8;
                                int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
                                int mx = pict->motion_val[direction][xy][0] >> shift;
                                int my = pict->motion_val[direction][xy][1] >> shift;

                                if (IS_INTERLACED(pict->mb_type[mb_index]))

                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
                                           height, s->linesize, 100);
                            /* NOTE(review): missing parentheses below —
                             * `>> shift + sx` parses as `>> (shift + sx)`,
                             * unlike the parenthesized 8x8 branch above;
                             * should be `(... >> shift) + sx` / `+ sy`. */
                            int sx = mb_x * 16 + 8;
                            int sy = mb_y * 16 + 8;
                            int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
                            int mx = pict->motion_val[direction][xy][0] >> shift + sx;
                            int my = pict->motion_val[direction][xy][1] >> shift + sy;
                            draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);

                if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
                    /* replicate the QP value across both chroma planes */
                    uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
                                 0x0101010101010101ULL;
                    for (y = 0; y < block_height; y++) {
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
                                      (block_height * mb_y + y) *
                                      pict->linesize[1]) = c;
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
                                      (block_height * mb_y + y) *
                                      pict->linesize[2]) = c;
                if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
                    int mb_type = pict->mb_type[mb_index];
/* map MB type to a chroma (u, v) color on a hue circle */
#define COLOR(theta, r) \
    u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
    v = (int)(128 + r * sin(theta * 3.141592 / 180));

                    if (IS_PCM(mb_type)) {
                    } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
                               IS_INTRA16x16(mb_type)) {
                    } else if (IS_INTRA4x4(mb_type)) {
                    } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
                    } else if (IS_DIRECT(mb_type)) {
                    } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
                    } else if (IS_GMC(mb_type)) {
                    } else if (IS_SKIP(mb_type)) {
                    } else if (!USES_LIST(mb_type, 1)) {
                    } else if (!USES_LIST(mb_type, 0)) {
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));

                    u *= 0x0101010101010101ULL;
                    v *= 0x0101010101010101ULL;
                    for (y = 0; y < block_height; y++) {
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
                                      (block_height * mb_y + y) * pict->linesize[1]) = u;
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
                                      (block_height * mb_y + y) * pict->linesize[2]) = v;

                    // segmentation: XOR grid lines onto luma at partition borders
                    if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                    if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
                        for (y = 0; y < 16; y++)
                            pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
                                          pict->linesize[0]] ^= 0x80;
                    if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
                        int dm = 1 << (mv_sample_log2 - 2);
                        for (i = 0; i < 4; i++) {
                            int sx = mb_x * 16 + 8 * (i & 1);
                            int sy = mb_y * 16 + 8 * (i >> 1);
                            int xy = (mb_x * 2 + (i & 1) +
                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                            // FIXME bidir
                            int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
                            if (mv[0] != mv[dm] ||
                                mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
                                for (y = 0; y < 8; y++)
                                    pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
                            if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
                                *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
                                              pict->linesize[0]) ^= 0x8080808080808080ULL;

                    if (IS_INTERLACED(mb_type) &&
                        s->codec_id == AV_CODEC_ID_H264) {
                s->mbskip_table[mb_index] = 0;
2111 * find the lowest MB row referenced in the MVs
2113 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2115 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2116 int my, off, i, mvs;
2118 if (s->picture_structure != PICT_FRAME || s->mcsel)
2121 switch (s->mv_type) {
2135 for (i = 0; i < mvs; i++) {
2136 my = s->mv[dir][i][1]<<qpel_shift;
2137 my_max = FFMAX(my_max, my);
2138 my_min = FFMIN(my_min, my);
2141 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2143 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2145 return s->mb_height-1;
2148 /* put block[] to dest[] */
2149 static inline void put_dct(MpegEncContext *s,
2150 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2152 s->dct_unquantize_intra(s, block, i, qscale);
2153 s->dsp.idct_put (dest, line_size, block);
2156 /* add block[] to dest[] */
2157 static inline void add_dct(MpegEncContext *s,
2158 int16_t *block, int i, uint8_t *dest, int line_size)
2160 if (s->block_last_index[i] >= 0) {
2161 s->dsp.idct_add (dest, line_size, block);
2165 static inline void add_dequant_dct(MpegEncContext *s,
2166 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2168 if (s->block_last_index[i] >= 0) {
2169 s->dct_unquantize_inter(s, block, i, qscale);
2171 s->dsp.idct_add (dest, line_size, block);
2176 * Clean dc, ac, coded_block for the current non-intra MB.
2178 void ff_clean_intra_table_entries(MpegEncContext *s)
2180 int wrap = s->b8_stride;
2181 int xy = s->block_index[0];
2184 s->dc_val[0][xy + 1 ] =
2185 s->dc_val[0][xy + wrap] =
2186 s->dc_val[0][xy + 1 + wrap] = 1024;
2188 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2189 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2190 if (s->msmpeg4_version>=3) {
2191 s->coded_block[xy ] =
2192 s->coded_block[xy + 1 ] =
2193 s->coded_block[xy + wrap] =
2194 s->coded_block[xy + 1 + wrap] = 0;
2197 wrap = s->mb_stride;
2198 xy = s->mb_x + s->mb_y * wrap;
2200 s->dc_val[2][xy] = 1024;
2202 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2203 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2205 s->mbintra_table[xy]= 0;
2208 /* generic function called after a macroblock has been parsed by the
2209 decoder or after it has been encoded by the encoder.
2211 Important variables used:
2212 s->mb_intra : true if intra macroblock
2213 s->mv_dir : motion vector direction
2214 s->mv_type : motion vector type
2215 s->mv : motion vector
2216 s->interlaced_dct : true if interlaced dct used (mpeg2)
2218 static av_always_inline
2219 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2222 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2223 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2224 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2228 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2229 /* save DCT coefficients */
2231 int16_t *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2232 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2234 for(j=0; j<64; j++){
2235 *dct++ = block[i][s->dsp.idct_permutation[j]];
2236 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2238 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2242 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2244 /* update DC predictors for P macroblocks */
2246 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2247 if(s->mbintra_table[mb_xy])
2248 ff_clean_intra_table_entries(s);
2252 s->last_dc[2] = 128 << s->intra_dc_precision;
2255 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2256 s->mbintra_table[mb_xy]=1;
2258 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2259 uint8_t *dest_y, *dest_cb, *dest_cr;
2260 int dct_linesize, dct_offset;
2261 op_pixels_func (*op_pix)[4];
2262 qpel_mc_func (*op_qpix)[16];
2263 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2264 const int uvlinesize = s->current_picture.f.linesize[1];
2265 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2266 const int block_size = 8;
2268 /* avoid copy if macroblock skipped in last frame too */
2269 /* skip only during decoding as we might trash the buffers during encoding a bit */
2271 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2273 if (s->mb_skipped) {
2275 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2277 } else if(!s->current_picture.f.reference) {
2280 *mbskip_ptr = 0; /* not skipped */
2284 dct_linesize = linesize << s->interlaced_dct;
2285 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2289 dest_cb= s->dest[1];
2290 dest_cr= s->dest[2];
2292 dest_y = s->b_scratchpad;
2293 dest_cb= s->b_scratchpad+16*linesize;
2294 dest_cr= s->b_scratchpad+32*linesize;
2298 /* motion handling */
2299 /* decoding or more than one mb_type (MC was already done otherwise) */
2302 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2303 if (s->mv_dir & MV_DIR_FORWARD) {
2304 ff_thread_await_progress(&s->last_picture_ptr->f,
2305 ff_MPV_lowest_referenced_row(s, 0),
2308 if (s->mv_dir & MV_DIR_BACKWARD) {
2309 ff_thread_await_progress(&s->next_picture_ptr->f,
2310 ff_MPV_lowest_referenced_row(s, 1),
2315 op_qpix= s->me.qpel_put;
2316 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2317 op_pix = s->dsp.put_pixels_tab;
2319 op_pix = s->dsp.put_no_rnd_pixels_tab;
2321 if (s->mv_dir & MV_DIR_FORWARD) {
2322 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2323 op_pix = s->dsp.avg_pixels_tab;
2324 op_qpix= s->me.qpel_avg;
2326 if (s->mv_dir & MV_DIR_BACKWARD) {
2327 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2331 /* skip dequant / idct if we are really late ;) */
2332 if(s->avctx->skip_idct){
2333 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2334 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2335 || s->avctx->skip_idct >= AVDISCARD_ALL)
2339 /* add dct residue */
2340 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2341 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2342 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2343 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2344 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2345 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2347 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2348 if (s->chroma_y_shift){
2349 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2350 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2354 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2355 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2356 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2357 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2360 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2361 add_dct(s, block[0], 0, dest_y , dct_linesize);
2362 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2363 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2364 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2366 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2367 if(s->chroma_y_shift){//Chroma420
2368 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2369 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2372 dct_linesize = uvlinesize << s->interlaced_dct;
2373 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2375 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2376 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2377 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2378 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2379 if(!s->chroma_x_shift){//Chroma444
2380 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2381 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2382 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2383 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2388 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2389 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2392 /* dct only in intra block */
2393 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2394 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2395 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2396 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2397 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2399 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2400 if(s->chroma_y_shift){
2401 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2402 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2406 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2407 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2408 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2409 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2413 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2414 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2415 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2416 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2418 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2419 if(s->chroma_y_shift){
2420 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2421 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2424 dct_linesize = uvlinesize << s->interlaced_dct;
2425 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2427 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2428 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2429 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2430 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2431 if(!s->chroma_x_shift){//Chroma444
2432 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2433 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2434 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2435 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2443 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2444 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2445 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/**
 * Decode (reconstruct) one macroblock.
 * Dispatches to MPV_decode_mb_internal() with a constant is_mpeg12 flag
 * so the compiler can specialize the hot path per codec family.
 * NOTE(review): the else branch / closing brace are elided in this extract.
 */
2450 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2452     if(s->out_format == FMT_MPEG1) {
         /* MPEG-1/2 path: is_mpeg12 = 1 */
2453         MPV_decode_mb_internal(s, block, 1);
         /* all other codecs: is_mpeg12 = 0 */
2456         MPV_decode_mb_internal(s, block, 0);
/**
 * Draw the edges of a just-decoded horizontal band and hand it to the
 * user's draw_horiz_band() callback, if any.
 * @param y first row of the band
 * @param h is the normal height, this will be reduced automatically if needed for the last row
 * NOTE(review): several interior lines are elided in this extract.
 */
2462 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2463     const int field_pic= s->picture_structure != PICT_FRAME;
     /* Pad picture edges for unrestricted motion vectors, unless a hwaccel
      * owns the surfaces or the caller asked for emulated edges. */
2469     if (!s->avctx->hwaccel
2470         && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2471         && s->unrestricted_mv
2472         && s->current_picture.f.reference
2474         && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2475         const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
2476         int sides = 0, edge_h;
2477         int hshift = desc->log2_chroma_w;
2478         int vshift = desc->log2_chroma_h;
         /* only pad the sides that border this band */
2479         if (y==0) sides |= EDGE_TOP;
2480         if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
         /* clip the band height at the bottom of the frame */
2482         edge_h= FFMIN(h, s->v_edge_pos - y);
         /* luma, then the two chroma planes (chroma shifted by hshift/vshift) */
2484         s->dsp.draw_edges(s->current_picture_ptr->f.data[0] +  y         *s->linesize,
2485                           s->linesize,           s->h_edge_pos,         edge_h,
2486                           EDGE_WIDTH,           EDGE_WIDTH,            sides);
2487         s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2488                           s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
2489                           EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
2490         s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2491                           s->uvlinesize,         s->h_edge_pos>>hshift, edge_h>>vshift,
2492                           EDGE_WIDTH>>hshift,    EDGE_WIDTH>>vshift,    sides);
2495     h= FFMIN(h, s->avctx->height - y);
     /* for field pictures, wait for the second field unless the user accepts fields */
2497     if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2499     if (s->avctx->draw_horiz_band) {
2501         int offset[AV_NUM_DATA_POINTERS];
         /* pick which picture to show: current in coded order / low delay /
          * B-frames, otherwise the last (display-order) picture */
2504         if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2505             src = &s->current_picture_ptr->f;
2506         else if(s->last_picture_ptr)
2507             src = &s->last_picture_ptr->f;
2511         if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2512             for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
             /* byte offsets of the band start within each plane */
2515             offset[0]= y * s->linesize;
2517             offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2518             for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2524         s->avctx->draw_horiz_band(s->avctx, src, offset,
2525                                   y, s->picture_structure, h);
/**
 * Initialize s->block_index[] (motion-vector/coefficient indexing for the
 * four luma 8x8 blocks and the two chroma blocks) and s->dest[] (the plane
 * pointers where the current macroblock will be reconstructed).
 * Must be called whenever mb_x/mb_y changes.
 * NOTE(review): interior brace lines are elided in this extract.
 */
2529 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2530     const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2531     const int uvlinesize = s->current_picture.f.linesize[1];
2532     const int mb_size= 4;
     /* luma 8x8 block indices; the -2/-1 bias points at the MB to the left,
      * consistent with the dest[] pointers below (advanced per-MB elsewhere) */
2534     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
2535     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
2536     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2537     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
     /* chroma blocks live after the luma block area */
2538     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2539     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2540     //block_index is not used by mpeg2, so it is not affected by chroma_format
     /* horizontal offset of the (previous) macroblock within each plane */
2542     s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
2543     s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2544     s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2546     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2548         if(s->picture_structure==PICT_FRAME){
         /* frame picture: advance by full macroblock rows */
2549             s->dest[0] += s->mb_y *   linesize << mb_size;
2550             s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2551             s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
         /* field picture: rows are interleaved, so use mb_y/2 */
2553             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
2554             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2555             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2556             assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 *                  (inverse) permutated to scantable order!
 * NOTE(review): the declaration/zeroing of the temp buffer is elided in
 * this extract; only nonzero coefficients (up to 'last') are copied.
 */
2569 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
2575 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
     /* gather the nonzero coefficients into a temp copy */
2577     for(i=0; i<=last; i++){
2578         const int j= scantable[i];
     /* scatter them back through the permutation vector */
2583     for(i=0; i<=last; i++){
2584         const int j= scantable[i];
2585         const int perm_j= permutation[j];
2586         block[perm_j]= temp[j];
/**
 * Flush decoder state: release all internally/user allocated picture
 * buffers, drop the current/last/next picture references and reset the
 * bitstream parse context. Called e.g. on seek.
 */
2590 void ff_mpeg_flush(AVCodecContext *avctx){
2592     MpegEncContext *s = avctx->priv_data;
     /* nothing to do if the context was never fully initialized */
2594     if(s==NULL || s->picture==NULL)
2597     for(i=0; i<s->picture_count; i++){
         /* only buffers we or the user allocated can be freed here */
2598        if (s->picture[i].f.data[0] &&
2599            (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2600             s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2601         free_frame_buffer(s, &s->picture[i]);
2603     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2605     s->mb_x= s->mb_y= 0;
     /* reset the start-code parser so stale partial frames are discarded */
2607     s->parse_context.state= -1;
2608     s->parse_context.frame_start_found= 0;
2609     s->parse_context.overread= 0;
2610     s->parse_context.overread_index= 0;
2611     s->parse_context.index= 0;
2612     s->parse_context.last_index= 0;
2613     s->bitstream_buffer_size=0;
/**
 * MPEG-1 intra dequantization (C reference).
 * DC is scaled by the y/c DC scale; AC coefficients are multiplied by
 * qscale and the intra matrix, >>3, then forced odd ("oddification",
 * (level-1)|1) as MPEG-1 requires.
 * NOTE(review): the negative-level branch and saturation lines are elided
 * in this extract.
 */
2617 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2618                                    int16_t *block, int n, int qscale)
2620     int i, level, nCoeffs;
2621     const uint16_t *quant_matrix;
2623     nCoeffs= s->block_last_index[n];
     /* DC: luma vs chroma scale (selected by block index n, selection elided here) */
2626         block[0] = block[0] * s->y_dc_scale;
2628         block[0] = block[0] * s->c_dc_scale;
2629     /* XXX: only mpeg1 */
2630     quant_matrix = s->intra_matrix;
2631     for(i=1;i<=nCoeffs;i++) {
2632         int j= s->intra_scantable.permutated[i];
         /* one arm per sign of level; both apply the same scale then oddify */
2637                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2638                 level = (level - 1) | 1;
2641                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2642                 level = (level - 1) | 1;
/**
 * MPEG-1 inter (non-intra) dequantization (C reference).
 * Each coefficient: ((2*level + 1) * qscale * matrix) >> 4, then forced
 * odd per the MPEG-1 spec. All coefficients start at index 0 (no DC
 * special case for inter blocks).
 * NOTE(review): sign branches and saturation lines are elided here.
 */
2649 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2650                                    int16_t *block, int n, int qscale)
2652     int i, level, nCoeffs;
2653     const uint16_t *quant_matrix;
2655     nCoeffs= s->block_last_index[n];
2657     quant_matrix = s->inter_matrix;
2658     for(i=0; i<=nCoeffs; i++) {
2659         int j= s->intra_scantable.permutated[i];
         /* one arm per sign of level */
2664                 level = (((level << 1) + 1) * qscale *
2665                          ((int) (quant_matrix[j]))) >> 4;
2666                 level = (level - 1) | 1;
2669                 level = (((level << 1) + 1) * qscale *
2670                          ((int) (quant_matrix[j]))) >> 4;
2671                 level = (level - 1) | 1;
/**
 * MPEG-2 intra dequantization (C reference).
 * Like the MPEG-1 intra variant but WITHOUT the (level-1)|1 oddification;
 * with alternate_scan all 64 coefficients must be processed (nCoeffs=63)
 * because block_last_index is in the other scan order.
 * NOTE(review): sign branches are elided in this extract.
 */
2678 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2679                                    int16_t *block, int n, int qscale)
2681     int i, level, nCoeffs;
2682     const uint16_t *quant_matrix;
     /* alternate scan: the last-index shortcut is invalid, do all 63 ACs */
2684     if(s->alternate_scan) nCoeffs= 63;
2685     else nCoeffs= s->block_last_index[n];
2688         block[0] = block[0] * s->y_dc_scale;
2690         block[0] = block[0] * s->c_dc_scale;
2691     quant_matrix = s->intra_matrix;
2692     for(i=1;i<=nCoeffs;i++) {
2693         int j= s->intra_scantable.permutated[i];
2698                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2701                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Bit-exact MPEG-2 intra dequantization.
 * Same scaling as dct_unquantize_mpeg2_intra_c; presumably this variant
 * additionally applies the spec's mismatch control on coefficient 63
 * (those lines are elided in this extract — TODO confirm).
 */
2708 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2709                                    int16_t *block, int n, int qscale)
2711     int i, level, nCoeffs;
2712     const uint16_t *quant_matrix;
2715     if(s->alternate_scan) nCoeffs= 63;
2716     else nCoeffs= s->block_last_index[n];
2719         block[0] = block[0] * s->y_dc_scale;
2721         block[0] = block[0] * s->c_dc_scale;
2722     quant_matrix = s->intra_matrix;
2723     for(i=1;i<=nCoeffs;i++) {
2724         int j= s->intra_scantable.permutated[i];
2729                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2732                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * MPEG-2 inter dequantization (C reference).
 * ((2*level + 1) * qscale * inter_matrix) >> 4, no oddification;
 * mismatch-control lines appear elided in this extract.
 */
2741 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2742                                    int16_t *block, int n, int qscale)
2744     int i, level, nCoeffs;
2745     const uint16_t *quant_matrix;
     /* alternate scan invalidates block_last_index; process all 64 coeffs */
2748     if(s->alternate_scan) nCoeffs= 63;
2749     else nCoeffs= s->block_last_index[n];
2751     quant_matrix = s->inter_matrix;
2752     for(i=0; i<=nCoeffs; i++) {
2753         int j= s->intra_scantable.permutated[i];
         /* one arm per sign of level */
2758                 level = (((level << 1) + 1) * qscale *
2759                          ((int) (quant_matrix[j]))) >> 4;
2762                 level = (((level << 1) + 1) * qscale *
2763                          ((int) (quant_matrix[j]))) >> 4;
/**
 * H.263-family intra dequantization (C reference): no matrix, just
 * level*qmul +/- qadd per coefficient sign, with qadd = (qscale-1)|1.
 * NOTE(review): the qmul assignment, the n<4 luma/chroma DC selection and
 * the AC-pred (h263_aic) handling are elided in this extract.
 */
2772 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2773                                   int16_t *block, int n, int qscale)
2775     int i, level, qmul, qadd;
2778     assert(s->block_last_index[n]>=0);
2784             block[0] = block[0] * s->y_dc_scale;
2786             block[0] = block[0] * s->c_dc_scale;
2787         qadd = (qscale - 1) | 1;
     /* block_last_index is in scan order; convert to a raster-order bound */
2794         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2796     for(i=1; i<=nCoeffs; i++) {
         /* negative levels subtract qadd, positive levels add it */
2800                 level = level * qmul - qadd;
2802                 level = level * qmul + qadd;
/**
 * H.263-family inter dequantization (C reference): same qmul/qadd scheme
 * as the intra variant, but starting at coefficient 0 (no DC special case).
 * NOTE(review): the qmul assignment and zero-level skip are elided here.
 */
2809 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2810                                   int16_t *block, int n, int qscale)
2812     int i, level, qmul, qadd;
2815     assert(s->block_last_index[n]>=0);
2817     qadd = (qscale - 1) | 1;
     /* raster-order upper bound derived from the scan-order last index */
2820     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2822     for(i=0; i<=nCoeffs; i++) {
2826                 level = level * qmul - qadd;
2828                 level = level * qmul + qadd;
/**
 * set qscale and update qscale dependent variables.
 * Clamps qscale to [1,31] (clamping lines elided in this extract) and
 * derives the chroma qscale and the luma/chroma DC scale factors from
 * their per-codec lookup tables.
 */
2838 void ff_set_qscale(MpegEncContext * s, int qscale)
2842     else if (qscale > 31)
2846     s->chroma_qscale= s->chroma_qscale_table[qscale];
2848     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2849     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/**
 * Report row-level decode progress for frame-threaded decoding.
 * Skipped for B-frames (not used as references here), partitioned frames
 * and after an error, where the row count would be unreliable.
 */
2852 void ff_MPV_report_decode_progress(MpegEncContext *s)
2854     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2855         ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);
/**
 * Prepare the error-resilience context for a new frame: copy the current
 * picture pointers and the timing/prediction parameters the concealment
 * code needs, then start error tracking via ff_er_frame_start().
 */
2858 void ff_mpeg_er_frame_start(MpegEncContext *s)
2860     ERContext *er = &s->er;
     /* picture references used to conceal damaged regions */
2862     er->cur_pic  = s->current_picture_ptr;
2863     er->last_pic = s->last_picture_ptr;
2864     er->next_pic = s->next_picture_ptr;
     /* temporal distances and MV precision for motion-based concealment */
2866     er->pp_time           = s->pp_time;
2867     er->pb_time           = s->pb_time;
2868     er->quarter_sample    = s->quarter_sample;
2869     er->partitioned_frame = s->partitioned_frame;
2871     ff_er_frame_start(er);