2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
38 #include "xvmc_internal.h"
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
61 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default chroma qscale table: identity mapping (chroma qscale equals the
 * luma qscale) for codecs that do not define their own remapping.
 * NOTE(review): the closing "};" is elided in this view. */
67 static const uint8_t ff_default_chroma_qscale_table[32] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
70 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale table: a constant 8 for all 128 possible qscale values
 * (the DC coefficient divisor never varies in MPEG-1). */
73 const uint8_t ff_mpeg1_dc_scale_table[128] = {
74 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale table, divisor 4 for every qscale index;
 * selected via ff_mpeg2_dc_scale_table[] below. */
85 static const uint8_t mpeg2_dc_scale_table1[128] = {
86 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale table, divisor 2 for every qscale index;
 * selected via ff_mpeg2_dc_scale_table[] below. */
97 static const uint8_t mpeg2_dc_scale_table2[128] = {
98 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale table, divisor 1 (no scaling) for every qscale index;
 * selected via ff_mpeg2_dc_scale_table[] below. */
109 static const uint8_t mpeg2_dc_scale_table3[128] = {
110 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Lookup of the four DC scale tables (divisors 8, 4, 2, 1).
 * NOTE(review): presumably indexed by the MPEG-2 intra_dc_precision
 * field (0..3) — confirm against callers, which are outside this view. */
121 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
122 ff_mpeg1_dc_scale_table,
123 mpeg2_dc_scale_table1,
124 mpeg2_dc_scale_table2,
125 mpeg2_dc_scale_table3,
/* Supported pixel formats for plain 4:2:0 decoding.
 * NOTE(review): the initializer entries are elided in this view. */
128 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Pixel-format preference list when hardware acceleration is available:
 * hwaccel formats (DXVA2, VA-API) listed first.
 * NOTE(review): remaining entries and the terminator are elided here. */
133 const enum AVPixelFormat ff_hwaccel_pixfmt_list_420[] = {
134 AV_PIX_FMT_DXVA2_VLD,
135 AV_PIX_FMT_VAAPI_VLD,
/* Scan [p, end) for an MPEG-style start code, carrying a rolling 32-bit
 * shift register across calls in *state (so a start code split between
 * buffers is still found). Returns a pointer just past the start code.
 * NOTE(review): several interior lines (loop setup, the main scan loop,
 * the found: path) are elided in this view. */
142 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
144 uint32_t * restrict state)
/* Prime the state with up to 3 bytes, stopping early if the rolling
 * register already holds 0x000001 or the buffer is exhausted. */
152 for (i = 0; i < 3; i++) {
153 uint32_t tmp = *state << 8;
154 *state = tmp + *(p++);
155 if (tmp == 0x100 || p == end)
/* Skip ahead based on the last bytes seen: a byte > 1 cannot be part of
 * a 00 00 01 prefix, so larger strides are safe. */
160 if (p[-1] > 1 ) p += 3;
161 else if (p[-2] ) p += 2;
162 else if (p[-3]|(p[-1]-1)) p++;
/* Clamp to the buffer end before re-reading the last 4 bytes. */
169 p = FFMIN(p, end) - 4;
175 /* init common dct for both encoder and decoder */
/* Initialize DSP helpers, the dequantizer function pointers and the
 * (permuted) scan tables shared by the encoder and the decoder. */
176 av_cold int ff_dct_common_init(MpegEncContext *s)
178 ff_dsputil_init(&s->dsp, s->avctx);
179 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* Install the portable C dequantizers; arch-specific init below may
 * override them with optimized versions. */
181 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
182 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
183 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
184 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
185 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* Bit-exact output requested: use the slower but exact MPEG-2 intra path. */
186 if (s->flags & CODEC_FLAG_BITEXACT)
187 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
188 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture initialization.
 * NOTE(review): the ARCH_*/HAVE_* preprocessor guards around these calls
 * are elided in this view — confirm before editing. */
191 ff_MPV_common_init_x86(s);
193 ff_MPV_common_init_axp(s);
195 ff_MPV_common_init_arm(s);
197 ff_MPV_common_init_altivec(s);
199 ff_MPV_common_init_bfin(s);
202 /* load & permute scantables
203 * note: only wmv uses different ones
205 if (s->alternate_scan) {
206 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
207 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
209 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
210 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
212 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
213 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Shallow-copy *src into *dst and mark dst as a copy so the buffer
 * release logic does not free the pixels twice.
 * NOTE(review): the copy statement itself is elided in this view. */
218 void ff_copy_picture(Picture *dst, Picture *src)
221 dst->f.type = FF_BUFFER_TYPE_COPY;
225 * Release a frame buffer
227 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
229 /* WM Image / Screen codecs allocate internal buffers with different
230 * dimensions / colorspaces; ignore user-defined callbacks for these. */
231 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
232 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
233 s->codec_id != AV_CODEC_ID_MSS2)
234 ff_thread_release_buffer(s->avctx, &pic->f);
/* NOTE(review): the "else" joining this default-release branch to the
 * condition above is elided in this view. */
236 avcodec_default_release_buffer(s->avctx, &pic->f);
/* Always drop any hwaccel private data attached to the frame. */
237 av_freep(&pic->f.hwaccel_picture_private);
/* Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads). Returns AVERROR(ENOMEM) on failure.
 * NOTE(review): the fail: label and success return are elided here. */
240 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
/* Pad the linesize and round up to a 32-byte multiple for alignment. */
242 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
244 // edge emu needs blocksize + filter length - 1
245 // (= 17x17 for halfpel / 21x21 for h264)
246 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
247 // at uvlinesize. It supports only YUV420 so 24x24 is enough
248 // linesize * interlaced * MBsize
249 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
252 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 2,
/* The remaining scratch pointers all alias me.scratchpad — they are
 * views into the same allocation, not separate buffers. */
254 s->me.temp = s->me.scratchpad;
255 s->rd_scratchpad = s->me.scratchpad;
256 s->b_scratchpad = s->me.scratchpad;
257 s->obmc_scratchpad = s->me.scratchpad + 16;
/* Failure path: free what was allocated and report out-of-memory. */
261 av_freep(&s->edge_emu_buffer);
262 return AVERROR(ENOMEM);
266 * Allocate a frame buffer
268 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* Hardware acceleration: allocate per-frame hwaccel private data first. */
272 if (s->avctx->hwaccel) {
273 assert(!pic->f.hwaccel_picture_private);
274 if (s->avctx->hwaccel->priv_data_size) {
275 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
276 if (!pic->f.hwaccel_picture_private) {
277 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* WM Image / Screen codecs bypass user get_buffer callbacks — mirror of
 * the special case in free_frame_buffer(). */
283 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
284 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
285 s->codec_id != AV_CODEC_ID_MSS2)
286 r = ff_thread_get_buffer(s->avctx, &pic->f);
288 r = avcodec_default_get_buffer(s->avctx, &pic->f);
/* Validate the buffer the callback returned. */
290 if (r < 0 || !pic->f.type || !pic->f.data[0]) {
291 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
292 r, pic->f.type, pic->f.data[0]);
293 av_freep(&pic->f.hwaccel_picture_private);
/* Strides must not change between frames once established. */
297 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
298 s->uvlinesize != pic->f.linesize[1])) {
299 av_log(s->avctx, AV_LOG_ERROR,
300 "get_buffer() failed (stride changed)\n");
301 free_frame_buffer(s, pic);
/* U and V planes must share one stride. */
305 if (pic->f.linesize[1] != pic->f.linesize[2]) {
306 av_log(s->avctx, AV_LOG_ERROR,
307 "get_buffer() failed (uv stride mismatch)\n");
308 free_frame_buffer(s, pic);
/* Lazily allocate the linesize-dependent scratch buffers now that the
 * actual stride is known. */
312 if (!s->edge_emu_buffer &&
313 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
314 av_log(s->avctx, AV_LOG_ERROR,
315 "get_buffer() failed to allocate context scratch buffers.\n");
316 free_frame_buffer(s, pic);
324 * Allocate a Picture.
325 * The pixels are allocated/set by calling get_buffer() if shared = 0
327 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* big_mb_num covers one extra row plus one entry so the 2*mb_stride+1
 * offset applied to mb_type/qscale_table below stays in bounds. */
329 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
331 // the + 1 is needed so memset(,,stride*height) does not sig11
333 const int mb_array_size = s->mb_stride * s->mb_height;
334 const int b8_array_size = s->b8_stride * s->mb_height * 2;
335 const int b4_array_size = s->b4_stride * s->mb_height * 4;
/* Shared pictures reuse caller-provided pixels; otherwise get a buffer. */
340 assert(pic->f.data[0]);
341 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
342 pic->f.type = FF_BUFFER_TYPE_SHARED;
344 assert(!pic->f.data[0]);
346 if (alloc_frame_buffer(s, pic) < 0)
/* Cache the strides of the newly obtained buffer on the context. */
349 s->linesize = pic->f.linesize[0];
350 s->uvlinesize = pic->f.linesize[1];
/* First-time side-data allocation for this Picture. */
353 if (pic->f.qscale_table == NULL) {
355 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
356 mb_array_size * sizeof(int16_t), fail)
357 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
358 mb_array_size * sizeof(int16_t), fail)
359 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
360 mb_array_size * sizeof(int8_t ), fail)
363 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
364 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
365 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
366 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
368 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
369 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
/* Public pointers start one row + one column into the base allocation
 * so that neighbours above/left of MB (0,0) are addressable. */
371 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
372 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
/* H.264 stores motion vectors at 4x4 granularity (subsample_log2 = 2)... */
373 if (s->out_format == FMT_H264) {
374 for (i = 0; i < 2; i++) {
375 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
376 2 * (b4_array_size + 4) * sizeof(int16_t),
378 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
379 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
380 4 * mb_array_size * sizeof(uint8_t), fail)
382 pic->f.motion_subsample_log2 = 2;
/* ...while H.263-family/encoders use 8x8 blocks (subsample_log2 = 3). */
383 } else if (s->out_format == FMT_H263 || s->encoding ||
384 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
385 for (i = 0; i < 2; i++) {
386 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
387 2 * (b8_array_size + 4) * sizeof(int16_t),
389 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
390 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
391 4 * mb_array_size * sizeof(uint8_t), fail)
393 pic->f.motion_subsample_log2 = 3;
/* Optional DCT coefficient dump for debugging. */
395 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
396 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
397 64 * mb_array_size * sizeof(int16_t) * 6, fail)
399 pic->f.qstride = s->mb_stride;
400 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
401 1 * sizeof(AVPanScan), fail)
407 fail: // for the FF_ALLOCZ_OR_GOTO macro
409 free_frame_buffer(s, pic);
414 * Deallocate a picture.
416 static void free_picture(MpegEncContext *s, Picture *pic)
/* Release the pixel buffer only for non-shared pictures; shared pixels
 * are owned by the caller. */
420 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
421 free_frame_buffer(s, pic);
/* Free all per-picture side data allocated in ff_alloc_picture(). */
424 av_freep(&pic->mb_var);
425 av_freep(&pic->mc_mb_var);
426 av_freep(&pic->mb_mean);
427 av_freep(&pic->f.mbskip_table);
428 av_freep(&pic->qscale_table_base);
429 pic->f.qscale_table = NULL;
430 av_freep(&pic->mb_type_base);
431 pic->f.mb_type = NULL;
432 av_freep(&pic->f.dct_coeff);
433 av_freep(&pic->f.pan_scan);
/* NOTE(review): mb_type was already cleared a few lines above — this
 * second assignment is a harmless redundancy. */
434 pic->f.mb_type = NULL;
435 for (i = 0; i < 2; i++) {
436 av_freep(&pic->motion_val_base[i]);
437 av_freep(&pic->f.ref_index[i]);
438 pic->f.motion_val[i] = NULL;
/* For shared pictures, only detach the data pointers — never free them. */
441 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
442 for (i = 0; i < 4; i++) {
444 pic->f.data[i] = NULL;
/* Allocate the per-slice-context resources (ME maps, DCT blocks and, for
 * H.263-family codecs, the AC prediction values). */
450 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
452 int y_size = s->b8_stride * (2 * s->mb_height + 1);
453 int c_size = s->mb_stride * (s->mb_height + 1);
454 int yc_size = y_size + 2 * c_size;
462 s->obmc_scratchpad = NULL;
465 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
466 ME_MAP_SIZE * sizeof(uint32_t), fail)
467 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
468 ME_MAP_SIZE * sizeof(uint32_t), fail)
/* Noise reduction needs a per-context DCT error accumulator. */
469 if (s->avctx->noise_reduction) {
470 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
471 2 * 64 * sizeof(int), fail)
/* One contiguous allocation for all 12 64-coefficient blocks, with
 * pblocks[] pointing at the individual blocks. */
474 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
475 s->block = s->blocks[0];
477 for (i = 0; i < 12; i++) {
478 s->pblocks[i] = &s->block[i];
481 if (s->out_format == FMT_H263) {
483 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
484 yc_size * sizeof(int16_t) * 16, fail);
/* ac_val[0] = luma plane, ac_val[1]/[2] = chroma planes, each offset by
 * one row + one column for top/left neighbour access. */
485 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
486 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
487 s->ac_val[2] = s->ac_val[1] + c_size;
492 return -1; // free() through ff_MPV_common_end()
/* Free everything init_duplicate_context() and ff_mpv_frame_size_alloc()
 * allocated for one slice context. Safe on partially initialized contexts
 * since av_freep() tolerates NULL. */
495 static void free_duplicate_context(MpegEncContext *s)
500 av_freep(&s->edge_emu_buffer);
/* me.temp / rd_scratchpad / b_scratchpad / obmc_scratchpad all alias
 * me.scratchpad, so only the owning pointer is freed. */
501 av_freep(&s->me.scratchpad);
505 s->obmc_scratchpad = NULL;
507 av_freep(&s->dct_error_sum);
508 av_freep(&s->me.map);
509 av_freep(&s->me.score_map);
510 av_freep(&s->blocks);
511 av_freep(&s->ac_val_base);
/* Save the per-thread fields of *src into *bak so they survive the
 * wholesale memcpy in ff_update_duplicate_context().
 * NOTE(review): most COPY() lines are elided in this view. */
515 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
517 #define COPY(a) bak->a = src->a
518 COPY(edge_emu_buffer);
523 COPY(obmc_scratchpad);
530 COPY(me.map_generation);
/* Refresh a slice context from the master context: copy everything, then
 * restore dst's own per-thread buffers (saved via backup_duplicate_context)
 * and re-point pblocks[] into dst's own block storage. */
542 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
546 // FIXME copy only needed parts
548 backup_duplicate_context(&bak, dst);
549 memcpy(dst, src, sizeof(MpegEncContext));
550 backup_duplicate_context(dst, &bak);
/* The memcpy overwrote the pblocks pointers with src's; rebuild them. */
551 for (i = 0; i < 12; i++) {
552 dst->pblocks[i] = &dst->block[i];
/* Lazily allocate scratch buffers if this context never had them. */
554 if (!dst->edge_emu_buffer &&
555 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
556 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
557 "scratch buffers.\n");
560 // STOP_TIMER("update_duplicate_context")
561 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: synchronize the destination decoder context with the
 * source context after the source finished (or set up) a frame. Copies
 * picture state, bitstream buffer and codec parameters; reinitializes
 * the destination when dimensions changed. */
565 int ff_mpeg_update_thread_context(AVCodecContext *dst,
566 const AVCodecContext *src)
569 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
/* Nothing to do when updating from self or an uninitialized source. */
571 if (dst == src || !s1->context_initialized)
574 // FIXME can parameters change on I-frames?
575 // in that case dst may need a reinit
576 if (!s->context_initialized) {
/* First update: clone the whole source context, then give dst its own
 * picture number range and bitstream buffer. */
577 memcpy(s, s1, sizeof(MpegEncContext));
580 s->picture_range_start += MAX_PICTURE_COUNT;
581 s->picture_range_end += MAX_PICTURE_COUNT;
582 s->bitstream_buffer = NULL;
583 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
585 ff_MPV_common_init(s);
/* Dimension change (or explicit reinit request) forces a frame-size
 * reinitialization of the destination. */
588 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
590 s->context_reinit = 0;
591 s->height = s1->height;
592 s->width = s1->width;
593 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
597 s->avctx->coded_height = s1->avctx->coded_height;
598 s->avctx->coded_width = s1->avctx->coded_width;
599 s->avctx->width = s1->avctx->width;
600 s->avctx->height = s1->avctx->height;
602 s->coded_picture_number = s1->coded_picture_number;
603 s->picture_number = s1->picture_number;
604 s->input_picture_number = s1->input_picture_number;
605 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
607 memcpy(&s->last_picture, &s1->last_picture,
608 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
610 // reset s->picture[].f.extended_data to s->picture[].f.data
611 for (i = 0; i < s->picture_count; i++)
612 s->picture[i].f.extended_data = s->picture[i].f.data;
/* Translate the source's picture pointers into dst's picture array. */
614 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
615 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
616 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
618 // Error/bug resilience
619 s->next_p_frame_damaged = s1->next_p_frame_damaged;
620 s->workaround_bugs = s1->workaround_bugs;
/* Bulk-copy the MPEG-4 field range [time_increment_bits, shape). */
623 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
624 (char *) &s1->shape - (char *) &s1->time_increment_bits);
627 s->max_b_frames = s1->max_b_frames;
628 s->low_delay = s1->low_delay;
629 s->droppable = s1->droppable;
631 // DivX handling (doesn't work)
632 s->divx_packed = s1->divx_packed;
634 if (s1->bitstream_buffer) {
635 if (s1->bitstream_buffer_size +
636 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
/* NOTE(review): the new size passed here is s1's *allocated* size; this
 * relies on s1 maintaining allocated >= used + padding — confirm. */
637 av_fast_malloc(&s->bitstream_buffer,
638 &s->allocated_bitstream_buffer_size,
639 s1->allocated_bitstream_buffer_size);
640 s->bitstream_buffer_size = s1->bitstream_buffer_size;
641 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
642 s1->bitstream_buffer_size);
643 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
644 FF_INPUT_BUFFER_PADDING_SIZE);
647 // linesize dependent scratch buffer allocation
648 if (!s->edge_emu_buffer)
650 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
651 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
652 "scratch buffers.\n");
653 return AVERROR(ENOMEM);
656 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
657 "be allocated due to unknown size.\n");
661 // MPEG2/interlacing info
662 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
663 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
665 if (!s1->first_field) {
666 s->last_pict_type = s1->pict_type;
667 if (s1->current_picture_ptr)
668 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
670 if (s1->pict_type != AV_PICTURE_TYPE_B) {
671 s->last_non_b_pict_type = s1->pict_type;
679 * Set the given MpegEncContext to common defaults
680 * (same for encoding and decoding).
681 * The changed fields will not depend upon the
682 * prior state of the MpegEncContext.
684 void ff_MPV_common_defaults(MpegEncContext *s)
/* MPEG-1 DC tables and identity chroma qscale are the common baseline;
 * codec-specific init overrides them where needed. */
686 s->y_dc_scale_table =
687 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
688 s->chroma_qscale_table = ff_default_chroma_qscale_table;
689 s->progressive_frame = 1;
690 s->progressive_sequence = 1;
691 s->picture_structure = PICT_FRAME;
693 s->coded_picture_number = 0;
694 s->picture_number = 0;
695 s->input_picture_number = 0;
697 s->picture_in_gop_number = 0;
702 s->picture_range_start = 0;
703 s->picture_range_end = MAX_PICTURE_COUNT;
705 s->slice_context_count = 1;
709 * Set the given MpegEncContext to defaults for decoding.
710 * the changed fields will not depend upon
711 * the prior state of the MpegEncContext.
713 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Decoder defaults are currently just the common defaults. */
715 ff_MPV_common_defaults(s);
719 * Initialize and allocates MpegEncContext fields dependent on the resolution.
721 static int init_context_frame(MpegEncContext *s)
723 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Derive macroblock / block grid geometry from the frame width.
 * Strides are one unit wider than the frame for neighbour access. */
725 s->mb_width = (s->width + 15) / 16;
726 s->mb_stride = s->mb_width + 1;
727 s->b8_stride = s->mb_width * 2 + 1;
728 s->b4_stride = s->mb_width * 4 + 1;
729 mb_array_size = s->mb_height * s->mb_stride;
730 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
732 /* set default edge pos, will be overridden
733 * in decode_header if needed */
734 s->h_edge_pos = s->mb_width * 16;
735 s->v_edge_pos = s->mb_height * 16;
737 s->mb_num = s->mb_width * s->mb_height;
742 s->block_wrap[3] = s->b8_stride;
744 s->block_wrap[5] = s->mb_stride;
746 y_size = s->b8_stride * (2 * s->mb_height + 1);
747 c_size = s->mb_stride * (s->mb_height + 1);
748 yc_size = y_size + 2 * c_size;
750 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
751 fail); // error resilience code looks cleaner with this
/* Map linear MB index -> position in the (strided) MB arrays. */
752 for (y = 0; y < s->mb_height; y++)
753 for (x = 0; x < s->mb_width; x++)
754 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
756 s->mb_index2xy[s->mb_height * s->mb_width] =
757 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
760 /* Allocate MV tables */
761 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
762 mv_table_size * 2 * sizeof(int16_t), fail);
763 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
764 mv_table_size * 2 * sizeof(int16_t), fail);
765 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
766 mv_table_size * 2 * sizeof(int16_t), fail);
767 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
768 mv_table_size * 2 * sizeof(int16_t), fail);
769 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
770 mv_table_size * 2 * sizeof(int16_t), fail);
771 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
772 mv_table_size * 2 * sizeof(int16_t), fail);
/* Public table pointers skip one row + one column of the base storage
 * so neighbours above/left of MB (0,0) are valid addresses. */
773 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
774 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
775 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
776 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
778 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
780 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
782 /* Allocate MB type table */
783 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
784 sizeof(uint16_t), fail); // needed for encoding
786 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
789 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
790 mb_array_size * sizeof(float), fail);
791 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
792 mb_array_size * sizeof(float), fail);
796 FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
797 mb_array_size * sizeof(uint8_t), fail);
798 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
799 mb_array_size * sizeof(uint8_t), fail);
801 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
802 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
803 /* interlaced direct mode decoding tables */
804 for (i = 0; i < 2; i++) {
806 for (j = 0; j < 2; j++) {
807 for (k = 0; k < 2; k++) {
808 FF_ALLOCZ_OR_GOTO(s->avctx,
809 s->b_field_mv_table_base[i][j][k],
810 mv_table_size * 2 * sizeof(int16_t),
812 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
815 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
816 mb_array_size * 2 * sizeof(uint8_t), fail);
817 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
818 mv_table_size * 2 * sizeof(int16_t), fail);
819 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
822 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
823 mb_array_size * 2 * sizeof(uint8_t), fail);
826 if (s->out_format == FMT_H263) {
828 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
829 s->coded_block = s->coded_block_base + s->b8_stride + 1;
831 /* cbp, ac_pred, pred_dir */
832 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
833 mb_array_size * sizeof(uint8_t), fail);
834 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
835 mb_array_size * sizeof(uint8_t), fail);
838 if (s->h263_pred || s->h263_plus || !s->encoding) {
840 // MN: we need these for error resilience of intra-frames
841 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
842 yc_size * sizeof(int16_t), fail);
843 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
844 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
845 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor value (128 << 3). */
846 for (i = 0; i < yc_size; i++)
847 s->dc_val_base[i] = 1024;
850 /* which mb is a intra block */
851 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
852 memset(s->mbintra_table, 1, mb_array_size);
854 /* init macroblock skip table */
855 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
856 // Note the + 1 is for a quicker mpeg4 slice_end detection
858 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
859 s->avctx->debug_mv) {
/* Visualization buffers sized for the padded (EDGE_WIDTH) frame. */
860 s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
861 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
862 s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
863 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
864 s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
865 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
870 return AVERROR(ENOMEM);
874 * init common structure for both encoder and decoder.
875 * this assumes that some variables like width/height are already set
877 av_cold int ff_MPV_common_init(MpegEncContext *s)
880 int nb_slices = (HAVE_THREADS &&
881 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
882 s->avctx->thread_count : 1;
/* Encoder may request an explicit slice count. */
884 if (s->encoding && s->avctx->slices)
885 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 rounds mb_height to an even number of 16-line rows;
 * H.264 manages its own mb_height. */
887 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
888 s->mb_height = (s->height + 31) / 32 * 2;
889 else if (s->codec_id != AV_CODEC_ID_H264)
890 s->mb_height = (s->height + 15) / 16;
892 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
893 av_log(s->avctx, AV_LOG_ERROR,
894 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp the slice count to MAX_THREADS and the number of MB rows. */
898 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
901 max_slices = FFMIN(MAX_THREADS, s->mb_height);
903 max_slices = MAX_THREADS;
904 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
905 " reducing to %d\n", nb_slices, max_slices);
906 nb_slices = max_slices;
909 if ((s->width || s->height) &&
910 av_image_check_size(s->width, s->height, 0, s->avctx))
913 ff_dct_common_init(s);
915 s->flags = s->avctx->flags;
916 s->flags2 = s->avctx->flags2;
918 if (s->width && s->height) {
919 /* set chroma shifts */
920 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
924 /* convert fourcc to upper case */
925 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
927 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
929 s->avctx->coded_frame = &s->current_picture.f;
/* Encoder-only allocations (stats, quantizer matrices, reorder queues).
 * NOTE(review): the enclosing if (s->encoding) line is elided here. */
932 if (s->msmpeg4_version) {
933 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
934 2 * 2 * (MAX_LEVEL + 1) *
935 (MAX_RUN + 1) * 2 * sizeof(int), fail);
937 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
939 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
940 64 * 32 * sizeof(int), fail);
941 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
942 64 * 32 * sizeof(int), fail);
943 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
944 64 * 32 * 2 * sizeof(uint16_t), fail);
945 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
946 64 * 32 * 2 * sizeof(uint16_t), fail);
947 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
948 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
949 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
950 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
952 if (s->avctx->noise_reduction) {
953 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
954 2 * 64 * sizeof(uint16_t), fail);
/* One picture pool per thread to support frame threading. */
959 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
960 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
961 s->picture_count * sizeof(Picture), fail);
962 for (i = 0; i < s->picture_count; i++) {
963 avcodec_get_frame_defaults(&s->picture[i].f);
966 if (s->width && s->height) {
967 if (init_context_frame(s))
970 s->parse_context.state = -1;
973 s->context_initialized = 1;
974 s->thread_context[0] = s;
/* Spawn per-slice contexts: slice 0 reuses s itself, the rest are byte
 * copies that then get their own scratch buffers. */
976 if (s->width && s->height) {
978 for (i = 1; i < nb_slices; i++) {
979 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
980 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
983 for (i = 0; i < nb_slices; i++) {
984 if (init_duplicate_context(s->thread_context[i], s) < 0)
/* Divide the MB rows evenly (with rounding) among the slices. */
986 s->thread_context[i]->start_mb_y =
987 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
988 s->thread_context[i]->end_mb_y =
989 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
992 if (init_duplicate_context(s, s) < 0)
995 s->end_mb_y = s->mb_height;
997 s->slice_context_count = nb_slices;
/* Failure path: tear down everything allocated so far. */
1002 ff_MPV_common_end(s);
1007 * Frees and resets MpegEncContext fields depending on the resolution.
1008 * Is used during resolution changes to avoid a full reinitialization of the
1011 static int free_context_frame(MpegEncContext *s)
/* Inverse of init_context_frame(): free every resolution-dependent table
 * and NULL the derived (offset) pointers into the freed bases. */
1015 av_freep(&s->mb_type);
1016 av_freep(&s->p_mv_table_base);
1017 av_freep(&s->b_forw_mv_table_base);
1018 av_freep(&s->b_back_mv_table_base);
1019 av_freep(&s->b_bidir_forw_mv_table_base);
1020 av_freep(&s->b_bidir_back_mv_table_base);
1021 av_freep(&s->b_direct_mv_table_base);
1022 s->p_mv_table = NULL;
1023 s->b_forw_mv_table = NULL;
1024 s->b_back_mv_table = NULL;
1025 s->b_bidir_forw_mv_table = NULL;
1026 s->b_bidir_back_mv_table = NULL;
1027 s->b_direct_mv_table = NULL;
1028 for (i = 0; i < 2; i++) {
1029 for (j = 0; j < 2; j++) {
1030 for (k = 0; k < 2; k++) {
1031 av_freep(&s->b_field_mv_table_base[i][j][k]);
1032 s->b_field_mv_table[i][j][k] = NULL;
1034 av_freep(&s->b_field_select_table[i][j]);
1035 av_freep(&s->p_field_mv_table_base[i][j]);
1036 s->p_field_mv_table[i][j] = NULL;
1038 av_freep(&s->p_field_select_table[i]);
1041 av_freep(&s->dc_val_base);
1042 av_freep(&s->coded_block_base);
1043 av_freep(&s->mbintra_table);
1044 av_freep(&s->cbp_table);
1045 av_freep(&s->pred_dir_table);
1047 av_freep(&s->mbskip_table);
1049 av_freep(&s->error_status_table);
1050 av_freep(&s->er_temp_buffer);
1051 av_freep(&s->mb_index2xy);
1052 av_freep(&s->lambda_table);
1053 av_freep(&s->cplx_tab);
1054 av_freep(&s->bits_tab);
/* Strides are invalid for the old resolution; force re-detection. */
1056 s->linesize = s->uvlinesize = 0;
1058 for (i = 0; i < 3; i++)
1059 av_freep(&s->visualization_buffer[i]);
/* Reinitialize the resolution-dependent parts of the context after a
 * frame size change, without a full ff_MPV_common_end()/_init() cycle.
 * Mirrors the slice-context setup in ff_MPV_common_init(). */
1064 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* Tear down slice contexts and resolution-dependent tables. */
1068 if (s->slice_context_count > 1) {
1069 for (i = 0; i < s->slice_context_count; i++) {
1070 free_duplicate_context(s->thread_context[i]);
1072 for (i = 1; i < s->slice_context_count; i++) {
1073 av_freep(&s->thread_context[i]);
1076 free_duplicate_context(s);
1078 free_context_frame(s);
/* Existing pictures keep their data but must be reallocated before
 * next use at the new size. */
1081 for (i = 0; i < s->picture_count; i++) {
1082 s->picture[i].needs_realloc = 1;
1085 s->last_picture_ptr =
1086 s->next_picture_ptr =
1087 s->current_picture_ptr = NULL;
/* Recompute mb_height the same way ff_MPV_common_init() does. */
1090 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1091 s->mb_height = (s->height + 31) / 32 * 2;
1092 else if (s->codec_id != AV_CODEC_ID_H264)
1093 s->mb_height = (s->height + 15) / 16;
1095 if ((s->width || s->height) &&
1096 av_image_check_size(s->width, s->height, 0, s->avctx))
1097 return AVERROR_INVALIDDATA;
1099 if ((err = init_context_frame(s)))
1102 s->thread_context[0] = s;
1104 if (s->width && s->height) {
1105 int nb_slices = s->slice_context_count;
1106 if (nb_slices > 1) {
1107 for (i = 1; i < nb_slices; i++) {
1108 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1109 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1112 for (i = 0; i < nb_slices; i++) {
1113 if (init_duplicate_context(s->thread_context[i], s) < 0)
1115 s->thread_context[i]->start_mb_y =
1116 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1117 s->thread_context[i]->end_mb_y =
1118 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1121 if (init_duplicate_context(s, s) < 0)
1124 s->end_mb_y = s->mb_height;
1126 s->slice_context_count = nb_slices;
/* Failure path: full teardown. */
1131 ff_MPV_common_end(s);
1135 /* init common structure for both encoder and decoder */
1136 void ff_MPV_common_end(MpegEncContext *s)
/* Free slice contexts (slice 0 is s itself and is not av_freep'd). */
1140 if (s->slice_context_count > 1) {
1141 for (i = 0; i < s->slice_context_count; i++) {
1142 free_duplicate_context(s->thread_context[i]);
1144 for (i = 1; i < s->slice_context_count; i++) {
1145 av_freep(&s->thread_context[i]);
1147 s->slice_context_count = 1;
1148 } else free_duplicate_context(s);
1150 av_freep(&s->parse_context.buffer);
1151 s->parse_context.buffer_size = 0;
1153 av_freep(&s->bitstream_buffer);
1154 s->allocated_bitstream_buffer_size = 0;
/* Encoder-side allocations; av_freep() is NULL-safe for decoders. */
1156 av_freep(&s->avctx->stats_out);
1157 av_freep(&s->ac_stats);
1159 av_freep(&s->q_intra_matrix);
1160 av_freep(&s->q_inter_matrix);
1161 av_freep(&s->q_intra_matrix16);
1162 av_freep(&s->q_inter_matrix16);
1163 av_freep(&s->input_picture);
1164 av_freep(&s->reordered_input_picture);
1165 av_freep(&s->dct_offset);
/* Thread copies must not free the shared picture pool. */
1167 if (s->picture && !s->avctx->internal->is_copy) {
1168 for (i = 0; i < s->picture_count; i++) {
1169 free_picture(s, &s->picture[i]);
1172 av_freep(&s->picture);
1174 free_context_frame(s);
1176 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
1177 avcodec_default_free_buffers(s->avctx);
/* Reset state so a later re-init starts from a clean slate. */
1179 s->context_initialized = 0;
1180 s->last_picture_ptr =
1181 s->next_picture_ptr =
1182 s->current_picture_ptr = NULL;
1183 s->linesize = s->uvlinesize = 0;
/* Build the derived lookup tables (max_level[], max_run[], index_run[])
 * for a run-level VLC table. If static_store is non-NULL the three
 * tables are carved out of that static buffer instead of being
 * heap-allocated, and the call becomes a no-op once initialized. */
1186 void ff_init_rl(RLTable *rl,
1187 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1189 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1190 uint8_t index_run[MAX_RUN + 1];
1191 int last, run, level, start, end, i;
1193 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1194 if (static_store && rl->max_level[0])
1197 /* compute max_level[], max_run[] and index_run[] */
/* last == 0: codes with more coefficients following; last == 1: final
 * coefficient of the block. Each pass scans its half of the table. */
1198 for (last = 0; last < 2; last++) {
/* rl->n is used as the "unset" sentinel in index_run[]. */
1207 memset(max_level, 0, MAX_RUN + 1);
1208 memset(max_run, 0, MAX_LEVEL + 1);
1209 memset(index_run, rl->n, MAX_RUN + 1);
1210 for (i = start; i < end; i++) {
1211 run = rl->table_run[i];
1212 level = rl->table_level[i];
1213 if (index_run[run] == rl->n)
1215 if (level > max_level[run])
1216 max_level[run] = level;
1217 if (run > max_run[level])
1218 max_run[level] = run;
/* Publish the computed tables: either sub-slices of static_store
 * (offsets 0, MAX_RUN+1, MAX_RUN+MAX_LEVEL+2) or fresh allocations. */
1221 rl->max_level[last] = static_store[last];
1223 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1224 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1226 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1228 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1229 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1231 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1233 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1234 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Pre-expand the RL VLC into one (len, level, run) table per qscale
 * (0..31) so the decoder can dequantize while parsing. qmul/qadd fold
 * the H.263-style inverse quantization into the stored level. */
1238 void ff_init_vlc_rl(RLTable *rl)
1242 for (q = 0; q < 32; q++) {
1244 int qadd = (q - 1) | 1;
1250 for (i = 0; i < rl->vlc.table_size; i++) {
1251 int code = rl->vlc.table[i][0];
1252 int len = rl->vlc.table[i][1];
1255 if (len == 0) { // illegal code
1258 } else if (len < 0) { // more bits needed
1262 if (code == rl->n) { // esc
/* Regular code: store run+1 (decoder convention) and the
 * dequantized level; codes past rl->last get run += 192 to
 * mark them as "last coefficient" entries. */
1266 run = rl->table_run[code] + 1;
1267 level = rl->table_level[code] * qmul + qadd;
1268 if (code >= rl->last) run += 192;
1271 rl->rl_vlc[q][i].len = len;
1272 rl->rl_vlc[q][i].level = level;
1273 rl->rl_vlc[q][i].run = run;
/* Free the frame buffers of all pictures this context owns that are
 * neither reference frames nor (unless remove_current is set) the
 * picture currently being decoded. */
1278 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1282 /* release non reference frames */
1283 for (i = 0; i < s->picture_count; i++) {
/* owner2 tracks which slice/frame-thread context owns the buffer;
 * only release pictures we own (or unowned ones). */
1284 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1285 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1286 (remove_current || &s->picture[i] != s->current_picture_ptr)
1287 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1288 free_frame_buffer(s, &s->picture[i]);
/* Return nonzero if this Picture slot can be (re)used: it has no
 * allocated data, or it is flagged for reallocation and is not a
 * delayed reference owned by another thread context. */
1293 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1295 if (pic->f.data[0] == NULL)
1297 if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
1298 if (!pic->owner2 || pic->owner2 == s)
/* Pick a free slot in s->picture[] within [picture_range_start,
 * picture_range_end). Non-shared requests prefer slots whose buffers
 * can be reused (matching type) before falling back to any unused
 * slot; returns AVERROR_INVALIDDATA when the pool is exhausted. */
1303 static int find_unused_picture(MpegEncContext *s, int shared)
/* Shared buffers: only a completely empty, typeless slot will do. */
1308 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1309 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1313 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1314 if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
1317 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1318 if (pic_is_unused(s, &s->picture[i]))
1323 return AVERROR_INVALIDDATA;
/* Public wrapper around find_unused_picture(): if the chosen slot is
 * marked needs_realloc, free its old buffers and reset the frame to
 * defaults before handing the index back to the caller. */
1326 int ff_find_unused_picture(MpegEncContext *s, int shared)
1328 int ret = find_unused_picture(s, shared);
1330 if (ret >= 0 && ret < s->picture_range_end) {
1331 if (s->picture[ret].needs_realloc) {
1332 s->picture[ret].needs_realloc = 0;
1333 free_picture(s, &s->picture[ret]);
1334 avcodec_get_frame_defaults(&s->picture[ret].f);
/* Recompute the per-coefficient DCT noise-reduction offsets from the
 * accumulated error statistics, separately for intra and inter blocks. */
1340 static void update_noise_reduction(MpegEncContext *s)
1344 for (intra = 0; intra < 2; intra++) {
/* Halve the accumulators once the sample count gets large, so the
 * statistics track recent content instead of growing unbounded. */
1345 if (s->dct_count[intra] > (1 << 16)) {
1346 for (i = 0; i < 64; i++) {
1347 s->dct_error_sum[intra][i] >>= 1;
1349 s->dct_count[intra] >>= 1;
/* offset = strength * count / error, rounded; +1 avoids div by 0. */
1352 for (i = 0; i < 64; i++) {
1353 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1354 s->dct_count[intra] +
1355 s->dct_error_sum[intra][i] / 2) /
1356 (s->dct_error_sum[intra][i] + 1);
1362 * generic function for encode/decode called after coding/decoding
1363 * the header and before a frame is coded/decoded.
1365 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1371 /* mark & release old frames */
1372 if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
1373 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1374 s->last_picture_ptr != s->next_picture_ptr &&
1375 s->last_picture_ptr->f.data[0]) {
1376 if (s->last_picture_ptr->owner2 == s)
1377 free_frame_buffer(s, s->last_picture_ptr);
1380 /* release forgotten pictures */
1381 /* if (mpeg124/h263) */
/* A referenced picture that is neither last nor next should not
 * exist; free it (expected under frame threading, an error otherwise). */
1383 for (i = 0; i < s->picture_count; i++) {
1384 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1385 &s->picture[i] != s->last_picture_ptr &&
1386 &s->picture[i] != s->next_picture_ptr &&
1387 s->picture[i].f.reference && !s->picture[i].needs_realloc) {
1388 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1389 av_log(avctx, AV_LOG_ERROR,
1390 "releasing zombie picture\n");
1391 free_frame_buffer(s, &s->picture[i]);
1398 ff_release_unused_pictures(s, 1);
/* Select (or reuse) the Picture slot for the frame about to be coded. */
1400 if (s->current_picture_ptr &&
1401 s->current_picture_ptr->f.data[0] == NULL) {
1402 // we already have a unused image
1403 // (maybe it was set before reading the header)
1404 pic = s->current_picture_ptr;
1406 i = ff_find_unused_picture(s, 0);
1408 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1411 pic = &s->picture[i];
/* Reference flag: 3 = both fields; H.264 uses the picture structure. */
1414 pic->f.reference = 0;
1415 if (!s->droppable) {
1416 if (s->codec_id == AV_CODEC_ID_H264)
1417 pic->f.reference = s->picture_structure;
1418 else if (s->pict_type != AV_PICTURE_TYPE_B)
1419 pic->f.reference = 3;
1422 pic->f.coded_picture_number = s->coded_picture_number++;
1424 if (ff_alloc_picture(s, pic, 0) < 0)
1427 s->current_picture_ptr = pic;
1428 // FIXME use only the vars from current_pic
1429 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1430 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1431 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1432 if (s->picture_structure != PICT_FRAME)
1433 s->current_picture_ptr->f.top_field_first =
1434 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1436 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1437 !s->progressive_sequence;
1438 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1441 s->current_picture_ptr->f.pict_type = s->pict_type;
1442 // if (s->flags && CODEC_FLAG_QSCALE)
1443 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1444 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1446 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* Non-B frames advance the last/next reference chain. */
1448 if (s->pict_type != AV_PICTURE_TYPE_B) {
1449 s->last_picture_ptr = s->next_picture_ptr;
1451 s->next_picture_ptr = s->current_picture_ptr;
1453 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1454 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1455 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1456 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1457 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1458 s->pict_type, s->droppable);
/* If references are missing (broken stream / field-based start),
 * allocate dummy frames so motion compensation has something to read. */
1460 if (s->codec_id != AV_CODEC_ID_H264) {
1461 if ((s->last_picture_ptr == NULL ||
1462 s->last_picture_ptr->f.data[0] == NULL) &&
1463 (s->pict_type != AV_PICTURE_TYPE_I ||
1464 s->picture_structure != PICT_FRAME)) {
1465 if (s->pict_type != AV_PICTURE_TYPE_I)
1466 av_log(avctx, AV_LOG_ERROR,
1467 "warning: first frame is no keyframe\n");
1468 else if (s->picture_structure != PICT_FRAME)
1469 av_log(avctx, AV_LOG_INFO,
1470 "allocate dummy last picture for field based first keyframe\n");
1472 /* Allocate a dummy frame */
1473 i = ff_find_unused_picture(s, 0);
1475 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1478 s->last_picture_ptr = &s->picture[i];
1479 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1480 s->last_picture_ptr = NULL;
/* Mark both fields complete so frame threads never wait on it. */
1483 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1484 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1485 s->last_picture_ptr->f.reference = 3;
1487 if ((s->next_picture_ptr == NULL ||
1488 s->next_picture_ptr->f.data[0] == NULL) &&
1489 s->pict_type == AV_PICTURE_TYPE_B) {
1490 /* Allocate a dummy frame */
1491 i = ff_find_unused_picture(s, 0);
1493 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1496 s->next_picture_ptr = &s->picture[i];
1497 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1498 s->next_picture_ptr = NULL;
1501 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1502 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1503 s->next_picture_ptr->f.reference = 3;
1507 if (s->last_picture_ptr)
1508 ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1509 if (s->next_picture_ptr)
1510 ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1512 if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
1513 if (s->next_picture_ptr)
1514 s->next_picture_ptr->owner2 = s;
1515 if (s->last_picture_ptr)
1516 s->last_picture_ptr->owner2 = s;
1519 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1520 s->last_picture_ptr->f.data[0]));
/* Field pictures: point data at the right field and double the
 * strides so each field is addressed as a half-height frame. */
1522 if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1524 for (i = 0; i < 4; i++) {
1525 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1526 s->current_picture.f.data[i] +=
1527 s->current_picture.f.linesize[i];
1529 s->current_picture.f.linesize[i] *= 2;
1530 s->last_picture.f.linesize[i] *= 2;
1531 s->next_picture.f.linesize[i] *= 2;
1535 s->err_recognition = avctx->err_recognition;
1537 /* set dequantizer, we can't do it during init as
1538 * it might change for mpeg4 and we can't do it in the header
1539 * decode as init is not called for mpeg4 there yet */
1540 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1541 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1542 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1543 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1544 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1545 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1547 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1548 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1551 if (s->dct_error_sum) {
1552 assert(s->avctx->noise_reduction && s->encoding);
1553 update_noise_reduction(s);
1556 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1557 return ff_xvmc_field_start(s, avctx);
1562 /* generic function for encode/decode called after a
1563 * frame has been coded/decoded. */
1564 void ff_MPV_frame_end(MpegEncContext *s)
1567 /* redraw edges for the frame if decoding didn't complete */
1568 // just to make sure that all data is rendered.
1569 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1570 ff_xvmc_field_end(s);
/* Pad out the picture edges (needed by unrestricted MV prediction)
 * unless a hwaccel owns the buffers or edge emulation is requested. */
1571 } else if ((s->error_count || s->encoding) &&
1572 !s->avctx->hwaccel &&
1573 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1574 s->unrestricted_mv &&
1575 s->current_picture.f.reference &&
1577 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1578 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1579 int hshift = desc->log2_chroma_w;
1580 int vshift = desc->log2_chroma_h;
1581 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1582 s->h_edge_pos, s->v_edge_pos,
1583 EDGE_WIDTH, EDGE_WIDTH,
1584 EDGE_TOP | EDGE_BOTTOM);
1585 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1586 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1587 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1588 EDGE_TOP | EDGE_BOTTOM);
1589 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1590 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1591 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1592 EDGE_TOP | EDGE_BOTTOM);
/* Remember per-type state for rate control / next frame decisions. */
1597 s->last_pict_type = s->pict_type;
1598 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1599 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1600 s->last_non_b_pict_type = s->pict_type;
1603 /* copy back current_picture variables */
1604 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1605 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1606 s->picture[i] = s->current_picture;
1610 assert(i < MAX_PICTURE_COUNT);
1614 /* release non-reference frames */
1615 for (i = 0; i < s->picture_count; i++) {
1616 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1617 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1618 free_frame_buffer(s, &s->picture[i]);
1622 // clear copies, to avoid confusion
1624 memset(&s->last_picture, 0, sizeof(Picture));
1625 memset(&s->next_picture, 0, sizeof(Picture));
1626 memset(&s->current_picture, 0, sizeof(Picture));
1628 s->avctx->coded_frame = &s->current_picture_ptr->f;
/* Signal frame threads that this reference is fully decoded. */
1630 if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
1631 ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
1636 * Draw a line from (ex, ey) -> (sx, sy).
1637 * @param w width of the image
1638 * @param h height of the image
1639 * @param stride stride/linesize of the image
1640 * @param color color of the arrow
1642 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1643 int w, int h, int stride, int color)
/* Clip both endpoints into the image so all writes stay in bounds. */
1647 sx = av_clip(sx, 0, w - 1);
1648 sy = av_clip(sy, 0, h - 1);
1649 ex = av_clip(ex, 0, w - 1);
1650 ey = av_clip(ey, 0, h - 1);
1652 buf[sy * stride + sx] += color;
/* Step along the dominant axis; 16.16 fixed-point slope f, with the
 * fractional part fr splitting the color between adjacent pixels
 * (simple anti-aliasing). */
1654 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1656 FFSWAP(int, sx, ex);
1657 FFSWAP(int, sy, ey);
1659 buf += sx + sy * stride;
1661 f = ((ey - sy) << 16) / ex;
1662 for (x = 0; x <= ex; x++) {
1664 fr = (x * f) & 0xFFFF;
1665 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1666 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1670 FFSWAP(int, sx, ex);
1671 FFSWAP(int, sy, ey);
1673 buf += sx + sy * stride;
1676 f = ((ex - sx) << 16) / ey;
/* Fix: loop condition was an assignment (y = ey), which never
 * terminates for ey != 0; must be <= to mirror the x-major branch. */
1679 for (y = 0; y <= ey; y++) {
1681 fr = (y * f) & 0xFFFF;
1682 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1683 buf[y * stride + x + 1] += (color * fr ) >> 16;
1689 * Draw an arrow from (ex, ey) -> (sx, sy).
1690 * @param w width of the image
1691 * @param h height of the image
1692 * @param stride stride/linesize of the image
1693 * @param color color of the arrow
1695 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1696 int ey, int w, int h, int stride, int color)
/* Loose clip (+/-100 beyond the frame) keeps the arithmetic sane;
 * draw_line() does the exact in-bounds clipping. */
1700 sx = av_clip(sx, -100, w + 100);
1701 sy = av_clip(sy, -100, h + 100);
1702 ex = av_clip(ex, -100, w + 100);
1703 ey = av_clip(ey, -100, h + 100);
/* Only draw an arrow head for vectors longer than 3 pixels. */
1708 if (dx * dx + dy * dy > 3 * 3) {
1711 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1713 // FIXME subpixel accuracy
1714 rx = ROUNDED_DIV(rx * 3 << 4, length);
1715 ry = ROUNDED_DIV(ry * 3 << 4, length);
/* Two short strokes at the start point form the head. */
1717 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1718 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1720 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1724 * Print debugging info for the given picture.
1726 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
1728 if (s->avctx->hwaccel || !pict || !pict->mb_type)
/* Textual per-macroblock dump: skip counts, qscale, and mb type/MV
 * direction glyphs, one row of characters per MB row. */
1731 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1734 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1735 switch (pict->pict_type) {
1736 case AV_PICTURE_TYPE_I:
1737 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1739 case AV_PICTURE_TYPE_P:
1740 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1742 case AV_PICTURE_TYPE_B:
1743 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1745 case AV_PICTURE_TYPE_S:
1746 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1748 case AV_PICTURE_TYPE_SI:
1749 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1751 case AV_PICTURE_TYPE_SP:
1752 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1755 for (y = 0; y < s->mb_height; y++) {
1756 for (x = 0; x < s->mb_width; x++) {
1757 if (s->avctx->debug & FF_DEBUG_SKIP) {
1758 int count = s->mbskip_table[x + y * s->mb_stride];
1761 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1763 if (s->avctx->debug & FF_DEBUG_QP) {
1764 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1765 pict->qscale_table[x + y * s->mb_stride]);
1767 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1768 int mb_type = pict->mb_type[x + y * s->mb_stride];
1769 // Type & MV direction
1770 if (IS_PCM(mb_type))
1771 av_log(s->avctx, AV_LOG_DEBUG, "P");
1772 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1773 av_log(s->avctx, AV_LOG_DEBUG, "A");
1774 else if (IS_INTRA4x4(mb_type))
1775 av_log(s->avctx, AV_LOG_DEBUG, "i");
1776 else if (IS_INTRA16x16(mb_type))
1777 av_log(s->avctx, AV_LOG_DEBUG, "I");
1778 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1779 av_log(s->avctx, AV_LOG_DEBUG, "d");
1780 else if (IS_DIRECT(mb_type))
1781 av_log(s->avctx, AV_LOG_DEBUG, "D");
1782 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1783 av_log(s->avctx, AV_LOG_DEBUG, "g");
1784 else if (IS_GMC(mb_type))
1785 av_log(s->avctx, AV_LOG_DEBUG, "G");
1786 else if (IS_SKIP(mb_type))
1787 av_log(s->avctx, AV_LOG_DEBUG, "S");
1788 else if (!USES_LIST(mb_type, 1))
1789 av_log(s->avctx, AV_LOG_DEBUG, ">");
1790 else if (!USES_LIST(mb_type, 0))
1791 av_log(s->avctx, AV_LOG_DEBUG, "<");
1793 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1794 av_log(s->avctx, AV_LOG_DEBUG, "X");
1798 if (IS_8X8(mb_type))
1799 av_log(s->avctx, AV_LOG_DEBUG, "+");
1800 else if (IS_16X8(mb_type))
1801 av_log(s->avctx, AV_LOG_DEBUG, "-");
1802 else if (IS_8X16(mb_type))
1803 av_log(s->avctx, AV_LOG_DEBUG, "|");
1804 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1805 av_log(s->avctx, AV_LOG_DEBUG, " ");
1807 av_log(s->avctx, AV_LOG_DEBUG, "?");
1810 if (IS_INTERLACED(mb_type))
1811 av_log(s->avctx, AV_LOG_DEBUG, "=");
1813 av_log(s->avctx, AV_LOG_DEBUG, " ");
1816 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* Visual overlays: draw MVs / QP / MB-type directly onto a copy of
 * the picture (visualization_buffer) so the decode buffers survive. */
1820 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1821 (s->avctx->debug_mv)) {
1822 const int shift = 1 + s->quarter_sample;
1826 int h_chroma_shift, v_chroma_shift, block_height;
1827 const int width = s->avctx->width;
1828 const int height = s->avctx->height;
1829 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1830 const int mv_stride = (s->mb_width << mv_sample_log2) +
1831 (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
1832 s->low_delay = 0; // needed to see the vectors without trashing the buffers
1834 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1835 &h_chroma_shift, &v_chroma_shift);
1836 for (i = 0; i < 3; i++) {
1837 memcpy(s->visualization_buffer[i], pict->data[i],
1838 (i == 0) ? pict->linesize[i] * height:
1839 pict->linesize[i] * height >> v_chroma_shift);
1840 pict->data[i] = s->visualization_buffer[i];
1842 pict->type = FF_BUFFER_TYPE_COPY;
1843 ptr = pict->data[0];
1844 block_height = 16 >> v_chroma_shift;
1846 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1848 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1849 const int mb_index = mb_x + mb_y * s->mb_stride;
1850 if ((s->avctx->debug_mv) && pict->motion_val) {
1852 for (type = 0; type < 3; type++) {
1856 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1857 (pict->pict_type!= AV_PICTURE_TYPE_P))
1862 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1863 (pict->pict_type!= AV_PICTURE_TYPE_B))
1868 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1869 (pict->pict_type!= AV_PICTURE_TYPE_B))
1874 if (!USES_LIST(pict->mb_type[mb_index], direction))
1877 if (IS_8X8(pict->mb_type[mb_index])) {
1879 for (i = 0; i < 4; i++) {
1880 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1881 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1882 int xy = (mb_x * 2 + (i & 1) +
1883 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1884 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1885 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1886 draw_arrow(ptr, sx, sy, mx, my, width,
1887 height, s->linesize, 100);
1889 } else if (IS_16X8(pict->mb_type[mb_index])) {
1891 for (i = 0; i < 2; i++) {
1892 int sx = mb_x * 16 + 8;
1893 int sy = mb_y * 16 + 4 + 8 * i;
1894 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1895 int mx = (pict->motion_val[direction][xy][0] >> shift);
1896 int my = (pict->motion_val[direction][xy][1] >> shift);
1898 if (IS_INTERLACED(pict->mb_type[mb_index]))
1901 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1902 height, s->linesize, 100);
1904 } else if (IS_8X16(pict->mb_type[mb_index])) {
1906 for (i = 0; i < 2; i++) {
1907 int sx = mb_x * 16 + 4 + 8 * i;
1908 int sy = mb_y * 16 + 8;
1909 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1910 int mx = pict->motion_val[direction][xy][0] >> shift;
1911 int my = pict->motion_val[direction][xy][1] >> shift;
1913 if (IS_INTERLACED(pict->mb_type[mb_index]))
1916 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1917 height, s->linesize, 100);
1920 int sx = mb_x * 16 + 8;
1921 int sy = mb_y * 16 + 8;
1922 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
/* Fix: `>> shift + sx` parses as `>> (shift + sx)` because
 * additive binds tighter than shift in C; parenthesize to match
 * the 8x8 / 16x8 branches above. */
1923 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1924 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1925 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1929 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1930 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1931 0x0101010101010101ULL;
1933 for (y = 0; y < block_height; y++) {
1934 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1935 (block_height * mb_y + y) *
1936 pict->linesize[1]) = c;
1937 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1938 (block_height * mb_y + y) *
1939 pict->linesize[2]) = c;
1942 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1944 int mb_type = pict->mb_type[mb_index];
1947 #define COLOR(theta, r) \
1948 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1949 v = (int)(128 + r * sin(theta * 3.141592 / 180));
1953 if (IS_PCM(mb_type)) {
1955 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1956 IS_INTRA16x16(mb_type)) {
1958 } else if (IS_INTRA4x4(mb_type)) {
1960 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1962 } else if (IS_DIRECT(mb_type)) {
1964 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1966 } else if (IS_GMC(mb_type)) {
1968 } else if (IS_SKIP(mb_type)) {
1970 } else if (!USES_LIST(mb_type, 1)) {
1972 } else if (!USES_LIST(mb_type, 0)) {
1975 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1979 u *= 0x0101010101010101ULL;
1980 v *= 0x0101010101010101ULL;
1981 for (y = 0; y < block_height; y++) {
1982 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1983 (block_height * mb_y + y) * pict->linesize[1]) = u;
1984 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1985 (block_height * mb_y + y) * pict->linesize[2]) = v;
1989 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1990 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1991 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1992 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1993 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1995 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1996 for (y = 0; y < 16; y++)
1997 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1998 pict->linesize[0]] ^= 0x80;
2000 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2001 int dm = 1 << (mv_sample_log2 - 2);
2002 for (i = 0; i < 4; i++) {
2003 int sx = mb_x * 16 + 8 * (i & 1);
2004 int sy = mb_y * 16 + 8 * (i >> 1);
2005 int xy = (mb_x * 2 + (i & 1) +
2006 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2008 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
2009 if (mv[0] != mv[dm] ||
2010 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2011 for (y = 0; y < 8; y++)
2012 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2013 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2014 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2015 pict->linesize[0]) ^= 0x8080808080808080ULL;
2019 if (IS_INTERLACED(mb_type) &&
2020 s->codec_id == AV_CODEC_ID_H264) {
2024 s->mbskip_table[mb_index] = 0;
2031 * find the lowest MB row referenced in the MVs
/* Used by frame threading to know how far the reference frame must be
 * decoded before this MB's motion compensation can run. */
2033 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* qpel MVs already have double resolution; halve the extra shift. */
2035 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2036 int my, off, i, mvs;
2038 if (s->picture_structure != PICT_FRAME || s->mcsel)
2041 switch (s->mv_type) {
2055 for (i = 0; i < mvs; i++) {
2056 my = s->mv[dir][i][1]<<qpel_shift;
2057 my_max = FFMAX(my_max, my);
2058 my_min = FFMIN(my_min, my);
/* Convert the widest vertical displacement (1/4-pel units, +63 for
 * rounding and the interpolation margin) into whole MB rows. */
2061 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2063 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* Fallback: be conservative and require the whole reference frame. */
2065 return s->mb_height-1;
2068 /* put block[] to dest[] */
/* Dequantize an intra block and write (overwrite) its IDCT to dest. */
2069 static inline void put_dct(MpegEncContext *s,
2070 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2072 s->dct_unquantize_intra(s, block, i, qscale);
2073 s->dsp.idct_put (dest, line_size, block);
2076 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block to dest; a negative
 * block_last_index means the block is empty and is skipped. */
2077 static inline void add_dct(MpegEncContext *s,
2078 int16_t *block, int i, uint8_t *dest, int line_size)
2080 if (s->block_last_index[i] >= 0) {
2081 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block and add its IDCT to dest; skipped when the
 * block has no coefficients (block_last_index < 0). */
2085 static inline void add_dequant_dct(MpegEncContext *s,
2086 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2088 if (s->block_last_index[i] >= 0) {
2089 s->dct_unquantize_inter(s, block, i, qscale);
2091 s->dsp.idct_add (dest, line_size, block);
2096 * Clean dc, ac, coded_block for the current non-intra MB.
/* Reset the intra prediction state (DC predictors to 1024, AC
 * predictors to 0, coded_block flags) so a following intra MB does not
 * predict from this non-intra one. */
2098 void ff_clean_intra_table_entries(MpegEncContext *s)
/* Luma: four 8x8 blocks addressed on the b8 (8x8-block) grid. */
2100 int wrap = s->b8_stride;
2101 int xy = s->block_index[0];
2104 s->dc_val[0][xy + 1 ] =
2105 s->dc_val[0][xy + wrap] =
2106 s->dc_val[0][xy + 1 + wrap] = 1024;
2108 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2109 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2110 if (s->msmpeg4_version>=3) {
2111 s->coded_block[xy ] =
2112 s->coded_block[xy + 1 ] =
2113 s->coded_block[xy + wrap] =
2114 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma: one entry per MB, addressed on the MB grid. */
2117 wrap = s->mb_stride;
2118 xy = s->mb_x + s->mb_y * wrap;
2120 s->dc_val[2][xy] = 1024;
2122 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2123 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2125 s->mbintra_table[xy]= 0;
2128 /* generic function called after a macroblock has been parsed by the
2129 decoder or after it has been encoded by the encoder.
2131 Important variables used:
2132 s->mb_intra : true if intra macroblock
2133 s->mv_dir : motion vector direction
2134 s->mv_type : motion vector type
2135 s->mv : motion vector
2136 s->interlaced_dct : true if interlaced dct used (mpeg2)
2138 static av_always_inline
2139 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2142 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2143 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2144 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2148 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2149 /* save DCT coefficients */
2151 int16_t *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2152 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2154 for(j=0; j<64; j++){
2155 *dct++ = block[i][s->dsp.idct_permutation[j]];
2156 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2158 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2162 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2164 /* update DC predictors for P macroblocks */
2166 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2167 if(s->mbintra_table[mb_xy])
2168 ff_clean_intra_table_entries(s);
2172 s->last_dc[2] = 128 << s->intra_dc_precision;
2175 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2176 s->mbintra_table[mb_xy]=1;
2178 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2179 uint8_t *dest_y, *dest_cb, *dest_cr;
2180 int dct_linesize, dct_offset;
2181 op_pixels_func (*op_pix)[4];
2182 qpel_mc_func (*op_qpix)[16];
2183 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2184 const int uvlinesize = s->current_picture.f.linesize[1];
2185 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2186 const int block_size = 8;
2188 /* avoid copy if macroblock skipped in last frame too */
2189 /* skip only during decoding as we might trash the buffers during encoding a bit */
2191 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2193 if (s->mb_skipped) {
2195 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2197 } else if(!s->current_picture.f.reference) {
2200 *mbskip_ptr = 0; /* not skipped */
2204 dct_linesize = linesize << s->interlaced_dct;
2205 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2209 dest_cb= s->dest[1];
2210 dest_cr= s->dest[2];
2212 dest_y = s->b_scratchpad;
2213 dest_cb= s->b_scratchpad+16*linesize;
2214 dest_cr= s->b_scratchpad+32*linesize;
2218 /* motion handling */
2219 /* decoding or more than one mb_type (MC was already done otherwise) */
2222 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2223 if (s->mv_dir & MV_DIR_FORWARD) {
2224 ff_thread_await_progress(&s->last_picture_ptr->f,
2225 ff_MPV_lowest_referenced_row(s, 0),
2228 if (s->mv_dir & MV_DIR_BACKWARD) {
2229 ff_thread_await_progress(&s->next_picture_ptr->f,
2230 ff_MPV_lowest_referenced_row(s, 1),
2235 op_qpix= s->me.qpel_put;
2236 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2237 op_pix = s->dsp.put_pixels_tab;
2239 op_pix = s->dsp.put_no_rnd_pixels_tab;
2241 if (s->mv_dir & MV_DIR_FORWARD) {
2242 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2243 op_pix = s->dsp.avg_pixels_tab;
2244 op_qpix= s->me.qpel_avg;
2246 if (s->mv_dir & MV_DIR_BACKWARD) {
2247 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2251 /* skip dequant / idct if we are really late ;) */
2252 if(s->avctx->skip_idct){
2253 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2254 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2255 || s->avctx->skip_idct >= AVDISCARD_ALL)
2259 /* add dct residue */
2260 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2261 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2262 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2263 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2264 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2265 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2267 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2268 if (s->chroma_y_shift){
2269 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2270 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2274 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2275 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2276 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2277 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2280 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2281 add_dct(s, block[0], 0, dest_y , dct_linesize);
2282 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2283 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2284 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2286 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2287 if(s->chroma_y_shift){//Chroma420
2288 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2289 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2292 dct_linesize = uvlinesize << s->interlaced_dct;
2293 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2295 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2296 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2297 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2298 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2299 if(!s->chroma_x_shift){//Chroma444
2300 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2301 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2302 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2303 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2308 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2309 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2312 /* dct only in intra block */
2313 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2314 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2315 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2316 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2317 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2319 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2320 if(s->chroma_y_shift){
2321 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2322 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2326 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2327 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2328 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2329 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2333 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2334 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2335 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2336 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2338 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2339 if(s->chroma_y_shift){
2340 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2341 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2344 dct_linesize = uvlinesize << s->interlaced_dct;
2345 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2347 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2348 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2349 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2350 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2351 if(!s->chroma_x_shift){//Chroma444
2352 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2353 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2354 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2355 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2363 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2364 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2365 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2370 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2372 if(s->out_format == FMT_MPEG1) {
2373 MPV_decode_mb_internal(s, block, 1);
2376 MPV_decode_mb_internal(s, block, 0);
2380 * @param h is the normal height, this will be reduced automatically if needed for the last row
2382 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2383 const int field_pic= s->picture_structure != PICT_FRAME;
2389 if (!s->avctx->hwaccel
2390 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2391 && s->unrestricted_mv
2392 && s->current_picture.f.reference
2394 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2395 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
2396 int sides = 0, edge_h;
2397 int hshift = desc->log2_chroma_w;
2398 int vshift = desc->log2_chroma_h;
2399 if (y==0) sides |= EDGE_TOP;
2400 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2402 edge_h= FFMIN(h, s->v_edge_pos - y);
2404 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2405 s->linesize, s->h_edge_pos, edge_h,
2406 EDGE_WIDTH, EDGE_WIDTH, sides);
2407 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2408 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2409 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2410 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2411 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2412 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2415 h= FFMIN(h, s->avctx->height - y);
2417 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2419 if (s->avctx->draw_horiz_band) {
2421 int offset[AV_NUM_DATA_POINTERS];
2424 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2425 src = &s->current_picture_ptr->f;
2426 else if(s->last_picture_ptr)
2427 src = &s->last_picture_ptr->f;
2431 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2432 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2435 offset[0]= y * s->linesize;
2437 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2438 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2444 s->avctx->draw_horiz_band(s->avctx, src, offset,
2445 y, s->picture_structure, h);
2449 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2450 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2451 const int uvlinesize = s->current_picture.f.linesize[1];
2452 const int mb_size= 4;
2454 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2455 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2456 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2457 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2458 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2459 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2460 //block_index is not used by mpeg2, so it is not affected by chroma_format
2462 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2463 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2464 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2466 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2468 if(s->picture_structure==PICT_FRAME){
2469 s->dest[0] += s->mb_y * linesize << mb_size;
2470 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2471 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2473 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2474 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2475 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2476 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2481 void ff_mpeg_flush(AVCodecContext *avctx){
2483 MpegEncContext *s = avctx->priv_data;
2485 if(s==NULL || s->picture==NULL)
2488 for(i=0; i<s->picture_count; i++){
2489 if (s->picture[i].f.data[0] &&
2490 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2491 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2492 free_frame_buffer(s, &s->picture[i]);
2494 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2496 s->mb_x= s->mb_y= 0;
2498 s->parse_context.state= -1;
2499 s->parse_context.frame_start_found= 0;
2500 s->parse_context.overread= 0;
2501 s->parse_context.overread_index= 0;
2502 s->parse_context.index= 0;
2503 s->parse_context.last_index= 0;
2504 s->bitstream_buffer_size=0;
2508 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2509 int16_t *block, int n, int qscale)
2511 int i, level, nCoeffs;
2512 const uint16_t *quant_matrix;
2514 nCoeffs= s->block_last_index[n];
2517 block[0] = block[0] * s->y_dc_scale;
2519 block[0] = block[0] * s->c_dc_scale;
2520 /* XXX: only mpeg1 */
2521 quant_matrix = s->intra_matrix;
2522 for(i=1;i<=nCoeffs;i++) {
2523 int j= s->intra_scantable.permutated[i];
2528 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2529 level = (level - 1) | 1;
2532 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2533 level = (level - 1) | 1;
2540 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2541 int16_t *block, int n, int qscale)
2543 int i, level, nCoeffs;
2544 const uint16_t *quant_matrix;
2546 nCoeffs= s->block_last_index[n];
2548 quant_matrix = s->inter_matrix;
2549 for(i=0; i<=nCoeffs; i++) {
2550 int j= s->intra_scantable.permutated[i];
2555 level = (((level << 1) + 1) * qscale *
2556 ((int) (quant_matrix[j]))) >> 4;
2557 level = (level - 1) | 1;
2560 level = (((level << 1) + 1) * qscale *
2561 ((int) (quant_matrix[j]))) >> 4;
2562 level = (level - 1) | 1;
2569 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2570 int16_t *block, int n, int qscale)
2572 int i, level, nCoeffs;
2573 const uint16_t *quant_matrix;
2575 if(s->alternate_scan) nCoeffs= 63;
2576 else nCoeffs= s->block_last_index[n];
2579 block[0] = block[0] * s->y_dc_scale;
2581 block[0] = block[0] * s->c_dc_scale;
2582 quant_matrix = s->intra_matrix;
2583 for(i=1;i<=nCoeffs;i++) {
2584 int j= s->intra_scantable.permutated[i];
2589 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2592 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2599 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2600 int16_t *block, int n, int qscale)
2602 int i, level, nCoeffs;
2603 const uint16_t *quant_matrix;
2606 if(s->alternate_scan) nCoeffs= 63;
2607 else nCoeffs= s->block_last_index[n];
2610 block[0] = block[0] * s->y_dc_scale;
2612 block[0] = block[0] * s->c_dc_scale;
2613 quant_matrix = s->intra_matrix;
2614 for(i=1;i<=nCoeffs;i++) {
2615 int j= s->intra_scantable.permutated[i];
2620 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2623 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2632 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2633 int16_t *block, int n, int qscale)
2635 int i, level, nCoeffs;
2636 const uint16_t *quant_matrix;
2639 if(s->alternate_scan) nCoeffs= 63;
2640 else nCoeffs= s->block_last_index[n];
2642 quant_matrix = s->inter_matrix;
2643 for(i=0; i<=nCoeffs; i++) {
2644 int j= s->intra_scantable.permutated[i];
2649 level = (((level << 1) + 1) * qscale *
2650 ((int) (quant_matrix[j]))) >> 4;
2653 level = (((level << 1) + 1) * qscale *
2654 ((int) (quant_matrix[j]))) >> 4;
2663 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2664 int16_t *block, int n, int qscale)
2666 int i, level, qmul, qadd;
2669 assert(s->block_last_index[n]>=0);
2675 block[0] = block[0] * s->y_dc_scale;
2677 block[0] = block[0] * s->c_dc_scale;
2678 qadd = (qscale - 1) | 1;
2685 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2687 for(i=1; i<=nCoeffs; i++) {
2691 level = level * qmul - qadd;
2693 level = level * qmul + qadd;
2700 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2701 int16_t *block, int n, int qscale)
2703 int i, level, qmul, qadd;
2706 assert(s->block_last_index[n]>=0);
2708 qadd = (qscale - 1) | 1;
2711 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2713 for(i=0; i<=nCoeffs; i++) {
2717 level = level * qmul - qadd;
2719 level = level * qmul + qadd;
2727 * set qscale and update qscale dependent variables.
2729 void ff_set_qscale(MpegEncContext * s, int qscale)
2733 else if (qscale > 31)
2737 s->chroma_qscale= s->chroma_qscale_table[qscale];
2739 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2740 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2743 void ff_MPV_report_decode_progress(MpegEncContext *s)
2745 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2746 ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);