2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
38 #include "xvmc_internal.h"
/* Forward declarations of the per-standard inverse-quantization helpers.
 * ff_dct_common_init() below installs these as function pointers on
 * MpegEncContext (with a bitexact variant substituted for MPEG-2 intra
 * when CODEC_FLAG_BITEXACT is set). */
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46 int16_t *block, int n, int qscale);
47 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
48 int16_t *block, int n, int qscale);
49 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
50 int16_t *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
52 int16_t *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
54 int16_t *block, int n, int qscale);
55 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
56 int16_t *block, int n, int qscale);
57 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
58 int16_t *block, int n, int qscale);
61 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default luma-to-chroma qscale mapping: the identity (chroma qscale ==
 * luma qscale) for all 32 possible qscale values. */
67 static const uint8_t ff_default_chroma_qscale_table[32] = {
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
70 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: a constant 8 for every qscale (128 entries). */
73 const uint8_t ff_mpeg1_dc_scale_table[128] = {
74 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale, intra_dc_precision = 1: constant 4 for all 128 qscales. */
85 static const uint8_t mpeg2_dc_scale_table1[128] = {
86 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale, intra_dc_precision = 2: constant 2 for all 128 qscales. */
97 static const uint8_t mpeg2_dc_scale_table2[128] = {
98 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale, intra_dc_precision = 3: constant 1 for all 128 qscales. */
109 static const uint8_t mpeg2_dc_scale_table3[128] = {
110 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale tables indexed by MPEG-2 intra_dc_precision (0..3); index 0
 * reuses the MPEG-1 table (all 8s). */
121 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
122 ff_mpeg1_dc_scale_table,
123 mpeg2_dc_scale_table1,
124 mpeg2_dc_scale_table2,
125 mpeg2_dc_scale_table3,
/* Default pixel format list for plain 4:2:0 decoders.
 * NOTE(review): initializer entries are not visible in this chunk —
 * presumably AV_PIX_FMT_YUV420P plus an AV_PIX_FMT_NONE sentinel; confirm
 * against the full source. */
128 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Pixel format candidates for hwaccel-capable 4:2:0 decoders (DXVA2 and
 * VA-API surface formats visible here).
 * NOTE(review): additional entries and the terminating sentinel are not
 * visible in this chunk — confirm against the full source. */
133 const enum AVPixelFormat ff_hwaccel_pixfmt_list_420[] = {
135 AV_PIX_FMT_DXVA2_VLD,
138 AV_PIX_FMT_VAAPI_VLD,
/* Scan [p, end) for an MPEG start code (00 00 01 xx), maintaining a rolling
 * 32-bit shift register in *state across calls so a start code split over
 * buffer boundaries is still found. Returns a pointer just past the start
 * code, or (per the FFMIN clamp below) a position near `end` if none found. */
150 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
152 uint32_t * restrict state)
/* Prime the shift register with up to 3 bytes; tmp == 0x100 means the
 * register already holds 00 00 01. */
160 for (i = 0; i < 3; i++) {
161 uint32_t tmp = *state << 8;
162 *state = tmp + *(p++);
163 if (tmp == 0x100 || p == end)
/* Skip ahead based on the last bytes examined: none of them can begin a
 * 00 00 01 prefix, so advance 1-3 bytes at a time. */
168 if (p[-1] > 1 ) p += 3;
169 else if (p[-2] ) p += 2;
170 else if (p[-3]|(p[-1]-1)) p++;
/* Clamp to the buffer end; -4 rewinds so the tail bytes get re-examined
 * (and fed into *state) on the next pass/call. */
177 p = FFMIN(p, end) - 4;
183 /* init common dct for both encoder and decoder */
184 av_cold int ff_dct_common_init(MpegEncContext *s)
186 ff_dsputil_init(&s->dsp, s->avctx);
187 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* Install the C reference dequantizers; arch-specific init below may
 * override them with optimized versions. */
189 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
190 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
191 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
192 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
193 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* Bitexact mode needs the spec-exact MPEG-2 intra mismatch handling. */
194 if (s->flags & CODEC_FLAG_BITEXACT)
195 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
196 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* NOTE(review): the per-architecture init calls below are normally wrapped
 * in ARCH_*/HAVE_* preprocessor conditionals; those guard lines are not
 * visible in this chunk — confirm against the full source before editing. */
199 ff_MPV_common_init_x86(s);
201 ff_MPV_common_init_axp(s);
203 ff_MPV_common_init_arm(s);
205 ff_MPV_common_init_altivec(s);
207 ff_MPV_common_init_bfin(s);
210 /* load & permutate scantables
211 * note: only wmv uses different ones
213 if (s->alternate_scan) {
214 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
215 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
217 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
218 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
220 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
221 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy a Picture and mark the copy's buffer as non-owning (COPY type).
 * NOTE(review): the struct copy itself (e.g. `*dst = *src;`) is not visible
 * in this chunk; only the type override is shown — confirm against the full
 * source. */
226 void ff_copy_picture(Picture *dst, Picture *src)
229 dst->f.type = FF_BUFFER_TYPE_COPY;
233 * Release a frame buffer
235 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
237 /* WM Image / Screen codecs allocate internal buffers with different
238 * dimensions / colorspaces; ignore user-defined callbacks for these. */
239 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
240 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
241 s->codec_id != AV_CODEC_ID_MSS2)
242 ff_thread_release_buffer(s->avctx, &pic->f);
/* NOTE(review): this default-release call is presumably the else-branch for
 * the image codecs above; the `else` line is not visible in this chunk. */
244 avcodec_default_release_buffer(s->avctx, &pic->f);
/* Drop any hwaccel-private state attached to the frame. */
245 av_freep(&pic->f.hwaccel_picture_private);
/* Allocate the linesize-dependent scratch buffers (edge emulation buffer and
 * the motion-estimation / RD / OBMC scratchpads, which all alias one
 * allocation). Returns 0 on success, AVERROR(ENOMEM) on failure. */
248 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
250 int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
252 // edge emu needs blocksize + filter length - 1
253 // (= 17x17 for halfpel / 21x21 for h264)
254 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
255 // at uvlinesize. It supports only YUV420 so 24x24 is enough
256 // linesize * interlaced * MBsize
257 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
260 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 2,
/* The four scratchpads share the single me.scratchpad allocation; only the
 * OBMC pad is offset (by 16 bytes) within it. */
262 s->me.temp = s->me.scratchpad;
263 s->rd_scratchpad = s->me.scratchpad;
264 s->b_scratchpad = s->me.scratchpad;
265 s->obmc_scratchpad = s->me.scratchpad + 16;
/* On failure, release the edge buffer so the context stays consistent. */
269 av_freep(&s->edge_emu_buffer);
270 return AVERROR(ENOMEM);
274 * Allocate a frame buffer
276 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* Allocate hwaccel-private per-frame state first, if the hwaccel needs it. */
280 if (s->avctx->hwaccel) {
281 assert(!pic->f.hwaccel_picture_private);
282 if (s->avctx->hwaccel->priv_data_size) {
283 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
284 if (!pic->f.hwaccel_picture_private) {
285 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* WM Image / Screen codecs bypass user get_buffer callbacks (same list as
 * in free_frame_buffer, which must stay in sync). */
291 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
292 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
293 s->codec_id != AV_CODEC_ID_MSS2)
294 r = ff_thread_get_buffer(s->avctx, &pic->f);
296 r = avcodec_default_get_buffer(s->avctx, &pic->f);
298 if (r < 0 || !pic->f.type || !pic->f.data[0]) {
299 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
300 r, pic->f.type, pic->f.data[0]);
301 av_freep(&pic->f.hwaccel_picture_private);
/* The context-wide strides must not change between frames. */
305 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
306 s->uvlinesize != pic->f.linesize[1])) {
307 av_log(s->avctx, AV_LOG_ERROR,
308 "get_buffer() failed (stride changed)\n");
309 free_frame_buffer(s, pic);
/* Both chroma planes must share one stride. */
313 if (pic->f.linesize[1] != pic->f.linesize[2]) {
314 av_log(s->avctx, AV_LOG_ERROR,
315 "get_buffer() failed (uv stride mismatch)\n");
316 free_frame_buffer(s, pic);
/* Lazily (re)create the linesize-dependent scratch buffers. */
320 if (!s->edge_emu_buffer &&
321 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
322 av_log(s->avctx, AV_LOG_ERROR,
323 "get_buffer() failed to allocate context scratch buffers.\n");
324 free_frame_buffer(s, pic);
332 * Allocate a Picture.
333 * The pixels are allocated/set by calling get_buffer() if shared = 0
335 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
337 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
339 // the + 1 is needed so memset(,,stride*height) does not sig11
341 const int mb_array_size = s->mb_stride * s->mb_height;
342 const int b8_array_size = s->b8_stride * s->mb_height * 2;
343 const int b4_array_size = s->b4_stride * s->mb_height * 4;
/* Shared pictures reuse caller-provided pixel data; otherwise the buffer is
 * fetched via alloc_frame_buffer()/get_buffer(). */
348 assert(pic->f.data[0]);
349 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
350 pic->f.type = FF_BUFFER_TYPE_SHARED;
352 assert(!pic->f.data[0]);
354 if (alloc_frame_buffer(s, pic) < 0)
/* First allocated picture fixes the context-wide strides. */
357 s->linesize = pic->f.linesize[0];
358 s->uvlinesize = pic->f.linesize[1];
/* Per-picture side tables are allocated once (qscale_table == NULL). */
361 if (pic->f.qscale_table == NULL) {
/* Encoder-only statistics tables (variance / mc variance / mean per MB). */
363 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
364 mb_array_size * sizeof(int16_t), fail)
365 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
366 mb_array_size * sizeof(int16_t), fail)
367 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
368 mb_array_size * sizeof(int8_t ), fail)
371 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
372 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
373 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
374 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
376 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
377 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
/* Public pointers are offset into the base allocations so that index -1
 * (one row + one column of padding) is addressable. */
379 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
380 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
/* H.264 stores motion vectors at 4x4-block (b4) granularity... */
381 if (s->out_format == FMT_H264) {
382 for (i = 0; i < 2; i++) {
383 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
384 2 * (b4_array_size + 4) * sizeof(int16_t),
386 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
387 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
388 4 * mb_array_size * sizeof(uint8_t), fail)
390 pic->f.motion_subsample_log2 = 2;
/* ...while H.263-family (and encoding / MV debug) uses 8x8-block (b8)
 * granularity. */
391 } else if (s->out_format == FMT_H263 || s->encoding ||
392 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
393 for (i = 0; i < 2; i++) {
394 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
395 2 * (b8_array_size + 4) * sizeof(int16_t),
397 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
398 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
399 4 * mb_array_size * sizeof(uint8_t), fail)
401 pic->f.motion_subsample_log2 = 3;
403 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
404 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
405 64 * mb_array_size * sizeof(int16_t) * 6, fail)
407 pic->f.qstride = s->mb_stride;
408 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
409 1 * sizeof(AVPanScan), fail)
415 fail: // for the FF_ALLOCZ_OR_GOTO macro
417 free_frame_buffer(s, pic);
422 * Deallocate a picture.
424 static void free_picture(MpegEncContext *s, Picture *pic)
/* Release the pixel buffer unless it is caller-owned (SHARED). */
428 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
429 free_frame_buffer(s, pic);
/* Free all per-picture side tables (mirrors ff_alloc_picture()). */
432 av_freep(&pic->mb_var);
433 av_freep(&pic->mc_mb_var);
434 av_freep(&pic->mb_mean);
435 av_freep(&pic->f.mbskip_table);
436 av_freep(&pic->qscale_table_base);
437 pic->f.qscale_table = NULL;
438 av_freep(&pic->mb_type_base);
439 pic->f.mb_type = NULL;
440 av_freep(&pic->f.dct_coeff);
441 av_freep(&pic->f.pan_scan);
/* NOTE(review): redundant — mb_type was already cleared at line 439 above;
 * possibly this line was meant to clear a different field (extraction may
 * have garbled it) — confirm against the full source. */
442 pic->f.mb_type = NULL;
443 for (i = 0; i < 2; i++) {
444 av_freep(&pic->motion_val_base[i]);
445 av_freep(&pic->f.ref_index[i]);
446 pic->f.motion_val[i] = NULL;
/* For shared pictures only the data pointers are detached, never freed. */
449 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
450 for (i = 0; i < 4; i++) {
452 pic->f.data[i] = NULL;
/* Allocate the per-slice-thread state: ME maps, noise-reduction accumulator,
 * DCT block storage, and (for H.263-family) AC prediction values. Returns 0
 * on success, -1 on allocation failure (cleanup deferred to
 * ff_MPV_common_end()). */
458 static int init_duplicate_context(MpegEncContext *s)
460 int y_size = s->b8_stride * (2 * s->mb_height + 1);
461 int c_size = s->mb_stride * (s->mb_height + 1);
462 int yc_size = y_size + 2 * c_size;
/* Scratchpads are allocated lazily later (ff_mpv_frame_size_alloc). */
470 s->obmc_scratchpad = NULL;
473 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
474 ME_MAP_SIZE * sizeof(uint32_t), fail)
475 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
476 ME_MAP_SIZE * sizeof(uint32_t), fail)
477 if (s->avctx->noise_reduction) {
478 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
479 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered. */
482 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
483 s->block = s->blocks[0];
485 for (i = 0; i < 12; i++) {
486 s->pblocks[i] = &s->block[i];
489 if (s->out_format == FMT_H263) {
/* AC prediction values: one 16-entry row per block, Y plane then two
 * chroma planes, offset so index -1 is addressable. */
491 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
492 yc_size * sizeof(int16_t) * 16, fail);
493 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
494 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
495 s->ac_val[2] = s->ac_val[1] + c_size;
500 return -1; // free() through ff_MPV_common_end()
/* Free everything init_duplicate_context() and ff_mpv_frame_size_alloc()
 * allocated for one slice-thread context. */
503 static void free_duplicate_context(MpegEncContext *s)
508 av_freep(&s->edge_emu_buffer);
509 av_freep(&s->me.scratchpad);
/* The other scratchpads alias me.scratchpad, so only clear the pointer. */
513 s->obmc_scratchpad = NULL;
515 av_freep(&s->dct_error_sum);
516 av_freep(&s->me.map);
517 av_freep(&s->me.score_map);
518 av_freep(&s->blocks);
519 av_freep(&s->ac_val_base);
/* Save the per-thread pointers/fields of src into bak so that
 * ff_update_duplicate_context() can memcpy the whole struct and then
 * restore them.
 * NOTE(review): most COPY(...) lines of the field list are not visible in
 * this chunk — confirm the full list against the original source. */
523 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
525 #define COPY(a) bak->a = src->a
526 COPY(edge_emu_buffer);
531 COPY(obmc_scratchpad);
538 COPY(me.map_generation);
/* Refresh a slice-thread context from the master: copy the whole struct but
 * preserve dst's own per-thread buffers (backed up and restored around the
 * memcpy), then rebuild the pblocks pointers and lazily allocate scratch
 * buffers if missing. */
550 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
554 // FIXME copy only needed parts
556 backup_duplicate_context(&bak, dst);
557 memcpy(dst, src, sizeof(MpegEncContext));
558 backup_duplicate_context(dst, &bak);
/* pblocks must point into dst's own block storage, not src's. */
559 for (i = 0; i < 12; i++) {
560 dst->pblocks[i] = &dst->block[i];
562 if (!dst->edge_emu_buffer &&
563 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
564 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
565 "scratch buffers.\n");
568 // STOP_TIMER("update_duplicate_context")
569 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading state transfer: copy decoding state from the source
 * thread's context (s1) into the destination thread's context (s), doing a
 * full init on first use and a frame-size reinit when dimensions changed. */
573 int ff_mpeg_update_thread_context(AVCodecContext *dst,
574 const AVCodecContext *src)
577 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
579 if (dst == src || !s1->context_initialized)
582 // FIXME can parameters change on I-frames?
583 // in that case dst may need a reinit
584 if (!s->context_initialized) {
/* First use: clone the whole context, then give dst its own picture range
 * and bitstream buffer before ff_MPV_common_init allocates the rest. */
585 memcpy(s, s1, sizeof(MpegEncContext));
588 s->picture_range_start += MAX_PICTURE_COUNT;
589 s->picture_range_end += MAX_PICTURE_COUNT;
590 s->bitstream_buffer = NULL;
591 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
593 ff_MPV_common_init(s);
596 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
598 s->context_reinit = 0;
599 s->height = s1->height;
600 s->width = s1->width;
601 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
605 s->avctx->coded_height = s1->avctx->coded_height;
606 s->avctx->coded_width = s1->avctx->coded_width;
607 s->avctx->width = s1->avctx->width;
608 s->avctx->height = s1->avctx->height;
610 s->coded_picture_number = s1->coded_picture_number;
611 s->picture_number = s1->picture_number;
612 s->input_picture_number = s1->input_picture_number;
/* Bulk-copy the picture array and the last/next/current Picture structs
 * (the pointer-difference trick copies everything up to last_picture_ptr). */
614 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
615 memcpy(&s->last_picture, &s1->last_picture,
616 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
618 // reset s->picture[].f.extended_data to s->picture[].f.data
619 for (i = 0; i < s->picture_count; i++)
620 s->picture[i].f.extended_data = s->picture[i].f.data;
/* Pointers into s1->picture[] must be rebased into s->picture[]. */
622 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
623 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
624 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
626 // Error/bug resilience
627 s->next_p_frame_damaged = s1->next_p_frame_damaged;
628 s->workaround_bugs = s1->workaround_bugs;
/* MPEG-4 timing fields: copy the whole contiguous field range at once. */
631 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
632 (char *) &s1->shape - (char *) &s1->time_increment_bits);
635 s->max_b_frames = s1->max_b_frames;
636 s->low_delay = s1->low_delay;
637 s->droppable = s1->droppable;
639 // DivX handling (doesn't work)
640 s->divx_packed = s1->divx_packed;
642 if (s1->bitstream_buffer) {
643 if (s1->bitstream_buffer_size +
644 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
645 av_fast_malloc(&s->bitstream_buffer,
646 &s->allocated_bitstream_buffer_size,
647 s1->allocated_bitstream_buffer_size);
/* NOTE(review): the av_fast_malloc result is not checked before the memcpy
 * below — on OOM s->bitstream_buffer may be NULL; confirm whether upstream
 * added a check here. */
648 s->bitstream_buffer_size = s1->bitstream_buffer_size;
649 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
650 s1->bitstream_buffer_size);
651 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
652 FF_INPUT_BUFFER_PADDING_SIZE);
655 // linesize dependend scratch buffer allocation
656 if (!s->edge_emu_buffer)
658 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
659 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
660 "scratch buffers.\n");
661 return AVERROR(ENOMEM);
664 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
665 "be allocated due to unknown size.\n");
669 // MPEG2/interlacing info
670 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
671 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
673 if (!s1->first_field) {
674 s->last_pict_type = s1->pict_type;
675 if (s1->current_picture_ptr)
676 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
678 if (s1->pict_type != AV_PICTURE_TYPE_B) {
679 s->last_non_b_pict_type = s1->pict_type;
687 * Set the given MpegEncContext to common defaults
688 * (same for encoding and decoding).
689 * The changed fields will not depend upon the
690 * prior state of the MpegEncContext.
692 void ff_MPV_common_defaults(MpegEncContext *s)
/* Default scale tables: MPEG-1 DC scaling, identity chroma qscale map. */
694 s->y_dc_scale_table =
695 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
696 s->chroma_qscale_table = ff_default_chroma_qscale_table;
697 s->progressive_frame = 1;
698 s->progressive_sequence = 1;
699 s->picture_structure = PICT_FRAME;
701 s->coded_picture_number = 0;
702 s->picture_number = 0;
703 s->input_picture_number = 0;
705 s->picture_in_gop_number = 0;
710 s->picture_range_start = 0;
711 s->picture_range_end = MAX_PICTURE_COUNT;
713 s->slice_context_count = 1;
717 * Set the given MpegEncContext to defaults for decoding.
718 * the changed fields will not depend upon
719 * the prior state of the MpegEncContext.
721 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Currently identical to the common defaults; decoder-specific defaults
 * would go after this call. */
723 ff_MPV_common_defaults(s);
727 * Initialize and allocate the MpegEncContext fields dependent on the
 * resolution. Returns 0 on success, AVERROR(ENOMEM) on allocation failure.
729 static int init_context_frame(MpegEncContext *s)
731 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Derived geometry: strides include +1 padding columns for -1 indexing. */
733 s->mb_width = (s->width + 15) / 16;
734 s->mb_stride = s->mb_width + 1;
735 s->b8_stride = s->mb_width * 2 + 1;
736 s->b4_stride = s->mb_width * 4 + 1;
737 mb_array_size = s->mb_height * s->mb_stride;
738 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
740 /* set default edge pos, will be overriden
741 * in decode_header if needed */
742 s->h_edge_pos = s->mb_width * 16;
743 s->v_edge_pos = s->mb_height * 16;
745 s->mb_num = s->mb_width * s->mb_height;
750 s->block_wrap[3] = s->b8_stride;
752 s->block_wrap[5] = s->mb_stride;
754 y_size = s->b8_stride * (2 * s->mb_height + 1);
755 c_size = s->mb_stride * (s->mb_height + 1);
756 yc_size = y_size + 2 * c_size;
758 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
759 fail); // error ressilience code looks cleaner with this
760 for (y = 0; y < s->mb_height; y++)
761 for (x = 0; x < s->mb_width; x++)
762 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
764 s->mb_index2xy[s->mb_height * s->mb_width] =
765 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
768 /* Allocate MV tables */
769 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
770 mv_table_size * 2 * sizeof(int16_t), fail);
771 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
772 mv_table_size * 2 * sizeof(int16_t), fail);
773 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
774 mv_table_size * 2 * sizeof(int16_t), fail);
775 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
776 mv_table_size * 2 * sizeof(int16_t), fail);
777 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
778 mv_table_size * 2 * sizeof(int16_t), fail);
779 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
780 mv_table_size * 2 * sizeof(int16_t), fail);
/* Public table pointers skip one padding row + column of the base. */
781 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
782 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
783 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
784 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
786 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
788 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
790 /* Allocate MB type table */
791 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
792 sizeof(uint16_t), fail); // needed for encoding
794 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
797 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
798 mb_array_size * sizeof(float), fail);
799 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
800 mb_array_size * sizeof(float), fail);
804 FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
805 mb_array_size * sizeof(uint8_t), fail);
806 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
807 mb_array_size * sizeof(uint8_t), fail);
809 if (s->codec_id == AV_CODEC_ID_MPEG4 ||
810 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
811 /* interlaced direct mode decoding tables */
812 for (i = 0; i < 2; i++) {
814 for (j = 0; j < 2; j++) {
815 for (k = 0; k < 2; k++) {
816 FF_ALLOCZ_OR_GOTO(s->avctx,
817 s->b_field_mv_table_base[i][j][k],
818 mv_table_size * 2 * sizeof(int16_t),
820 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
823 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
824 mb_array_size * 2 * sizeof(uint8_t), fail);
825 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
826 mv_table_size * 2 * sizeof(int16_t), fail);
827 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
830 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
831 mb_array_size * 2 * sizeof(uint8_t), fail);
834 if (s->out_format == FMT_H263) {
/* Coded-block flags for AC prediction, offset for -1 indexing. */
836 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
837 s->coded_block = s->coded_block_base + s->b8_stride + 1;
839 /* cbp, ac_pred, pred_dir */
840 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
841 mb_array_size * sizeof(uint8_t), fail);
842 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
843 mb_array_size * sizeof(uint8_t), fail);
846 if (s->h263_pred || s->h263_plus || !s->encoding) {
848 // MN: we need these for error resilience of intra-frames
849 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
850 yc_size * sizeof(int16_t), fail);
851 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
852 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
853 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 = neutral DC predictor (128 << 3). */
854 for (i = 0; i < yc_size; i++)
855 s->dc_val_base[i] = 1024;
858 /* which mb is a intra block */
859 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
860 memset(s->mbintra_table, 1, mb_array_size);
862 /* init macroblock skip table */
863 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
864 // Note the + 1 is for a quicker mpeg4 slice_end detection
866 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
867 s->avctx->debug_mv) {
/* NOTE(review): these av_malloc results are not checked here, unlike the
 * FF_ALLOC*_OR_GOTO allocations above — confirm whether callers tolerate
 * NULL visualization buffers. */
868 s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
869 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
870 s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
871 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
872 s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
873 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
878 return AVERROR(ENOMEM);
882 * init common structure for both encoder and decoder.
883 * this assumes that some variables like width/height are already set
885 av_cold int ff_MPV_common_init(MpegEncContext *s)
888 int nb_slices = (HAVE_THREADS &&
889 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
890 s->avctx->thread_count : 1;
892 if (s->encoding && s->avctx->slices)
893 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 needs mb_height rounded to field-macroblock pairs;
 * H.264 manages its own mb_height. */
895 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
896 s->mb_height = (s->height + 31) / 32 * 2;
897 else if (s->codec_id != AV_CODEC_ID_H264)
898 s->mb_height = (s->height + 15) / 16;
900 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
901 av_log(s->avctx, AV_LOG_ERROR,
902 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* Clamp slice count to thread limit and macroblock rows. */
906 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
909 max_slices = FFMIN(MAX_THREADS, s->mb_height);
911 max_slices = MAX_THREADS;
912 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
913 " reducing to %d\n", nb_slices, max_slices);
914 nb_slices = max_slices;
917 if ((s->width || s->height) &&
918 av_image_check_size(s->width, s->height, 0, s->avctx))
921 ff_dct_common_init(s);
923 s->flags = s->avctx->flags;
924 s->flags2 = s->avctx->flags2;
926 if (s->width && s->height) {
927 /* set chroma shifts */
928 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
932 /* convert fourcc to upper case */
933 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
935 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
937 s->avctx->coded_frame = &s->current_picture.f;
/* Encoder-only allocations (stats, quant matrices, reorder buffers).
 * NOTE(review): the `if (s->encoding)` guard line that normally encloses
 * this section is not visible in this chunk — confirm against the full
 * source. */
940 if (s->msmpeg4_version) {
941 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
942 2 * 2 * (MAX_LEVEL + 1) *
943 (MAX_RUN + 1) * 2 * sizeof(int), fail);
945 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
947 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
948 64 * 32 * sizeof(int), fail);
949 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
950 64 * 32 * sizeof(int), fail);
951 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
952 64 * 32 * 2 * sizeof(uint16_t), fail);
953 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
954 64 * 32 * 2 * sizeof(uint16_t), fail);
955 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
956 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
957 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
958 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
960 if (s->avctx->noise_reduction) {
961 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
962 2 * 64 * sizeof(uint16_t), fail);
/* One picture pool per thread (frame threading multiplies the count). */
967 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
968 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
969 s->picture_count * sizeof(Picture), fail);
970 for (i = 0; i < s->picture_count; i++) {
971 avcodec_get_frame_defaults(&s->picture[i].f);
974 if (s->width && s->height) {
975 if (init_context_frame(s))
978 s->parse_context.state = -1;
981 s->context_initialized = 1;
982 s->thread_context[0] = s;
/* Slice threading: clone the context per slice and assign each a
 * start/end macroblock row range. */
984 if (s->width && s->height) {
986 for (i = 1; i < nb_slices; i++) {
987 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
988 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
991 for (i = 0; i < nb_slices; i++) {
992 if (init_duplicate_context(s->thread_context[i]) < 0)
994 s->thread_context[i]->start_mb_y =
995 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
996 s->thread_context[i]->end_mb_y =
997 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1000 if (init_duplicate_context(s) < 0)
1003 s->end_mb_y = s->mb_height;
1005 s->slice_context_count = nb_slices;
1010 ff_MPV_common_end(s);
1015 * Frees and resets MpegEncContext fields depending on the resolution.
1016 * Is used during resolution changes to avoid a full reinitialization of the
1019 static int free_context_frame(MpegEncContext *s)
/* Mirror of init_context_frame(): free every table it allocated and clear
 * the derived public pointers. */
1023 av_freep(&s->mb_type);
1024 av_freep(&s->p_mv_table_base);
1025 av_freep(&s->b_forw_mv_table_base);
1026 av_freep(&s->b_back_mv_table_base);
1027 av_freep(&s->b_bidir_forw_mv_table_base);
1028 av_freep(&s->b_bidir_back_mv_table_base);
1029 av_freep(&s->b_direct_mv_table_base);
1030 s->p_mv_table = NULL;
1031 s->b_forw_mv_table = NULL;
1032 s->b_back_mv_table = NULL;
1033 s->b_bidir_forw_mv_table = NULL;
1034 s->b_bidir_back_mv_table = NULL;
1035 s->b_direct_mv_table = NULL;
1036 for (i = 0; i < 2; i++) {
1037 for (j = 0; j < 2; j++) {
1038 for (k = 0; k < 2; k++) {
1039 av_freep(&s->b_field_mv_table_base[i][j][k]);
1040 s->b_field_mv_table[i][j][k] = NULL;
1042 av_freep(&s->b_field_select_table[i][j]);
1043 av_freep(&s->p_field_mv_table_base[i][j]);
1044 s->p_field_mv_table[i][j] = NULL;
1046 av_freep(&s->p_field_select_table[i]);
1049 av_freep(&s->dc_val_base);
1050 av_freep(&s->coded_block_base);
1051 av_freep(&s->mbintra_table);
1052 av_freep(&s->cbp_table);
1053 av_freep(&s->pred_dir_table);
1055 av_freep(&s->mbskip_table);
1057 av_freep(&s->error_status_table);
1058 av_freep(&s->er_temp_buffer);
1059 av_freep(&s->mb_index2xy);
1060 av_freep(&s->lambda_table);
1061 av_freep(&s->cplx_tab);
1062 av_freep(&s->bits_tab);
/* Strides are resolution-derived; force re-detection on next alloc. */
1064 s->linesize = s->uvlinesize = 0;
1066 for (i = 0; i < 3; i++)
1067 av_freep(&s->visualization_buffer[i]);
/* Reinitialize resolution-dependent state after a mid-stream size change:
 * tear down per-slice contexts and frame tables, recompute mb geometry,
 * then rebuild everything (mirrors the tail of ff_MPV_common_init()). */
1072 int ff_MPV_common_frame_size_change(MpegEncContext *s)
1076 if (s->slice_context_count > 1) {
1077 for (i = 0; i < s->slice_context_count; i++) {
1078 free_duplicate_context(s->thread_context[i]);
1080 for (i = 1; i < s->slice_context_count; i++) {
1081 av_freep(&s->thread_context[i]);
1084 free_duplicate_context(s);
1086 free_context_frame(s);
/* Existing pictures keep their data but must be reallocated at the new
 * size before reuse. */
1089 for (i = 0; i < s->picture_count; i++) {
1090 s->picture[i].needs_realloc = 1;
1093 s->last_picture_ptr =
1094 s->next_picture_ptr =
1095 s->current_picture_ptr = NULL;
/* Same mb_height rules as ff_MPV_common_init(). */
1098 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1099 s->mb_height = (s->height + 31) / 32 * 2;
1100 else if (s->codec_id != AV_CODEC_ID_H264)
1101 s->mb_height = (s->height + 15) / 16;
1103 if ((s->width || s->height) &&
1104 av_image_check_size(s->width, s->height, 0, s->avctx))
1105 return AVERROR_INVALIDDATA;
1107 if ((err = init_context_frame(s)))
1110 s->thread_context[0] = s;
1112 if (s->width && s->height) {
1113 int nb_slices = s->slice_context_count;
1114 if (nb_slices > 1) {
1115 for (i = 1; i < nb_slices; i++) {
1116 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1117 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1120 for (i = 0; i < nb_slices; i++) {
1121 if (init_duplicate_context(s->thread_context[i]) < 0)
1123 s->thread_context[i]->start_mb_y =
1124 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1125 s->thread_context[i]->end_mb_y =
1126 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1129 if (init_duplicate_context(s) < 0)
1132 s->end_mb_y = s->mb_height;
1134 s->slice_context_count = nb_slices;
1139 ff_MPV_common_end(s);
1143 /* Free everything allocated by ff_MPV_common_init() (and by the lazy
 * allocators), for both encoder and decoder, and reset the context to an
 * uninitialized state. Safe to call on a partially-initialized context. */
1144 void ff_MPV_common_end(MpegEncContext *s)
1148 if (s->slice_context_count > 1) {
1149 for (i = 0; i < s->slice_context_count; i++) {
1150 free_duplicate_context(s->thread_context[i]);
1152 for (i = 1; i < s->slice_context_count; i++) {
1153 av_freep(&s->thread_context[i]);
1155 s->slice_context_count = 1;
1156 } else free_duplicate_context(s);
1158 av_freep(&s->parse_context.buffer);
1159 s->parse_context.buffer_size = 0;
1161 av_freep(&s->bitstream_buffer);
1162 s->allocated_bitstream_buffer_size = 0;
1164 av_freep(&s->avctx->stats_out);
1165 av_freep(&s->ac_stats);
1167 av_freep(&s->q_intra_matrix);
1168 av_freep(&s->q_inter_matrix);
1169 av_freep(&s->q_intra_matrix16);
1170 av_freep(&s->q_inter_matrix16);
1171 av_freep(&s->input_picture);
1172 av_freep(&s->reordered_input_picture);
1173 av_freep(&s->dct_offset);
/* Thread copies must not free pictures owned by the original context. */
1175 if (s->picture && !s->avctx->internal->is_copy) {
1176 for (i = 0; i < s->picture_count; i++) {
1177 free_picture(s, &s->picture[i]);
1180 av_freep(&s->picture);
1182 free_context_frame(s);
1184 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
1185 avcodec_default_free_buffers(s->avctx);
1187 s->context_initialized = 0;
1188 s->last_picture_ptr =
1189 s->next_picture_ptr =
1190 s->current_picture_ptr = NULL;
1191 s->linesize = s->uvlinesize = 0;
/* Build the derived run/level lookup tables (max_level[], max_run[],
 * index_run[]) for an RLE table. If `static_store` is non-NULL the three
 * tables for each `last` flag are carved out of that static buffer
 * (layout: max_level | max_run | index_run); otherwise they are malloc'ed.
 * NOTE(review): the lines computing `start`/`end` per iteration are missing
 * from this sampled chunk — presumably they split the table at rl->last. */
1194 void ff_init_rl(RLTable *rl,
1195 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1197 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1198 uint8_t index_run[MAX_RUN + 1];
1199 int last, run, level, start, end, i;
1201 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1202 if (static_store && rl->max_level[0])
1205 /* compute max_level[], max_run[] and index_run[] */
1206 for (last = 0; last < 2; last++) {
     /* rl->n is used as the "unset" sentinel in index_run[]. */
1215 memset(max_level, 0, MAX_RUN + 1);
1216 memset(max_run, 0, MAX_LEVEL + 1);
1217 memset(index_run, rl->n, MAX_RUN + 1);
1218 for (i = start; i < end; i++) {
1219 run = rl->table_run[i];
1220 level = rl->table_level[i];
1221 if (index_run[run] == rl->n)
1223 if (level > max_level[run])
1224 max_level[run] = level;
1225 if (run > max_run[level])
1226 max_run[level] = run;
     /* Publish each table: static slice when available, heap copy otherwise. */
1229 rl->max_level[last] = static_store[last];
1231 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1232 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1234 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1236 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1237 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1239 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1241 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1242 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Pre-expand the RL VLC table into per-qscale RL_VLC_ELEM tables so the
 * decoder can fetch run/level/len with a single lookup per quantizer.
 * For each of the 32 qscale values the stored level is pre-dequantized
 * (level * qmul + qadd). */
1246 void ff_init_vlc_rl(RLTable *rl)
1250 for (q = 0; q < 32; q++) {
     /* (q - 1) | 1 yields the standard odd H.263-style rounding offset.
      * NOTE(review): the qmul assignment line is missing from this chunk. */
1252 int qadd = (q - 1) | 1;
1258 for (i = 0; i < rl->vlc.table_size; i++) {
1259 int code = rl->vlc.table[i][0];
1260 int len = rl->vlc.table[i][1];
1263 if (len == 0) { // illegal code
1266 } else if (len < 0) { // more bits needed
1270 if (code == rl->n) { // esc
     /* Normal code: run is stored off-by-one; codes past rl->last mark
      * "last coefficient" and get the +192 run offset convention. */
1274 run = rl->table_run[code] + 1;
1275 level = rl->table_level[code] * qmul + qadd;
1276 if (code >= rl->last) run += 192;
1279 rl->rl_vlc[q][i].len = len;
1280 rl->rl_vlc[q][i].level = level;
1281 rl->rl_vlc[q][i].run = run;
/* Release the frame buffers of all non-reference pictures owned by this
 * context; `remove_current` additionally allows freeing the picture the
 * current_picture_ptr points at. */
1286 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1290 /* release non reference frames */
1291 for (i = 0; i < s->picture_count; i++) {
     /* owner2 guards frame-threading: only the owning context may free. */
1292 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1293 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1294 (remove_current || &s->picture[i] != s->current_picture_ptr)
1295 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1296 free_frame_buffer(s, &s->picture[i]);
/* Return nonzero if `pic` can be (re)used: either its buffer is not
 * allocated, or it is flagged for reallocation, not delayed-referenced,
 * and owned by this context.
 * NOTE(review): the return statements are missing from this sampled chunk. */
1301 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1303 if (pic->f.data[0] == NULL)
1305 if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
1306 if (!pic->owner2 || pic->owner2 == s)
/* Pick a free slot in s->picture[]. Preference order for the non-shared
 * case: (1) a slot with no buffer and type 0, (2) a reusable slot with a
 * non-zero type, (3) any reusable slot. Returns the index, or
 * AVERROR_INVALIDDATA when every slot is occupied.
 * NOTE(review): the shared-path branch appears to be missing from this chunk. */
1311 static int find_unused_picture(MpegEncContext *s, int shared)
1316 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1317 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1321 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1322 if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
1325 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1326 if (pic_is_unused(s, &s->picture[i]))
1331 return AVERROR_INVALIDDATA;
/* Public wrapper around find_unused_picture(): if the chosen slot was
 * marked needs_realloc, free its old contents and reset the AVFrame so the
 * caller gets a clean slot. Returns the slot index or a negative error. */
1334 int ff_find_unused_picture(MpegEncContext *s, int shared)
1336 int ret = find_unused_picture(s, shared);
1338 if (ret >= 0 && ret < s->picture_range_end) {
1339 if (s->picture[ret].needs_realloc) {
1340 s->picture[ret].needs_realloc = 0;
1341 free_picture(s, &s->picture[ret]);
1342 avcodec_get_frame_defaults(&s->picture[ret].f);
/* Recompute the per-coefficient noise-reduction offsets from the running
 * DCT error statistics, separately for intra and inter blocks. */
1348 static void update_noise_reduction(MpegEncContext *s)
1352 for (intra = 0; intra < 2; intra++) {
     /* Halve the accumulators once the count saturates so recent frames
      * dominate (exponential forgetting, avoids overflow). */
1353 if (s->dct_count[intra] > (1 << 16)) {
1354 for (i = 0; i < 64; i++) {
1355 s->dct_error_sum[intra][i] >>= 1;
1357 s->dct_count[intra] >>= 1;
     /* offset = strength * count / error_sum, rounded; +1 avoids div by 0. */
1360 for (i = 0; i < 64; i++) {
1361 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1362 s->dct_count[intra] +
1363 s->dct_error_sum[intra][i] / 2) /
1364 (s->dct_error_sum[intra][i] + 1);
1370 * generic function for encode/decode called after coding/decoding
1371 * the header and before a frame is coded/decoded.
/* Sets up current/last/next picture pointers for the coming frame:
 * releases stale references, (re)allocates the current picture, fabricates
 * dummy reference frames when a stream starts on a P/B frame, applies
 * field-picture linesize doubling, and selects the dequantizers.
 * Returns 0 on success, negative on error (some error-return lines are
 * missing from this sampled chunk). */
1373 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1379 /* mark & release old frames */
1380 if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
1381 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1382 s->last_picture_ptr != s->next_picture_ptr &&
1383 s->last_picture_ptr->f.data[0]) {
1384 if (s->last_picture_ptr->owner2 == s)
1385 free_frame_buffer(s, s->last_picture_ptr);
1388 /* release forgotten pictures */
1389 /* if (mpeg124/h263) */
1391 for (i = 0; i < s->picture_count; i++) {
1392 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1393 &s->picture[i] != s->last_picture_ptr &&
1394 &s->picture[i] != s->next_picture_ptr &&
1395 s->picture[i].f.reference && !s->picture[i].needs_realloc) {
1396 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1397 av_log(avctx, AV_LOG_ERROR,
1398 "releasing zombie picture\n");
1399 free_frame_buffer(s, &s->picture[i]);
1406 ff_release_unused_pictures(s, 1);
     /* Reuse a pre-selected empty current picture if the caller set one;
      * otherwise grab a free slot. */
1408 if (s->current_picture_ptr &&
1409 s->current_picture_ptr->f.data[0] == NULL) {
1410 // we already have a unused image
1411 // (maybe it was set before reading the header)
1412 pic = s->current_picture_ptr;
1414 i = ff_find_unused_picture(s, 0);
1416 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1419 pic = &s->picture[i];
     /* reference: 0 = droppable, 3 = both fields referenced; H.264 uses the
      * picture structure directly. */
1422 pic->f.reference = 0;
1423 if (!s->droppable) {
1424 if (s->codec_id == AV_CODEC_ID_H264)
1425 pic->f.reference = s->picture_structure;
1426 else if (s->pict_type != AV_PICTURE_TYPE_B)
1427 pic->f.reference = 3;
1430 pic->f.coded_picture_number = s->coded_picture_number++;
1432 if (ff_alloc_picture(s, pic, 0) < 0)
1435 s->current_picture_ptr = pic;
1436 // FIXME use only the vars from current_pic
1437 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1438 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1439 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1440 if (s->picture_structure != PICT_FRAME)
1441 s->current_picture_ptr->f.top_field_first =
1442 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1444 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1445 !s->progressive_sequence;
1446 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1449 s->current_picture_ptr->f.pict_type = s->pict_type;
1450 // if (s->flags && CODEC_FLAG_QSCALE)
1451 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1452 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1454 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
     /* Advance the reference chain: last <- next, next <- current (non-B). */
1456 if (s->pict_type != AV_PICTURE_TYPE_B) {
1457 s->last_picture_ptr = s->next_picture_ptr;
1459 s->next_picture_ptr = s->current_picture_ptr;
1461 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1462 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1463 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1464 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1465 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1466 s->pict_type, s->droppable);
     /* Fabricate gray dummy reference frames so prediction has something to
      * read when the stream starts on a non-keyframe. */
1468 if (s->codec_id != AV_CODEC_ID_H264) {
1469 if ((s->last_picture_ptr == NULL ||
1470 s->last_picture_ptr->f.data[0] == NULL) &&
1471 (s->pict_type != AV_PICTURE_TYPE_I ||
1472 s->picture_structure != PICT_FRAME)) {
1473 int h_chroma_shift, v_chroma_shift;
1474 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1475 &h_chroma_shift, &v_chroma_shift);
1476 if (s->pict_type != AV_PICTURE_TYPE_I)
1477 av_log(avctx, AV_LOG_ERROR,
1478 "warning: first frame is no keyframe\n");
1479 else if (s->picture_structure != PICT_FRAME)
1480 av_log(avctx, AV_LOG_INFO,
1481 "allocate dummy last picture for field based first keyframe\n");
1483 /* Allocate a dummy frame */
1484 i = ff_find_unused_picture(s, 0);
1486 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1489 s->last_picture_ptr = &s->picture[i];
1490 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1491 s->last_picture_ptr = NULL;
     /* Luma 0 / chroma 0x80 = mid-gray in YUV. */
1495 memset(s->last_picture_ptr->f.data[0], 0,
1496 avctx->height * s->last_picture_ptr->f.linesize[0]);
1497 memset(s->last_picture_ptr->f.data[1], 0x80,
1498 (avctx->height >> v_chroma_shift) *
1499 s->last_picture_ptr->f.linesize[1]);
1500 memset(s->last_picture_ptr->f.data[2], 0x80,
1501 (avctx->height >> v_chroma_shift) *
1502 s->last_picture_ptr->f.linesize[2]);
1504 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1505 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1506 s->last_picture_ptr->f.reference = 3;
1508 if ((s->next_picture_ptr == NULL ||
1509 s->next_picture_ptr->f.data[0] == NULL) &&
1510 s->pict_type == AV_PICTURE_TYPE_B) {
1511 /* Allocate a dummy frame */
1512 i = ff_find_unused_picture(s, 0);
1514 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1517 s->next_picture_ptr = &s->picture[i];
1518 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1519 s->next_picture_ptr = NULL;
1522 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1523 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1524 s->next_picture_ptr->f.reference = 3;
1528 if (s->last_picture_ptr)
1529 ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1530 if (s->next_picture_ptr)
1531 ff_copy_picture(&s->next_picture, s->next_picture_ptr);
     /* Frame threading: claim ownership of the references for this context. */
1533 if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
1534 if (s->next_picture_ptr)
1535 s->next_picture_ptr->owner2 = s;
1536 if (s->last_picture_ptr)
1537 s->last_picture_ptr->owner2 = s;
1540 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1541 s->last_picture_ptr->f.data[0]));
     /* Field pictures: double the linesize (and offset for the bottom field)
      * so one field is addressed as a half-height frame. */
1543 if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1545 for (i = 0; i < 4; i++) {
1546 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1547 s->current_picture.f.data[i] +=
1548 s->current_picture.f.linesize[i];
1550 s->current_picture.f.linesize[i] *= 2;
1551 s->last_picture.f.linesize[i] *= 2;
1552 s->next_picture.f.linesize[i] *= 2;
1556 s->err_recognition = avctx->err_recognition;
1558 /* set dequantizer, we can't do it during init as
1559 * it might change for mpeg4 and we can't do it in the header
1560 * decode as init is not called for mpeg4 there yet */
1561 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1562 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1563 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1564 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1565 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1566 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1568 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1569 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1572 if (s->dct_error_sum) {
1573 assert(s->avctx->noise_reduction && s->encoding);
1574 update_noise_reduction(s);
1577 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1578 return ff_xvmc_field_start(s, avctx);
1583 /* generic function for encode/decode called after a
1584 * frame has been coded/decoded. */
/* Post-frame bookkeeping: draw padding edges around the reconstructed
 * picture (for unrestricted MV prediction), record last-frame statistics,
 * sync the current_picture copy back into the picture array, release
 * non-reference frames, and report decode progress. */
1585 void ff_MPV_frame_end(MpegEncContext *s)
1588 /* redraw edges for the frame if decoding didn't complete */
1589 // just to make sure that all data is rendered.
1590 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1591 ff_xvmc_field_end(s);
     /* Edge drawing is skipped for hwaccel/VDPAU output and when the caller
      * asked for emulated edges. */
1592 } else if ((s->error_count || s->encoding) &&
1593 !s->avctx->hwaccel &&
1594 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1595 s->unrestricted_mv &&
1596 s->current_picture.f.reference &&
1598 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1599 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1600 int hshift = desc->log2_chroma_w;
1601 int vshift = desc->log2_chroma_h;
1602 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1603 s->h_edge_pos, s->v_edge_pos,
1604 EDGE_WIDTH, EDGE_WIDTH,
1605 EDGE_TOP | EDGE_BOTTOM);
1606 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1607 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1608 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1609 EDGE_TOP | EDGE_BOTTOM);
1610 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1611 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1612 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1613 EDGE_TOP | EDGE_BOTTOM);
1618 s->last_pict_type = s->pict_type;
1619 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1620 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1621 s->last_non_b_pict_type = s->pict_type;
1624 /* copy back current_picture variables */
1625 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1626 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1627 s->picture[i] = s->current_picture;
1631 assert(i < MAX_PICTURE_COUNT);
1635 /* release non-reference frames */
1636 for (i = 0; i < s->picture_count; i++) {
1637 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1638 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1639 free_frame_buffer(s, &s->picture[i]);
1643 // clear copies, to avoid confusion
1645 memset(&s->last_picture, 0, sizeof(Picture));
1646 memset(&s->next_picture, 0, sizeof(Picture));
1647 memset(&s->current_picture, 0, sizeof(Picture));
1649 s->avctx->coded_frame = &s->current_picture_ptr->f;
     /* NOTE(review): current_picture was just zeroed above, so this test on
      * s->current_picture.f.reference reads the cleared copy — lines may be
      * missing from this chunk; verify against the full file. */
1651 if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
1652 ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
1657 * Draw a line from (ex, ey) -> (sx, sy).
1658 * @param w width of the image
1659 * @param h height of the image
1660 * @param stride stride/linesize of the image
1661 * @param color color of the arrow
1663 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1664 int w, int h, int stride, int color)
1668 sx = av_clip(sx, 0, w - 1);
1669 sy = av_clip(sy, 0, h - 1);
1670 ex = av_clip(ex, 0, w - 1);
1671 ey = av_clip(ey, 0, h - 1);
1673 buf[sy * stride + sx] += color;
1675 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1677 FFSWAP(int, sx, ex);
1678 FFSWAP(int, sy, ey);
1680 buf += sx + sy * stride;
1682 f = ((ey - sy) << 16) / ex;
1683 for (x = 0; x <= ex; x++) {
1685 fr = (x * f) & 0xFFFF;
1686 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1687 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1691 FFSWAP(int, sx, ex);
1692 FFSWAP(int, sy, ey);
1694 buf += sx + sy * stride;
1697 f = ((ex - sx) << 16) / ey;
1700 for (y = 0; y = ey; y++) {
1702 fr = (y * f) & 0xFFFF;
1703 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1704 buf[y * stride + x + 1] += (color * fr ) >> 16;
1710 * Draw an arrow from (ex, ey) -> (sx, sy).
1711 * @param w width of the image
1712 * @param h height of the image
1713 * @param stride stride/linesize of the image
1714 * @param color color of the arrow
/* Draws the shaft with draw_line() and, when the vector is long enough
 * (length > 3 px), two short head strokes at the (sx, sy) end. */
1716 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1717 int ey, int w, int h, int stride, int color)
     /* Loose clamp: allow endpoints slightly outside the frame; draw_line()
      * does the strict per-pixel clipping. */
1721 sx = av_clip(sx, -100, w + 100);
1722 sy = av_clip(sy, -100, h + 100);
1723 ex = av_clip(ex, -100, w + 100);
1724 ey = av_clip(ey, -100, h + 100);
     /* NOTE(review): the dx/dy/rx/ry setup lines are missing from this chunk. */
1729 if (dx * dx + dy * dy > 3 * 3) {
1732 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1734 // FIXME subpixel accuracy
1735 rx = ROUNDED_DIV(rx * 3 << 4, length);
1736 ry = ROUNDED_DIV(ry * 3 << 4, length);
1738 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1739 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1741 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1745 * Print debugging info for the given picture.
/* Two debug paths: (1) textual per-MB dump (skip count, qscale, MB type
 * glyphs) to the log; (2) visual overlays (motion vectors as arrows, QP
 * and MB type as chroma tints, partition boundaries as XORed lines) drawn
 * into a copy of the picture held in s->visualization_buffer[]. */
1747 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
1749 if (s->avctx->hwaccel || !pict || !pict->mb_type)
1752 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1755 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1756 switch (pict->pict_type) {
1757 case AV_PICTURE_TYPE_I:
1758 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1760 case AV_PICTURE_TYPE_P:
1761 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1763 case AV_PICTURE_TYPE_B:
1764 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1766 case AV_PICTURE_TYPE_S:
1767 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1769 case AV_PICTURE_TYPE_SI:
1770 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1772 case AV_PICTURE_TYPE_SP:
1773 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
     /* One glyph (or digit pair) per macroblock, one text row per MB row. */
1776 for (y = 0; y < s->mb_height; y++) {
1777 for (x = 0; x < s->mb_width; x++) {
1778 if (s->avctx->debug & FF_DEBUG_SKIP) {
1779 int count = s->mbskip_table[x + y * s->mb_stride];
1782 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1784 if (s->avctx->debug & FF_DEBUG_QP) {
1785 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1786 pict->qscale_table[x + y * s->mb_stride]);
1788 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1789 int mb_type = pict->mb_type[x + y * s->mb_stride];
1790 // Type & MV direction
1791 if (IS_PCM(mb_type))
1792 av_log(s->avctx, AV_LOG_DEBUG, "P");
1793 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1794 av_log(s->avctx, AV_LOG_DEBUG, "A");
1795 else if (IS_INTRA4x4(mb_type))
1796 av_log(s->avctx, AV_LOG_DEBUG, "i");
1797 else if (IS_INTRA16x16(mb_type))
1798 av_log(s->avctx, AV_LOG_DEBUG, "I");
1799 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1800 av_log(s->avctx, AV_LOG_DEBUG, "d");
1801 else if (IS_DIRECT(mb_type))
1802 av_log(s->avctx, AV_LOG_DEBUG, "D");
1803 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1804 av_log(s->avctx, AV_LOG_DEBUG, "g");
1805 else if (IS_GMC(mb_type))
1806 av_log(s->avctx, AV_LOG_DEBUG, "G");
1807 else if (IS_SKIP(mb_type))
1808 av_log(s->avctx, AV_LOG_DEBUG, "S");
1809 else if (!USES_LIST(mb_type, 1))
1810 av_log(s->avctx, AV_LOG_DEBUG, ">");
1811 else if (!USES_LIST(mb_type, 0))
1812 av_log(s->avctx, AV_LOG_DEBUG, "<");
1814 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1815 av_log(s->avctx, AV_LOG_DEBUG, "X");
     /* Segmentation glyph: + 8x8, - 16x8, | 8x16, space 16x16/intra. */
1819 if (IS_8X8(mb_type))
1820 av_log(s->avctx, AV_LOG_DEBUG, "+");
1821 else if (IS_16X8(mb_type))
1822 av_log(s->avctx, AV_LOG_DEBUG, "-");
1823 else if (IS_8X16(mb_type))
1824 av_log(s->avctx, AV_LOG_DEBUG, "|");
1825 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1826 av_log(s->avctx, AV_LOG_DEBUG, " ");
1828 av_log(s->avctx, AV_LOG_DEBUG, "?");
1831 if (IS_INTERLACED(mb_type))
1832 av_log(s->avctx, AV_LOG_DEBUG, "=");
1834 av_log(s->avctx, AV_LOG_DEBUG, " ");
1837 av_log(s->avctx, AV_LOG_DEBUG, "\n");
     /* ---- visual overlays ---- */
1841 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1842 (s->avctx->debug_mv)) {
1843 const int shift = 1 + s->quarter_sample;
1847 int h_chroma_shift, v_chroma_shift, block_height;
1848 const int width = s->avctx->width;
1849 const int height = s->avctx->height;
1850 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1851 const int mv_stride = (s->mb_width << mv_sample_log2) +
1852 (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
1853 s->low_delay = 0; // needed to see the vectors without trashing the buffers
     /* Work on a private copy so the real reference planes stay untouched. */
1855 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1856 &h_chroma_shift, &v_chroma_shift);
1857 for (i = 0; i < 3; i++) {
1858 memcpy(s->visualization_buffer[i], pict->data[i],
1859 (i == 0) ? pict->linesize[i] * height:
1860 pict->linesize[i] * height >> v_chroma_shift);
1861 pict->data[i] = s->visualization_buffer[i];
1863 pict->type = FF_BUFFER_TYPE_COPY;
1864 ptr = pict->data[0];
1865 block_height = 16 >> v_chroma_shift;
1867 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1869 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1870 const int mb_index = mb_x + mb_y * s->mb_stride;
1871 if ((s->avctx->debug_mv) && pict->motion_val) {
     /* type 0/1/2 = P forward / B forward / B backward vectors. */
1873 for (type = 0; type < 3; type++) {
1877 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1878 (pict->pict_type!= AV_PICTURE_TYPE_P))
1883 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1884 (pict->pict_type!= AV_PICTURE_TYPE_B))
1889 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1890 (pict->pict_type!= AV_PICTURE_TYPE_B))
1895 if (!USES_LIST(pict->mb_type[mb_index], direction))
     /* One arrow per partition, anchored at the partition centre. */
1898 if (IS_8X8(pict->mb_type[mb_index])) {
1900 for (i = 0; i < 4; i++) {
1901 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1902 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1903 int xy = (mb_x * 2 + (i & 1) +
1904 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1905 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1906 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1907 draw_arrow(ptr, sx, sy, mx, my, width,
1908 height, s->linesize, 100);
1910 } else if (IS_16X8(pict->mb_type[mb_index])) {
1912 for (i = 0; i < 2; i++) {
1913 int sx = mb_x * 16 + 8;
1914 int sy = mb_y * 16 + 4 + 8 * i;
1915 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1916 int mx = (pict->motion_val[direction][xy][0] >> shift);
1917 int my = (pict->motion_val[direction][xy][1] >> shift);
1919 if (IS_INTERLACED(pict->mb_type[mb_index]))
1922 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1923 height, s->linesize, 100);
1925 } else if (IS_8X16(pict->mb_type[mb_index])) {
1927 for (i = 0; i < 2; i++) {
1928 int sx = mb_x * 16 + 4 + 8 * i;
1929 int sy = mb_y * 16 + 8;
1930 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1931 int mx = pict->motion_val[direction][xy][0] >> shift;
1932 int my = pict->motion_val[direction][xy][1] >> shift;
1934 if (IS_INTERLACED(pict->mb_type[mb_index]))
1937 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1938 height, s->linesize, 100);
1941 int sx = mb_x * 16 + 8;
1942 int sy = mb_y * 16 + 8;
1943 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
     /* NOTE(review): ">>" binds looser than "+", so these shift by
      * (shift + sx)/(shift + sy); upstream writes (val >> shift) + sx.
      * Looks like a dropped-parentheses transcription bug — confirm
      * against the full file before relying on this path. */
1944 int mx = pict->motion_val[direction][xy][0] >> shift + sx;
1945 int my = pict->motion_val[direction][xy][1] >> shift + sy;
1946 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
     /* QP overlay: fill the MB's chroma blocks with a gray level ∝ qscale. */
1950 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1951 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1952 0x0101010101010101ULL;
1954 for (y = 0; y < block_height; y++) {
1955 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1956 (block_height * mb_y + y) *
1957 pict->linesize[1]) = c;
1958 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1959 (block_height * mb_y + y) *
1960 pict->linesize[2]) = c;
1963 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1965 int mb_type = pict->mb_type[mb_index];
     /* Map each MB type to a hue on the U/V color circle. */
1968 #define COLOR(theta, r) \
1969 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1970 v = (int)(128 + r * sin(theta * 3.141592 / 180));
1974 if (IS_PCM(mb_type)) {
1976 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1977 IS_INTRA16x16(mb_type)) {
1979 } else if (IS_INTRA4x4(mb_type)) {
1981 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1983 } else if (IS_DIRECT(mb_type)) {
1985 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1987 } else if (IS_GMC(mb_type)) {
1989 } else if (IS_SKIP(mb_type)) {
1991 } else if (!USES_LIST(mb_type, 1)) {
1993 } else if (!USES_LIST(mb_type, 0)) {
1996 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2000 u *= 0x0101010101010101ULL;
2001 v *= 0x0101010101010101ULL;
2002 for (y = 0; y < block_height; y++) {
2003 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2004 (block_height * mb_y + y) * pict->linesize[1]) = u;
2005 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2006 (block_height * mb_y + y) * pict->linesize[2]) = v;
     /* Partition boundaries: XOR with 0x80 so lines stay visible on any
      * background and a second pass undoes them. */
2010 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2011 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2012 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2013 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2014 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2016 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2017 for (y = 0; y < 16; y++)
2018 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2019 pict->linesize[0]] ^= 0x80;
2021 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2022 int dm = 1 << (mv_sample_log2 - 2);
2023 for (i = 0; i < 4; i++) {
2024 int sx = mb_x * 16 + 8 * (i & 1);
2025 int sy = mb_y * 16 + 8 * (i >> 1);
2026 int xy = (mb_x * 2 + (i & 1) +
2027 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
     /* Only draw sub-partition split lines where neighboring MVs differ. */
2029 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
2030 if (mv[0] != mv[dm] ||
2031 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2032 for (y = 0; y < 8; y++)
2033 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2034 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2035 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2036 pict->linesize[0]) ^= 0x8080808080808080ULL;
2040 if (IS_INTERLACED(mb_type) &&
2041 s->codec_id == AV_CODEC_ID_H264) {
2045 s->mbskip_table[mb_index] = 0;
2052 * find the lowest MB row referenced in the MVs
/* Used by frame threading to know how far the reference frame must be
 * decoded before this MB can be motion-compensated in direction `dir`.
 * NOTE(review): the switch cases setting `mvs`/`off` per mv_type are
 * missing from this sampled chunk. */
2054 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
     /* Half-pel MVs get shifted up by 1 so all math is in quarter-pel units. */
2056 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2057 int my, off, i, mvs;
     /* Field pictures / GMC: bail out conservatively (fall through to the
      * "whole frame" answer at the bottom). */
2059 if (s->picture_structure != PICT_FRAME || s->mcsel)
2062 switch (s->mv_type) {
2076 for (i = 0; i < mvs; i++) {
2077 my = s->mv[dir][i][1]<<qpel_shift;
2078 my_max = FFMAX(my_max, my);
2079 my_min = FFMIN(my_min, my);
     /* +63 >> 6: quarter-pel units -> 16-pixel MB rows, rounded up. */
2082 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2084 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2086 return s->mb_height-1;
2089 /* put block[] to dest[] */
/* Intra path: dequantize block n, then IDCT and store (overwrite) at dest. */
2090 static inline void put_dct(MpegEncContext *s,
2091 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2093 s->dct_unquantize_intra(s, block, i, qscale);
2094 s->dsp.idct_put (dest, line_size, block);
2097 /* add block[] to dest[] */
/* Inter path without dequant: IDCT block i and add to the prediction in
 * dest; skipped entirely when the block has no coded coefficients. */
2098 static inline void add_dct(MpegEncContext *s,
2099 int16_t *block, int i, uint8_t *dest, int line_size)
2101 if (s->block_last_index[i] >= 0) {
2102 s->dsp.idct_add (dest, line_size, block);
/* Inter path with dequant: dequantize block i, IDCT and add to dest;
 * no-op when the block has no coded coefficients. */
2106 static inline void add_dequant_dct(MpegEncContext *s,
2107 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2109 if (s->block_last_index[i] >= 0) {
2110 s->dct_unquantize_inter(s, block, i, qscale);
2112 s->dsp.idct_add (dest, line_size, block);
2117 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra prediction state (DC predictors to the neutral 1024,
 * AC predictors to 0, coded_block flags to 0) for the current macroblock
 * so a later intra MB does not predict from stale inter data. */
2119 void ff_clean_intra_table_entries(MpegEncContext *s)
2121 int wrap = s->b8_stride;
2122 int xy = s->block_index[0];
     /* Luma: four 8x8 blocks at xy, xy+1, xy+wrap, xy+1+wrap.
      * NOTE(review): the line resetting dc_val[0][xy] itself is missing
      * from this sampled chunk. */
2125 s->dc_val[0][xy + 1 ] =
2126 s->dc_val[0][xy + wrap] =
2127 s->dc_val[0][xy + 1 + wrap] = 1024;
2129 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2130 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2131 if (s->msmpeg4_version>=3) {
2132 s->coded_block[xy ] =
2133 s->coded_block[xy + 1 ] =
2134 s->coded_block[xy + wrap] =
2135 s->coded_block[xy + 1 + wrap] = 0;
     /* Chroma: one block per plane, addressed on the MB grid. */
2138 wrap = s->mb_stride;
2139 xy = s->mb_x + s->mb_y * wrap;
2141 s->dc_val[2][xy] = 1024;
2143 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2144 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2146 s->mbintra_table[xy]= 0;
2149 /* generic function called after a macroblock has been parsed by the
2150 decoder or after it has been encoded by the encoder.
2152 Important variables used:
2153 s->mb_intra : true if intra macroblock
2154 s->mv_dir : motion vector direction
2155 s->mv_type : motion vector type
2156 s->mv : motion vector
2157 s->interlaced_dct : true if interlaced dct used (mpeg2)
2159 static av_always_inline
2160 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2163 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2164 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2165 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2169 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2170 /* save DCT coefficients */
2172 int16_t *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2173 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2175 for(j=0; j<64; j++){
2176 *dct++ = block[i][s->dsp.idct_permutation[j]];
2177 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2179 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2183 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2185 /* update DC predictors for P macroblocks */
2187 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2188 if(s->mbintra_table[mb_xy])
2189 ff_clean_intra_table_entries(s);
2193 s->last_dc[2] = 128 << s->intra_dc_precision;
2196 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2197 s->mbintra_table[mb_xy]=1;
2199 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2200 uint8_t *dest_y, *dest_cb, *dest_cr;
2201 int dct_linesize, dct_offset;
2202 op_pixels_func (*op_pix)[4];
2203 qpel_mc_func (*op_qpix)[16];
2204 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2205 const int uvlinesize = s->current_picture.f.linesize[1];
2206 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2207 const int block_size = 8;
2209 /* avoid copy if macroblock skipped in last frame too */
2210 /* skip only during decoding as we might trash the buffers during encoding a bit */
2212 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2214 if (s->mb_skipped) {
2216 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2218 } else if(!s->current_picture.f.reference) {
2221 *mbskip_ptr = 0; /* not skipped */
2225 dct_linesize = linesize << s->interlaced_dct;
2226 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2230 dest_cb= s->dest[1];
2231 dest_cr= s->dest[2];
2233 dest_y = s->b_scratchpad;
2234 dest_cb= s->b_scratchpad+16*linesize;
2235 dest_cr= s->b_scratchpad+32*linesize;
2239 /* motion handling */
2240 /* decoding or more than one mb_type (MC was already done otherwise) */
2243 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2244 if (s->mv_dir & MV_DIR_FORWARD) {
2245 ff_thread_await_progress(&s->last_picture_ptr->f,
2246 ff_MPV_lowest_referenced_row(s, 0),
2249 if (s->mv_dir & MV_DIR_BACKWARD) {
2250 ff_thread_await_progress(&s->next_picture_ptr->f,
2251 ff_MPV_lowest_referenced_row(s, 1),
2256 op_qpix= s->me.qpel_put;
2257 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2258 op_pix = s->dsp.put_pixels_tab;
2260 op_pix = s->dsp.put_no_rnd_pixels_tab;
2262 if (s->mv_dir & MV_DIR_FORWARD) {
2263 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2264 op_pix = s->dsp.avg_pixels_tab;
2265 op_qpix= s->me.qpel_avg;
2267 if (s->mv_dir & MV_DIR_BACKWARD) {
2268 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2272 /* skip dequant / idct if we are really late ;) */
2273 if(s->avctx->skip_idct){
2274 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2275 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2276 || s->avctx->skip_idct >= AVDISCARD_ALL)
2280 /* add dct residue */
2281 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2282 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2283 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2284 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2285 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2286 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2288 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2289 if (s->chroma_y_shift){
2290 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2291 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2295 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2296 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2297 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2298 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2301 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2302 add_dct(s, block[0], 0, dest_y , dct_linesize);
2303 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2304 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2305 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2307 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2308 if(s->chroma_y_shift){//Chroma420
2309 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2310 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2313 dct_linesize = uvlinesize << s->interlaced_dct;
2314 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2316 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2317 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2318 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2319 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2320 if(!s->chroma_x_shift){//Chroma444
2321 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2322 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2323 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2324 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2329 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2330 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2333 /* dct only in intra block */
2334 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2335 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2336 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2337 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2338 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2340 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2341 if(s->chroma_y_shift){
2342 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2343 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2347 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2348 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2349 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2350 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2354 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2355 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2356 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2357 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2359 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2360 if(s->chroma_y_shift){
2361 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2362 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2365 dct_linesize = uvlinesize << s->interlaced_dct;
2366 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2368 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2369 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2370 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2371 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2372 if(!s->chroma_x_shift){//Chroma444
2373 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2374 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2375 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2376 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2384 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2385 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2386 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/**
 * Reconstruct one macroblock from its coefficient blocks into the
 * current picture.  Thin dispatcher around MPV_decode_mb_internal():
 * the is_mpeg12 flag (1 for FMT_MPEG1 output formats, 0 otherwise) lets
 * the compiler specialize away the codec-specific branches.
 */
2391 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2393 if(s->out_format == FMT_MPEG1) {
2394 MPV_decode_mb_internal(s, block, 1);
/* all non-MPEG-1/2 codecs take the generic path */
2397 MPV_decode_mb_internal(s, block, 0);
2401 * @param h is the normal height; it will be reduced automatically if needed for the last row
/**
 * Notify the user that a horizontal band of the picture has been decoded,
 * after first padding the picture edges when unrestricted motion vectors
 * require it.
 *
 * @param y first row of the band
 * @param h nominal band height (clipped against the picture bottom below)
 */
2403 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2404 const int field_pic= s->picture_structure != PICT_FRAME;
/* Pad the picture borders for unrestricted-MV codecs; skipped when a
 * hardware accelerator owns the frame, the picture is not a reference,
 * or edge emulation (CODEC_FLAG_EMU_EDGE) is active. */
2410 if (!s->avctx->hwaccel
2411 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2412 && s->unrestricted_mv
2413 && s->current_picture.f.reference
2415 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2416 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
2417 int sides = 0, edge_h;
/* chroma subsampling shifts, taken from the pixel format descriptor */
2418 int hshift = desc->log2_chroma_w;
2419 int vshift = desc->log2_chroma_h;
/* only pad the top/bottom edge for the first/last band */
2420 if (y==0) sides |= EDGE_TOP;
2421 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2423 edge_h= FFMIN(h, s->v_edge_pos - y);
/* luma plane, then the two chroma planes at subsampled geometry */
2425 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2426 s->linesize, s->h_edge_pos, edge_h,
2427 EDGE_WIDTH, EDGE_WIDTH, sides);
2428 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2429 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2430 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2431 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2432 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2433 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
/* clip the band against the real picture height (last band may be short) */
2436 h= FFMIN(h, s->avctx->height - y);
/* for field pictures, suppress the first-field callback unless the user
 * explicitly asked for per-field bands */
2438 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2440 if (s->avctx->draw_horiz_band) {
2442 int offset[AV_NUM_DATA_POINTERS];
/* pick the frame to hand out: the current frame when output is in coded
 * order (B-frame, low delay, or SLICE_FLAG_CODED_ORDER), otherwise the
 * last (display-order) picture if one exists */
2445 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2446 src = &s->current_picture_ptr->f;
2447 else if(s->last_picture_ptr)
2448 src = &s->last_picture_ptr->f;
2452 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2453 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
/* byte offsets of the band start inside each plane */
2456 offset[0]= y * s->linesize;
2458 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2459 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2465 s->avctx->draw_horiz_band(s->avctx, src, offset,
2466 y, s->picture_structure, h);
/**
 * Initialize s->block_index[] and the s->dest[] plane pointers for the
 * macroblock at (s->mb_x, s->mb_y); called before processing each MB.
 */
2470 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2471 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2472 const int uvlinesize = s->current_picture.f.linesize[1];
/* log2 of the macroblock size in pixels (16) used for the shifts below */
2473 const int mb_size= 4;
/* indices 0..3: the four luma 8x8 blocks, addressed on the b8 grid */
2475 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2476 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2477 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2478 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* indices 4..5: the chroma blocks, stored after the luma plane of indices */
2479 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2480 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2481 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* x part of the destination pointers; (mb_x - 1) because they are
 * advanced before use elsewhere */
2483 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2484 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2485 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2487 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2489 if(s->picture_structure==PICT_FRAME){
/* frame picture: advance by whole-MB rows */
2490 s->dest[0] += s->mb_y * linesize << mb_size;
2491 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2492 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: only every other MB row lands in this field */
2494 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2495 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2496 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2497 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2503 * Permute an 8x8 block.
2504 * @param block the block which will be permuted according to the given permutation vector
2505 * @param permutation the permutation vector
2506 * @param last the last non-zero coefficient in scantable order, used to speed the permutation up
2507 * @param scantable the used scantable; it is only used to speed the permutation up, the block is not
2508 * (inverse) permuted to scantable order!
/* Apply the IDCT coefficient permutation in place: gather the non-zero
 * coefficients (first loop), then scatter them back to their permuted
 * positions (second loop). */
2510 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
2516 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* first pass over scantable order — presumably copies block[j] into a
 * temp array and clears block[j]; the loop body is outside this view */
2518 for(i=0; i<=last; i++){
2519 const int j= scantable[i];
/* second pass: write each saved coefficient at its permuted index */
2524 for(i=0; i<=last; i++){
2525 const int j= scantable[i];
2526 const int perm_j= permutation[j];
2527 block[perm_j]= temp[j];
/**
 * Discard all buffered decoder state (avcodec flush callback): release
 * every allocated picture, drop the reference-picture pointers and reset
 * the parser/bitstream bookkeeping so decoding can restart cleanly
 * (e.g. after a seek).
 */
2531 void ff_mpeg_flush(AVCodecContext *avctx){
2533 MpegEncContext *s = avctx->priv_data;
/* nothing to do if the context was never fully initialized */
2535 if(s==NULL || s->picture==NULL)
2538 for(i=0; i<s->picture_count; i++){
/* only internally- or user-allocated buffers are ours to free */
2539 if (s->picture[i].f.data[0] &&
2540 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2541 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2542 free_frame_buffer(s, &s->picture[i]);
2544 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2546 s->mb_x= s->mb_y= 0;
/* reset the start-code parser state */
2548 s->parse_context.state= -1;
2549 s->parse_context.frame_start_found= 0;
2550 s->parse_context.overread= 0;
2551 s->parse_context.overread_index= 0;
2552 s->parse_context.index= 0;
2553 s->parse_context.last_index= 0;
2554 s->bitstream_buffer_size=0;
/**
 * MPEG-1 intra-block dequantization (C reference implementation).
 * AC coefficients: level * qscale * quant_matrix[j] / 8, then forced odd
 * via (level - 1) | 1 — MPEG-1's "oddification" mismatch control.
 */
2558 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2559 int16_t *block, int n, int qscale)
2561 int i, level, nCoeffs;
2562 const uint16_t *quant_matrix;
2564 nCoeffs= s->block_last_index[n];
/* DC is scaled separately; y_dc_scale vs c_dc_scale is chosen by block
 * index n (luma vs chroma) — the selecting condition is outside this view */
2567 block[0] = block[0] * s->y_dc_scale;
2569 block[0] = block[0] * s->c_dc_scale;
2570 /* XXX: only mpeg1 */
2571 quant_matrix = s->intra_matrix;
2572 for(i=1;i<=nCoeffs;i++) {
2573 int j= s->intra_scantable.permutated[i];
/* two branches — presumably the negative/positive level cases with the
 * sign handling elided from this view */
2578 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2579 level = (level - 1) | 1;
2582 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2583 level = (level - 1) | 1;
/**
 * MPEG-1 inter-block dequantization (C reference implementation).
 * Each coefficient: (2*level + 1) * qscale * quant_matrix[j] / 16,
 * forced odd via (level - 1) | 1 (MPEG-1 mismatch control).
 * Unlike the intra case, DC is dequantized like any other coefficient
 * (the loop starts at i = 0).
 */
2590 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2591 int16_t *block, int n, int qscale)
2593 int i, level, nCoeffs;
2594 const uint16_t *quant_matrix;
2596 nCoeffs= s->block_last_index[n];
2598 quant_matrix = s->inter_matrix;
2599 for(i=0; i<=nCoeffs; i++) {
/* intra_scantable is used deliberately: the permutation is the same for
 * both scantables — matches the other *_inter_c functions in this file */
2600 int j= s->intra_scantable.permutated[i];
/* two branches — presumably negative/positive level; sign handling is
 * outside this view */
2605 level = (((level << 1) + 1) * qscale *
2606 ((int) (quant_matrix[j]))) >> 4;
2607 level = (level - 1) | 1;
2610 level = (((level << 1) + 1) * qscale *
2611 ((int) (quant_matrix[j]))) >> 4;
2612 level = (level - 1) | 1;
/**
 * MPEG-2 intra-block dequantization (C reference implementation).
 * Same scaling as the MPEG-1 intra variant (level * qscale * matrix / 8)
 * but without the (level-1)|1 oddification — MPEG-2 uses a different
 * mismatch-control scheme (see the *_bitexact variant below).
 */
2619 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2620 int16_t *block, int n, int qscale)
2622 int i, level, nCoeffs;
2623 const uint16_t *quant_matrix;
/* with alternate scan all 64 coefficients must be processed */
2625 if(s->alternate_scan) nCoeffs= 63;
2626 else nCoeffs= s->block_last_index[n];
/* DC scaled separately; luma/chroma selection condition is outside this view */
2629 block[0] = block[0] * s->y_dc_scale;
2631 block[0] = block[0] * s->c_dc_scale;
2632 quant_matrix = s->intra_matrix;
2633 for(i=1;i<=nCoeffs;i++) {
2634 int j= s->intra_scantable.permutated[i];
/* two branches — presumably negative/positive level; sign handling is
 * outside this view */
2639 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2642 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Bit-exact MPEG-2 intra dequantization: same arithmetic as
 * dct_unquantize_mpeg2_intra_c, but additionally applies the spec's
 * mismatch control (parity correction of the last coefficient — the
 * sum/parity bookkeeping lines are outside this view).
 */
2649 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2650 int16_t *block, int n, int qscale)
2652 int i, level, nCoeffs;
2653 const uint16_t *quant_matrix;
/* with alternate scan all 64 coefficients must be processed */
2656 if(s->alternate_scan) nCoeffs= 63;
2657 else nCoeffs= s->block_last_index[n];
/* DC scaled separately; luma/chroma selection condition is outside this view */
2660 block[0] = block[0] * s->y_dc_scale;
2662 block[0] = block[0] * s->c_dc_scale;
2663 quant_matrix = s->intra_matrix;
2664 for(i=1;i<=nCoeffs;i++) {
2665 int j= s->intra_scantable.permutated[i];
/* two branches — presumably negative/positive level; sign handling is
 * outside this view */
2670 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2673 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * MPEG-2 inter-block dequantization (C reference implementation).
 * Each coefficient: (2*level + 1) * qscale * quant_matrix[j] / 16;
 * no oddification — MPEG-2 mismatch control is applied elsewhere
 * (outside this view).
 */
2682 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2683 int16_t *block, int n, int qscale)
2685 int i, level, nCoeffs;
2686 const uint16_t *quant_matrix;
/* with alternate scan all 64 coefficients must be processed */
2689 if(s->alternate_scan) nCoeffs= 63;
2690 else nCoeffs= s->block_last_index[n];
2692 quant_matrix = s->inter_matrix;
2693 for(i=0; i<=nCoeffs; i++) {
/* intra_scantable deliberately: same permutation as the inter scantable */
2694 int j= s->intra_scantable.permutated[i];
/* two branches — presumably negative/positive level; sign handling is
 * outside this view */
2699 level = (((level << 1) + 1) * qscale *
2700 ((int) (quant_matrix[j]))) >> 4;
2703 level = (((level << 1) + 1) * qscale *
2704 ((int) (quant_matrix[j]))) >> 4;
2713 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2714 int16_t *block, int n, int qscale)
2716 int i, level, qmul, qadd;
2719 assert(s->block_last_index[n]>=0);
2725 block[0] = block[0] * s->y_dc_scale;
2727 block[0] = block[0] * s->c_dc_scale;
2728 qadd = (qscale - 1) | 1;
2735 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2737 for(i=1; i<=nCoeffs; i++) {
2741 level = level * qmul - qadd;
2743 level = level * qmul + qadd;
2750 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2751 int16_t *block, int n, int qscale)
2753 int i, level, qmul, qadd;
2756 assert(s->block_last_index[n]>=0);
2758 qadd = (qscale - 1) | 1;
2761 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2763 for(i=0; i<=nCoeffs; i++) {
2767 level = level * qmul - qadd;
2769 level = level * qmul + qadd;
2777 * Set qscale and update the qscale-dependent variables.
/**
 * Set the quantizer and refresh everything derived from it: the chroma
 * quantizer and the luma/chroma DC scale factors.  qscale is clamped to
 * the valid range (the upper bound 31 is visible here; the lower clamp
 * and the s->qscale store are outside this view).
 */
2779 void ff_set_qscale(MpegEncContext * s, int qscale)
2783 else if (qscale > 31)
2787 s->chroma_qscale= s->chroma_qscale_table[qscale];
/* DC scales are table lookups; chroma uses the remapped chroma_qscale */
2789 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2790 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/**
 * Report row-level decode progress of the current picture to other
 * frame threads waiting on it (ff_thread_await_progress).  Skipped for
 * B-pictures and partitioned frames, and once an error occurred —
 * presumably because the reported rows could then not be trusted as a
 * prediction source; confirm against the frame-threading callers.
 */
2793 void ff_MPV_report_decode_progress(MpegEncContext *s)
2795 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2796 ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);