2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/imgutils.h"
33 #include "h264chroma.h"
36 #include "mpegvideo.h"
39 #include "xvmc_internal.h"
/* Forward declarations for the portable C dequantizers that
 * ff_dct_common_init() installs into the MpegEncContext function
 * pointers; architecture-specific init may later override them. */
46 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
47 int16_t *block, int n, int qscale);
48 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
49 int16_t *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
51 int16_t *block, int n, int qscale);
52 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
53 int16_t *block, int n, int qscale);
54 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
55 int16_t *block, int n, int qscale);
56 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
57 int16_t *block, int n, int qscale);
58 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
59 int16_t *block, int n, int qscale);
/* Identity mapping: chroma qscale equals luma qscale (0..31).
 * Installed as the default by ff_MPV_common_defaults(). */
65 static const uint8_t ff_default_chroma_qscale_table[32] = {
66 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
67 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
68 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC scale: constant 8 for every qscale (8-bit DC precision). */
71 const uint8_t ff_mpeg1_dc_scale_table[128] = {
72 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
73 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale: constant 4 (9-bit intra DC precision). */
83 static const uint8_t mpeg2_dc_scale_table1[128] = {
84 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale: constant 2 (10-bit intra DC precision). */
95 static const uint8_t mpeg2_dc_scale_table2[128] = {
96 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
97 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale: constant 1 (11-bit intra DC precision). */
107 static const uint8_t mpeg2_dc_scale_table3[128] = {
108 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
109 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale LUTs indexed by intra_dc_precision (0..3, i.e. 8..11 bits);
 * index 0 shares the MPEG-1 table. */
119 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
120 ff_mpeg1_dc_scale_table,
121 mpeg2_dc_scale_table1,
122 mpeg2_dc_scale_table2,
123 mpeg2_dc_scale_table3,
/* Software 4:2:0 pixel-format list (entries elided in this chunk). */
126 const enum AVPixelFormat ff_pixfmt_list_420[] = {
/* Hardware-accelerated 4:2:0 formats; the per-entry CONFIG_* guards
 * appear to be elided in this chunk. */
131 const enum AVPixelFormat ff_hwaccel_pixfmt_list_420[] = {
133 AV_PIX_FMT_DXVA2_VLD,
136 AV_PIX_FMT_VAAPI_VLD,
/* Scan [p, end) for an MPEG start code (00 00 01 xx), keeping a rolling
 * 32-bit *state across calls so a code split between buffers is found.
 * NOTE(review): several lines of this function are elided in this chunk;
 * comments describe only the visible statements. */
148 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *av_restrict p,
150 uint32_t *av_restrict state)
/* feed up to 3 bytes into the rolling state, stopping early on a
 * 00 00 01 prefix (tmp == 0x100) or when the buffer is exhausted */
158 for (i = 0; i < 3; i++) {
159 uint32_t tmp = *state << 8;
160 *state = tmp + *(p++);
161 if (tmp == 0x100 || p == end)
/* skip over bytes that cannot be part of a 00 00 01 prefix, advancing
 * as far as the last examined bytes allow */
166 if (p[-1] > 1 ) p += 3;
167 else if (p[-2] ) p += 2;
168 else if (p[-3]|(p[-1]-1)) p++;
/* clamp to the buffer and step back over the 4 bytes just consumed */
175 p = FFMIN(p, end) - 4;
181 /* init common dct for both encoder and decoder */
182 av_cold int ff_dct_common_init(MpegEncContext *s)
184 ff_dsputil_init(&s->dsp, s->avctx);
185 ff_h264chroma_init(&s->h264chroma, 8); //for lowres
186 ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
/* install the portable C dequantizers; the arch-specific init calls
 * below may replace them with optimized implementations */
188 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
189 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
190 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
191 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
192 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* bit-exact mode needs the non-mismatch-controlled intra dequantizer */
193 if (s->flags & CODEC_FLAG_BITEXACT)
194 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
195 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* per-architecture initialization; the ARCH_*/#if guards that normally
 * select exactly one of these are elided in this chunk */
198 ff_MPV_common_init_x86(s);
200 ff_MPV_common_init_axp(s);
202 ff_MPV_common_init_arm(s);
204 ff_MPV_common_init_altivec(s);
206 ff_MPV_common_init_bfin(s);
209 /* load & permutate scantables
210 * note: only wmv uses different ones
/* alternate vertical scan for interlaced material, zigzag otherwise */
212 if (s->alternate_scan) {
213 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
214 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
216 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
217 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
219 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
220 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy src into dst and tag the result FF_BUFFER_TYPE_COPY so the
 * shared buffer is not released twice (body mostly elided here). */
225 void ff_copy_picture(Picture *dst, Picture *src)
228 dst->f.type = FF_BUFFER_TYPE_COPY;
232 * Release a frame buffer
234 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
236 pic->period_since_free = 0;
237 /* WM Image / Screen codecs allocate internal buffers with different
238 * dimensions / colorspaces; ignore user-defined callbacks for these. */
239 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
240 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
241 s->codec_id != AV_CODEC_ID_MSS2)
242 ff_thread_release_buffer(s->avctx, &pic->f);
/* (else branch; elided line) fall back to the default releaser */
244 avcodec_default_release_buffer(s->avctx, &pic->f);
/* drop any hwaccel private data attached to this frame */
245 av_freep(&pic->f.hwaccel_picture_private);
/* Allocate the linesize-dependent scratch buffers (edge emulation and
 * motion-estimation scratchpads). Returns 0 on success, AVERROR(ENOMEM)
 * on failure. NOTE(review): goto labels/braces are elided in this chunk. */
248 int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
/* |linesize| plus a 64-byte edge margin, rounded up to 32 bytes */
250 int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
252 // edge emu needs blocksize + filter length - 1
253 // (= 17x17 for halfpel / 21x21 for h264)
254 // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
255 // at uvlinesize. It supports only YUV420 so 24x24 is enough
256 // linesize * interlaced * MBsize
257 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
260 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
/* the ME temp, RD, B-frame and OBMC scratchpads all alias one buffer */
262 s->me.temp = s->me.scratchpad;
263 s->rd_scratchpad = s->me.scratchpad;
264 s->b_scratchpad = s->me.scratchpad;
265 s->obmc_scratchpad = s->me.scratchpad + 16;
/* failure path (label elided): release what was allocated, report OOM */
269 av_freep(&s->edge_emu_buffer);
270 return AVERROR(ENOMEM);
274 * Allocate a frame buffer
276 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* hwaccel decoding may need per-frame private data before get_buffer */
280 if (s->avctx->hwaccel) {
281 assert(!pic->f.hwaccel_picture_private);
282 if (s->avctx->hwaccel->priv_data_size) {
283 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
284 if (!pic->f.hwaccel_picture_private) {
285 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* WM Image/Screen codecs must bypass user callbacks; mirrors the test
 * in free_frame_buffer() */
291 if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
292 s->codec_id != AV_CODEC_ID_VC1IMAGE &&
293 s->codec_id != AV_CODEC_ID_MSS2)
294 r = ff_thread_get_buffer(s->avctx, &pic->f);
296 r = avcodec_default_get_buffer(s->avctx, &pic->f);
298 if (r < 0 || !pic->f.type || !pic->f.data[0]) {
299 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
300 r, pic->f.type, pic->f.data[0]);
301 av_freep(&pic->f.hwaccel_picture_private);
/* strides must stay constant for the whole stream once established */
305 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
306 s->uvlinesize != pic->f.linesize[1])) {
307 av_log(s->avctx, AV_LOG_ERROR,
308 "get_buffer() failed (stride changed)\n");
309 free_frame_buffer(s, pic);
/* U and V planes must share one stride */
313 if (pic->f.linesize[1] != pic->f.linesize[2]) {
314 av_log(s->avctx, AV_LOG_ERROR,
315 "get_buffer() failed (uv stride mismatch)\n");
316 free_frame_buffer(s, pic);
/* lazily (re)create linesize-dependent scratch buffers */
320 if (!s->edge_emu_buffer &&
321 (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
322 av_log(s->avctx, AV_LOG_ERROR,
323 "get_buffer() failed to allocate context scratch buffers.\n");
324 free_frame_buffer(s, pic);
332 * Allocate a Picture.
333 * The pixels are allocated/set by calling get_buffer() if shared = 0
335 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* mb_stride rows plus one guard row; see comment below on the + 1 */
337 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
339 // the + 1 is needed so memset(,,stride*height) does not sig11
341 const int mb_array_size = s->mb_stride * s->mb_height;
342 const int b8_array_size = s->b8_stride * s->mb_height * 2;
343 const int b4_array_size = s->b4_stride * s->mb_height * 4;
/* shared pictures must already carry pixel data; otherwise get a buffer */
348 assert(pic->f.data[0]);
349 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
350 pic->f.type = FF_BUFFER_TYPE_SHARED;
352 assert(!pic->f.data[0]);
354 if (alloc_frame_buffer(s, pic) < 0)
/* record the strides chosen by get_buffer for later consistency checks */
357 s->linesize = pic->f.linesize[0];
358 s->uvlinesize = pic->f.linesize[1];
/* first-time side-data allocation for this Picture */
361 if (pic->f.qscale_table == NULL) {
/* encoder-only statistics tables (guard elided in this chunk) */
363 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
364 mb_array_size * sizeof(int16_t), fail)
365 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
366 mb_array_size * sizeof(int16_t), fail)
367 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
368 mb_array_size * sizeof(int8_t ), fail)
371 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
372 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
373 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
374 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
376 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
377 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
/* public pointers skip the guard rows at the start of the base arrays */
379 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
380 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
/* H.264 uses 4x4 motion granularity (b4), H.263-family uses 8x8 (b8) */
381 if (s->out_format == FMT_H264) {
382 for (i = 0; i < 2; i++) {
383 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
384 2 * (b4_array_size + 4) * sizeof(int16_t),
386 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
387 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
388 4 * mb_array_size * sizeof(uint8_t), fail)
390 pic->f.motion_subsample_log2 = 2;
391 } else if (s->out_format == FMT_H263 || s->encoding ||
392 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
393 for (i = 0; i < 2; i++) {
394 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
395 2 * (b8_array_size + 4) * sizeof(int16_t),
397 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
398 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
399 4 * mb_array_size * sizeof(uint8_t), fail)
401 pic->f.motion_subsample_log2 = 3;
/* optional DCT coefficient dump for debugging */
403 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
404 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
405 64 * mb_array_size * sizeof(int16_t) * 6, fail)
407 pic->f.qstride = s->mb_stride;
408 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
409 1 * sizeof(AVPanScan), fail)
415 fail: // for the FF_ALLOCZ_OR_GOTO macro
417 free_frame_buffer(s, pic);
422 * Deallocate a picture.
424 static void free_picture(MpegEncContext *s, Picture *pic)
/* release the pixel buffer unless it is externally shared */
428 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
429 free_frame_buffer(s, pic);
/* free all per-picture side data allocated in ff_alloc_picture() */
432 av_freep(&pic->mb_var);
433 av_freep(&pic->mc_mb_var);
434 av_freep(&pic->mb_mean);
435 av_freep(&pic->f.mbskip_table);
436 av_freep(&pic->qscale_table_base);
437 pic->f.qscale_table = NULL;
438 av_freep(&pic->mb_type_base);
439 pic->f.mb_type = NULL;
440 av_freep(&pic->f.dct_coeff);
441 av_freep(&pic->f.pan_scan);
/* NOTE(review): mb_type was already cleared above; this repeat is
 * redundant but harmless */
442 pic->f.mb_type = NULL;
443 for (i = 0; i < 2; i++) {
444 av_freep(&pic->motion_val_base[i]);
445 av_freep(&pic->f.ref_index[i]);
446 pic->f.motion_val[i] = NULL;
/* shared pictures only drop their data pointers; the owner frees them */
449 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
450 for (i = 0; i < 4; i++) {
452 pic->f.data[i] = NULL;
/* Allocate the per-slice-thread state (ME maps, DCT blocks, AC
 * prediction values). Returns 0 on success, -1 on allocation failure;
 * the caller cleans up via ff_MPV_common_end(). */
458 static int init_duplicate_context(MpegEncContext *s)
460 int y_size = s->b8_stride * (2 * s->mb_height + 1);
461 int c_size = s->mb_stride * (s->mb_height + 1);
462 int yc_size = y_size + 2 * c_size;
/* scratch buffers are allocated lazily once the linesize is known */
470 s->obmc_scratchpad = NULL;
473 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
474 ME_MAP_SIZE * sizeof(uint32_t), fail)
475 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
476 ME_MAP_SIZE * sizeof(uint32_t), fail)
477 if (s->avctx->noise_reduction) {
478 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
479 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients, double-buffered */
482 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
483 s->block = s->blocks[0];
485 for (i = 0; i < 12; i++) {
486 s->pblocks[i] = &s->block[i];
/* H.263-family AC prediction: Y plane then two chroma planes, each
 * offset past its guard row/column */
489 if (s->out_format == FMT_H263) {
491 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
492 yc_size * sizeof(int16_t) * 16, fail);
493 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
494 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
495 s->ac_val[2] = s->ac_val[1] + c_size;
500 return -1; // free() through ff_MPV_common_end()
/* Free everything allocated by init_duplicate_context() and
 * ff_mpv_frame_size_alloc() for one slice context. */
503 static void free_duplicate_context(MpegEncContext *s)
508 av_freep(&s->edge_emu_buffer);
509 av_freep(&s->me.scratchpad);
/* obmc_scratchpad aliased me.scratchpad, so only clear the pointer */
513 s->obmc_scratchpad = NULL;
515 av_freep(&s->dct_error_sum);
516 av_freep(&s->me.map);
517 av_freep(&s->me.score_map);
518 av_freep(&s->blocks);
519 av_freep(&s->ac_val_base);
/* Save the per-thread pointers/state of src into bak so they survive a
 * whole-struct memcpy in ff_update_duplicate_context(). NOTE(review):
 * most of the COPY() lines are elided in this chunk. */
523 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
525 #define COPY(a) bak->a = src->a
526 COPY(edge_emu_buffer);
531 COPY(obmc_scratchpad);
538 COPY(me.map_generation);
/* Sync a slice-thread context from the master: copy the whole struct,
 * then restore dst's own per-thread pointers and re-derive pblocks. */
550 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
554 // FIXME copy only needed parts
556 backup_duplicate_context(&bak, dst);
557 memcpy(dst, src, sizeof(MpegEncContext));
558 backup_duplicate_context(dst, &bak);
/* pblocks must point into dst's own block array, not src's */
559 for (i = 0; i < 12; i++) {
560 dst->pblocks[i] = &dst->block[i];
/* scratch buffers are per-thread; reallocate if the copy cleared them */
562 if (!dst->edge_emu_buffer &&
563 (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
564 av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
565 "scratch buffers.\n");
568 // STOP_TIMER("update_duplicate_context")
569 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: copy decoding state from the source thread's context
 * (s1) into this thread's context (s), initializing or resizing s as
 * needed. NOTE(review): many guard/brace lines are elided in this chunk. */
573 int ff_mpeg_update_thread_context(AVCodecContext *dst,
574 const AVCodecContext *src)
578 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
585 // FIXME can parameters change on I-frames?
586 // in that case dst may need a reinit
587 if (!s->context_initialized) {
588 memcpy(s, s1, sizeof(MpegEncContext));
/* the bitstream buffer belongs to s1; s must allocate its own */
591 s->bitstream_buffer = NULL;
592 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
594 if (s1->context_initialized){
/* shift the picture range so the two threads use disjoint slots */
595 s->picture_range_start += MAX_PICTURE_COUNT;
596 s->picture_range_end += MAX_PICTURE_COUNT;
597 if((err = ff_MPV_common_init(s)) < 0){
598 memset(s, 0, sizeof(MpegEncContext));
/* resolution changed (or explicit reinit requested): rebuild tables */
605 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
606 s->context_reinit = 0;
607 s->height = s1->height;
608 s->width = s1->width;
609 if ((err = ff_MPV_common_frame_size_change(s)) < 0)
613 s->avctx->coded_height = s1->avctx->coded_height;
614 s->avctx->coded_width = s1->avctx->coded_width;
615 s->avctx->width = s1->avctx->width;
616 s->avctx->height = s1->avctx->height;
618 s->coded_picture_number = s1->coded_picture_number;
619 s->picture_number = s1->picture_number;
620 s->input_picture_number = s1->input_picture_number;
622 av_assert0(!s->picture || s->picture != s1->picture);
623 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
/* bulk-copy the Picture fields between last_picture and last_picture_ptr
 * (relies on MpegEncContext field ordering) */
624 memcpy(&s->last_picture, &s1->last_picture,
625 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
627 // reset s->picture[].f.extended_data to s->picture[].f.data
628 for (i = 0; i < s->picture_count; i++) {
629 s->picture[i].f.extended_data = s->picture[i].f.data;
630 s->picture[i].period_since_free ++;
/* pointers into s1->picture[] must be rebased into s->picture[] */
633 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
634 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
635 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
637 // Error/bug resilience
638 s->next_p_frame_damaged = s1->next_p_frame_damaged;
639 s->workaround_bugs = s1->workaround_bugs;
640 s->padding_bug_score = s1->padding_bug_score;
/* MPEG-4 timing state: bulk-copy the field span (ordering-dependent) */
643 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
644 (char *) &s1->shape - (char *) &s1->time_increment_bits);
647 s->max_b_frames = s1->max_b_frames;
648 s->low_delay = s1->low_delay;
649 s->droppable = s1->droppable;
651 // DivX handling (doesn't work)
652 s->divx_packed = s1->divx_packed;
654 if (s1->bitstream_buffer) {
655 if (s1->bitstream_buffer_size +
656 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
/* NOTE(review): the size passed is s1->allocated_bitstream_buffer_size;
 * bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE would match the
 * check above — confirm against upstream */
657 av_fast_malloc(&s->bitstream_buffer,
658 &s->allocated_bitstream_buffer_size,
659 s1->allocated_bitstream_buffer_size);
660 s->bitstream_buffer_size = s1->bitstream_buffer_size;
661 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
662 s1->bitstream_buffer_size);
663 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
664 FF_INPUT_BUFFER_PADDING_SIZE);
667 // linesize dependend scratch buffer allocation
668 if (!s->edge_emu_buffer)
/* guard on s1->linesize (elided line) decides which branch runs */
670 if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
671 av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
672 "scratch buffers.\n");
673 return AVERROR(ENOMEM);
676 av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
677 "be allocated due to unknown size.\n");
680 // MPEG2/interlacing info
681 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
682 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
684 if (!s1->first_field) {
685 s->last_pict_type = s1->pict_type;
686 if (s1->current_picture_ptr)
687 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
689 if (s1->pict_type != AV_PICTURE_TYPE_B) {
690 s->last_non_b_pict_type = s1->pict_type;
698 * Set the given MpegEncContext to common defaults
699 * (same for encoding and decoding).
700 * The changed fields will not depend upon the
701 * prior state of the MpegEncContext.
703 void ff_MPV_common_defaults(MpegEncContext *s)
/* MPEG-1 DC scale and identity chroma qscale until a header overrides */
705 s->y_dc_scale_table =
706 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
707 s->chroma_qscale_table = ff_default_chroma_qscale_table;
708 s->progressive_frame = 1;
709 s->progressive_sequence = 1;
710 s->picture_structure = PICT_FRAME;
712 s->coded_picture_number = 0;
713 s->picture_number = 0;
714 s->input_picture_number = 0;
716 s->picture_in_gop_number = 0;
/* full picture-slot range; frame threads shift this to stay disjoint */
721 s->picture_range_start = 0;
722 s->picture_range_end = MAX_PICTURE_COUNT;
724 s->slice_context_count = 1;
728 * Set the given MpegEncContext to defaults for decoding.
729 * the changed fields will not depend upon
730 * the prior state of the MpegEncContext.
732 void ff_MPV_decode_defaults(MpegEncContext *s)
/* currently identical to the common defaults */
734 ff_MPV_common_defaults(s);
738 * Initialize and allocates MpegEncContext fields dependent on the resolution.
740 static int init_context_frame(MpegEncContext *s)
742 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* macroblock grid geometry; +1 strides leave a guard column */
744 s->mb_width = (s->width + 15) / 16;
745 s->mb_stride = s->mb_width + 1;
746 s->b8_stride = s->mb_width * 2 + 1;
747 s->b4_stride = s->mb_width * 4 + 1;
748 mb_array_size = s->mb_height * s->mb_stride;
749 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
751 /* set default edge pos, will be overriden
752 * in decode_header if needed */
753 s->h_edge_pos = s->mb_width * 16;
754 s->v_edge_pos = s->mb_height * 16;
756 s->mb_num = s->mb_width * s->mb_height;
/* wrap values for DC/AC prediction neighbours (some entries elided) */
761 s->block_wrap[3] = s->b8_stride;
763 s->block_wrap[5] = s->mb_stride;
765 y_size = s->b8_stride * (2 * s->mb_height + 1);
766 c_size = s->mb_stride * (s->mb_height + 1);
767 yc_size = y_size + 2 * c_size;
769 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
770 for (y = 0; y < s->mb_height; y++)
771 for (x = 0; x < s->mb_width; x++)
772 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
774 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
777 /* Allocate MV tables */
778 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
779 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
780 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
781 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
782 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
783 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
/* public pointers skip the guard row/column of each base table */
784 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
785 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
786 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
787 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
788 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
789 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
791 /* Allocate MB type table */
792 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
794 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
796 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
797 mb_array_size * sizeof(float), fail);
798 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
799 mb_array_size * sizeof(float), fail);
/* error-resilience bookkeeping */
803 FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
804 mb_array_size * sizeof(uint8_t), fail);
805 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
806 mb_array_size * sizeof(uint8_t), fail);
808 if (s->codec_id == AV_CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)) {
809 /* interlaced direct mode decoding tables */
810 for (i = 0; i < 2; i++) {
812 for (j = 0; j < 2; j++) {
813 for (k = 0; k < 2; k++) {
814 FF_ALLOCZ_OR_GOTO(s->avctx,
815 s->b_field_mv_table_base[i][j][k],
816 mv_table_size * 2 * sizeof(int16_t),
818 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
821 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
822 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
823 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
825 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
828 if (s->out_format == FMT_H263) {
830 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
831 s->coded_block = s->coded_block_base + s->b8_stride + 1;
833 /* cbp, ac_pred, pred_dir */
834 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
835 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
838 if (s->h263_pred || s->h263_plus || !s->encoding) {
840 // MN: we need these for error resilience of intra-frames
841 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
842 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
843 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
844 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor (128 << 3) */
845 for (i = 0; i < yc_size; i++)
846 s->dc_val_base[i] = 1024;
849 /* which mb is a intra block */
850 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
851 memset(s->mbintra_table, 1, mb_array_size);
853 /* init macroblock skip table */
854 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
855 // Note the + 1 is for a quicker mpeg4 slice_end detection
/* fail label (elided): all partial allocations freed by the caller */
859 return AVERROR(ENOMEM);
863 * init common structure for both encoder and decoder.
864 * this assumes that some variables like width/height are already set
866 av_cold int ff_MPV_common_init(MpegEncContext *s)
/* one slice context per thread when slice threading is active */
869 int nb_slices = (HAVE_THREADS &&
870 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
871 s->avctx->thread_count : 1;
873 if (s->encoding && s->avctx->slices)
874 nb_slices = s->avctx->slices;
/* interlaced MPEG-2 rounds mb_height to whole 32-pixel macroblock pairs */
876 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
877 s->mb_height = (s->height + 31) / 32 * 2;
878 else if (s->codec_id != AV_CODEC_ID_H264)
879 s->mb_height = (s->height + 15) / 16;
881 if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
882 av_log(s->avctx, AV_LOG_ERROR,
883 "decoding to AV_PIX_FMT_NONE is not supported.\n");
/* cap the slice count at MAX_THREADS and at one slice per MB row */
887 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
890 max_slices = FFMIN(MAX_THREADS, s->mb_height);
892 max_slices = MAX_THREADS;
893 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
894 " reducing to %d\n", nb_slices, max_slices);
895 nb_slices = max_slices;
898 if ((s->width || s->height) &&
899 av_image_check_size(s->width, s->height, 0, s->avctx))
902 ff_dct_common_init(s);
904 s->flags = s->avctx->flags;
905 s->flags2 = s->avctx->flags2;
907 /* set chroma shifts */
908 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
910 /* convert fourcc to upper case */
911 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
912 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
914 s->avctx->coded_frame = &s->current_picture.f;
/* encoder-only allocations (enclosing guard elided in this chunk) */
917 if (s->msmpeg4_version) {
918 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
919 2 * 2 * (MAX_LEVEL + 1) *
920 (MAX_RUN + 1) * 2 * sizeof(int), fail);
922 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
/* quantization matrices: 32 qscales x 64 coefficients each */
924 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
925 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
926 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
927 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
928 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
929 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
930 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
931 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture *), fail)
933 if (s->avctx->noise_reduction) {
934 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
/* picture pool scales with the thread count for frame threading */
938 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
939 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
940 s->picture_count * sizeof(Picture), fail);
941 for (i = 0; i < s->picture_count; i++) {
942 avcodec_get_frame_defaults(&s->picture[i].f);
945 if (init_context_frame(s))
948 s->parse_context.state = -1;
950 s->context_initialized = 1;
951 s->thread_context[0] = s;
953 // if (s->width && s->height) {
/* clone the master context for each additional slice thread and assign
 * each a contiguous band of MB rows */
955 for (i = 1; i < nb_slices; i++) {
956 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
957 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
960 for (i = 0; i < nb_slices; i++) {
961 if (init_duplicate_context(s->thread_context[i]) < 0)
963 s->thread_context[i]->start_mb_y =
964 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
965 s->thread_context[i]->end_mb_y =
966 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
/* single-threaded path (branch structure elided in this chunk) */
969 if (init_duplicate_context(s) < 0)
972 s->end_mb_y = s->mb_height;
974 s->slice_context_count = nb_slices;
/* fail label (elided): tear everything down */
979 ff_MPV_common_end(s);
984 * Frees and resets MpegEncContext fields depending on the resolution.
985 * Is used during resolution changes to avoid a full reinitialization of the
/* inverse of init_context_frame(): frees every table it allocated and
 * NULLs the derived pointers */
988 static int free_context_frame(MpegEncContext *s)
992 av_freep(&s->mb_type);
993 av_freep(&s->p_mv_table_base);
994 av_freep(&s->b_forw_mv_table_base);
995 av_freep(&s->b_back_mv_table_base);
996 av_freep(&s->b_bidir_forw_mv_table_base);
997 av_freep(&s->b_bidir_back_mv_table_base);
998 av_freep(&s->b_direct_mv_table_base);
999 s->p_mv_table = NULL;
1000 s->b_forw_mv_table = NULL;
1001 s->b_back_mv_table = NULL;
1002 s->b_bidir_forw_mv_table = NULL;
1003 s->b_bidir_back_mv_table = NULL;
1004 s->b_direct_mv_table = NULL;
1005 for (i = 0; i < 2; i++) {
1006 for (j = 0; j < 2; j++) {
1007 for (k = 0; k < 2; k++) {
1008 av_freep(&s->b_field_mv_table_base[i][j][k]);
1009 s->b_field_mv_table[i][j][k] = NULL;
1011 av_freep(&s->b_field_select_table[i][j]);
1012 av_freep(&s->p_field_mv_table_base[i][j]);
1013 s->p_field_mv_table[i][j] = NULL;
1015 av_freep(&s->p_field_select_table[i]);
1018 av_freep(&s->dc_val_base);
1019 av_freep(&s->coded_block_base);
1020 av_freep(&s->mbintra_table);
1021 av_freep(&s->cbp_table);
1022 av_freep(&s->pred_dir_table);
1024 av_freep(&s->mbskip_table);
1026 av_freep(&s->error_status_table);
1027 av_freep(&s->er_temp_buffer);
1028 av_freep(&s->mb_index2xy);
1029 av_freep(&s->lambda_table);
1031 av_freep(&s->cplx_tab);
1032 av_freep(&s->bits_tab);
/* strides are resolution-dependent; force re-derivation on next alloc */
1034 s->linesize = s->uvlinesize = 0;
1036 for (i = 0; i < 3; i++)
1037 av_freep(&s->visualization_buffer[i]);
/* Resolution change: tear down and rebuild only the size-dependent
 * state, keeping the rest of the context intact. Mirrors the slice
 * setup logic in ff_MPV_common_init(). */
1042 int ff_MPV_common_frame_size_change(MpegEncContext *s)
/* free per-slice-thread state first */
1046 if (s->slice_context_count > 1) {
1047 for (i = 0; i < s->slice_context_count; i++) {
1048 free_duplicate_context(s->thread_context[i]);
1050 for (i = 1; i < s->slice_context_count; i++) {
1051 av_freep(&s->thread_context[i]);
1054 free_duplicate_context(s);
1056 free_context_frame(s);
/* existing pictures keep their data but must be reallocated before use */
1059 for (i = 0; i < s->picture_count; i++) {
1060 s->picture[i].needs_realloc = 1;
1063 s->last_picture_ptr =
1064 s->next_picture_ptr =
1065 s->current_picture_ptr = NULL;
/* recompute mb_height exactly as ff_MPV_common_init() does */
1068 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1069 s->mb_height = (s->height + 31) / 32 * 2;
1070 else if (s->codec_id != AV_CODEC_ID_H264)
1071 s->mb_height = (s->height + 15) / 16;
1073 if ((s->width || s->height) &&
1074 av_image_check_size(s->width, s->height, 0, s->avctx))
1075 return AVERROR_INVALIDDATA;
1077 if ((err = init_context_frame(s)))
1080 s->thread_context[0] = s;
1082 if (s->width && s->height) {
1083 int nb_slices = s->slice_context_count;
1084 if (nb_slices > 1) {
1085 for (i = 1; i < nb_slices; i++) {
1086 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1087 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1090 for (i = 0; i < nb_slices; i++) {
1091 if (init_duplicate_context(s->thread_context[i]) < 0)
1093 s->thread_context[i]->start_mb_y =
1094 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1095 s->thread_context[i]->end_mb_y =
1096 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1099 if (init_duplicate_context(s) < 0)
1102 s->end_mb_y = s->mb_height;
1104 s->slice_context_count = nb_slices;
/* fail label (elided): full teardown on error */
1109 ff_MPV_common_end(s);
1113 /* init common structure for both encoder and decoder */
1114 void ff_MPV_common_end(MpegEncContext *s)
/* free slice-thread contexts (index 0 is s itself, so start at 1) */
1118 if (s->slice_context_count > 1) {
1119 for (i = 0; i < s->slice_context_count; i++) {
1120 free_duplicate_context(s->thread_context[i]);
1122 for (i = 1; i < s->slice_context_count; i++) {
1123 av_freep(&s->thread_context[i]);
1125 s->slice_context_count = 1;
1126 } else free_duplicate_context(s);
1128 av_freep(&s->parse_context.buffer);
1129 s->parse_context.buffer_size = 0;
1131 av_freep(&s->bitstream_buffer);
1132 s->allocated_bitstream_buffer_size = 0;
1134 av_freep(&s->avctx->stats_out);
1135 av_freep(&s->ac_stats);
/* chroma matrices may alias the luma ones; free only when distinct */
1137 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1138 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1139 s->q_chroma_intra_matrix= NULL;
1140 s->q_chroma_intra_matrix16= NULL;
1141 av_freep(&s->q_intra_matrix);
1142 av_freep(&s->q_inter_matrix);
1143 av_freep(&s->q_intra_matrix16);
1144 av_freep(&s->q_inter_matrix16);
1145 av_freep(&s->input_picture);
1146 av_freep(&s->reordered_input_picture);
1147 av_freep(&s->dct_offset);
1149 if (s->picture && !s->avctx->internal->is_copy) {
1150 for (i = 0; i < s->picture_count; i++) {
1151 free_picture(s, &s->picture[i]);
1154 av_freep(&s->picture);
1156 free_context_frame(s);
1158 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
1159 avcodec_default_free_buffers(s->avctx);
1161 s->context_initialized = 0;
1162 s->last_picture_ptr =
1163 s->next_picture_ptr =
1164 s->current_picture_ptr = NULL;
1165 s->linesize = s->uvlinesize = 0;
/*
 * Initialize an RLTable's derived lookup arrays (max_level[], max_run[],
 * index_run[]) from its table_run/table_level data, for both the
 * "not last" (0) and "last" (1) coefficient groups.
 *
 * @param rl           the run-level table to initialize
 * @param static_store if non-NULL, a static [2] array of byte buffers used
 *                     to back the three lookup tables instead of av_malloc;
 *                     also makes the call idempotent (see check below)
 */
1168 void ff_init_rl(RLTable *rl,
1169 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1171 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1172 uint8_t index_run[MAX_RUN + 1];
1173 int last, run, level, start, end, i;
1175 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1176 if (static_store && rl->max_level[0])
1179 /* compute max_level[], max_run[] and index_run[] */
1180 for (last = 0; last < 2; last++) {
/* rl->n is used as the "unset" sentinel for index_run[]. */
1189 memset(max_level, 0, MAX_RUN + 1)
1190 memset(max_run, 0, MAX_LEVEL + 1);
1191 memset(index_run, rl->n, MAX_RUN + 1);
1192 for (i = start; i < end; i++) {
1193 run = rl->table_run[i];
1194 level = rl->table_level[i];
/* Remember the first table index for each run value. */
1195 if (index_run[run] == rl->n)
1197 if (level > max_level[run])
1198 max_level[run] = level;
1199 if (run > max_run[level])
1200 max_run[level] = run;
/* Publish the computed tables: slices of static_store when provided,
 * otherwise freshly av_malloc'ed copies. */
1203 rl->max_level[last] = static_store[last];
1205 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1206 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1208 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1210 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1211 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1213 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1215 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1216 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/*
 * Build the per-qscale RL_VLC tables (rl->rl_vlc[q]) from the generic VLC
 * table, pre-multiplying each level by qmul/qadd so the decoder can skip
 * the dequantization step per coefficient.
 */
1220 void ff_init_vlc_rl(RLTable *rl)
/* One table per qscale value 0..31. */
1224 for (q = 0; q < 32; q++) {
1226 int qadd = (q - 1) | 1;
1232 for (i = 0; i < rl->vlc.table_size; i++) {
1233 int code = rl->vlc.table[i][0];
1234 int len = rl->vlc.table[i][1];
/* len == 0: invalid code; len < 0: entry points at a sub-table
 * (more bits needed); code == rl->n: escape code. */
1237 if (len == 0) { // illegal code
1240 } else if (len < 0) { // more bits needed
1244 if (code == rl->n) { // esc
/* Normal code: convert table index to (run, level) with the
 * quant scale baked in; +192 on run flags a "last" coefficient. */
1248 run = rl->table_run[code] + 1;
1249 level = rl->table_level[code] * qmul + qadd;
1250 if (code >= rl->last) run += 192;
1253 rl->rl_vlc[q][i].len = len;
1254 rl->rl_vlc[q][i].level = level;
1255 rl->rl_vlc[q][i].run = run;
/*
 * Release the frame buffers of all non-reference pictures owned by this
 * context.
 *
 * @param remove_current if zero, the current picture is kept even when it
 *                       would otherwise qualify for release
 */
1260 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1264 /* release non reference frames */
1265 for (i = 0; i < s->picture_count; i++) {
/* Only touch pictures this context owns (owner2) and that are
 * allocated (data[0]) but not referenced. */
1266 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1267 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1268 (remove_current || &s->picture[i] != s->current_picture_ptr)
1269 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1270 free_frame_buffer(s, &s->picture[i]);
/*
 * Check whether a picture slot can be reused for a new frame.
 * With frame threading, recently freed pictures are held back for
 * avctx->thread_count frames (period_since_free) so other threads that may
 * still read them are safe.
 * NOTE(review): the return statements are elided in this extract; the
 * visible conditions select between "still in use" and "reusable".
 */
1275 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1277 if ( (s->avctx->active_thread_type & FF_THREAD_FRAME)
1278 && pic->f.qscale_table //check if the frame has anything allocated
1279 && pic->period_since_free < s->avctx->thread_count)
/* Unallocated slots are trivially unused. */
1281 if (pic->f.data[0] == NULL)
/* Pictures flagged for reallocation are reusable unless they are a
 * delayed reference still owned elsewhere. */
1283 if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
1284 if (!pic->owner2 || pic->owner2 == s)
/*
 * Find a free slot in s->picture[] within [picture_range_start,
 * picture_range_end).  For shared pictures only completely empty slots
 * (no data, type 0) qualify; otherwise prefer slots of non-zero type
 * before falling back to any unused slot.
 */
1289 static int find_unused_picture(MpegEncContext *s, int shared)
/* Pass 1 (shared case per surrounding logic): empty, untyped slots. */
1294 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1295 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
/* Pass 2: unused slots that already have a buffer type set. */
1299 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1300 if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
/* Pass 3: any unused slot at all. */
1303 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1304 if (pic_is_unused(s, &s->picture[i]))
/* No slot found: this indicates a bug, not a recoverable condition —
 * see the rationale below for why we abort rather than return -1. */
1309 av_log(s->avctx, AV_LOG_FATAL,
1310 "Internal error, picture buffer overflow\n");
1311 /* We could return -1, but the codec would crash trying to draw into a
1312 * non-existing frame anyway. This is safer than waiting for a random crash.
1313 * Also the return of this is never useful, an encoder must only allocate
1314 * as much as allowed in the specification. This has no relationship to how
1315 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1316 * enough for such valid streams).
1317 * Plus, a decoder has to check stream validity and remove frames if too
1318 * many reference frames are around. Waiting for "OOM" is not correct at
1319 * all. Similarly, missing reference frames have to be replaced by
1320 * interpolated/MC frames, anything else is a bug in the codec ...
/*
 * Public wrapper around find_unused_picture(): if the chosen slot is
 * flagged needs_realloc, free its old contents and reset the AVFrame to
 * defaults before handing the index back to the caller.
 */
1326 int ff_find_unused_picture(MpegEncContext *s, int shared)
1328 int ret = find_unused_picture(s, shared);
1330 if (ret >= 0 && ret < s->picture_range_end) {
1331 if (s->picture[ret].needs_realloc) {
1332 s->picture[ret].needs_realloc = 0;
1333 free_picture(s, &s->picture[ret]);
1334 avcodec_get_frame_defaults(&s->picture[ret].f);
/*
 * Refresh the encoder's noise-reduction DCT offset tables from the running
 * error statistics, separately for intra (1) and inter (0) blocks.
 */
1340 static void update_noise_reduction(MpegEncContext *s)
1344 for (intra = 0; intra < 2; intra++) {
/* Halve the accumulated statistics once the sample count gets large,
 * so the tables keep adapting (exponential forgetting). */
1345 if (s->dct_count[intra] > (1 << 16)) {
1346 for (i = 0; i < 64; i++) {
1347 s->dct_error_sum[intra][i] >>= 1;
1349 s->dct_count[intra] >>= 1;
/* Per-coefficient offset: noise_reduction * count / error_sum,
 * rounded (the +error_sum/2 term) and guarded against /0 by the +1. */
1352 for (i = 0; i < 64; i++) {
1353 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1354 s->dct_count[intra] +
1355 s->dct_error_sum[intra][i] / 2) /
1356 (s->dct_error_sum[intra][i] + 1);
1362 * generic function for encode/decode called after coding/decoding
1363 * the header and before a frame is coded/decoded.
/*
 * Responsibilities visible in this extract:
 *  - release old/forgotten reference frames,
 *  - allocate (or reuse) the current picture and set its metadata,
 *  - synthesize dummy last/next pictures when references are missing,
 *  - adjust data pointers/linesizes for field pictures,
 *  - select the dequantizer functions and update noise reduction.
 * Returns 0 on success, negative on error (error paths partially elided
 * in this extract).
 */
1365 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
/* Frame threading: starting a frame is only legal in SETUP state. */
1371 if (!ff_thread_can_start_frame(avctx)) {
1372 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1376 /* mark & release old frames */
1377 if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
/* The previous last_picture is no longer needed once a new non-B
 * frame starts (it is not the next_picture either). */
1378 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1379 s->last_picture_ptr != s->next_picture_ptr &&
1380 s->last_picture_ptr->f.data[0]) {
1381 if (s->last_picture_ptr->owner2 == s)
1382 free_frame_buffer(s, s->last_picture_ptr);
1385 /* release forgotten pictures */
1386 /* if (mpeg124/h263) */
1388 for (i = 0; i < s->picture_count; i++) {
/* A referenced picture that is neither last nor next should not
 * exist at this point — free it and warn ("zombie picture"). */
1389 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1390 &s->picture[i] != s->last_picture_ptr &&
1391 &s->picture[i] != s->next_picture_ptr &&
1392 s->picture[i].f.reference && !s->picture[i].needs_realloc) {
1393 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1394 av_log(avctx, AV_LOG_ERROR,
1395 "releasing zombie picture\n");
1396 free_frame_buffer(s, &s->picture[i]);
1403 ff_release_unused_pictures(s, 1);
/* Reuse a pre-selected current picture (e.g. set during header
 * parsing) if it has no buffer yet; otherwise grab a free slot. */
1405 if (s->current_picture_ptr &&
1406 s->current_picture_ptr->f.data[0] == NULL) {
1407 // we already have a unused image
1408 // (maybe it was set before reading the header)
1409 pic = s->current_picture_ptr;
1411 i = ff_find_unused_picture(s, 0);
1413 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1416 pic = &s->picture[i];
/* Reference flags: H.264 references per field (picture_structure),
 * other codecs reference both fields (3) for non-B frames. Droppable
 * frames are never references. */
1419 pic->f.reference = 0;
1420 if (!s->droppable) {
1421 if (s->codec_id == AV_CODEC_ID_H264)
1422 pic->f.reference = s->picture_structure;
1423 else if (s->pict_type != AV_PICTURE_TYPE_B)
1424 pic->f.reference = 3;
1427 pic->f.coded_picture_number = s->coded_picture_number++;
1429 if (ff_alloc_picture(s, pic, 0) < 0)
1432 s->current_picture_ptr = pic;
1433 // FIXME use only the vars from current_pic
1434 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1435 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1436 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* For field pictures, derive top_field_first from which field
 * comes first in coding order. */
1437 if (s->picture_structure != PICT_FRAME)
1438 s->current_picture_ptr->f.top_field_first =
1439 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1441 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1442 !s->progressive_sequence;
1443 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1446 s->current_picture_ptr->f.pict_type = s->pict_type;
1447 // if (s->flags && CODEC_FLAG_QSCALE)
1448 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1449 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1451 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* Reference shuffling: a new non-B frame pushes next -> last and
 * becomes the new next reference. */
1453 if (s->pict_type != AV_PICTURE_TYPE_B) {
1454 s->last_picture_ptr = s->next_picture_ptr;
1456 s->next_picture_ptr = s->current_picture_ptr;
1458 av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1459 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1460 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1461 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1462 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1463 s->pict_type, s->droppable);
1465 if (s->codec_id != AV_CODEC_ID_H264) {
/* Missing last reference (stream starts on a P/B frame, or on a
 * field-based keyframe): allocate a gray dummy picture so motion
 * compensation has something to read from. */
1466 if ((s->last_picture_ptr == NULL ||
1467 s->last_picture_ptr->f.data[0] == NULL) &&
1468 (s->pict_type != AV_PICTURE_TYPE_I ||
1469 s->picture_structure != PICT_FRAME)) {
1470 int h_chroma_shift, v_chroma_shift;
1471 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1472 &h_chroma_shift, &v_chroma_shift);
1473 if (s->pict_type != AV_PICTURE_TYPE_I)
1474 av_log(avctx, AV_LOG_ERROR,
1475 "warning: first frame is no keyframe\n");
1476 else if (s->picture_structure != PICT_FRAME)
1477 av_log(avctx, AV_LOG_INFO,
1478 "allocate dummy last picture for field based first keyframe\n");
1480 /* Allocate a dummy frame */
1481 i = ff_find_unused_picture(s, 0);
1483 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1486 s->last_picture_ptr = &s->picture[i];
1487 s->last_picture_ptr->f.key_frame = 0;
1488 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1489 s->last_picture_ptr = NULL;
/* Fill the dummy with mid-gray (0x80). */
1493 memset(s->last_picture_ptr->f.data[0], 0x80,
1494 avctx->height * s->last_picture_ptr->f.linesize[0]);
1495 memset(s->last_picture_ptr->f.data[1], 0x80,
1496 (avctx->height >> v_chroma_shift) *
1497 s->last_picture_ptr->f.linesize[1]);
1498 memset(s->last_picture_ptr->f.data[2], 0x80,
1499 (avctx->height >> v_chroma_shift) *
1500 s->last_picture_ptr->f.linesize[2]);
/* FLV1/H263 use luma 16 for "black-ish" dummy frames instead. */
1502 if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1503 for(i=0; i<avctx->height; i++)
1504 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
/* Mark both fields of the dummy as fully decoded for threading. */
1507 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1508 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1509 s->last_picture_ptr->f.reference = 3;
/* Same treatment for a missing next reference before a B frame. */
1511 if ((s->next_picture_ptr == NULL ||
1512 s->next_picture_ptr->f.data[0] == NULL) &&
1513 s->pict_type == AV_PICTURE_TYPE_B) {
1514 /* Allocate a dummy frame */
1515 i = ff_find_unused_picture(s, 0);
1517 av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1520 s->next_picture_ptr = &s->picture[i];
1521 s->next_picture_ptr->f.key_frame = 0;
1522 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1523 s->next_picture_ptr = NULL;
1526 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1527 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1528 s->next_picture_ptr->f.reference = 3;
/* Refresh the by-value copies used throughout decoding. */
1532 memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1533 memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
1534 if (s->last_picture_ptr)
1535 ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1536 if (s->next_picture_ptr)
1537 ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1539 if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
1540 if (s->next_picture_ptr)
1541 s->next_picture_ptr->owner2 = s;
1542 if (s->last_picture_ptr)
1543 s->last_picture_ptr->owner2 = s;
1546 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1547 s->last_picture_ptr->f.data[0]));
/* Field pictures: offset data pointers to the selected field and
 * double the linesizes so rows step over the other field. */
1549 if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1551 for (i = 0; i < 4; i++) {
1552 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1553 s->current_picture.f.data[i] +=
1554 s->current_picture.f.linesize[i];
1556 s->current_picture.f.linesize[i] *= 2;
1557 s->last_picture.f.linesize[i] *= 2;
1558 s->next_picture.f.linesize[i] *= 2;
1562 s->err_recognition = avctx->err_recognition;
1564 /* set dequantizer, we can't do it during init as
1565 * it might change for mpeg4 and we can't do it in the header
1566 * decode as init is not called for mpeg4 there yet */
1567 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1568 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1569 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1570 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1571 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1572 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1574 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1575 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
/* dct_error_sum only exists when the encoder uses noise reduction. */
1578 if (s->dct_error_sum) {
1579 assert(s->avctx->noise_reduction && s->encoding);
1580 update_noise_reduction(s);
1583 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1584 return ff_xvmc_field_start(s, avctx);
1589 /* generic function for encode/decode called after a
1590 * frame has been coded/decoded. */
/*
 * Finish the current frame: draw replicated edges for unrestricted MV
 * codecs (unless hwaccel/XvMC/edge-emulation makes that unnecessary),
 * record last-frame statistics, sync the current_picture copy back into
 * the picture array, release non-reference buffers, and report decode
 * completion to any waiting frame threads.
 */
1591 void ff_MPV_frame_end(MpegEncContext *s)
1594 /* redraw edges for the frame if decoding didn't complete */
1595 // just to make sure that all data is rendered.
1596 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1597 ff_xvmc_field_end(s);
/* Edge drawing is only needed for reference frames of unrestricted-MV
 * codecs when no hwaccel owns the surfaces and EMU_EDGE is off. */
1598 } else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1599 !s->avctx->hwaccel &&
1600 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1601 s->unrestricted_mv &&
1602 s->current_picture.f.reference &&
1604 !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1607 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1608 int hshift = desc->log2_chroma_w;
1609 int vshift = desc->log2_chroma_h;
/* Replicate the picture borders outward by EDGE_WIDTH pixels
 * (scaled by the chroma subsampling for planes 1/2). */
1610 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1611 s->h_edge_pos, s->v_edge_pos,
1612 EDGE_WIDTH, EDGE_WIDTH,
1613 EDGE_TOP | EDGE_BOTTOM);
1614 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1615 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1616 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1617 EDGE_TOP | EDGE_BOTTOM);
1618 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1619 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1620 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1621 EDGE_TOP | EDGE_BOTTOM);
/* Bookkeeping for rate control / next-frame decisions. */
1626 s->last_pict_type = s->pict_type;
1627 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1628 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1629 s->last_non_b_pict_type = s->pict_type;
1632 /* copy back current_picture variables */
1633 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1634 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1635 s->picture[i] = s->current_picture;
1639 assert(i < MAX_PICTURE_COUNT);
1643 /* release non-reference frames */
1644 for (i = 0; i < s->picture_count; i++) {
1645 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1646 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1647 free_frame_buffer(s, &s->picture[i]);
1651 // clear copies, to avoid confusion
1653 memset(&s->last_picture, 0, sizeof(Picture));
1654 memset(&s->next_picture, 0, sizeof(Picture));
1655 memset(&s->current_picture, 0, sizeof(Picture));
1657 s->avctx->coded_frame = &s->current_picture_ptr->f;
/* Tell waiting frame threads this reference is fully decoded. */
1659 if (s->codec_id != AV_CODEC_ID_H264 && s->current_picture.f.reference) {
1660 ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
1665 * Draw a line from (ex, ey) -> (sx, sy).
1666 * @param w width of the image
1667 * @param h height of the image
1668 * @param stride stride/linesize of the image
1669 * @param color color of the arrow
/*
 * Anti-aliased line drawing into an 8-bit luma plane using 16.16
 * fixed-point stepping along the major axis; the fractional part splits
 * the color between the two adjacent pixels on the minor axis.
 */
1671 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1672 int w, int h, int stride, int color)
/* Clamp endpoints into the image so all writes are in bounds. */
1676 sx = av_clip(sx, 0, w - 1);
1677 sy = av_clip(sy, 0, h - 1);
1678 ex = av_clip(ex, 0, w - 1);
1679 ey = av_clip(ey, 0, h - 1);
1681 buf[sy * stride + sx] += color;
/* Mostly-horizontal case: step in x, interpolate y. */
1683 if (FFABS(ex - sx) > FFABS(ey - sy)) {
/* Swap so we always iterate left-to-right. */
1685 FFSWAP(int, sx, ex);
1686 FFSWAP(int, sy, ey);
1688 buf += sx + sy * stride;
/* 16.16 fixed-point slope (ex is now the x-extent after rebasing). */
1690 f = ((ey - sy) << 16) / ex;
1691 for (x = 0; x <= ex; x++) {
1693 fr = (x * f) & 0xFFFF;
1694 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1695 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* Mostly-vertical case: step in y, interpolate x. */
1699 FFSWAP(int, sx, ex);
1700 FFSWAP(int, sy, ey);
1702 buf += sx + sy * stride;
1705 f = ((ex - sx) << 16) / ey;
1708 for(y= 0; y <= ey; y++){
1710 fr = (y*f) & 0xFFFF;
1711 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1712 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1718 * Draw an arrow from (ex, ey) -> (sx, sy).
1719 * @param w width of the image
1720 * @param h height of the image
1721 * @param stride stride/linesize of the image
1722 * @param color color of the arrow
/*
 * Draws the shaft with draw_line() and, for arrows longer than 3 pixels,
 * two short head strokes at the (sx, sy) end.
 */
1724 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1725 int ey, int w, int h, int stride, int color)
/* Loose clamp (±100 beyond the image) so the head geometry is computed
 * from roughly correct endpoints; draw_line() does the hard clipping. */
1729 sx = av_clip(sx, -100, w + 100);
1730 sy = av_clip(sy, -100, h + 100);
1731 ex = av_clip(ex, -100, w + 100);
1732 ey = av_clip(ey, -100, h + 100);
/* Only draw a head when the arrow is longer than 3 pixels. */
1737 if (dx * dx + dy * dy > 3 * 3) {
1740 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1742 // FIXME subpixel accuracy
/* Scale (rx, ry) to a fixed head length of ~3 pixels. */
1743 rx = ROUNDED_DIV(rx * 3 << 4, length);
1744 ry = ROUNDED_DIV(ry * 3 << 4, length);
/* Two head strokes: one along the shaft, one perpendicular. */
1746 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1747 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1749 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1753 * Print debugging info for the given picture.
/*
 * Two independent debug outputs, selected by avctx->debug / debug_mv:
 *  1. A textual per-macroblock dump (skip count, QP, MB type) to the log.
 *  2. A visual overlay (motion vectors as arrows, QP/MB-type as chroma
 *     coloring, partition boundaries as XOR patterns) drawn into a copy
 *     of the picture (s->visualization_buffer).
 * No-op for hwaccel/VDPAU surfaces or when pict has no mb_type data.
 */
1755 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
1757 if ( s->avctx->hwaccel || !pict || !pict->mb_type
1758 || (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
/* ---- Part 1: textual per-MB dump ---- */
1762 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1765 av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1766 av_get_picture_type_char(pict->pict_type));
1767 for (y = 0; y < s->mb_height; y++) {
1768 for (x = 0; x < s->mb_width; x++) {
1769 if (s->avctx->debug & FF_DEBUG_SKIP) {
1770 int count = s->mbskip_table[x + y * s->mb_stride];
1773 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1775 if (s->avctx->debug & FF_DEBUG_QP) {
1776 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1777 pict->qscale_table[x + y * s->mb_stride]);
1779 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1780 int mb_type = pict->mb_type[x + y * s->mb_stride];
1781 // Type & MV direction
/* One character per MB type; uppercase/lowercase distinguishes
 * variants (e.g. D=direct, d=direct+skip). */
1782 if (IS_PCM(mb_type))
1783 av_log(s->avctx, AV_LOG_DEBUG, "P");
1784 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1785 av_log(s->avctx, AV_LOG_DEBUG, "A");
1786 else if (IS_INTRA4x4(mb_type))
1787 av_log(s->avctx, AV_LOG_DEBUG, "i");
1788 else if (IS_INTRA16x16(mb_type))
1789 av_log(s->avctx, AV_LOG_DEBUG, "I");
1790 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1791 av_log(s->avctx, AV_LOG_DEBUG, "d");
1792 else if (IS_DIRECT(mb_type))
1793 av_log(s->avctx, AV_LOG_DEBUG, "D");
1794 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1795 av_log(s->avctx, AV_LOG_DEBUG, "g");
1796 else if (IS_GMC(mb_type))
1797 av_log(s->avctx, AV_LOG_DEBUG, "G");
1798 else if (IS_SKIP(mb_type))
1799 av_log(s->avctx, AV_LOG_DEBUG, "S");
1800 else if (!USES_LIST(mb_type, 1))
1801 av_log(s->avctx, AV_LOG_DEBUG, ">");
1802 else if (!USES_LIST(mb_type, 0))
1803 av_log(s->avctx, AV_LOG_DEBUG, "<");
1805 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1806 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second character: partition shape. */
1810 if (IS_8X8(mb_type))
1811 av_log(s->avctx, AV_LOG_DEBUG, "+");
1812 else if (IS_16X8(mb_type))
1813 av_log(s->avctx, AV_LOG_DEBUG, "-");
1814 else if (IS_8X16(mb_type))
1815 av_log(s->avctx, AV_LOG_DEBUG, "|");
1816 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1817 av_log(s->avctx, AV_LOG_DEBUG, " ");
1819 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* Third character: interlaced flag. */
1822 if (IS_INTERLACED(mb_type))
1823 av_log(s->avctx, AV_LOG_DEBUG, "=");
1825 av_log(s->avctx, AV_LOG_DEBUG, " ");
1828 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* ---- Part 2: visual overlay ---- */
1832 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1833 (s->avctx->debug_mv)) {
1834 const int shift = 1 + s->quarter_sample;
1838 int h_chroma_shift, v_chroma_shift, block_height;
1839 const int width = s->avctx->width;
1840 const int height = s->avctx->height;
1841 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1842 const int mv_stride = (s->mb_width << mv_sample_log2) +
1843 (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
1844 s->low_delay = 0; // needed to see the vectors without trashing the buffers
1846 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* Copy all three planes into a private buffer so the overlay never
 * modifies the decoder's reference frames. */
1848 for (i = 0; i < 3; i++) {
1849 size_t size= (i == 0) ? pict->linesize[i] * FFALIGN(height, 16):
1850 pict->linesize[i] * FFALIGN(height, 16) >> v_chroma_shift;
1851 s->visualization_buffer[i]= av_realloc(s->visualization_buffer[i], size);
1852 memcpy(s->visualization_buffer[i], pict->data[i], size);
1853 pict->data[i] = s->visualization_buffer[i];
1855 pict->type = FF_BUFFER_TYPE_COPY;
1857 ptr = pict->data[0];
1858 block_height = 16 >> v_chroma_shift;
1860 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1862 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1863 const int mb_index = mb_x + mb_y * s->mb_stride;
1864 if ((s->avctx->debug_mv) && pict->motion_val[0]) {
/* type 0/1/2 selects P-forward / B-forward / B-backward MV
 * display; skip types not requested or not matching pict_type. */
1866 for (type = 0; type < 3; type++) {
1870 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1871 (pict->pict_type!= AV_PICTURE_TYPE_P))
1876 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1877 (pict->pict_type!= AV_PICTURE_TYPE_B))
1882 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1883 (pict->pict_type!= AV_PICTURE_TYPE_B))
1888 if (!USES_LIST(pict->mb_type[mb_index], direction))
/* Draw one arrow per partition: 4 for 8x8, 2 for 16x8/8x16,
 * otherwise a single 16x16 arrow from the MB center. */
1891 if (IS_8X8(pict->mb_type[mb_index])) {
1893 for (i = 0; i < 4; i++) {
1894 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1895 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1896 int xy = (mb_x * 2 + (i & 1) +
1897 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1898 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1899 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1900 draw_arrow(ptr, sx, sy, mx, my, width,
1901 height, s->linesize, 100);
1903 } else if (IS_16X8(pict->mb_type[mb_index])) {
1905 for (i = 0; i < 2; i++) {
1906 int sx = mb_x * 16 + 8;
1907 int sy = mb_y * 16 + 4 + 8 * i;
1908 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1909 int mx = (pict->motion_val[direction][xy][0] >> shift);
1910 int my = (pict->motion_val[direction][xy][1] >> shift);
1912 if (IS_INTERLACED(pict->mb_type[mb_index]))
1915 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1916 height, s->linesize, 100);
1918 } else if (IS_8X16(pict->mb_type[mb_index])) {
1920 for (i = 0; i < 2; i++) {
1921 int sx = mb_x * 16 + 4 + 8 * i;
1922 int sy = mb_y * 16 + 8;
1923 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1924 int mx = pict->motion_val[direction][xy][0] >> shift;
1925 int my = pict->motion_val[direction][xy][1] >> shift;
1927 if (IS_INTERLACED(pict->mb_type[mb_index]))
1930 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1931 height, s->linesize, 100);
1934 int sx= mb_x * 16 + 8;
1935 int sy= mb_y * 16 + 8;
1936 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
1937 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1938 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1939 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP visualization: tint chroma by qscale (128/31 maps qscale
 * to the 0..127 range, replicated into 8 bytes at once). */
1943 if ((s->avctx->debug & FF_DEBUG_VIS_QP)) {
1944 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1945 0x0101010101010101ULL;
1947 for (y = 0; y < block_height; y++) {
1948 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1949 (block_height * mb_y + y) *
1950 pict->linesize[1]) = c;
1951 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1952 (block_height * mb_y + y) *
1953 pict->linesize[2]) = c;
1956 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1957 pict->motion_val[0]) {
1958 int mb_type = pict->mb_type[mb_index];
/* COLOR(theta, r) maps an angle/radius on the UV color wheel
 * to (u, v) chroma values — each MB type gets its own hue. */
1961 #define COLOR(theta, r) \
1962 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1963 v = (int)(128 + r * sin(theta * 3.141592 / 180));
1967 if (IS_PCM(mb_type)) {
1969 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1970 IS_INTRA16x16(mb_type)) {
1972 } else if (IS_INTRA4x4(mb_type)) {
1974 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1976 } else if (IS_DIRECT(mb_type)) {
1978 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1980 } else if (IS_GMC(mb_type)) {
1982 } else if (IS_SKIP(mb_type)) {
1984 } else if (!USES_LIST(mb_type, 1)) {
1986 } else if (!USES_LIST(mb_type, 0)) {
1989 av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* Replicate the chosen (u, v) across 8 bytes and paint the
 * whole MB's chroma area. */
1993 u *= 0x0101010101010101ULL;
1994 v *= 0x0101010101010101ULL;
1995 for (y = 0; y < block_height; y++) {
1996 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1997 (block_height * mb_y + y) * pict->linesize[1]) = u;
1998 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1999 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* Segmentation lines: XOR 0x80 along internal partition
 * boundaries so they remain visible on any background. */
2003 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2004 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2005 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2006 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2007 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2009 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2010 for (y = 0; y < 16; y++)
2011 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2012 pict->linesize[0]] ^= 0x80;
2014 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2015 int dm = 1 << (mv_sample_log2 - 2);
2016 for (i = 0; i < 4; i++) {
2017 int sx = mb_x * 16 + 8 * (i & 1);
2018 int sy = mb_y * 16 + 8 * (i >> 1);
2019 int xy = (mb_x * 2 + (i & 1) +
2020 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
/* Only mark sub-8x8 splits where the MVs actually differ. */
2022 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
2023 if (mv[0] != mv[dm] ||
2024 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2025 for (y = 0; y < 8; y++)
2026 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2027 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2028 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2029 pict->linesize[0]) ^= 0x8080808080808080ULL;
2033 if (IS_INTERLACED(mb_type) &&
2034 s->codec_id == AV_CODEC_ID_H264) {
2038 s->mbskip_table[mb_index] = 0;
/*
 * Half-pel motion compensation for one block in lowres decoding mode.
 * Splits the motion vector into an integer source offset and a sub-pel
 * fraction (sx, sy), calls emulated_edge_mc when the source block hangs
 * over the picture edge, and applies the h264 chroma MC kernel (used here
 * as a generic bilinear interpolator for both luma and chroma).
 */
2044 static inline int hpel_motion_lowres(MpegEncContext *s,
2045 uint8_t *dest, uint8_t *src,
2046 int field_based, int field_select,
2047 int src_x, int src_y,
2048 int width, int height, int stride,
2049 int h_edge_pos, int v_edge_pos,
2050 int w, int h, h264_chroma_mc_func *pix_op,
2051 int motion_x, int motion_y)
2053 const int lowres = s->avctx->lowres;
/* Only 3 interpolation kernels exist (full/half/quarter size). */
2054 const int op_index = FFMIN(lowres, 2);
/* Sub-pel mask: lowres adds one extra fractional bit per level. */
2055 const int s_mask = (2 << lowres) - 1;
2059 if (s->quarter_sample) {
/* Split MV into fractional part (sx, sy) and integer source offset.
 * NOTE(review): ">> lowres + 1" parses as ">> (lowres + 1)". */
2064 sx = motion_x & s_mask;
2065 sy = motion_y & s_mask;
2066 src_x += motion_x >> lowres + 1;
2067 src_y += motion_y >> lowres + 1;
2069 src += src_y * stride + src_x;
/* Edge emulation when the (w+1)x(h+1) source block leaves the image. */
2071 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2072 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2073 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
2074 (h + 1) << field_based, src_x,
2075 src_y << field_based,
2078 src = s->edge_emu_buffer;
/* Rescale sub-pel fraction to the 1/8-pel domain the kernel expects. */
2082 sx = (sx << 2) >> lowres;
2083 sy = (sy << 2) >> lowres;
2086 pix_op[op_index](dest, src, stride, h, sx, sy);
2090 /* apply one mpeg motion vector to the three components */
/*
 * Lowres motion compensation for one macroblock: computes luma and chroma
 * source positions from the MV according to the codec's chroma MV rules
 * (H.263 / H.261 / MPEG variants), handles field-based addressing and
 * edge emulation, then applies the bilinear MC kernels to Y, Cb and Cr.
 */
2091 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
2098 uint8_t **ref_picture,
2099 h264_chroma_mc_func *pix_op,
2100 int motion_x, int motion_y,
2103 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2104 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
2106 const int lowres = s->avctx->lowres;
2107 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2);
/* Block size shrinks with each lowres level: 8, 4, 2, ... */
2108 const int block_s = 8>>lowres;
2109 const int s_mask = (2 << lowres) - 1;
2110 const int h_edge_pos = s->h_edge_pos >> lowres;
2111 const int v_edge_pos = s->v_edge_pos >> lowres;
/* Field-based access doubles the effective linesize. */
2112 linesize = s->current_picture.f.linesize[0] << field_based;
2113 uvlinesize = s->current_picture.f.linesize[1] << field_based;
2115 // FIXME obviously not perfect but qpel will not work in lowres anyway
2116 if (s->quarter_sample) {
2122 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
/* Luma: split MV into sub-pel fraction and integer source position. */
2125 sx = motion_x & s_mask;
2126 sy = motion_y & s_mask;
2127 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2128 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* Chroma MV derivation differs per output format. */
2130 if (s->out_format == FMT_H263) {
2131 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2132 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2133 uvsrc_x = src_x >> 1;
2134 uvsrc_y = src_y >> 1;
2135 } else if (s->out_format == FMT_H261) {
2136 // even chroma mv's are full pel in H261
2139 uvsx = (2 * mx) & s_mask;
2140 uvsy = (2 * my) & s_mask;
2141 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2142 uvsrc_y = mb_y * block_s + (my >> lowres);
/* MPEG-style: behavior depends on chroma subsampling shifts. */
2144 if(s->chroma_y_shift){
2149 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2150 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2152 if(s->chroma_x_shift){
2156 uvsy = motion_y & s_mask;
2158 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2161 uvsx = motion_x & s_mask;
2162 uvsy = motion_y & s_mask;
2169 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2170 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2171 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* Edge emulation for blocks reaching past the decoded picture area;
 * chroma is skipped in gray-only decoding (CODEC_FLAG_GRAY). */
2173 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
2174 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2175 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2176 linesize >> field_based, 17, 17 + field_based,
2177 src_x, src_y << field_based, h_edge_pos,
2179 ptr_y = s->edge_emu_buffer;
2180 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2181 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2182 s->vdsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9,
2184 uvsrc_x, uvsrc_y << field_based,
2185 h_edge_pos >> 1, v_edge_pos >> 1);
2186 s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9,
2188 uvsrc_x, uvsrc_y << field_based,
2189 h_edge_pos >> 1, v_edge_pos >> 1);
2191 ptr_cr = uvbuf + 16;
2195 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
/* Bottom-field output: shift destination down one line. */
2197 dest_y += s->linesize;
2198 dest_cb += s->uvlinesize;
2199 dest_cr += s->uvlinesize;
2203 ptr_y += s->linesize;
2204 ptr_cb += s->uvlinesize;
2205 ptr_cr += s->uvlinesize;
/* Rescale fractions to the kernel's 1/8-pel domain and interpolate. */
2208 sx = (sx << 2) >> lowres;
2209 sy = (sy << 2) >> lowres;
2210 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2212 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2213 uvsx = (uvsx << 2) >> lowres;
2214 uvsy = (uvsy << 2) >> lowres;
2215 if (h >> s->chroma_y_shift) {
2216 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
2217 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
2220 // FIXME h261 lowres loop filter
/*
 * Chroma motion compensation for 4MV (four 8x8 luma vectors) macroblocks
 * in lowres mode: the four luma MVs are combined into one chroma MV with
 * H.263's special rounding, then both chroma planes are interpolated from
 * the same source offset.
 */
2223 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
2224 uint8_t *dest_cb, uint8_t *dest_cr,
2225 uint8_t **ref_picture,
2226 h264_chroma_mc_func * pix_op,
2229 const int lowres = s->avctx->lowres;
2230 const int op_index = FFMIN(lowres, 2);
2231 const int block_s = 8 >> lowres;
2232 const int s_mask = (2 << lowres) - 1;
/* Chroma edges are half the luma edge positions (4:2:0). */
2233 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2234 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2235 int emu = 0, src_x, src_y, offset, sx, sy;
2238 if (s->quarter_sample) {
2243 /* In case of 8X8, we construct a single chroma motion vector
2244 with a special rounding */
2245 mx = ff_h263_round_chroma(mx);
2246 my = ff_h263_round_chroma(my);
2250 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2251 src_y = s->mb_y * block_s + (my >> lowres + 1);
2253 offset = src_y * s->uvlinesize + src_x;
2254 ptr = ref_picture[1] + offset;
/* Edge emulation only when EMU_EDGE is active (no allocated border). */
2255 if (s->flags & CODEC_FLAG_EMU_EDGE) {
2256 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2257 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2258 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
2259 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2260 ptr = s->edge_emu_buffer;
2264 sx = (sx << 2) >> lowres;
2265 sy = (sy << 2) >> lowres;
2266 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr uses the same offset; the emu flag from the Cb pass is reused. */
2268 ptr = ref_picture[2] + offset;
2270 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
2271 src_x, src_y, h_edge_pos, v_edge_pos);
2272 ptr = s->edge_emu_buffer;
2274 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2278 * motion compensation of a single macroblock
2280 * @param dest_y luma destination pointer
2281 * @param dest_cb chroma cb/u destination pointer
2282 * @param dest_cr chroma cr/v destination pointer
2283 * @param dir direction (0->forward, 1->backward)
2284 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2285 * @param pix_op halfpel motion compensation function (average or put normally)
2286 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/*
 * Lowres variant of the macroblock motion compensation dispatcher:
 * selects the MC routine according to s->mv_type (16x16, 4MV/8x8, field,
 * 16x8, dual-prime) using the h264 chroma MC functions for scaling.
 * NOTE(review): this excerpt is missing several lines (case labels,
 * braces, some arguments); comments only describe what is visible.
 */
2288 static inline void MPV_motion_lowres(MpegEncContext *s,
2289 uint8_t *dest_y, uint8_t *dest_cb,
2291 int dir, uint8_t **ref_picture,
2292 h264_chroma_mc_func *pix_op)
2296 const int lowres = s->avctx->lowres;
2297 const int block_s = 8 >>lowres; /* lowres half-block size */
2302 switch (s->mv_type) {
/* single 16x16 vector for the whole macroblock */
2304 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2306 ref_picture, pix_op,
2307 s->mv[dir][0][0], s->mv[dir][0][1],
/* 4MV: one halfpel MC per 8x8 luma block, chroma handled afterwards
 * with a combined vector */
2313 for (i = 0; i < 4; i++) {
2314 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2315 s->linesize) * block_s,
2316 ref_picture[0], 0, 0,
2317 (2 * mb_x + (i & 1)) * block_s,
2318 (2 * mb_y + (i >> 1)) * block_s,
2319 s->width, s->height, s->linesize,
2320 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2321 block_s, block_s, pix_op,
2322 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate the luma vectors for the combined chroma vector */
2324 mx += s->mv[dir][i][0];
2325 my += s->mv[dir][i][1];
2328 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2329 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field prediction inside a frame picture: MC top and bottom field
 * separately with their own vectors and field selects */
2333 if (s->picture_structure == PICT_FRAME) {
2335 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2336 1, 0, s->field_select[dir][0],
2337 ref_picture, pix_op,
2338 s->mv[dir][0][0], s->mv[dir][0][1],
2341 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2342 1, 1, s->field_select[dir][1],
2343 ref_picture, pix_op,
2344 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture: a same-parity reference in a non-B, non-first field
 * lives in the current picture instead of the reference picture */
2347 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2348 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2349 ref_picture = s->current_picture_ptr->f.data;
2352 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2353 0, 0, s->field_select[dir][0],
2354 ref_picture, pix_op,
2356 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two vectors per field picture macroblock */
2360 for (i = 0; i < 2; i++) {
2361 uint8_t **ref2picture;
2363 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2364 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2365 ref2picture = ref_picture;
2367 ref2picture = s->current_picture_ptr->f.data;
2370 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2371 0, 0, s->field_select[dir][i],
2372 ref2picture, pix_op,
2373 s->mv[dir][i][0], s->mv[dir][i][1] +
2374 2 * block_s * i, block_s, mb_y >> 1);
2376 dest_y += 2 * block_s * s->linesize;
2377 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2378 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* dual prime (DMV): put first prediction, then average the opposite
 * parity prediction on top */
2382 if (s->picture_structure == PICT_FRAME) {
2383 for (i = 0; i < 2; i++) {
2385 for (j = 0; j < 2; j++) {
2386 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2388 ref_picture, pix_op,
2389 s->mv[dir][2 * i + j][0],
2390 s->mv[dir][2 * i + j][1],
2393 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2396 for (i = 0; i < 2; i++) {
2397 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2398 0, 0, s->picture_structure != i + 1,
2399 ref_picture, pix_op,
2400 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2401 2 * block_s, mb_y >> 1);
2403 // after put we make avg of the same block
2404 pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
2406 // opposite parity is always in the same
2407 // frame if this is second field
2408 if (!s->first_field) {
2409 ref_picture = s->current_picture_ptr->f.data;
2420 * find the lowest MB row referenced in the MVs
/*
 * Return the lowest (largest-index) macroblock row of the reference frame
 * touched by the MVs of direction @dir — used by frame threading to know
 * how far the reference decode must have progressed.
 * NOTE(review): the mv_type cases that set 'mvs'/'off' defaults are not
 * visible in this excerpt.
 */
2422 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* qpel_shift normalizes halfpel MVs up to quarter-pel units so that
 * 64 units == one 16-pixel MB row below */
2424 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2425 int my, off, i, mvs;
/* field pictures / GMC: fall through to the conservative answer */
2427 if (s->picture_structure != PICT_FRAME || s->mcsel)
2430 switch (s->mv_type) {
2444 for (i = 0; i < mvs; i++) {
2445 my = s->mv[dir][i][1]<<qpel_shift;
2446 my_max = FFMAX(my_max, my);
2447 my_min = FFMIN(my_min, my);
/* +63 rounds up: quarter-pel units -> MB rows (16 px * 4 = 64) */
2450 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2452 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
/* conservative fallback: the whole reference frame */
2454 return s->mb_height-1;
2457 /* put block[] to dest[] */
/* Dequantize an intra block in place and write (not add) its IDCT result
 * to dest. @i is the block index, used by the unquantizer for luma/chroma
 * DC scale selection. */
2458 static inline void put_dct(MpegEncContext *s,
2459 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2461 s->dct_unquantize_intra(s, block, i, qscale);
2462 s->dsp.idct_put (dest, line_size, block);
2465 /* add block[] to dest[] */
/* Add the IDCT of an (already dequantized) block to dest.
 * Skipped entirely when the block has no coded coefficients
 * (block_last_index < 0). */
2466 static inline void add_dct(MpegEncContext *s,
2467 int16_t *block, int i, uint8_t *dest, int line_size)
2469 if (s->block_last_index[i] >= 0) {
2470 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block and add its IDCT result to dest.
 * No-op for uncoded blocks (block_last_index < 0). */
2474 static inline void add_dequant_dct(MpegEncContext *s,
2475 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2477 if (s->block_last_index[i] >= 0) {
2478 s->dct_unquantize_inter(s, block, i, qscale);
2480 s->dsp.idct_add (dest, line_size, block);
2485 * Clean dc, ac, coded_block for the current non-intra MB.
/*
 * Reset the intra-prediction state (DC predictors, AC predictors,
 * coded_block flags) for the current non-intra macroblock so that a later
 * intra MB does not predict from stale values.
 * NOTE(review): the luma dc_val[0][xy] store and some braces are outside
 * this excerpt.
 */
2487 void ff_clean_intra_table_entries(MpegEncContext *s)
2489 int wrap = s->b8_stride; /* luma: 8x8-block resolution */
2490 int xy = s->block_index[0];
/* reset the 4 luma DC predictors to the mid value 1024 (128 << 3) */
2493 s->dc_val[0][xy + 1 ] =
2494 s->dc_val[0][xy + wrap] =
2495 s->dc_val[0][xy + 1 + wrap] = 1024;
/* clear luma AC predictors (two rows of two 8x8 blocks) */
2497 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2498 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2499 if (s->msmpeg4_version>=3) {
/* msmpeg4 v3+ tracks coded_block flags that must be cleared too */
2500 s->coded_block[xy ] =
2501 s->coded_block[xy + 1 ] =
2502 s->coded_block[xy + wrap] =
2503 s->coded_block[xy + 1 + wrap] = 0;
/* chroma predictors live at macroblock resolution */
2506 wrap = s->mb_stride;
2507 xy = s->mb_x + s->mb_y * wrap;
2509 s->dc_val[2][xy] = 1024;
2511 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2512 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2514 s->mbintra_table[xy]= 0; /* MB is no longer marked intra */
2517 /* generic function called after a macroblock has been parsed by the
2518 decoder or after it has been encoded by the encoder.
2520 Important variables used:
2521 s->mb_intra : true if intra macroblock
2522 s->mv_dir : motion vector direction
2523 s->mv_type : motion vector type
2524 s->mv : motion vector
2525 s->interlaced_dct : true if interlaced dct used (mpeg2)
/*
 * Reconstruct one macroblock: motion compensation (normal or lowres)
 * followed by dequantization + IDCT of the residual, writing into
 * s->dest[] or a scratchpad.  Called after the MB has been parsed
 * (decoder) or encoded (encoder).  The is_mpeg12/lowres_flag template
 * parameters let the compiler specialize out dead branches.
 * NOTE(review): numerous lines (braces, else branches, some statements)
 * are not visible in this excerpt; comments describe only what is shown.
 */
2527 static av_always_inline
2528 void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
2529 int lowres_flag, int is_mpeg12)
2531 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XVMC hardware acceleration consumes the pblocks itself */
2532 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2533 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2537 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2538 /* save DCT coefficients */
2540 int16_t *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2541 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2543 for(j=0; j<64; j++){
2544 *dct++ = block[i][s->dsp.idct_permutation[j]];
2545 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2547 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2551 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2553 /* update DC predictors for P macroblocks */
2555 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2556 if(s->mbintra_table[mb_xy])
2557 ff_clean_intra_table_entries(s);
/* reset DC predictors to the mid value for this DC precision */
2561 s->last_dc[2] = 128 << s->intra_dc_precision;
2564 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2565 s->mbintra_table[mb_xy]=1;
2567 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2568 uint8_t *dest_y, *dest_cb, *dest_cr;
2569 int dct_linesize, dct_offset;
2570 op_pixels_func (*op_pix)[4];
2571 qpel_mc_func (*op_qpix)[16];
2572 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2573 const int uvlinesize = s->current_picture.f.linesize[1];
2574 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2575 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2577 /* avoid copy if macroblock skipped in last frame too */
2578 /* skip only during decoding as we might trash the buffers during encoding a bit */
2580 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2582 if (s->mb_skipped) {
2584 av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2586 } else if(!s->current_picture.f.reference) {
2589 *mbskip_ptr = 0; /* not skipped */
/* interlaced DCT: double stride, offset one line instead of one block */
2593 dct_linesize = linesize << s->interlaced_dct;
2594 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2598 dest_cb= s->dest[1];
2599 dest_cr= s->dest[2];
/* non-readable destinations (e.g. B frames read by display) go through
 * a scratchpad and are copied out at the end */
2601 dest_y = s->b_scratchpad;
2602 dest_cb= s->b_scratchpad+16*linesize;
2603 dest_cr= s->b_scratchpad+32*linesize;
2607 /* motion handling */
2608 /* decoding or more than one mb_type (MC was already done otherwise) */
/* frame threading: wait until the referenced rows are decoded */
2611 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2612 if (s->mv_dir & MV_DIR_FORWARD) {
2613 ff_thread_await_progress(&s->last_picture_ptr->f,
2614 ff_MPV_lowest_referenced_row(s, 0),
2617 if (s->mv_dir & MV_DIR_BACKWARD) {
2618 ff_thread_await_progress(&s->next_picture_ptr->f,
2619 ff_MPV_lowest_referenced_row(s, 1),
/* lowres MC path: put forward prediction, average backward on top */
2625 h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2627 if (s->mv_dir & MV_DIR_FORWARD) {
2628 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2629 op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2631 if (s->mv_dir & MV_DIR_BACKWARD) {
2632 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* full-resolution MC path */
2635 op_qpix= s->me.qpel_put;
2636 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2637 op_pix = s->dsp.put_pixels_tab;
2639 op_pix = s->dsp.put_no_rnd_pixels_tab;
2641 if (s->mv_dir & MV_DIR_FORWARD) {
2642 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2643 op_pix = s->dsp.avg_pixels_tab;
2644 op_qpix= s->me.qpel_avg;
2646 if (s->mv_dir & MV_DIR_BACKWARD) {
2647 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2652 /* skip dequant / idct if we are really late ;) */
2653 if(s->avctx->skip_idct){
2654 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2655 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2656 || s->avctx->skip_idct >= AVDISCARD_ALL)
2660 /* add dct residue */
/* codecs in this branch still need per-block dequantization here */
2661 if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2662 || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2663 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2664 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2665 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2666 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2668 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2669 if (s->chroma_y_shift){
2670 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2671 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2675 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2676 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2677 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2678 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* codecs whose blocks are already dequantized during parsing */
2681 } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2682 add_dct(s, block[0], 0, dest_y , dct_linesize);
2683 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2684 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2685 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2687 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2688 if(s->chroma_y_shift){//Chroma420
2689 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2690 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2693 dct_linesize = uvlinesize << s->interlaced_dct;
2694 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2696 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2697 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2698 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2699 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2700 if(!s->chroma_x_shift){//Chroma444
2701 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2702 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2703 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2704 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2709 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2710 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2713 /* dct only in intra block */
/* intra MBs: 'put' the IDCT result (no prediction to add onto) */
2714 if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2715 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2716 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2717 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2718 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2720 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2721 if(s->chroma_y_shift){
2722 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2723 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2727 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2728 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2729 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2730 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* mpeg1/2 intra blocks are already dequantized: plain idct_put */
2734 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2735 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2736 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2737 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2739 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2740 if(s->chroma_y_shift){
2741 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2742 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2745 dct_linesize = uvlinesize << s->interlaced_dct;
2746 dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
2748 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2749 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2750 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2751 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2752 if(!s->chroma_x_shift){//Chroma444
2753 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2754 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2755 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2756 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* scratchpad was used (non-readable dest): copy the result out now */
2764 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2765 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2766 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatch to the specialized MPV_decode_mb_internal
 * instantiations (is_mpeg12 for FMT_MPEG1 streams, lowres_flag when
 * lowres decoding is enabled) so dead branches compile away. */
2771 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2773 if(s->out_format == FMT_MPEG1) {
2774 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2775 else MPV_decode_mb_internal(s, block, 0, 1);
2778 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2779 else MPV_decode_mb_internal(s, block, 0, 0);
2783 * @param h is the normal height, this will be reduced automatically if needed for the last row
/*
 * Called when a horizontal slice of the picture is complete: replicate
 * the picture edges (needed for unrestricted MV prediction) for the
 * finished band, then invoke the user's draw_horiz_band callback.
 * @y first row of the band; @h nominal band height (clipped for the
 * last row).  NOTE(review): some lines are missing from this excerpt.
 */
2785 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2786 const int field_pic= s->picture_structure != PICT_FRAME;
/* edge replication is only needed for software decoding of reference
 * frames with unrestricted MVs and no EMU_EDGE */
2792 if (!s->avctx->hwaccel
2793 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2794 && s->unrestricted_mv
2795 && s->current_picture.f.reference
2797 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2798 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
2799 int sides = 0, edge_h;
2800 int hshift = desc->log2_chroma_w;
2801 int vshift = desc->log2_chroma_h;
2802 if (y==0) sides |= EDGE_TOP;
2803 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2805 edge_h= FFMIN(h, s->v_edge_pos - y); /* clip band to picture height */
2807 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2808 s->linesize, s->h_edge_pos, edge_h,
2809 EDGE_WIDTH, EDGE_WIDTH, sides);
/* chroma planes: shift sizes/offsets by the subsampling factors */
2810 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2811 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2812 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2813 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2814 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2815 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2818 h= FFMIN(h, s->avctx->height - y);
2820 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2822 if (s->avctx->draw_horiz_band) {
2824 int offset[AV_NUM_DATA_POINTERS];
/* pick the frame to display: current in coded order / low delay,
 * otherwise the previous (display-order) picture */
2827 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2828 src = &s->current_picture_ptr->f;
2829 else if(s->last_picture_ptr)
2830 src = &s->last_picture_ptr->f;
2834 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2835 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
/* byte offsets of the band inside each plane */
2838 offset[0]= y * s->linesize;
2840 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2841 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2847 s->avctx->draw_horiz_band(s->avctx, src, offset,
2848 y, s->picture_structure, h);
/*
 * Set up s->block_index[] (per-8x8-block indices used by prediction) and
 * s->dest[] (plane pointers for the current macroblock) for the MB at
 * (s->mb_x, s->mb_y).  mb_size accounts for lowres scaling.
 */
2852 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2853 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2854 const int uvlinesize = s->current_picture.f.linesize[1];
2855 const int mb_size= 4 - s->avctx->lowres; /* log2 of the MB edge in pixels */
/* luma: four 8x8 blocks addressed at b8 resolution; the -2/-1 bias
 * points at the blocks left of the current MB for prediction */
2857 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2858 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2859 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2860 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* chroma blocks live after the luma area, at MB resolution */
2861 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2862 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2863 //block_index is not used by mpeg2, so it is not affected by chroma_format
2865 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2866 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2867 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2869 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2871 if(s->picture_structure==PICT_FRAME){
2872 s->dest[0] += s->mb_y * linesize << mb_size;
2873 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2874 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: rows are interleaved, so halve the MB row */
2876 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2877 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2878 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2879 av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2885 * Permute an 8x8 block.
2886 * @param block the block which will be permuted according to the given permutation vector
2887 * @param permutation the permutation vector
2888 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
2889 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
2890 * (inverse) permutated to scantable order!
/*
 * Re-permute the coefficients of block[] according to @permutation,
 * visiting only the first @last+1 positions in @scantable order (the
 * rest are known zero).  Copies the touched coefficients to a temp
 * array first, then writes them back at their permuted positions.
 * NOTE(review): the temp-array declaration and early-out are outside
 * this excerpt.
 */
2892 void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
2898 //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations
/* first pass (visible portion): gather coefficients in scan order */
2900 for(i=0; i<=last; i++){
2901 const int j= scantable[i];
/* second pass: scatter them to their permuted slots */
2906 for(i=0; i<=last; i++){
2907 const int j= scantable[i];
2908 const int perm_j= permutation[j];
2909 block[perm_j]= temp[j];
/*
 * Flush decoder state on seek: release all internally/user-allocated
 * picture buffers, drop the picture pointers and reset the parser so no
 * stale bitstream data survives.
 */
2913 void ff_mpeg_flush(AVCodecContext *avctx){
2915 MpegEncContext *s = avctx->priv_data;
2917 if(s==NULL || s->picture==NULL)
2920 for(i=0; i<s->picture_count; i++){
2921 if (s->picture[i].f.data[0] &&
2922 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2923 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2924 free_frame_buffer(s, &s->picture[i]);
2926 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2928 s->mb_x= s->mb_y= 0;
/* reset the start-code parser state */
2931 s->parse_context.state= -1;
2932 s->parse_context.frame_start_found= 0;
2933 s->parse_context.overread= 0;
2934 s->parse_context.overread_index= 0;
2935 s->parse_context.index= 0;
2936 s->parse_context.last_index= 0;
2937 s->bitstream_buffer_size=0;
/*
 * MPEG-1 intra dequantization: DC is scaled by the y/c DC scale, AC
 * coefficients by qscale * intra matrix with the MPEG-1 "oddification"
 * mismatch control ((level - 1) | 1 forces the result odd).
 * NOTE(review): the sign handling/clipping lines are outside this excerpt.
 */
2941 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2942 int16_t *block, int n, int qscale)
2944 int i, level, nCoeffs;
2945 const uint16_t *quant_matrix;
2947 nCoeffs= s->block_last_index[n];
/* blocks 0-3 are luma, 4+ chroma — different DC scales */
2949 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
2950 /* XXX: only mpeg1 */
2951 quant_matrix = s->intra_matrix;
2952 for(i=1;i<=nCoeffs;i++) {
2953 int j= s->intra_scantable.permutated[i];
/* negative branch (visible half) */
2958 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2959 level = (level - 1) | 1; /* force odd (mismatch control) */
/* positive branch */
2962 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2963 level = (level - 1) | 1;
/*
 * MPEG-1 inter dequantization: level' = ((2*level + 1) * qscale *
 * inter_matrix) >> 4, with the same force-odd mismatch control as intra.
 * Starts at i=0 (no special DC handling for inter blocks).
 */
2970 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2971 int16_t *block, int n, int qscale)
2973 int i, level, nCoeffs;
2974 const uint16_t *quant_matrix;
2976 nCoeffs= s->block_last_index[n];
2978 quant_matrix = s->inter_matrix;
2979 for(i=0; i<=nCoeffs; i++) {
2980 int j= s->intra_scantable.permutated[i];
/* negative branch (visible half) */
2985 level = (((level << 1) + 1) * qscale *
2986 ((int) (quant_matrix[j]))) >> 4;
2987 level = (level - 1) | 1; /* force odd (mismatch control) */
/* positive branch */
2990 level = (((level << 1) + 1) * qscale *
2991 ((int) (quant_matrix[j]))) >> 4;
2992 level = (level - 1) | 1;
/*
 * MPEG-2 intra dequantization: like MPEG-1 but WITHOUT the force-odd
 * step (MPEG-2 uses a different, sum-based mismatch control).  With
 * alternate_scan all 64 coefficients must be processed.
 */
2999 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
3000 int16_t *block, int n, int qscale)
3002 int i, level, nCoeffs;
3003 const uint16_t *quant_matrix;
3005 if(s->alternate_scan) nCoeffs= 63;
3006 else nCoeffs= s->block_last_index[n];
3008 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3009 quant_matrix = s->intra_matrix;
3010 for(i=1;i<=nCoeffs;i++) {
3011 int j= s->intra_scantable.permutated[i];
/* negative / positive branches (no oddification here) */
3016 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3019 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * Bitexact MPEG-2 intra dequantization: same scaling as the non-bitexact
 * variant; presumably the missing lines implement the spec's parity-sum
 * mismatch control on the last coefficient — TODO confirm, those lines
 * are not visible in this excerpt.
 */
3026 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
3027 int16_t *block, int n, int qscale)
3029 int i, level, nCoeffs;
3030 const uint16_t *quant_matrix;
3033 if(s->alternate_scan) nCoeffs= 63;
3034 else nCoeffs= s->block_last_index[n];
3036 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3038 quant_matrix = s->intra_matrix;
3039 for(i=1;i<=nCoeffs;i++) {
3040 int j= s->intra_scantable.permutated[i];
3045 level = (int)(level * qscale * quant_matrix[j]) >> 3;
3048 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * MPEG-2 inter dequantization: ((2*level + 1) * qscale * inter_matrix)
 * >> 4, without MPEG-1's force-odd step.  The spec's sum-parity mismatch
 * control presumably follows in lines not visible here — TODO confirm.
 */
3057 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
3058 int16_t *block, int n, int qscale)
3060 int i, level, nCoeffs;
3061 const uint16_t *quant_matrix;
3064 if(s->alternate_scan) nCoeffs= 63;
3065 else nCoeffs= s->block_last_index[n];
3067 quant_matrix = s->inter_matrix;
3068 for(i=0; i<=nCoeffs; i++) {
3069 int j= s->intra_scantable.permutated[i];
/* negative / positive branches */
3074 level = (((level << 1) + 1) * qscale *
3075 ((int) (quant_matrix[j]))) >> 4;
3078 level = (((level << 1) + 1) * qscale *
3079 ((int) (quant_matrix[j]))) >> 4;
/*
 * H.263 intra dequantization: level' = level*qmul ± qadd depending on
 * sign, with qadd = (qscale-1)|1 (always odd).  DC is scaled by the
 * y/c DC scale.  NOTE(review): the qmul assignment and the nCoeffs
 * selection for the AC-pred case are not visible in this excerpt.
 */
3088 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
3089 int16_t *block, int n, int qscale)
3091 int i, level, qmul, qadd;
3094 assert(s->block_last_index[n]>=0);
3099 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3100 qadd = (qscale - 1) | 1; /* forced odd */
/* raster_end[] maps the scan position of the last coeff to a raster bound */
3107 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3109 for(i=1; i<=nCoeffs; i++) {
3113 level = level * qmul - qadd; /* negative coefficients */
3115 level = level * qmul + qadd; /* positive coefficients */
/*
 * H.263 inter dequantization: identical scaling to the intra variant but
 * starting at i=0 and with no DC special case.  NOTE(review): the qmul
 * assignment is not visible in this excerpt.
 */
3122 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
3123 int16_t *block, int n, int qscale)
3125 int i, level, qmul, qadd;
3128 assert(s->block_last_index[n]>=0);
3130 qadd = (qscale - 1) | 1; /* forced odd */
3133 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3135 for(i=0; i<=nCoeffs; i++) {
3139 level = level * qmul - qadd; /* negative coefficients */
3141 level = level * qmul + qadd; /* positive coefficients */
3149 * set qscale and update qscale dependent variables.
/*
 * Set s->qscale (clamped to the legal 1..31 range; the lower clamp is in
 * lines not visible here) and refresh the derived chroma qscale and
 * luma/chroma DC scale values from their lookup tables.
 */
3151 void ff_set_qscale(MpegEncContext * s, int qscale)
3155 else if (qscale > 31)
3159 s->chroma_qscale= s->chroma_qscale_table[qscale];
3161 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3162 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
3165 void ff_MPV_report_decode_progress(MpegEncContext *s)
3167 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
3168 ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);