2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the plain-C dct_unquantize implementations.
 * They are installed as function pointers on the MpegEncContext by
 * ff_dct_common_init() below (one intra/inter pair per coding standard). */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Identity mapping luma qscale -> chroma qscale (chroma uses the same
 * quantizer as luma by default); installed in ff_MPV_common_defaults(). */
69 static const uint8_t ff_default_chroma_qscale_table[32] = {
70 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
72 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale value.
 * Also reused as entry 0 of ff_mpeg2_dc_scale_table[] below. */
75 const uint8_t ff_mpeg1_dc_scale_table[128] = {
76 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
84 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale for intra_dc_precision == 1 (9-bit DC): constant 4. */
87 static const uint8_t mpeg2_dc_scale_table1[128] = {
88 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
96 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale for intra_dc_precision == 2 (10-bit DC): constant 2. */
99 static const uint8_t mpeg2_dc_scale_table2[128] = {
100 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
108 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale for intra_dc_precision == 3 (11-bit DC): constant 1. */
111 static const uint8_t mpeg2_dc_scale_table3[128] = {
112 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
120 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* DC scale table indexed by MPEG-2 intra_dc_precision (0..3);
 * entry 0 (8-bit DC) reuses the MPEG-1 table of constant 8. */
123 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
124 ff_mpeg1_dc_scale_table,
125 mpeg2_dc_scale_table1,
126 mpeg2_dc_scale_table2,
127 mpeg2_dc_scale_table3,
/* Supported output pixel formats for 4:2:0 decoders; the second list adds
 * hardware-acceleration formats. Initializer contents not visible in this
 * view — presumably PIX_FMT_YUV420P plus hwaccel entries; confirm in full file. */
130 const enum PixelFormat ff_pixfmt_list_420[] = {
135 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/* Scan [p, end) for an MPEG start code (00 00 01 xx), carrying partial-match
 * state across calls in *state (holds the last 4 bytes seen).
 * Returns a pointer just past the start code, or past the buffer end when
 * no start code is found before `end`. */
143 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
145 uint32_t * restrict state)
/* First resolve a start code that may straddle the previous buffer:
 * feed up to 3 bytes through *state before the word-wise main scan. */
153 for (i = 0; i < 3; i++) {
154 uint32_t tmp = *state << 8;
155 *state = tmp + *(p++);
156 if (tmp == 0x100 || p == end)
/* Skip ahead based on the last bytes read: none of the skipped positions
 * can begin a 00 00 01 sequence. */
161 if (p[-1] > 1 ) p += 3;
162 else if (p[-2] ) p += 2;
163 else if (p[-3]|(p[-1]-1)) p++;
/* Clamp and step back so the caller-visible pointer/state line up. */
170 p = FFMIN(p, end) - 4;
176 /* init common dct for both encoder and decoder */
177 av_cold int ff_dct_common_init(MpegEncContext *s)
178 /* DSP utilities first: later calls rely on s->dsp.idct_permutation. */
179 ff_dsputil_init(&s->dsp, s->avctx);
/* Install the generic C dequantizers; arch-specific init below may
 * override them with optimized versions. */
181 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
182 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
183 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
184 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
185 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
186 if (s->flags & CODEC_FLAG_BITEXACT)
187 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
188 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture initialization; presumably each call is wrapped in an
 * ARCH_* #if in the full file — confirm there. */
191 ff_MPV_common_init_mmx(s);
193 ff_MPV_common_init_axp(s);
195 ff_MPV_common_init_mmi(s);
197 ff_MPV_common_init_arm(s);
199 ff_MPV_common_init_altivec(s);
201 ff_MPV_common_init_bfin(s);
204 /* load & permutate scantables
205 * note: only wmv uses different ones
207 if (s->alternate_scan) {
208 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
209 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
211 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
212 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
214 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
215 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy src Picture into dst and mark dst as a COPY-type buffer so the
 * buffer-release logic does not free the shared data twice. */
220 void ff_copy_picture(Picture *dst, Picture *src)
223 dst->f.type = FF_BUFFER_TYPE_COPY;
227 * Release a frame buffer
229 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
231 /* Windows Media Image codecs allocate internal buffers with different
232 * dimensions; ignore user defined callbacks for these
/* Non-WMV3IMAGE/VC1IMAGE: go through the (possibly frame-threaded)
 * release path; otherwise (not visible here) the default releaser is used. */
234 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
235 ff_thread_release_buffer(s->avctx, &pic->f);
237 avcodec_default_release_buffer(s->avctx, &pic->f);
/* Always drop any hwaccel private data attached to the frame. */
238 av_freep(&pic->f.hwaccel_picture_private);
242 * Allocate a frame buffer
244 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* When a hwaccel is active, allocate its per-picture private data first. */
248 if (s->avctx->hwaccel) {
249 assert(!pic->f.hwaccel_picture_private);
250 if (s->avctx->hwaccel->priv_data_size) {
251 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
252 if (!pic->f.hwaccel_picture_private) {
253 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* Same WMV3IMAGE/VC1IMAGE special case as free_frame_buffer(): those
 * codecs bypass the threaded get_buffer path. */
259 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
260 r = ff_thread_get_buffer(s->avctx, &pic->f);
262 r = avcodec_default_get_buffer(s->avctx, &pic->f);
/* Validate the buffer the callback handed back. */
264 if (r < 0 || !pic->f.type || !pic->f.data[0]) {
265 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
266 r, pic->f.type, pic->f.data[0]);
267 av_freep(&pic->f.hwaccel_picture_private);
/* Strides must not change mid-stream: the rest of the decoder caches
 * s->linesize/s->uvlinesize. */
271 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
272 s->uvlinesize != pic->f.linesize[1])) {
273 av_log(s->avctx, AV_LOG_ERROR,
274 "get_buffer() failed (stride changed)\n");
275 free_frame_buffer(s, pic);
/* U and V planes are assumed to share one stride throughout this file. */
279 if (pic->f.linesize[1] != pic->f.linesize[2]) {
280 av_log(s->avctx, AV_LOG_ERROR,
281 "get_buffer() failed (uv stride mismatch)\n");
282 free_frame_buffer(s, pic);
290 * Allocate a Picture.
291 * The pixels are allocated/set by calling get_buffer() if shared = 0
293 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
/* Table sizes: one extra mb row/col in big_mb_num so edge macroblocks can
 * be addressed without bounds checks. */
295 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
297 // the + 1 is needed so memset(,,stride*height) does not sig11
299 const int mb_array_size = s->mb_stride * s->mb_height;
300 const int b8_array_size = s->b8_stride * s->mb_height * 2;
301 const int b4_array_size = s->b4_stride * s->mb_height * 4;
/* shared: caller already owns pixel data; just tag the Picture. */
306 assert(pic->f.data[0]);
307 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
308 pic->f.type = FF_BUFFER_TYPE_SHARED;
310 assert(!pic->f.data[0]);
312 if (alloc_frame_buffer(s, pic) < 0)
/* Cache the strides returned by get_buffer() for the whole context. */
315 s->linesize = pic->f.linesize[0];
316 s->uvlinesize = pic->f.linesize[1];
/* Allocate per-picture side tables only once (qscale_table is the marker). */
319 if (pic->f.qscale_table == NULL) {
/* Encoder-only statistics tables — presumably guarded by s->encoding in
 * the full file; confirm there. */
321 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
322 mb_array_size * sizeof(int16_t), fail)
323 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
324 mb_array_size * sizeof(int16_t), fail)
325 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
326 mb_array_size * sizeof(int8_t ), fail)
329 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
330 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
331 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
332 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
334 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
335 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
/* Public pointers are offset past the guard row/column. */
337 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
338 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
/* H.264 uses 4x4 motion granularity (subsample_log2 = 2) ... */
339 if (s->out_format == FMT_H264) {
340 for (i = 0; i < 2; i++) {
341 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
342 2 * (b4_array_size + 4) * sizeof(int16_t),
344 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
345 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
346 4 * mb_array_size * sizeof(uint8_t), fail)
348 pic->f.motion_subsample_log2 = 2;
/* ... H.263-family / encoder / MV-debug use 8x8 (subsample_log2 = 3). */
349 } else if (s->out_format == FMT_H263 || s->encoding ||
350 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
351 for (i = 0; i < 2; i++) {
352 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
353 2 * (b8_array_size + 4) * sizeof(int16_t),
355 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
356 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
357 4 * mb_array_size * sizeof(uint8_t), fail)
359 pic->f.motion_subsample_log2 = 3;
361 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
362 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
363 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
365 pic->f.qstride = s->mb_stride;
366 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
367 1 * sizeof(AVPanScan), fail)
373 fail: // for the FF_ALLOCZ_OR_GOTO macro
375 free_frame_buffer(s, pic);
380 * Deallocate a picture.
382 static void free_picture(MpegEncContext *s, Picture *pic)
/* Release the pixel buffer unless it is caller-owned (SHARED type). */
386 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
387 free_frame_buffer(s, pic);
/* Free all per-picture side tables (av_freep nulls the pointers). */
390 av_freep(&pic->mb_var);
391 av_freep(&pic->mc_mb_var);
392 av_freep(&pic->mb_mean);
393 av_freep(&pic->f.mbskip_table);
394 av_freep(&pic->qscale_table_base);
395 av_freep(&pic->mb_type_base);
396 av_freep(&pic->f.dct_coeff);
397 av_freep(&pic->f.pan_scan);
/* mb_type points into mb_type_base, which was just freed. */
398 pic->f.mb_type = NULL;
399 for (i = 0; i < 2; i++) {
400 av_freep(&pic->motion_val_base[i]);
401 av_freep(&pic->f.ref_index[i]);
/* Shared pictures: only drop our references, never the data itself. */
404 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
405 for (i = 0; i < 4; i++) {
407 pic->f.data[i] = NULL;
/* Allocate the per-slice-thread scratch buffers of a (possibly duplicated)
 * MpegEncContext. Returns 0 on success, -1 on allocation failure (cleanup
 * then happens through ff_MPV_common_end()). */
413 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
415 int y_size = s->b8_stride * (2 * s->mb_height + 1);
416 int c_size = s->mb_stride * (s->mb_height + 1);
417 int yc_size = y_size + 2 * c_size;
420 // edge emu needs blocksize + filter length - 1
421 // (= 17x17 for halfpel / 21x21 for h264)
422 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
423 (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
425 // FIXME should be linesize instead of s->width * 2
426 // but that is not known before get_buffer()
427 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
428 (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
/* The ME/RD/B-frame scratchpads all alias one allocation; obmc uses a
 * 16-byte offset into it. */
429 s->me.temp = s->me.scratchpad;
430 s->rd_scratchpad = s->me.scratchpad;
431 s->b_scratchpad = s->me.scratchpad;
432 s->obmc_scratchpad = s->me.scratchpad + 16;
434 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
435 ME_MAP_SIZE * sizeof(uint32_t), fail)
436 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
437 ME_MAP_SIZE * sizeof(uint32_t), fail)
438 if (s->avctx->noise_reduction) {
439 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
440 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 coefficients (up to 4:4:4), double-buffered. */
443 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
444 s->block = s->blocks[0];
446 for (i = 0; i < 12; i++) {
447 s->pblocks[i] = &s->block[i];
/* H.263-family AC prediction values: luma plane then both chroma planes,
 * each offset past a guard row/column. */
450 if (s->out_format == FMT_H263) {
452 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
453 yc_size * sizeof(int16_t) * 16, fail);
454 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
455 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
456 s->ac_val[2] = s->ac_val[1] + c_size;
461 return -1; // free() through ff_MPV_common_end()
/* Free everything allocated by init_duplicate_context(). Safe on partially
 * initialized contexts since av_freep() handles NULL and nulls the pointer. */
464 static void free_duplicate_context(MpegEncContext *s)
469 av_freep(&s->edge_emu_buffer);
470 av_freep(&s->me.scratchpad);
/* obmc_scratchpad aliases me.scratchpad (freed above) — only clear it. */
474 s->obmc_scratchpad = NULL;
476 av_freep(&s->dct_error_sum);
477 av_freep(&s->me.map);
478 av_freep(&s->me.score_map);
479 av_freep(&s->blocks);
480 av_freep(&s->ac_val_base);
/* Save the per-thread pointer fields of src into bak so that
 * ff_update_duplicate_context() can memcpy the whole struct and then
 * restore the thread-local allocations. */
484 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
486 #define COPY(a) bak->a = src->a
487 COPY(edge_emu_buffer);
492 COPY(obmc_scratchpad);
499 COPY(me.map_generation);
/* Refresh a duplicated slice-thread context from the master: copy the full
 * struct, then restore dst's own scratch buffers and re-point pblocks into
 * dst's block array. */
511 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
515 // FIXME copy only needed parts
517 backup_duplicate_context(&bak, dst);
518 memcpy(dst, src, sizeof(MpegEncContext));
519 backup_duplicate_context(dst, &bak);
/* pblocks must reference dst's blocks, not src's. */
520 for (i = 0; i < 12; i++) {
521 dst->pblocks[i] = &dst->block[i];
523 // STOP_TIMER("update_duplicate_context")
524 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: copy decoding state from the source thread's context
 * (s1) into the destination thread's context (s) between frames. */
527 int ff_mpeg_update_thread_context(AVCodecContext *dst,
528 const AVCodecContext *src)
530 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
/* Nothing to do for self-update or before the source is initialized. */
532 if (dst == src || !s1->context_initialized)
535 // FIXME can parameters change on I-frames?
536 // in that case dst may need a reinit
/* First use of this thread context: clone s1 wholesale, then give this
 * copy its own picture range and bitstream buffer before common_init. */
537 if (!s->context_initialized) {
538 memcpy(s, s1, sizeof(MpegEncContext));
541 s->picture_range_start += MAX_PICTURE_COUNT;
542 s->picture_range_end += MAX_PICTURE_COUNT;
543 s->bitstream_buffer = NULL;
544 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
546 ff_MPV_common_init(s);
549 s->avctx->coded_height = s1->avctx->coded_height;
550 s->avctx->coded_width = s1->avctx->coded_width;
551 s->avctx->width = s1->avctx->width;
552 s->avctx->height = s1->avctx->height;
554 s->coded_picture_number = s1->coded_picture_number;
555 s->picture_number = s1->picture_number;
556 s->input_picture_number = s1->input_picture_number;
/* Copy the picture array, plus the struct-layout-dependent span of fields
 * between last_picture and last_picture_ptr. */
558 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
559 memcpy(&s->last_picture, &s1->last_picture,
560 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
/* Pointers into s1->picture[] must be rebased into s->picture[]. */
562 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
563 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
564 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
566 // Error/bug resilience
567 s->next_p_frame_damaged = s1->next_p_frame_damaged;
568 s->workaround_bugs = s1->workaround_bugs;
/* MPEG-4 timing fields: again a raw byte-span copy keyed on field order. */
571 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
572 (char *) &s1->shape - (char *) &s1->time_increment_bits);
575 s->max_b_frames = s1->max_b_frames;
576 s->low_delay = s1->low_delay;
577 s->dropable = s1->dropable;
579 // DivX handling (doesn't work)
580 s->divx_packed = s1->divx_packed;
/* Duplicate any pending bitstream data, growing our buffer as needed and
 * zeroing the input padding the bitstream reader relies on. */
582 if (s1->bitstream_buffer) {
583 if (s1->bitstream_buffer_size +
584 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
585 av_fast_malloc(&s->bitstream_buffer,
586 &s->allocated_bitstream_buffer_size,
587 s1->allocated_bitstream_buffer_size);
588 s->bitstream_buffer_size = s1->bitstream_buffer_size;
589 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
590 s1->bitstream_buffer_size);
591 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
592 FF_INPUT_BUFFER_PADDING_SIZE);
595 // MPEG2/interlacing info
596 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
597 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
/* Carry over last-picture statistics only once per frame (first field). */
599 if (!s1->first_field) {
600 s->last_pict_type = s1->pict_type;
601 if (s1->current_picture_ptr)
602 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
604 if (s1->pict_type != AV_PICTURE_TYPE_B) {
605 s->last_non_b_pict_type = s1->pict_type;
613 * Set the given MpegEncContext to common defaults
614 * (same for encoding and decoding).
615 * The changed fields will not depend upon the
616 * prior state of the MpegEncContext.
618 void ff_MPV_common_defaults(MpegEncContext *s)
/* DC/chroma quantizer tables default to the MPEG-1 / identity mappings. */
620 s->y_dc_scale_table =
621 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
622 s->chroma_qscale_table = ff_default_chroma_qscale_table;
/* Progressive full-frame coding until a header says otherwise. */
623 s->progressive_frame = 1;
624 s->progressive_sequence = 1;
625 s->picture_structure = PICT_FRAME;
627 s->coded_picture_number = 0;
628 s->picture_number = 0;
629 s->input_picture_number = 0;
631 s->picture_in_gop_number = 0;
636 s->picture_range_start = 0;
637 s->picture_range_end = MAX_PICTURE_COUNT;
639 s->slice_context_count = 1;
643 * Set the given MpegEncContext to defaults for decoding.
644 * the changed fields will not depend upon
645 * the prior state of the MpegEncContext.
647 void ff_MPV_decode_defaults(MpegEncContext *s)
/* Decoding currently adds nothing beyond the common defaults. */
649 ff_MPV_common_defaults(s);
653 * init common structure for both encoder and decoder.
654 * this assumes that some variables like width/height are already set
656 av_cold int ff_MPV_common_init(MpegEncContext *s)
658 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* One slice context per slice thread; otherwise a single context. */
659 int nb_slices = (HAVE_THREADS &&
660 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
661 s->avctx->thread_count : 1;
663 if (s->encoding && s->avctx->slices)
664 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 rounds mb_height to an even number of MB rows;
 * H.264 manages its own mb_height. */
666 if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
667 s->mb_height = (s->height + 31) / 32 * 2;
668 else if (s->codec_id != CODEC_ID_H264)
669 s->mb_height = (s->height + 15) / 16;
671 if (s->avctx->pix_fmt == PIX_FMT_NONE) {
672 av_log(s->avctx, AV_LOG_ERROR,
673 "decoding to PIX_FMT_NONE is not supported.\n");
/* Clamp the slice count to what the frame and MAX_THREADS allow. */
677 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
680 max_slices = FFMIN(MAX_THREADS, s->mb_height);
682 max_slices = MAX_THREADS;
683 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
684 " reducing to %d\n", nb_slices, max_slices);
685 nb_slices = max_slices;
688 if ((s->width || s->height) &&
689 av_image_check_size(s->width, s->height, 0, s->avctx))
692 ff_dct_common_init(s);
694 s->flags = s->avctx->flags;
695 s->flags2 = s->avctx->flags2;
/* Geometry-dependent setup; skipped when dimensions are not yet known. */
697 if (s->width && s->height) {
698 s->mb_width = (s->width + 15) / 16;
699 s->mb_stride = s->mb_width + 1;
700 s->b8_stride = s->mb_width * 2 + 1;
701 s->b4_stride = s->mb_width * 4 + 1;
702 mb_array_size = s->mb_height * s->mb_stride;
703 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
705 /* set chroma shifts */
706 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
709 /* set default edge pos, will be overriden
710 * in decode_header if needed */
711 s->h_edge_pos = s->mb_width * 16;
712 s->v_edge_pos = s->mb_height * 16;
714 s->mb_num = s->mb_width * s->mb_height;
719 s->block_wrap[3] = s->b8_stride;
721 s->block_wrap[5] = s->mb_stride;
723 y_size = s->b8_stride * (2 * s->mb_height + 1);
724 c_size = s->mb_stride * (s->mb_height + 1);
725 yc_size = y_size + 2 * c_size;
727 /* convert fourcc to upper case */
728 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
730 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
732 s->avctx->coded_frame = &s->current_picture.f;
/* mb index -> mb x/y lookup used by the error resilience code. */
734 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
735 fail); // error ressilience code looks cleaner with this
736 for (y = 0; y < s->mb_height; y++)
737 for (x = 0; x < s->mb_width; x++)
738 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
740 s->mb_index2xy[s->mb_height * s->mb_width] =
741 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
744 /* Allocate MV tables */
745 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
746 mv_table_size * 2 * sizeof(int16_t), fail);
747 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
748 mv_table_size * 2 * sizeof(int16_t), fail);
749 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
750 mv_table_size * 2 * sizeof(int16_t), fail);
751 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
752 mv_table_size * 2 * sizeof(int16_t), fail);
753 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
754 mv_table_size * 2 * sizeof(int16_t), fail);
755 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
756 mv_table_size * 2 * sizeof(int16_t), fail);
/* Public MV-table pointers are offset into the *_base allocations
 * (offset expressions not visible in this view). */
757 s->p_mv_table = s->p_mv_table_base +
759 s->b_forw_mv_table = s->b_forw_mv_table_base +
761 s->b_back_mv_table = s->b_back_mv_table_base +
763 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
765 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
767 s->b_direct_mv_table = s->b_direct_mv_table_base +
/* MSMPEG4 AC statistics for the encoder's VLC selection. */
770 if (s->msmpeg4_version) {
771 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
772 2 * 2 * (MAX_LEVEL + 1) *
773 (MAX_RUN + 1) * 2 * sizeof(int), fail);
775 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
777 /* Allocate MB type table */
778 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
779 sizeof(uint16_t), fail); // needed for encoding
781 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
/* Quantization matrices: 64 coeffs x 32 qscale values, int and 16-bit. */
784 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
785 64 * 32 * sizeof(int), fail);
786 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
787 64 * 32 * sizeof(int), fail);
788 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
789 64 * 32 * 2 * sizeof(uint16_t), fail);
790 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
791 64 * 32 * 2 * sizeof(uint16_t), fail);
792 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
793 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
794 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
795 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
797 if (s->avctx->noise_reduction) {
798 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
799 2 * 64 * sizeof(uint16_t), fail);
/* Picture pool is scaled by thread count so frame threads do not fight
 * over the same slots. */
804 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
805 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
806 s->picture_count * sizeof(Picture), fail);
807 for (i = 0; i < s->picture_count; i++) {
808 avcodec_get_frame_defaults(&s->picture[i].f);
811 if (s->width && s->height) {
812 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
813 mb_array_size * sizeof(uint8_t), fail);
815 if (s->codec_id == CODEC_ID_MPEG4 ||
816 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
817 /* interlaced direct mode decoding tables */
818 for (i = 0; i < 2; i++) {
820 for (j = 0; j < 2; j++) {
821 for (k = 0; k < 2; k++) {
822 FF_ALLOCZ_OR_GOTO(s->avctx,
823 s->b_field_mv_table_base[i][j][k],
824 mv_table_size * 2 * sizeof(int16_t),
826 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
829 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
830 mb_array_size * 2 * sizeof(uint8_t),
832 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
833 mv_table_size * 2 * sizeof(int16_t),
835 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
838 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
839 mb_array_size * 2 * sizeof(uint8_t),
843 if (s->out_format == FMT_H263) {
845 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
846 s->coded_block = s->coded_block_base + s->b8_stride + 1;
848 /* cbp, ac_pred, pred_dir */
849 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
850 mb_array_size * sizeof(uint8_t), fail);
851 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
852 mb_array_size * sizeof(uint8_t), fail);
855 if (s->h263_pred || s->h263_plus || !s->encoding) {
857 // MN: we need these for error resilience of intra-frames
/* DC prediction values, initialized to 1024 (the reset value). */
858 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
859 yc_size * sizeof(int16_t), fail);
860 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
861 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
862 s->dc_val[2] = s->dc_val[1] + c_size;
863 for (i = 0; i < yc_size; i++)
864 s->dc_val_base[i] = 1024;
867 /* which mb is a intra block */
868 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
869 memset(s->mbintra_table, 1, mb_array_size);
871 /* init macroblock skip table */
872 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
873 // Note the + 1 is for a quicker mpeg4 slice_end detection
875 s->parse_context.state = -1;
/* Debug visualization planes (QP / MB type / MV overlays). */
876 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
877 s->avctx->debug_mv) {
878 s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
879 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
880 s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
881 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
882 s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
883 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
887 s->context_initialized = 1;
888 s->thread_context[0] = s;
/* Spawn slice-thread contexts: each is a copy of the master with its own
 * scratch buffers and a [start_mb_y, end_mb_y) row range. */
890 if (s->width && s->height) {
892 for (i = 1; i < nb_slices; i++) {
893 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
894 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
897 for (i = 0; i < nb_slices; i++) {
898 if (init_duplicate_context(s->thread_context[i], s) < 0)
900 s->thread_context[i]->start_mb_y =
901 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
902 s->thread_context[i]->end_mb_y =
903 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
906 if (init_duplicate_context(s, s) < 0)
909 s->end_mb_y = s->mb_height;
911 s->slice_context_count = nb_slices;
/* On any failure, tear everything down through the common cleanup path. */
916 ff_MPV_common_end(s);
920 /* init common structure for both encoder and decoder */
921 void ff_MPV_common_end(MpegEncContext *s)
/* Free the slice-thread contexts; index 0 is s itself so only the
 * duplicates (i >= 1) are freed as separate allocations. */
925 if (s->slice_context_count > 1) {
926 for (i = 0; i < s->slice_context_count; i++) {
927 free_duplicate_context(s->thread_context[i]);
929 for (i = 1; i < s->slice_context_count; i++) {
930 av_freep(&s->thread_context[i]);
932 s->slice_context_count = 1;
933 } else free_duplicate_context(s);
935 av_freep(&s->parse_context.buffer);
936 s->parse_context.buffer_size = 0;
/* Release MV tables and null the derived pointers that aliased them. */
938 av_freep(&s->mb_type);
939 av_freep(&s->p_mv_table_base);
940 av_freep(&s->b_forw_mv_table_base);
941 av_freep(&s->b_back_mv_table_base);
942 av_freep(&s->b_bidir_forw_mv_table_base);
943 av_freep(&s->b_bidir_back_mv_table_base);
944 av_freep(&s->b_direct_mv_table_base);
945 s->p_mv_table = NULL;
946 s->b_forw_mv_table = NULL;
947 s->b_back_mv_table = NULL;
948 s->b_bidir_forw_mv_table = NULL;
949 s->b_bidir_back_mv_table = NULL;
950 s->b_direct_mv_table = NULL;
951 for (i = 0; i < 2; i++) {
952 for (j = 0; j < 2; j++) {
953 for (k = 0; k < 2; k++) {
954 av_freep(&s->b_field_mv_table_base[i][j][k]);
955 s->b_field_mv_table[i][j][k] = NULL;
957 av_freep(&s->b_field_select_table[i][j]);
958 av_freep(&s->p_field_mv_table_base[i][j]);
959 s->p_field_mv_table[i][j] = NULL;
961 av_freep(&s->p_field_select_table[i]);
964 av_freep(&s->dc_val_base);
965 av_freep(&s->coded_block_base);
966 av_freep(&s->mbintra_table);
967 av_freep(&s->cbp_table);
968 av_freep(&s->pred_dir_table);
970 av_freep(&s->mbskip_table);
971 av_freep(&s->bitstream_buffer);
972 s->allocated_bitstream_buffer_size = 0;
974 av_freep(&s->avctx->stats_out);
975 av_freep(&s->ac_stats);
976 av_freep(&s->error_status_table);
977 av_freep(&s->mb_index2xy);
978 av_freep(&s->lambda_table);
979 av_freep(&s->q_intra_matrix);
980 av_freep(&s->q_inter_matrix);
981 av_freep(&s->q_intra_matrix16);
982 av_freep(&s->q_inter_matrix16);
983 av_freep(&s->input_picture);
984 av_freep(&s->reordered_input_picture);
985 av_freep(&s->dct_offset);
/* Free all Pictures (but not in a frame-thread copy, which does not own
 * them) and reset the context to an uninitialized state. */
987 if (s->picture && !s->avctx->internal->is_copy) {
988 for (i = 0; i < s->picture_count; i++) {
989 free_picture(s, &s->picture[i]);
992 av_freep(&s->picture);
993 s->context_initialized = 0;
994 s->last_picture_ptr =
995 s->next_picture_ptr =
996 s->current_picture_ptr = NULL;
997 s->linesize = s->uvlinesize = 0;
999 for (i = 0; i < 3; i++)
1000 av_freep(&s->visualization_buffer[i]);
1002 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
1003 avcodec_default_free_buffers(s->avctx);
/* Build the derived run/level tables of an RLTable: max_level[], max_run[]
 * and index_run[] for last=0/1, stored either in caller-provided static
 * storage or in freshly av_malloc'd buffers. */
1006 void ff_init_rl(RLTable *rl,
1007 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1009 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1010 uint8_t index_run[MAX_RUN + 1];
1011 int last, run, level, start, end, i;
1013 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1014 if (static_store && rl->max_level[0])
1017 /* compute max_level[], max_run[] and index_run[] */
1018 for (last = 0; last < 2; last++) {
/* index_run defaults to rl->n = "no code for this run". */
1027 memset(max_level, 0, MAX_RUN + 1);
1028 memset(max_run, 0, MAX_LEVEL + 1);
1029 memset(index_run, rl->n, MAX_RUN + 1);
1030 for (i = start; i < end; i++) {
1031 run = rl->table_run[i];
1032 level = rl->table_level[i];
1033 if (index_run[run] == rl->n)
1035 if (level > max_level[run])
1036 max_level[run] = level;
1037 if (run > max_run[level])
1038 max_run[level] = run;
/* Commit into static storage when provided, else heap-allocate;
 * the three tables are packed consecutively in static_store[last]. */
1041 rl->max_level[last] = static_store[last];
1043 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1044 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1046 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1048 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1049 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1051 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1053 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1054 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute rl_vlc[q] for every qscale q: for each VLC table entry, bake
 * the dequantized level (level*qmul+qadd) and the run (with the +192
 * "last" marker) so decoding needs a single table lookup. */
1058 void ff_init_vlc_rl(RLTable *rl)
1062 for (q = 0; q < 32; q++) {
/* qadd is the standard H.263 dequant rounding term: (q-1)|1. */
1064 int qadd = (q - 1) | 1;
1070 for (i = 0; i < rl->vlc.table_size; i++) {
1071 int code = rl->vlc.table[i][0];
1072 int len = rl->vlc.table[i][1];
1075 if (len == 0) { // illegal code
1078 } else if (len < 0) { // more bits needed
1082 if (code == rl->n) { // esc
1086 run = rl->table_run[code] + 1;
1087 level = rl->table_level[code] * qmul + qadd;
1088 if (code >= rl->last) run += 192;
1091 rl->rl_vlc[q][i].len = len;
1092 rl->rl_vlc[q][i].level = level;
1093 rl->rl_vlc[q][i].run = run;
/* Free the frame buffers of all non-reference pictures owned by this
 * context; keeps the current picture unless remove_current is set. */
1098 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1102 /* release non reference frames */
1103 for (i = 0; i < s->picture_count; i++) {
1104 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1105 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1106 (remove_current || &s->picture[i] != s->current_picture_ptr)
1107 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1108 free_frame_buffer(s, &s->picture[i]);
/* Return the index of a free slot in s->picture[] within this context's
 * picture range, or AVERROR_INVALIDDATA when none is free. Preference
 * order differs for shared vs. internal buffers (types 0 vs. non-0). */
1113 int ff_find_unused_picture(MpegEncContext *s, int shared)
1118 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1119 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1123 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1124 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
/* Last resort: any slot without pixel data. */
1127 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1128 if (s->picture[i].f.data[0] == NULL)
1133 return AVERROR_INVALIDDATA;
/* Refresh the encoder's noise-reduction DCT offsets from the accumulated
 * per-coefficient error sums, separately for intra and inter blocks.
 * Counters are halved once dct_count exceeds 2^16 (exponential decay). */
1136 static void update_noise_reduction(MpegEncContext *s)
1140 for (intra = 0; intra < 2; intra++) {
1141 if (s->dct_count[intra] > (1 << 16)) {
1142 for (i = 0; i < 64; i++) {
1143 s->dct_error_sum[intra][i] >>= 1;
1145 s->dct_count[intra] >>= 1;
/* offset = noise_reduction * count / error_sum, rounded; +1 avoids /0. */
1148 for (i = 0; i < 64; i++) {
1149 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1150 s->dct_count[intra] +
1151 s->dct_error_sum[intra][i] / 2) /
1152 (s->dct_error_sum[intra][i] + 1);
/**
 * Generic function for encode/decode called after coding/decoding
 * the header and before a frame is coded/decoded.
 * Releases stale frames, picks/allocates the current picture, creates
 * dummy reference frames when the stream starts on a P/B frame or a
 * field-based keyframe, and selects the dequantize functions.
 */
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
    assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
           s->codec_id == CODEC_ID_SVQ3);

    /* mark & release old frames */
    if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
        if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
            s->last_picture_ptr != s->next_picture_ptr &&
            s->last_picture_ptr->f.data[0]) {
            if (s->last_picture_ptr->owner2 == s)
                free_frame_buffer(s, s->last_picture_ptr);

        /* release forgotten pictures */
        /* if (mpeg124/h263) */
        for (i = 0; i < s->picture_count; i++) {
            if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
                &s->picture[i] != s->last_picture_ptr &&
                &s->picture[i] != s->next_picture_ptr &&
                s->picture[i].f.reference) {
                if (!(avctx->active_thread_type & FF_THREAD_FRAME))
                    av_log(avctx, AV_LOG_ERROR,
                           "releasing zombie picture\n");
                free_frame_buffer(s, &s->picture[i]);

    ff_release_unused_pictures(s, 1);

    /* reuse the current slot if it already lost its buffer, otherwise
     * grab a fresh unused picture */
    if (s->current_picture_ptr &&
        s->current_picture_ptr->f.data[0] == NULL) {
        // we already have a unused image
        // (maybe it was set before reading the header)
        pic = s->current_picture_ptr;
        i = ff_find_unused_picture(s, 0);
        pic = &s->picture[i];

    pic->f.reference = 0;
    if (s->codec_id == CODEC_ID_H264)
        pic->f.reference = s->picture_structure;
    else if (s->pict_type != AV_PICTURE_TYPE_B)
        pic->f.reference = 3;

    pic->f.coded_picture_number = s->coded_picture_number++;
    if (ff_alloc_picture(s, pic, 0) < 0)

    s->current_picture_ptr = pic;
    // FIXME use only the vars from current_pic
    s->current_picture_ptr->f.top_field_first = s->top_field_first;
    if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
        s->codec_id == CODEC_ID_MPEG2VIDEO) {
        if (s->picture_structure != PICT_FRAME)
            s->current_picture_ptr->f.top_field_first =
                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
    s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
                                                 !s->progressive_sequence;
    s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;

    s->current_picture_ptr->f.pict_type = s->pict_type;
    // if (s->flags && CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    /* for non-B frames the reference chain advances */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        s->next_picture_ptr = s->current_picture_ptr;

    /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
       s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
       s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
       s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
       s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
       s->pict_type, s->dropable); */

    if (s->codec_id != CODEC_ID_H264) {
        /* missing reference frame: allocate a gray dummy so prediction
         * has something to read from */
        if ((s->last_picture_ptr == NULL ||
             s->last_picture_ptr->f.data[0] == NULL) &&
            (s->pict_type != AV_PICTURE_TYPE_I ||
             s->picture_structure != PICT_FRAME)) {
            if (s->pict_type != AV_PICTURE_TYPE_I)
                av_log(avctx, AV_LOG_ERROR,
                       "warning: first frame is no keyframe\n");
            else if (s->picture_structure != PICT_FRAME)
                av_log(avctx, AV_LOG_INFO,
                       "allocate dummy last picture for field based first keyframe\n");

            /* Allocate a dummy frame */
            i = ff_find_unused_picture(s, 0);
            s->last_picture_ptr = &s->picture[i];
            if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
                s->last_picture_ptr = NULL;
            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
            s->last_picture_ptr->f.reference = 3;
        if ((s->next_picture_ptr == NULL ||
             s->next_picture_ptr->f.data[0] == NULL) &&
            s->pict_type == AV_PICTURE_TYPE_B) {
            /* Allocate a dummy frame */
            i = ff_find_unused_picture(s, 0);
            s->next_picture_ptr = &s->picture[i];
            if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
                s->next_picture_ptr = NULL;
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
            ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
            s->next_picture_ptr->f.reference = 3;

    if (s->last_picture_ptr)
        ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if (s->next_picture_ptr)
        ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    /* frame-threaded decoding: claim ownership of the references */
    if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
        (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
        if (s->next_picture_ptr)
            s->next_picture_ptr->owner2 = s;
        if (s->last_picture_ptr)
            s->last_picture_ptr->owner2 = s;

    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                 s->last_picture_ptr->f.data[0]));

    if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
        /* field pictures: point data[] at the requested field and double
         * the strides so one field is addressed like a full frame */
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f.data[i] +=
                    s->current_picture.f.linesize[i];
            s->current_picture.f.linesize[i] *= 2;
            s->last_picture.f.linesize[i] *= 2;
            s->next_picture.f.linesize[i] *= 2;

    s->err_recognition = avctx->err_recognition;

    /* set dequantizer, we can't do it during init as
     * it might change for mpeg4 and we can't do it in the header
     * decode as init is not called for mpeg4 there yet */
    if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;

    if (s->dct_error_sum) {
        assert(s->avctx->noise_reduction && s->encoding);
        update_noise_reduction(s);

    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);
/* Generic function for encode/decode called after a
 * frame has been coded/decoded: pads the picture edges, updates
 * last-frame bookkeeping, copies current_picture back into the picture
 * array, frees non-reference frames and publishes coded_frame. */
void ff_MPV_frame_end(MpegEncContext *s)
    /* redraw edges for the frame if decoding didn't complete */
    // just to make sure that all data is rendered.
    if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
        ff_xvmc_field_end(s);
    } else if ((s->error_count || s->encoding) &&
               !s->avctx->hwaccel &&
               !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
               s->unrestricted_mv &&
               s->current_picture.f.reference &&
               !(s->flags & CODEC_FLAG_EMU_EDGE)) {
        int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
        int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
        /* pad the luma plane, then both chroma planes scaled by the
         * pixel-format subsampling shifts */
        s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
                          s->h_edge_pos, s->v_edge_pos,
                          EDGE_WIDTH, EDGE_WIDTH,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);
        s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
                          s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
                          EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
                          EDGE_TOP | EDGE_BOTTOM);

    s->last_pict_type = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B) {
        s->last_non_b_pict_type = s->pict_type;

    /* copy back current_picture variables */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
            s->picture[i] = s->current_picture;
    assert(i < MAX_PICTURE_COUNT);

    /* release non-reference frames */
    for (i = 0; i < s->picture_count; i++) {
        if (s->picture[i].f.data[0] && !s->picture[i].f.reference
            /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
            free_frame_buffer(s, &s->picture[i]);

    // clear copies, to avoid confusion
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));

    s->avctx->coded_frame = &s->current_picture_ptr->f;

    if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
        ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
1423 * Draw a line from (ex, ey) -> (sx, sy).
1424 * @param w width of the image
1425 * @param h height of the image
1426 * @param stride stride/linesize of the image
1427 * @param color color of the arrow
1429 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1430 int w, int h, int stride, int color)
1434 sx = av_clip(sx, 0, w - 1);
1435 sy = av_clip(sy, 0, h - 1);
1436 ex = av_clip(ex, 0, w - 1);
1437 ey = av_clip(ey, 0, h - 1);
1439 buf[sy * stride + sx] += color;
1441 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1443 FFSWAP(int, sx, ex);
1444 FFSWAP(int, sy, ey);
1446 buf += sx + sy * stride;
1448 f = ((ey - sy) << 16) / ex;
1449 for (x = 0; x = ex; x++) {
1451 fr = (x * f) & 0xFFFF;
1452 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1453 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1457 FFSWAP(int, sx, ex);
1458 FFSWAP(int, sy, ey);
1460 buf += sx + sy * stride;
1463 f = ((ex - sx) << 16) / ey;
1466 for (y = 0; y = ey; y++) {
1468 fr = (y * f) & 0xFFFF;
1469 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1470 buf[y * stride + x + 1] += (color * fr ) >> 16;
1476 * Draw an arrow from (ex, ey) -> (sx, sy).
1477 * @param w width of the image
1478 * @param h height of the image
1479 * @param stride stride/linesize of the image
1480 * @param color color of the arrow
1482 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1483 int ey, int w, int h, int stride, int color)
1487 sx = av_clip(sx, -100, w + 100);
1488 sy = av_clip(sy, -100, h + 100);
1489 ex = av_clip(ex, -100, w + 100);
1490 ey = av_clip(ey, -100, h + 100);
1495 if (dx * dx + dy * dy > 3 * 3) {
1498 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1500 // FIXME subpixel accuracy
1501 rx = ROUNDED_DIV(rx * 3 << 4, length);
1502 ry = ROUNDED_DIV(ry * 3 << 4, length);
1504 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1505 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1507 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
/**
 * Print debugging info for the given picture: an ASCII map of skip/QP/MB
 * types on the log, and optionally a visual overlay of motion vectors,
 * QP and MB types painted into a copy of the frame.
 */
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
    if (s->avctx->hwaccel || !pict || !pict->mb_type)

    if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {

        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
        case AV_PICTURE_TYPE_I:
            av_log(s->avctx,AV_LOG_DEBUG,"I\n");
        case AV_PICTURE_TYPE_P:
            av_log(s->avctx,AV_LOG_DEBUG,"P\n");
        case AV_PICTURE_TYPE_B:
            av_log(s->avctx,AV_LOG_DEBUG,"B\n");
        case AV_PICTURE_TYPE_S:
            av_log(s->avctx,AV_LOG_DEBUG,"S\n");
        case AV_PICTURE_TYPE_SI:
            av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
        case AV_PICTURE_TYPE_SP:
            av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
        /* one text row per macroblock row, one char per enabled flag */
        for (y = 0; y < s->mb_height; y++) {
            for (x = 0; x < s->mb_width; x++) {
                if (s->avctx->debug & FF_DEBUG_SKIP) {
                    int count = s->mbskip_table[x + y * s->mb_stride];
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                if (s->avctx->debug & FF_DEBUG_QP) {
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d",
                           pict->qscale_table[x + y * s->mb_stride]);
                if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
                    int mb_type = pict->mb_type[x + y * s->mb_stride];
                    // Type & MV direction
                    if (IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if (IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if (IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if (IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if (IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if (IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if (!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if (!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");

                    /* partitioning marker */
                    if (IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if (IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if (IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                        av_log(s->avctx, AV_LOG_DEBUG, "?");

                    if (IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                // av_log(s->avctx, AV_LOG_DEBUG, " ");
            av_log(s->avctx, AV_LOG_DEBUG, "\n");

    /* visual overlay: copy the frame into the visualization buffers and
     * paint MVs / QP / MB-type markers on top of the copy */
    if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
        (s->avctx->debug_mv)) {
        const int shift = 1 + s->quarter_sample;
        int h_chroma_shift, v_chroma_shift, block_height;
        const int width = s->avctx->width;
        const int height = s->avctx->height;
        const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
        const int mv_stride = (s->mb_width << mv_sample_log2) +
                              (s->codec_id == CODEC_ID_H264 ? 0 : 1);
        s->low_delay = 0; // needed to see the vectors without trashing the buffers

        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
                                      &h_chroma_shift, &v_chroma_shift);
        for (i = 0; i < 3; i++) {
            memcpy(s->visualization_buffer[i], pict->data[i],
                   (i == 0) ? pict->linesize[i] * height:
                              pict->linesize[i] * height >> v_chroma_shift);
            pict->data[i] = s->visualization_buffer[i];
        pict->type = FF_BUFFER_TYPE_COPY;
        ptr = pict->data[0];
        block_height = 16 >> v_chroma_shift;

        for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                const int mb_index = mb_x + mb_y * s->mb_stride;
                if ((s->avctx->debug_mv) && pict->motion_val) {
                    /* type 0/1/2: P forward, B forward, B backward */
                    for (type = 0; type < 3; type++) {
                        if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
                            (pict->pict_type!= AV_PICTURE_TYPE_P))
                        if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
                            (pict->pict_type!= AV_PICTURE_TYPE_B))
                        if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
                            (pict->pict_type!= AV_PICTURE_TYPE_B))
                        if (!USES_LIST(pict->mb_type[mb_index], direction))

                        if (IS_8X8(pict->mb_type[mb_index])) {
                            /* one arrow per 8x8 block */
                            for (i = 0; i < 4; i++) {
                                int sx = mb_x * 16 + 4 + 8 * (i & 1);
                                int sy = mb_y * 16 + 4 + 8 * (i >> 1);
                                int xy = (mb_x * 2 + (i & 1) +
                                          (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                                int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
                                int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width,
                                           height, s->linesize, 100);
                        } else if (IS_16X8(pict->mb_type[mb_index])) {
                            for (i = 0; i < 2; i++) {
                                int sx = mb_x * 16 + 8;
                                int sy = mb_y * 16 + 4 + 8 * i;
                                int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
                                int mx = (pict->motion_val[direction][xy][0] >> shift);
                                int my = (pict->motion_val[direction][xy][1] >> shift);

                                if (IS_INTERLACED(pict->mb_type[mb_index]))

                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
                                           height, s->linesize, 100);
                        } else if (IS_8X16(pict->mb_type[mb_index])) {
                            for (i = 0; i < 2; i++) {
                                int sx = mb_x * 16 + 4 + 8 * i;
                                int sy = mb_y * 16 + 8;
                                int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
                                int mx = pict->motion_val[direction][xy][0] >> shift;
                                int my = pict->motion_val[direction][xy][1] >> shift;

                                if (IS_INTERLACED(pict->mb_type[mb_index]))

                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
                                           height, s->linesize, 100);
                            int sx = mb_x * 16 + 8;
                            int sy = mb_y * 16 + 8;
                            int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
                            /* NOTE(review): ">> shift + sx" parses as
                             * ">> (shift + sx)" because + binds tighter than
                             * >>; the 8x8 case above writes
                             * "(... >> shift) + sx". Suspected missing
                             * parentheses — confirm against upstream. */
                            int mx = pict->motion_val[direction][xy][0] >> shift + sx;
                            int my = pict->motion_val[direction][xy][1] >> shift + sy;
                            draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
                    /* paint both chroma planes with a gray level ~ QP */
                    uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
                                 0x0101010101010101ULL;
                    for (y = 0; y < block_height; y++) {
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
                                      (block_height * mb_y + y) *
                                      pict->linesize[1]) = c;
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
                                      (block_height * mb_y + y) *
                                      pict->linesize[2]) = c;
                if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
                    int mb_type = pict->mb_type[mb_index];

/* pick a U/V chroma pair from an angle (degrees) and radius */
#define COLOR(theta, r) \
    u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
    v = (int)(128 + r * sin(theta * 3.141592 / 180));

                    /* one color per macroblock type (values set in elided
                     * COLOR(...) invocations) */
                    if (IS_PCM(mb_type)) {
                    } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
                               IS_INTRA16x16(mb_type)) {
                    } else if (IS_INTRA4x4(mb_type)) {
                    } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
                    } else if (IS_DIRECT(mb_type)) {
                    } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
                    } else if (IS_GMC(mb_type)) {
                    } else if (IS_SKIP(mb_type)) {
                    } else if (!USES_LIST(mb_type, 1)) {
                    } else if (!USES_LIST(mb_type, 0)) {
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));

                    u *= 0x0101010101010101ULL;
                    v *= 0x0101010101010101ULL;
                    for (y = 0; y < block_height; y++) {
                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
                                      (block_height * mb_y + y) * pict->linesize[1]) = u;
                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
                                      (block_height * mb_y + y) * pict->linesize[2]) = v;

                    /* XOR-toggle luma to mark partition boundaries */
                    if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
                    if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
                        for (y = 0; y < 16; y++)
                            pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
                                          pict->linesize[0]] ^= 0x80;
                    if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
                        int dm = 1 << (mv_sample_log2 - 2);
                        for (i = 0; i < 4; i++) {
                            int sx = mb_x * 16 + 8 * (i & 1);
                            int sy = mb_y * 16 + 8 * (i >> 1);
                            int xy = (mb_x * 2 + (i & 1) +
                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
                            int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
                            if (mv[0] != mv[dm] ||
                                mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
                                for (y = 0; y < 8; y++)
                                    pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
                            if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
                                *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
                                              pict->linesize[0]) ^= 0x8080808080808080ULL;

                    if (IS_INTERLACED(mb_type) &&
                        s->codec_id == CODEC_ID_H264) {
                s->mbskip_table[mb_index] = 0;
/**
 * Half-pel motion compensation of one block at reduced (lowres)
 * resolution: computes the sub-pel phase, adjusts the source pointer,
 * falls back to edge emulation near the borders and calls the chroma
 * MC function for the actual filtering.
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest, uint8_t *src,
                                     int field_based, int field_select,
                                     int src_x, int src_y,
                                     int width, int height, int stride,
                                     int h_edge_pos, int v_edge_pos,
                                     int w, int h, h264_chroma_mc_func *pix_op,
                                     int motion_x, int motion_y)
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2); /* MC tables only go up to index 2 */
    const int s_mask = (2 << lowres) - 1;  /* sub-pel phase mask for this lowres */

    if (s->quarter_sample) {

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    /* "+" binds tighter than ">>": this is >> (lowres + 1), the integer
     * part of a half-pel vector at the reduced resolution */
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    /* source area hangs over the padded edge: emulate it */
    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
                                (h + 1) << field_based, src_x,
                                src_y << field_based,
        src = s->edge_emu_buffer;
    /* rescale sub-pel phase to the 1/8-pel units the MC functions expect */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest, src, stride, h, sx, sy);
/* apply one mpeg motion vector to the three components */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t **ref_picture,
                                                h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);  /* chroma MC table index cap */
    const int block_s = 8>>lowres;          /* block size at this resolution */
    const int s_mask = (2 << lowres) - 1;   /* sub-pel phase mask */
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize = s->current_picture.f.linesize[0] << field_based;
    uvlinesize = s->current_picture.f.linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {

    motion_y += (bottom_field - field_select) * (1 << lowres - 1);

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    /* note: ">> lowres + 1" is ">> (lowres + 1)" — half-pel integer part */
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    /* derive the chroma position per output format's chroma MV rules */
    if (s->out_format == FMT_H263) {
        uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma mv's are full pel in H261
        uvsx = (2 * mx) & s_mask;
        uvsy = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y = mb_y * block_s + (my >> lowres);
        uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
        uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);

    ptr_y = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* reference area overlaps the padded border: use edge emulation */
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                s->linesize, 17, 17 + field_based,
                                src_x, src_y << field_based, h_edge_pos,
        ptr_y = s->edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
            s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
                                    uvsrc_x, uvsrc_y << field_based,
                                    h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cr = uvbuf + 16;

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
        dest_y += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;

        ptr_y += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;

    /* rescale sub-pel phases to 1/8-pel units for the MC functions */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift,
        pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift,
    // FIXME h261 lowres loop filter
/**
 * Chroma motion compensation for 4MV macroblocks at reduced (lowres)
 * resolution: derives one rounded chroma vector, then filters both
 * chroma planes with optional edge emulation.
 */
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                            uint8_t *dest_cb, uint8_t *dest_cr,
                                            uint8_t **ref_picture,
                                            h264_chroma_mc_func * pix_op,
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);
    const int block_s = 8 >> lowres;
    const int s_mask = (2 << lowres) - 1;
    /* ">> lowres + 1" is ">> (lowres + 1)": chroma is subsampled by 2 */
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, offset, sx, sy;

    if (s->quarter_sample) {

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if (s->flags & CODEC_FLAG_EMU_EDGE) {
        if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
            (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
                                    9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr = s->edge_emu_buffer;

    /* rescale the sub-pel phase to 1/8-pel units */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    /* same offset and phase for the Cr plane */
    ptr = ref_picture[2] + offset;
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
                                src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->edge_emu_buffer;
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
/**
 * motion compensation of a single macroblock (lowres path)
 * @param dest_y  luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir     direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op  halfpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
    const int lowres = s->avctx->lowres;
    const int block_s = 8 >>lowres;

    /* dispatch on the MV partitioning type */
    switch (s->mv_type) {
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],

        /* 4MV: one luma block per 8x8 quadrant */
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            /* accumulate for the combined chroma vector */
            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];

        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,

        if (s->picture_structure == PICT_FRAME) {
            /* frame picture: compensate top and bottom fields separately */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
            /* opposite-parity field of the current frame may be the reference */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f.data;
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);

        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
                ref2picture = s->current_picture_ptr->f.data;

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;

        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f.data;
/**
 * find the lowest MB row referenced in the MVs
 * Used for frame-threading: tells how far into the reference frame the
 * current MB's motion vectors can reach.
 */
int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int my, off, i, mvs;

    if (s->picture_structure != PICT_FRAME) goto unhandled;

    switch (s->mv_type) {

    for (i = 0; i < mvs; i++) {
        my = s->mv[dir][i][1]<<qpel_shift; /* normalize to quarter-pel units */
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);

    /* worst-case vertical reach, rounded up from qpel (64/row) to MB rows */
    off = (FFMAX(-my_min, my_max) + 63) >> 6;

    return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
    /* unhandled structures/types conservatively reference the whole frame */
    return s->mb_height-1;
2213 /* put block[] to dest[] */
2214 static inline void put_dct(MpegEncContext *s,
2215 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2217 s->dct_unquantize_intra(s, block, i, qscale);
2218 s->dsp.idct_put (dest, line_size, block);
2221 /* add block[] to dest[] */
2222 static inline void add_dct(MpegEncContext *s,
2223 DCTELEM *block, int i, uint8_t *dest, int line_size)
2225 if (s->block_last_index[i] >= 0) {
2226 s->dsp.idct_add (dest, line_size, block);
2230 static inline void add_dequant_dct(MpegEncContext *s,
2231 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2233 if (s->block_last_index[i] >= 0) {
2234 s->dct_unquantize_inter(s, block, i, qscale);
2236 s->dsp.idct_add (dest, line_size, block);
/**
 * Clean dc, ac, coded_block for the current non-intra MB.
 * Resets the DC predictors to their neutral value (1024), clears the AC
 * prediction buffers and (for MSMPEG4 v3+) the coded_block flags.
 */
void ff_clean_intra_table_entries(MpegEncContext *s)
    int wrap = s->b8_stride;
    int xy = s->block_index[0];

    /* luma DC predictors back to the reset value */
    s->dc_val[0][xy + 1 ] =
    s->dc_val[0][xy + wrap] =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* ac pred */
    memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
    if (s->msmpeg4_version>=3) {
        s->coded_block[xy ] =
        s->coded_block[xy + 1 ] =
        s->coded_block[xy + wrap] =
        s->coded_block[xy + 1 + wrap] = 0;

    /* chroma: one predictor per macroblock */
    wrap = s->mb_stride;
    xy = s->mb_x + s->mb_y * wrap;
    s->dc_val[2][xy] = 1024;
    /* ac pred */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    s->mbintra_table[xy]= 0; /* mark MB as non-intra for the next pass */
2273 /* generic function called after a macroblock has been parsed by the
2274 decoder or after it has been encoded by the encoder.
2276 Important variables used:
2277 s->mb_intra : true if intra macroblock
2278 s->mv_dir : motion vector direction
2279 s->mv_type : motion vector type
2280 s->mv : motion vector
2281 s->interlaced_dct : true if interlaced dct used (mpeg2)
2283 static av_always_inline
2284 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2285 int lowres_flag, int is_mpeg12)
2287 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2288 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2289 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2293 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2294 /* save DCT coefficients */
2296 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2297 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2299 for(j=0; j<64; j++){
2300 *dct++ = block[i][s->dsp.idct_permutation[j]];
2301 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2303 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2307 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2309 /* update DC predictors for P macroblocks */
2311 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2312 if(s->mbintra_table[mb_xy])
2313 ff_clean_intra_table_entries(s);
2317 s->last_dc[2] = 128 << s->intra_dc_precision;
2320 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2321 s->mbintra_table[mb_xy]=1;
2323 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2324 uint8_t *dest_y, *dest_cb, *dest_cr;
2325 int dct_linesize, dct_offset;
2326 op_pixels_func (*op_pix)[4];
2327 qpel_mc_func (*op_qpix)[16];
2328 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2329 const int uvlinesize = s->current_picture.f.linesize[1];
2330 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2331 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2333 /* avoid copy if macroblock skipped in last frame too */
2334 /* skip only during decoding as we might trash the buffers during encoding a bit */
2336 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2338 if (s->mb_skipped) {
2340 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2342 } else if(!s->current_picture.f.reference) {
2345 *mbskip_ptr = 0; /* not skipped */
2349 dct_linesize = linesize << s->interlaced_dct;
2350 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2354 dest_cb= s->dest[1];
2355 dest_cr= s->dest[2];
2357 dest_y = s->b_scratchpad;
2358 dest_cb= s->b_scratchpad+16*linesize;
2359 dest_cr= s->b_scratchpad+32*linesize;
2363 /* motion handling */
2364 /* decoding or more than one mb_type (MC was already done otherwise) */
2367 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2368 if (s->mv_dir & MV_DIR_FORWARD) {
2369 ff_thread_await_progress(&s->last_picture_ptr->f,
2370 ff_MPV_lowest_referenced_row(s, 0),
2373 if (s->mv_dir & MV_DIR_BACKWARD) {
2374 ff_thread_await_progress(&s->next_picture_ptr->f,
2375 ff_MPV_lowest_referenced_row(s, 1),
2381 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2383 if (s->mv_dir & MV_DIR_FORWARD) {
2384 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2385 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2387 if (s->mv_dir & MV_DIR_BACKWARD) {
2388 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
2391 op_qpix= s->me.qpel_put;
2392 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2393 op_pix = s->dsp.put_pixels_tab;
2395 op_pix = s->dsp.put_no_rnd_pixels_tab;
2397 if (s->mv_dir & MV_DIR_FORWARD) {
2398 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2399 op_pix = s->dsp.avg_pixels_tab;
2400 op_qpix= s->me.qpel_avg;
2402 if (s->mv_dir & MV_DIR_BACKWARD) {
2403 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2408 /* skip dequant / idct if we are really late ;) */
2409 if(s->avctx->skip_idct){
2410 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2411 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2412 || s->avctx->skip_idct >= AVDISCARD_ALL)
2416 /* add dct residue */
2417 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2418 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2419 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2420 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2421 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2422 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2424 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2425 if (s->chroma_y_shift){
2426 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2427 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2431 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2432 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2433 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2434 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2437 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2438 add_dct(s, block[0], 0, dest_y , dct_linesize);
2439 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2440 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2441 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2443 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2444 if(s->chroma_y_shift){//Chroma420
2445 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2446 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2449 dct_linesize = uvlinesize << s->interlaced_dct;
2450 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2452 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2453 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2454 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2455 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2456 if(!s->chroma_x_shift){//Chroma444
2457 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2458 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2459 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2460 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2465 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2466 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2469 /* dct only in intra block */
2470 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2471 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2472 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2473 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2474 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2476 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2477 if(s->chroma_y_shift){
2478 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2479 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2483 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2484 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2485 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2486 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2490 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2491 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2492 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2493 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2495 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2496 if(s->chroma_y_shift){
2497 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2498 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2501 dct_linesize = uvlinesize << s->interlaced_dct;
2502 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2504 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2505 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2506 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2507 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2508 if(!s->chroma_x_shift){//Chroma444
2509 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2510 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2511 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2512 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2520 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2521 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2522 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2527 void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2529 if(s->out_format == FMT_MPEG1) {
2530 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2531 else MPV_decode_mb_internal(s, block, 0, 1);
2534 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2535 else MPV_decode_mb_internal(s, block, 0, 0);
2539 * @param h is the normal height, this will be reduced automatically if needed for the last row
2541 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2542 const int field_pic= s->picture_structure != PICT_FRAME;
2548 if (!s->avctx->hwaccel
2549 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2550 && s->unrestricted_mv
2551 && s->current_picture.f.reference
2553 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2554 int sides = 0, edge_h;
2555 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2556 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2557 if (y==0) sides |= EDGE_TOP;
2558 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2560 edge_h= FFMIN(h, s->v_edge_pos - y);
2562 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2563 s->linesize, s->h_edge_pos, edge_h,
2564 EDGE_WIDTH, EDGE_WIDTH, sides);
2565 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2566 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2567 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2568 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2569 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2570 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2573 h= FFMIN(h, s->avctx->height - y);
2575 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2577 if (s->avctx->draw_horiz_band) {
2579 int offset[AV_NUM_DATA_POINTERS];
2582 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2583 src = &s->current_picture_ptr->f;
2584 else if(s->last_picture_ptr)
2585 src = &s->last_picture_ptr->f;
2589 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2590 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2593 offset[0]= y * s->linesize;
2595 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2596 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2602 s->avctx->draw_horiz_band(s->avctx, src, offset,
2603 y, s->picture_structure, h);
2607 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2608 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2609 const int uvlinesize = s->current_picture.f.linesize[1];
2610 const int mb_size= 4 - s->avctx->lowres;
2612 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2613 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2614 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2615 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2616 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2617 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2618 //block_index is not used by mpeg2, so it is not affected by chroma_format
2620 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2621 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2622 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2624 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2626 if(s->picture_structure==PICT_FRAME){
2627 s->dest[0] += s->mb_y * linesize << mb_size;
2628 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2629 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2631 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2632 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2633 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2634 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2639 void ff_mpeg_flush(AVCodecContext *avctx){
2641 MpegEncContext *s = avctx->priv_data;
2643 if(s==NULL || s->picture==NULL)
2646 for(i=0; i<s->picture_count; i++){
2647 if (s->picture[i].f.data[0] &&
2648 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2649 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2650 free_frame_buffer(s, &s->picture[i]);
2652 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2654 s->mb_x= s->mb_y= 0;
2656 s->parse_context.state= -1;
2657 s->parse_context.frame_start_found= 0;
2658 s->parse_context.overread= 0;
2659 s->parse_context.overread_index= 0;
2660 s->parse_context.index= 0;
2661 s->parse_context.last_index= 0;
2662 s->bitstream_buffer_size=0;
2666 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2667 DCTELEM *block, int n, int qscale)
2669 int i, level, nCoeffs;
2670 const uint16_t *quant_matrix;
2672 nCoeffs= s->block_last_index[n];
2675 block[0] = block[0] * s->y_dc_scale;
2677 block[0] = block[0] * s->c_dc_scale;
2678 /* XXX: only mpeg1 */
2679 quant_matrix = s->intra_matrix;
2680 for(i=1;i<=nCoeffs;i++) {
2681 int j= s->intra_scantable.permutated[i];
2686 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2687 level = (level - 1) | 1;
2690 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2691 level = (level - 1) | 1;
2698 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2699 DCTELEM *block, int n, int qscale)
2701 int i, level, nCoeffs;
2702 const uint16_t *quant_matrix;
2704 nCoeffs= s->block_last_index[n];
2706 quant_matrix = s->inter_matrix;
2707 for(i=0; i<=nCoeffs; i++) {
2708 int j= s->intra_scantable.permutated[i];
2713 level = (((level << 1) + 1) * qscale *
2714 ((int) (quant_matrix[j]))) >> 4;
2715 level = (level - 1) | 1;
2718 level = (((level << 1) + 1) * qscale *
2719 ((int) (quant_matrix[j]))) >> 4;
2720 level = (level - 1) | 1;
2727 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2728 DCTELEM *block, int n, int qscale)
2730 int i, level, nCoeffs;
2731 const uint16_t *quant_matrix;
2733 if(s->alternate_scan) nCoeffs= 63;
2734 else nCoeffs= s->block_last_index[n];
2737 block[0] = block[0] * s->y_dc_scale;
2739 block[0] = block[0] * s->c_dc_scale;
2740 quant_matrix = s->intra_matrix;
2741 for(i=1;i<=nCoeffs;i++) {
2742 int j= s->intra_scantable.permutated[i];
2747 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2750 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2757 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2758 DCTELEM *block, int n, int qscale)
2760 int i, level, nCoeffs;
2761 const uint16_t *quant_matrix;
2764 if(s->alternate_scan) nCoeffs= 63;
2765 else nCoeffs= s->block_last_index[n];
2768 block[0] = block[0] * s->y_dc_scale;
2770 block[0] = block[0] * s->c_dc_scale;
2771 quant_matrix = s->intra_matrix;
2772 for(i=1;i<=nCoeffs;i++) {
2773 int j= s->intra_scantable.permutated[i];
2778 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2781 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2790 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2791 DCTELEM *block, int n, int qscale)
2793 int i, level, nCoeffs;
2794 const uint16_t *quant_matrix;
2797 if(s->alternate_scan) nCoeffs= 63;
2798 else nCoeffs= s->block_last_index[n];
2800 quant_matrix = s->inter_matrix;
2801 for(i=0; i<=nCoeffs; i++) {
2802 int j= s->intra_scantable.permutated[i];
2807 level = (((level << 1) + 1) * qscale *
2808 ((int) (quant_matrix[j]))) >> 4;
2811 level = (((level << 1) + 1) * qscale *
2812 ((int) (quant_matrix[j]))) >> 4;
2821 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2822 DCTELEM *block, int n, int qscale)
2824 int i, level, qmul, qadd;
2827 assert(s->block_last_index[n]>=0);
2833 block[0] = block[0] * s->y_dc_scale;
2835 block[0] = block[0] * s->c_dc_scale;
2836 qadd = (qscale - 1) | 1;
2843 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2845 for(i=1; i<=nCoeffs; i++) {
2849 level = level * qmul - qadd;
2851 level = level * qmul + qadd;
2858 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2859 DCTELEM *block, int n, int qscale)
2861 int i, level, qmul, qadd;
2864 assert(s->block_last_index[n]>=0);
2866 qadd = (qscale - 1) | 1;
2869 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2871 for(i=0; i<=nCoeffs; i++) {
2875 level = level * qmul - qadd;
2877 level = level * qmul + qadd;
2885 * set qscale and update qscale dependent variables.
2887 void ff_set_qscale(MpegEncContext * s, int qscale)
2891 else if (qscale > 31)
2895 s->chroma_qscale= s->chroma_qscale_table[qscale];
2897 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2898 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2901 void ff_MPV_report_decode_progress(MpegEncContext *s)
2903 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2904 ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);