2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the per-standard DCT coefficient dequantizers.
 * One intra/inter pair exists for MPEG-1, MPEG-2 and H.263; pointers to
 * these are installed into the MpegEncContext by ff_dct_common_init(). */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
/* bitexact variant is selected when CODEC_FLAG_BITEXACT is set (see
 * ff_dct_common_init() below). */
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Identity mapping: chroma qscale equals luma qscale (entry i == i).
 * Used as the default when a codec has no dedicated chroma qscale table
 * (installed in ff_MPV_common_defaults()). */
69 static const uint8_t ff_default_chroma_qscale_table[32] = {
70 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
72 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* DC coefficient scale for MPEG-1: constant 8 for every qscale value.
 * Indexed by qscale (0..127); also reused as entry 0 of
 * ff_mpeg2_dc_scale_table[] below. */
75 const uint8_t ff_mpeg1_dc_scale_table[128] = {
76 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
84 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* Constant DC scale 4 for all 128 qscale values — entry 1 of
 * ff_mpeg2_dc_scale_table[] (presumably one step of MPEG-2
 * intra_dc_precision; confirm against the MPEG-2 spec). */
87 static const uint8_t mpeg2_dc_scale_table1[128] = {
88 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
96 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* Constant DC scale 2 for all 128 qscale values — entry 2 of
 * ff_mpeg2_dc_scale_table[]. */
99 static const uint8_t mpeg2_dc_scale_table2[128] = {
100 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
108 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* Constant DC scale 1 (no DC scaling) for all 128 qscale values — entry 3
 * of ff_mpeg2_dc_scale_table[]. */
111 static const uint8_t mpeg2_dc_scale_table3[128] = {
112 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
120 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Selects one of the four DC-scale tables above. The index 0..3 is
 * presumably the MPEG-2 intra_dc_precision field (8/4/2/1 scale) —
 * TODO confirm against callers; not visible in this file excerpt. */
123 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
124 ff_mpeg1_dc_scale_table,
125 mpeg2_dc_scale_table1,
126 mpeg2_dc_scale_table2,
127 mpeg2_dc_scale_table3,
/* Supported output pixel format lists (4:2:0); the hwaccel variant
 * additionally lists hardware-acceleration formats. Initializer contents
 * not visible in this excerpt. */
130 const enum PixelFormat ff_pixfmt_list_420[] = {
135 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/**
 * Scan for an MPEG start code (00 00 01 xx).
 * *state is a sliding 32-bit shift register of the last bytes seen, so the
 * search can resume across buffer boundaries. Returns a pointer positioned
 * relative to the found start code, or (via the FFMIN clamp) near the end
 * of the buffer when none is found.
 */
143 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *restrict p,
145 uint32_t * restrict state)
/* Feed up to 3 bytes into the shift register; stop early on a 00 00 01
 * prefix (tmp == 0x100) or end of buffer. */
153 for (i = 0; i < 3; i++) {
154 uint32_t tmp = *state << 8;
155 *state = tmp + *(p++);
156 if (tmp == 0x100 || p == end)
/* Skip ahead depending on which of the trailing bytes can still begin a
 * start code — classic start-code-search stride optimization. */
161 if (p[-1] > 1 ) p += 3;
162 else if (p[-2] ) p += 2;
163 else if (p[-3]|(p[-1]-1)) p++;
/* Clamp so the caller never reads past the buffer. */
170 p = FFMIN(p, end) - 4;
176 /* init common dct for both encoder and decoder */
177 av_cold int ff_dct_common_init(MpegEncContext *s)
179 ff_dsputil_init(&s->dsp, s->avctx);
/* Install the C reference dequantizers; arch-specific init below may
 * override them with optimized versions. */
181 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
182 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
183 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
184 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
185 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* Bit-exact output requested: use the exact (slower) MPEG-2 intra path. */
186 if (s->flags & CODEC_FLAG_BITEXACT)
187 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
188 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture initialization (each call is presumably guarded by
 * HAVE_*/ARCH_* preprocessor conditionals elided from this excerpt). */
191 ff_MPV_common_init_mmx(s);
193 ff_MPV_common_init_axp(s);
195 ff_MPV_common_init_mmi(s);
197 ff_MPV_common_init_arm(s);
199 ff_MPV_common_init_altivec(s);
201 ff_MPV_common_init_bfin(s);
204 /* load & permutate scantables
205 * note: only wmv uses different ones
/* Scan tables are permuted to match the IDCT's coefficient ordering. */
207 if (s->alternate_scan) {
208 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
209 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
211 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
212 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
214 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
215 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy a Picture and mark the destination as a copy (FF_BUFFER_TYPE_COPY)
 * so it is not double-released. The struct copy itself is not visible in
 * this excerpt. */
220 void ff_copy_picture(Picture *dst, Picture *src)
223 dst->f.type = FF_BUFFER_TYPE_COPY;
227 * Release a frame buffer
229 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
231 /* Windows Media Image codecs allocate internal buffers with different
232 * dimensions; ignore user defined callbacks for these
234 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
235 ff_thread_release_buffer(s->avctx, &pic->f);
/* WMV3IMAGE/VC1IMAGE fall back to the default release callback. */
237 avcodec_default_release_buffer(s->avctx, &pic->f);
/* Always drop any hwaccel private data attached to this picture. */
238 av_freep(&pic->f.hwaccel_picture_private);
242 * Allocate a frame buffer
244 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* Allocate hwaccel private data first, if a hwaccel is active and needs it. */
248 if (s->avctx->hwaccel) {
249 assert(!pic->f.hwaccel_picture_private);
250 if (s->avctx->hwaccel->priv_data_size) {
251 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
252 if (!pic->f.hwaccel_picture_private) {
253 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* WMV3IMAGE/VC1IMAGE bypass user get_buffer callbacks (see
 * free_frame_buffer() for the matching release logic). */
259 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
260 r = ff_thread_get_buffer(s->avctx, &pic->f);
262 r = avcodec_default_get_buffer(s->avctx, &pic->f);
264 if (r < 0 || !pic->f.type || !pic->f.data[0]) {
265 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
266 r, pic->f.type, pic->f.data[0]);
267 av_freep(&pic->f.hwaccel_picture_private);
/* Strides must stay constant for the whole stream once established. */
271 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
272 s->uvlinesize != pic->f.linesize[1])) {
273 av_log(s->avctx, AV_LOG_ERROR,
274 "get_buffer() failed (stride changed)\n");
275 free_frame_buffer(s, pic);
/* U and V planes must share one stride; the code assumes this elsewhere. */
279 if (pic->f.linesize[1] != pic->f.linesize[2]) {
280 av_log(s->avctx, AV_LOG_ERROR,
281 "get_buffer() failed (uv stride mismatch)\n");
282 free_frame_buffer(s, pic);
290 * Allocate a Picture.
291 * The pixels are allocated/set by calling get_buffer() if shared = 0
293 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
295 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
297 // the + 1 is needed so memset(,,stride*height) does not sig11
299 const int mb_array_size = s->mb_stride * s->mb_height;
300 const int b8_array_size = s->b8_stride * s->mb_height * 2;
301 const int b4_array_size = s->b4_stride * s->mb_height * 4;
/* Shared path: pixels already provided by the caller, just tag the type. */
306 assert(pic->f.data[0]);
307 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
308 pic->f.type = FF_BUFFER_TYPE_SHARED;
310 assert(!pic->f.data[0]);
312 if (alloc_frame_buffer(s, pic) < 0)
/* Cache strides from the first allocated frame; alloc_frame_buffer()
 * verifies later frames keep them. */
315 s->linesize = pic->f.linesize[0];
316 s->uvlinesize = pic->f.linesize[1];
/* First allocation only: per-picture side tables. */
319 if (pic->f.qscale_table == NULL) {
321 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
322 mb_array_size * sizeof(int16_t), fail)
323 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
324 mb_array_size * sizeof(int16_t), fail)
325 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
326 mb_array_size * sizeof(int8_t ), fail)
329 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
330 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
331 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
332 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
334 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
335 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
/* Exposed pointers skip the guard rows/columns at the start of the base
 * allocations (2 rows + 1 column of padding). */
337 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
338 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
/* H.264 stores motion vectors at 4x4 granularity (subsample_log2 = 2)... */
339 if (s->out_format == FMT_H264) {
340 for (i = 0; i < 2; i++) {
341 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
342 2 * (b4_array_size + 4) * sizeof(int16_t),
344 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
345 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
346 4 * mb_array_size * sizeof(uint8_t), fail)
348 pic->f.motion_subsample_log2 = 2;
/* ...while H.263-family / encoding / MV debugging use 8x8 granularity. */
349 } else if (s->out_format == FMT_H263 || s->encoding ||
350 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
351 for (i = 0; i < 2; i++) {
352 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
353 2 * (b8_array_size + 4) * sizeof(int16_t),
355 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
356 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
357 4 * mb_array_size * sizeof(uint8_t), fail)
359 pic->f.motion_subsample_log2 = 3;
361 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
362 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
363 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
365 pic->f.qstride = s->mb_stride;
366 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
367 1 * sizeof(AVPanScan), fail)
/* Cleanup on any allocation failure above. */
373 fail: // for the FF_ALLOCZ_OR_GOTO macro
375 free_frame_buffer(s, pic);
380 * Deallocate a picture.
382 static void free_picture(MpegEncContext *s, Picture *pic)
/* Release the pixel buffer unless the pixels are caller-owned (SHARED). */
386 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
387 free_frame_buffer(s, pic);
/* Free all side tables allocated by ff_alloc_picture(). */
390 av_freep(&pic->mb_var);
391 av_freep(&pic->mc_mb_var);
392 av_freep(&pic->mb_mean);
393 av_freep(&pic->f.mbskip_table);
394 av_freep(&pic->qscale_table_base);
395 av_freep(&pic->mb_type_base);
396 av_freep(&pic->f.dct_coeff);
397 av_freep(&pic->f.pan_scan);
398 pic->f.mb_type = NULL;
399 for (i = 0; i < 2; i++) {
400 av_freep(&pic->motion_val_base[i]);
401 av_freep(&pic->f.ref_index[i]);
/* For shared pictures only clear the data pointers; the caller owns them. */
404 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
405 for (i = 0; i < 4; i++) {
407 pic->f.data[i] = NULL;
/* Allocate the per-slice-thread scratch buffers of an MpegEncContext
 * duplicate. Returns 0 on success, -1 on allocation failure (caller frees
 * via ff_MPV_common_end()). */
413 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
415 int y_size = s->b8_stride * (2 * s->mb_height + 1);
416 int c_size = s->mb_stride * (s->mb_height + 1);
417 int yc_size = y_size + 2 * c_size;
420 // edge emu needs blocksize + filter length - 1
421 // (= 17x17 for halfpel / 21x21 for h264)
422 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
423 (s->width + 64) * 2 * 21 * 2, fail); // (width + edge + align)*interlaced*MBsize*tolerance
425 // FIXME should be linesize instead of s->width * 2
426 // but that is not known before get_buffer()
427 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
428 (s->width + 64) * 4 * 16 * 2 * sizeof(uint8_t), fail)
/* Several scratch pointers deliberately alias one allocation; only
 * s->me.scratchpad is owned and freed (see free_duplicate_context()). */
429 s->me.temp = s->me.scratchpad;
430 s->rd_scratchpad = s->me.scratchpad;
431 s->b_scratchpad = s->me.scratchpad;
432 s->obmc_scratchpad = s->me.scratchpad + 16;
434 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
435 ME_MAP_SIZE * sizeof(uint32_t), fail)
436 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
437 ME_MAP_SIZE * sizeof(uint32_t), fail)
438 if (s->avctx->noise_reduction) {
439 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
440 2 * 64 * sizeof(int), fail)
443 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
444 s->block = s->blocks[0];
446 for (i = 0; i < 12; i++) {
447 s->pblocks[i] = &s->block[i];
/* H.263-family needs per-context AC prediction values (Y then 2x chroma). */
450 if (s->out_format == FMT_H263) {
452 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
453 yc_size * sizeof(int16_t) * 16, fail);
454 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
455 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
456 s->ac_val[2] = s->ac_val[1] + c_size;
461 return -1; // free() through ff_MPV_common_end()
/* Free everything allocated by init_duplicate_context(). Note
 * obmc_scratchpad only aliases me.scratchpad, so it is cleared, not freed. */
464 static void free_duplicate_context(MpegEncContext *s)
469 av_freep(&s->edge_emu_buffer);
470 av_freep(&s->me.scratchpad);
474 s->obmc_scratchpad = NULL;
476 av_freep(&s->dct_error_sum);
477 av_freep(&s->me.map);
478 av_freep(&s->me.score_map);
479 av_freep(&s->blocks);
480 av_freep(&s->ac_val_base);
/* Save the per-thread pointer fields of src into bak so they survive the
 * struct-wide memcpy in ff_update_duplicate_context(). Further COPY()
 * lines are elided from this excerpt. */
484 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
486 #define COPY(a) bak->a = src->a
487 COPY(edge_emu_buffer);
492 COPY(obmc_scratchpad);
499 COPY(me.map_generation);
/* Refresh a slice-thread duplicate context from the master: copy the whole
 * struct, then restore dst's own scratch pointers (saved in bak) and
 * re-point pblocks[] into dst's own block array. */
511 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
515 // FIXME copy only needed parts
517 backup_duplicate_context(&bak, dst);
518 memcpy(dst, src, sizeof(MpegEncContext));
519 backup_duplicate_context(dst, &bak);
520 for (i = 0; i < 12; i++) {
521 dst->pblocks[i] = &dst->block[i];
523 // STOP_TIMER("update_duplicate_context")
524 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading: synchronize decoder state from the source thread's
 * context (s1) into the destination thread's context (s). */
527 int ff_mpeg_update_thread_context(AVCodecContext *dst,
528 const AVCodecContext *src)
530 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
532 if (dst == src || !s1->context_initialized)
535 // FIXME can parameters change on I-frames?
536 // in that case dst may need a reinit
/* First-time init of the destination: clone the source wholesale, then
 * fix up the fields that must be per-thread. */
537 if (!s->context_initialized) {
538 memcpy(s, s1, sizeof(MpegEncContext));
541 s->picture_range_start += MAX_PICTURE_COUNT;
542 s->picture_range_end += MAX_PICTURE_COUNT;
543 s->bitstream_buffer = NULL;
544 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
546 ff_MPV_common_init(s);
549 s->avctx->coded_height = s1->avctx->coded_height;
550 s->avctx->coded_width = s1->avctx->coded_width;
551 s->avctx->width = s1->avctx->width;
552 s->avctx->height = s1->avctx->height;
554 s->coded_picture_number = s1->coded_picture_number;
555 s->picture_number = s1->picture_number;
556 s->input_picture_number = s1->input_picture_number;
/* Bulk-copy the picture arrays; field-range memcpys below rely on the
 * struct layout (copying everything between two member addresses). */
558 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
559 memcpy(&s->last_picture, &s1->last_picture,
560 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
/* Picture pointers must be rebased from s1's picture array to s's. */
562 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
563 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
564 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
566 // Error/bug resilience
567 s->next_p_frame_damaged = s1->next_p_frame_damaged;
568 s->workaround_bugs = s1->workaround_bugs;
571 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
572 (char *) &s1->shape - (char *) &s1->time_increment_bits);
575 s->max_b_frames = s1->max_b_frames;
576 s->low_delay = s1->low_delay;
577 s->dropable = s1->dropable;
579 // DivX handling (doesn't work)
580 s->divx_packed = s1->divx_packed;
/* Copy the leftover bitstream buffer, growing ours if needed, and keep the
 * zero padding required by the bitstream reader. */
582 if (s1->bitstream_buffer) {
583 if (s1->bitstream_buffer_size +
584 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
585 av_fast_malloc(&s->bitstream_buffer,
586 &s->allocated_bitstream_buffer_size,
587 s1->allocated_bitstream_buffer_size);
588 s->bitstream_buffer_size = s1->bitstream_buffer_size;
589 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
590 s1->bitstream_buffer_size);
591 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
592 FF_INPUT_BUFFER_PADDING_SIZE);
595 // MPEG2/interlacing info
596 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
597 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
599 if (!s1->first_field) {
600 s->last_pict_type = s1->pict_type;
601 if (s1->current_picture_ptr)
602 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
604 if (s1->pict_type != AV_PICTURE_TYPE_B) {
605 s->last_non_b_pict_type = s1->pict_type;
613 * Set the given MpegEncContext to common defaults
614 * (same for encoding and decoding).
615 * The changed fields will not depend upon the
616 * prior state of the MpegEncContext.
618 void ff_MPV_common_defaults(MpegEncContext *s)
/* MPEG-1 DC table and identity chroma qscale are the baseline; codecs
 * override these later where needed. */
620 s->y_dc_scale_table =
621 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
622 s->chroma_qscale_table = ff_default_chroma_qscale_table;
623 s->progressive_frame = 1;
624 s->progressive_sequence = 1;
625 s->picture_structure = PICT_FRAME;
627 s->coded_picture_number = 0;
628 s->picture_number = 0;
629 s->input_picture_number = 0;
631 s->picture_in_gop_number = 0;
636 s->picture_range_start = 0;
637 s->picture_range_end = MAX_PICTURE_COUNT;
639 s->slice_context_count = 1;
643 * Set the given MpegEncContext to defaults for decoding.
644 * the changed fields will not depend upon
645 * the prior state of the MpegEncContext.
647 void ff_MPV_decode_defaults(MpegEncContext *s)
649 ff_MPV_common_defaults(s);
653 * init common structure for both encoder and decoder.
654 * this assumes that some variables like width/height are already set
656 av_cold int ff_MPV_common_init(MpegEncContext *s)
658 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
/* Slice threading only applies when the active thread type is SLICE. */
659 int nb_slices = (HAVE_THREADS &&
660 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
661 s->avctx->thread_count : 1;
663 if (s->encoding && s->avctx->slices)
664 nb_slices = s->avctx->slices;
/* Interlaced MPEG-2 rounds mb_height up to a multiple of 2 macroblock
 * rows (32 pixels); H.264 manages its own mb_height. */
666 if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
667 s->mb_height = (s->height + 31) / 32 * 2;
668 else if (s->codec_id != CODEC_ID_H264)
669 s->mb_height = (s->height + 15) / 16;
671 if (s->avctx->pix_fmt == PIX_FMT_NONE) {
672 av_log(s->avctx, AV_LOG_ERROR,
673 "decoding to PIX_FMT_NONE is not supported.\n");
/* Clamp thread/slice count to something usable and warn. */
677 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
680 max_slices = FFMIN(MAX_THREADS, s->mb_height);
682 max_slices = MAX_THREADS;
683 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
684 " reducing to %d\n", nb_slices, max_slices);
685 nb_slices = max_slices;
688 if ((s->width || s->height) &&
689 av_image_check_size(s->width, s->height, 0, s->avctx))
692 ff_dct_common_init(s);
694 s->flags = s->avctx->flags;
695 s->flags2 = s->avctx->flags2;
/* Geometry-dependent setup; skipped when dimensions are not yet known. */
697 if (s->width && s->height) {
698 s->mb_width = (s->width + 15) / 16;
699 s->mb_stride = s->mb_width + 1;
700 s->b8_stride = s->mb_width * 2 + 1;
701 s->b4_stride = s->mb_width * 4 + 1;
702 mb_array_size = s->mb_height * s->mb_stride;
703 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
705 /* set chroma shifts */
706 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
709 /* set default edge pos, will be overriden
710 * in decode_header if needed */
711 s->h_edge_pos = s->mb_width * 16;
712 s->v_edge_pos = s->mb_height * 16;
714 s->mb_num = s->mb_width * s->mb_height;
719 s->block_wrap[3] = s->b8_stride;
721 s->block_wrap[5] = s->mb_stride;
723 y_size = s->b8_stride * (2 * s->mb_height + 1);
724 c_size = s->mb_stride * (s->mb_height + 1);
725 yc_size = y_size + 2 * c_size;
727 /* convert fourcc to upper case */
728 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
730 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
732 s->avctx->coded_frame = &s->current_picture.f;
/* mb_index2xy maps a raster macroblock index to its x+y*mb_stride slot. */
734 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
735 fail); // error ressilience code looks cleaner with this
736 for (y = 0; y < s->mb_height; y++)
737 for (x = 0; x < s->mb_width; x++)
738 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
740 s->mb_index2xy[s->mb_height * s->mb_width] =
741 (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
744 /* Allocate MV tables */
745 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,
746 mv_table_size * 2 * sizeof(int16_t), fail);
747 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,
748 mv_table_size * 2 * sizeof(int16_t), fail);
749 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,
750 mv_table_size * 2 * sizeof(int16_t), fail);
751 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base,
752 mv_table_size * 2 * sizeof(int16_t), fail);
753 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base,
754 mv_table_size * 2 * sizeof(int16_t), fail);
755 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,
756 mv_table_size * 2 * sizeof(int16_t), fail);
/* Public table pointers offset into the *_base allocations (the offset
 * expressions are elided from this excerpt). */
757 s->p_mv_table = s->p_mv_table_base +
759 s->b_forw_mv_table = s->b_forw_mv_table_base +
761 s->b_back_mv_table = s->b_back_mv_table_base +
763 s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base +
765 s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base +
767 s->b_direct_mv_table = s->b_direct_mv_table_base +
770 if (s->msmpeg4_version) {
771 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
772 2 * 2 * (MAX_LEVEL + 1) *
773 (MAX_RUN + 1) * 2 * sizeof(int), fail);
775 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
777 /* Allocate MB type table */
778 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
779 sizeof(uint16_t), fail); // needed for encoding
781 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
784 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,
785 64 * 32 * sizeof(int), fail);
786 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,
787 64 * 32 * sizeof(int), fail);
788 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,
789 64 * 32 * 2 * sizeof(uint16_t), fail);
790 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,
791 64 * 32 * 2 * sizeof(uint16_t), fail);
792 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
793 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
794 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
795 MAX_PICTURE_COUNT * sizeof(Picture *), fail);
797 if (s->avctx->noise_reduction) {
798 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
799 2 * 64 * sizeof(uint16_t), fail);
/* One picture pool per thread (frame threading multiplies the count). */
804 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
805 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
806 s->picture_count * sizeof(Picture), fail);
807 for (i = 0; i < s->picture_count; i++) {
808 avcodec_get_frame_defaults(&s->picture[i].f);
811 if (s->width && s->height) {
812 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
813 mb_array_size * sizeof(uint8_t), fail);
815 if (s->codec_id == CODEC_ID_MPEG4 ||
816 (s->flags & CODEC_FLAG_INTERLACED_ME)) {
817 /* interlaced direct mode decoding tables */
818 for (i = 0; i < 2; i++) {
820 for (j = 0; j < 2; j++) {
821 for (k = 0; k < 2; k++) {
822 FF_ALLOCZ_OR_GOTO(s->avctx,
823 s->b_field_mv_table_base[i][j][k],
824 mv_table_size * 2 * sizeof(int16_t),
826 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
829 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j],
830 mb_array_size * 2 * sizeof(uint8_t),
832 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j],
833 mv_table_size * 2 * sizeof(int16_t),
835 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
838 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i],
839 mb_array_size * 2 * sizeof(uint8_t),
843 if (s->out_format == FMT_H263) {
845 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
846 s->coded_block = s->coded_block_base + s->b8_stride + 1;
848 /* cbp, ac_pred, pred_dir */
849 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table,
850 mb_array_size * sizeof(uint8_t), fail);
851 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table,
852 mb_array_size * sizeof(uint8_t), fail);
855 if (s->h263_pred || s->h263_plus || !s->encoding) {
857 // MN: we need these for error resilience of intra-frames
858 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base,
859 yc_size * sizeof(int16_t), fail);
860 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
861 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
862 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor reset value. */
863 for (i = 0; i < yc_size; i++)
864 s->dc_val_base[i] = 1024;
867 /* which mb is a intra block */
868 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
869 memset(s->mbintra_table, 1, mb_array_size);
871 /* init macroblock skip table */
872 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
873 // Note the + 1 is for a quicker mpeg4 slice_end detection
875 s->parse_context.state = -1;
876 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
877 s->avctx->debug_mv) {
878 s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
879 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
880 s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
881 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
882 s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
883 2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
887 s->context_initialized = 1;
888 s->thread_context[0] = s;
/* Build the slice-thread duplicate contexts and assign each its
 * macroblock row range [start_mb_y, end_mb_y). */
890 if (s->width && s->height) {
892 for (i = 1; i < nb_slices; i++) {
893 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
894 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
897 for (i = 0; i < nb_slices; i++) {
898 if (init_duplicate_context(s->thread_context[i], s) < 0)
900 s->thread_context[i]->start_mb_y =
901 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
902 s->thread_context[i]->end_mb_y =
903 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
906 if (init_duplicate_context(s, s) < 0)
909 s->end_mb_y = s->mb_height;
911 s->slice_context_count = nb_slices;
/* Any failure above lands here and tears everything down. */
916 ff_MPV_common_end(s);
920 /* init common structure for both encoder and decoder */
921 void ff_MPV_common_end(MpegEncContext *s)
/* Tear down slice-thread duplicates first; context 0 is s itself, so only
 * contexts >= 1 are heap-freed. */
925 if (s->slice_context_count > 1) {
926 for (i = 0; i < s->slice_context_count; i++) {
927 free_duplicate_context(s->thread_context[i]);
929 for (i = 1; i < s->slice_context_count; i++) {
930 av_freep(&s->thread_context[i]);
932 s->slice_context_count = 1;
933 } else free_duplicate_context(s);
935 av_freep(&s->parse_context.buffer);
936 s->parse_context.buffer_size = 0;
/* Free everything ff_MPV_common_init() allocated, mirroring its order. */
938 av_freep(&s->mb_type);
939 av_freep(&s->p_mv_table_base);
940 av_freep(&s->b_forw_mv_table_base);
941 av_freep(&s->b_back_mv_table_base);
942 av_freep(&s->b_bidir_forw_mv_table_base);
943 av_freep(&s->b_bidir_back_mv_table_base);
944 av_freep(&s->b_direct_mv_table_base);
945 s->p_mv_table = NULL;
946 s->b_forw_mv_table = NULL;
947 s->b_back_mv_table = NULL;
948 s->b_bidir_forw_mv_table = NULL;
949 s->b_bidir_back_mv_table = NULL;
950 s->b_direct_mv_table = NULL;
951 for (i = 0; i < 2; i++) {
952 for (j = 0; j < 2; j++) {
953 for (k = 0; k < 2; k++) {
954 av_freep(&s->b_field_mv_table_base[i][j][k]);
955 s->b_field_mv_table[i][j][k] = NULL;
957 av_freep(&s->b_field_select_table[i][j]);
958 av_freep(&s->p_field_mv_table_base[i][j]);
959 s->p_field_mv_table[i][j] = NULL;
961 av_freep(&s->p_field_select_table[i]);
964 av_freep(&s->dc_val_base);
965 av_freep(&s->coded_block_base);
966 av_freep(&s->mbintra_table);
967 av_freep(&s->cbp_table);
968 av_freep(&s->pred_dir_table);
970 av_freep(&s->mbskip_table);
971 av_freep(&s->bitstream_buffer);
972 s->allocated_bitstream_buffer_size = 0;
974 av_freep(&s->avctx->stats_out);
975 av_freep(&s->ac_stats);
976 av_freep(&s->error_status_table);
977 av_freep(&s->mb_index2xy);
978 av_freep(&s->lambda_table);
979 av_freep(&s->q_intra_matrix);
980 av_freep(&s->q_inter_matrix);
981 av_freep(&s->q_intra_matrix16);
982 av_freep(&s->q_inter_matrix16);
983 av_freep(&s->input_picture);
984 av_freep(&s->reordered_input_picture);
985 av_freep(&s->dct_offset);
/* Pictures are owned by the original context, not frame-thread copies. */
987 if (s->picture && !s->avctx->internal->is_copy) {
988 for (i = 0; i < s->picture_count; i++) {
989 free_picture(s, &s->picture[i]);
992 av_freep(&s->picture);
993 s->context_initialized = 0;
994 s->last_picture_ptr =
995 s->next_picture_ptr =
996 s->current_picture_ptr = NULL;
997 s->linesize = s->uvlinesize = 0;
999 for (i = 0; i < 3; i++)
1000 av_freep(&s->visualization_buffer[i]);
1002 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
1003 avcodec_default_free_buffers(s->avctx);
/* Build the derived run/level lookup tables (max_level[], max_run[],
 * index_run[]) of an RLTable. If static_store is given the tables live in
 * that static buffer and initialization is done only once; otherwise they
 * are heap-allocated. */
1006 void ff_init_rl(RLTable *rl,
1007 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1009 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1010 uint8_t index_run[MAX_RUN + 1];
1011 int last, run, level, start, end, i;
1013 /* If table is static, we can quit if rl->max_level[0] is not NULL */
1014 if (static_store && rl->max_level[0])
1017 /* compute max_level[], max_run[] and index_run[] */
1018 for (last = 0; last < 2; last++) {
/* rl->n is used as the "unset" sentinel in index_run[]. */
1027 memset(max_level, 0, MAX_RUN + 1);
1028 memset(max_run, 0, MAX_LEVEL + 1);
1029 memset(index_run, rl->n, MAX_RUN + 1);
1030 for (i = start; i < end; i++) {
1031 run = rl->table_run[i];
1032 level = rl->table_level[i];
1033 if (index_run[run] == rl->n)
1035 if (level > max_level[run])
1036 max_level[run] = level;
1037 if (run > max_run[level])
1038 max_run[level] = run;
/* Static path: carve the three tables out of one static buffer;
 * dynamic path: av_malloc each. */
1041 rl->max_level[last] = static_store[last];
1043 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1044 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1046 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1048 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1049 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1051 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1053 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1054 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Pre-expand the RLTable's VLC into per-qscale RL_VLC tables: for each of
 * the 32 qscale values, bake qmul/qadd dequantization into the stored
 * level so decoding needs no per-coefficient multiply. */
1058 void ff_init_vlc_rl(RLTable *rl)
1062 for (q = 0; q < 32; q++) {
/* qadd = (q - 1) | 1 forces an odd rounding offset. */
1064 int qadd = (q - 1) | 1;
1070 for (i = 0; i < rl->vlc.table_size; i++) {
1071 int code = rl->vlc.table[i][0];
1072 int len = rl->vlc.table[i][1];
1075 if (len == 0) { // illegal code
1078 } else if (len < 0) { // more bits needed
1082 if (code == rl->n) { // esc
/* Codes past rl->last belong to the "last coefficient" half; +192 on the
 * run encodes that in the combined run field. */
1086 run = rl->table_run[code] + 1;
1087 level = rl->table_level[code] * qmul + qadd;
1088 if (code >= rl->last) run += 192;
1091 rl->rl_vlc[q][i].len = len;
1092 rl->rl_vlc[q][i].level = level;
1093 rl->rl_vlc[q][i].run = run;
/* Free the frame buffers of all pictures that are neither reference frames
 * nor owned by another slice context; the current picture is kept unless
 * remove_current is set. */
1098 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1102 /* release non reference frames */
1103 for (i = 0; i < s->picture_count; i++) {
1104 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1105 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1106 (remove_current || &s->picture[i] != s->current_picture_ptr)
1107 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1108 free_frame_buffer(s, &s->picture[i]);
/* Find a free slot in s->picture[] within this context's range.
 * Preference order depends on 'shared': first pass wants a completely
 * untyped slot, second pass accepts typed-but-empty, last pass takes any
 * slot with no pixel data. Returns the index or AVERROR_INVALIDDATA. */
1113 int ff_find_unused_picture(MpegEncContext *s, int shared)
1118 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1119 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1123 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1124 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1127 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1128 if (s->picture[i].f.data[0] == NULL)
1133 return AVERROR_INVALIDDATA;
/* Recompute the encoder's per-coefficient noise-reduction DCT offsets from
 * accumulated error statistics, separately for intra and inter blocks. */
1136 static void update_noise_reduction(MpegEncContext *s)
1140 for (intra = 0; intra < 2; intra++) {
/* Halve the accumulators periodically so they track recent frames
 * instead of growing without bound. */
1141 if (s->dct_count[intra] > (1 << 16)) {
1142 for (i = 0; i < 64; i++) {
1143 s->dct_error_sum[intra][i] >>= 1;
1145 s->dct_count[intra] >>= 1;
1148 for (i = 0; i < 64; i++) {
1149 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1150 s->dct_count[intra] +
1151 s->dct_error_sum[intra][i] / 2) /
1152 (s->dct_error_sum[intra][i] + 1);
1158 * generic function for encode/decode called after coding/decoding
1159 * the header and before a frame is coded/decoded.
1161 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1167 assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
1168 s->codec_id == CODEC_ID_SVQ3);
1170 /* mark & release old frames */
1171 if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
1172 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1173 s->last_picture_ptr != s->next_picture_ptr &&
1174 s->last_picture_ptr->f.data[0]) {
1175 if (s->last_picture_ptr->owner2 == s)
1176 free_frame_buffer(s, s->last_picture_ptr);
1179 /* release forgotten pictures */
1180 /* if (mpeg124/h263) */
1182 for (i = 0; i < s->picture_count; i++) {
1183 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1184 &s->picture[i] != s->last_picture_ptr &&
1185 &s->picture[i] != s->next_picture_ptr &&
1186 s->picture[i].f.reference) {
1187 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1188 av_log(avctx, AV_LOG_ERROR,
1189 "releasing zombie picture\n");
1190 free_frame_buffer(s, &s->picture[i]);
1197 ff_release_unused_pictures(s, 1);
     /* Pick (or reuse) the slot that will hold the frame about to be
      * coded/decoded. */
1199 if (s->current_picture_ptr &&
1200 s->current_picture_ptr->f.data[0] == NULL) {
1201 // we already have a unused image
1202 // (maybe it was set before reading the header)
1203 pic = s->current_picture_ptr;
1205 i = ff_find_unused_picture(s, 0);
1206 pic = &s->picture[i];
1209 pic->f.reference = 0;
1211 if (s->codec_id == CODEC_ID_H264)
1212 pic->f.reference = s->picture_structure;
1213 else if (s->pict_type != AV_PICTURE_TYPE_B)
1214 pic->f.reference = 3;
1217 pic->f.coded_picture_number = s->coded_picture_number++;
1219 if (ff_alloc_picture(s, pic, 0) < 0)
1222 s->current_picture_ptr = pic;
1223 // FIXME use only the vars from current_pic
1224 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1225 if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
1226 s->codec_id == CODEC_ID_MPEG2VIDEO) {
1227 if (s->picture_structure != PICT_FRAME)
1228 s->current_picture_ptr->f.top_field_first =
1229 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1231 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1232 !s->progressive_sequence;
1233 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1236 s->current_picture_ptr->f.pict_type = s->pict_type;
1237 // if (s->flags && CODEC_FLAG_QSCALE)
1238 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1239 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1241 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
     /* Rotate the reference chain: for non-B frames the previous "next"
      * picture becomes "last" and the new frame becomes "next". */
1243 if (s->pict_type != AV_PICTURE_TYPE_B) {
1244 s->last_picture_ptr = s->next_picture_ptr;
1246 s->next_picture_ptr = s->current_picture_ptr;
1248 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1249 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1250 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1251 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1252 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1253 s->pict_type, s->dropable); */
     /* If references are missing (stream starts on a P/B frame, or a
      * field-based keyframe), allocate grey dummy frames so motion
      * compensation has something valid to read from. */
1255 if (s->codec_id != CODEC_ID_H264) {
1256 if ((s->last_picture_ptr == NULL ||
1257 s->last_picture_ptr->f.data[0] == NULL) &&
1258 (s->pict_type != AV_PICTURE_TYPE_I ||
1259 s->picture_structure != PICT_FRAME)) {
1260 if (s->pict_type != AV_PICTURE_TYPE_I)
1261 av_log(avctx, AV_LOG_ERROR,
1262 "warning: first frame is no keyframe\n");
1263 else if (s->picture_structure != PICT_FRAME)
1264 av_log(avctx, AV_LOG_INFO,
1265 "allocate dummy last picture for field based first keyframe\n");
1267 /* Allocate a dummy frame */
1268 i = ff_find_unused_picture(s, 0);
1269 s->last_picture_ptr = &s->picture[i];
1270 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
1272 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1273 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1275 if ((s->next_picture_ptr == NULL ||
1276 s->next_picture_ptr->f.data[0] == NULL) &&
1277 s->pict_type == AV_PICTURE_TYPE_B) {
1278 /* Allocate a dummy frame */
1279 i = ff_find_unused_picture(s, 0);
1280 s->next_picture_ptr = &s->picture[i];
1281 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1283 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1284 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1288 if (s->last_picture_ptr)
1289 ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1290 if (s->next_picture_ptr)
1291 ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1293 if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
1294 (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
1295 if (s->next_picture_ptr)
1296 s->next_picture_ptr->owner2 = s;
1297 if (s->last_picture_ptr)
1298 s->last_picture_ptr->owner2 = s;
1301 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1302 s->last_picture_ptr->f.data[0]));
     /* Field pictures: offset the data pointers to the requested field and
      * double the linesizes so the field is addressed as a frame. */
1304 if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1306 for (i = 0; i < 4; i++) {
1307 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1308 s->current_picture.f.data[i] +=
1309 s->current_picture.f.linesize[i];
1311 s->current_picture.f.linesize[i] *= 2;
1312 s->last_picture.f.linesize[i] *= 2;
1313 s->next_picture.f.linesize[i] *= 2;
1317 s->err_recognition = avctx->err_recognition;
1319 /* set dequantizer, we can't do it during init as
1320 * it might change for mpeg4 and we can't do it in the header
1321 * decode as init is not called for mpeg4 there yet */
1322 if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1323 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1324 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1325 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1326 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1327 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1329 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1330 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1333 if (s->dct_error_sum) {
1334 assert(s->avctx->noise_reduction && s->encoding);
1335 update_noise_reduction(s);
1338 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1339 return ff_xvmc_field_start(s, avctx);
1344 /* generic function for encode/decode called after a
1345 * frame has been coded/decoded. */
1346 void ff_MPV_frame_end(MpegEncContext *s)
1349 /* redraw edges for the frame if decoding didn't complete */
1350 // just to make sure that all data is rendered.
1351 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1352 ff_xvmc_field_end(s);
     /* Edge padding is only needed for software decoding of reference
      * frames with unrestricted MV and no EMU_EDGE; hwaccel/VDPAU paths
      * manage their own surfaces. */
1353 } else if ((s->error_count || s->encoding) &&
1354 !s->avctx->hwaccel &&
1355 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1356 s->unrestricted_mv &&
1357 s->current_picture.f.reference &&
1359 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1360 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1361 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1362 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1363 s->h_edge_pos, s->v_edge_pos,
1364 EDGE_WIDTH, EDGE_WIDTH,
1365 EDGE_TOP | EDGE_BOTTOM);
1366 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1367 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1368 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1369 EDGE_TOP | EDGE_BOTTOM);
1370 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1371 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1372 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1373 EDGE_TOP | EDGE_BOTTOM);
     /* Remember per-type statistics used by the rate control of the next
      * frame. */
1378 s->last_pict_type = s->pict_type;
1379 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1380 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1381 s->last_non_b_pict_type = s->pict_type;
1384 /* copy back current_picture variables */
1385 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1386 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1387 s->picture[i] = s->current_picture;
1391 assert(i < MAX_PICTURE_COUNT);
1395 /* release non-reference frames */
1396 for (i = 0; i < s->picture_count; i++) {
1397 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1398 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1399 free_frame_buffer(s, &s->picture[i]);
1403 // clear copies, to avoid confusion
1405 memset(&s->last_picture, 0, sizeof(Picture));
1406 memset(&s->next_picture, 0, sizeof(Picture));
1407 memset(&s->current_picture, 0, sizeof(Picture));
1409 s->avctx->coded_frame = &s->current_picture_ptr->f;
1411 if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
1412 ff_thread_report_progress(&s->current_picture_ptr->f,
1413 s->mb_height - 1, 0);
1418 * Draw a line from (ex, ey) -> (sx, sy).
1419 * @param w width of the image
1420 * @param h height of the image
1421 * @param stride stride/linesize of the image
1422 * @param color color of the arrow
1424 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1425 int w, int h, int stride, int color)
1429 sx = av_clip(sx, 0, w - 1);
1430 sy = av_clip(sy, 0, h - 1);
1431 ex = av_clip(ex, 0, w - 1);
1432 ey = av_clip(ey, 0, h - 1);
1434 buf[sy * stride + sx] += color;
1436 if (FFABS(ex - sx) > FFABS(ey - sy)) {
1438 FFSWAP(int, sx, ex);
1439 FFSWAP(int, sy, ey);
1441 buf += sx + sy * stride;
1443 f = ((ey - sy) << 16) / ex;
1444 for (x = 0; x = ex; x++) {
1446 fr = (x * f) & 0xFFFF;
1447 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1448 buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1452 FFSWAP(int, sx, ex);
1453 FFSWAP(int, sy, ey);
1455 buf += sx + sy * stride;
1458 f = ((ex - sx) << 16) / ey;
1461 for (y = 0; y = ey; y++) {
1463 fr = (y * f) & 0xFFFF;
1464 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1465 buf[y * stride + x + 1] += (color * fr ) >> 16;
1471 * Draw an arrow from (ex, ey) -> (sx, sy).
1472 * @param w width of the image
1473 * @param h height of the image
1474 * @param stride stride/linesize of the image
1475 * @param color color of the arrow
1477 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1478 int ey, int w, int h, int stride, int color)
     /* Loose clamp: endpoints may lie up to 100 px outside the image;
      * draw_line() does the hard clamping. */
1482 sx = av_clip(sx, -100, w + 100);
1483 sy = av_clip(sy, -100, h + 100);
1484 ex = av_clip(ex, -100, w + 100);
1485 ey = av_clip(ey, -100, h + 100);
     /* Only draw the two arrow-head strokes when the vector is longer than
      * 3 px.  NOTE(review): dx/dy and rx/ry are computed on lines elided
      * from this chunk. */
1490 if (dx * dx + dy * dy > 3 * 3) {
1493 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1495 // FIXME subpixel accuracy
1496 rx = ROUNDED_DIV(rx * 3 << 4, length);
1497 ry = ROUNDED_DIV(ry * 3 << 4, length);
1499 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1500 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
     /* The arrow shaft itself. */
1502 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1506 * Print debugging info for the given picture.
1508 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
1510 if (s->avctx->hwaccel || !pict || !pict->mb_type)
     /* Textual dump: one character per macroblock showing skip count, QP,
      * and/or MB type depending on the requested debug flags. */
1513 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1516 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1517 switch (pict->pict_type) {
1518 case AV_PICTURE_TYPE_I:
1519 av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1521 case AV_PICTURE_TYPE_P:
1522 av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1524 case AV_PICTURE_TYPE_B:
1525 av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1527 case AV_PICTURE_TYPE_S:
1528 av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1530 case AV_PICTURE_TYPE_SI:
1531 av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1533 case AV_PICTURE_TYPE_SP:
1534 av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1537 for (y = 0; y < s->mb_height; y++) {
1538 for (x = 0; x < s->mb_width; x++) {
1539 if (s->avctx->debug & FF_DEBUG_SKIP) {
1540 int count = s->mbskip_table[x + y * s->mb_stride];
1543 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1545 if (s->avctx->debug & FF_DEBUG_QP) {
1546 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1547 pict->qscale_table[x + y * s->mb_stride]);
1549 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1550 int mb_type = pict->mb_type[x + y * s->mb_stride];
1551 // Type & MV direction
1552 if (IS_PCM(mb_type))
1553 av_log(s->avctx, AV_LOG_DEBUG, "P");
1554 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1555 av_log(s->avctx, AV_LOG_DEBUG, "A");
1556 else if (IS_INTRA4x4(mb_type))
1557 av_log(s->avctx, AV_LOG_DEBUG, "i");
1558 else if (IS_INTRA16x16(mb_type))
1559 av_log(s->avctx, AV_LOG_DEBUG, "I");
1560 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1561 av_log(s->avctx, AV_LOG_DEBUG, "d");
1562 else if (IS_DIRECT(mb_type))
1563 av_log(s->avctx, AV_LOG_DEBUG, "D");
1564 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1565 av_log(s->avctx, AV_LOG_DEBUG, "g");
1566 else if (IS_GMC(mb_type))
1567 av_log(s->avctx, AV_LOG_DEBUG, "G");
1568 else if (IS_SKIP(mb_type))
1569 av_log(s->avctx, AV_LOG_DEBUG, "S");
1570 else if (!USES_LIST(mb_type, 1))
1571 av_log(s->avctx, AV_LOG_DEBUG, ">");
1572 else if (!USES_LIST(mb_type, 0))
1573 av_log(s->avctx, AV_LOG_DEBUG, "<");
1575 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1576 av_log(s->avctx, AV_LOG_DEBUG, "X");
     /* Second character: partition segmentation. */
1580 if (IS_8X8(mb_type))
1581 av_log(s->avctx, AV_LOG_DEBUG, "+");
1582 else if (IS_16X8(mb_type))
1583 av_log(s->avctx, AV_LOG_DEBUG, "-");
1584 else if (IS_8X16(mb_type))
1585 av_log(s->avctx, AV_LOG_DEBUG, "|");
1586 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1587 av_log(s->avctx, AV_LOG_DEBUG, " ");
1589 av_log(s->avctx, AV_LOG_DEBUG, "?");
     /* Third character: interlacing. */
1592 if (IS_INTERLACED(mb_type))
1593 av_log(s->avctx, AV_LOG_DEBUG, "=");
1595 av_log(s->avctx, AV_LOG_DEBUG, " ");
1597 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1599 av_log(s->avctx, AV_LOG_DEBUG, "\n");
     /* Visual overlays: draw motion vectors / QP / MB type directly into a
      * copy of the picture (s->visualization_buffer). */
1603 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1604 (s->avctx->debug_mv)) {
1605 const int shift = 1 + s->quarter_sample;
1609 int h_chroma_shift, v_chroma_shift, block_height;
1610 const int width = s->avctx->width;
1611 const int height = s->avctx->height;
1612 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1613 const int mv_stride = (s->mb_width << mv_sample_log2) +
1614 (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1615 s->low_delay = 0; // needed to see the vectors without trashing the buffers
1617 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1618 &h_chroma_shift, &v_chroma_shift);
1619 for (i = 0; i < 3; i++) {
1620 memcpy(s->visualization_buffer[i], pict->data[i],
1621 (i == 0) ? pict->linesize[i] * height:
1622 pict->linesize[i] * height >> v_chroma_shift);
1623 pict->data[i] = s->visualization_buffer[i];
1625 pict->type = FF_BUFFER_TYPE_COPY;
1626 ptr = pict->data[0];
1627 block_height = 16 >> v_chroma_shift;
1629 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1631 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1632 const int mb_index = mb_x + mb_y * s->mb_stride;
1633 if ((s->avctx->debug_mv) && pict->motion_val) {
     /* type 0/1/2 = P forward / B forward / B backward vectors. */
1635 for (type = 0; type < 3; type++) {
1639 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1640 (pict->pict_type!= AV_PICTURE_TYPE_P))
1645 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1646 (pict->pict_type!= AV_PICTURE_TYPE_B))
1651 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1652 (pict->pict_type!= AV_PICTURE_TYPE_B))
1657 if (!USES_LIST(pict->mb_type[mb_index], direction))
1660 if (IS_8X8(pict->mb_type[mb_index])) {
1662 for (i = 0; i < 4; i++) {
1663 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1664 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1665 int xy = (mb_x * 2 + (i & 1) +
1666 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1667 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1668 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1669 draw_arrow(ptr, sx, sy, mx, my, width,
1670 height, s->linesize, 100);
1672 } else if (IS_16X8(pict->mb_type[mb_index])) {
1674 for (i = 0; i < 2; i++) {
1675 int sx = mb_x * 16 + 8;
1676 int sy = mb_y * 16 + 4 + 8 * i;
1677 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1678 int mx = (pict->motion_val[direction][xy][0] >> shift);
1679 int my = (pict->motion_val[direction][xy][1] >> shift);
1681 if (IS_INTERLACED(pict->mb_type[mb_index]))
1684 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1685 height, s->linesize, 100);
1687 } else if (IS_8X16(pict->mb_type[mb_index])) {
1689 for (i = 0; i < 2; i++) {
1690 int sx = mb_x * 16 + 4 + 8 * i;
1691 int sy = mb_y * 16 + 8;
1692 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1693 int mx = pict->motion_val[direction][xy][0] >> shift;
1694 int my = pict->motion_val[direction][xy][1] >> shift;
1696 if (IS_INTERLACED(pict->mb_type[mb_index]))
1699 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1700 height, s->linesize, 100);
1703 int sx = mb_x * 16 + 8;
1704 int sy = mb_y * 16 + 8;
1705 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
     /* NOTE(review): BUG -- ">> shift + sx" parses as
      * ">> (shift + sx)" because + binds tighter than >> in C.
      * The sibling branches above parenthesize the shift:
      * "(... >> shift) + sx".  These two lines should read
      * "(pict->motion_val[direction][xy][0] >> shift) + sx" (and
      * likewise for my); as written the arrows for 16x16 blocks
      * are wrong.  Cannot be fixed in a comment-only pass. */
1706 int mx = pict->motion_val[direction][xy][0] >> shift + sx;
1707 int my = pict->motion_val[direction][xy][1] >> shift + sy;
1708 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
     /* QP overlay: paint chroma planes with a grey level proportional
      * to the macroblock quantizer. */
1712 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1713 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1714 0x0101010101010101ULL;
1716 for (y = 0; y < block_height; y++) {
1717 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1718 (block_height * mb_y + y) *
1719 pict->linesize[1]) = c;
1720 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1721 (block_height * mb_y + y) *
1722 pict->linesize[2]) = c;
1725 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1727 int mb_type = pict->mb_type[mb_index];
     /* Map MB type to a hue on the U/V color circle. */
1730 #define COLOR(theta, r) \
1731 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1732 v = (int)(128 + r * sin(theta * 3.141592 / 180));
1736 if (IS_PCM(mb_type)) {
1738 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1739 IS_INTRA16x16(mb_type)) {
1741 } else if (IS_INTRA4x4(mb_type)) {
1743 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1745 } else if (IS_DIRECT(mb_type)) {
1747 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1749 } else if (IS_GMC(mb_type)) {
1751 } else if (IS_SKIP(mb_type)) {
1753 } else if (!USES_LIST(mb_type, 1)) {
1755 } else if (!USES_LIST(mb_type, 0)) {
1758 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1762 u *= 0x0101010101010101ULL;
1763 v *= 0x0101010101010101ULL;
1764 for (y = 0; y < block_height; y++) {
1765 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1766 (block_height * mb_y + y) * pict->linesize[1]) = u;
1767 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1768 (block_height * mb_y + y) * pict->linesize[2]) = v;
     /* Segmentation borders: XOR a mid-grey line along partition
      * boundaries in the luma plane. */
1772 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1773 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1774 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1775 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1776 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1778 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1779 for (y = 0; y < 16; y++)
1780 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1781 pict->linesize[0]] ^= 0x80;
1783 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
1784 int dm = 1 << (mv_sample_log2 - 2);
1785 for (i = 0; i < 4; i++) {
1786 int sx = mb_x * 16 + 8 * (i & 1);
1787 int sy = mb_y * 16 + 8 * (i >> 1);
1788 int xy = (mb_x * 2 + (i & 1) +
1789 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1791 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
1792 if (mv[0] != mv[dm] ||
1793 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
1794 for (y = 0; y < 8; y++)
1795 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
1796 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
1797 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
1798 pict->linesize[0]) ^= 0x8080808080808080ULL;
1802 if (IS_INTERLACED(mb_type) &&
1803 s->codec_id == CODEC_ID_H264) {
1807 s->mbskip_table[mb_index] = 0;
/* Half-pel motion compensation of one w x h block at reduced resolution
 * (avctx->lowres).  Falls back to edge emulation when the source block
 * would read outside [0, h_edge_pos] x [0, v_edge_pos]. */
1813 static inline int hpel_motion_lowres(MpegEncContext *s,
1814 uint8_t *dest, uint8_t *src,
1815 int field_based, int field_select,
1816 int src_x, int src_y,
1817 int width, int height, int stride,
1818 int h_edge_pos, int v_edge_pos,
1819 int w, int h, h264_chroma_mc_func *pix_op,
1820 int motion_x, int motion_y)
1822 const int lowres = s->avctx->lowres;
1823 const int op_index = FFMIN(lowres, 2);
1824 const int s_mask = (2 << lowres) - 1;
1828 if (s->quarter_sample) {
     /* Split the MV into subpel fraction (sx/sy) and integer pel offset.
      * "motion_x >> lowres + 1" parses as ">> (lowres + 1)" since + binds
      * tighter than >> in C -- intentional here (half-pel units). */
1833 sx = motion_x & s_mask;
1834 sy = motion_y & s_mask;
1835 src_x += motion_x >> lowres + 1;
1836 src_y += motion_y >> lowres + 1;
1838 src += src_y * stride + src_x;
1840 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
1841 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1842 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
1843 (h + 1) << field_based, src_x,
1844 src_y << field_based,
1847 src = s->edge_emu_buffer;
     /* Rescale the subpel fraction to the 1/8-pel units expected by the
      * chroma MC functions. */
1851 sx = (sx << 2) >> lowres;
1852 sy = (sy << 2) >> lowres;
1855 pix_op[op_index](dest, src, stride, h, sx, sy);
1859 /* apply one mpeg motion vector to the three components */
1860 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1867 uint8_t **ref_picture,
1868 h264_chroma_mc_func *pix_op,
1869 int motion_x, int motion_y,
1872 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1873 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
1875 const int lowres = s->avctx->lowres;
1876 const int op_index = FFMIN(lowres, 2);
1877 const int block_s = 8>>lowres;
1878 const int s_mask = (2 << lowres) - 1;
1879 const int h_edge_pos = s->h_edge_pos >> lowres;
1880 const int v_edge_pos = s->v_edge_pos >> lowres;
1881 linesize = s->current_picture.f.linesize[0] << field_based;
1882 uvlinesize = s->current_picture.f.linesize[1] << field_based;
1884 // FIXME obviously not perfect but qpel will not work in lowres anyway
1885 if (s->quarter_sample) {
1891 motion_y += (bottom_field - field_select) * (1 << lowres - 1);
     /* Split luma MV into subpel fraction and integer source position
      * (">> lowres + 1" is ">> (lowres + 1)", C precedence). */
1894 sx = motion_x & s_mask;
1895 sy = motion_y & s_mask;
1896 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
1897 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
     /* Chroma MV derivation differs per format: H263 rounds towards the
      * luma position, H261 uses full-pel chroma vectors. */
1899 if (s->out_format == FMT_H263) {
1900 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
1901 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
1902 uvsrc_x = src_x >> 1;
1903 uvsrc_y = src_y >> 1;
1904 } else if (s->out_format == FMT_H261) {
1905 // even chroma mv's are full pel in H261
1908 uvsx = (2 * mx) & s_mask;
1909 uvsy = (2 * my) & s_mask;
1910 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
1911 uvsrc_y = mb_y * block_s + (my >> lowres);
1917 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
1918 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
1921 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1922 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1923 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
     /* Out-of-picture reads go through the edge emulation buffer. */
1925 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
1926 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1927 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
1928 s->linesize, 17, 17 + field_based,
1929 src_x, src_y << field_based, h_edge_pos,
1931 ptr_y = s->edge_emu_buffer;
1932 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
1933 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
1934 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
1936 uvsrc_x, uvsrc_y << field_based,
1937 h_edge_pos >> 1, v_edge_pos >> 1);
1938 s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
1940 uvsrc_x, uvsrc_y << field_based,
1941 h_edge_pos >> 1, v_edge_pos >> 1);
1943 ptr_cr = uvbuf + 16;
1947 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
1949 dest_y += s->linesize;
1950 dest_cb += s->uvlinesize;
1951 dest_cr += s->uvlinesize;
1955 ptr_y += s->linesize;
1956 ptr_cb += s->uvlinesize;
1957 ptr_cr += s->uvlinesize;
     /* Rescale subpel fractions to 1/8-pel and run the MC kernels. */
1960 sx = (sx << 2) >> lowres;
1961 sy = (sy << 2) >> lowres;
1962 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
1964 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
1965 uvsx = (uvsx << 2) >> lowres;
1966 uvsy = (uvsy << 2) >> lowres;
1967 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift,
1969 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift,
1972 // FIXME h261 lowres loop filter
/* Chroma motion compensation for 4MV macroblocks at reduced resolution:
 * the four luma vectors are combined into one chroma vector (H263-style
 * rounding) and applied to both the Cb and Cr planes. */
1975 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1976 uint8_t *dest_cb, uint8_t *dest_cr,
1977 uint8_t **ref_picture,
1978 h264_chroma_mc_func * pix_op,
1981 const int lowres = s->avctx->lowres;
1982 const int op_index = FFMIN(lowres, 2);
1983 const int block_s = 8 >> lowres;
1984 const int s_mask = (2 << lowres) - 1;
     /* ">> lowres + 1" is ">> (lowres + 1)" -- chroma is subsampled 2x. */
1985 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
1986 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
1987 int emu = 0, src_x, src_y, offset, sx, sy;
1990 if (s->quarter_sample) {
1995 /* In case of 8X8, we construct a single chroma motion vector
1996 with a special rounding */
1997 mx = ff_h263_round_chroma(mx);
1998 my = ff_h263_round_chroma(my);
2002 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2003 src_y = s->mb_y * block_s + (my >> lowres + 1);
2005 offset = src_y * s->uvlinesize + src_x;
2006 ptr = ref_picture[1] + offset;
2007 if (s->flags & CODEC_FLAG_EMU_EDGE) {
2008 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2009 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2010 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
2011 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2012 ptr = s->edge_emu_buffer;
2016 sx = (sx << 2) >> lowres;
2017 sy = (sy << 2) >> lowres;
2018 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
     /* Cr uses the same offset and subpel fraction as Cb. */
2020 ptr = ref_picture[2] + offset;
2022 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
2023 src_x, src_y, h_edge_pos, v_edge_pos);
2024 ptr = s->edge_emu_buffer;
2026 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2030 * motion compensation of a single macroblock
2032 * @param dest_y luma destination pointer
2033 * @param dest_cb chroma cb/u destination pointer
2034 * @param dest_cr chroma cr/v destination pointer
2035 * @param dir direction (0->forward, 1->backward)
2036 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2037 * @param pix_op halfpel motion compensation function (average or put normally)
2038 * the motion vectors are taken from s->mv and the MV type from s->mv_type
2040 static inline void MPV_motion_lowres(MpegEncContext *s,
2041 uint8_t *dest_y, uint8_t *dest_cb,
2043 int dir, uint8_t **ref_picture,
2044 h264_chroma_mc_func *pix_op)
2048 const int lowres = s->avctx->lowres;
2049 const int block_s = 8 >>lowres;
     /* Dispatch on MV type: 16x16, 4x 8x8, field, 16x8 or dual-prime. */
2054 switch (s->mv_type) {
2056 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2058 ref_picture, pix_op,
2059 s->mv[dir][0][0], s->mv[dir][0][1],
     /* MV_TYPE_8X8: four independent luma vectors, one averaged chroma
      * vector (see chroma_4mv_motion_lowres). */
2065 for (i = 0; i < 4; i++) {
2066 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2067 s->linesize) * block_s,
2068 ref_picture[0], 0, 0,
2069 (2 * mb_x + (i & 1)) * block_s,
2070 (2 * mb_y + (i >> 1)) * block_s,
2071 s->width, s->height, s->linesize,
2072 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2073 block_s, block_s, pix_op,
2074 s->mv[dir][i][0], s->mv[dir][i][1]);
2076 mx += s->mv[dir][i][0];
2077 my += s->mv[dir][i][1];
2080 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2081 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
     /* MV_TYPE_FIELD in a frame picture: top and bottom field handled
      * with separate vectors and field selects. */
2085 if (s->picture_structure == PICT_FRAME) {
2087 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2088 1, 0, s->field_select[dir][0],
2089 ref_picture, pix_op,
2090 s->mv[dir][0][0], s->mv[dir][0][1],
2093 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2094 1, 1, s->field_select[dir][1],
2095 ref_picture, pix_op,
2096 s->mv[dir][1][0], s->mv[dir][1][1],
     /* Field picture: an opposite-parity reference in the same frame
      * must come from the current picture, not the previous one. */
2099 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2100 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2101 ref_picture = s->current_picture_ptr->f.data;
2104 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2105 0, 0, s->field_select[dir][0],
2106 ref_picture, pix_op,
2108 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
     /* MV_TYPE_16X8: two vectors, upper and lower half. */
2112 for (i = 0; i < 2; i++) {
2113 uint8_t **ref2picture;
2115 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2116 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2117 ref2picture = ref_picture;
2119 ref2picture = s->current_picture_ptr->f.data;
2122 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2123 0, 0, s->field_select[dir][i],
2124 ref2picture, pix_op,
2125 s->mv[dir][i][0], s->mv[dir][i][1] +
2126 2 * block_s * i, block_s, mb_y >> 1);
2128 dest_y += 2 * block_s * s->linesize;
2129 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2130 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
     /* MV_TYPE_DMV (dual prime): put, then average the complementary
      * field prediction on top. */
2134 if (s->picture_structure == PICT_FRAME) {
2135 for (i = 0; i < 2; i++) {
2137 for (j = 0; j < 2; j++) {
2138 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2140 ref_picture, pix_op,
2141 s->mv[dir][2 * i + j][0],
2142 s->mv[dir][2 * i + j][1],
2145 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
2148 for (i = 0; i < 2; i++) {
2149 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2150 0, 0, s->picture_structure != i + 1,
2151 ref_picture, pix_op,
2152 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2153 2 * block_s, mb_y >> 1);
2155 // after put we make avg of the same block
2156 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
2158 // opposite parity is always in the same
2159 // frame if this is second field
2160 if (!s->first_field) {
2161 ref_picture = s->current_picture_ptr->f.data;
2172 * find the lowest MB row referenced in the MVs
2174 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
2176 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2177 int my, off, i, mvs;
     /* Field pictures fall through to the conservative answer below. */
2179 if (s->picture_structure != PICT_FRAME) goto unhandled;
2181 switch (s->mv_type) {
     /* Track the extreme vertical MV components (normalized to qpel),
      * convert the worst case to an MB-row offset (64 qpel per 16-pel MB
      * row), and clamp into the valid row range. */
2195 for (i = 0; i < mvs; i++) {
2196 my = s->mv[dir][i][1]<<qpel_shift;
2197 my_max = FFMAX(my_max, my);
2198 my_min = FFMIN(my_min, my);
2201 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2203 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
     /* unhandled cases: assume the whole reference frame may be read. */
2205 return s->mb_height-1;
2208 /* put block[] to dest[] */
/* Dequantize an intra block and write (not add) its IDCT to dest. */
2209 static inline void put_dct(MpegEncContext *s,
2210 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2212 s->dct_unquantize_intra(s, block, i, qscale);
2213 s->dsp.idct_put (dest, line_size, block);
2216 /* add block[] to dest[] */
/* Add the IDCT of an already-dequantized block to dest; skipped entirely
 * when the block has no coded coefficients (block_last_index < 0). */
2217 static inline void add_dct(MpegEncContext *s,
2218 DCTELEM *block, int i, uint8_t *dest, int line_size)
2220 if (s->block_last_index[i] >= 0) {
2221 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block, then add its IDCT to dest; skipped when the
 * block has no coded coefficients. */
2225 static inline void add_dequant_dct(MpegEncContext *s,
2226 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2228 if (s->block_last_index[i] >= 0) {
2229 s->dct_unquantize_inter(s, block, i, qscale);
2231 s->dsp.idct_add (dest, line_size, block);
2236 * Clean dc, ac, coded_block for the current non-intra MB.
2238 void ff_clean_intra_table_entries(MpegEncContext *s)
2240 int wrap = s->b8_stride;
2241 int xy = s->block_index[0];
     /* Reset luma DC predictors of the 2x2 group of 8x8 blocks to the
      * neutral value 1024 and clear their AC prediction rows/columns. */
2244 s->dc_val[0][xy + 1 ] =
2245 s->dc_val[0][xy + wrap] =
2246 s->dc_val[0][xy + 1 + wrap] = 1024;
2248 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2249 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2250 if (s->msmpeg4_version>=3) {
2251 s->coded_block[xy ] =
2252 s->coded_block[xy + 1 ] =
2253 s->coded_block[xy + wrap] =
2254 s->coded_block[xy + 1 + wrap] = 0;
     /* Chroma predictors are indexed per macroblock, not per 8x8 block. */
2257 wrap = s->mb_stride;
2258 xy = s->mb_x + s->mb_y * wrap;
2260 s->dc_val[2][xy] = 1024;
2262 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2263 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2265 s->mbintra_table[xy]= 0;
2268 /* generic function called after a macroblock has been parsed by the
2269 decoder or after it has been encoded by the encoder.
2271 Important variables used:
2272 s->mb_intra : true if intra macroblock
2273 s->mv_dir : motion vector direction
2274 s->mv_type : motion vector type
2275 s->mv : motion vector
2276 s->interlaced_dct : true if interlaced dct used (mpeg2)
2278 static av_always_inline
2279 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2280 int lowres_flag, int is_mpeg12)
2282 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XvMC acceleration reconstructs the MB itself; hand off and skip the rest. */
2283 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2284 ff_xvmc_decode_mb(s);//xvmc uses pblocks
2288 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2289 /* save DCT coefficients */
2291 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2292 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2294 for(j=0; j<64; j++){
/* Store in idct_permutation order so the dump matches raster layout. */
2295 *dct++ = block[i][s->dsp.idct_permutation[j]];
2296 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2298 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2302 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2304 /* update DC predictors for P macroblocks */
2306 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2307 if(s->mbintra_table[mb_xy])
2308 ff_clean_intra_table_entries(s);
/* Reset chroma DC predictor to mid-level for the configured DC precision. */
2312 s->last_dc[2] = 128 << s->intra_dc_precision;
2315 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2316 s->mbintra_table[mb_xy]=1;
2318 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2319 uint8_t *dest_y, *dest_cb, *dest_cr;
2320 int dct_linesize, dct_offset;
2321 op_pixels_func (*op_pix)[4];
2322 qpel_mc_func (*op_qpix)[16];
2323 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2324 const int uvlinesize = s->current_picture.f.linesize[1];
2325 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2326 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2328 /* avoid copy if macroblock skipped in last frame too */
2329 /* skip only during decoding as we might trash the buffers during encoding a bit */
2331 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2333 if (s->mb_skipped) {
2335 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2337 } else if(!s->current_picture.f.reference) {
2340 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT: double the stride and offset by one field line;
 * progressive: offset by a full block row. */
2344 dct_linesize = linesize << s->interlaced_dct;
2345 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2349 dest_cb= s->dest[1];
2350 dest_cr= s->dest[2];
/* Non-readable case: reconstruct into the scratchpad instead of the frame. */
2352 dest_y = s->b_scratchpad;
2353 dest_cb= s->b_scratchpad+16*linesize;
2354 dest_cr= s->b_scratchpad+32*linesize;
2358 /* motion handling */
2359 /* decoding or more than one mb_type (MC was already done otherwise) */
/* Frame threading: block until the referenced rows of the reference
 * frames have been decoded by the other thread(s). */
2362 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2363 if (s->mv_dir & MV_DIR_FORWARD) {
2364 ff_thread_await_progress(&s->last_picture_ptr->f,
2365 ff_MPV_lowest_referenced_row(s, 0),
2368 if (s->mv_dir & MV_DIR_BACKWARD) {
2369 ff_thread_await_progress(&s->next_picture_ptr->f,
2370 ff_MPV_lowest_referenced_row(s, 1),
/* Lowres MC path: reuses the H.264 chroma MC helpers; forward prediction
 * uses "put", a following backward prediction averages on top. */
2376 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2378 if (s->mv_dir & MV_DIR_FORWARD) {
2379 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2380 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2382 if (s->mv_dir & MV_DIR_BACKWARD) {
2383 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* Full-resolution MC: pick rounding variant, then put/avg as above. */
2386 op_qpix= s->me.qpel_put;
2387 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2388 op_pix = s->dsp.put_pixels_tab;
2390 op_pix = s->dsp.put_no_rnd_pixels_tab;
2392 if (s->mv_dir & MV_DIR_FORWARD) {
2393 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2394 op_pix = s->dsp.avg_pixels_tab;
2395 op_qpix= s->me.qpel_avg;
2397 if (s->mv_dir & MV_DIR_BACKWARD) {
2398 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2403 /* skip dequant / idct if we are really late ;) */
2404 if(s->avctx->skip_idct){
2405 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2406 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2407 || s->avctx->skip_idct >= AVDISCARD_ALL)
2411 /* add dct residue */
/* NOTE(review): this branch appears to select codecs whose coefficients
 * are still quantized at this point (dequant happens here); the others
 * take the plain add_dct path below — confirm against the decoders. */
2412 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2413 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2414 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2415 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2416 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2417 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2419 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2420 if (s->chroma_y_shift){
2421 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2422 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2426 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2427 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2428 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2429 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2432 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2433 add_dct(s, block[0], 0, dest_y , dct_linesize);
2434 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2435 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2436 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2438 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2439 if(s->chroma_y_shift){//Chroma420
2440 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2441 add_dct(s, block[5], 5, dest_cr, uvlinesize);
/* 4:2:2 / 4:4:4: recompute stride/offset for the chroma plane. */
2444 dct_linesize = uvlinesize << s->interlaced_dct;
2445 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2447 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2448 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2449 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2450 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2451 if(!s->chroma_x_shift){//Chroma444
2452 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2453 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2454 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2455 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2460 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2461 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2464 /* dct only in intra block */
/* Intra MBs: write (idct_put) instead of accumulating a residual. */
2465 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2466 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2467 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2468 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2469 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2471 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2472 if(s->chroma_y_shift){
2473 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2474 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2478 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2479 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2480 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2481 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2485 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2486 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2487 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2488 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2490 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2491 if(s->chroma_y_shift){
2492 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2493 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2496 dct_linesize = uvlinesize << s->interlaced_dct;
2497 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2499 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2500 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2501 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2502 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2503 if(!s->chroma_x_shift){//Chroma444
2504 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2505 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2506 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2507 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* Reconstruction went to the scratchpad — copy it into the visible frame. */
2515 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2516 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2517 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Dispatch MB reconstruction to the always-inlined worker, specialized at
 * compile time on (lowres, is_mpeg12) so dead codec paths are removed. */
2522 void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2524 if(s->out_format == FMT_MPEG1) {
2525 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2526 else MPV_decode_mb_internal(s, block, 0, 1);
2529 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2530 else MPV_decode_mb_internal(s, block, 0, 0);
2534 * @param h is the normal height, this will be reduced automatically if needed for the last row
2536 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2538 const int field_pic= s->picture_structure != PICT_FRAME;
/* Pad the picture edges of this band for unrestricted MV prediction —
 * only for software-decoded reference frames without EMU_EDGE. */
2543 if (!s->avctx->hwaccel
2544 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2545 && s->unrestricted_mv
2546 && s->current_picture.f.reference
2548 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2549 int sides = 0, edge_h;
2550 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2551 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2552 if (y==0) sides |= EDGE_TOP;
2553 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
/* Clip the band height so we never pad past the coded picture bottom. */
2555 edge_h= FFMIN(h, s->v_edge_pos - y);
2557 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2558 s->linesize, s->h_edge_pos, edge_h,
2559 EDGE_WIDTH, EDGE_WIDTH, sides);
/* Chroma planes use the chroma-subsampled offsets and widths. */
2560 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2561 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2562 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2563 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2564 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2565 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2568 h= FFMIN(h, s->avctx->height - y);
2570 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2572 if (s->avctx->draw_horiz_band) {
2574 int offset[AV_NUM_DATA_POINTERS];
/* B-frames / low_delay / coded order display the current picture,
 * otherwise fall back to the last (display-order) reference. */
2577 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2578 src = &s->current_picture_ptr->f;
2579 else if(s->last_picture_ptr)
2580 src = &s->last_picture_ptr->f;
2584 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2585 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
/* Per-plane byte offsets of the band's first row within the frame. */
2588 offset[0]= y * s->linesize;
2590 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2591 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2597 s->avctx->draw_horiz_band(s->avctx, src, offset,
2598 y, s->picture_structure, h);
/* Set up per-MB block indices and destination plane pointers for the MB at
 * (s->mb_x, s->mb_y); must be called before reconstructing each MB row/MB. */
2602 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2603 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2604 const int uvlinesize = s->current_picture.f.linesize[1];
2605 const int mb_size= 4 - s->avctx->lowres;
/* Indices 0-3: the four luma 8x8 blocks (two per b8 row). */
2607 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2608 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2609 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2610 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* Indices 4-5: chroma blocks, stored after the luma plane of the table. */
2611 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2612 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2613 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* Destination pointers start one MB to the left; mb_size is log2 of the
 * (lowres-scaled) MB width in pixels. */
2615 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2616 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2617 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2619 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2621 if(s->picture_structure==PICT_FRAME){
2622 s->dest[0] += s->mb_y * linesize << mb_size;
2623 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2624 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* Field pictures: mb_y counts both fields, so halve it for the offset. */
2626 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2627 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2628 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2629 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/* Flush decoder state: release all internally/user-owned picture buffers and
 * reset position, parse context and buffered bitstream (e.g. on seek). */
2634 void ff_mpeg_flush(AVCodecContext *avctx){
2636 MpegEncContext *s = avctx->priv_data;
2638 if(s==NULL || s->picture==NULL)
2641 for(i=0; i<s->picture_count; i++){
2642 if (s->picture[i].f.data[0] &&
2643 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2644 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2645 free_frame_buffer(s, &s->picture[i]);
2647 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2649 s->mb_x= s->mb_y= 0;
/* Reset the bitstream parser so stale partial frames are discarded. */
2651 s->parse_context.state= -1;
2652 s->parse_context.frame_start_found= 0;
2653 s->parse_context.overread= 0;
2654 s->parse_context.overread_index= 0;
2655 s->parse_context.index= 0;
2656 s->parse_context.last_index= 0;
2657 s->bitstream_buffer_size=0;
/* MPEG-1 intra dequantizer: DC is multiplied by the luma/chroma DC scale,
 * AC coefficients by qscale * intra_matrix[j] >> 3, then forced odd
 * ((level-1)|1 — MPEG-1 mismatch control). */
2661 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2662 DCTELEM *block, int n, int qscale)
2664 int i, level, nCoeffs;
2665 const uint16_t *quant_matrix;
2667 nCoeffs= s->block_last_index[n];
/* DC term: luma vs chroma scale — presumably selected on block index n;
 * TODO(review) confirm against the elided condition. */
2670 block[0] = block[0] * s->y_dc_scale;
2672 block[0] = block[0] * s->c_dc_scale;
2673 /* XXX: only mpeg1 */
2674 quant_matrix = s->intra_matrix;
2675 for(i=1;i<=nCoeffs;i++) {
2676 int j= s->intra_scantable.permutated[i];
/* The two arms handle negative/positive levels symmetrically. */
2681 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2682 level = (level - 1) | 1;
2685 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2686 level = (level - 1) | 1;
/* MPEG-1 inter dequantizer: level' = ((2*|level|+1) * qscale * matrix) >> 4,
 * forced odd ((level-1)|1) for mismatch control; DC has no special case. */
2693 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2694 DCTELEM *block, int n, int qscale)
2696 int i, level, nCoeffs;
2697 const uint16_t *quant_matrix;
2699 nCoeffs= s->block_last_index[n];
2701 quant_matrix = s->inter_matrix;
2702 for(i=0; i<=nCoeffs; i++) {
/* intra_scantable is also used for inter blocks (same permutation). */
2703 int j= s->intra_scantable.permutated[i];
2708 level = (((level << 1) + 1) * qscale *
2709 ((int) (quant_matrix[j]))) >> 4;
2710 level = (level - 1) | 1;
2713 level = (((level << 1) + 1) * qscale *
2714 ((int) (quant_matrix[j]))) >> 4;
2715 level = (level - 1) | 1;
/* MPEG-2 intra dequantizer. Like the MPEG-1 variant but without the
 * (level-1)|1 oddification; with alternate_scan all 64 coeffs are covered. */
2722 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2723 DCTELEM *block, int n, int qscale)
2725 int i, level, nCoeffs;
2726 const uint16_t *quant_matrix;
2728 if(s->alternate_scan) nCoeffs= 63;
2729 else nCoeffs= s->block_last_index[n];
/* DC term: luma vs chroma scale (selector condition elided in this view). */
2732 block[0] = block[0] * s->y_dc_scale;
2734 block[0] = block[0] * s->c_dc_scale;
2735 quant_matrix = s->intra_matrix;
2736 for(i=1;i<=nCoeffs;i++) {
2737 int j= s->intra_scantable.permutated[i];
2742 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2745 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of the MPEG-2 intra dequantizer — same arithmetic as
 * dct_unquantize_mpeg2_intra_c; presumably differs in mismatch handling
 * outside the visible lines (TODO review: confirm the sum-parity step). */
2752 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2753 DCTELEM *block, int n, int qscale)
2755 int i, level, nCoeffs;
2756 const uint16_t *quant_matrix;
2759 if(s->alternate_scan) nCoeffs= 63;
2760 else nCoeffs= s->block_last_index[n];
2763 block[0] = block[0] * s->y_dc_scale;
2765 block[0] = block[0] * s->c_dc_scale;
2766 quant_matrix = s->intra_matrix;
2767 for(i=1;i<=nCoeffs;i++) {
2768 int j= s->intra_scantable.permutated[i];
2773 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2776 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter dequantizer: ((2*|level|+1) * qscale * inter_matrix) >> 4,
 * without the MPEG-1 per-coefficient oddification. */
2785 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2786 DCTELEM *block, int n, int qscale)
2788 int i, level, nCoeffs;
2789 const uint16_t *quant_matrix;
2792 if(s->alternate_scan) nCoeffs= 63;
2793 else nCoeffs= s->block_last_index[n];
2795 quant_matrix = s->inter_matrix;
2796 for(i=0; i<=nCoeffs; i++) {
/* intra_scantable is shared for inter blocks (same permutation). */
2797 int j= s->intra_scantable.permutated[i];
2802 level = (((level << 1) + 1) * qscale *
2803 ((int) (quant_matrix[j]))) >> 4;
2806 level = (((level << 1) + 1) * qscale *
2807 ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra dequantizer: level' = level*qmul ± qadd, with the DC
 * term scaled by the luma/chroma DC scale instead. */
2816 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2817 DCTELEM *block, int n, int qscale)
2819 int i, level, qmul, qadd;
2822 assert(s->block_last_index[n]>=0);
2828 block[0] = block[0] * s->y_dc_scale;
2830 block[0] = block[0] * s->c_dc_scale;
/* qadd is forced odd, per the H.263 reconstruction formula. */
2831 qadd = (qscale - 1) | 1;
/* raster_end maps the last coded index to its raster-scan end position. */
2838 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2840 for(i=1; i<=nCoeffs; i++) {
/* -qadd for negative levels, +qadd for positive (sign test elided). */
2844 level = level * qmul - qadd;
2846 level = level * qmul + qadd;
/* H.263-style inter dequantizer: same level*qmul ± qadd reconstruction as
 * the intra variant, but starting at coefficient 0 (no DC special case). */
2853 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2854 DCTELEM *block, int n, int qscale)
2856 int i, level, qmul, qadd;
2859 assert(s->block_last_index[n]>=0);
/* Odd reconstruction offset per the H.263 formula. */
2861 qadd = (qscale - 1) | 1;
2864 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2866 for(i=0; i<=nCoeffs; i++) {
2870 level = level * qmul - qadd;
2872 level = level * qmul + qadd;
2880 * set qscale and update qscale dependent variables.
2882 void ff_set_qscale(MpegEncContext * s, int qscale)
/* qscale is range-limited (upper bound 31 visible; lower clamp elided). */
2886 else if (qscale > 31)
/* Refresh derived values: chroma qscale and the DC scale factors. */
2890 s->chroma_qscale= s->chroma_qscale_table[qscale];
2892 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2893 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/* Report per-row decode progress (up to s->mb_y) for frame threading.
 * Only reported for non-B, non-partitioned frames when no error occurred. */
2896 void ff_MPV_report_decode_progress(MpegEncContext *s)
2898 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2899 ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);