2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the reference-C inverse-quantizers: one variant per
 * codec family (MPEG-1, MPEG-2, H.263-style) and per block class (intra vs
 * inter), plus a bitexact MPEG-2 intra variant selected under
 * CODEC_FLAG_BITEXACT (see ff_dct_common_init below).
 * NOTE(review): this chunk is a sampled extraction -- the original file's own
 * line numbers are baked into each line and interior lines are missing; the
 * code text is preserved byte-identically throughout. */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default luma-qscale -> chroma-qscale mapping: the identity (chroma qscale
 * equals luma qscale). Codecs with a non-linear mapping install their own
 * table into s->chroma_qscale_table instead.
 * NOTE(review): the closing "};" is not visible in this extraction. */
69 static const uint8_t ff_default_chroma_qscale_table[32] = {
70 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
72 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
/* MPEG-1 DC coefficient scale: constant 8 for every qscale (MPEG-1 has a
 * fixed 8-bit DC precision). Indexed by qscale; 128 entries. */
75 const uint8_t ff_mpeg1_dc_scale_table[128] = {
76 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
84 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale for intra_dc_precision == 1 (9-bit DC): constant 4.
 * Selected via ff_mpeg2_dc_scale_table[intra_dc_precision] below. */
87 static const uint8_t mpeg2_dc_scale_table1[128] = {
88 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
89 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
96 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale for intra_dc_precision == 2 (10-bit DC): constant 2. */
99 static const uint8_t mpeg2_dc_scale_table2[128] = {
100 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
101 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
106 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
107 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
108 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale for intra_dc_precision == 3 (11-bit DC): constant 1
 * (i.e. no scaling of the DC coefficient). */
111 static const uint8_t mpeg2_dc_scale_table3[128] = {
112 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
113 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
120 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Lookup: intra_dc_precision (0..3) -> per-qscale DC scale table.
 * Index 0 (8-bit precision) reuses the MPEG-1 constant-8 table. */
123 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
124 ff_mpeg1_dc_scale_table,
125 mpeg2_dc_scale_table1,
126 mpeg2_dc_scale_table2,
127 mpeg2_dc_scale_table3,
/* Pixel-format candidate lists advertised by 4:2:0 decoders (plain and
 * hwaccel-capable variants). NOTE(review): the initializer contents are not
 * visible in this extraction -- presumably YUV420P (+ hwaccel formats),
 * terminated by PIX_FMT_NONE; confirm against the full file. */
130 const enum PixelFormat ff_pixfmt_list_420[] = {
135 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/* Scan [p, end) for an MPEG start code (00 00 01 xx), carrying a rolling
 * 32-bit shift register in *state across calls so a start code split over
 * buffer boundaries is still found. Returns a pointer just past the start
 * code (clamped via FFMIN below), with *state holding the last 4 bytes.
 * NOTE(review): interior lines (loop body, returns) are missing from this
 * extraction; the visible skip logic advances p by 1..3 bytes depending on
 * which of the trailing bytes rule out a start code. */
143 const uint8_t *avpriv_mpv_find_start_code(const uint8_t *av_restrict p,
145 uint32_t *av_restrict state)
153 for (i = 0; i < 3; i++) {
154 uint32_t tmp = *state << 8;
155 *state = tmp + *(p++);
156 if (tmp == 0x100 || p == end)
161 if (p[-1] > 1 ) p += 3;
162 else if (p[-2] ) p += 2;
163 else if (p[-3]|(p[-1]-1)) p++;
170 p = FFMIN(p, end) - 4;
176 /* init common dct for both encoder and decoder */
/* Installs the C reference dequantizer function pointers, lets each
 * architecture override them (the ff_MPV_common_init_<arch> calls were
 * presumably guarded by HAVE_*/#if blocks not visible in this extraction),
 * then builds the permuted scan tables for the IDCT in use. */
177 av_cold int ff_dct_common_init(MpegEncContext *s)
179 ff_dsputil_init(&s->dsp, s->avctx);
181 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
182 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
183 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
184 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
185 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
// Bitexact decoding needs the exact-spec MPEG-2 intra dequant variant.
186 if (s->flags & CODEC_FLAG_BITEXACT)
187 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
188 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
191 ff_MPV_common_init_mmx(s);
193 ff_MPV_common_init_axp(s);
195 ff_MPV_common_init_mmi(s);
197 ff_MPV_common_init_arm(s);
199 ff_MPV_common_init_altivec(s);
201 ff_MPV_common_init_bfin(s);
204 /* load & permutate scantables
205 * note: only wmv uses different ones
207 if (s->alternate_scan) {
208 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
209 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
// else-branch (zigzag default) -- the "} else {" line is missing here.
211 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
212 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
214 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
215 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Shallow-copies *src into *dst (the copy statement itself is missing from
 * this extraction) and marks the destination as a copy so buffer-release
 * logic does not free the shared pixel data twice. */
220 void ff_copy_picture(Picture *dst, Picture *src)
223 dst->f.type = FF_BUFFER_TYPE_COPY;
227 * Release a frame buffer
/* Releases pic's pixel buffer back to the buffer pool and frees any hwaccel
 * private data. WMV3IMAGE/VC1IMAGE bypass the user/thread release callback
 * (see comment below); presumably an else pairs lines 235/237. */
229 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
231 /* Windows Media Image codecs allocate internal buffers with different
232 * dimensions; ignore user defined callbacks for these
234 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
235 ff_thread_release_buffer(s->avctx, &pic->f);
237 avcodec_default_release_buffer(s->avctx, &pic->f);
238 av_freep(&pic->f.hwaccel_picture_private);
242 * Allocate a frame buffer
/* Obtains pixel data for pic via get_buffer (thread-aware path unless the
 * codec is WMV3IMAGE/VC1IMAGE), allocating hwaccel private data first if the
 * active hwaccel requires it. Validates that the returned buffer has
 * consistent strides: linesize must match the context's established
 * luma/chroma strides, and the two chroma planes must share one stride.
 * Error paths free what was allocated; return statements are missing from
 * this extraction (presumably -1/0). */
244 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
248 if (s->avctx->hwaccel) {
249 assert(!pic->f.hwaccel_picture_private);
250 if (s->avctx->hwaccel->priv_data_size) {
251 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
252 if (!pic->f.hwaccel_picture_private) {
253 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
259 if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
260 r = ff_thread_get_buffer(s->avctx, &pic->f);
262 r = avcodec_default_get_buffer(s->avctx, &pic->f);
264 if (r < 0 || !pic->f.type || !pic->f.data[0]) {
265 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
266 r, pic->f.type, pic->f.data[0]);
267 av_freep(&pic->f.hwaccel_picture_private);
// Reject buffers whose stride changed mid-stream: the rest of the decoder
// caches s->linesize/s->uvlinesize.
271 if (s->linesize && (s->linesize != pic->f.linesize[0] ||
272 s->uvlinesize != pic->f.linesize[1])) {
273 av_log(s->avctx, AV_LOG_ERROR,
274 "get_buffer() failed (stride changed)\n");
275 free_frame_buffer(s, pic);
279 if (pic->f.linesize[1] != pic->f.linesize[2]) {
280 av_log(s->avctx, AV_LOG_ERROR,
281 "get_buffer() failed (uv stride mismatch)\n");
282 free_frame_buffer(s, pic);
290 * Allocate a Picture.
291 * The pixels are allocated/set by calling get_buffer() if shared = 0
/* Also allocates every per-macroblock side table the codecs need:
 * qscale/mb_type (with a border of s->mb_stride for error resilience),
 * skip table, motion vectors (b4 granularity for H.264, b8 for H.263-family
 * and when encoding or MV debugging), ref indices, encoder statistics
 * tables, optional DCT coefficient dump, and pan-scan info.
 * Returns via paths not visible here; the fail label frees the frame. */
293 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
295 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
297 // the + 1 is needed so memset(,,stride*height) does not sig11
299 const int mb_array_size = s->mb_stride * s->mb_height;
300 const int b8_array_size = s->b8_stride * s->mb_height * 2;
301 const int b4_array_size = s->b4_stride * s->mb_height * 4;
// shared == 1: caller supplied the pixels; only tag the buffer type.
306 assert(pic->f.data[0]);
307 assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
308 pic->f.type = FF_BUFFER_TYPE_SHARED;
310 assert(!pic->f.data[0]);
312 if (alloc_frame_buffer(s, pic) < 0)
// Cache the strides get_buffer() chose; later allocations assume them.
315 s->linesize = pic->f.linesize[0];
316 s->uvlinesize = pic->f.linesize[1];
// Side tables are allocated once (first use) and reused across frames.
319 if (pic->f.qscale_table == NULL) {
// Encoder-only statistics (variance / mc variance / mean per MB);
// presumably guarded by if (s->encoding) in the missing lines.
321 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
322 mb_array_size * sizeof(int16_t), fail)
323 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var,
324 mb_array_size * sizeof(int16_t), fail)
325 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean,
326 mb_array_size * sizeof(int8_t ), fail)
329 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table,
330 mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
331 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base,
332 (big_mb_num + s->mb_stride) * sizeof(uint8_t),
334 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base,
335 (big_mb_num + s->mb_stride) * sizeof(uint32_t),
// Point into the tables past a 2-row+1 border so [-1] accesses are valid.
337 pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
338 pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
339 if (s->out_format == FMT_H264) {
// H.264: 4x4-block MV granularity (motion_subsample_log2 = 2).
340 for (i = 0; i < 2; i++) {
341 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
342 2 * (b4_array_size + 4) * sizeof(int16_t),
344 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
345 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
346 4 * mb_array_size * sizeof(uint8_t), fail)
348 pic->f.motion_subsample_log2 = 2;
349 } else if (s->out_format == FMT_H263 || s->encoding ||
350 (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
// H.263-family / encoder / MV-visualisation: 8x8-block granularity.
351 for (i = 0; i < 2; i++) {
352 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i],
353 2 * (b8_array_size + 4) * sizeof(int16_t),
355 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
356 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
357 4 * mb_array_size * sizeof(uint8_t), fail)
359 pic->f.motion_subsample_log2 = 3;
361 if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
362 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff,
363 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
365 pic->f.qstride = s->mb_stride;
366 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan,
367 1 * sizeof(AVPanScan), fail)
373 fail: // for the FF_ALLOCZ_OR_GOTO macro
375 free_frame_buffer(s, pic);
380 * Deallocate a picture.
/* Frees the pixel buffer (unless the pixels are externally owned, i.e.
 * FF_BUFFER_TYPE_SHARED) and every side table ff_alloc_picture allocated.
 * av_freep() on never-allocated members is safe (frees NULL, resets ptr).
 * For shared pictures only the data pointers are cleared, not freed. */
382 static void free_picture(MpegEncContext *s, Picture *pic)
386 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
387 free_frame_buffer(s, pic);
390 av_freep(&pic->mb_var);
391 av_freep(&pic->mc_mb_var);
392 av_freep(&pic->mb_mean);
393 av_freep(&pic->f.mbskip_table);
394 av_freep(&pic->qscale_table_base);
395 av_freep(&pic->mb_type_base);
396 av_freep(&pic->f.dct_coeff);
397 av_freep(&pic->f.pan_scan);
398 pic->f.mb_type = NULL;
399 for (i = 0; i < 2; i++) {
400 av_freep(&pic->motion_val_base[i]);
401 av_freep(&pic->f.ref_index[i]);
404 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
405 for (i = 0; i < 4; i++) {
407 pic->f.data[i] = NULL;
/* Allocates the per-slice-thread scratch state of an MpegEncContext:
 * edge-emulation buffer, motion-estimation scratchpads and maps, optional
 * noise-reduction error sums, DCT block storage, and (H.263-family only)
 * the AC prediction values. Called once per thread context; on failure
 * returns -1 and relies on ff_MPV_common_end for cleanup. */
413 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
415 int y_size = s->b8_stride * (2 * s->mb_height + 1);
416 int c_size = s->mb_stride * (s->mb_height + 1);
417 int yc_size = y_size + 2 * c_size;
420 // edge emu needs blocksize + filter length - 1
421 // (= 17x17 for halfpel / 21x21 for h264)
422 FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
423 (s->width + 95) * 2 * 21 * 4, fail); // (width + edge + align)*interlaced*MBsize*tolerance
425 // FIXME should be linesize instead of s->width * 2
426 // but that is not known before get_buffer()
427 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
428 (s->width + 95) * 4 * 16 * 2 * sizeof(uint8_t), fail)
// All scratchpads alias one allocation; obmc starts 16 bytes in.
429 s->me.temp = s->me.scratchpad;
430 s->rd_scratchpad = s->me.scratchpad;
431 s->b_scratchpad = s->me.scratchpad;
432 s->obmc_scratchpad = s->me.scratchpad + 16;
434 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
435 ME_MAP_SIZE * sizeof(uint32_t), fail)
436 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
437 ME_MAP_SIZE * sizeof(uint32_t), fail)
438 if (s->avctx->noise_reduction) {
439 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
440 2 * 64 * sizeof(int), fail)
// 12 blocks of 64 DCTELEMs, x2 -- covers 4:4:4 interlaced worst case.
443 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
444 s->block = s->blocks[0];
446 for (i = 0; i < 12; i++) {
447 s->pblocks[i] = &s->block[i];
450 if (s->out_format == FMT_H263) {
452 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
453 yc_size * sizeof(int16_t) * 16, fail);
// ac_val[0]=luma, [1]/[2]=chroma planes, each offset past a 1-row border.
454 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
455 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
456 s->ac_val[2] = s->ac_val[1] + c_size;
461 return -1; // free() through ff_MPV_common_end()
/* Frees everything init_duplicate_context allocated. Only me.scratchpad is
 * freed of the aliased scratch pointers; the aliases (temp/rd/b/obmc) are
 * reset in the missing lines (obmc_scratchpad reset is visible). */
464 static void free_duplicate_context(MpegEncContext *s)
469 av_freep(&s->edge_emu_buffer);
470 av_freep(&s->me.scratchpad);
474 s->obmc_scratchpad = NULL;
476 av_freep(&s->dct_error_sum);
477 av_freep(&s->me.map);
478 av_freep(&s->me.score_map);
479 av_freep(&s->blocks);
480 av_freep(&s->ac_val_base);
/* Copies the per-thread scratch pointers from src into bak so that
 * ff_update_duplicate_context can memcpy the whole context and then restore
 * them. NOTE(review): most COPY() lines are missing from this extraction. */
484 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
486 #define COPY(a) bak->a = src->a
487 COPY(edge_emu_buffer);
492 COPY(obmc_scratchpad);
499 COPY(me.map_generation);
/* Syncs a slice-thread context with the master: bulk-copies *src over *dst
 * while preserving dst's own scratch buffers (saved/restored via
 * backup_duplicate_context) and re-pointing pblocks into dst's own block
 * storage. */
511 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
515 // FIXME copy only needed parts
517 backup_duplicate_context(&bak, dst);
518 memcpy(dst, src, sizeof(MpegEncContext));
519 backup_duplicate_context(dst, &bak);
520 for (i = 0; i < 12; i++) {
521 dst->pblocks[i] = &dst->block[i];
523 // STOP_TIMER("update_duplicate_context")
524 // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/* Frame-threading state transfer: copies decoding state from the source
 * thread's context (s1) into the destination (s). On first use the whole
 * context is cloned and re-initialized with a private picture range; on
 * subsequent calls only the fields that change per frame are copied
 * (dimensions, picture numbering, picture pointers rebased into s's own
 * picture array, error-resilience info, MPEG-4 header-derived fields via a
 * struct-span memcpy, pending bitstream buffer, interlacing info, and
 * last-picture bookkeeping). */
527 int ff_mpeg_update_thread_context(AVCodecContext *dst,
528 const AVCodecContext *src)
530 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
535 // FIXME can parameters change on I-frames?
536 // in that case dst may need a reinit
537 if (!s->context_initialized) {
538 memcpy(s, s1, sizeof(MpegEncContext));
// The clone must not share the source's bitstream buffer.
541 s->bitstream_buffer = NULL;
542 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
544 if (s1->context_initialized){
// Give this thread its own slice of the picture array.
545 s->picture_range_start += MAX_PICTURE_COUNT;
546 s->picture_range_end += MAX_PICTURE_COUNT;
547 ff_MPV_common_init(s);
551 s->avctx->coded_height = s1->avctx->coded_height;
552 s->avctx->coded_width = s1->avctx->coded_width;
553 s->avctx->width = s1->avctx->width;
554 s->avctx->height = s1->avctx->height;
556 s->coded_picture_number = s1->coded_picture_number;
557 s->picture_number = s1->picture_number;
558 s->input_picture_number = s1->input_picture_number;
560 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
// Copy the contiguous Picture-struct span [last_picture..last_picture_ptr).
561 memcpy(&s->last_picture, &s1->last_picture,
562 (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
// Translate s1's picture pointers into s's picture array.
564 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
565 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
566 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
568 // Error/bug resilience
569 s->next_p_frame_damaged = s1->next_p_frame_damaged;
570 s->workaround_bugs = s1->workaround_bugs;
571 s->padding_bug_score = s1->padding_bug_score;
// MPEG-4 header-derived field span [time_increment_bits..shape).
574 memcpy(&s->time_increment_bits, &s1->time_increment_bits,
575 (char *) &s1->shape - (char *) &s1->time_increment_bits);
578 s->max_b_frames = s1->max_b_frames;
579 s->low_delay = s1->low_delay;
580 s->dropable = s1->dropable;
582 // DivX handling (doesn't work)
583 s->divx_packed = s1->divx_packed;
585 if (s1->bitstream_buffer) {
586 if (s1->bitstream_buffer_size +
587 FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
588 av_fast_malloc(&s->bitstream_buffer,
589 &s->allocated_bitstream_buffer_size,
590 s1->allocated_bitstream_buffer_size);
591 s->bitstream_buffer_size = s1->bitstream_buffer_size;
592 memcpy(s->bitstream_buffer, s1->bitstream_buffer,
593 s1->bitstream_buffer_size);
594 memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
595 FF_INPUT_BUFFER_PADDING_SIZE);
598 // MPEG2/interlacing info
599 memcpy(&s->progressive_sequence, &s1->progressive_sequence,
600 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
602 if (!s1->first_field) {
603 s->last_pict_type = s1->pict_type;
604 if (s1->current_picture_ptr)
605 s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
607 if (s1->pict_type != AV_PICTURE_TYPE_B) {
608 s->last_non_b_pict_type = s1->pict_type;
616 * Set the given MpegEncContext to common defaults
617 * (same for encoding and decoding).
618 * The changed fields will not depend upon the
619 * prior state of the MpegEncContext.
621 void ff_MPV_common_defaults(MpegEncContext *s)
// MPEG-1 constant-8 DC scale and identity chroma-qscale mapping are the
// baseline; codecs override them per-header as needed.
623 s->y_dc_scale_table =
624 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
625 s->chroma_qscale_table = ff_default_chroma_qscale_table;
626 s->progressive_frame = 1;
627 s->progressive_sequence = 1;
628 s->picture_structure = PICT_FRAME;
630 s->coded_picture_number = 0;
631 s->picture_number = 0;
632 s->input_picture_number = 0;
634 s->picture_in_gop_number = 0;
639 s->picture_range_start = 0;
640 s->picture_range_end = MAX_PICTURE_COUNT;
642 s->slice_context_count = 1;
646 * Set the given MpegEncContext to defaults for decoding.
647 * the changed fields will not depend upon
648 * the prior state of the MpegEncContext.
650 void ff_MPV_decode_defaults(MpegEncContext *s)
// Currently identical to the common defaults; decoder-only defaults, if
// any, are not visible in this extraction.
652 ff_MPV_common_defaults(s);
656 * init common structure for both encoder and decoder.
657 * this assumes that some variables like width/height are already set
/* Master allocator for an MpegEncContext: derives macroblock geometry from
 * width/height, then allocates MV tables, per-MB type/qscale/skip tables,
 * quantizer matrices, the picture array, error-resilience buffers,
 * H.263/MPEG-4-specific tables, and finally one duplicate (slice-thread)
 * context per slice. On any allocation failure, jumps to the fail path which
 * calls ff_MPV_common_end for full cleanup. */
659 av_cold int ff_MPV_common_init(MpegEncContext *s)
661 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
662 int nb_slices = (HAVE_THREADS &&
663 s->avctx->active_thread_type & FF_THREAD_SLICE) ?
664 s->avctx->thread_count : 1;
666 if (s->encoding && s->avctx->slices)
667 nb_slices = s->avctx->slices;
// Interlaced MPEG-2 rounds mb_height to a multiple of 2 MB rows (32 px);
// H.264 manages its own mb_height elsewhere.
669 if (s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
670 s->mb_height = (s->height + 31) / 32 * 2;
671 else if (s->codec_id != CODEC_ID_H264)
672 s->mb_height = (s->height + 15) / 16;
674 if (s->avctx->pix_fmt == PIX_FMT_NONE) {
675 av_log(s->avctx, AV_LOG_ERROR,
676 "decoding to PIX_FMT_NONE is not supported.\n");
// Clamp slice count: never more slices than MB rows or MAX_THREADS.
680 if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
683 max_slices = FFMIN(MAX_THREADS, s->mb_height);
685 max_slices = MAX_THREADS;
686 av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
687 " reducing to %d\n", nb_slices, max_slices);
688 nb_slices = max_slices;
691 if ((s->width || s->height) &&
692 av_image_check_size(s->width, s->height, 0, s->avctx))
695 ff_dct_common_init(s);
697 s->flags = s->avctx->flags;
698 s->flags2 = s->avctx->flags2;
// Strides carry a +1 border column used by prediction/error concealment.
700 s->mb_width = (s->width + 15) / 16;
701 s->mb_stride = s->mb_width + 1;
702 s->b8_stride = s->mb_width * 2 + 1;
703 s->b4_stride = s->mb_width * 4 + 1;
704 mb_array_size = s->mb_height * s->mb_stride;
705 mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
707 /* set chroma shifts */
708 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift,
711 /* set default edge pos, will be overridden in decode_header if needed */
712 s->h_edge_pos = s->mb_width * 16;
713 s->v_edge_pos = s->mb_height * 16;
715 s->mb_num = s->mb_width * s->mb_height;
// block_wrap: stride per plane for DC/AC prediction (entries 0-2 and 4 are
// in the missing lines).
720 s->block_wrap[3] = s->b8_stride;
722 s->block_wrap[5] = s->mb_stride;
724 y_size = s->b8_stride * (2 * s->mb_height + 1);
725 c_size = s->mb_stride * (s->mb_height + 1);
726 yc_size = y_size + 2 * c_size;
728 /* convert fourcc to upper case */
729 s->codec_tag = avpriv_toupper4(s->avctx->codec_tag);
730 s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag);
732 s->avctx->coded_frame = &s->current_picture.f;
734 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
735 for (y = 0; y < s->mb_height; y++)
736 for (x = 0; x < s->mb_width; x++)
737 s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
739 s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
742 /* Allocate MV tables */
// Presumably encoder-only (if (s->encoding)) in the missing guard lines.
743 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
744 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
745 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
746 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
747 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
748 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
// Working pointers skip the one-row+1 border of each MV table.
749 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
750 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
751 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
752 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
753 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
754 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
756 if(s->msmpeg4_version){
757 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
759 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
761 /* Allocate MB type table */
762 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
764 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
// Quantizer matrices: 64 coefficients x 32 qscales, plus 16-bit variants.
766 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
767 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix , 64*32 * sizeof(int), fail)
768 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
769 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16 , 64*32*2 * sizeof(uint16_t), fail)
770 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
771 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16 , 64*32*2 * sizeof(uint16_t), fail)
772 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
773 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
775 if(s->avctx->noise_reduction){
776 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
779 FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
780 mb_array_size * sizeof(float), fail);
781 FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
782 mb_array_size * sizeof(float), fail);
// One picture slot set per potential frame thread.
785 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
786 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
787 s->picture_count * sizeof(Picture), fail);
788 for (i = 0; i < s->picture_count; i++) {
789 avcodec_get_frame_defaults(&s->picture[i].f);
792 FF_ALLOC_OR_GOTO(s->avctx, s->er_temp_buffer,
793 mb_array_size * sizeof(uint8_t), fail);
794 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table,
795 mb_array_size * sizeof(uint8_t), fail);
797 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
798 /* interlaced direct mode decoding tables */
799 for (i = 0; i < 2; i++) {
801 for (j = 0; j < 2; j++) {
802 for (k = 0; k < 2; k++) {
803 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
804 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
806 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
807 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
808 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
810 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
813 if (s->out_format == FMT_H263) {
815 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
816 s->coded_block = s->coded_block_base + s->b8_stride + 1;
818 /* cbp, ac_pred, pred_dir */
819 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
820 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
823 if (s->h263_pred || s->h263_plus || !s->encoding) {
825 // MN: we need these for error resilience of intra-frames
826 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
827 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
828 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
829 s->dc_val[2] = s->dc_val[1] + c_size;
// 1024 = neutral DC predictor (128 << 3).
830 for (i = 0; i < yc_size; i++)
831 s->dc_val_base[i] = 1024;
834 /* which mb is a intra block */
835 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
836 memset(s->mbintra_table, 1, mb_array_size);
838 /* init macroblock skip table */
839 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
840 // Note the + 1 is for a quicker mpeg4 slice_end detection
842 s->parse_context.state = -1;
844 s->context_initialized = 1;
845 s->thread_context[0] = s;
847 // if (s->width && s->height) {
// Each additional slice context starts as a clone of the master, then gets
// its own scratch buffers and MB-row range [start_mb_y, end_mb_y).
849 for (i = 1; i < nb_slices; i++) {
850 s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
851 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
854 for (i = 0; i < nb_slices; i++) {
855 if (init_duplicate_context(s->thread_context[i], s) < 0)
857 s->thread_context[i]->start_mb_y =
858 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
859 s->thread_context[i]->end_mb_y =
860 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
863 if (init_duplicate_context(s, s) < 0)
866 s->end_mb_y = s->mb_height;
868 s->slice_context_count = nb_slices;
873 ff_MPV_common_end(s);
877 /* free the common structure for both encoder and decoder */
/* Mirror of ff_MPV_common_init: tears down slice-thread contexts, then
 * frees every table/buffer in reverse, releases all pictures (unless this
 * context is a frame-thread copy), and resets state so the context can be
 * re-initialized. av_freep on never-allocated members is a safe no-op. */
878 void ff_MPV_common_end(MpegEncContext *s)
882 if (s->slice_context_count > 1) {
883 for (i = 0; i < s->slice_context_count; i++) {
884 free_duplicate_context(s->thread_context[i]);
// thread_context[0] aliases s itself, so only i >= 1 are heap-freed.
886 for (i = 1; i < s->slice_context_count; i++) {
887 av_freep(&s->thread_context[i]);
889 s->slice_context_count = 1;
890 } else free_duplicate_context(s);
892 av_freep(&s->parse_context.buffer);
893 s->parse_context.buffer_size = 0;
895 av_freep(&s->mb_type);
896 av_freep(&s->p_mv_table_base);
897 av_freep(&s->b_forw_mv_table_base);
898 av_freep(&s->b_back_mv_table_base);
899 av_freep(&s->b_bidir_forw_mv_table_base);
900 av_freep(&s->b_bidir_back_mv_table_base);
901 av_freep(&s->b_direct_mv_table_base);
902 s->p_mv_table = NULL;
903 s->b_forw_mv_table = NULL;
904 s->b_back_mv_table = NULL;
905 s->b_bidir_forw_mv_table = NULL;
906 s->b_bidir_back_mv_table = NULL;
907 s->b_direct_mv_table = NULL;
908 for (i = 0; i < 2; i++) {
909 for (j = 0; j < 2; j++) {
910 for (k = 0; k < 2; k++) {
911 av_freep(&s->b_field_mv_table_base[i][j][k]);
912 s->b_field_mv_table[i][j][k] = NULL;
914 av_freep(&s->b_field_select_table[i][j]);
915 av_freep(&s->p_field_mv_table_base[i][j]);
916 s->p_field_mv_table[i][j] = NULL;
918 av_freep(&s->p_field_select_table[i]);
921 av_freep(&s->dc_val_base);
922 av_freep(&s->coded_block_base);
923 av_freep(&s->mbintra_table);
924 av_freep(&s->cbp_table);
925 av_freep(&s->pred_dir_table);
927 av_freep(&s->mbskip_table);
928 av_freep(&s->bitstream_buffer);
929 s->allocated_bitstream_buffer_size = 0;
931 av_freep(&s->avctx->stats_out);
932 av_freep(&s->ac_stats);
933 av_freep(&s->error_status_table);
934 av_freep(&s->er_temp_buffer);
935 av_freep(&s->mb_index2xy);
936 av_freep(&s->lambda_table);
// The chroma matrices may alias the luma ones; only free when distinct,
// then clear the (possibly aliasing) pointers before freeing the rest.
937 if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
938 if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
939 s->q_chroma_intra_matrix= NULL;
940 s->q_chroma_intra_matrix16= NULL;
941 av_freep(&s->q_intra_matrix);
942 av_freep(&s->q_inter_matrix);
943 av_freep(&s->q_intra_matrix16);
944 av_freep(&s->q_inter_matrix16);
945 av_freep(&s->input_picture);
946 av_freep(&s->reordered_input_picture);
947 av_freep(&s->dct_offset);
948 av_freep(&s->cplx_tab);
949 av_freep(&s->bits_tab);
// Frame-thread copies share the picture array with the master; only the
// original owner frees the pictures.
951 if (s->picture && !s->avctx->internal->is_copy) {
952 for (i = 0; i < s->picture_count; i++) {
953 free_picture(s, &s->picture[i]);
956 av_freep(&s->picture);
957 s->context_initialized = 0;
958 s->last_picture_ptr =
959 s->next_picture_ptr =
960 s->current_picture_ptr = NULL;
961 s->linesize = s->uvlinesize = 0;
963 for (i = 0; i < 3; i++)
964 av_freep(&s->visualization_buffer[i]);
966 if (!(s->avctx->active_thread_type & FF_THREAD_FRAME))
967 avcodec_default_free_buffers(s->avctx);
/* Builds an RLTable's derived lookup arrays (max_level[run], max_run[level],
 * index_run[run]) for both the "not last" and "last" coefficient halves of
 * the table. Results are stored either in caller-provided static storage
 * (carved into three sub-arrays) or in freshly malloc'd buffers.
 * NOTE(review): loop braces, start/end setup and the static/dynamic
 * if/else lines are missing from this extraction. */
970 void ff_init_rl(RLTable *rl,
971 uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
973 int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
974 uint8_t index_run[MAX_RUN + 1];
975 int last, run, level, start, end, i;
977 /* If table is static, we can quit if rl->max_level[0] is not NULL */
978 if (static_store && rl->max_level[0])
981 /* compute max_level[], max_run[] and index_run[] */
982 for (last = 0; last < 2; last++) {
// rl->n is used as the "unset" sentinel for index_run.
991 memset(max_level, 0, MAX_RUN + 1);
992 memset(max_run, 0, MAX_LEVEL + 1);
993 memset(index_run, rl->n, MAX_RUN + 1);
994 for (i = start; i < end; i++) {
995 run = rl->table_run[i];
996 level = rl->table_level[i];
997 if (index_run[run] == rl->n)
999 if (level > max_level[run])
1000 max_level[run] = level;
1001 if (run > max_run[level])
1002 max_run[level] = run;
// Static path: slice the provided storage; dynamic path: av_malloc.
1005 rl->max_level[last] = static_store[last];
1007 rl->max_level[last] = av_malloc(MAX_RUN + 1);
1008 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1010 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1012 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1013 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1015 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1017 rl->index_run[last] = av_malloc(MAX_RUN + 1);
1018 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precomputes, for each qscale q in [0,32), an RL_VLC table that folds the
 * dequantization (level * qmul + qadd) into the VLC decode step, so the
 * decoder gets run/level/len in a single lookup. Special cases: len==0 is
 * an illegal code, len<0 means more bits needed, code==rl->n is the escape
 * code; their handling lines are missing from this extraction. */
1022 void ff_init_vlc_rl(RLTable *rl)
1026 for (q = 0; q < 32; q++) {
// qadd = (q-1)|1 is the standard H.263 rounding term (qmul setup missing).
1028 int qadd = (q - 1) | 1;
1034 for (i = 0; i < rl->vlc.table_size; i++) {
1035 int code = rl->vlc.table[i][0];
1036 int len = rl->vlc.table[i][1];
1039 if (len == 0) { // illegal code
1042 } else if (len < 0) { // more bits needed
1046 if (code == rl->n) { // esc
1050 run = rl->table_run[code] + 1;
1051 level = rl->table_level[code] * qmul + qadd;
// Codes past rl->last carry the "last coefficient" flag via run += 192.
1052 if (code >= rl->last) run += 192;
1055 rl->rl_vlc[q][i].len = len;
1056 rl->rl_vlc[q][i].level = level;
1057 rl->rl_vlc[q][i].run = run;
/* Frees the pixel buffers of all pictures that are not reference frames,
 * are owned by this context (owner2), and -- unless remove_current is set --
 * are not the picture currently being decoded. */
1062 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1066 /* release non reference frames */
1067 for (i = 0; i < s->picture_count; i++) {
1068 if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1069 (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1070 (remove_current || &s->picture[i] != s->current_picture_ptr)
1071 /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1072 free_frame_buffer(s, &s->picture[i]);
/* Finds a free slot in this context's picture range. Search order favors
 * slots whose buffer type matches the request (type==0 for shared,
 * type!=0 otherwise), falling back to any empty slot; returns are in the
 * missing lines. Exhaustion is a hard internal error (see rationale below);
 * presumably followed by abort()/av_assert in the missing tail. */
1077 int ff_find_unused_picture(MpegEncContext *s, int shared)
1082 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1083 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1087 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1088 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
1091 for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1092 if (s->picture[i].f.data[0] == NULL)
1097 av_log(s->avctx, AV_LOG_FATAL,
1098 "Internal error, picture buffer overflow\n");
1099 /* We could return -1, but the codec would crash trying to draw into a
1100 * non-existing frame anyway. This is safer than waiting for a random crash.
1101 * Also the return of this is never useful, an encoder must only allocate
1102 * as much as allowed in the specification. This has no relationship to how
1103 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1104 * enough for such valid streams).
1105 * Plus, a decoder has to check stream validity and remove frames if too
1106 * many reference frames are around. Waiting for "OOM" is not correct at
1107 * all. Similarly, missing reference frames have to be replaced by
1108 * interpolated/MC frames, anything else is a bug in the codec ...
/*
 * Recompute the per-coefficient DCT noise-reduction offsets (intra and
 * inter separately) from the running error statistics.  Once more than
 * 2^16 blocks have been accumulated, both the error sums and the count
 * are halved so the averages keep tracking recent frames.
 */
1114 static void update_noise_reduction(MpegEncContext *s)
1118 for (intra = 0; intra < 2; intra++) {
1119 if (s->dct_count[intra] > (1 << 16)) {
1120 for (i = 0; i < 64; i++) {
1121 s->dct_error_sum[intra][i] >>= 1;
1123 s->dct_count[intra] >>= 1;
1126 for (i = 0; i < 64; i++) {
/* offset = noise_reduction * count / error_sum, with rounding; the
 * "+ 1" in the divisor avoids division by zero */
1127 s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1128 s->dct_count[intra] +
1129 s->dct_error_sum[intra][i] / 2) /
1130 (s->dct_error_sum[intra][i] + 1);
/*
 * Per-frame setup shared by the encoder and all mpegvideo-based decoders:
 * releases stale reference frames, allocates the current (and, if needed,
 * dummy last/next) pictures, wires up current/last/next picture pointers,
 * adjusts data/linesize for field pictures, and selects the dequantizers.
 * NOTE(review): this listing is elided (gaps in the embedded numbering) —
 * error returns, several closing braces and some statements are missing.
 */
1136 * generic function for encode/decode called after coding/decoding
1137 * the header and before a frame is coded/decoded.
1139 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1145 assert(s->last_picture_ptr == NULL || s->out_format != FMT_H264 ||
1146 s->codec_id == CODEC_ID_SVQ3);
1148 if (!ff_thread_can_start_frame(avctx)) {
1149 av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1153 /* mark & release old frames */
1154 if (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3) {
1155 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1156 s->last_picture_ptr != s->next_picture_ptr &&
1157 s->last_picture_ptr->f.data[0]) {
1158 if (s->last_picture_ptr->owner2 == s)
1159 free_frame_buffer(s, s->last_picture_ptr);
1162 /* release forgotten pictures */
1163 /* if (mpeg124/h263) */
/* a referenced picture that is neither last nor next should not exist;
 * warn (except under frame threading) and reclaim its buffer */
1165 for (i = 0; i < s->picture_count; i++) {
1166 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1167 &s->picture[i] != s->last_picture_ptr &&
1168 &s->picture[i] != s->next_picture_ptr &&
1169 s->picture[i].f.reference) {
1170 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1171 av_log(avctx, AV_LOG_ERROR,
1172 "releasing zombie picture\n");
1173 free_frame_buffer(s, &s->picture[i]);
1180 ff_release_unused_pictures(s, 1);
1182 if (s->current_picture_ptr &&
1183 s->current_picture_ptr->f.data[0] == NULL) {
1184 // we already have a unused image
1185 // (maybe it was set before reading the header)
1186 pic = s->current_picture_ptr;
1188 i = ff_find_unused_picture(s, 0);
1191 pic = &s->picture[i];
1194 pic->f.reference = 0;
/* H.264 marks the reference with the picture structure; other codecs
 * use 3 (both fields) for non-B pictures */
1196 if (s->codec_id == CODEC_ID_H264)
1197 pic->f.reference = s->picture_structure;
1198 else if (s->pict_type != AV_PICTURE_TYPE_B)
1199 pic->f.reference = 3;
1202 pic->f.coded_picture_number = s->coded_picture_number++;
1204 if (ff_alloc_picture(s, pic, 0) < 0)
1207 s->current_picture_ptr = pic;
1208 // FIXME use only the vars from current_pic
1209 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1210 if (s->codec_id == CODEC_ID_MPEG1VIDEO ||
1211 s->codec_id == CODEC_ID_MPEG2VIDEO) {
/* for MPEG-1/2 field pictures, derive top_field_first from which field
 * is being decoded first */
1212 if (s->picture_structure != PICT_FRAME)
1213 s->current_picture_ptr->f.top_field_first =
1214 (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1216 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
1217 !s->progressive_sequence;
1218 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1221 s->current_picture_ptr->f.pict_type = s->pict_type;
1222 // if (s->flags && CODEC_FLAG_QSCALE)
1223 // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1224 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1226 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* non-B pictures shift the reference window: next becomes last, and the
 * new picture becomes next */
1228 if (s->pict_type != AV_PICTURE_TYPE_B) {
1229 s->last_picture_ptr = s->next_picture_ptr;
1231 s->next_picture_ptr = s->current_picture_ptr;
1233 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1234 s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1235 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1236 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1237 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1238 s->pict_type, s->dropable); */
1240 if (s->codec_id != CODEC_ID_H264) {
/* no usable last picture (stream starts on a non-keyframe, or a field
 * keyframe): synthesize a dummy one so MC has something to read */
1241 if ((s->last_picture_ptr == NULL ||
1242 s->last_picture_ptr->f.data[0] == NULL) &&
1243 (s->pict_type != AV_PICTURE_TYPE_I ||
1244 s->picture_structure != PICT_FRAME)) {
1245 if (s->pict_type != AV_PICTURE_TYPE_I)
1246 av_log(avctx, AV_LOG_ERROR,
1247 "warning: first frame is no keyframe\n");
1248 else if (s->picture_structure != PICT_FRAME)
1249 av_log(avctx, AV_LOG_INFO,
1250 "allocate dummy last picture for field based first keyframe\n");
1252 /* Allocate a dummy frame */
1253 i = ff_find_unused_picture(s, 0);
1256 s->last_picture_ptr = &s->picture[i];
1257 s->last_picture_ptr->f.key_frame = 0;
1258 if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1259 s->last_picture_ptr = NULL;
/* FLV1/H.263: fill luma with 16 (black) so the dummy is deterministic */
1263 if(s->codec_id == CODEC_ID_FLV1 || s->codec_id == CODEC_ID_H263){
1264 for(i=0; i<avctx->height; i++)
1265 memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
1268 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1269 ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1270 s->last_picture_ptr->f.reference = 3;
/* B picture without a next reference: synthesize a dummy next too */
1272 if ((s->next_picture_ptr == NULL ||
1273 s->next_picture_ptr->f.data[0] == NULL) &&
1274 s->pict_type == AV_PICTURE_TYPE_B) {
1275 /* Allocate a dummy frame */
1276 i = ff_find_unused_picture(s, 0);
1279 s->next_picture_ptr = &s->picture[i];
1280 s->next_picture_ptr->f.key_frame = 0;
1281 if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1282 s->next_picture_ptr = NULL;
1285 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1286 ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1287 s->next_picture_ptr->f.reference = 3;
1291 if (s->last_picture_ptr)
1292 ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1293 if (s->next_picture_ptr)
1294 ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1296 if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME) &&
1297 (s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3)) {
1298 if (s->next_picture_ptr)
1299 s->next_picture_ptr->owner2 = s;
1300 if (s->last_picture_ptr)
1301 s->last_picture_ptr->owner2 = s;
1304 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1305 s->last_picture_ptr->f.data[0]));
/* field pictures: point data at the right field and double the strides
 * so the rest of the code can treat the field as a half-height frame */
1307 if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1309 for (i = 0; i < 4; i++) {
1310 if (s->picture_structure == PICT_BOTTOM_FIELD) {
1311 s->current_picture.f.data[i] +=
1312 s->current_picture.f.linesize[i];
1314 s->current_picture.f.linesize[i] *= 2;
1315 s->last_picture.f.linesize[i] *= 2;
1316 s->next_picture.f.linesize[i] *= 2;
1320 s->err_recognition = avctx->err_recognition;
1322 /* set dequantizer, we can't do it during init as
1323 * it might change for mpeg4 and we can't do it in the header
1324 * decode as init is not called for mpeg4 there yet */
1325 if (s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1326 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1327 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1328 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1329 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1330 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1332 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1333 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1336 if (s->dct_error_sum) {
1337 assert(s->avctx->noise_reduction && s->encoding);
1338 update_noise_reduction(s);
1341 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1342 return ff_xvmc_field_start(s, avctx);
/*
 * Per-frame teardown: pads reference-frame edges (needed by unrestricted
 * MV prediction), records last picture type/lambda, syncs the
 * current_picture copy back into s->picture[], frees non-reference
 * buffers, publishes coded_frame and reports decode completion.
 * NOTE(review): listing elided — some statements/braces are missing.
 */
1347 /* generic function for encode/decode called after a
1348 * frame has been coded/decoded. */
1349 void ff_MPV_frame_end(MpegEncContext *s)
1352 /* redraw edges for the frame if decoding didn't complete */
1353 // just to make sure that all data is rendered.
1354 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1355 ff_xvmc_field_end(s);
1356 } else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1357 !s->avctx->hwaccel &&
1358 !(s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) &&
1359 s->unrestricted_mv &&
1360 s->current_picture.f.reference &&
1362 !(s->flags & CODEC_FLAG_EMU_EDGE)) {
/* chroma planes are padded with edge widths scaled by the subsampling */
1363 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1364 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1365 s->dsp.draw_edges(s->current_picture.f.data[0], s->current_picture.f.linesize[0],
1366 s->h_edge_pos, s->v_edge_pos,
1367 EDGE_WIDTH, EDGE_WIDTH,
1368 EDGE_TOP | EDGE_BOTTOM);
1369 s->dsp.draw_edges(s->current_picture.f.data[1], s->current_picture.f.linesize[1],
1370 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1371 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1372 EDGE_TOP | EDGE_BOTTOM);
1373 s->dsp.draw_edges(s->current_picture.f.data[2], s->current_picture.f.linesize[2],
1374 s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1375 EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1376 EDGE_TOP | EDGE_BOTTOM);
1381 s->last_pict_type = s->pict_type;
1382 s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f.quality;
1383 if (s->pict_type!= AV_PICTURE_TYPE_B) {
1384 s->last_non_b_pict_type = s->pict_type;
1387 /* copy back current_picture variables */
1388 for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1389 if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1390 s->picture[i] = s->current_picture;
1394 assert(i < MAX_PICTURE_COUNT);
1398 /* release non-reference frames */
1399 for (i = 0; i < s->picture_count; i++) {
1400 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1401 /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1402 free_frame_buffer(s, &s->picture[i]);
1406 // clear copies, to avoid confusion
1408 memset(&s->last_picture, 0, sizeof(Picture));
1409 memset(&s->next_picture, 0, sizeof(Picture));
1410 memset(&s->current_picture, 0, sizeof(Picture));
1412 s->avctx->coded_frame = &s->current_picture_ptr->f;
1414 if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
1415 ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
/*
 * Anti-aliased line drawing into a grayscale plane using 16.16 fixed-point
 * interpolation; the fractional part splits the added color between the
 * two neighboring pixels.  The two branches iterate along whichever axis
 * has the larger extent, swapping endpoints so iteration goes forward.
 * NOTE(review): listing elided — declarations of f/x/y/fr and some
 * braces/guards are not visible here.
 */
1420 * Draw a line from (ex, ey) -> (sx, sy).
1421 * @param w width of the image
1422 * @param h height of the image
1423 * @param stride stride/linesize of the image
1424 * @param color color of the arrow
1426 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1427 int w, int h, int stride, int color)
/* clip both endpoints into the plane to keep all writes in bounds */
1431 sx = av_clip(sx, 0, w - 1);
1432 sy = av_clip(sy, 0, h - 1);
1433 ex = av_clip(ex, 0, w - 1);
1434 ey = av_clip(ey, 0, h - 1);
1436 buf[sy * stride + sx] += color;
1438 if (FFABS(ex - sx) > FFABS(ey - sy)) {
/* x-major: step along x, interpolate y in 16.16 fixed point */
1440 FFSWAP(int, sx, ex);
1441 FFSWAP(int, sy, ey);
1443 buf += sx + sy * stride;
1445 f = ((ey - sy) << 16) / ex;
1446 for(x= 0; x <= ex; x++){
1448 fr = (x * f) & 0xFFFF;
1449 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1450 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
/* y-major: step along y, interpolate x */
1454 FFSWAP(int, sx, ex);
1455 FFSWAP(int, sy, ey);
1457 buf += sx + sy * stride;
1460 f = ((ex - sx) << 16) / ey;
1463 for(y= 0; y <= ey; y++){
1465 fr = (y*f) & 0xFFFF;
1466 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1467 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
/*
 * Draw an arrow: the shaft via draw_line() plus, when the vector is longer
 * than 3 pixels, two short head strokes at the (sx, sy) end.
 * NOTE(review): listing elided — the dx/dy/rx/ry declarations are missing
 * from this view.
 */
1473 * Draw an arrow from (ex, ey) -> (sx, sy).
1474 * @param w width of the image
1475 * @param h height of the image
1476 * @param stride stride/linesize of the image
1477 * @param color color of the arrow
1479 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1480 int ey, int w, int h, int stride, int color)
/* loose clip: endpoints may be slightly outside; draw_line clips hard */
1484 sx = av_clip(sx, -100, w + 100);
1485 sy = av_clip(sy, -100, h + 100);
1486 ex = av_clip(ex, -100, w + 100);
1487 ey = av_clip(ey, -100, h + 100);
1492 if (dx * dx + dy * dy > 3 * 3) {
/* normalize (rx, ry) to a fixed head length using the vector norm */
1495 int length = ff_sqrt((rx * rx + ry * ry) << 8);
1497 // FIXME subpixel accuracy
1498 rx = ROUNDED_DIV(rx * 3 << 4, length);
1499 ry = ROUNDED_DIV(ry * 3 << 4, length);
1501 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1502 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1504 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
/*
 * Debug output for a decoded picture.  Two independent halves:
 *  1) textual per-macroblock dump to the log (skip count, QP, MB type
 *     glyphs) when the corresponding FF_DEBUG_* flags are set;
 *  2) visual overlays drawn into a copy of the frame (motion-vector
 *     arrows, QP shading, MB-type coloring and partition markers) when
 *     debug_mv / FF_DEBUG_VIS_* are enabled.
 * NOTE(review): this listing is elided (gaps in embedded numbering) —
 * early returns, several variable declarations, `continue`s in the
 * direction-selection switch and many closing braces are not visible.
 */
1508 * Print debugging info for the given picture.
1510 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
1512 if (s->avctx->hwaccel || !pict || !pict->mb_type)
1515 if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1518 av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1519 av_get_picture_type_char(pict->pict_type));
1520 for (y = 0; y < s->mb_height; y++) {
1521 for (x = 0; x < s->mb_width; x++) {
1522 if (s->avctx->debug & FF_DEBUG_SKIP) {
1523 int count = s->mbskip_table[x + y * s->mb_stride];
1526 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1528 if (s->avctx->debug & FF_DEBUG_QP) {
1529 av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1530 pict->qscale_table[x + y * s->mb_stride]);
1532 if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1533 int mb_type = pict->mb_type[x + y * s->mb_stride];
1534 // Type & MV direction
1535 if (IS_PCM(mb_type))
1536 av_log(s->avctx, AV_LOG_DEBUG, "P");
1537 else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1538 av_log(s->avctx, AV_LOG_DEBUG, "A");
1539 else if (IS_INTRA4x4(mb_type))
1540 av_log(s->avctx, AV_LOG_DEBUG, "i");
1541 else if (IS_INTRA16x16(mb_type))
1542 av_log(s->avctx, AV_LOG_DEBUG, "I");
1543 else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1544 av_log(s->avctx, AV_LOG_DEBUG, "d");
1545 else if (IS_DIRECT(mb_type))
1546 av_log(s->avctx, AV_LOG_DEBUG, "D");
1547 else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1548 av_log(s->avctx, AV_LOG_DEBUG, "g");
1549 else if (IS_GMC(mb_type))
1550 av_log(s->avctx, AV_LOG_DEBUG, "G");
1551 else if (IS_SKIP(mb_type))
1552 av_log(s->avctx, AV_LOG_DEBUG, "S");
1553 else if (!USES_LIST(mb_type, 1))
1554 av_log(s->avctx, AV_LOG_DEBUG, ">");
1555 else if (!USES_LIST(mb_type, 0))
1556 av_log(s->avctx, AV_LOG_DEBUG, "<");
1558 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1559 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* second glyph: partition segmentation of the macroblock */
1563 if (IS_8X8(mb_type))
1564 av_log(s->avctx, AV_LOG_DEBUG, "+");
1565 else if (IS_16X8(mb_type))
1566 av_log(s->avctx, AV_LOG_DEBUG, "-");
1567 else if (IS_8X16(mb_type))
1568 av_log(s->avctx, AV_LOG_DEBUG, "|");
1569 else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1570 av_log(s->avctx, AV_LOG_DEBUG, " ");
1572 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* third glyph: interlaced flag */
1575 if (IS_INTERLACED(mb_type))
1576 av_log(s->avctx, AV_LOG_DEBUG, "=");
1578 av_log(s->avctx, AV_LOG_DEBUG, " ");
1580 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1582 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* ---- visual overlays: drawn into a private copy of the frame ---- */
1586 if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1587 (s->avctx->debug_mv)) {
1588 const int shift = 1 + s->quarter_sample;
1592 int h_chroma_shift, v_chroma_shift, block_height;
1593 const int width = s->avctx->width;
1594 const int height = s->avctx->height;
1595 const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1596 const int mv_stride = (s->mb_width << mv_sample_log2) +
1597 (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1598 s->low_delay = 0; // needed to see the vectors without trashing the buffers
1600 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,
1601 &h_chroma_shift, &v_chroma_shift);
/* copy all three planes so the overlay never corrupts reference data */
1602 for (i = 0; i < 3; i++) {
1603 size_t size= (i == 0) ? pict->linesize[i] * height:
1604 pict->linesize[i] * height >> v_chroma_shift;
1605 s->visualization_buffer[i]= av_realloc(s->visualization_buffer[i], size);
1606 memcpy(s->visualization_buffer[i], pict->data[i], size);
1607 pict->data[i] = s->visualization_buffer[i];
1609 pict->type = FF_BUFFER_TYPE_COPY;
1611 ptr = pict->data[0];
1612 block_height = 16 >> v_chroma_shift;
1614 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1616 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1617 const int mb_index = mb_x + mb_y * s->mb_stride;
1618 if ((s->avctx->debug_mv) && pict->motion_val) {
/* type 0/1/2 = P-forward / B-forward / B-backward vector overlays;
 * each is skipped unless its flag is set and the pict type matches */
1620 for (type = 0; type < 3; type++) {
1624 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1625 (pict->pict_type!= AV_PICTURE_TYPE_P))
1630 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1631 (pict->pict_type!= AV_PICTURE_TYPE_B))
1636 if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1637 (pict->pict_type!= AV_PICTURE_TYPE_B))
1642 if (!USES_LIST(pict->mb_type[mb_index], direction))
/* one arrow per 8x8 sub-block */
1645 if (IS_8X8(pict->mb_type[mb_index])) {
1647 for (i = 0; i < 4; i++) {
1648 int sx = mb_x * 16 + 4 + 8 * (i & 1);
1649 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1650 int xy = (mb_x * 2 + (i & 1) +
1651 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1652 int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1653 int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1654 draw_arrow(ptr, sx, sy, mx, my, width,
1655 height, s->linesize, 100);
1657 } else if (IS_16X8(pict->mb_type[mb_index])) {
1659 for (i = 0; i < 2; i++) {
1660 int sx = mb_x * 16 + 8;
1661 int sy = mb_y * 16 + 4 + 8 * i;
1662 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1663 int mx = (pict->motion_val[direction][xy][0] >> shift);
1664 int my = (pict->motion_val[direction][xy][1] >> shift);
1666 if (IS_INTERLACED(pict->mb_type[mb_index]))
1669 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1670 height, s->linesize, 100);
1672 } else if (IS_8X16(pict->mb_type[mb_index])) {
1674 for (i = 0; i < 2; i++) {
1675 int sx = mb_x * 16 + 4 + 8 * i;
1676 int sy = mb_y * 16 + 8;
1677 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1678 int mx = pict->motion_val[direction][xy][0] >> shift;
1679 int my = pict->motion_val[direction][xy][1] >> shift;
1681 if (IS_INTERLACED(pict->mb_type[mb_index]))
1684 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1685 height, s->linesize, 100);
/* default: single 16x16 vector from the MB center */
1688 int sx= mb_x * 16 + 8;
1689 int sy= mb_y * 16 + 8;
1690 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
1691 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1692 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1693 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP visualization: shade chroma by quantizer (max qscale 31) */
1697 if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1698 uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1699 0x0101010101010101ULL;
1701 for (y = 0; y < block_height; y++) {
1702 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1703 (block_height * mb_y + y) *
1704 pict->linesize[1]) = c;
1705 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1706 (block_height * mb_y + y) *
1707 pict->linesize[2]) = c;
/* MB-type visualization: pick a U/V chroma color per MB class */
1710 if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1712 int mb_type = pict->mb_type[mb_index];
1715 #define COLOR(theta, r) \
1716 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1717 v = (int)(128 + r * sin(theta * 3.141592 / 180));
1721 if (IS_PCM(mb_type)) {
1723 } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1724 IS_INTRA16x16(mb_type)) {
1726 } else if (IS_INTRA4x4(mb_type)) {
1728 } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1730 } else if (IS_DIRECT(mb_type)) {
1732 } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1734 } else if (IS_GMC(mb_type)) {
1736 } else if (IS_SKIP(mb_type)) {
1738 } else if (!USES_LIST(mb_type, 1)) {
1740 } else if (!USES_LIST(mb_type, 0)) {
1743 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* replicate the chosen u/v byte across a 64-bit word for fast fill */
1747 u *= 0x0101010101010101ULL;
1748 v *= 0x0101010101010101ULL;
1749 for (y = 0; y < block_height; y++) {
1750 *(uint64_t *)(pict->data[1] + 8 * mb_x +
1751 (block_height * mb_y + y) * pict->linesize[1]) = u;
1752 *(uint64_t *)(pict->data[2] + 8 * mb_x +
1753 (block_height * mb_y + y) * pict->linesize[2]) = v;
/* segmentation lines: XOR 0x80 along partition boundaries */
1757 if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1758 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1759 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1760 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1761 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1763 if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1764 for (y = 0; y < 16; y++)
1765 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1766 pict->linesize[0]] ^= 0x80;
/* for 8x8 partitions with sub-8x8 MVs, mark 4x4 splits where the
 * vectors within the 8x8 block actually differ */
1768 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
1769 int dm = 1 << (mv_sample_log2 - 2);
1770 for (i = 0; i < 4; i++) {
1771 int sx = mb_x * 16 + 8 * (i & 1);
1772 int sy = mb_y * 16 + 8 * (i >> 1);
1773 int xy = (mb_x * 2 + (i & 1) +
1774 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1776 int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
1777 if (mv[0] != mv[dm] ||
1778 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
1779 for (y = 0; y < 8; y++)
1780 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
1781 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
1782 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
1783 pict->linesize[0]) ^= 0x8080808080808080ULL;
1787 if (IS_INTERLACED(mb_type) &&
1788 s->codec_id == CODEC_ID_H264) {
1792 s->mbskip_table[mb_index] = 0;
/*
 * Half-pel motion compensation of one w x h block at reduced (lowres)
 * resolution.  Splits the motion vector into an integer source position
 * and a sub-pel fraction (sx, sy), falls back to the edge-emulation
 * buffer when the block would read outside the padded plane, and applies
 * the chroma MC routine (reused here for luma) for the interpolation.
 * NOTE(review): listing elided — the quarter_sample adjustment body, the
 * emulation condition's trailing arguments and the return are missing.
 */
1798 static inline int hpel_motion_lowres(MpegEncContext *s,
1799 uint8_t *dest, uint8_t *src,
1800 int field_based, int field_select,
1801 int src_x, int src_y,
1802 int width, int height, int stride,
1803 int h_edge_pos, int v_edge_pos,
1804 int w, int h, h264_chroma_mc_func *pix_op,
1805 int motion_x, int motion_y)
1807 const int lowres = s->avctx->lowres;
1808 const int op_index = FFMIN(lowres, 2);
/* sub-pel mask: lowres+1 fractional bits of the motion vector */
1809 const int s_mask = (2 << lowres) - 1;
1813 if (s->quarter_sample) {
1818 sx = motion_x & s_mask;
1819 sy = motion_y & s_mask;
/* note: ">> lowres + 1" parses as ">> (lowres + 1)" */
1820 src_x += motion_x >> lowres + 1;
1821 src_y += motion_y >> lowres + 1;
1823 src += src_y * stride + src_x;
1825 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
1826 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1827 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
1828 (h + 1) << field_based, src_x,
1829 src_y << field_based,
1832 src = s->edge_emu_buffer;
/* rescale the sub-pel fraction to the 1/8-pel range pix_op expects */
1836 sx = (sx << 2) >> lowres;
1837 sy = (sy << 2) >> lowres;
1840 pix_op[op_index](dest, src, stride, h, sx, sy);
/*
 * Apply a single MPEG-style motion vector to all three planes (Y, Cb, Cr)
 * at lowres resolution, handling frame/field addressing, per-out_format
 * chroma vector derivation, and edge emulation for out-of-bounds reads.
 * NOTE(review): listing elided — parts of the parameter list (dest
 * pointers, field_based/bottom_field/field_select, h, mb_y), the
 * quarter_sample and chroma_x/y_shift else-branches and several braces
 * are not visible in this view.
 */
1844 /* apply one mpeg motion vector to the three components */
1845 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1852 uint8_t **ref_picture,
1853 h264_chroma_mc_func *pix_op,
1854 int motion_x, int motion_y,
1857 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1858 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
1860 const int lowres = s->avctx->lowres;
1861 const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2);
1862 const int block_s = 8>>lowres;
1863 const int s_mask = (2 << lowres) - 1;
1864 const int h_edge_pos = s->h_edge_pos >> lowres;
1865 const int v_edge_pos = s->v_edge_pos >> lowres;
1866 linesize = s->current_picture.f.linesize[0] << field_based;
1867 uvlinesize = s->current_picture.f.linesize[1] << field_based;
1869 // FIXME obviously not perfect but qpel will not work in lowres anyway
1870 if (s->quarter_sample) {
/* compensate the vertical offset between top/bottom field lines */
1876 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
1879 sx = motion_x & s_mask;
1880 sy = motion_y & s_mask;
1881 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
1882 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
/* chroma source position depends on the codec family's chroma rules */
1884 if (s->out_format == FMT_H263) {
1885 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
1886 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
1887 uvsrc_x = src_x >> 1;
1888 uvsrc_y = src_y >> 1;
1889 } else if (s->out_format == FMT_H261) {
1890 // even chroma mv's are full pel in H261
1893 uvsx = (2 * mx) & s_mask;
1894 uvsy = (2 * my) & s_mask;
1895 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
1896 uvsrc_y = mb_y * block_s + (my >> lowres);
1898 if(s->chroma_y_shift){
1903 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
1904 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
1906 if(s->chroma_x_shift){
1910 uvsy = motion_y & s_mask;
1912 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1915 uvsx = motion_x & s_mask;
1916 uvsy = motion_y & s_mask;
1923 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1924 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1925 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* out-of-picture read: route all three planes through the emu buffer */
1927 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
1928 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1929 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
1930 s->linesize, 17, 17 + field_based,
1931 src_x, src_y << field_based, h_edge_pos,
1933 ptr_y = s->edge_emu_buffer;
1934 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
1935 uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
1936 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
1938 uvsrc_x, uvsrc_y << field_based,
1939 h_edge_pos >> 1, v_edge_pos >> 1);
1940 s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
1942 uvsrc_x, uvsrc_y << field_based,
1943 h_edge_pos >> 1, v_edge_pos >> 1);
1945 ptr_cr = uvbuf + 16;
1949 // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
/* bottom field: start one line down in both dest and src */
1951 dest_y += s->linesize;
1952 dest_cb += s->uvlinesize;
1953 dest_cr += s->uvlinesize;
1957 ptr_y += s->linesize;
1958 ptr_cb += s->uvlinesize;
1959 ptr_cr += s->uvlinesize;
1962 sx = (sx << 2) >> lowres;
1963 sy = (sy << 2) >> lowres;
1964 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
1966 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
1967 uvsx = (uvsx << 2) >> lowres;
1968 uvsy = (uvsy << 2) >> lowres;
1969 if (h >> s->chroma_y_shift) {
1970 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1971 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1974 // FIXME h261 lowres loop filter
/*
 * Chroma motion compensation for 4MV macroblocks at lowres: the four luma
 * vectors are merged (with H.263 special rounding) into one chroma vector
 * that is applied to both Cb and Cr.
 * NOTE(review): listing elided — the mx/my parameters, the averaging of
 * the four vectors and the emu flag for the second plane are not visible.
 */
1977 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1978 uint8_t *dest_cb, uint8_t *dest_cr,
1979 uint8_t **ref_picture,
1980 h264_chroma_mc_func * pix_op,
1983 const int lowres = s->avctx->lowres;
1984 const int op_index = FFMIN(lowres, 2);
1985 const int block_s = 8 >> lowres;
1986 const int s_mask = (2 << lowres) - 1;
/* chroma planes are half size, hence the extra >> 1 on the edges */
1987 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
1988 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
1989 int emu = 0, src_x, src_y, offset, sx, sy;
1992 if (s->quarter_sample) {
1997 /* In case of 8X8, we construct a single chroma motion vector
1998 with a special rounding */
1999 mx = ff_h263_round_chroma(mx);
2000 my = ff_h263_round_chroma(my);
2004 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2005 src_y = s->mb_y * block_s + (my >> lowres + 1);
2007 offset = src_y * s->uvlinesize + src_x;
2008 ptr = ref_picture[1] + offset;
2009 if (s->flags & CODEC_FLAG_EMU_EDGE) {
2010 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2011 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2012 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
2013 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2014 ptr = s->edge_emu_buffer;
2018 sx = (sx << 2) >> lowres;
2019 sy = (sy << 2) >> lowres;
2020 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr uses the same offset and the same (already computed) emu decision */
2022 ptr = ref_picture[2] + offset;
2024 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
2025 src_x, src_y, h_edge_pos, v_edge_pos);
2026 ptr = s->edge_emu_buffer;
2028 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
/*
 * Lowres motion compensation dispatcher for one macroblock: switches on
 * s->mv_type (16x16 / 8x8 / field / 16x8 / dual-prime) and calls
 * mpeg_motion_lowres()/hpel_motion_lowres() accordingly.
 * NOTE(review): listing elided — the case labels, breaks, mx/my init and
 * some closing braces are missing from this view.
 */
2032 * motion compensation of a single macroblock
2034 * @param dest_y luma destination pointer
2035 * @param dest_cb chroma cb/u destination pointer
2036 * @param dest_cr chroma cr/v destination pointer
2037 * @param dir direction (0->forward, 1->backward)
2038 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2039 * @param pix_op halfpel motion compensation function (average or put normally)
2040 * the motion vectors are taken from s->mv and the MV type from s->mv_type
2042 static inline void MPV_motion_lowres(MpegEncContext *s,
2043 uint8_t *dest_y, uint8_t *dest_cb,
2045 int dir, uint8_t **ref_picture,
2046 h264_chroma_mc_func *pix_op)
2050 const int lowres = s->avctx->lowres;
2051 const int block_s = 8 >>lowres;
2056 switch (s->mv_type) {
/* 16x16: a single vector for the whole macroblock */
2058 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2060 ref_picture, pix_op,
2061 s->mv[dir][0][0], s->mv[dir][0][1],
/* 8x8 (4MV): one luma vector per quadrant, chroma handled separately */
2067 for (i = 0; i < 4; i++) {
2068 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2069 s->linesize) * block_s,
2070 ref_picture[0], 0, 0,
2071 (2 * mb_x + (i & 1)) * block_s,
2072 (2 * mb_y + (i >> 1)) * block_s,
2073 s->width, s->height, s->linesize,
2074 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2075 block_s, block_s, pix_op,
2076 s->mv[dir][i][0], s->mv[dir][i][1]);
/* accumulate the four vectors for the merged chroma vector */
2078 mx += s->mv[dir][i][0];
2079 my += s->mv[dir][i][1];
2082 if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2083 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
/* field MVs: in frame pictures, one MC pass per field */
2087 if (s->picture_structure == PICT_FRAME) {
2089 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2090 1, 0, s->field_select[dir][0],
2091 ref_picture, pix_op,
2092 s->mv[dir][0][0], s->mv[dir][0][1],
2095 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2096 1, 1, s->field_select[dir][1],
2097 ref_picture, pix_op,
2098 s->mv[dir][1][0], s->mv[dir][1][1],
/* field picture referencing the opposite parity of the current frame */
2101 if (s->picture_structure != s->field_select[dir][0] + 1 &&
2102 s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2103 ref_picture = s->current_picture_ptr->f.data;
2106 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2107 0, 0, s->field_select[dir][0],
2108 ref_picture, pix_op,
2110 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
/* 16x8: two vectors, each covering half the macroblock height */
2114 for (i = 0; i < 2; i++) {
2115 uint8_t **ref2picture;
2117 if (s->picture_structure == s->field_select[dir][i] + 1 ||
2118 s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2119 ref2picture = ref_picture;
2121 ref2picture = s->current_picture_ptr->f.data;
2124 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2125 0, 0, s->field_select[dir][i],
2126 ref2picture, pix_op,
2127 s->mv[dir][i][0], s->mv[dir][i][1] +
2128 2 * block_s * i, block_s, mb_y >> 1);
2130 dest_y += 2 * block_s * s->linesize;
2131 dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2132 dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
/* dual prime: put the first prediction, then average in the second */
2136 if (s->picture_structure == PICT_FRAME) {
2137 for (i = 0; i < 2; i++) {
2139 for (j = 0; j < 2; j++) {
2140 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2142 ref_picture, pix_op,
2143 s->mv[dir][2 * i + j][0],
2144 s->mv[dir][2 * i + j][1],
2147 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
2150 for (i = 0; i < 2; i++) {
2151 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2152 0, 0, s->picture_structure != i + 1,
2153 ref_picture, pix_op,
2154 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2155 2 * block_s, mb_y >> 1);
2157 // after put we make avg of the same block
2158 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
2160 // opposite parity is always in the same
2161 // frame if this is second field
2162 if (!s->first_field) {
2163 ref_picture = s->current_picture_ptr->f.data;
/*
 * Return the lowest (largest y) macroblock row that this MB's motion
 * vectors can reference, clamped to the picture; used for frame-thread
 * progress waiting.  Field pictures bail out to "whole frame".
 * NOTE(review): listing elided — the mv_type cases that set `mvs` and the
 * `unhandled:` label are not visible here.
 */
2174 * find the lowest MB row referenced in the MVs
2176 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
/* non-qpel vectors are doubled so all MVs are in quarter-pel units */
2178 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2179 int my, off, i, mvs;
2181 if (s->picture_structure != PICT_FRAME) goto unhandled;
2183 switch (s->mv_type) {
2197 for (i = 0; i < mvs; i++) {
2198 my = s->mv[dir][i][1]<<qpel_shift;
2199 my_max = FFMAX(my_max, my);
2200 my_min = FFMIN(my_min, my);
/* convert the worst-case quarter-pel offset to MB rows (ceil / 64) */
2203 off = (FFMAX(-my_min, my_max) + 63) >> 6;
2205 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2207 return s->mb_height-1;
/* Dequantize an intra block and write (overwrite) its IDCT into dest. */
2210 /* put block[] to dest[] */
2211 static inline void put_dct(MpegEncContext *s,
2212 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2214 s->dct_unquantize_intra(s, block, i, qscale);
2215 s->dsp.idct_put (dest, line_size, block);
/* Add the IDCT of an already-dequantized block onto dest; a negative
 * block_last_index means the block is empty and is skipped. */
2218 /* add block[] to dest[] */
2219 static inline void add_dct(MpegEncContext *s,
2220 DCTELEM *block, int i, uint8_t *dest, int line_size)
2222 if (s->block_last_index[i] >= 0) {
2223 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block 'i' with 'qscale', then add its inverse DCT
 * onto dest[] (residual on top of the motion-compensated prediction).
 * Skipped when the block has no coded coefficients. */
2227 static inline void add_dequant_dct(MpegEncContext *s,
2228 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2230 if (s->block_last_index[i] >= 0) {
2231 s->dct_unquantize_inter(s, block, i, qscale);
2233 s->dsp.idct_add (dest, line_size, block);
2238 * Clean dc, ac, coded_block for the current non-intra MB.
/* Resets the intra-prediction state for the current macroblock after a
 * non-intra MB: DC predictors back to the mid value 1024, AC prediction
 * rows/columns zeroed, coded_block flags cleared (MSMPEG4 v3+ only), and
 * finally the mbintra_table entry cleared.  Luma state is addressed in
 * 8x8-block units via b8_stride; chroma state per-MB via mb_stride. */
2240 void ff_clean_intra_table_entries(MpegEncContext *s)
2242 int wrap = s->b8_stride;
2243 int xy = s->block_index[0];
/* Luma DC predictors of the four 8x8 blocks of this MB. */
2246 s->dc_val[0][xy + 1 ] =
2247 s->dc_val[0][xy + wrap] =
2248 s->dc_val[0][xy + 1 + wrap] = 1024;
/* Luma AC prediction values: two rows of two blocks each (16 int16_t per
 * block -> 32 per row). */
2250 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2251 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2252 if (s->msmpeg4_version>=3) {
2253 s->coded_block[xy ] =
2254 s->coded_block[xy + 1 ] =
2255 s->coded_block[xy + wrap] =
2256 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma state is one entry per macroblock. */
2259 wrap = s->mb_stride;
2260 xy = s->mb_x + s->mb_y * wrap;
2262 s->dc_val[2][xy] = 1024;
2264 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2265 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2267 s->mbintra_table[xy]= 0;
2270 /* generic function called after a macroblock has been parsed by the
2271 decoder or after it has been encoded by the encoder.
2273 Important variables used:
2274 s->mb_intra : true if intra macroblock
2275 s->mv_dir : motion vector direction
2276 s->mv_type : motion vector type
2277 s->mv : motion vector
2278 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* Reconstructs one macroblock into the current picture: motion
 * compensation (full-res or lowres) plus residual IDCT, or intra IDCT.
 * 'is_mpeg12' and 'lowres_flag' are compile-time-constant at the call
 * sites (see ff_MPV_decode_mb) so dead branches are removed by the
 * always_inline expansion. */
2280 static av_always_inline
2281 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2282 int lowres_flag, int is_mpeg12)
2284 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
/* XvMC hardware acceleration handles the MB itself; nothing to do here. */
2285 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2286 ff_xvmc_decode_mb(s);//xvmc uses pblocks
/* Debug aid: dump all 6*64 DCT coefficients of this MB to the log and
 * store them (in natural order, undoing idct_permutation) in dct_coeff. */
2290 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2291 /* save DCT coefficients */
2293 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2294 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2296 for(j=0; j<64; j++){
2297 *dct++ = block[i][s->dsp.idct_permutation[j]];
2298 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2300 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2304 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2306 /* update DC predictors for P macroblocks */
/* For intra MBs (this branch): clear stale intra state if the previous MB
 * at this position was non-intra, and reset the DC predictors. */
2308 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2309 if(s->mbintra_table[mb_xy])
2310 ff_clean_intra_table_entries(s);
/* DC predictor reset; 128 scaled up by the stream's intra DC precision. */
2314 s->last_dc[2] = 128 << s->intra_dc_precision;
/* Non-intra MB: remember that intra state needs cleaning next time. */
2317 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2318 s->mbintra_table[mb_xy]=1;
/* Full reconstruction is skipped only while encoding, for picture types
 * whose pixels are never referenced (unless PSNR reporting needs them). */
2320 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2321 uint8_t *dest_y, *dest_cb, *dest_cr;
2322 int dct_linesize, dct_offset;
2323 op_pixels_func (*op_pix)[4];
2324 qpel_mc_func (*op_qpix)[16];
2325 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2326 const int uvlinesize = s->current_picture.f.linesize[1];
2327 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2328 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2330 /* avoid copy if macroblock skipped in last frame too */
2331 /* skip only during decoding as we might trash the buffers during encoding a bit */
2333 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2335 if (s->mb_skipped) {
2337 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2339 } else if(!s->current_picture.f.reference) {
2342 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT: rows of one 8x8 block come from alternate picture
 * lines, so double the stride and offset the second block pair by one
 * line instead of block_size lines. */
2346 dct_linesize = linesize << s->interlaced_dct;
2347 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
/* Write into the picture directly when its pixels may be read in
 * display/reference order ("readable"), else into a scratchpad that is
 * copied out at the end of the function. */
2351 dest_cb= s->dest[1];
2352 dest_cr= s->dest[2];
2354 dest_y = s->b_scratchpad;
2355 dest_cb= s->b_scratchpad+16*linesize;
2356 dest_cr= s->b_scratchpad+32*linesize;
2360 /* motion handling */
2361 /* decoding or more than one mb_type (MC was already done otherwise) */
/* Frame-threaded decoding: wait until the reference frames are decoded at
 * least down to the lowest row our MVs reach. */
2364 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2365 if (s->mv_dir & MV_DIR_FORWARD) {
2366 ff_thread_await_progress(&s->last_picture_ptr->f,
2367 ff_MPV_lowest_referenced_row(s, 0),
2370 if (s->mv_dir & MV_DIR_BACKWARD) {
2371 ff_thread_await_progress(&s->next_picture_ptr->f,
2372 ff_MPV_lowest_referenced_row(s, 1),
/* Lowres path: put the forward prediction, then average the backward
 * prediction on top for bidirectional MBs. */
2378 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2380 if (s->mv_dir & MV_DIR_FORWARD) {
2381 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2382 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2384 if (s->mv_dir & MV_DIR_BACKWARD) {
2385 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
/* Full-resolution path: same put-then-avg scheme, with rounding control
 * (no_rounding only applies outside B pictures). */
2388 op_qpix= s->me.qpel_put;
2389 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2390 op_pix = s->dsp.put_pixels_tab;
2392 op_pix = s->dsp.put_no_rnd_pixels_tab;
2394 if (s->mv_dir & MV_DIR_FORWARD) {
2395 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2396 op_pix = s->dsp.avg_pixels_tab;
2397 op_qpix= s->me.qpel_avg;
2399 if (s->mv_dir & MV_DIR_BACKWARD) {
2400 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2405 /* skip dequant / idct if we are really late ;) */
2406 if(s->avctx->skip_idct){
2407 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2408 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2409 || s->avctx->skip_idct >= AVDISCARD_ALL)
2413 /* add dct residue */
/* Codecs in this branch keep their blocks quantized until now, so the
 * dequantization happens here (add_dequant_dct). */
2414 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2415 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2416 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2417 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2418 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2419 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
/* CODEC_FLAG_GRAY: skip chroma entirely. */
2421 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2422 if (s->chroma_y_shift){
2423 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2424 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2428 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2429 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2430 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2431 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* These codecs already dequantized during parsing; just add the IDCT. */
2434 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2435 add_dct(s, block[0], 0, dest_y , dct_linesize);
2436 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2437 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2438 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2440 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2441 if(s->chroma_y_shift){//Chroma420
2442 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2443 add_dct(s, block[5], 5, dest_cr, uvlinesize);
/* 4:2:2 / 4:4:4: chroma blocks are also stacked vertically, so recompute
 * the interlaced stride/offset for the chroma plane. */
2446 dct_linesize = uvlinesize << s->interlaced_dct;
2447 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2449 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2450 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2451 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2452 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2453 if(!s->chroma_x_shift){//Chroma444
2454 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2455 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2456 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2457 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
/* WMV2 has its own MB reconstruction (abt transform handling). */
2462 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2463 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2466 /* dct only in intra block */
/* Intra MB: no prediction to add onto.  Non-MPEG-1/2 codecs still need
 * dequantization (put_dct); MPEG-1/2 blocks are ready for idct_put. */
2467 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2468 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2469 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2470 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2471 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2473 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2474 if(s->chroma_y_shift){
2475 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2476 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2480 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2481 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2482 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2483 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2487 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2488 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2489 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2490 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2492 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2493 if(s->chroma_y_shift){
2494 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2495 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2498 dct_linesize = uvlinesize << s->interlaced_dct;
2499 dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
2501 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2502 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2503 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2504 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2505 if(!s->chroma_x_shift){//Chroma444
2506 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2507 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2508 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2509 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
/* Non-readable destination: copy the scratchpad MB into the picture. */
2517 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2518 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2519 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/* Public entry point: dispatches to MPV_decode_mb_internal with
 * compile-time-constant lowres/is_mpeg12 flags so the always_inline body
 * is specialized four ways and dead branches fold away. */
2524 void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2526 if(s->out_format == FMT_MPEG1) {
2527 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2528 else MPV_decode_mb_internal(s, block, 0, 1);
2531 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2532 else MPV_decode_mb_internal(s, block, 0, 0);
2536 * @param h is the normal height, this will be reduced automatically if needed for the last row
/* Called when a horizontal band of rows [y, y+h) of the current picture
 * is finished: pads the picture edges for unrestricted MV codecs, then
 * forwards the band to the user's draw_horiz_band callback if set. */
2538 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2539 const int field_pic= s->picture_structure != PICT_FRAME;
/* Edge padding: only for software decoding of reference frames of
 * unrestricted-MV codecs, and only when edge emulation is not forced. */
2545 if (!s->avctx->hwaccel
2546 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2547 && s->unrestricted_mv
2548 && s->current_picture.f.reference
2550 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2551 int sides = 0, edge_h;
2552 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2553 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
/* Pad left/right always; top/bottom only when the band touches them. */
2554 if (y==0) sides |= EDGE_TOP;
2555 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2557 edge_h= FFMIN(h, s->v_edge_pos - y);
2559 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2560 s->linesize, s->h_edge_pos, edge_h,
2561 EDGE_WIDTH, EDGE_WIDTH, sides);
2562 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2563 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2564 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2565 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2566 s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2567 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
/* Clip the band to the visible picture height for the callback. */
2570 h= FFMIN(h, s->avctx->height - y);
2572 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2574 if (s->avctx->draw_horiz_band) {
2576 int offset[AV_NUM_DATA_POINTERS];
/* B frames and low-delay streams are output in coded order, so the band
 * comes from the current picture; otherwise from the last (displayed)
 * picture. */
2579 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2580 src = &s->current_picture_ptr->f;
2581 else if(s->last_picture_ptr)
2582 src = &s->last_picture_ptr->f;
2586 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2587 for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
/* Byte offsets of the band's first row within each plane. */
2590 offset[0]= y * s->linesize;
2592 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2593 for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2599 s->avctx->draw_horiz_band(s->avctx, src, offset,
2600 y, s->picture_structure, h);
/* Sets up per-MB addressing for the current (mb_x, mb_y): the block_index
 * entries into the 8x8-unit (luma) / per-MB (chroma) side tables, and the
 * dest[] pointers into the current picture's planes.  mb_size accounts
 * for lowres decoding (16 >> lowres pixels per MB). */
2604 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2605 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2606 const int uvlinesize = s->current_picture.f.linesize[1];
2607 const int mb_size= 4 - s->avctx->lowres;
/* Four luma 8x8 blocks (2x2 grid), addressed in b8_stride units;
 * the -2/-1 bias points at the MB to the left of mb_x (advanced later). */
2609 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2610 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2611 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2612 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* Chroma blocks live after the luma area, one entry per MB. */
2613 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2614 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2615 //block_index is not used by mpeg2, so it is not affected by chroma_format
2617 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2618 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2619 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2621 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2623 if(s->picture_structure==PICT_FRAME){
2624 s->dest[0] += s->mb_y * linesize << mb_size;
2625 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2626 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* Field picture: both fields share the frame buffer, so advance by half
 * the MB rows (the field offset is applied elsewhere). */
2628 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2629 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2630 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2631 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/* Flush callback: releases all internally/user-allocated picture buffers,
 * forgets all reference pictures, and resets the parser and bitstream
 * buffer state so decoding can restart cleanly (e.g. after a seek). */
2636 void ff_mpeg_flush(AVCodecContext *avctx){
2638 MpegEncContext *s = avctx->priv_data;
2640 if(s==NULL || s->picture==NULL)
2643 for(i=0; i<s->picture_count; i++){
2644 if (s->picture[i].f.data[0] &&
2645 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2646 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2647 free_frame_buffer(s, &s->picture[i]);
2649 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2651 s->mb_x= s->mb_y= 0;
/* Reset startcode parsing state. */
2654 s->parse_context.state= -1;
2655 s->parse_context.frame_start_found= 0;
2656 s->parse_context.overread= 0;
2657 s->parse_context.overread_index= 0;
2658 s->parse_context.index= 0;
2659 s->parse_context.last_index= 0;
2660 s->bitstream_buffer_size=0;
/* MPEG-1 intra inverse quantization: DC is scaled by the DC scaler, AC
 * coefficients by qscale * intra_matrix[j] >> 3 with the result forced
 * odd ((level-1)|1, MPEG-1 "oddification" mismatch control). */
2664 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2665 DCTELEM *block, int n, int qscale)
2667 int i, level, nCoeffs;
2668 const uint16_t *quant_matrix;
2670 nCoeffs= s->block_last_index[n];
/* Blocks 0-3 are luma, 4+ chroma; each has its own DC scaler. */
2672 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
2673 /* XXX: only mpeg1 */
2674 quant_matrix = s->intra_matrix;
2675 for(i=1;i<=nCoeffs;i++) {
2676 int j= s->intra_scantable.permutated[i];
/* Negative branch (sign restored around these lines, elided here). */
2681 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2682 level = (level - 1) | 1;
/* Positive branch. */
2685 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2686 level = (level - 1) | 1;
/* MPEG-1 inter inverse quantization: ((2*level + 1) * qscale *
 * inter_matrix[j]) >> 4, result forced odd (mismatch control).  Starts at
 * i=0 because inter blocks have no special DC handling. */
2693 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2694 DCTELEM *block, int n, int qscale)
2696 int i, level, nCoeffs;
2697 const uint16_t *quant_matrix;
2699 nCoeffs= s->block_last_index[n];
2701 quant_matrix = s->inter_matrix;
2702 for(i=0; i<=nCoeffs; i++) {
2703 int j= s->intra_scantable.permutated[i];
/* Negative branch (sign handling elided in this view). */
2708 level = (((level << 1) + 1) * qscale *
2709 ((int) (quant_matrix[j]))) >> 4;
2710 level = (level - 1) | 1;
/* Positive branch. */
2713 level = (((level << 1) + 1) * qscale *
2714 ((int) (quant_matrix[j]))) >> 4;
2715 level = (level - 1) | 1;
/* MPEG-2 intra inverse quantization: like the MPEG-1 variant but without
 * oddification (MPEG-2 handles mismatch via sum parity instead).  With
 * alternate scan all 64 coefficients must be processed. */
2722 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2723 DCTELEM *block, int n, int qscale)
2725 int i, level, nCoeffs;
2726 const uint16_t *quant_matrix;
2728 if(s->alternate_scan) nCoeffs= 63;
2729 else nCoeffs= s->block_last_index[n];
2731 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
2732 quant_matrix = s->intra_matrix;
2733 for(i=1;i<=nCoeffs;i++) {
2734 int j= s->intra_scantable.permutated[i];
/* Negative / positive branches (sign handling elided in this view). */
2739 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2742 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* Bit-exact variant of the MPEG-2 intra inverse quantization; same
 * arithmetic as dct_unquantize_mpeg2_intra_c on the visible lines.
 * NOTE(review): the lines that make it bit-exact (presumably the sum
 * parity / mismatch accumulation) are elided in this view — confirm
 * against the full source. */
2749 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2750 DCTELEM *block, int n, int qscale)
2752 int i, level, nCoeffs;
2753 const uint16_t *quant_matrix;
2756 if(s->alternate_scan) nCoeffs= 63;
2757 else nCoeffs= s->block_last_index[n];
2759 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
2761 quant_matrix = s->intra_matrix;
2762 for(i=1;i<=nCoeffs;i++) {
2763 int j= s->intra_scantable.permutated[i];
2768 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2771 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* MPEG-2 inter inverse quantization: ((2*level + 1) * qscale *
 * inter_matrix[j]) >> 4, no oddification (unlike MPEG-1). */
2780 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2781 DCTELEM *block, int n, int qscale)
2783 int i, level, nCoeffs;
2784 const uint16_t *quant_matrix;
2787 if(s->alternate_scan) nCoeffs= 63;
2788 else nCoeffs= s->block_last_index[n];
2790 quant_matrix = s->inter_matrix;
2791 for(i=0; i<=nCoeffs; i++) {
2792 int j= s->intra_scantable.permutated[i];
/* Negative / positive branches (sign handling elided in this view). */
2797 level = (((level << 1) + 1) * qscale *
2798 ((int) (quant_matrix[j]))) >> 4;
2801 level = (((level << 1) + 1) * qscale *
2802 ((int) (quant_matrix[j]))) >> 4;
/* H.263-style intra inverse quantization: level' = level*qmul +/- qadd
 * (sign-dependent) with qadd = (qscale-1)|1; DC is scaled separately.
 * NOTE(review): the qmul assignment is elided in this view. */
2811 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2812 DCTELEM *block, int n, int qscale)
2814 int i, level, qmul, qadd;
2817 assert(s->block_last_index[n]>=0);
2822 block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
2823 qadd = (qscale - 1) | 1;
/* raster_end maps the zigzag last-index to a raster-scan bound so the
 * loop can run in raster order. */
2830 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2832 for(i=1; i<=nCoeffs; i++) {
/* Negative coefficient: subtract qadd; positive: add qadd. */
2836 level = level * qmul - qadd;
2838 level = level * qmul + qadd;
/* H.263-style inter inverse quantization: same level*qmul +/- qadd rule
 * as the intra variant but starting at coefficient 0 (no DC special
 * case).  NOTE(review): the qmul assignment is elided in this view. */
2845 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2846 DCTELEM *block, int n, int qscale)
2848 int i, level, qmul, qadd;
2851 assert(s->block_last_index[n]>=0);
2853 qadd = (qscale - 1) | 1;
2856 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2858 for(i=0; i<=nCoeffs; i++) {
/* Negative coefficient: subtract qadd; positive: add qadd. */
2862 level = level * qmul - qadd;
2864 level = level * qmul + qadd;
2872 * set qscale and update qscale dependent variables.
/* Clamps qscale to the legal range (upper bound 31 visible here; the
 * lower-bound clamp is elided in this view) and refreshes the derived
 * chroma qscale and DC scalers from their lookup tables. */
2874 void ff_set_qscale(MpegEncContext * s, int qscale)
2878 else if (qscale > 31)
2882 s->chroma_qscale= s->chroma_qscale_table[qscale];
2884 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2885 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2888 void ff_MPV_report_decode_progress(MpegEncContext *s)
2890 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->error_occurred)
2891 ff_thread_report_progress(&s->current_picture_ptr->f, s->mb_y, 0);